diff --git a/.cloud66/manifest.yml b/.cloud66/manifest.yml index b31780b1..68d73dc1 100644 --- a/.cloud66/manifest.yml +++ b/.cloud66/manifest.yml @@ -1,4 +1,7 @@ production: &production + elasticsearch: + configuration: + version: 9.0.4 rack: configuration: custom_deploy_command: bin/rake db:migrate diff --git a/.env b/.env index cbd4d820..ff37c72d 100644 --- a/.env +++ b/.env @@ -1,7 +1,11 @@ AWS_REGION=us-east-2 +ELASTICSEARCH_ADDRESS= + ENVELOPE_DOWNLOADS_BUCKET=envelope-downloads +ENVELOPE_GRAPHS_BUCKET= + POSTGRESQL_ADDRESS=localhost POSTGRESQL_USERNAME=metadataregistry POSTGRESQL_PASSWORD=metadataregistry diff --git a/.github/workflows/.github/workflows/restart-deployments.yaml b/.github/workflows/.github/workflows/restart-deployments.yaml new file mode 100644 index 00000000..882d7e5a --- /dev/null +++ b/.github/workflows/.github/workflows/restart-deployments.yaml @@ -0,0 +1,80 @@ +# syntax=docker/dockerfile:1.4 +name: Restart application + +on: + workflow_dispatch: + inputs: + environment: + description: "Target environment (production, staging, sandbox)" + type: choice + required: true + default: staging + options: + - staging + - sandbox + - production + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + EKS_CLUSTER: ce-registry-eks + +jobs: + restart: + if: ${{ github.repository_owner == 'CredentialEngine' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Install kubectl + uses: azure/setup-kubectl@v4 + with: + version: v1.29.6 + + - name: Update kubeconfig + run: | + aws eks update-kubeconfig --name "${{ env.EKS_CLUSTER }}" --region "${{ env.AWS_REGION }}" + + - name: Restart deployments + env: + TARGET_ENV: ${{ inputs.environment }} + run: | + set -euo pipefail + case "$TARGET_ENV" in + staging) NS="credreg-staging" ;; + sandbox) NS="credreg-sandbox" ;; + production) NS="credreg-prod" ;; + *) echo "Unknown environment: $TARGET_ENV" >&2; exit 1 ;; + esac + echo "Restarting deployments in namespace $NS" + kubectl -n "$NS" rollout restart deploy/worker-app + kubectl -n "$NS" rollout restart deploy/main-app + kubectl -n "$NS" rollout status deploy/worker-app --timeout=15m + kubectl -n "$NS" rollout status deploy/main-app --timeout=15m + + - name: Notify Slack (restart) + if: always() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + REPO: ${{ github.repository }} + RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + ENVIRONMENT: ${{ inputs.environment }} + run: | + if [ -z "${SLACK_WEBHOOK_URL}" ]; then + echo "SLACK_WEBHOOK_URL not set; skipping notification"; + exit 0; + fi + STATUS="${{ job.status }}"; EMOJI=✅; [ "$STATUS" = "failure" ] && EMOJI=❌ + MSG="$EMOJI Restart ${STATUS} for ${REPO} (env: ${ENVIRONMENT}). 
${RUN_URL}" + payload=$(jq -nc --arg text "$MSG" '{text:$text}') + curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true diff --git a/.github/workflows/apply-configmap-and-restart.yaml b/.github/workflows/apply-configmap-and-restart.yaml new file mode 100644 index 00000000..a6684b3d --- /dev/null +++ b/.github/workflows/apply-configmap-and-restart.yaml @@ -0,0 +1,97 @@ +# syntax=docker/dockerfile:1.4 +name: Apply configmap and restart + +on: + workflow_dispatch: + inputs: + ref: + description: "Git ref (branch or tag) to check out" + type: string + required: false + default: "master" + environment: + description: "Target environment (production, staging or sandbox)" + type: choice + required: true + default: staging + options: + - staging + - sandbox + - production + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + EKS_CLUSTER: ce-registry-eks + +jobs: + apply-and-restart: + if: ${{ github.repository_owner == 'CredentialEngine' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.ref || github.ref }} + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Install kubectl + uses: azure/setup-kubectl@v4 + with: + version: v1.29.6 + + - name: Update kubeconfig + run: | + aws eks update-kubeconfig --name "${{ env.EKS_CLUSTER }}" --region "${{ env.AWS_REGION }}" + + - name: Apply ConfigMap + working-directory: terraform/environments/eks + run: | + ENV="${{ inputs.environment }}" + case "$ENV" in + production) NS="credreg-prod" && ENV_DIR="k8s-manifests-prod";; + staging) NS="credreg-staging" && ENV_DIR="k8s-manifests-staging" ;; + sandbox) NS="credreg-sandbox" && ENV_DIR="k8s-manifests-sandbox";; + *) echo "Unknown environment: $ENV" >&2; exit 1 ;; + esac + echo "Applying ConfigMap from $ENV_DIR to namespace $NS" + kubectl -n "$NS" apply -f "$ENV_DIR/app-configmap.yaml" + + - name: Restart Deployments + run: | + ENV="${{ inputs.environment }}" + case "$ENV" in + staging) NS="credreg-staging" ;; + sandbox) NS="credreg-sandbox" ;; + production) NS="credreg-prod" ;; + *) echo "Unknown environment: $ENV" >&2; exit 1 ;; + esac + kubectl -n "$NS" rollout restart deploy/worker-app + kubectl -n "$NS" rollout restart deploy/main-app + kubectl -n "$NS" rollout status deploy/worker-app --timeout=10m + kubectl -n "$NS" rollout status deploy/main-app --timeout=10m + + - name: Notify Slack (configmap apply) + if: always() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + REPO: ${{ github.repository }} + RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + ENVIRONMENT: ${{ inputs.environment }} + run: | + if [ -z "${SLACK_WEBHOOK_URL}" ]; then + echo "SLACK_WEBHOOK_URL not set; skipping notification"; + exit 0; + fi + STATUS="${{ job.status }}"; EMOJI=✅; [ "$STATUS" = "failure" ] && EMOJI=❌ + MSG="$EMOJI ConfigMap apply ${STATUS} for ${REPO} (env: ${ENVIRONMENT}). 
${RUN_URL}" + payload=$(jq -nc --arg text "$MSG" '{text:$text}') + curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml new file mode 100644 index 00000000..2d2e1518 --- /dev/null +++ b/.github/workflows/build.yaml @@ -0,0 +1,156 @@ +# syntax=docker/dockerfile:1.4 +name: Build and push + +on: + push: + branches: ["eks-infrastructure","staging","main","master","production","sandbox"] + paths-ignore: + - ".github/**" + - "terraform/**" + + workflow_dispatch: + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + ECR_REPOSITORY: registry + EKS_CLUSTER: ce-registry-eks + +concurrency: + group: eks-cluster-image-build + cancel-in-progress: true + +jobs: + build-and-push: + if: ${{ github.repository_owner == 'CredentialEngine' }} + runs-on: ubuntu-latest + outputs: + image: ${{ steps.img.outputs.image }} + + steps: + - name: Checkout code (with submodules) + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + + - name: Verify submodules present + run: | + git submodule status + if [ ! -d vendor/grape-middleware-logger ]; then + echo "Submodule vendor/grape-middleware-logger is missing" >&2 + exit 1 + fi + ls -la vendor/grape-middleware-logger | sed -n '1,50p' + + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Compute image tag (date.build) + id: tag + run: | + DATE_TAG=$(date -u +%Y.%m.%d) + BUILD_NUM=$(printf "%04d" $(( GITHUB_RUN_NUMBER % 10000 )) ) + TAG="$DATE_TAG.$BUILD_NUM" + echo "tag=$TAG" >> "$GITHUB_OUTPUT" + + - name: Compute ref tag (branch name) + id: ref + run: | + REF_TAG=$(echo "${GITHUB_REF_NAME}" | tr '[:upper:]' '[:lower:]' | sed -E 's#[^a-z0-9._-]+#-#g') + echo "ref_tag=$REF_TAG" >> "$GITHUB_OUTPUT" + + - name: Build Docker image (multi-stage) + id: build + uses: docker/build-push-action@v5 + with: + context: . 
+ file: Dockerfile + platforms: linux/amd64 + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tag.outputs.tag }} + ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.ref.outputs.ref_tag }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Export image URI + id: img + run: | + echo "image=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tag.outputs.tag }}" >> "$GITHUB_OUTPUT" + + - name: Notify Slack (build result) + if: always() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + REPO: ${{ github.repository }} + RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + BRANCH: ${{ github.ref_name }} + IMAGE_DATE: ${{ steps.tag.outputs.tag }} + IMAGE_BRANCH: ${{ steps.ref.outputs.ref_tag }} + DIGEST: ${{ steps.build.outputs.digest }} + run: | + if [ -z "${SLACK_WEBHOOK_URL}" ]; then + echo "SLACK_WEBHOOK_URL not set; skipping notification"; + exit 0; + fi + STATUS="${{ job.status }}" + EMOJI=✅; [ "$STATUS" = "failure" ] && EMOJI=❌ + DIGEST_TEXT="${DIGEST:-N/A}" + DEPLOY_URL="https://github.com/${{ github.repository }}/actions/workflows/deploy.yaml" + payload=$(jq -n \ + --arg repo "$REPO" \ + --arg branch "$BRANCH" \ + --arg tag_date "$IMAGE_DATE" \ + --arg tag_branch "$IMAGE_BRANCH" \ + --arg digest "$DIGEST_TEXT" \ + --arg run "$RUN_URL" \ + --arg status "$STATUS" \ + --arg emoji "$EMOJI" \ + --arg deploy "$DEPLOY_URL" \ + '{ + text: "\($emoji) Build \($status) for \($repo) (\($branch))", + blocks: [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "\($emoji) Build \($status) for \($branch)", + "emoji": true + } + }, + { + "type": "section", + "fields": [ + {"type":"mrkdwn", "text": "*Repository:*\n\($repo)"}, + {"type":"mrkdwn", "text": "*Branch:*\n\($branch)"}, + {"type":"mrkdwn", "text": "*Tag (date.build):*\n\($tag_date)"}, + {"type":"mrkdwn", "text": "*Tag (branch):*\n\($tag_branch)"}, + {"type":"mrkdwn", "text": "*Digest:*\n\($digest)"} + ] + }, + { + "type":"section", + "text":{"type":"mrkdwn","text":"<\($run)|View run>"} + }, + { + "type":"section", + "text":{"type":"mrkdwn","text":"Ready to deploy? 
Launch the workflow: <\($deploy)|Deploy image>"} + } + ] + }') + curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true diff --git a/.github/workflows/cluster-status.yaml b/.github/workflows/cluster-status.yaml new file mode 100644 index 00000000..e67cfd4e --- /dev/null +++ b/.github/workflows/cluster-status.yaml @@ -0,0 +1,110 @@ +name: Cluster Status + +on: + workflow_dispatch: + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + EKS_CLUSTER: ce-registry-eks + +jobs: + status: + if: ${{ github.repository_owner == 'CredentialEngine' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Install kubectl + uses: azure/setup-kubectl@v4 + with: + version: v1.29.6 + + - name: Update kubeconfig + run: | + aws eks update-kubeconfig --name "${{ env.EKS_CLUSTER }}" --region "${{ env.AWS_REGION }}" + + - name: Show nodes summary + run: | + kubectl get nodes -o wide -L env || true + + - name: Show credreg-staging status + run: | + NS=credreg-staging + echo "===== Namespace: $NS =====" + { + echo "# Pods"; + kubectl -n $NS get pods; + echo; + echo "# Deployments"; + kubectl -n $NS get deploy; + echo; + echo "# Images"; + echo -n "main-app image: "; kubectl -n $NS get deploy/main-app -o jsonpath='{.spec.template.spec.containers[?(@.name=="main-app")].image}'; echo; + echo -n "worker-app image: "; kubectl -n $NS get deploy/worker-app -o jsonpath='{.spec.template.spec.containers[?(@.name=="worker")].image}'; echo; + } | tee status-staging.txt + + - name: Show credreg-sandbox status + run: | + NS=credreg-sandbox + echo "===== Namespace: $NS =====" + { + echo "# Pods"; + kubectl -n $NS get pods; + echo; + echo "# Deployments"; + kubectl -n $NS get deploy; + echo; + echo "# Images"; + echo -n "main-app image: "; kubectl -n $NS get deploy/main-app -o jsonpath='{.spec.template.spec.containers[?(@.name=="main-app")].image}'; echo; + echo -n "worker-app image: "; kubectl -n $NS get deploy/worker-app -o jsonpath='{.spec.template.spec.containers[?(@.name=="worker")].image}'; echo; + } | tee status-sandbox.txt + + - name: Show credreg-prod status + run: | + NS=credreg-prod + echo "===== Namespace: $NS =====" + { + echo "# Pods"; + kubectl -n $NS get pods; + echo; + echo "# Deployments"; + kubectl -n $NS get deploy; + } | tee status-prod.txt + + - name: Notify Slack (cluster status) + if: always() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + REPO: ${{ github.repository }} + RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + run: | + if [ -z "${SLACK_WEBHOOK_URL}" ]; then + echo "SLACK_WEBHOOK_URL not set; skipping notification"; + exit 0; + fi + STATUS="${{ job.status }}"; EMOJI=✅; [ "$STATUS" = "failure" ] && EMOJI=❌ + MSG="$EMOJI Cluster status job ${STATUS} for ${REPO}. 
${RUN_URL}" + STAGING=$(sed -n '1,80p' status-staging.txt 2>/dev/null | sed 's/"/\"/g') + SANDBOX=$(sed -n '1,80p' status-sandbox.txt 2>/dev/null | sed 's/"/\"/g') + PRODUCTION=$(sed -n '1,80p' status-prod.txt 2>/dev/null | sed 's/"/\"/g') + payload=$(jq -nc --arg text "$MSG" --arg staging "$STAGING" --arg sandbox "$SANDBOX" --arg production "$PRODUCTION" '{ + text: $text, + blocks: [ + {type:"section", text:{type:"mrkdwn", text:$text}}, + {type:"section", text:{type:"mrkdwn", text:("*credreg-staging*\n```\n"+$staging+"\n```")}}, + {type:"section", text:{type:"mrkdwn", text:("*credreg-sandbox*\n```\n"+$sandbox+"\n```")}}, + {type:"section", text:{type:"mrkdwn", text:("*credreg-prod*\n```\n"+$production+"\n```")}} + ] + }') + curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true diff --git a/.github/workflows/deploy-branch-to-env.yaml b/.github/workflows/deploy-branch-to-env.yaml new file mode 100644 index 00000000..c331d253 --- /dev/null +++ b/.github/workflows/deploy-branch-to-env.yaml @@ -0,0 +1,177 @@ +name: Build branch and deploy to env + +on: + workflow_dispatch: + inputs: + branch: + description: "Git ref (branch or tag) to build and deploy" + required: true + environment: + description: "Target environment" + type: choice + required: true + default: staging + options: + - staging + - sandbox + - production + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + ECR_REPOSITORY: registry + EKS_CLUSTER: ce-registry-eks + +jobs: + build-and-deploy: + if: ${{ github.repository_owner == 'CredentialEngine' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.branch }} + fetch-depth: 0 + submodules: recursive + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Compute image tags + id: tags + run: | + REF_TAG=$(echo "${{ inputs.branch }}" | sed 's#refs/heads/##' | tr '[:upper:]' '[:lower:]' | sed -E 's#[^a-z0-9._-]+#-#g') + DATE_TAG=$(date -u +%Y.%m.%d).$(printf "%04d" $(( GITHUB_RUN_NUMBER % 10000 )) ) + echo "ref_tag=$REF_TAG" >> "$GITHUB_OUTPUT" + echo "date_tag=$DATE_TAG" >> "$GITHUB_OUTPUT" + + - name: Build and push image + uses: docker/build-push-action@v5 + with: + context: . 
+ file: Dockerfile + platforms: linux/amd64 + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.ref_tag }} + ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.date_tag }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Install kubectl + uses: azure/setup-kubectl@v4 + with: + version: v1.29.6 + + - name: Update kubeconfig + run: | + aws eks update-kubeconfig --name "${{ env.EKS_CLUSTER }}" --region "${{ env.AWS_REGION }}" + + - name: Deploy to selected environment (set images) + env: + IMAGE: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.ref_tag }} + run: | + ENV="${{ inputs.environment }}" + case "$ENV" in + staging) NS="credreg-staging" ; ENV_DIR="k8s-manifests-staging" ;; + sandbox) NS="credreg-sandbox" ; ENV_DIR="k8s-manifests-sandbox" ;; + production) NS="credreg-prod" ; ENV_DIR="k8s-manifests-prod" ;; + *) echo "Unknown environment: $ENV" >&2; exit 1 ;; + esac + echo "Updating deployments in $NS to image $IMAGE" + kubectl -n "$NS" set image deploy/main-app main-app="$IMAGE" + kubectl -n "$NS" set image deploy/worker-app worker="$IMAGE" + kubectl -n "$NS" rollout status deploy/main-app --timeout=10m + kubectl -n "$NS" rollout status deploy/worker-app --timeout=10m + + echo "Applying ConfigMap for $ENV" + pushd terraform/environments/eks > /dev/null + kubectl -n "$NS" apply -f "$ENV_DIR/app-configmap.yaml" + popd > /dev/null + + echo "Restarting deployments to pick up config changes" + kubectl -n "$NS" rollout restart deploy/worker-app + kubectl -n "$NS" rollout restart deploy/main-app + kubectl -n "$NS" rollout status deploy/worker-app --timeout=10m + kubectl -n "$NS" rollout status deploy/main-app --timeout=10m + + - name: Run DB migrations + env: + IMAGE: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.ref_tag }} + run: | + set -euo pipefail + ENV="${{ inputs.environment }}" + case "$ENV" in + staging) + NS="credreg-staging" + MANIFEST="terraform/environments/eks/k8s-manifests-staging/db-migrate-job.yaml" + ;; + sandbox) + NS="credreg-sandbox" + MANIFEST="terraform/environments/eks/k8s-manifests-sandbox/db-migrate-job.yaml" + ;; + production) + NS="credreg-prod" + MANIFEST="terraform/environments/eks/k8s-manifests-prod/db-migrate-job.yaml" + ;; + *) + echo "Unknown environment: $ENV" >&2 + exit 1 + ;; + esac + + if [ ! 
-f "$MANIFEST" ]; then + echo "Migration manifest $MANIFEST not found; skipping" + exit 0 + fi + + IMAGE_VALUE="${IMAGE:-}" + if [ -z "$IMAGE_VALUE" ]; then + echo "IMAGE env not set; reading current image from deploy/main-app" + IMAGE_VALUE=$(kubectl -n "$NS" get deploy/main-app -o jsonpath='{.spec.template.spec.containers[?(@.name=="main-app")].image}') + fi + if [ -z "$IMAGE_VALUE" ]; then + echo "Unable to determine image for migrations" >&2 + exit 1 + fi + + echo "Launching DB migration job in $NS with image $IMAGE_VALUE" + JOB_NAME=$( + sed -e "s#namespace: .*#namespace: $NS#" \ + -e "s#image: .*#image: $IMAGE_VALUE#" "$MANIFEST" | + kubectl -n "$NS" create -f - -o name | sed 's|job.batch/||' + ) + kubectl -n "$NS" wait --for=condition=complete "job/$JOB_NAME" --timeout=10m + kubectl -n "$NS" logs -f "job/$JOB_NAME" --all-containers=true + + - name: Notify Slack (deploy) + if: always() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + REPO: ${{ github.repository }} + RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + ENVIRONMENT: ${{ inputs.environment }} + BRANCH: ${{ inputs.branch }} + IMAGE: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.ref_tag }} + run: | + if [ -z "${SLACK_WEBHOOK_URL}" ]; then + echo "SLACK_WEBHOOK_URL not set; skipping notification"; + exit 0; + fi + STATUS="${{ job.status }}"; EMOJI=✅; [ "$STATUS" = "failure" ] && EMOJI=❌ + MSG="$EMOJI Deploy ${STATUS} for ${REPO} (env: ${ENVIRONMENT}, branch: ${BRANCH}). Image: ${IMAGE}. ${RUN_URL}" + payload=$(jq -nc --arg text "$MSG" '{text:$text}') + curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true diff --git a/.github/workflows/deploy-feature-to-staging.yaml b/.github/workflows/deploy-feature-to-staging.yaml new file mode 100644 index 00000000..2412f966 --- /dev/null +++ b/.github/workflows/deploy-feature-to-staging.yaml @@ -0,0 +1,134 @@ +name: Deploy feature branch to staging environment + +on: + workflow_dispatch: + inputs: + branch: + description: "Git ref (branch or tag) to build and deploy" + required: true + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + ECR_REPOSITORY: registry + EKS_CLUSTER: ce-registry-eks + +jobs: + build-and-deploy: + if: ${{ github.repository_owner == 'CredentialEngine' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.branch }} + fetch-depth: 0 + submodules: recursive + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Compute image tags + id: tags + run: | + REF_TAG=$(echo "${{ inputs.branch }}" | sed 's#refs/heads/##' | tr '[:upper:]' '[:lower:]' | sed -E 's#[^a-z0-9._-]+#-#g') + DATE_TAG=$(date -u +%Y.%m.%d).$(printf "%04d" $(( GITHUB_RUN_NUMBER % 10000 )) ) + echo "ref_tag=$REF_TAG" >> "$GITHUB_OUTPUT" + echo "date_tag=$DATE_TAG" >> "$GITHUB_OUTPUT" + + - name: Build and push image + uses: docker/build-push-action@v5 + with: + context: . 
+ file: Dockerfile + platforms: linux/amd64 + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.ref_tag }} + ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.date_tag }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Install kubectl + uses: azure/setup-kubectl@v4 + with: + version: v1.29.6 + + - name: Update kubeconfig + run: | + aws eks update-kubeconfig --name "${{ env.EKS_CLUSTER }}" --region "${{ env.AWS_REGION }}" + + - name: Deploy to credreg-staging (set images) + env: + IMAGE: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.ref_tag }} + run: | + NS=credreg-staging + echo "Updating deployments in $NS to image $IMAGE" + kubectl -n "$NS" set image deploy/main-app main-app="$IMAGE" + kubectl -n "$NS" set image deploy/worker-app worker="$IMAGE" + kubectl -n "$NS" rollout status deploy/main-app --timeout=10m + kubectl -n "$NS" rollout status deploy/worker-app --timeout=10m + + - name: Run DB migrations (staging) + env: + IMAGE: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.ref_tag }} + run: | + set -euo pipefail + NS="credreg-staging" + MANIFEST="terraform/environments/eks/k8s-manifests-staging/db-migrate-job.yaml" + + if [ ! -f "$MANIFEST" ]; then + echo "Migration manifest $MANIFEST not found; skipping" + exit 0 + fi + + IMAGE_VALUE="${IMAGE:-}" + if [ -z "$IMAGE_VALUE" ]; then + echo "IMAGE env not set; reading current image from deploy/main-app" + IMAGE_VALUE=$(kubectl -n "$NS" get deploy/main-app -o jsonpath='{.spec.template.spec.containers[?(@.name=="main-app")].image}') + fi + if [ -z "$IMAGE_VALUE" ]; then + echo "Unable to determine image for migrations" >&2 + exit 1 + fi + + echo "Launching DB migration job in $NS with image $IMAGE_VALUE" + JOB_NAME=$( + sed -e "s#namespace: .*#namespace: $NS#" \ + -e "s#image: .*#image: $IMAGE_VALUE#" "$MANIFEST" | + kubectl -n "$NS" create -f - -o name | sed 's|job.batch/||' + ) + kubectl -n "$NS" wait --for=condition=complete "job/$JOB_NAME" --timeout=10m + kubectl -n "$NS" logs -f "job/$JOB_NAME" --all-containers=true + + - name: Notify Slack (deploy) + if: always() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + REPO: ${{ github.repository }} + RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + ENVIRONMENT: staging + BRANCH: ${{ inputs.branch }} + IMAGE: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ steps.tags.outputs.ref_tag }} + run: | + if [ -z "${SLACK_WEBHOOK_URL}" ]; then + echo "SLACK_WEBHOOK_URL not set; skipping notification"; + exit 0; + fi + STATUS="${{ job.status }}"; EMOJI=✅; [ "$STATUS" = "failure" ] && EMOJI=❌ + MSG="$EMOJI Deploy ${STATUS} for ${REPO} (env: ${ENVIRONMENT}, branch: ${BRANCH}). Image: ${IMAGE}. ${RUN_URL}" + payload=$(jq -nc --arg text "$MSG" '{text:$text}') + curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml new file mode 100644 index 00000000..83bbd098 --- /dev/null +++ b/.github/workflows/deploy.yaml @@ -0,0 +1,140 @@ +name: Deploy image + +on: + workflow_dispatch: + inputs: + image_label: + description: "Image label/tag to deploy (e.g. 
staging, production, sandbox, 2025.11.05.0001 or branch tag)" + type: string + required: true + environment: + description: "Target environment" + type: choice + required: true + options: + - staging + - sandbox + - production + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + EKS_CLUSTER: ce-registry-eks + ECR_URI: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry + +jobs: + deploy: + if: ${{ github.repository_owner == 'CredentialEngine' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Install kubectl + uses: azure/setup-kubectl@v4 + with: + version: v1.29.6 + + - name: Update kubeconfig + run: | + aws eks update-kubeconfig --name "${{ env.EKS_CLUSTER }}" --region "${{ env.AWS_REGION }}" + + - name: Deploy image to selected environment + env: + IMAGE: ${{ env.ECR_URI }}:${{ inputs.image_label }} + run: | + ENV="${{ inputs.environment }}" + case "$ENV" in + staging) NS="credreg-staging" ;; + sandbox) NS="credreg-sandbox" ;; + production) NS="credreg-prod" ;; + *) echo "Unknown environment: $ENV" >&2; exit 1 ;; + esac + echo "Deploying image $IMAGE to namespace $NS" + kubectl -n "$NS" set image deploy/main-app main-app="$IMAGE" + kubectl -n "$NS" set image deploy/worker-app worker="$IMAGE" + kubectl -n "$NS" rollout status deploy/main-app --timeout=10m + kubectl -n "$NS" rollout status deploy/worker-app --timeout=10m + + echo "Forcing rollout restart to ensure pods pick up latest image/config" + kubectl -n "$NS" rollout restart deploy/worker-app + kubectl -n "$NS" rollout restart deploy/main-app + kubectl -n "$NS" rollout status deploy/worker-app --timeout=10m + kubectl -n "$NS" rollout status deploy/main-app --timeout=10m + + + - name: Run DB migrations + env: + IMAGE: ${{ inputs.image }} + run: | + set -euo pipefail + ENV="${{ inputs.environment }}" + case "$ENV" in + staging) + NS="credreg-staging" + MANIFEST="terraform/environments/eks/k8s-manifests-staging/db-migrate-job.yaml" + ;; + sandbox) + NS="credreg-sandbox" + MANIFEST="terraform/environments/eks/k8s-manifests-sandbox/db-migrate-job.yaml" + ;; + production) + NS="credreg-prod" + MANIFEST="terraform/environments/eks/k8s-manifests-prod/db-migrate-job.yaml" + ;; + *) + echo "Unknown environment: $ENV" >&2 + exit 1 + ;; + esac + + if [ ! 
-f "$MANIFEST" ]; then + echo "Migration manifest $MANIFEST not found; skipping" + exit 0 + fi + + IMAGE_VALUE="${IMAGE:-}" + if [ -z "$IMAGE_VALUE" ]; then + echo "IMAGE env not set; reading current image from deploy/main-app" + IMAGE_VALUE=$(kubectl -n "$NS" get deploy/main-app -o jsonpath='{.spec.template.spec.containers[?(@.name=="main-app")].image}') + fi + if [ -z "$IMAGE_VALUE" ]; then + echo "Unable to determine image for migrations" >&2 + exit 1 + fi + + echo "Launching DB migration job in $NS with image $IMAGE_VALUE" + JOB_NAME=$( + sed -e "s#namespace: .*#namespace: $NS#" \ + -e "s#image: .*#image: $IMAGE_VALUE#" "$MANIFEST" | + kubectl -n "$NS" create -f - -o name | sed 's|job.batch/||' + ) + kubectl -n "$NS" wait --for=condition=complete "job/$JOB_NAME" --timeout=10m + kubectl -n "$NS" logs -f "job/$JOB_NAME" --all-containers=true + + - name: Notify Slack (deploy) + if: always() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + REPO: ${{ github.repository }} + RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + ENVIRONMENT: ${{ inputs.environment }} + IMAGE: ${{ env.ECR_URI }}:${{ inputs.image_label }} + run: | + if [ -z "${SLACK_WEBHOOK_URL}" ]; then + echo "SLACK_WEBHOOK_URL not set; skipping notification"; + exit 0; + fi + STATUS="${{ job.status }}"; EMOJI=✅; [ "$STATUS" = "failure" ] && EMOJI=❌ + MSG="$EMOJI Deploy ${STATUS} for ${REPO} (env: ${ENVIRONMENT}). Image: ${IMAGE}. ${RUN_URL}" + payload=$(jq -nc --arg text "$MSG" '{text:$text}') + curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true diff --git a/.github/workflows/sast.yaml b/.github/workflows/sast.yaml new file mode 100644 index 00000000..110945fb --- /dev/null +++ b/.github/workflows/sast.yaml @@ -0,0 +1,54 @@ +name: Semgrep SAST + +on: + pull_request: + workflow_dispatch: + +permissions: + contents: read + +jobs: + semgrep: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Run Semgrep (p/default on app, lib, config) + run: | + docker run --rm \ + -v "$PWD:/src" \ + returntocorp/semgrep \ + semgrep scan \ + --config p/default \ + --include app \ + --include lib \ + --include config \ + --json -o semgrep.json \ + --error || true + # Print a short summary to logs + jq -r '"Semgrep findings: \(.results|length)"' semgrep.json || true + + - name: Upload Semgrep report + uses: actions/upload-artifact@v4 + with: + name: semgrep-report + path: semgrep.json + + - name: Export Semgrep SARIF + if: always() + run: | + docker run --rm \ + -v "$PWD:/src" \ + returntocorp/semgrep \ + semgrep scan \ + --config p/r2c-security-audit \ + --include app \ + --include lib \ + --sarif -o semgrep.sarif || true + + - name: Upload Semgrep SARIF + if: always() + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: semgrep.sarif diff --git a/.github/workflows/skooner-token.yaml b/.github/workflows/skooner-token.yaml new file mode 100644 index 00000000..920a08fa --- /dev/null +++ b/.github/workflows/skooner-token.yaml @@ -0,0 +1,186 @@ +name: Generate Skooner token + +on: + workflow_dispatch: + inputs: + duration: + description: "Token duration" + type: choice + required: true + default: 2h + options: + - 30m + - 2h + - 6h + whitelist_source_range: + description: "Optional NGINX whitelist-source-range (CIDR(s), comma-separated)" + type: string + required: false + role: + description: "Access level for skooner-sa" + type: choice + required: true + default: view + options: + - view + - 
developer + - cluster-admin + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + EKS_CLUSTER: ce-registry-eks + SA_NAMESPACE: kube-system + SA_NAME: skooner-sa + +jobs: + token: + if: ${{ github.repository_owner == 'CredentialEngine' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Install kubectl + uses: azure/setup-kubectl@v4 + with: + version: v1.29.6 + + - name: Update kubeconfig + run: | + aws eks update-kubeconfig --name "${{ env.EKS_CLUSTER }}" --region "${{ env.AWS_REGION }}" + + - name: Ensure RBAC for skooner-sa + id: rbac + env: + ROLE: ${{ inputs.role }} + run: | + NS="${SA_NAMESPACE}" + SA="${SA_NAME}" + echo "Requested role: ${ROLE}" + if [ "${ROLE}" = "cluster-admin" ]; then + kubectl create clusterrolebinding "${SA}" \ + --clusterrole="cluster-admin" \ + --serviceaccount="${NS}:${SA}" \ + --dry-run=client -o yaml | kubectl apply --validate=false -f - + echo "binding_kind=ClusterRoleBinding" >> "$GITHUB_OUTPUT" + elif [ "${ROLE}" = "developer" ]; then + # Cluster-wide read-only via view + kubectl create clusterrolebinding "${SA}-developer-view" \ + --clusterrole="view" \ + --serviceaccount="${NS}:${SA}" \ + --dry-run=client -o yaml | kubectl apply --validate=false -f - + # Namespaced edit in sandbox and staging + for TNS in credreg-sandbox credreg-staging; do + kubectl -n "$TNS" create rolebinding "${SA}-developer-edit" \ + --clusterrole="edit" \ + --serviceaccount="${NS}:${SA}" \ + --dry-run=client -o yaml | kubectl apply --validate=false -f - + done + # Ensure no lingering admin CRB remains + kubectl delete clusterrolebinding "${SA}" --ignore-not-found + echo "binding_kind=developer" >> "$GITHUB_OUTPUT" + echo "binding_namespace=cluster-wide,credreg-staging,credreg-sandbox" >> "$GITHUB_OUTPUT" + else + # Ensure no lingering admin CRB remains + kubectl delete clusterrolebinding "${SA}" --ignore-not-found + # Remove developer bindings if present + kubectl delete clusterrolebinding "${SA}-developer-view" --ignore-not-found || true + for TNS in credreg-sandbox credreg-staging; do + kubectl -n "$TNS" delete rolebinding "${SA}-developer-edit" --ignore-not-found || true + done + echo "binding_kind=view-only" >> "$GITHUB_OUTPUT" + echo "binding_namespace=-" >> "$GITHUB_OUTPUT" + fi + echo "binding_namespace=${NS}" >> "$GITHUB_OUTPUT" + + - name: Update Skooner Ingress whitelist (optional) + if: ${{ inputs.whitelist_source_range != '' }} + env: + WHITELIST: ${{ inputs.whitelist_source_range }} + run: | + echo "Setting whitelist-source-range to: ${WHITELIST}" + kubectl -n kube-system annotate ingress skooner-ingress \ + nginx.ingress.kubernetes.io/whitelist-source-range="${WHITELIST}" --overwrite + + - name: Create Skooner token + id: tok + env: + DURATION: ${{ inputs.duration }} + SA_NAMESPACE: ${{ env.SA_NAMESPACE }} + SA_NAME: ${{ env.SA_NAME }} + run: | + SA_NS="${SA_NAMESPACE:-kube-system}" + echo "Creating token for SA ${SA_NS}/${SA_NAME} (duration ${DURATION})" + TOKEN=$(kubectl -n "${SA_NS}" create token "${SA_NAME}" --duration="${DURATION}") + # Do not echo token to logs; only set as output + echo "token=${TOKEN}" >> "$GITHUB_OUTPUT" + echo "sa_ns=${SA_NS}" >> "$GITHUB_OUTPUT" + + - name: Send token to Slack + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + REPO: ${{ 
github.repository }} + RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + ACTOR: ${{ github.actor }} + DURATION: ${{ inputs.duration }} + ROLE: ${{ inputs.role }} + WHITELIST: ${{ inputs.whitelist_source_range }} + BINDING_KIND: ${{ steps.rbac.outputs.binding_kind }} + BINDING_NS: ${{ steps.rbac.outputs.binding_namespace }} + SA_NAME: ${{ env.SA_NAME }} + SA_NS: ${{ steps.tok.outputs.sa_ns }} + TOKEN: ${{ steps.tok.outputs.token }} + run: | + if [ -z "${SLACK_WEBHOOK_URL}" ]; then + echo "SLACK_WEBHOOK_URL not set; skipping notification"; + exit 0; + fi + if [ -z "${ROLE}" ]; then ROLE="cluster-admin"; fi + if [ -z "${SA_NAME}" ]; then SA_NAME="skooner-sa"; fi + # Build Slack blocks payload with code block for the token (jq string interpolation) + payload=$(jq -nc \ + --arg repo "$REPO" \ + --arg run "$RUN_URL" \ + --arg actor "$ACTOR" \ + --arg duration "$DURATION" \ + --arg role "$ROLE" \ + --arg ns "$BINDING_NS" \ + --arg wl "${WHITELIST:-}" \ + --arg token "$TOKEN" \ + ' + { + text: "Skooner token generated", + blocks: [ + { "type": "header", "text": { "type": "plain_text", "text": "Skooner token" } }, + { "type": "section", "fields": [ + {"type":"mrkdwn", "text": "*Requested by:*\n\($actor)"}, + {"type":"mrkdwn", "text": "*Duration:*\n\($duration)"}, + {"type":"mrkdwn", "text": "*Role:*\n\($role)"}, + {"type":"mrkdwn", "text": "*Namespace:*\n\($ns)"}, + ] + }, + { "type": "section", "fields": [ + {"type":"mrkdwn", "text": "*Whitelist:*\n\($wl)"} + ] + }, + { "type": "section", "text": { "type": "mrkdwn", "text": "*Token:*\n```\($token)```" } }, + { "type": "context", "elements": [ + {"type":"mrkdwn", "text": "<\($run)|View run>"}, + {"type":"mrkdwn", "text": $repo}, + {"type":"mrkdwn", "text": ""} + ] + } + ] + } + ') + curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true diff --git a/.github/workflows/terraform.yaml b/.github/workflows/terraform.yaml new file mode 100644 index 00000000..62426787 --- /dev/null +++ b/.github/workflows/terraform.yaml @@ -0,0 +1,165 @@ +name: Terraform CI + +on: + pull_request: + paths: + - 'terraform/**' + workflow_dispatch: + inputs: + action: + description: 'Choose action: plan or apply' + required: true + default: 'plan' + type: choice + options: + - plan + - apply + +permissions: + id-token: write + contents: read + +env: + AWS_REGION: us-east-1 + +jobs: + fmt-validate: + name: Terraform fmt & validate + runs-on: ubuntu-latest + defaults: + run: + working-directory: terraform/environments/eks + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 1.8.5 + + - name: Terraform fmt + run: terraform fmt -check -recursive + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Terraform init + run: terraform init -input=false + + - name: Terraform validate + run: terraform validate + + plan: + name: Terraform plan + needs: fmt-validate + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' || github.event.inputs.action == 'plan' + defaults: + run: + working-directory: terraform/environments/eks + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + + - name: Setup Terraform + uses: 
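
The token step above reduces to a single `kubectl create token` call. The same call, plus an RBAC check, is a handy sketch when debugging skooner-sa access outside the workflow; the namespace used for the check is an arbitrary example.

```bash
# Mint a short-lived token for the Skooner service account and inspect its access.
kubectl -n kube-system create token skooner-sa --duration=2h
kubectl auth can-i --list \
  --as=system:serviceaccount:kube-system:skooner-sa -n credreg-staging | head -n 20
```
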
hashicorp/setup-terraform@v3 + with: + terraform_version: 1.8.5 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Terraform init + run: terraform init -input=false + + - name: Terraform plan + id: plan + run: | + terraform plan -input=false -no-color -out=tfplan + terraform show -no-color tfplan > plan.txt + + - name: Upload plan artifact + uses: actions/upload-artifact@v4 + with: + name: terraform-plan + path: terraform/environments/eks/plan.txt + + # - name: Notify Slack on drift + # if: always() + # env: + # SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + # REPO: ${{ github.repository }} + # RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + # BRANCH: ${{ github.ref_name }} + # run: | + # # Skip if webhook not configured + # if [ -z "${SLACK_WEBHOOK_URL}" ]; then + # echo "SLACK_WEBHOOK_URL not set; skipping notification"; + # exit 0; + # fi + # # If any non-zero adds/changes/destroys are present, notify + # if grep -Eq '([1-9][0-9]* to add|[1-9][0-9]* to change|[1-9][0-9]* to destroy)' plan.txt; then + # summary=$(grep -Eo '[0-9]+ to add, [0-9]+ to change, [0-9]+ to destroy' plan.txt | head -n1) + # snippet=$(sed -n '1,60p' plan.txt | sed 's/"/\"/g') + # msg="Terraform drift detected in ${REPO} (${BRANCH}) — ${summary}.\nReview and run Apply if approved. ${RUN_URL}" + # payload=$(jq -nc --arg text "$msg" --arg snippet "$snippet" '{ + # text: $text, + # blocks: [ + # {type:"section", text:{type:"mrkdwn", text:$text}}, + # {type:"section", text:{type:"mrkdwn", text:("```\n"+$snippet+"\n```")}} + # ] + # }') + # curl -sS -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL" || true + # else + # echo "No drift detected; no Slack notification"; + # fi + + apply: + name: Terraform apply + needs: plan + runs-on: ubuntu-latest + if: github.event_name == 'workflow_dispatch' && github.event.inputs.action == 'apply' + environment: + name: terraform-apply + defaults: + run: + working-directory: terraform/environments/eks + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 1.8.5 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT }}:role/github-oidc-widget + aws-region: ${{ env.AWS_REGION }} + + - name: Terraform init + run: terraform init -input=false + + - name: Terraform plan (for apply) + run: terraform plan -input=false -no-color -out=tfplan + + - name: Terraform apply + run: terraform apply -input=false -auto-approve tfplan diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 79fda324..6f8a7623 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -41,9 +41,25 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: ruby/setup-ruby@v1.256.0 + submodules: recursive + + + - name: Pre-cache grape-middleware-logger gem + run: | + mkdir -p vendor/cache + if [ -f local_packages/grape-middleware-logger-2.4.0.gem ]; then + cp -v local_packages/grape-middleware-logger-2.4.0.gem vendor/cache/ + fi + + - uses: ruby/setup-ruby@v1 with: - bundler-cache: true + bundler-cache: false + ruby-version: '3.4' + - name: Install gems (non-frozen) + run: | + bundle config 
set path vendor/bundle + bundle config set frozen false + bundle install --jobs 4 - run: RACK_ENV=test bundle exec rake db:migrate # Rubocop, bundler-audit, etc. are executed through Overcommit hooks. @@ -62,40 +78,8 @@ jobs: SONAR_HOST_URL: ${{ vars.SONAR_HOST_URL }} - name: Upload coverage report - if: always() + if: ${{ always() && hashFiles('coverage/**') != '' }} uses: actions/upload-artifact@v4 with: name: coverage-report - path: coverage - - semgrep: - name: "Semgrep SAST" - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: '3.x' - - name: Install Semgrep - run: | - python3 -m pip install --upgrade pip - python3 -m pip install semgrep - - name: Run Semgrep (Ruby/JS) - run: | - semgrep --config p/r2c-security-audit \ - --include app --include lib \ - --error --timeout 180 - - name: Export Semgrep SARIF - if: always() - run: | - semgrep --config p/r2c-security-audit \ - --include app --include lib \ - --sarif -o semgrep.sarif || true - - name: Upload Semgrep SARIF - if: always() - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: semgrep.sarif + path: coverage/ diff --git a/.gitignore b/.gitignore index 46916250..3b99f763 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -*.gem + *.rbc *.pid /.config diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..25b7e7e4 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "vendor/grape-middleware-logger"] + path = vendor/grape-middleware-logger + url = https://github.com/soverin/grape-middleware-logger.git diff --git a/Dockerfile b/Dockerfile index fd868efc..fdee2679 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,46 +1,110 @@ - -# Use Red Hat Universal Base Image 8 -FROM registry.access.redhat.com/ubi8:8.10-1752733233 +# Build stage (UBI 10 minimal) +FROM registry.access.redhat.com/ubi10/ubi-minimal:10.0-1758185635 AS builder ARG PLAT=x86_64 +ARG RUBY_VERSION=3.4.7 ENV APP_PATH=/app/ ENV LANGUAGE=en_US:en ENV LANG=C.UTF-8 ENV LC_ALL=C.UTF-8 -ARG RUBY_VERSION=3.4.3 ENV BUNDLE_PATH=/app/vendor/bundle ENV PLAT=$PLAT ENV RUBY_VERSION=$RUBY_VERSION -ENV PG_REPO=https://download.postgresql.org/pub/repos/yum -ENV RPMFIND_REPO=https://rpmfind.net/linux/almalinux/8.10 -ENV PATH="/usr/local/rvm/gems/ruby-${RUBY_VERSION}@global/bin:/usr/local/rvm/rubies/ruby-${RUBY_VERSION}/bin:$PATH" -ENV GEM_HOME='/usr/local/rvm/gems/ruby-${RUBY_VERSION}@global' -ENV GEM_PATH='/usr/local/rvm/gems/ruby-${RUBY_VERSION}@global' -ENV MY_RUBY_HOME='/usr/local/rvm/rubies/ruby-${RUBY_VERSION}' -ENV IRBRC='/usr/local/rvm/rubies/ruby-${RUBY_VERSION}/.irbrc' +ENV PATH="/usr/local/bin:$PATH" WORKDIR $APP_PATH -# Install necessary tools and deps +# Install build tools and runtime libs in builder +RUN set -eux; \ + microdnf -y update; \ + microdnf -y install --setopt=install_weak_deps=0 --setopt=tsflags=nodocs \ + git gcc-c++ make which tar bzip2 \ + curl gnupg2 \ + autoconf automake patch \ + unzip zip \ + m4 \ + openssl openssl-devel \ + zlib zlib-devel \ + libyaml libyaml-devel \ + libffi libffi-devel \ + ncurses ncurses-devel \ + findutils diffutils procps-ng \ + ca-certificates \ + libpq libpq-devel \ + krb5-libs \ + openldap \ + cyrus-sasl-lib \ + keyutils-libs \ + libevent \ + lz4-libs \ + tzdata \ + sqlite sqlite-devel \ + libxml2 libxml2-devel \ + libxslt libxslt-devel \ + pkgconf-pkg-config \ + && microdnf clean all + +# Install PostgreSQL 17 client from PGDG and expose binaries on PATH +RUN set -eux; \ 
+ curl -fsSL https://download.postgresql.org/pub/repos/yum/reporpms/EL-10-x86_64/pgdg-redhat-repo-latest.noarch.rpm -o /tmp/pgdg.rpm; \ + rpm -Uvh /tmp/pgdg.rpm; \ + microdnf -y module disable postgresql || true; \ + microdnf -y install --setopt=install_weak_deps=0 --setopt=tsflags=nodocs postgresql17; \ + ln -sf /usr/pgsql-17/bin/psql /usr/bin/psql; \ + ln -sf /usr/pgsql-17/bin/pg_dump /usr/bin/pg_dump; \ + ln -sf /usr/pgsql-17/bin/pg_restore /usr/bin/pg_restore; \ + microdnf clean all -# Copy all pre-built RPMs from repository directory to a temporary location in -# the image so the repository root stays clean and the image layers remain tidy. +# Install local RPMs shipped in repo (EL10 builds) COPY rpms/ /tmp/rpms/ -RUN dnf -y install libpq.${PLAT} libpq-devel.${PLAT} dnf-plugins-core git gcc-c++ make openssl-devel \ - diffutils procps-ng zlib-devel which tar bzip2 libyaml-devel /tmp/rpms/*.rpm \ - # Install the PostgreSQL repository - ${PG_REPO}/reporpms/EL-8-${PLAT}/pgdg-redhat-repo-latest.noarch.rpm &&\ - # Install PostgreSQL - dnf -y install postgresql16 && dnf clean all && \ - # Install Ruby RVM - curl --proto "=https" --tlsv1.2 -sSf -L https://rvm.io/pkuczynski.asc | gpg2 --import - && \ - curl --proto "=https" --tlsv1.2 -sSf -L https://get.rvm.io | bash -s stable && \ - /usr/local/rvm/bin/rvm install ${RUBY_VERSION} && \ - # Cleanup temporary RPMs to keep image size small - rm -rf /tmp/rpms +RUN if ls /tmp/rpms/*.rpm >/dev/null 2>&1; then rpm -Uvh --nosignature /tmp/rpms/*.rpm; fi + +# Build and install Ruby from source (no RVM) +RUN set -eux; \ + curl -fsSL https://cache.ruby-lang.org/pub/ruby/${RUBY_VERSION%.*}/ruby-${RUBY_VERSION}.tar.gz -o /tmp/ruby.tar.gz; \ + mkdir -p /tmp/ruby-src; tar -xzf /tmp/ruby.tar.gz -C /tmp/ruby-src --strip-components=1; \ + cd /tmp/ruby-src; \ + ./configure --disable-install-doc --with-openssl-dir=/usr; \ + make -j"$(nproc)" && make install; \ + rm -rf /tmp/ruby-src /tmp/ruby.tar.gz; COPY Gemfile Gemfile.lock .ruby-version $APP_PATH -RUN gem install bundler && bundle config set deployment true && DOCKER_ENV=true RACK_ENV=production bundle install + +# Ensure path-based gems from submodules are available to Bundler +# Copy the grape-middleware-logger submodule before bundle install +COPY vendor/grape-middleware-logger $APP_PATH/vendor/grape-middleware-logger + +# Some gemspecs use `git ls-files`; submodule `.git` files reference parent repo +# which is not present in the image. Reinitialize as a standalone git repo. +RUN set -eux; \ + if [ -d "$APP_PATH/vendor/grape-middleware-logger" ]; then \ + cd "$APP_PATH/vendor/grape-middleware-logger"; \ + # If .git is a file (submodule link), remove it and init a new repo + if [ -e .git ] && [ ! -d .git ]; then rm -f .git; fi; \ + git init -q; \ + git add -A || true; \ + git -c user.email=builder@example -c user.name=builder commit -q -m "vendored submodule snapshot" || true; \ + fi + +RUN mkdir -p ./vendor && \ + mkdir -p ./vendor/cache +COPY local_packages/grape-middleware-logger-2.4.0.gem ./vendor/cache/ + +# Install the EXACT bundler version from Gemfile.lock (“BUNDLED WITH”) +RUN set -eux; \ + gem install bundler --no-document + +# Deployment settings (allows network, but stays frozen to the lockfile) +# RUN gem install bundler +RUN bundle config set path /app/vendor/cache \ + && bundle config set without 'development test' +RUN bundle install --verbose + +RUN bundle config set deployment true + +# Optional Install root certificates. 
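
The comment in this stage asks for the exact Bundler version recorded in Gemfile.lock, while the `gem install bundler --no-document` call installs whatever is latest. A small sketch of pinning it to the lockfile's `BUNDLED WITH` entry, should that behaviour be wanted:

```bash
# Read the version recorded under "BUNDLED WITH" in Gemfile.lock and install it.
BUNDLER_VERSION=$(awk '/^BUNDLED WITH$/ { getline; gsub(/ /, ""); print }' Gemfile.lock)
gem install bundler -v "${BUNDLER_VERSION}" --no-document
```
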
+ +# Copy application sources COPY app/ $APP_PATH/app COPY bin/ $APP_PATH/bin COPY config/ $APP_PATH/config @@ -52,11 +116,135 @@ COPY public/ $APP_PATH/public COPY config.ru $APP_PATH COPY Rakefile $APP_PATH -COPY docker-entrypoint.sh /usr/bin/ -RUN chmod +x /usr/bin/docker-entrypoint.sh && useradd -m registry && chown -R registry:registry /app -USER registry +COPY docker-entrypoint.sh /tmp/docker-entrypoint.sh + +# Collect runtime artifacts to a staging dir +RUN mkdir -p /runtime/usr/local /runtime/etc /runtime/usr/bin /runtime/usr/lib64 && \ + # Ruby runtime from /usr/local + mkdir -p /runtime/usr/local/bin /runtime/usr/local/lib && \ + cp -a /usr/local/bin/ruby /runtime/usr/local/bin/ && \ + cp -a /usr/local/bin/gem /runtime/usr/local/bin/ 2>/dev/null && \ + cp -a /usr/local/bin/rake /runtime/usr/local/bin/ 2>/dev/null && \ + cp -a /usr/local/bin/bundle /runtime/usr/local/bin/ 2>/dev/null && \ + cp -a /usr/local/bin/bundler /runtime/usr/local/bin/ 2>/dev/null && \ + cp -a /usr/local/lib/ruby /runtime/usr/local/lib/ && \ + cp -a /etc/pki /runtime/etc/ && \ + cp -a /etc/ssl /runtime/etc/ || true && \ + mkdir -p /runtime/etc/crypto-policies/back-ends && \ + if [ -f /etc/crypto-policies/back-ends/opensslcnf.config ]; then \ + cp -a /etc/crypto-policies/back-ends/opensslcnf.config /runtime/etc/crypto-policies/back-ends/; \ + elif [ -f /usr/share/crypto-policies/back-ends/opensslcnf.config ]; then \ + cp -a /usr/share/crypto-policies/back-ends/opensslcnf.config /runtime/etc/crypto-policies/back-ends/; \ + fi && \ + cp -a /usr/bin/openssl /runtime/usr/bin/ && \ + # Copy PostgreSQL client binaries, dereferencing symlinks if present + for b in \ + /usr/bin/psql /usr/bin/pg_dump /usr/bin/pg_restore \ + /usr/pgsql-17/bin/psql /usr/pgsql-17/bin/pg_dump /usr/pgsql-17/bin/pg_restore; do \ + [ -f "$b" ] || continue; \ + cp -aL "$b" /runtime/usr/bin/ 2>/dev/null || true; \ + done && \ + mkdir -p /runtime/usr/lib64/ossl-modules && \ + cp -a /usr/lib64/ossl-modules/* /runtime/usr/lib64/ossl-modules/ 2>/dev/null || true + +# Provide a minimal OpenSSL config that doesn't rely on system crypto policies +COPY openssl.cnf /runtime/etc/ssl/openssl.cnf +COPY openssl.cnf /runtime/etc/pki/tls/openssl.cnf + +# Auto-collect shared library dependencies for Ruby, native gems, and psql +RUN set -eux; \ + mkdir -p /runtime/usr/lib64; \ + targets="/usr/local/bin/ruby /usr/bin/psql /usr/bin/pg_dump /usr/bin/pg_restore /usr/bin/git /usr/bin/zip /usr/bin/unzip /usr/bin/find"; \ + if [ -d "$APP_PATH/vendor/bundle" ]; then \ + sofiles=$(find "$APP_PATH/vendor/bundle" -type f -name "*.so" || true); \ + targets="$targets $sofiles"; \ + fi; \ + for t in "$targets"; do \ + [ -f "$t" ] || continue; \ + ldd "$t" | awk '/=> \/|\//{print $3}' | sed -e 's/(0x[0-9a-fA-F]\+)//g' | grep -E '^/' || true; \ + done | sort -u | while read -r lib; do \ + [ -f "$lib" ] || continue; \ + cp -a "$lib" /runtime/usr/lib64/ 2>/dev/null || true; \ + done +RUN set -eux; \ + # Copy commonly required runtime shared libraries (no loop) + cp -a /usr/lib64/libpq.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libssl.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libcrypto.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libcrypt.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libcrypt.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libgssapi_krb5.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libkrb5.so.* /runtime/usr/lib64/ 2>/dev/null || true; 
\ + cp -a /usr/lib64/libkrb5support.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libk5crypto.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libcom_err.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libldap.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/liblber.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libsasl2.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libgssapi_krb5.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libkrb5.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libkrb5support.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libk5crypto.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libcom_err.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libldap*.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/liblber.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libsasl2.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libkeyutils.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libkeyutils.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libevent-*.so* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libevent-*.so* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/liblz4.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/liblz4.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libyaml-0.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libreadline.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libncursesw.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libbz2.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /lib64/libbz2.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libz.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libzstd.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libgmp.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libffi.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + cp -a /usr/lib64/libgdbm.so.* /runtime/usr/lib64/ 2>/dev/null || true; \ + # App + cp -a $APP_PATH /runtime/app; \ + # Git client for gems that call `git` at runtime + if [ -x /usr/bin/git ]; then cp -a /usr/bin/git /runtime/usr/bin/git; fi; \ + if [ -d /usr/libexec/git-core ]; then mkdir -p /runtime/usr/libexec && cp -a /usr/libexec/git-core /runtime/usr/libexec/git-core; fi; \ + # Zip/unzip utilities required by certain jobs + if [ -x /usr/bin/zip ]; then cp -a /usr/bin/zip /runtime/usr/bin/zip; fi; \ + if [ -x /usr/bin/unzip ]; then cp -a /usr/bin/unzip /runtime/usr/bin/unzip; fi; \ + # find command for scripts that rely on findutils + if [ -x /usr/bin/find ]; then cp -a /usr/bin/find /runtime/usr/bin/find; fi; \ + # Timezone data for TZInfo + mkdir -p /runtime/usr/share && cp -a /usr/share/zoneinfo /runtime/usr/share/zoneinfo; \ + chmod +x /tmp/docker-entrypoint.sh; cp /tmp/docker-entrypoint.sh /runtime/usr/bin/docker-entrypoint.sh + +# Runtime stage (UBI 10 micro) +FROM registry.access.redhat.com/ubi10/ubi-micro:10.0-1754556444 + +ENV APP_PATH=/app/ +ARG RUBY_VERSION=3.4.7 +ENV PATH="/usr/local/bin:$PATH" +ENV LD_LIBRARY_PATH="/usr/lib64:/lib64:/usr/local/lib" +ENV OPENSSL_MODULES="/usr/lib64/ossl-modules" +ENV OPENSSL_CONF="/etc/pki/tls/openssl.cnf" +ENV HOME="/home/registry" +ENV BUNDLE_PATH="/app/vendor/bundle" + +WORKDIR $APP_PATH + +# Copy 
runtime files from builder +COPY --from=builder /runtime/ / +# Create runtime user (ubi-micro lacks useradd) +RUN set -eux; \ + uid=1000; gid=1000; \ + mkdir -p /home/registry; \ + echo "registry:x:${uid}:${gid}:Registry User:/home/registry:/bin/sh" >> /etc/passwd; \ + echo "registry:x:${gid}:" >> /etc/group; \ + chown -R ${uid}:${gid} /app /home/registry +USER 1000 -ENTRYPOINT ["docker-entrypoint.sh"] +ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"] -EXPOSE 9292 \ No newline at end of file +EXPOSE 9292 diff --git a/Gemfile b/Gemfile index f33e4a72..a8db0cfb 100644 --- a/Gemfile +++ b/Gemfile @@ -57,6 +57,7 @@ gem 'kramdown', '~> 2.5' gem 'kramdown-parser-gfm', '~> 1.1' # Search +gem 'elasticsearch', '~> 9.1' gem 'pg_search', '~> 2.3' # Configuration management diff --git a/Gemfile.lock b/Gemfile.lock index e6d39099..f555dccc 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -134,6 +134,14 @@ GEM dry-inflector (~> 1.0) dry-logic (~> 1.4) zeitwerk (~> 2.6) + elastic-transport (8.4.1) + faraday (< 3) + multi_json + elasticsearch (9.1.2) + elastic-transport (~> 8.3) + elasticsearch-api (= 9.1.2) + elasticsearch-api (9.1.2) + multi_json encryptor (3.0.0) erb (5.0.2) erubi (1.13.1) @@ -141,6 +149,12 @@ GEM activesupport (>= 5.0.0) faker (3.5.1) i18n (>= 1.8.11, < 2) + faraday (2.14.0) + faraday-net_http (>= 2.0, < 3.5) + json + logger + faraday-net_http (3.4.1) + net-http (>= 0.5.0) ffi (1.17.2) ffi-compiler (1.3.2) ffi (>= 1.15.5) @@ -237,6 +251,8 @@ GEM ruby2_keywords (~> 0.0.1) mustermann-grape (1.1.0) mustermann (>= 1.0.0) + net-http (0.6.0) + uri netrc (0.11.0) newrelic_rpm (9.20.0) nio4r (2.7.4) @@ -430,6 +446,7 @@ DEPENDENCIES dry-inflector (~> 1.2) dry-monads (~> 1.8) dry-struct (~> 1.8) + elasticsearch (~> 9.1) encryptor (~> 3.0) factory_bot (~> 6.5) faker (~> 3.5) diff --git a/app/api/entities/envelope_download.rb b/app/api/entities/envelope_download.rb index 724cd1f6..3ea180f4 100644 --- a/app/api/entities/envelope_download.rb +++ b/app/api/entities/envelope_download.rb @@ -2,14 +2,24 @@ module API module Entities # Presenter for EnvelopeDownload class EnvelopeDownload < Grape::Entity - expose :id, - documentation: { type: 'string', desc: 'ID (in UUID format)' } + expose :enqueued_at, + documentation: { type: 'string', desc: 'When the download was enqueued' }, + if: ->(object) { object.pending? } + + expose :finished_at, + documentation: { type: 'string', desc: 'When the download finished' }, + if: ->(object) { object.failed? || object.finished? } + + expose :started_at, + documentation: { type: 'string', desc: 'When the download started' }, + if: ->(object) { object.in_progress? } expose :status, documentation: { type: 'string', desc: 'Status of download' } expose :url, - documentation: { type: 'string', desc: 'AWS S3 URL' } + documentation: { type: 'string', desc: 'AWS S3 URL' }, + if: ->(object) { object.failed? || object.finished? 
} end end end diff --git a/app/api/v1/base.rb b/app/api/v1/base.rb index a7146caa..f58a3f58 100644 --- a/app/api/v1/base.rb +++ b/app/api/v1/base.rb @@ -17,6 +17,7 @@ require 'v1/indexed_resources' require 'v1/indexer' require 'v1/envelope_communities' +require 'v1/containers' module API module V1 @@ -45,6 +46,7 @@ class Base < Grape::API mount API::V1::Ctdl.api_class mount API::V1::IndexedResources.api_class mount API::V1::Indexer.api_class + mount API::V1::Containers.api_class route_param :community_name do mount API::V1::Resources.api_class @@ -55,6 +57,7 @@ class Base < Grape::API mount API::V1::Ctdl.api_class mount API::V1::IndexedResources.api_class mount API::V1::Indexer.api_class + mount API::V1::Containers.api_class end namespace :metadata do diff --git a/app/api/v1/containers.rb b/app/api/v1/containers.rb new file mode 100644 index 00000000..7bed08de --- /dev/null +++ b/app/api/v1/containers.rb @@ -0,0 +1,51 @@ +require 'mountable_api' +require 'container_repository' +require 'helpers/shared_helpers' +require 'helpers/community_helpers' +require 'entities/envelope' + +module API + module V1 + # Implements the endpoints related to containers + class Containers < MountableAPI + mounted do # rubocop:todo Metrics/BlockLength + helpers CommunityHelpers + helpers SharedHelpers + + resource :containers do + before do + authenticate! + end + + route_param :container_ctid do + resource :resources do + before do + ctid = params[:container_ctid]&.downcase + + @envelope = current_community + .envelopes + .containers + .find_sole_by(envelope_ceterms_ctid: ctid) + + authorize @envelope, :update? + @repository = ContainerRepository.new(@envelope) + end + + desc 'Adds a resource to the container' + patch do + @repository.add(JSON.parse(request.body.read)) + present @envelope, with: API::Entities::Envelope + end + + desc 'Removes a resource from the container' + delete ':resource_ctid' do + @repository.remove(params[:resource_ctid]) + present @envelope, with: API::Entities::Envelope + end + end + end + end + end + end + end +end diff --git a/app/api/v1/envelope_events.rb b/app/api/v1/envelope_events.rb index e13396ea..d89aa58d 100644 --- a/app/api/v1/envelope_events.rb +++ b/app/api/v1/envelope_events.rb @@ -12,11 +12,17 @@ def self.included(base) # rubocop:todo Metrics/AbcSize, Metrics/MethodLength optional :after, type: DateTime optional :ctid, type: String optional :event, type: String, values: %w[create update destroy] + optional :provisional, + default: 'include', + values: %w[exclude include only], + desc: 'Whether to include provisional records', + documentation: { param_type: 'query' } use :pagination end get do events = current_community .versions + .with_provisional_publication_status(params[:provisional]) .where(item_type: 'Envelope') .order(created_at: :desc) events.where!('created_at >= ?', params[:after]) if params[:after] diff --git a/app/api/v1/envelopes.rb b/app/api/v1/envelopes.rb index 8c4e330a..2c4f41fc 100644 --- a/app/api/v1/envelopes.rb +++ b/app/api/v1/envelopes.rb @@ -11,6 +11,7 @@ require 'v1/single_envelope' require 'v1/revisions' require 'v1/envelope_events' +require 'download_envelopes_job' module API module V1 @@ -67,6 +68,32 @@ class Envelopes < MountableAPI include API::V1::EnvelopeEvents + resources :download do + before do + authenticate! + authorize Envelope, :index? + + downloads = current_community.envelope_downloads.envelope + @envelope_download = downloads.last || downloads.create! 
+ end + + desc 'Returns the envelope download' + get do + present @envelope_download, with: API::Entities::EnvelopeDownload + end + + desc 'Starts an envelope download' + post do + @envelope_download.update!( + enqueued_at: Time.current, + status: :pending + ) + + DownloadEnvelopesJob.perform_later(@envelope_download.id) + present @envelope_download, with: API::Entities::EnvelopeDownload + end + end + route_param :envelope_id do after_validation do id = params[:envelope_id]&.downcase @@ -86,28 +113,6 @@ class Envelopes < MountableAPI include API::V1::SingleEnvelope include API::V1::Revisions end - - resources :downloads do - before do - authenticate! - end - - desc 'Returns the download object with the given ID' - get ':id' do - authorize Envelope, :index? - - envelope_download = current_user_community.envelope_downloads.find(params[:id]) - present envelope_download, with: API::Entities::EnvelopeDownload - end - - desc 'Starts new envelope download' - post do - authorize Envelope, :index? - - present current_user_community.envelope_downloads.create!, - with: API::Entities::EnvelopeDownload - end - end end end end diff --git a/app/api/v1/graph.rb b/app/api/v1/graph.rb index a6a68c91..9b1434cc 100644 --- a/app/api/v1/graph.rb +++ b/app/api/v1/graph.rb @@ -25,6 +25,44 @@ class Graph < MountableAPI end resource :graph do + resources :download do + before do + authenticate! + authorize Envelope, :index? + + downloads = current_community.envelope_downloads.graph + @envelope_download = downloads.last || downloads.create! + end + + desc 'Returns the envelope download' + get do + present @envelope_download, with: API::Entities::EnvelopeDownload + end + + desc 'Starts an envelope download' + post do + @envelope_download.update!( + enqueued_at: Time.current, + status: :pending + ) + + DownloadEnvelopesJob.perform_later(@envelope_download.id) + present @envelope_download, with: API::Entities::EnvelopeDownload + end + end + + desc 'Returns graphs matching the given Elasticsearch query' + post :es do + status :ok + + Elasticsearch::Client + .new(host: ENV.fetch('ELASTICSEARCH_ADDRESS')) + .search( + body: JSON(request.body.read), + index: current_community.name + ) + end + namespace do desc 'Return a resource. ' \ 'If the resource is part of a graph, the entire graph is returned.' diff --git a/app/api/v1/publish.rb b/app/api/v1/publish.rb index 588b69bd..0e4ae79b 100644 --- a/app/api/v1/publish.rb +++ b/app/api/v1/publish.rb @@ -1,5 +1,6 @@ require 'policies/envelope_policy' require 'services/publish_interactor' +require 'services/sync_envelope_graph_with_s3' module API module V1 diff --git a/app/api/v1/resources.rb b/app/api/v1/resources.rb index 7c23dff8..65e9f10d 100644 --- a/app/api/v1/resources.rb +++ b/app/api/v1/resources.rb @@ -29,15 +29,39 @@ class Resources < MountableAPI desc 'Returns CTIDs of existing resources' params do requires :ctids, type: [String], desc: 'CTIDs' + optional :@type, type: String, desc: 'Resource type' end post 'check_existence' do status(:ok) - @envelope_community - .envelope_resources - .not_deleted - .where('resource_id IN (?)', params[:ctids]) - .pluck(:resource_id) + resource_types = + if (type = params[:@type]).present? 
+ resolver = CtdlSubclassesResolver.new(envelope_community: @envelope_community) + + unless resolver.all_classes.include?(type) + error!( + { errors: ['@type does not have a valid value'] }, + :unprocessable_entity + ) + end + + resolver.root_class = type + + resolver.subclasses.filter_map do |subclass| + @envelope_community + .config + .dig('resource_type', 'values_map', subclass) + end + end + + envelope_resources = @envelope_community + .envelope_resources + .not_deleted + .where('resource_id IN (?)', params[:ctids]) + + envelope_resources.where!(resource_type: resource_types.uniq) unless resource_types.nil? + + envelope_resources.pluck(:resource_id) end desc 'Returns resources with the given CTIDs or bnodes IDs' diff --git a/app/jobs/download_envelopes_job.rb b/app/jobs/download_envelopes_job.rb index 04ccd67c..690c9450 100644 --- a/app/jobs/download_envelopes_job.rb +++ b/app/jobs/download_envelopes_job.rb @@ -1,4 +1,4 @@ -require 'entities/envelope' +require 'download_envelopes' require 'envelope_download' # Create a ZIP archive contaning all of the envelopes from a certain community, @@ -10,56 +10,8 @@ def perform(envelope_download_id) envelope_download = EnvelopeDownload.find_by(id: envelope_download_id) return unless envelope_download - envelope_download.update!( - internal_error_backtrace: [], - internal_error_message: nil, - started_at: Time.current - ) - - envelope_download.url = upload_to_s3(envelope_download) + DownloadEnvelopes.call(envelope_download:) rescue StandardError => e Airbrake.notify(e, envelope_download_id:) - envelope_download&.internal_error_backtrace = e.backtrace - envelope_download&.internal_error_message = e.message - ensure - envelope_download&.update!(finished_at: Time.current) - end - - private - - def bucket - ENV.fetch('ENVELOPE_DOWNLOADS_BUCKET') - end - - def create_zip_archive(envelope_download) - envelopes = envelope_download.envelopes.includes( - :envelope_community, :organization, :publishing_organization - ) - - file_path = MR.root_path.join(SecureRandom.hex) - - Zip::OutputStream.open(file_path) do |stream| - envelopes.find_each do |envelope| - stream.put_next_entry("#{envelope.envelope_ceterms_ctid}.json") - stream.puts(API::Entities::Envelope.represent(envelope).to_json) - end - end - - file_path - end - - def region - ENV.fetch('AWS_REGION') - end - - def upload_to_s3(envelope_download) - community = envelope_download.envelope_community.name - key = "#{community}_#{Time.current.to_i}_#{SecureRandom.hex}.zip" - path = create_zip_archive(envelope_download) - object = Aws::S3::Resource.new(region:).bucket(bucket).object(key) - object.upload_file(path) - object.public_url - ensure - File.delete(path) end end diff --git a/app/models/envelope.rb b/app/models/envelope.rb index e5d7965d..187be6c8 100644 --- a/app/models/envelope.rb +++ b/app/models/envelope.rb @@ -3,6 +3,8 @@ require 'authorized_key' require 'export_to_ocn_job' require 'delete_from_ocn_job' +require 'envelope_version' +require 'services/sync_envelope_graph_with_es' require_relative 'extensions/transactionable_envelope' require_relative 'extensions/learning_registry_resources' require_relative 'extensions/ce_registry_resources' @@ -21,9 +23,11 @@ class Envelope < ActiveRecord::Base include ResourceType has_paper_trail meta: { - envelope_ceterms_ctid: :envelope_ceterms_ctid, - envelope_community_id: :envelope_community_id - } + envelope_ceterms_ctid: :envelope_ceterms_ctid, + envelope_community_id: :envelope_community_id, + publication_status: :publication_status + }, + versions: { 
class_name: 'EnvelopeVersion' } belongs_to :envelope_community belongs_to :organization @@ -37,14 +41,18 @@ class Envelope < ActiveRecord::Base enum :resource_format, { json: 0, xml: 1 } enum :resource_encoding, { jwt: 0 } enum :node_headers_format, { node_headers_jwt: 0 } - enum :publication_status, { full: 0, provisional: 1 } + enum :publication_status, MR.envelope_publication_statuses before_validation :generate_envelope_id, on: :create before_validation :process_resource, :process_headers before_save :assign_last_verified_on after_save :update_headers + after_save :upload_to_s3 + after_save :index_with_es before_destroy :delete_description_sets, prepend: true after_destroy :delete_from_ocn + after_destroy :delete_from_s3 + after_destroy :delete_from_es after_commit :export_to_ocn validates :envelope_community, :envelope_type, :envelope_version, @@ -55,6 +63,7 @@ class Envelope < ActiveRecord::Base RESOURCE_PUBLISH_TYPES = %w[primary secondary].freeze validates :resource_publish_type, inclusion: { in: RESOURCE_PUBLISH_TYPES, allow_blank: true } + scope :containers, -> { where(envelope_ctdl_type: CONTAINER_CTDL_TYPES) } scope :not_deleted, -> { where(deleted_at: nil) } scope :deleted, -> { where.not(deleted_at: nil) } scope :ordered_by_date, -> { order(created_at: :desc) } @@ -75,6 +84,7 @@ class Envelope < ActiveRecord::Base end } + CONTAINER_CTDL_TYPES = %w[ceterms:Collection].freeze NOT_FOUND = 'Envelope not found'.freeze DELETED = 'Envelope deleted'.freeze @@ -257,4 +267,28 @@ def export_to_ocn ExportToOCNJob.perform_later(id) end + + def upload_to_s3 + SyncEnvelopeGraphWithS3.upload(self) + rescue StandardError => e + Airbrake.notify(e) + end + + def delete_from_s3 + SyncEnvelopeGraphWithS3.remove(self) + rescue StandardError => e + Airbrake.notify(e) + end + + def index_with_es + SyncEnvelopeGraphWithEs.index(self) + rescue StandardError => e + Airbrake.notify(e) + end + + def delete_from_es + SyncEnvelopeGraphWithEs.delete(self) + rescue StandardError => e + Airbrake.notify(e) + end end diff --git a/app/models/envelope_community.rb b/app/models/envelope_community.rb index 13ba016f..39bdc79b 100644 --- a/app/models/envelope_community.rb +++ b/app/models/envelope_community.rb @@ -6,11 +6,11 @@ class EnvelopeCommunity < ActiveRecord::Base include AttributeNormalizer has_one :envelope_community_config - has_many :envelope_downloads has_many :envelopes + has_many :envelope_downloads has_many :envelope_resources, through: :envelopes has_many :indexed_envelope_resources - has_many :versions, class_name: 'PaperTrail::Version' + has_many :versions, class_name: 'EnvelopeVersion' validates :name, presence: true, uniqueness: true validates :default, uniqueness: true, if: :default @@ -100,10 +100,6 @@ def get_resource_type_from_values_map(cfg, envelope) end end - cfg['values_map'].fetch(key) do - raise MR::SchemaDoesNotExist, - "Cannot load json-schema. 
The property '#{cfg['property']}' " \ - "has an invalid value '#{key}'" - end + cfg['values_map'][key] end end diff --git a/app/models/envelope_download.rb b/app/models/envelope_download.rb index 872eca79..4b17eb62 100644 --- a/app/models/envelope_download.rb +++ b/app/models/envelope_download.rb @@ -1,25 +1,20 @@ -require 'download_envelopes_job' - # Stores the status and AWS S3 URL of an asynchronously performed envelope download class EnvelopeDownload < ActiveRecord::Base + self.inheritance_column = nil + belongs_to :envelope_community has_many :envelopes, -> { not_deleted }, through: :envelope_community - after_commit :enqueue_job, on: :create - - def status - if finished_at? - return internal_error_message? ? 'failed' : 'finished' - elsif started_at? - return 'in progress' - end - - 'pending' - end + enum :status, { + failed: 'failed', + finished: 'finished', + in_progress: 'in_progress', + pending: 'pending' + } - private + enum :type, { envelope: 'envelope', graph: 'graph' } - def enqueue_job - DownloadEnvelopesJob.perform_later(id) + def with_error? + internal_error_message? end end diff --git a/app/models/envelope_version.rb b/app/models/envelope_version.rb new file mode 100644 index 00000000..49f7be17 --- /dev/null +++ b/app/models/envelope_version.rb @@ -0,0 +1,15 @@ +# The custom subclass of PaperTrail::Version for envelopes +class EnvelopeVersion < PaperTrail::Version + enum :publication_status, MR.envelope_publication_statuses + + scope :with_provisional_publication_status, lambda { |value| + case value + when 'only' + provisional + when 'include' + all + else + full + end + } +end diff --git a/app/models/extensions/ce_registry_resources.rb b/app/models/extensions/ce_registry_resources.rb index e52f1991..614b7b1c 100644 --- a/app/models/extensions/ce_registry_resources.rb +++ b/app/models/extensions/ce_registry_resources.rb @@ -22,7 +22,7 @@ def ce_registry? end def self.generate_ctid - "urn:ctid:#{SecureRandom.uuid}" + "ce-#{SecureRandom.uuid}" end end end diff --git a/app/models/indexed_envelope_resource.rb b/app/models/indexed_envelope_resource.rb index 8f734cb8..ddb3f42c 100644 --- a/app/models/indexed_envelope_resource.rb +++ b/app/models/indexed_envelope_resource.rb @@ -2,7 +2,7 @@ # A flattened out version of an envelope resource's payload class IndexedEnvelopeResource < ActiveRecord::Base - enum :publication_status, Envelope.publication_statuses + enum :publication_status, MR.envelope_publication_statuses belongs_to :envelope_community belongs_to :envelope_resource diff --git a/app/services/container_repository.rb b/app/services/container_repository.rb new file mode 100644 index 00000000..e252b84d --- /dev/null +++ b/app/services/container_repository.rb @@ -0,0 +1,47 @@ +# Manages subresources of a container +class ContainerRepository + attr_reader :envelope + + delegate :processed_resource, to: :envelope + + def initialize(envelope) + @envelope = envelope + end + + def add(subresource) + container['ceterms:hasMember'] ||= [] + subresource_id = subresource['@id'] + + unless container['ceterms:hasMember'].include?(subresource_id) + container['ceterms:hasMember'] << subresource_id + end + + graph << subresource unless graph.find { it['@id'] == subresource_id } + update_envelope! + end + + def remove(subresource_ctid) + subresource = graph.find { |obj| obj['ceterms:ctid'] == subresource_ctid } + return false unless subresource + + subresource_id = subresource['@id'] + container['ceterms:hasMember']&.delete(subresource_id) + graph.reject! 
{ |obj| obj['@id'] == subresource_id } + update_envelope! + end + + def container + @container ||= graph.find { it['@type'] == 'ceterms:Collection' } + end + + def graph + @graph ||= processed_resource['@graph'] + end + + def update_envelope! + envelope.update!(processed_resource:) + changed = envelope.previous_changes.any? + ExtractEnvelopeResourcesJob.perform_later(envelope.id) if changed + changed + end +end diff --git a/app/services/ctdl_subclasses_resolver.rb b/app/services/ctdl_subclasses_resolver.rb index 6175a46f..23c32a8d 100644 --- a/app/services/ctdl_subclasses_resolver.rb +++ b/app/services/ctdl_subclasses_resolver.rb @@ -1,17 +1,24 @@ class CtdlSubclassesResolver # rubocop:todo Style/Documentation SUBCLASSES_MAP_FILE = MR.root_path.join('fixtures', 'subclasses_map.json') - attr_reader :envelope_community_config, :include_root, :root_class + attr_accessor :root_class + attr_reader :envelope_community_config, :include_root - def initialize(envelope_community:, root_class:, include_root: true) + def initialize(envelope_community:, root_class: nil, include_root: true) @envelope_community_config = envelope_community.config @include_root = include_root @root_class = root_class end + def all_classes(map = ctdl_subclasses_map) + map.flat_map do |type, submap| + [type, *all_classes(submap)] + end.uniq + end + def subclasses @subclasses ||= collect_subclasses(initial_map_item) + - (include_root ? [root_class] : []) + (include_root && root_class ? [root_class] : []) end def initial_map_item diff --git a/app/services/download_envelopes.rb b/app/services/download_envelopes.rb new file mode 100644 index 00000000..850a31ad --- /dev/null +++ b/app/services/download_envelopes.rb @@ -0,0 +1,51 @@ +require 'envelope_dumps/envelope_builder' +require 'envelope_dumps/graph_builder' + +# Builds an envelope community's download according to its type +class DownloadEnvelopes + attr_reader :envelope_download, :last_dumped_at + + def initialize(envelope_download) + @envelope_download = envelope_download + @last_dumped_at = envelope_download.started_at unless envelope_download.with_error? 
+ end + + def self.call(envelope_download:) + new(envelope_download).run + end + + def builder + builder_class = + case envelope_download.type + when 'envelope' + EnvelopeDumps::EnvelopeBuilder + when 'graph' + EnvelopeDumps::GraphBuilder + else + raise "No dump builder is defined for `#{envelope_download.type}`" + end + + builder_class.new(envelope_download, last_dumped_at) + end + + def run # rubocop:todo Metrics/AbcSize, Metrics/MethodLength + envelope_download.update!( + internal_error_backtrace: [], + internal_error_message: nil, + started_at: Time.current, + status: :in_progress + ) + + envelope_download.with_lock do + envelope_download.status = :finished + envelope_download.url = builder.run + rescue StandardError => e + Airbrake.notify(e) + envelope_download&.internal_error_backtrace = e.backtrace + envelope_download&.internal_error_message = e.message + envelope_download.status = :failed + ensure + envelope_download.update!(finished_at: Time.current) + end + end +end diff --git a/app/services/envelope_dumps/base.rb b/app/services/envelope_dumps/base.rb new file mode 100644 index 00000000..bbb4abc3 --- /dev/null +++ b/app/services/envelope_dumps/base.rb @@ -0,0 +1,136 @@ +module EnvelopeDumps + # Dumps an envelope community's envelopes or graphs into a ZIP file and uploads it to S3 + class Base # rubocop:todo Metrics/ClassLength + attr_reader :envelope_download, :last_dumped_at + + delegate :envelope_community, :url, to: :envelope_download + + def initialize(envelope_download, last_dumped_at) + @envelope_download = envelope_download + @last_dumped_at = last_dumped_at + end + + def bucket + @bucket ||= Aws::S3::Resource + .new(region: ENV.fetch('AWS_REGION')) + .bucket(bucket_name) + end + + def bucket_name + raise NotImplementedError + end + + def build_content(_envelope) + raise NotImplementedError + end + + def create_or_update_entries + FileUtils.mkdir_p(dirname) + + log('Adding recently published envelopes into the dump') + + published_envelopes.find_each do |envelope| + File.write( + File.join(dirname, "#{envelope.envelope_ceterms_ctid}.json"), + build_content(envelope).to_json + ) + end + end + + def dirname + @dirname ||= [ + envelope_community.name, + Time.current.to_i, + SecureRandom.hex + ].join('_') + end + + def download_file # rubocop:todo Metrics/AbcSize + return unless url.present? 
+ + log("Downloading the existing dump from #{url}") + previous_filename = url.split('/').last + object = bucket.object(previous_filename) + object.get(response_target: filename) + + log("Unarchiving the downloaded dump into #{dirname}") + system("unzip -qq #{filename} -d #{dirname}", exception: true) + rescue StandardError => e + Airbrake.notify(e) + end + + def destroy_envelope_events + @destroy_envelope_events ||= envelope_community + .versions + .where(event: 'destroy') + .where('created_at >= ?', last_dumped_at) + end + + def filename + @filename ||= "#{dirname}.zip" + end + + def log(message) + MR.logger.info(message) + end + + def published_envelopes + @published_envelopes ||= begin + envelopes = envelope_community + .envelopes + .not_deleted + .includes(:envelope_community, :organization, :publishing_organization) + + envelopes.where!('updated_at >= ?', last_dumped_at) if last_dumped_at + envelopes + end + end + + def remove_entries + log('Removing recently deleted envelopes from the dump') + + destroy_envelope_events.select(:id, :envelope_ceterms_ctid).find_each do |event| + FileUtils.remove_file( + File.join(dirname, "#{event.envelope_ceterms_ctid}.json"), + true + ) + end + end + + def run + if up_to_date? + log('The dump is up to date.') + return + end + + download_file + create_or_update_entries + remove_entries + upload_file + ensure + log('Deleting intermediate files.') + FileUtils.rm_rf(dirname) + FileUtils.rm_f(filename) + log('Finished.') + end + + def up_to_date? + url.present? && published_envelopes.none? && destroy_envelope_events.none? + end + + def upload_file + log('Archiving the updated dump.') + + system( + "find #{dirname} -type f -print | zip -FSjqq #{filename} -@", + exception: true + ) + + log('Uploading the updated dump to S3.') + + object = bucket.object(filename) + object.upload_file(filename) + object.public_url + end + end +end diff --git a/app/services/envelope_dumps/envelope_builder.rb b/app/services/envelope_dumps/envelope_builder.rb new file mode 100644 index 00000000..c19eaba1 --- /dev/null +++ b/app/services/envelope_dumps/envelope_builder.rb @@ -0,0 +1,13 @@ +require 'envelope_dumps/base' + +module EnvelopeDumps + class EnvelopeBuilder < Base # rubocop:todo Style/Documentation + def bucket_name + ENV.fetch('ENVELOPE_DOWNLOADS_BUCKET') + end + + def build_content(envelope) + API::Entities::Envelope.represent(envelope) + end + end +end diff --git a/app/services/envelope_dumps/graph_builder.rb b/app/services/envelope_dumps/graph_builder.rb new file mode 100644 index 00000000..1c695079 --- /dev/null +++ b/app/services/envelope_dumps/graph_builder.rb @@ -0,0 +1,13 @@ +require 'envelope_dumps/base' + +module EnvelopeDumps + class GraphBuilder < Base # rubocop:todo Style/Documentation + def bucket_name + ENV.fetch('ENVELOPE_GRAPHS_BUCKET') + end + + def build_content(envelope) + envelope.processed_resource + end + end +end diff --git a/app/services/sync_envelope_graph_with_es.rb b/app/services/sync_envelope_graph_with_es.rb new file mode 100644 index 00000000..ffd5984e --- /dev/null +++ b/app/services/sync_envelope_graph_with_es.rb @@ -0,0 +1,70 @@ +# Adds or deletes an envelope graph from the Elasticsearch index +class SyncEnvelopeGraphWithEs + attr_reader :envelope + + delegate :envelope_community, :envelope_ceterms_ctid, to: :envelope + + def initialize(envelope) + @envelope = envelope + end + + class << self + def index(envelope) + new(envelope).index + end + + def delete(envelope) + new(envelope).delete + end + end + + def client + @client ||= 
Elasticsearch::Client.new(host: elasticsearch_address) + end + + def elasticsearch_address + ENV['ELASTICSEARCH_ADDRESS'].presence + end + + def index + return unless elasticsearch_address + + client.index( + body: envelope.processed_resource.to_json, + id: envelope_ceterms_ctid, + index: index_name + ) + + envelope.touch(:indexed_with_es_at) + rescue Elastic::Transport::Transport::Errors::BadRequest => e + raise e unless e.message.include?('Limit of total fields') + + increase_total_fields_limit + retry + end + + def delete + return unless elasticsearch_address + + client.delete(id: envelope_ceterms_ctid, index: index_name) + rescue Elastic::Transport::Transport::Errors::NotFound + nil + end + + def increase_total_fields_limit + settings = client.indices.get_settings(index: index_name) + + current_limit = settings + .dig(index_name, 'settings', 'index', 'mapping', 'total_fields', 'limit') + .to_i + + client.indices.put_settings( + body: { 'index.mapping.total_fields.limit' => current_limit * 2 }, + index: index_name + ) + end + + def index_name + envelope_community.name + end +end diff --git a/app/services/sync_envelope_graph_with_s3.rb b/app/services/sync_envelope_graph_with_s3.rb new file mode 100644 index 00000000..6a9b39e1 --- /dev/null +++ b/app/services/sync_envelope_graph_with_s3.rb @@ -0,0 +1,57 @@ +# Uploads or deletes an envelope graph from the S3 bucket +class SyncEnvelopeGraphWithS3 + attr_reader :envelope + + delegate :envelope_community, :envelope_ceterms_ctid, to: :envelope + + def initialize(envelope) + @envelope = envelope + end + + class << self + def upload(envelope) + new(envelope).upload + end + + def remove(envelope) + new(envelope).remove + end + end + + def upload + return unless s3_bucket_name + + s3_object.put( + body: envelope.processed_resource.to_json, + content_type: 'application/json' + ) + + envelope.update_column(:s3_url, s3_object.public_url) + end + + def remove + return unless s3_bucket_name + + s3_object.delete + end + + def s3_bucket + @s3_bucket ||= s3_resource.bucket(s3_bucket_name) + end + + def s3_bucket_name + ENV['ENVELOPE_GRAPHS_BUCKET'].presence + end + + def s3_key + "#{envelope_community.name}/#{envelope_ceterms_ctid}.json" + end + + def s3_object + @s3_object ||= s3_bucket.object(s3_key) + end + + def s3_resource + @s3_resource ||= Aws::S3::Resource.new(region: ENV['AWS_REGION'].presence) + end +end diff --git a/config/application.rb b/config/application.rb index 8b654b06..6d4c920b 100644 --- a/config/application.rb +++ b/config/application.rb @@ -121,6 +121,10 @@ def log_with_labels(level, message, labels_arg = nil) def root_path @root_path ||= Pathname.new(File.expand_path('..', __dir__)) end + + def envelope_publication_statuses + { full: 0, provisional: 1 } + end end end diff --git a/db/migrate/20250830180848_add_unique_index_on_envelope_community_id_to_envelope_downloads.rb b/db/migrate/20250830180848_add_unique_index_on_envelope_community_id_to_envelope_downloads.rb new file mode 100644 index 00000000..a6b2fffd --- /dev/null +++ b/db/migrate/20250830180848_add_unique_index_on_envelope_community_id_to_envelope_downloads.rb @@ -0,0 +1,24 @@ +class AddUniqueIndexOnEnvelopeCommunityIdToEnvelopeDownloads < ActiveRecord::Migration[8.0] + def change + ActiveRecord::Base.transaction do + reversible do |dir| + dir.up do + ActiveRecord::Base.connection.execute(<<~COMMAND) + DELETE FROM envelope_downloads + WHERE created_at NOT IN ( + SELECT max_created_at + FROM ( + SELECT MAX(created_at ) as max_created_at + FROM envelope_downloads + GROUP BY 
envelope_community_id + ) AS t + ); + COMMAND + end + end + + remove_index :envelope_downloads, :envelope_community_id + add_index :envelope_downloads, :envelope_community_id, unique: true + end + end +end diff --git a/db/migrate/20250921174021_add_publication_status_to_versions.rb b/db/migrate/20250921174021_add_publication_status_to_versions.rb new file mode 100644 index 00000000..a5d91151 --- /dev/null +++ b/db/migrate/20250921174021_add_publication_status_to_versions.rb @@ -0,0 +1,6 @@ +class AddPublicationStatusToVersions < ActiveRecord::Migration[8.0] + def change + add_column :versions, :publication_status, :integer, default: 0, null: false + add_index :versions, :publication_status + end +end diff --git a/db/migrate/20250922224518_add_status_to_envelope_downloads.rb b/db/migrate/20250922224518_add_status_to_envelope_downloads.rb new file mode 100644 index 00000000..86cd4ecb --- /dev/null +++ b/db/migrate/20250922224518_add_status_to_envelope_downloads.rb @@ -0,0 +1,5 @@ +class AddStatusToEnvelopeDownloads < ActiveRecord::Migration[8.0] + def change + add_column :envelope_downloads, :status, :string, default: 'pending', null: false + end +end diff --git a/db/migrate/20250925025616_add_enqueued_at_to_envelope_downloads.rb b/db/migrate/20250925025616_add_enqueued_at_to_envelope_downloads.rb new file mode 100644 index 00000000..1c20b5de --- /dev/null +++ b/db/migrate/20250925025616_add_enqueued_at_to_envelope_downloads.rb @@ -0,0 +1,5 @@ +class AddEnqueuedAtToEnvelopeDownloads < ActiveRecord::Migration[8.0] + def change + add_column :envelope_downloads, :enqueued_at, :datetime + end +end diff --git a/db/migrate/20251022205617_add_s3_url_to_envelopes.rb b/db/migrate/20251022205617_add_s3_url_to_envelopes.rb new file mode 100644 index 00000000..547bedad --- /dev/null +++ b/db/migrate/20251022205617_add_s3_url_to_envelopes.rb @@ -0,0 +1,6 @@ +class AddS3UrlToEnvelopes < ActiveRecord::Migration[8.0] + def change + add_column :envelopes, :s3_url, :string + add_index :envelopes, :s3_url, unique: true + end +end diff --git a/db/migrate/20251027134426_add_indexed_with_es_at_to_envelopes.rb b/db/migrate/20251027134426_add_indexed_with_es_at_to_envelopes.rb new file mode 100644 index 00000000..a7873699 --- /dev/null +++ b/db/migrate/20251027134426_add_indexed_with_es_at_to_envelopes.rb @@ -0,0 +1,6 @@ +class AddIndexedWithEsAtToEnvelopes < ActiveRecord::Migration[8.0] + def change + add_column :envelopes, :indexed_with_es_at, :datetime + add_index :envelopes, :indexed_with_es_at + end +end diff --git a/db/migrate/20251123201629_add_type_to_envelope_downloads.rb b/db/migrate/20251123201629_add_type_to_envelope_downloads.rb new file mode 100644 index 00000000..09c90f73 --- /dev/null +++ b/db/migrate/20251123201629_add_type_to_envelope_downloads.rb @@ -0,0 +1,7 @@ +class AddTypeToEnvelopeDownloads < ActiveRecord::Migration[8.0] + def change + add_column :envelope_downloads, :type, :string, default: 'envelope', null: false + remove_index :envelope_downloads, :envelope_community_id, unique: true + add_index :envelope_downloads, %i[envelope_community_id type], unique: true + end +end diff --git a/db/structure.sql b/db/structure.sql index a5f04a49..d63c489d 100644 --- a/db/structure.sql +++ b/db/structure.sql @@ -324,7 +324,10 @@ CREATE TABLE public.envelope_downloads ( started_at timestamp(6) without time zone, url character varying, created_at timestamp(6) without time zone NOT NULL, - updated_at timestamp(6) without time zone NOT NULL + updated_at timestamp(6) without time zone NOT NULL, + status 
character varying DEFAULT 'pending'::character varying NOT NULL, + enqueued_at timestamp(6) without time zone, + type character varying DEFAULT 'envelope'::character varying NOT NULL ); @@ -430,7 +433,9 @@ CREATE TABLE public.envelopes ( publishing_organization_id uuid, resource_publish_type character varying, last_verified_on date, - publication_status integer DEFAULT 0 NOT NULL + publication_status integer DEFAULT 0 NOT NULL, + s3_url character varying, + indexed_with_es_at timestamp(6) without time zone ); @@ -820,7 +825,8 @@ CREATE TABLE public.versions ( created_at timestamp without time zone, object_changes text, envelope_ceterms_ctid character varying, - envelope_community_id bigint + envelope_community_id bigint, + publication_status integer DEFAULT 0 NOT NULL ); @@ -1331,10 +1337,10 @@ CREATE INDEX index_envelope_community_configs_on_envelope_community_id ON public -- --- Name: index_envelope_downloads_on_envelope_community_id; Type: INDEX; Schema: public; Owner: - +-- Name: index_envelope_downloads_on_envelope_community_id_and_type; Type: INDEX; Schema: public; Owner: - -- -CREATE INDEX index_envelope_downloads_on_envelope_community_id ON public.envelope_downloads USING btree (envelope_community_id); +CREATE UNIQUE INDEX index_envelope_downloads_on_envelope_community_id_and_type ON public.envelope_downloads USING btree (envelope_community_id, type); -- @@ -1477,6 +1483,13 @@ CREATE INDEX index_envelopes_on_purged_at ON public.envelopes USING btree (purge CREATE INDEX index_envelopes_on_resource_type ON public.envelopes USING btree (resource_type); +-- +-- Name: index_envelopes_on_s3_url; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_envelopes_on_s3_url ON public.envelopes USING btree (s3_url); + + -- -- Name: index_envelopes_on_top_level_object_ids; Type: INDEX; Schema: public; Owner: - -- @@ -1673,6 +1686,13 @@ CREATE INDEX index_versions_on_item_type_and_item_id ON public.versions USING bt CREATE INDEX index_versions_on_object ON public.versions USING gin (object); +-- +-- Name: index_versions_on_publication_status; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_versions_on_publication_status ON public.versions USING btree (publication_status); + + -- -- Name: envelope_resources envelope_resources_fts_tsvector_update; Type: TRIGGER; Schema: public; Owner: - -- @@ -1879,8 +1899,15 @@ ALTER TABLE ONLY public.envelopes SET search_path TO "$user", public; INSERT INTO "schema_migrations" (version) VALUES -('20250829235024'), +('20251123201629'), +('20251027134426'), +('20251022205617'), +('20250925025616'), +('20250922224518'), +('20250921174021'), ('20250902034147'), +('20250830180848'), +('20250829235024'), ('20250818021420'), ('20250815032532'), ('20250618195306'), @@ -1947,4 +1974,3 @@ INSERT INTO "schema_migrations" (version) VALUES ('20160414152951'), ('20160407152817'), ('20160223171632'); - diff --git a/lib/swagger_docs.rb b/lib/swagger_docs.rb index 43a62102..3a995b2e 100644 --- a/lib/swagger_docs.rb +++ b/lib/swagger_docs.rb @@ -1,6 +1,7 @@ require 'ctdl_query' require 'swagger_docs/models' require 'swagger_docs/sections/admin' +require 'swagger_docs/sections/containers' require 'swagger_docs/sections/description_sets' require 'swagger_docs/sections/envelopes' require 'swagger_docs/sections/general' @@ -18,6 +19,7 @@ class SwaggerDocs include Models include Sections::General include Sections::Admin + include Sections::Containers include Sections::DescriptionSets include Sections::Envelopes include Sections::Graphs diff --git 
a/lib/swagger_docs/models.rb b/lib/swagger_docs/models.rb index c5fe6bf3..ab610210 100644 --- a/lib/swagger_docs/models.rb +++ b/lib/swagger_docs/models.rb @@ -374,10 +374,6 @@ module Models # rubocop:todo Metrics/ModuleLength, Style/Documentation end swagger_schema :EnvelopeDownload do - property :id, - type: :string, - description: 'ID' - property :status, type: :string, description: 'Status (pending, in progress, finished, or failed)' diff --git a/lib/swagger_docs/sections/containers.rb b/lib/swagger_docs/sections/containers.rb new file mode 100644 index 00000000..8aefda24 --- /dev/null +++ b/lib/swagger_docs/sections/containers.rb @@ -0,0 +1,115 @@ +module MetadataRegistry + class SwaggerDocs + module Sections + # Swagger documentation for Containers API + module Containers + extend ActiveSupport::Concern + + included do + swagger_path '/{community_name}/containers/{container_ctid}/resources' do + operation :patch do # rubocop:todo Metrics/BlockLength + key :operationId, 'patchApiContainerResources' + key :description, 'Adds a resource to a container collection' + key :consumes, ['application/json'] + key :produces, ['application/json'] + key :tags, ['Containers'] + + security + + parameter community_name + parameter name: :container_ctid, + in: :path, + type: :string, + required: true, + description: 'CTID of the container collection' + + parameter do + key :name, :subresource + key :in, :body + key :description, 'JSON-LD subresource to add to the container' + key :required, true + + schema do + key :type, :object + key :description, 'A JSON-LD resource object' + property :@id do + key :type, :string + key :description, 'Resource identifier' + key :example, 'http://credentialengineregistry.org/resources/ce-abc123' + end + property :@type do + key :type, :string + key :description, 'Resource type' + key :example, 'ceterms:Credential' + end + property :'ceterms:ctid' do + key :type, :string + key :description, 'CTID of the resource' + key :example, 'ce-abc123' + end + end + end + + response 200 do + key :description, 'Successfully added resource to container' + schema { key :$ref, :Envelope } + end + + response 401 do + key :description, 'Unauthorized - authentication required' + end + + response 403 do + key :description, 'Forbidden - insufficient permissions' + end + + response 404 do + key :description, 'Container not found or not a collection type' + end + end + end + + swagger_path '/{community_name}/containers/{container_ctid}/resources/{resource_ctid}' do + operation :delete do # rubocop:todo Metrics/BlockLength + key :operationId, 'deleteApiContainerResource' + key :description, 'Removes a resource from a container collection' + key :produces, ['application/json'] + key :tags, ['Containers'] + + security + + parameter community_name + parameter name: :container_ctid, + in: :path, + type: :string, + required: true, + description: 'CTID of the container collection' + parameter name: :resource_ctid, + in: :path, + type: :string, + required: true, + description: 'CTID of the resource to remove' + + response 200 do + key :description, 'Successfully removed resource from container' + schema { key :$ref, :Envelope } + end + + response 401 do + key :description, 'Unauthorized - authentication required' + end + + response 403 do + key :description, 'Forbidden - insufficient permissions' + end + + response 404 do + key :description, 'Container not found or not a collection type' + end + end + end + end + end + end + end +end diff --git a/lib/swagger_docs/sections/envelopes.rb 
b/lib/swagger_docs/sections/envelopes.rb index ce0cc52f..32ca3e5e 100644 --- a/lib/swagger_docs/sections/envelopes.rb +++ b/lib/swagger_docs/sections/envelopes.rb @@ -59,37 +59,30 @@ module Envelopes # rubocop:todo Metrics/ModuleLength, Style/Documentation end end - swagger_path '/{community_name}/envelopes/downloads' do - operation :post do - key :operationId, 'postApiEnvelopesDownloads' - key :description, 'Starts new download' + swagger_path '/{community_name}/envelopes/download' do + operation :get do + key :operationId, 'getApiEnvelopesDownload' + key :description, "Returns the download's status and URL" key :produces, ['application/json'] key :tags, ['Envelopes'] parameter community_name - response 201 do + response 200 do key :description, 'Download object' schema { key :$ref, :EnvelopeDownload } end end - end - swagger_path '/{community_name}/envelopes/downloads/{id}' do - operation :get do - key :operationId, 'getApiEnvelopesDownloads' - key :description, "Returns download's status and URL" + operation :post do + key :operationId, 'postApiEnvelopesDownloads' + key :description, 'Starts a new download' key :produces, ['application/json'] key :tags, ['Envelopes'] parameter community_name - parameter name: :id, - in: :path, - type: :string, - required: true, - description: 'Download ID' - response 200 do + response 201 do key :description, 'Download object' schema { key :$ref, :EnvelopeDownload } end @@ -123,6 +116,7 @@ module Envelopes # rubocop:todo Metrics/ModuleLength, Style/Documentation enum: %w[create update destroy], required: false, description: 'Event type' + parameter provisional(default: 'include') parameter page_param parameter per_page_param diff --git a/lib/swagger_docs/sections/graphs.rb b/lib/swagger_docs/sections/graphs.rb index 6381f421..de9d84be 100644 --- a/lib/swagger_docs/sections/graphs.rb +++ b/lib/swagger_docs/sections/graphs.rb @@ -5,6 +5,68 @@ module Graphs # rubocop:todo Style/Documentation extend ActiveSupport::Concern included do + swagger_path '/{community_name}/graph/download' do + operation :get do + key :operationId, 'getApiGraphDownload' + key :description, "Returns the download's status and URL" + key :produces, ['application/json'] + key :tags, ['Graphs'] + + parameter community_name + + response 200 do + key :description, 'Download object' + schema { key :$ref, :EnvelopeDownload } + end + end + + operation :post do + key :operationId, 'postApiGraphDownloads' + key :description, 'Starts a new download' + key :produces, ['application/json'] + key :tags, ['Graphs'] + + parameter community_name + + response 201 do + key :description, 'Download object' + schema { key :$ref, :EnvelopeDownload } + end + end + end + + swagger_path '/{community_name}/graph/es' do + operation :post do + key :operationId, 'postApiGraphEs' + key :description, 'Queries graphs via Elasticsearch' + key :produces, ['application/json'] + key :tags, ['Graphs'] + + security + + parameter community_name + + parameter do + key :name, :body + key :in, :body + key :description, 'Elasticsearch query' + key :required, true + schema do + key :additionalProperties, true + end + end + + response 200 do + key :description, 'Array of graphs with the given CTIDs' + + schema do + key :type, :object + key :description, 'Elasticsearch response' + end + end + end + end + swagger_path '/{community_name}/graph/search' do operation :post do # rubocop:todo Metrics/BlockLength key :operationId, 'postApiGraphSearch' diff --git a/lib/swagger_docs/sections/resources.rb 
b/lib/swagger_docs/sections/resources.rb index be4298c3..ed4070d0 100644 --- a/lib/swagger_docs/sections/resources.rb +++ b/lib/swagger_docs/sections/resources.rb @@ -35,6 +35,11 @@ module Resources # rubocop:todo Metrics/ModuleLength, Style/Documentation key :type, :string end end + + property :@type do + key :type, :string + key :description, 'CTDL type' + end end end diff --git a/lib/swagger_helpers.rb b/lib/swagger_helpers.rb index 7dc0179e..7d5881ac 100644 --- a/lib/swagger_helpers.rb +++ b/lib/swagger_helpers.rb @@ -252,13 +252,13 @@ def resource_type(_in: :query) # rubocop:todo Lint/UnderscorePrefixedVariableNam } end - def provisional + def provisional(default: 'exclude') { name: :provisional, in: :query, type: :string, enum: %w[exclude include only], - default: 'exclude', + default:, description: 'Whether to include provisional records' } end diff --git a/local_packages/grape-middleware-logger-2.4.0.gem b/local_packages/grape-middleware-logger-2.4.0.gem new file mode 100644 index 00000000..379206ff Binary files /dev/null and b/local_packages/grape-middleware-logger-2.4.0.gem differ diff --git a/local_packages/grape-middleware-logger-master.zip b/local_packages/grape-middleware-logger-master.zip new file mode 100644 index 00000000..1e347a82 Binary files /dev/null and b/local_packages/grape-middleware-logger-master.zip differ diff --git a/openssl.cnf b/openssl.cnf new file mode 100644 index 00000000..adfa225f --- /dev/null +++ b/openssl.cnf @@ -0,0 +1,14 @@ +openssl_conf = openssl_init + +[openssl_init] +providers = provider_sect + +[provider_sect] +default = default_sect +legacy = legacy_sect + +[default_sect] +activate = 1 + +[legacy_sect] +activate = 1 diff --git a/rpms/RPM-DEPENDENCIES.md b/rpms/RPM-DEPENDENCIES.md index e9e444a6..5ac2bc38 100644 --- a/rpms/RPM-DEPENDENCIES.md +++ b/rpms/RPM-DEPENDENCIES.md @@ -1,8 +1,8 @@ # RPM Packages dependencies 1. name: readline-devel - source: https://rpmfind.net/linux/almalinux/8.10/BaseOS/x86_64/os/Packages/readline-devel-7.0-10.el8.x86_64.rpm + source: https://rpmfind.net/linux/almalinux/10.0/AppStream/x86_64/os/Packages/readline-devel-8.2-11.el10.x86_64.rpm 2. 
name: bison - source: https://rpmfind.net/linux/almalinux/8.10/AppStream//x86_64/os/Packages/bison-3.0.4-10.el8.x86_64.rpm + source: https://repo.almalinux.org/almalinux/10/AppStream/x86_64/os/Packages/bison-3.8.2-9.el10.x86_64.rpm diff --git a/rpms/bison-3.0.4-10.el8.x86_64.rpm b/rpms/bison-3.0.4-10.el8.x86_64.rpm deleted file mode 100644 index 7152525b..00000000 Binary files a/rpms/bison-3.0.4-10.el8.x86_64.rpm and /dev/null differ diff --git a/rpms/bison-3.8.2-9.el10.x86_64.rpm b/rpms/bison-3.8.2-9.el10.x86_64.rpm new file mode 100644 index 00000000..773ee925 Binary files /dev/null and b/rpms/bison-3.8.2-9.el10.x86_64.rpm differ diff --git a/rpms/readline-devel-7.0-10.el8.x86_64.rpm b/rpms/readline-devel-7.0-10.el8.x86_64.rpm deleted file mode 100644 index 92be6ac1..00000000 Binary files a/rpms/readline-devel-7.0-10.el8.x86_64.rpm and /dev/null differ diff --git a/rpms/readline-devel-8.2-11.el10.x86_64.rpm b/rpms/readline-devel-8.2-11.el10.x86_64.rpm new file mode 100644 index 00000000..d0eb7051 Binary files /dev/null and b/rpms/readline-devel-8.2-11.el10.x86_64.rpm differ diff --git a/spec/api/v1/ce_registry_spec.rb b/spec/api/v1/ce_registry_spec.rb index 9bfb5656..8352d12b 100644 --- a/spec/api/v1/ce_registry_spec.rb +++ b/spec/api/v1/ce_registry_spec.rb @@ -4,7 +4,12 @@ before { get '/ce-registry/ctid' } it { expect_status(:ok) } - it { expect(json_resp['ctid']).to match(/urn:ctid:.*/) } + + it { + # rubocop:todo Layout/LineLength + expect(json_resp['ctid']).to match(/^ce-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/) + # rubocop:enable Layout/LineLength + } end context 'Other communities' do # rubocop:todo RSpec/ContextWording diff --git a/spec/api/v1/containers_spec.rb b/spec/api/v1/containers_spec.rb new file mode 100644 index 00000000..bddb89bc --- /dev/null +++ b/spec/api/v1/containers_spec.rb @@ -0,0 +1,205 @@ +RSpec.describe API::V1::Containers do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:container_ctid) { container.envelope_ceterms_ctid } + let(:envelope_ctdl_type) { 'ceterms:Collection' } + let(:subresource) { Faker::Json.shallow_json } + let(:user) { create(:user) } + let(:repository) { instance_double(ContainerRepository) } + + let(:container) do + create( + :envelope, + :with_graph_collection, + envelope_community:, + envelope_ctdl_type: + ) + end + + before do + allow(ContainerRepository).to receive(:new).with(container).and_return(repository) + end + + context 'with default community' do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:envelope_community) do + create(:envelope_community, name: 'ce_registry', default: true) + end + + # rubocop:todo RSpec/MultipleMemoizedHelpers + describe 'PATCH /containers/:container_ctid/resources' do + # rubocop:todo RSpec/NestedGroups + context 'when not authenticated' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + it 'returns 401' do + patch "/containers/#{container_ctid}/resources", subresource + expect_status(:unauthorized) + end + end + + context 'when envelope is not a container type' do # rubocop:todo RSpec/NestedGroups + let(:envelope_ctdl_type) { 'ceterms:Credential' } + + it 'returns 404' do # rubocop:todo Layout/IndentationConsistency + patch "/containers/#{container_ctid}/resources", + subresource, + 'Authorization' => "Token #{user.auth_token.value}" + + expect_status(:not_found) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'when authenticated and container 
exists' do # rubocop:todo RSpec/NestedGroups + let(:parsed_subresource) { JSON.parse(subresource) } + + before do + allow(repository).to receive(:add).with(parsed_subresource) + end + + it 'instantiates a repository and calls add with the parsed resource' do + patch "/containers/#{container_ctid}/resources", + subresource, + 'Authorization' => "Token #{user.auth_token.value}" + + expect(repository).to have_received(:add).with(parsed_subresource) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + end + + # rubocop:todo RSpec/MultipleMemoizedHelpers + describe 'DELETE /containers/:container_ctid/resources/:resource_ctid' do + let(:resource_ctid) { Envelope.generate_ctid } + + # rubocop:todo RSpec/NestedGroups + context 'when not authenticated' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + it 'returns 401' do + delete "/containers/#{container_ctid}/resources/#{resource_ctid}" + expect_status(:unauthorized) + end + end + + context 'when envelope is not a container type' do # rubocop:todo RSpec/NestedGroups + let(:envelope_ctdl_type) { 'ceterms:Credential' } + + it 'returns 404' do # rubocop:todo Layout/IndentationConsistency + delete "/containers/#{container_ctid}/resources/#{resource_ctid}", + {}, + 'Authorization' => "Token #{user.auth_token.value}" + + expect_status(:not_found) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'when authenticated and container exists' do # rubocop:todo RSpec/NestedGroups + before do + allow(repository).to receive(:remove).with(resource_ctid) + end + + it 'instantiates a repository and calls remove with the resource CTID' do + delete "/containers/#{container_ctid}/resources/#{resource_ctid}", + {}, + 'Authorization' => "Token #{user.auth_token.value}" + + expect(repository).to have_received(:remove).with(resource_ctid) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + end + end + + context 'with explicit community' do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:envelope_community) do + create(:envelope_community, name: 'navy') + end + + # rubocop:todo RSpec/MultipleMemoizedHelpers + describe 'PATCH /:community/containers/:container_ctid/resources' do + # rubocop:todo RSpec/NestedGroups + context 'when not authenticated' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + it 'returns 401' do + patch "/navy/containers/#{container_ctid}/resources", subresource + + expect_status(:unauthorized) + end + end + + context 'when envelope is not a container type' do # rubocop:todo RSpec/NestedGroups + let(:envelope_ctdl_type) { 'ceterms:Credential' } + + it 'returns 404' do # rubocop:todo Layout/IndentationConsistency + patch "/navy/containers/#{container_ctid}/resources", + subresource, + 'Authorization' => "Token #{user.auth_token.value}" + + expect_status(:not_found) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'when authenticated and container exists' do # rubocop:todo RSpec/NestedGroups + let(:parsed_subresource) { JSON.parse(subresource) } + + before do + allow(repository).to receive(:add).with(parsed_subresource) + end + + it 'instantiates a repository and calls add with the parsed resource' do + patch "/navy/containers/#{container_ctid}/resources", + subresource, + 'Authorization' => "Token #{user.auth_token.value}" + + expect(repository).to 
have_received(:add).with(parsed_subresource) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + end + + # rubocop:todo RSpec/MultipleMemoizedHelpers + describe 'DELETE /:community/containers/:container_ctid/resources/:resource_ctid' do + let(:resource_ctid) { Envelope.generate_ctid } + + # rubocop:todo RSpec/NestedGroups + context 'when not authenticated' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + it 'returns 401' do + delete "/navy/containers/#{container_ctid}/resources/#{resource_ctid}" + expect_status(:unauthorized) + end + end + + context 'when envelope is not a container type' do # rubocop:todo RSpec/NestedGroups + let(:envelope_ctdl_type) { 'ceterms:Credential' } + + it 'returns 404' do # rubocop:todo Layout/IndentationConsistency + delete "/navy/containers/#{container_ctid}/resources/#{resource_ctid}", + {}, + 'Authorization' => "Token #{user.auth_token.value}" + + expect_status(:not_found) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'when authenticated and container exists' do # rubocop:todo RSpec/NestedGroups + before do + allow(repository).to receive(:remove).with(resource_ctid) + end + + it 'instantiates a repository and calls remove with the resource CTID' do + delete "/navy/containers/#{container_ctid}/resources/#{resource_ctid}", + {}, + 'Authorization' => "Token #{user.auth_token.value}" + + expect(repository).to have_received(:remove).with(resource_ctid) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + end + end +end diff --git a/spec/api/v1/envelope_events_spec.rb b/spec/api/v1/envelope_events_spec.rb index e8ca4dad..2e868357 100644 --- a/spec/api/v1/envelope_events_spec.rb +++ b/spec/api/v1/envelope_events_spec.rb @@ -20,6 +20,9 @@ # rubocop:todo RSpec/IndexedLet let!(:envelope2) { create(:envelope, :with_cer_credential, envelope_community: navy) } # rubocop:enable RSpec/IndexedLet + # rubocop:todo RSpec/IndexedLet + let!(:envelope3) { create(:envelope, :from_cer, :provisional, envelope_community: ce_registry) } + # rubocop:enable RSpec/IndexedLet before do travel_to updated_at do @@ -35,7 +38,7 @@ it 'returns all events' do # rubocop:todo RSpec/ExampleLength get "/#{ce_registry.name}/envelopes/events" expect_status(:ok) - expect_json_sizes(3) + expect_json_sizes(4) expect_json('0.envelope_ceterms_ctid', envelope1.envelope_ceterms_ctid) expect_json('0.event', 'destroy') @@ -45,9 +48,13 @@ expect_json('1.event', 'update') expect_json('1.created_at', updated_at.change(usec: 0).as_json) - expect_json('2.envelope_ceterms_ctid', envelope1.envelope_ceterms_ctid) + expect_json('2.envelope_ceterms_ctid', envelope3.envelope_ceterms_ctid) expect_json('2.event', 'create') - expect_json('2.created_at', envelope1.created_at.as_json) + expect_json('2.created_at', envelope3.created_at.as_json) + + expect_json('3.envelope_ceterms_ctid', envelope1.envelope_ceterms_ctid) + expect_json('3.event', 'create') + expect_json('3.created_at', envelope1.created_at.as_json) get "/#{navy.name}/envelopes/events" expect_status(:ok) @@ -117,11 +124,15 @@ it 'returns events of the given type' do # rubocop:todo RSpec/ExampleLength get "/#{ce_registry.name}/envelopes/events?event=create" expect_status(:ok) - expect_json_sizes(1) + expect_json_sizes(2) - expect_json('0.envelope_ceterms_ctid', envelope1.envelope_ceterms_ctid) + expect_json('0.envelope_ceterms_ctid', envelope3.envelope_ceterms_ctid) expect_json('0.event', 'create') - 
expect_json('0.created_at', envelope1.created_at.as_json) + expect_json('0.created_at', envelope3.created_at.as_json) + + expect_json('1.envelope_ceterms_ctid', envelope1.envelope_ceterms_ctid) + expect_json('1.event', 'create') + expect_json('1.created_at', envelope1.created_at.as_json) get "/#{ce_registry.name}/envelopes/events?event=destroy" expect_status(:ok) @@ -140,6 +151,34 @@ expect_json('0.created_at', updated_at.change(usec: 0).as_json) end end + + context 'with `provisional`' do + it 'returns events of the given publication status' do # rubocop:todo RSpec/ExampleLength + get "/#{ce_registry.name}/envelopes/events?provisional=exclude" + expect_status(:ok) + expect_json_sizes(3) + + expect_json('0.envelope_ceterms_ctid', envelope1.envelope_ceterms_ctid) + expect_json('0.event', 'destroy') + expect_json('0.created_at', destroyed_at.change(usec: 0).as_json) + + expect_json('1.envelope_ceterms_ctid', envelope1.envelope_ceterms_ctid) + expect_json('1.event', 'update') + expect_json('1.created_at', updated_at.change(usec: 0).as_json) + + expect_json('2.envelope_ceterms_ctid', envelope1.envelope_ceterms_ctid) + expect_json('2.event', 'create') + expect_json('2.created_at', envelope1.created_at.as_json) + + get "/#{ce_registry.name}/envelopes/events?provisional=only" + expect_status(:ok) + expect_json_sizes(1) + + expect_json('0.envelope_ceterms_ctid', envelope3.envelope_ceterms_ctid) + expect_json('0.event', 'create') + expect_json('0.created_at', envelope3.created_at.as_json) + end + end end end # rubocop:enable RSpec/MultipleMemoizedHelpers diff --git a/spec/api/v1/envelopes_spec.rb b/spec/api/v1/envelopes_spec.rb index 4f3752ef..132e67b6 100644 --- a/spec/api/v1/envelopes_spec.rb +++ b/spec/api/v1/envelopes_spec.rb @@ -145,28 +145,17 @@ end # rubocop:todo RSpec/MultipleMemoizedHelpers - context 'GET /:community/envelopes/downloads/:id' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers + context 'GET /:community/envelopes/download' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers let(:finished_at) { nil } - let(:internal_error_message) { nil } let(:started_at) { nil } - - let(:envelope_download) do - create( - :envelope_download, - envelope_community:, - finished_at:, - internal_error_message:, - started_at: - ) - end + let(:url) { nil } let(:perform_request) do - get "/envelopes/downloads/#{envelope_download.id}", - 'Authorization' => "Token #{auth_token}" + get '/envelopes/download', 'Authorization' => "Token #{auth_token}" end # rubocop:todo RSpec/MultipleMemoizedHelpers - context 'invalid token' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers + context 'with invalid token' do # rubocop:todo RSpec/MultipleMemoizedHelpers let(:auth_token) { 'invalid token' } before do @@ -179,72 +168,116 @@ end # rubocop:enable RSpec/MultipleMemoizedHelpers - # rubocop:todo RSpec/MultipleMemoizedHelpers - context 'all good' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers - before do - perform_request - expect_status(:ok) - end - - # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'with valid token' do # rubocop:todo RSpec/MultipleMemoizedHelpers # rubocop:todo RSpec/NestedGroups - context 'in progress' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + context 'without envelope download' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups # rubocop:enable RSpec/NestedGroups - let(:started_at) { Time.current } + it 'creates new pending download' do + 
expect { perform_request }.to change(EnvelopeDownload, :count).by(1) + expect_status(:ok) - it 'returns `in progress`' do - expect_json('status', 'in progress') + envelope_download = EnvelopeDownload.last + expect(envelope_download.envelope_community).to eq(envelope_community) + expect(envelope_download.status).to eq('pending') + + expect_json_sizes(2) + expect_json('enqueued_at', nil) + expect_json('status', 'pending') end end - # rubocop:enable RSpec/MultipleMemoizedHelpers - # rubocop:todo RSpec/MultipleMemoizedHelpers # rubocop:todo RSpec/NestedGroups - context 'failed' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + context 'with envelope download' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups # rubocop:enable RSpec/NestedGroups - let(:finished_at) { Time.current } - let(:internal_error_message) { Faker::Lorem.sentence } + let!(:envelope_download) do + create( + :envelope_download, + envelope_community:, + finished_at:, + started_at:, + status:, + url: + ) + end - it 'returns `failed`' do - expect_json('status', 'failed') + # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'in progress' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:status) { :in_progress } + + it 'returns `in progress`' do + expect { perform_request }.not_to change(EnvelopeDownload, :count) + expect_status(:ok) + expect_json_sizes(2) + expect_json('started_at', envelope_download.started_at.as_json) + expect_json('status', 'in_progress') + end end - end - # rubocop:enable RSpec/MultipleMemoizedHelpers + # rubocop:enable RSpec/MultipleMemoizedHelpers - # rubocop:todo RSpec/MultipleMemoizedHelpers - # rubocop:todo RSpec/NestedGroups - context 'finished' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups - # rubocop:enable RSpec/NestedGroups - let(:finished_at) { Time.current } + # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'failed' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:finished_at) { Time.current } + let(:status) { :failed } + let(:url) { Faker::Internet.url } + + it 'returns `failed`' do + expect { perform_request }.not_to change(EnvelopeDownload, :count) + expect_status(:ok) + expect_json_sizes(3) + expect_json('finished_at', envelope_download.finished_at.as_json) + expect_json('status', 'failed') + expect_json('url', url) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers - it 'returns `finished` and URL' do - expect_json('status', 'finished') + # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'finished' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:finished_at) { Time.current } + let(:status) { :finished } + let(:url) { Faker::Internet.url } + + it 'returns `finished` and URL' do + expect { perform_request }.not_to change(EnvelopeDownload, :count) + expect_status(:ok) + expect_json_sizes(3) + expect_json('finished_at', envelope_download.finished_at.as_json) + expect_json('status', 'finished') + expect_json('url', url) + end end - end - # rubocop:enable RSpec/MultipleMemoizedHelpers + # rubocop:enable RSpec/MultipleMemoizedHelpers - # rubocop:todo RSpec/MultipleMemoizedHelpers - # rubocop:todo 
RSpec/NestedGroups - context 'pending' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups - # rubocop:enable RSpec/NestedGroups - it 'returns `pending`' do - expect_json('status', 'pending') + # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'pending' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + let(:status) { :pending } + + # rubocop:enable RSpec/NestedGroups + it 'returns `pending`' do + expect { perform_request }.not_to change(EnvelopeDownload, :count) + expect_status(:ok) + expect_json('status', 'pending') + end end + # rubocop:enable RSpec/MultipleMemoizedHelpers end - # rubocop:enable RSpec/MultipleMemoizedHelpers end - # rubocop:enable RSpec/MultipleMemoizedHelpers end # rubocop:enable RSpec/MultipleMemoizedHelpers - context 'POST /:community/envelopes/downloads' do # rubocop:todo RSpec/ContextWording + context 'POST /:community/envelopes/download' do # rubocop:todo RSpec/ContextWording let(:perform_request) do - post '/envelopes/downloads', - nil, - 'Authorization' => "Token #{auth_token}" + post '/envelopes/download', nil, 'Authorization' => "Token #{auth_token}" end - context 'invalid token' do # rubocop:todo RSpec/ContextWording + context 'with invalid token' do let(:auth_token) { 'invalid token' } before do @@ -256,26 +289,56 @@ end end - context 'all good' do # rubocop:todo RSpec/ContextWording - # rubocop:todo RSpec/MultipleExpectations - it 'starts download' do # rubocop:todo RSpec/ExampleLength, RSpec/MultipleExpectations - # rubocop:enable RSpec/MultipleExpectations - expect do - perform_request - end.to change(EnvelopeDownload, :count).by(1) + context 'with valid token' do + let(:now) { Time.current.change(usec: 0) } - envelope_download = EnvelopeDownload.last - expect(envelope_download.envelope_community.name).to eq('ce_registry') + context 'without envelope download' do # rubocop:todo RSpec/NestedGroups + # rubocop:todo RSpec/MultipleExpectations + it 'creates new pending download and enqueues job' do # rubocop:todo RSpec/ExampleLength + # rubocop:enable RSpec/MultipleExpectations + travel_to now do + expect { perform_request }.to change(EnvelopeDownload, :count).by(1) + end - expect_status(:created) - expect_json('id', envelope_download.id) + expect_status(:created) - expect(ActiveJob::Base.queue_adapter.enqueued_jobs.size).to eq(1) + envelope_download = EnvelopeDownload.last + expect(envelope_download.envelope_community).to eq(envelope_community) + expect(envelope_download.status).to eq('pending') - enqueued_job = ActiveJob::Base.queue_adapter.enqueued_jobs.first - expect(enqueued_job[:args]).to eq([envelope_download.id]) - expect(enqueued_job[:job]).to eq(DownloadEnvelopesJob) + expect_json_sizes(2) + expect_json('enqueued_at', now.as_json) + expect_json('status', 'pending') + + expect(ActiveJob::Base.queue_adapter.enqueued_jobs.size).to eq(1) + + job = ActiveJob::Base.queue_adapter.enqueued_jobs.first + expect(job.fetch('arguments')).to eq([envelope_download.id]) + expect(job.fetch('job_class')).to eq('DownloadEnvelopesJob') + end end + + # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'with envelope download' do # rubocop:todo RSpec/NestedGroups + let!(:envelope_download) do + create(:envelope_download, :finished, envelope_community:) + end + + it 'enqueues job for existing download' do + travel_to now do + expect { perform_request }.to not_change(EnvelopeDownload, :count) + .and 
enqueue_job(DownloadEnvelopesJob).with(envelope_download.id) + end + + expect_status(:created) + expect(envelope_download.reload.status).to eq('pending') + + expect_json_sizes(2) + expect_json('enqueued_at', now.as_json) + expect_json('status', 'pending') + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers end end end diff --git a/spec/api/v1/graph_spec.rb b/spec/api/v1/graph_spec.rb index 52cbe3a6..a279df79 100644 --- a/spec/api/v1/graph_spec.rb +++ b/spec/api/v1/graph_spec.rb @@ -1,6 +1,10 @@ RSpec.describe API::V1::Graph do + let(:auth_token) { create(:user).auth_token.value } + # rubocop:todo RSpec/MultipleMemoizedHelpers context 'default community' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers + let(:envelope_community) { ec } + let!(:ec) { create(:envelope_community, name: 'ce_registry') } let!(:envelope) { create(:envelope, :from_cer, :with_cer_credential) } let(:resource) { envelope.processed_resource } @@ -145,7 +149,9 @@ expect_status(:ok) expect_json('@id': full_id) expect(json_body).to have_key(:@graph) - expect(json_body[:@graph].map { |o| o[:'ceterms:ctid'] }).to include(competency_id) + expect(json_body[:@graph].map do |o| + o[:'ceterms:ctid'] + end).to include(competency_id) end end # rubocop:enable RSpec/MultipleMemoizedHelpers @@ -160,7 +166,9 @@ expect_status(:ok) expect_json('@id': full_id) expect(json_body).to have_key(:@graph) - expect(json_body[:@graph].map { |o| o[:'ceterms:ctid'] }).to include(competency_id) + expect(json_body[:@graph].map do |o| + o[:'ceterms:ctid'] + end).to include(competency_id) end end # rubocop:enable RSpec/MultipleMemoizedHelpers @@ -251,8 +259,217 @@ end end # rubocop:enable RSpec/MultipleMemoizedHelpers + + context 'GET /:community/graph/download' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers + let(:finished_at) { nil } + let(:internal_error_message) { nil } + let(:started_at) { nil } + let(:url) { nil } + + let(:perform_request) do + get '/graph/download', 'Authorization' => "Token #{auth_token}" + end + + # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'with invalid token' do # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:enable RSpec/NestedGroups + let(:auth_token) { 'invalid token' } + + before do + perform_request + end + + it 'returns 401' do + expect_status(:unauthorized) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/NestedGroups + context 'with valid token' do # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:enable RSpec/NestedGroups + # rubocop:todo RSpec/NestedGroups + context 'without envelope download' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + it 'creates new pending download' do + expect { perform_request }.to change(EnvelopeDownload, :count).by(1) + expect_status(:ok) + + envelope_download = EnvelopeDownload.last + expect(envelope_download.envelope_community).to eq(envelope_community) + expect(envelope_download.status).to eq('pending') + + expect_json_sizes(2) + expect_json('enqueued_at', nil) + expect_json('status', 'pending') + end + end + + # rubocop:todo RSpec/NestedGroups + context 'with envelope download' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let!(:envelope_download) do + create( + :envelope_download, + envelope_community:, + finished_at:, + internal_error_message:, + started_at:, + status:, + type: :graph, + url: + ) + end + + # 
rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'in progress' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:status) { :in_progress } + + it 'returns `in progress`' do + expect { perform_request }.not_to change(EnvelopeDownload, :count) + expect_status(:ok) + expect_json_sizes(2) + expect_json('started_at', envelope_download.started_at.as_json) + expect_json('status', 'in_progress') + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'failed' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:finished_at) { Time.current } + let(:status) { :failed } + let(:url) { Faker::Internet.url } + + it 'returns `failed`' do + expect { perform_request }.not_to change(EnvelopeDownload, :count) + expect_status(:ok) + expect_json_sizes(3) + expect_json('finished_at', envelope_download.finished_at.as_json) + expect_json('status', 'failed') + expect_json('url', url) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'finished' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:finished_at) { Time.current } + let(:status) { :finished } + let(:url) { Faker::Internet.url } + + it 'returns `finished` and URL' do + expect { perform_request }.not_to change(EnvelopeDownload, :count) + expect_status(:ok) + expect_json_sizes(3) + expect_json('finished_at', envelope_download.finished_at.as_json) + expect_json('status', 'finished') + expect_json('url', url) + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/NestedGroups + context 'pending' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + let(:status) { :pending } + + # rubocop:enable RSpec/NestedGroups + it 'returns `pending`' do + expect { perform_request }.not_to change(EnvelopeDownload, :count) + expect_status(:ok) + expect_json('status', 'pending') + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + end + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'POST /:community/graph/download' do # rubocop:todo RSpec/ContextWording + let(:perform_request) do + post '/graph/download', nil, 'Authorization' => "Token #{auth_token}" + end + + # rubocop:todo RSpec/NestedGroups + context 'with invalid token' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:auth_token) { 'invalid token' } + + before do + perform_request + end + + it 'returns 401' do + expect_status(:unauthorized) + end + end + + # rubocop:todo RSpec/NestedGroups + context 'with valid token' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:now) { Time.current.change(usec: 0) } + + context 'without envelope download' do # rubocop:todo RSpec/NestedGroups + # rubocop:todo RSpec/MultipleExpectations + it 'creates new pending download and enqueues job' do # rubocop:todo RSpec/ExampleLength + # rubocop:enable RSpec/MultipleExpectations + travel_to now do + expect { perform_request }.to 
change(EnvelopeDownload, :count).by(1) + end + + expect_status(:created) + + envelope_download = EnvelopeDownload.last + expect(envelope_download.envelope_community).to eq(envelope_community) + expect(envelope_download.status).to eq('pending') + + expect_json_sizes(2) + expect_json('enqueued_at', now.as_json) + expect_json('status', 'pending') + + expect(ActiveJob::Base.queue_adapter.enqueued_jobs.size).to eq(1) + + job = ActiveJob::Base.queue_adapter.enqueued_jobs.first + expect(job.fetch('arguments')).to eq([envelope_download.id]) + expect(job.fetch('job_class')).to eq('DownloadEnvelopesJob') + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + + # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'with envelope download' do # rubocop:todo RSpec/NestedGroups + let!(:envelope_download) do + create(:envelope_download, :finished, envelope_community:, type: :graph) + end + + it 'enqueues job for existing download' do + travel_to now do + expect { perform_request }.to not_change(EnvelopeDownload, :count) + .and enqueue_job(DownloadEnvelopesJob).with(envelope_download.id) + end + + expect_status(:created) + expect(envelope_download.reload.status).to eq('pending') + + expect_json_sizes(2) + expect_json('enqueued_at', now.as_json) + expect_json('status', 'pending') + end + end + # rubocop:enable RSpec/MultipleMemoizedHelpers + end + end end - # rubocop:enable RSpec/MultipleMemoizedHelpers context 'with community' do let!(:name) { ec.name } @@ -266,6 +483,7 @@ ) end + # rubocop:todo RSpec/MultipleMemoizedHelpers context 'GET /:community_name/graph/:id' do # rubocop:todo RSpec/ContextWording let!(:id) { '123-123-123' } let!(:processed_resource) { attributes_for(:cer_org).merge('@id': id) } @@ -388,7 +606,9 @@ end # rubocop:enable RSpec/MultipleMemoizedHelpers end + # rubocop:enable RSpec/MultipleMemoizedHelpers + # rubocop:todo RSpec/MultipleMemoizedHelpers context 'POST /:community_name/graph/search' do # rubocop:todo RSpec/ContextWording let!(:envelope1) do # rubocop:todo RSpec/IndexedLet create(:envelope, :with_cer_credential, envelope_community: ec) @@ -498,5 +718,6 @@ end # rubocop:enable RSpec/MultipleMemoizedHelpers end + # rubocop:enable RSpec/MultipleMemoizedHelpers end end diff --git a/spec/api/v1/resources_spec.rb b/spec/api/v1/resources_spec.rb index 49886df0..fc1307f5 100644 --- a/spec/api/v1/resources_spec.rb +++ b/spec/api/v1/resources_spec.rb @@ -368,6 +368,7 @@ let(:ctid1) { Faker::Lorem.characters(number: 32) } # rubocop:todo RSpec/IndexedLet let(:ctid2) { Faker::Lorem.characters(number: 32) } # rubocop:todo RSpec/IndexedLet let(:ctid3) { Faker::Lorem.characters(number: 32) } # rubocop:todo RSpec/IndexedLet + let(:ctid4) { Faker::Lorem.characters(number: 32) } # rubocop:todo RSpec/IndexedLet before do resource1 = attributes_for(:cer_competency_framework, ctid: ctid1) @@ -382,6 +383,10 @@ .except(:id) .stringify_keys + resource4 = attributes_for(:cer_org, ctid: ctid4) + .except(:id) + .stringify_keys + create( :envelope, :from_cer, @@ -402,12 +407,41 @@ processed_resource: attributes_for(:cer_graph_competency_framework) .merge(:@graph => [resource3]) ) + + create( + :envelope, + :from_cer, + processed_resource: attributes_for(:cer_org) + .merge(:@graph => [resource4]) + ) end - it 'returns existing CTIDs' do - post '/resources/check_existence', { ctids: [ctid1, ctid2, ctid3] } + # rubocop:todo RSpec/MultipleExpectations + it 'returns existing CTIDs' do # rubocop:todo RSpec/ExampleLength + # rubocop:enable RSpec/MultipleExpectations + post '/resources/check_existence', 
{ ctids: [ctid1, ctid2, ctid3, ctid4] } + expect_status(:ok) + expect(JSON(response.body)).to contain_exactly(ctid1, ctid4) + + post '/resources/check_existence', + { ctids: [ctid1, ctid2, ctid3, ctid4], '@type': 'foobar' } + expect_status(:unprocessable_entity) + expect_json(errors: ['@type does not have a valid value']) + + post '/resources/check_existence', + { ctids: [ctid1, ctid2, ctid3, ctid4], '@type': 'ceasn:CompetencyFramework' } expect_status(:ok) expect(JSON(response.body)).to contain_exactly(ctid1) + + post '/resources/check_existence', + { ctids: [ctid1, ctid2, ctid3, ctid4], '@type': 'ceterms:CredentialOrganization' } + expect_status(:ok) + expect(JSON(response.body)).to contain_exactly(ctid4) + + post '/resources/check_existence', + { ctids: [ctid1, ctid2, ctid3, ctid4], '@type': 'ceterms:Organization' } + expect_status(:ok) + expect(JSON(response.body)).to contain_exactly(ctid4) end end # rubocop:enable RSpec/MultipleMemoizedHelpers diff --git a/spec/factories/envelope_downloads.rb b/spec/factories/envelope_downloads.rb index dc788927..caf0a418 100644 --- a/spec/factories/envelope_downloads.rb +++ b/spec/factories/envelope_downloads.rb @@ -1,7 +1,26 @@ FactoryBot.define do factory :envelope_download do + enqueued_at { Time.current.change(usec: 0) } # rubocop:todo FactoryBot/FactoryAssociationWithStrategy envelope_community { create(:envelope_community, :with_random_name) } # rubocop:enable FactoryBot/FactoryAssociationWithStrategy + + trait :failed do + finished_at { Time.current.change(usec: 0) } + internal_error_message { Faker::Lorem.sentence } + started_at { Time.current.change(usec: 0) } + status { :finished } + end + + trait :finished do + finished_at { Time.current.change(usec: 0) } + started_at { Time.current.change(usec: 0) } + status { :finished } + end + + trait :in_progress do + started_at { Time.current.change(usec: 0) } + status { :in_progress } + end end end diff --git a/spec/factories/envelopes.rb b/spec/factories/envelopes.rb index 9c5ca056..d6bff98f 100644 --- a/spec/factories/envelopes.rb +++ b/spec/factories/envelopes.rb @@ -1,6 +1,6 @@ FactoryBot.define do factory :envelope do - envelope_ceterms_ctid { Envelope.generate_ctid } + envelope_ceterms_ctid { processed_resource[:'ceterms:ctid'] || Envelope.generate_ctid } envelope_ctdl_type { 'ceterms:CredentialOrganization' } envelope_type { :resource_data } envelope_version { '0.52.0' } @@ -81,6 +81,10 @@ processed_resource { attributes_for(:cer_graph_competency_framework, provisional:) } end + trait :with_graph_collection do + processed_resource { attributes_for(:cer_graph_collection, provisional:) } + end + trait :provisional do provisional { true } end diff --git a/spec/factories/resources.rb b/spec/factories/resources.rb index fcf6b309..26e3f1fd 100644 --- a/spec/factories/resources.rb +++ b/spec/factories/resources.rb @@ -1,12 +1,15 @@ FactoryBot.define do factory :base_resource, class: 'Hashie::Mash' do transient do + ctid { Envelope.generate_ctid } provisional { false } end add_attribute(:'adms:status') do 'graphPublicationStatus:Provisional' if provisional end + + add_attribute(:'ceterms:ctid') { ctid } end factory :resource, parent: :base_resource do @@ -19,11 +22,9 @@ factory :cer_org, parent: :base_resource do add_attribute(:@type) { 'ceterms:CredentialOrganization' } add_attribute(:@context) { 'http://credreg.net/ctdl/schema/context/json' } - transient { ctid { Envelope.generate_ctid } } add_attribute(:@id) do "http://credentialengineregistry.org/resources/#{ctid}" end - 
add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceterms:name') { 'Test Org' } add_attribute(:'ceterms:description') { 'Org Description' } add_attribute(:'ceterms:subjectWebpage') { 'http://example.com/test-org' } @@ -51,8 +52,6 @@ end add_attribute(:@type) { 'ceterms:Certificate' } add_attribute(:@context) { 'http://credreg.net/ctdl/schema/context/json' } - transient { ctid { Envelope.generate_ctid } } - add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceterms:name') { 'Test Cred' } add_attribute(:'ceterms:description') { 'Test Cred Description' } add_attribute(:'ceterms:subjectWebpage') { 'http://example.com/test-cred' } @@ -69,22 +68,18 @@ factory :cer_ass_prof, parent: :base_resource do add_attribute(:@type) { 'ceterms:AssessmentProfile' } add_attribute(:@context) { 'http://credreg.net/ctdl/schema/context/json' } - transient { ctid { Envelope.generate_ctid } } add_attribute(:@id) do "http://credentialengineregistry.org/resources/#{ctid}" end - add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceterms:name') { 'Test Assessment Profile' } end factory :cer_cond_man, parent: :base_resource do add_attribute(:@type) { 'ceterms:ConditionManifest' } add_attribute(:@context) { 'http://credreg.net/ctdl/schema/context/json' } - transient { ctid { Envelope.generate_ctid } } add_attribute(:@id) do "http://credentialengineregistry.org/resources/#{ctid}" end - add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceterms:name') { 'Test Cond Man' } add_attribute(:'ceterms:conditionManifestOf') { [{ '@id' => 'AgentID' }] } end @@ -92,11 +87,9 @@ factory :cer_cost_man, parent: :base_resource do add_attribute(:@type) { 'ceterms:CostManifest' } add_attribute(:@context) { 'http://credreg.net/ctdl/schema/context/json' } - transient { ctid { Envelope.generate_ctid } } add_attribute(:@id) do "http://credentialengineregistry.org/resources/#{ctid}" end - add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceterms:name') { 'Test Cost Man' } add_attribute(:'ceterms:costDetails') { 'CostDetails' } add_attribute(:'ceterms:costManifestOf') { [{ '@id' => 'AgentID' }] } @@ -105,11 +98,9 @@ factory :cer_lrn_opp_prof, parent: :base_resource do add_attribute(:@type) { 'ceterms:CostManifest' } add_attribute(:@context) { 'http://credreg.net/ctdl/schema/context/json' } - transient { ctid { Envelope.generate_ctid } } add_attribute(:@id) do "http://credentialengineregistry.org/resources/#{ctid}" end - add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceterms:name') { 'Test Lrn Opp Prof' } add_attribute(:'ceterms:costDetails') { 'CostDetails' } add_attribute(:'ceterms:costManifestOf') { [{ '@id' => 'AgentID' }] } @@ -141,7 +132,6 @@ add_attribute(:@id) { ctid } add_attribute(:@type) { 'ceterms:AssessmentProfile' } add_attribute(:@context) { 'http://credreg.net/ctdl/schema/context/json' } - add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceterms:name') { 'Test Assessment Profile' } add_attribute(:'ceasn:isPartOf') { part_of } end @@ -149,29 +139,24 @@ factory :cer_competency, parent: :base_resource do transient { part_of { nil } } transient { competency_text { 'This is the competency text...' 
} } - transient { ctid { Envelope.generate_ctid } } id { "http://credentialengineregistry.org/resources/#{ctid}" } add_attribute(:@id) { id } add_attribute(:@type) { 'ceasn:Competency' } - add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceasn:isPartOf') { part_of } add_attribute(:'ceasn:inLanguage') { ['en'] } add_attribute(:'ceasn:competencyText') { { 'en-us' => competency_text } } end factory :cer_competency_framework, parent: :base_resource do - transient { ctid { Envelope.generate_ctid } } id { "http://credentialengineregistry.org/resources/#{ctid}" } add_attribute(:@id) { id } add_attribute(:@type) { 'ceasn:CompetencyFramework' } - add_attribute(:'ceterms:ctid') { ctid } add_attribute(:'ceasn:inLanguage') { ['en'] } add_attribute(:'ceasn:name') { { 'en-us' => 'Competency Framework name' } } add_attribute(:'ceasn:description') { { 'en-us' => 'Competency Framework description' } } end factory :cer_graph_competency_framework, parent: :base_resource do - transient { ctid { Envelope.generate_ctid } } id { "http://credentialengineregistry.org/resources/#{ctid}" } add_attribute(:@id) { id } add_attribute(:@type) { 'ceasn:CompetencyFramework' } @@ -186,6 +171,44 @@ attributes_for(:cer_competency_framework, ctid: ctid) ] end + end + + factory :cer_graph_collection, parent: :base_resource do + transient { ctid { Envelope.generate_ctid } } + id { "http://credentialengineregistry.org/resources/#{ctid}" } + add_attribute(:@id) { id } + add_attribute(:@context) { 'http://credreg.net/ctdlasn/schema/context/json' } + add_attribute(:@graph) do + [ + attributes_for(:cer_collection, part_of: id), + attributes_for(:cer_collection_member, part_of: id) + ] + end + add_attribute(:'ceterms:ctid') { ctid } + end + + factory :cer_collection, parent: :base_resource do + transient do + ctid { Envelope.generate_ctid } + member_ids { [] } + end + id { "http://credentialengineregistry.org/resources/#{ctid}" } + add_attribute(:@id) { id } + add_attribute(:@type) { 'ceterms:Collection' } + add_attribute(:@context) { 'http://credreg.net/ctdlasn/schema/context/json' } + add_attribute(:'ceterms:ctid') { ctid } + add_attribute(:'ceterms:hasMember') { member_ids } + end + + factory :cer_collection_member, parent: :base_resource do + transient do + ctid { Envelope.generate_ctid } + member_ids { [] } + end + id { "http://credentialengineregistry.org/resources/#{ctid}" } + add_attribute(:@id) { id } + add_attribute(:@type) { 'ceterms:CollectionMember' } + add_attribute(:@context) { 'http://credreg.net/ctdlasn/schema/context/json' } add_attribute(:'ceterms:ctid') { ctid } end end diff --git a/spec/jobs/download_envelopes_job_spec.rb b/spec/jobs/download_envelopes_job_spec.rb index d2a20f8f..d5c2250c 100644 --- a/spec/jobs/download_envelopes_job_spec.rb +++ b/spec/jobs/download_envelopes_job_spec.rb @@ -1,198 +1,29 @@ require 'spec_helper' -RSpec.describe DownloadEnvelopesJob do # rubocop:todo RSpec/MultipleMemoizedHelpers - let(:bucket) { double('bucket') } # rubocop:todo RSpec/VerifiedDoubles - let(:bucket_name) { 'envelope-downloads-bucket-test' } - let(:envelope_download) { create(:envelope_download, envelope_community:) } - let(:hex) { Faker::Lorem.characters } - let(:key) { "ce_registry_#{now.to_i}_#{hex}.zip" } - let(:now) { Time.current.change(usec: 0) } - let(:object) { double('object') } # rubocop:todo RSpec/VerifiedDoubles - let(:region) { 'aws-region-test' } - let(:resource) { double('resource') } # rubocop:todo RSpec/VerifiedDoubles - let(:url) { Faker::Internet.url } - - let(:envelope_community) do - 
EnvelopeCommunity.find_or_create_by!(name: 'ce_registry') - end - - let(:perform) do - travel_to now do - described_class.new.perform(envelope_download.id) - end - end - - # rubocop:todo RSpec/MultipleMemoizedHelpers - context 'no download' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers - it 'does nothing' do - expect(described_class.new.perform(Faker::Lorem.word)).to be_nil - end - end - # rubocop:enable RSpec/MultipleMemoizedHelpers - - context 'with download' do # rubocop:todo RSpec/MultipleMemoizedHelpers - let!(:envelope1) do # rubocop:todo RSpec/IndexedLet - create(:envelope, :from_cer) - end - - let!(:envelope2) do # rubocop:todo RSpec/IndexedLet - create(:envelope, :from_cer, :with_cer_credential) - end - - before do - allow(ENV).to receive(:fetch).with('AWS_REGION').and_return(region) - - allow(ENV).to receive(:fetch) - .with('ENVELOPE_DOWNLOADS_BUCKET') - .and_return(bucket_name) - - allow(Aws::S3::Resource).to receive(:new) - .with(region:) - .and_return(resource) - - allow(SecureRandom).to receive(:hex).and_return(hex) - - allow(resource).to receive(:bucket).with(bucket_name).and_return(bucket) - allow(bucket).to receive(:object).with(key).and_return(object) - end - - # rubocop:todo RSpec/MultipleMemoizedHelpers - context 'no error' do # rubocop:todo RSpec/ContextWording, RSpec/MultipleMemoizedHelpers - before do - # rubocop:todo RSpec/MessageSpies - expect(object).to receive(:upload_file) do |path| # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies - # rubocop:enable RSpec/MessageSpies - entries = {} - - Zip::InputStream.open(path) do |stream| - loop do - entry = stream.get_next_entry - break unless entry - - entries[entry.name] = JSON(stream.read) - end - end - - entry1 = entries.fetch("#{envelope1.envelope_ceterms_ctid}.json") - entry2 = entries.fetch("#{envelope2.envelope_ceterms_ctid}.json") - - expect(entry1.fetch('envelope_ceterms_ctid')).to eq( # rubocop:todo RSpec/ExpectInHook - envelope1.envelope_ceterms_ctid - ) - expect(entry1.fetch('decoded_resource')).to eq( # rubocop:todo RSpec/ExpectInHook - envelope1.processed_resource - ) - # rubocop:todo RSpec/ExpectInHook - expect(entry1.fetch('updated_at').to_time).to be_within(1.second).of( - # rubocop:enable RSpec/ExpectInHook - envelope1.updated_at - ) - - expect(entry2.fetch('envelope_ceterms_ctid')).to eq( # rubocop:todo RSpec/ExpectInHook - envelope2.envelope_ceterms_ctid - ) - expect(entry2.fetch('decoded_resource')).to eq( # rubocop:todo RSpec/ExpectInHook - envelope2.processed_resource - ) - # rubocop:todo RSpec/ExpectInHook - expect(entry2.fetch('updated_at').to_time).to be_within(1.second).of( - # rubocop:enable RSpec/ExpectInHook - envelope2.updated_at - ) - end - - # rubocop:todo RSpec/StubbedMock - # rubocop:todo RSpec/MessageSpies - expect(object).to receive(:public_url).and_return(url) # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies, RSpec/StubbedMock - # rubocop:enable RSpec/MessageSpies - # rubocop:enable RSpec/StubbedMock - end - - it 'creates and uploads ZIP archive to S3' do - expect do - perform - envelope_download.reload - end.to change(envelope_download, :finished_at).to(now) - .and change(envelope_download, :url).to(url) - # rubocop:todo Layout/LineLength - .and not_change { - # rubocop:enable Layout/LineLength - # rubocop:todo Layout/LineLength - envelope_download.internal_error_message - # rubocop:enable Layout/LineLength - } +RSpec.describe DownloadEnvelopesJob do + let(:envelope_download) { create(:envelope_download) } + + describe '#perform' do + context 'without 
error' do + it 'calls DownloadEnvelopes' do + allow(DownloadEnvelopes).to receive(:call).with(envelope_download:) + described_class.new.perform(envelope_download.id) end end - # rubocop:enable RSpec/MultipleMemoizedHelpers - context 'with error' do # rubocop:todo RSpec/MultipleMemoizedHelpers - let(:error) { StandardError.new(error_message) } - let(:error_message) { Faker::Lorem.sentence } + context 'with error' do + let(:error) { StandardError.new } - before do - # rubocop:todo RSpec/MessageSpies - expect(Airbrake).to receive(:notify).with(error, # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies - # rubocop:enable RSpec/MessageSpies - envelope_download_id: envelope_download.id) - end - - # rubocop:todo RSpec/NestedGroups - context 'when EnvelopeDownload.find_by fails' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups - # rubocop:enable RSpec/NestedGroups - before do - # rubocop:todo RSpec/StubbedMock - # rubocop:todo RSpec/MessageSpies - # rubocop:todo Layout/LineLength - expect(EnvelopeDownload).to receive(:find_by).with(id: envelope_download.id).and_raise(error) # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies, RSpec/StubbedMock - # rubocop:enable Layout/LineLength - # rubocop:enable RSpec/MessageSpies - # rubocop:enable RSpec/StubbedMock - end - - it 'notifies Airbrake' do # rubocop:todo RSpec/ExampleLength - expect do - perform - envelope_download.reload - end.to not_change(envelope_download, - :finished_at).and not_change(envelope_download, - :internal_error_backtrace) - .and not_change(envelope_download, - :internal_error_message) - .and not_change { - envelope_download.url - } - end - end + it 'logs error' do + allow(Airbrake).to receive(:notify) + .with(error, envelope_download_id: envelope_download.id) - # rubocop:todo RSpec/MultipleMemoizedHelpers - context 'when Aws::S3::Object#upload_file fails' do # rubocop:todo RSpec/NestedGroups - before do - # rubocop:todo RSpec/StubbedMock - # rubocop:todo RSpec/MessageSpies - expect(object).to receive(:upload_file).and_raise(error) # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies, RSpec/StubbedMock - # rubocop:enable RSpec/MessageSpies - # rubocop:enable RSpec/StubbedMock - end + allow(DownloadEnvelopes).to receive(:call) + .with(envelope_download:) + .and_raise(error) - it 'notifies Airbrake and persists error' do - expect do - perform - envelope_download.reload - end.to change(envelope_download, :finished_at).to(now) - .and change(envelope_download, - # rubocop:todo Layout/LineLength - :internal_error_message).to(error_message) - # rubocop:enable Layout/LineLength - # rubocop:todo Layout/LineLength - .and not_change { - # rubocop:enable Layout/LineLength - # rubocop:todo Layout/LineLength - envelope_download.url - # rubocop:enable Layout/LineLength - } - end + described_class.new.perform(envelope_download.id) end - # rubocop:enable RSpec/MultipleMemoizedHelpers end end end diff --git a/spec/models/envelope_spec.rb b/spec/models/envelope_spec.rb index ec903bac..aa88b8bb 100644 --- a/spec/models/envelope_spec.rb +++ b/spec/models/envelope_spec.rb @@ -337,7 +337,9 @@ def resource(ctid) end it 'generates ctids' do - expect(described_class.generate_ctid).to match(/urn:ctid:.*/) + # rubocop:todo Layout/LineLength + expect(described_class.generate_ctid).to match(/^ce-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/) + # rubocop:enable Layout/LineLength end it 'validates uniqueness for ctid' do # rubocop:todo RSpec/MultipleExpectations diff --git a/spec/services/container_repository_spec.rb 
b/spec/services/container_repository_spec.rb new file mode 100644 index 00000000..63c4c276 --- /dev/null +++ b/spec/services/container_repository_spec.rb @@ -0,0 +1,109 @@ +RSpec.describe ContainerRepository do # rubocop:todo RSpec/MultipleMemoizedHelpers + subject(:container_repository) { described_class.new(envelope) } + + let(:graph) { envelope.reload.processed_resource.fetch('@graph') } + let(:existing_subresource) { attributes_for(:cer_collection_member).stringify_keys } + let(:initial_graph) { [initial_container, existing_subresource] } + let(:new_subresource) { attributes_for(:cer_collection_member).stringify_keys } + let(:today) { Date.current + 1.week } + + let(:envelope) do + create( + :envelope, + :from_cer, + processed_resource: { '@graph' => initial_graph } + ) + end + + let(:initial_container) do + attributes_for( + :cer_collection, + member_ids: [existing_subresource['@id']] + ).stringify_keys + end + + describe '#add' do # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'when the item is new' do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:updated_container) do + initial_container.merge('ceterms:hasMember' => [ + existing_subresource['@id'], + new_subresource['@id'] + ]) + end + + it 'adds the member to the container' do # rubocop:todo RSpec/ExampleLength + expect do + travel_to(today) do + expect(container_repository.add(new_subresource)).to be(true) + end + + envelope.reload + end.to change { + envelope.processed_resource['@graph'] + }.from(initial_graph).to([updated_container, existing_subresource, new_subresource]) + .and change(envelope, :last_verified_on).to(today) + # rubocop:todo Layout/LineLength + .and enqueue_job(ExtractEnvelopeResourcesJob) + # rubocop:enable Layout/LineLength + .with(envelope.id) + end + end + + context 'when the item already exists' do # rubocop:todo RSpec/MultipleMemoizedHelpers + it 'does nothing' do + expect do + travel_to(today) do + expect(container_repository.add(existing_subresource)).to be(false) + end + + envelope.reload + end.to not_change { + envelope.processed_resource['@graph'] + } + .and not_change { envelope.last_verified_on } + .and not_enqueue_job(ExtractEnvelopeResourcesJob) + end + end + end + + describe '#remove' do # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'when the item exists' do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:updated_container) do + initial_container.merge('ceterms:hasMember' => []) + end + + it 'removes the member from the container' do # rubocop:todo RSpec/ExampleLength + expect do + travel_to(today) do + expect(container_repository.remove(existing_subresource['ceterms:ctid'])).to be(true) + end + + envelope.reload + end.to change { + envelope.processed_resource['@graph'] + }.from(initial_graph).to([updated_container]) + .and change(envelope, :last_verified_on).to(today) + # rubocop:todo Layout/LineLength + .and enqueue_job(ExtractEnvelopeResourcesJob) + # rubocop:enable Layout/LineLength + .with(envelope.id) + end + end + + context 'when the item does not exist' do # rubocop:todo RSpec/MultipleMemoizedHelpers + it 'does nothing' do + expect do + travel_to(today) do + expect(container_repository.remove('non-existent-id')).to be(false) + end + + envelope.reload + end.to not_change { + envelope.processed_resource['@graph'] + } + .and not_change { envelope.last_verified_on } + .and not_enqueue_job(ExtractEnvelopeResourcesJob) + end + end + end +end diff --git a/spec/services/download_envelopes_spec.rb b/spec/services/download_envelopes_spec.rb new file mode 100644 index 
00000000..77b7a9ec --- /dev/null +++ b/spec/services/download_envelopes_spec.rb @@ -0,0 +1,126 @@ +RSpec.describe DownloadEnvelopes do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:builder) { double('builder') } # rubocop:todo RSpec/VerifiedDoubles + let(:envelope_download) { create(:envelope_download, type:) } + let(:error) { StandardError.new(error_message) } + let(:error_message) { Faker::Lorem.sentence } + let(:now) { Date.current } + let(:url) { Faker::Internet.url } + + let(:download_envelopes) do + travel_to now do + described_class.call(envelope_download:) + end + end + + before do + allow(builder_class).to receive(:new) + .with(envelope_download, envelope_download.started_at) + .and_return(builder) + end + + describe '.call' do # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'with envelope builder' do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:builder_class) { EnvelopeDumps::EnvelopeBuilder } + let(:type) { :envelope } + + # rubocop:todo RSpec/NestedGroups + context 'with error' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + before do + allow(builder).to receive(:run).and_raise(error) + end + + it 'stores error message' do + expect do + download_envelopes + envelope_download.reload + end.to change(envelope_download, :status).to('failed') + .and change(envelope_download, + # rubocop:todo Layout/LineLength + :internal_error_message).to(error_message) + # rubocop:enable Layout/LineLength + .and change(envelope_download, + :started_at).to(now) + .and change(envelope_download, + :finished_at).to(now) + end + end + + # rubocop:todo RSpec/NestedGroups + context 'without error' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + before do + allow(builder).to receive(:run).and_return(url) + end + + it 'stores URL' do # rubocop:todo RSpec/ExampleLength + expect do + download_envelopes + envelope_download.reload + end.to change(envelope_download, :status).to('finished') + .and change(envelope_download, :url).to(url) + .and change( + # rubocop:todo Layout/LineLength + envelope_download, :started_at + # rubocop:enable Layout/LineLength + ).to(now) + .and change( + envelope_download, :finished_at + ).to(now) + end + end + end + + context 'with graph builder' do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:builder_class) { EnvelopeDumps::GraphBuilder } + let(:type) { :graph } + + # rubocop:todo RSpec/NestedGroups + context 'with error' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + before do + allow(builder).to receive(:run).and_raise(error) + end + + it 'stores error message' do + expect do + download_envelopes + envelope_download.reload + end.to change(envelope_download, :status).to('failed') + .and change(envelope_download, + # rubocop:todo Layout/LineLength + :internal_error_message).to(error_message) + # rubocop:enable Layout/LineLength + .and change(envelope_download, + :started_at).to(now) + .and change(envelope_download, + :finished_at).to(now) + end + end + + # rubocop:todo RSpec/NestedGroups + context 'without error' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + before do + allow(builder).to receive(:run).and_return(url) + end + + it 'stores URL' do # rubocop:todo RSpec/ExampleLength + expect do + download_envelopes + envelope_download.reload + end.to change(envelope_download, :status).to('finished') + .and 
change(envelope_download, :url).to(url) + .and change( + # rubocop:todo Layout/LineLength + envelope_download, :started_at + # rubocop:enable Layout/LineLength + ).to(now) + .and change( + envelope_download, :finished_at + ).to(now) + end + end + end + end +end diff --git a/spec/services/envelope_dumps/envelope_builder_spec.rb b/spec/services/envelope_dumps/envelope_builder_spec.rb new file mode 100644 index 00000000..2131f2df --- /dev/null +++ b/spec/services/envelope_dumps/envelope_builder_spec.rb @@ -0,0 +1,210 @@ +RSpec.describe EnvelopeDumps::EnvelopeBuilder do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:bucket) { double('bucket') } # rubocop:todo RSpec/VerifiedDoubles + let(:bucket_name) { 'envelope-downloads-bucket-test' } + let(:envelope_download) { create(:envelope_download, envelope_community:) } + let(:entries) { {} } + let(:hex) { Faker::Lorem.characters.first(32) } + let(:key) { "ce_registry_#{now.to_i}_#{hex}.zip" } + let(:now) { Time.current.change(usec: 0) } + let(:region) { 'aws-region-test' } + let(:resource) { double('resource') } # rubocop:todo RSpec/VerifiedDoubles + let(:s3_object) { double('s3_object') } # rubocop:todo RSpec/VerifiedDoubles + let(:url) { Faker::Internet.url } + + let(:build_dump) do + travel_to now do + described_class.new(envelope_download, envelope_download.started_at).run + end + end + + let(:envelope_community) do + EnvelopeCommunity.find_or_create_by!(name: 'ce_registry') + end + + let!(:envelope1) do # rubocop:todo RSpec/IndexedLet + create(:envelope, :from_cer, updated_at: now) + end + + let!(:envelope2) do # rubocop:todo RSpec/IndexedLet + create(:envelope, :from_cer, updated_at: now) + end + + let!(:envelope3) do # rubocop:todo RSpec/IndexedLet + create(:envelope, :from_cer, updated_at: now) + end + + before do + allow(ENV).to receive(:fetch).with('AWS_REGION').and_return(region) + + allow(ENV).to receive(:fetch) + .with('ENVELOPE_DOWNLOADS_BUCKET') + .and_return(bucket_name) + + allow(Aws::S3::Resource).to receive(:new) + .with(region:) + .and_return(resource) + + allow(SecureRandom).to receive(:hex).and_return(hex) + + allow(resource).to receive(:bucket).with(bucket_name).and_return(bucket) + allow(bucket).to receive(:object).with(key).and_return(s3_object) + end + + describe '.call' do # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'without error' do # rubocop:todo RSpec/MultipleMemoizedHelpers + before do + allow(s3_object).to receive(:upload_file) do |path| + Zip::InputStream.open(path) do |stream| + loop do + entry = stream.get_next_entry + break unless entry + + entries[entry.name] = JSON(stream.read) + end + end + end + + allow(s3_object).to receive(:public_url).and_return(url) + end + + # rubocop:todo RSpec/NestedGroups + context 'without previous download' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + # rubocop:todo RSpec/MultipleExpectations + it 'creates a new download' do # rubocop:todo RSpec/ExampleLength + # rubocop:enable RSpec/MultipleExpectations + build_dump + expect(entries.size).to eq(3) + + entry1 = entries.fetch("#{envelope1.envelope_ceterms_ctid}.json") + entry2 = entries.fetch("#{envelope2.envelope_ceterms_ctid}.json") + entry3 = entries.fetch("#{envelope3.envelope_ceterms_ctid}.json") + + expect(entry1.fetch('envelope_ceterms_ctid')).to eq( + envelope1.envelope_ceterms_ctid + ) + expect(entry1.fetch('decoded_resource')).to eq( + envelope1.processed_resource + ) + expect(entry1.fetch('updated_at').to_time).to be_within(1.second).of( + 
envelope1.updated_at + ) + + expect(entry2.fetch('envelope_ceterms_ctid')).to eq( + envelope2.envelope_ceterms_ctid + ) + expect(entry2.fetch('decoded_resource')).to eq( + envelope2.processed_resource + ) + expect(entry2.fetch('updated_at').to_time).to be_within(1.second).of( + envelope2.updated_at + ) + + expect(entry3.fetch('envelope_ceterms_ctid')).to eq( + envelope3.envelope_ceterms_ctid + ) + expect(entry3.fetch('decoded_resource')).to eq( + envelope3.processed_resource + ) + expect(entry3.fetch('updated_at').to_time).to be_within(1.second).of( + envelope3.updated_at + ) + end + end + + # rubocop:todo RSpec/NestedGroups + context 'with previous download' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + let(:previous_key) { Faker::Lorem.characters.first(32) } + + let(:dump) do + buffer = StringIO.new + + Zip::OutputStream.write_buffer(buffer) do |stream| + [envelope1, envelope2, envelope3].each do |envelope| + stream.put_next_entry("#{envelope.envelope_ceterms_ctid}.json") + stream.puts('{}') + end + end + + buffer.string + end + + let(:envelope_download) do + create( + :envelope_download, + envelope_community:, + started_at: now + 1.second, + url: "#{Faker::Internet.url}/#{previous_key}" + ) + end + + let!(:envelope4) do + create(:envelope, :from_cer, updated_at: envelope_download.started_at) + end + + before do + PaperTrail.enabled = true + + allow(bucket).to receive(:object).with(previous_key).and_return(s3_object) + + allow(s3_object).to receive(:get).with(response_target: key) do + File.write(key, dump) + end + + envelope2.update_column(:updated_at, envelope_download.started_at) + + travel_to envelope_download.started_at do + envelope3.destroy + end + end + + after do + PaperTrail.enabled = false + end + + # rubocop:todo RSpec/MultipleExpectations + it 'updates the existing download' do # rubocop:todo RSpec/ExampleLength, RSpec/MultipleExpectations + # rubocop:enable RSpec/MultipleExpectations + build_dump + expect(entries.size).to eq(3) + + entry1 = entries.fetch("#{envelope1.envelope_ceterms_ctid}.json") + entry2 = entries.fetch("#{envelope2.envelope_ceterms_ctid}.json") + entry3 = entries.fetch("#{envelope4.envelope_ceterms_ctid}.json") + + expect(entry1).to eq({}) + + expect(entry2.fetch('envelope_ceterms_ctid')).to eq( + envelope2.envelope_ceterms_ctid + ) + expect(entry2.fetch('decoded_resource')).to eq( + envelope2.processed_resource + ) + expect(entry2.fetch('updated_at').to_time).to be_within(1.second).of( + envelope2.updated_at + ) + + expect(entry3.fetch('envelope_ceterms_ctid')).to eq( + envelope4.envelope_ceterms_ctid + ) + expect(entry3.fetch('decoded_resource')).to eq( + envelope4.processed_resource + ) + expect(entry3.fetch('updated_at').to_time).to be_within(1.second).of( + envelope4.updated_at + ) + end + end + end + + context 'with error' do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:error) { StandardError.new } + + it 'notifies Airbrake and persists error' do + allow(s3_object).to receive(:upload_file).and_raise(error) + expect { build_dump }.to raise_error(error) + end + end + end +end diff --git a/spec/services/envelope_dumps/graph_builder_spec.rb b/spec/services/envelope_dumps/graph_builder_spec.rb new file mode 100644 index 00000000..7450f42e --- /dev/null +++ b/spec/services/envelope_dumps/graph_builder_spec.rb @@ -0,0 +1,168 @@ +RSpec.describe EnvelopeDumps::GraphBuilder do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:bucket) { double('bucket') } # rubocop:todo 
RSpec/VerifiedDoubles + let(:bucket_name) { 'graph-downloads-bucket-test' } + let(:envelope_download) { create(:envelope_download, envelope_community:) } + let(:entries) { {} } + let(:hex) { Faker::Lorem.characters.first(32) } + let(:key) { "ce_registry_#{now.to_i}_#{hex}.zip" } + let(:now) { Time.current.change(usec: 0) } + let(:region) { 'aws-region-test' } + let(:resource) { double('resource') } # rubocop:todo RSpec/VerifiedDoubles + let(:s3_object) { double('s3_object') } # rubocop:todo RSpec/VerifiedDoubles + let(:url) { Faker::Internet.url } + + let(:build_dump) do + travel_to now do + described_class.new(envelope_download, envelope_download.started_at).run + end + end + + let(:envelope_community) do + EnvelopeCommunity.find_or_create_by!(name: 'ce_registry') + end + + let!(:envelope1) do # rubocop:todo RSpec/IndexedLet + create(:envelope, :from_cer, updated_at: now) + end + + let!(:envelope2) do # rubocop:todo RSpec/IndexedLet + create(:envelope, :from_cer, updated_at: now) + end + + let!(:envelope3) do # rubocop:todo RSpec/IndexedLet + create(:envelope, :from_cer, updated_at: now) + end + + before do + allow(ENV).to receive(:fetch).with('AWS_REGION').and_return(region) + + allow(ENV).to receive(:fetch) + .with('ENVELOPE_GRAPHS_BUCKET') + .and_return(bucket_name) + + allow(Aws::S3::Resource).to receive(:new) + .with(region:) + .and_return(resource) + + allow(SecureRandom).to receive(:hex).and_return(hex) + + allow(resource).to receive(:bucket).with(bucket_name).and_return(bucket) + allow(bucket).to receive(:object).with(key).and_return(s3_object) + end + + describe '.call' do # rubocop:todo RSpec/MultipleMemoizedHelpers + context 'without error' do # rubocop:todo RSpec/MultipleMemoizedHelpers + before do + allow(s3_object).to receive(:upload_file) do |path| + Zip::InputStream.open(path) do |stream| + loop do + entry = stream.get_next_entry + break unless entry + + entries[entry.name] = JSON(stream.read) + end + end + end + + allow(s3_object).to receive(:public_url).and_return(url) + end + + # rubocop:todo RSpec/NestedGroups + context 'without previous download' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + # rubocop:enable RSpec/NestedGroups + # rubocop:todo RSpec/MultipleExpectations + it 'creates a new download' do + # rubocop:enable RSpec/MultipleExpectations + build_dump + expect(entries.size).to eq(3) + + entry1 = entries.fetch("#{envelope1.envelope_ceterms_ctid}.json") + entry2 = entries.fetch("#{envelope2.envelope_ceterms_ctid}.json") + entry3 = entries.fetch("#{envelope3.envelope_ceterms_ctid}.json") + + expect(entry1).to eq(envelope1.processed_resource) + expect(entry2).to eq(envelope2.processed_resource) + expect(entry3).to eq(envelope3.processed_resource) + end + end + + # rubocop:todo RSpec/NestedGroups + context 'with previous download' do # rubocop:todo RSpec/MultipleMemoizedHelpers, RSpec/NestedGroups + let(:previous_key) { Faker::Lorem.characters.first(32) } + + # rubocop:enable RSpec/NestedGroups + let(:dump) do + buffer = StringIO.new + + Zip::OutputStream.write_buffer(buffer) do |stream| + [envelope1, envelope2, envelope3].each do |envelope| + stream.put_next_entry("#{envelope.envelope_ceterms_ctid}.json") + stream.puts('{}') + end + end + + buffer.string + end + + let(:envelope_download) do + create( + :envelope_download, + envelope_community:, + started_at: now + 1.second, + url: "#{Faker::Internet.url}/#{previous_key}" + ) + end + + let!(:envelope4) do + create(:envelope, :from_cer, updated_at: envelope_download.started_at) + end + + 
before do + PaperTrail.enabled = true + + allow(bucket).to receive(:object).with(previous_key).and_return(s3_object) + + allow(s3_object).to receive(:get).with(response_target: key) do + File.write(key, dump) + end + + envelope2.update_column(:updated_at, envelope_download.started_at) + + travel_to envelope_download.started_at do + envelope3.destroy + end + + stub_request(:get, envelope_download.url).to_return(body: dump) + end + + after do + PaperTrail.enabled = false + end + + # rubocop:todo RSpec/MultipleExpectations + it 'updates the existing download' do # rubocop:todo RSpec/MultipleExpectations + # rubocop:enable RSpec/MultipleExpectations + build_dump + expect(entries.size).to eq(3) + + entry1 = entries.fetch("#{envelope1.envelope_ceterms_ctid}.json") + entry2 = entries.fetch("#{envelope2.envelope_ceterms_ctid}.json") + entry3 = entries.fetch("#{envelope4.envelope_ceterms_ctid}.json") + + expect(entry1).to eq({}) + expect(entry2).to eq(envelope2.processed_resource) + expect(entry3).to eq(envelope4.processed_resource) + end + end + end + + context 'with error' do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:error) { StandardError.new } + + it 'notifies Airbrake and persists error' do + allow(s3_object).to receive(:upload_file).and_raise(error) + expect { build_dump }.to raise_error(error) + end + end + end +end diff --git a/spec/services/sync_envelope_graph_with_s3_spec.rb b/spec/services/sync_envelope_graph_with_s3_spec.rb new file mode 100644 index 00000000..2ec8f604 --- /dev/null +++ b/spec/services/sync_envelope_graph_with_s3_spec.rb @@ -0,0 +1,79 @@ +RSpec.describe SyncEnvelopeGraphWithS3 do # rubocop:todo RSpec/MultipleMemoizedHelpers + let(:envelope) { build(:envelope, :from_cer) } + let(:s3_bucket) { double('s3_bucket') } # rubocop:todo RSpec/VerifiedDoubles + let(:s3_bucket_name) { Faker::Lorem.word } + let(:s3_object) { double('s3_object') } # rubocop:todo RSpec/VerifiedDoubles + let(:s3_region) { 'aws-s3_region-test' } + let(:s3_resource) { double('s3_resource') } # rubocop:todo RSpec/VerifiedDoubles + let(:s3_url) { Faker::Internet.url } + + context 'without bucket' do # rubocop:todo RSpec/MultipleMemoizedHelpers + describe '.upload' do # rubocop:todo RSpec/MultipleMemoizedHelpers + it 'does nothing' do + expect { described_class.upload(envelope) }.not_to raise_error + end + end + + describe '.remove' do # rubocop:todo RSpec/MultipleMemoizedHelpers + it 'does nothing' do + expect { described_class.remove(envelope) }.not_to raise_error + end + end + end + + context 'with bucket' do # rubocop:todo RSpec/MultipleMemoizedHelpers + before do + ENV['AWS_REGION'] = s3_region + ENV['ENVELOPE_GRAPHS_BUCKET'] = s3_bucket_name + + # rubocop:todo RSpec/MessageSpies + expect(Aws::S3::Resource).to receive(:new) # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies + # rubocop:enable RSpec/MessageSpies + .with(region: s3_region) + .and_return(s3_resource) + .at_least(:once) + + # rubocop:todo RSpec/MessageSpies + expect(s3_resource).to receive(:bucket) # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies + # rubocop:enable RSpec/MessageSpies + .with(s3_bucket_name) + .and_return(s3_bucket) + .at_least(:once) + + # rubocop:todo RSpec/MessageSpies + expect(s3_bucket).to receive(:object) # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies + # rubocop:enable RSpec/MessageSpies + .with("ce_registry/#{envelope.envelope_ceterms_ctid}.json") + .and_return(s3_object) + .at_least(:once) + + # rubocop:todo RSpec/MessageSpies + expect(s3_object).to receive(:put).with( # rubocop:todo 
RSpec/ExpectInHook, RSpec/MessageSpies + # rubocop:enable RSpec/MessageSpies + body: envelope.processed_resource.to_json, + content_type: 'application/json' + ) + + # rubocop:todo RSpec/StubbedMock + # rubocop:todo RSpec/MessageSpies + expect(s3_object).to receive(:public_url).and_return(s3_url) # rubocop:todo RSpec/ExpectInHook, RSpec/MessageSpies, RSpec/StubbedMock + # rubocop:enable RSpec/MessageSpies + # rubocop:enable RSpec/StubbedMock + end + + describe '.upload' do # rubocop:todo RSpec/MultipleMemoizedHelpers + it 'uploads the s3_resource to S3' do + envelope.save! + expect(envelope.s3_url).to eq(s3_url) + end + end + + describe '.remove' do # rubocop:todo RSpec/MultipleMemoizedHelpers + it 'uploads the s3_resource to S3' do + expect(s3_object).to receive(:delete) # rubocop:todo RSpec/MessageSpies + envelope.save! + expect { envelope.destroy }.not_to raise_error + end + end + end +end diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb index 677ff33c..fcd2c364 100644 --- a/spec/spec_helper.rb +++ b/spec/spec_helper.rb @@ -107,3 +107,4 @@ end RSpec::Matchers.define_negated_matcher :not_change, :change +RSpec::Matchers.define_negated_matcher :not_enqueue_job, :enqueue_job diff --git a/terraform/environments/eks/.terraform/modules/modules.json b/terraform/environments/eks/.terraform/modules/modules.json new file mode 100644 index 00000000..e6ef47c7 --- /dev/null +++ b/terraform/environments/eks/.terraform/modules/modules.json @@ -0,0 +1 @@ +{"Modules":[{"Key":"","Source":"","Dir":"."},{"Key":"application_secret","Source":"../../modules/secrets","Dir":""},{"Key":"application_secret_prod","Source":"../../modules/secrets","Dir":""},{"Key":"application_secret_sandbox","Source":"../../modules/secrets","Dir":""},{"Key":"ecr","Source":"../../modules/ecr","Dir":""},{"Key":"eks","Source":"../../modules/eks","Dir":""},{"Key":"envelope_graphs_s3_prod","Source":"../../modules/envelope_graphs_s3","Dir":""},{"Key":"envelope_graphs_s3_sandbox","Source":"../../modules/envelope_graphs_s3","Dir":""},{"Key":"envelope_graphs_s3_staging","Source":"../../modules/envelope_graphs_s3","Dir":""},{"Key":"rds-sandbox","Source":"../../modules/rds","Dir":""},{"Key":"rds-staging","Source":"../../modules/rds","Dir":""},{"Key":"vpc","Source":"../../modules/vpc","Dir":""}]} \ No newline at end of file diff --git a/terraform/environments/eks/.terraform/terraform.tfstate b/terraform/environments/eks/.terraform/terraform.tfstate new file mode 100644 index 00000000..b10db166 --- /dev/null +++ b/terraform/environments/eks/.terraform/terraform.tfstate @@ -0,0 +1,52 @@ +{ + "version": 3, + "serial": 1, + "lineage": "d83daf34-5f85-2619-718f-8d1e358e5aaf", + "backend": { + "type": "s3", + "config": { + "access_key": null, + "acl": null, + "assume_role_duration_seconds": null, + "assume_role_policy": null, + "assume_role_policy_arns": null, + "assume_role_tags": null, + "assume_role_transitive_tag_keys": null, + "bucket": "terraform-state-o1r8", + "dynamodb_endpoint": null, + "dynamodb_table": "terraform-state-locks", + "encrypt": true, + "endpoint": null, + "external_id": null, + "force_path_style": null, + "iam_endpoint": null, + "key": "eks-registry/tfstate", + "kms_key_id": null, + "max_retries": null, + "profile": null, + "region": "us-east-1", + "role_arn": null, + "secret_key": null, + "session_name": null, + "shared_credentials_file": null, + "skip_credentials_validation": null, + "skip_metadata_api_check": null, + "skip_region_validation": null, + "sse_customer_key": null, + "sts_endpoint": null, + "token": null, + 
"workspace_key_prefix": null + }, + "hash": 2518525947 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/terraform/environments/eks/ELASTICSEARCH-TROUBLESHOOTING.md b/terraform/environments/eks/ELASTICSEARCH-TROUBLESHOOTING.md new file mode 100644 index 00000000..623c39c0 --- /dev/null +++ b/terraform/environments/eks/ELASTICSEARCH-TROUBLESHOOTING.md @@ -0,0 +1,85 @@ +Elasticsearch Cluster Formation Runbook + +Purpose: quick steps to diagnose and recover master election and discovery issues for sandbox, staging, and production. + +1) Identify the namespace and files +- Sandbox: namespace `credreg-sandbox` + - StatefulSet: `terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-statefulset.yaml` + - Headless discovery Service: `terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-headless-svc.yaml` + - Client Service (HTTP 9200): `terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-svc.yaml` +- Staging: namespace `credreg-staging` (same filenames under `k8s-manifests-staging`) +- Production: namespace `credreg-prod` (same filenames under `k8s-manifests-prod`) + +2) Verify discovery wiring +- Check headless (discovery) Service is truly headless and exposes transport 9300: + - `kubectl -n get svc elasticsearch-discovery -o yaml` + - Expect: `spec.clusterIP: None`, port 9300 present. In prod we only expose 9300. +- Ensure it publishes NotReady addresses (needed for bootstrap): + - Expect: `spec.publishNotReadyAddresses: true` +- Endpoints resolve to Pod IPs: + - `kubectl -n get endpoints elasticsearch-discovery -o wide` + - Expect two 10.x IPs with port 9300. + +3) Verify Pod DNS and subdomain +- StatefulSet Pod template must set the subdomain to the discovery Service name: + - Expect in StatefulSet: `spec.template.spec.subdomain: elasticsearch-discovery` +- From a Pod, verify DNS: + - `kubectl -n exec elasticsearch-0 -- getent hosts elasticsearch-discovery..svc.cluster.local` + - `kubectl -n exec elasticsearch-0 -- getent hosts elasticsearch-1.elasticsearch-discovery..svc.cluster.local` + +4) Check transport connectivity +- From each Pod: + - `kubectl -n exec elasticsearch-0 -- sh -c "nc -zv elasticsearch-1.elasticsearch-discovery 9300 || true"` + - `kubectl -n exec elasticsearch-1 -- sh -c "nc -zv elasticsearch-0.elasticsearch-discovery 9300 || true"` +- If connection fails, check NetworkPolicies: + - `kubectl -n get networkpolicy` + +5) First-time bootstrap (one-time only) +- For a cluster that has never formed and logs show: + - "master not discovered yet … this node has not previously joined a bootstrapped cluster" +- Temporarily add bootstrap env to the StatefulSet (do not commit to git): + - `kubectl -n patch statefulset elasticsearch --type='json' -p='[{"op":"add","path":"/spec/template/spec/containers/0/env/-","value":{"name":"cluster.initial_master_nodes","value":"elasticsearch-0,elasticsearch-1"}}]'` +- Restart Pods to pick it up: + - `kubectl -n delete pod -l app=elasticsearch` +- Verify health: + - `kubectl -n port-forward statefulset/elasticsearch 9200:9200 &` + - `curl -s http://localhost:9200/_cluster/health?pretty` (expect yellow/green, nodes: 2) +- Remove the bootstrap env after cluster forms: + - `IDX=$(kubectl -n get sts elasticsearch -o json | jq -r '.spec.template.spec.containers[0].env | map(.name) | index("cluster.initial_master_nodes")')` + - `if [ "$IDX" != "null" ]; then kubectl -n patch sts elasticsearch --type='json' 
-p="[{\"op\":\"remove\",\"path\":\"/spec/template/spec/containers/0/env/$IDX\"}]"; fi` + - `kubectl -n rollout status statefulset/elasticsearch` + +6) Recover from stale data (destructive, wipes ES data) +- If logs show "locked into cluster UUID … remove this setting" or bootstrap still fails and you accept data loss: + - Scale down: `kubectl -n scale sts/elasticsearch --replicas=0` + - Delete PVCs: + - `kubectl -n delete pvc elasticsearch-data-elasticsearch-0 || true` + - `kubectl -n delete pvc elasticsearch-data-elasticsearch-1 || true` + - Ensure bootstrap env is present on the StatefulSet (see step 5). + - Scale up: `kubectl -n scale sts/elasticsearch --replicas=2` + - Verify health, then remove bootstrap env and roll once (as in step 5). + +7) Common warnings and fixes +- "address [172.x.x.x:9300] … connect_timeout" → discovery resolving to a non-headless Service. Ensure `clusterIP: None` on discovery Service. +- Per-pod DNS not resolving during bootstrap → set `publishNotReadyAddresses: true` on discovery Service and `subdomain: elasticsearch-discovery` on the Pod template. +- Field limit errors when indexing (e.g., `Limit of total fields [0] has been exceeded …`): + - Raise per-index limit: `curl -X PUT http://elasticsearch:9200//_settings -H 'Content-Type: application/json' -d '{"index.mapping.total_fields.limit": 20000}'` + - Or set a default template: `/_index_template/ce-default` with `index.mapping.total_fields.limit`. + +8) Service separation best practice +- discovery/headless Service: P2P transport only (9300) +- client Service: HTTP only (9200). App ConfigMaps point to `http://elasticsearch:9200`. + +9) Health and indices quick checks +- Health: `curl -s http://elasticsearch:9200/_cluster/health?pretty` +- Indices: `curl -s http://elasticsearch:9200/_cat/indices?h=index,docs.count,store.size` +- Total docs: `curl -s http://elasticsearch:9200/_stats/docs?pretty | jq '.indices | to_entries | map(.value.primaries.docs.count) | add'` + +10) Apply manifests +- After editing files under `k8s-manifests-*`, apply them per environment: + - `kubectl -n apply -f elasticsearch-headless-svc.yaml` + - `kubectl -n apply -f elasticsearch-svc.yaml` (if present) + - `kubectl -n apply -f elasticsearch-statefulset.yaml` + +Keep this runbook updated as we evolve manifests and procedures. 
+ diff --git a/terraform/environments/eks/backend.tf b/terraform/environments/eks/backend.tf new file mode 100644 index 00000000..37313cac --- /dev/null +++ b/terraform/environments/eks/backend.tf @@ -0,0 +1,9 @@ +terraform { + backend "s3" { + bucket = "terraform-state-o1r8" + key = "eks-registry/tfstate" + region = "us-east-1" + encrypt = true + dynamodb_table = "terraform-state-locks" + } +} diff --git a/terraform/environments/eks/k8s-manifests-prod/app-configmap.yaml b/terraform/environments/eks/k8s-manifests-prod/app-configmap.yaml new file mode 100644 index 00000000..4b8a3d51 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/app-configmap.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: main-app-config + namespace: credreg-prod +data: + POSTGRESQL_DATABASE: credential_registry_production + POSTGRESQL_USERNAME: credential_registry_production + RACK_ENV: production + DOCKER_ENV: "true" + ENVELOPE_GRAPHS_BUCKET: cer-envelope-graphs-prod + ENVELOPE_DOWNLOADS_BUCKET: cer-envelope-downloads + AIRBRAKE_PROJECT_ID: '270205' + SIDEKIQ_CONCURRENCY: '10' + API_KEY_VALIDATION_ENDPOINT: https://apps.credentialengine.org/accountsAPI/Organization/ValidateApiKey + ELASTICSEARCH_ADDRESS: http://elasticsearch:9200 \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-prod/app-deployment.yaml b/terraform/environments/eks/k8s-manifests-prod/app-deployment.yaml new file mode 100644 index 00000000..cd6ff2d7 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/app-deployment.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: main-app + namespace: credreg-prod + labels: + app: main +spec: + replicas: 2 + selector: + matchLabels: + app: main-app + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: main-app + app-lang: ruby + spec: + priorityClassName: prod-high + nodeSelector: + env: production + tolerations: + - key: "env" + operator: "Equal" + value: "production" + effect: "NoSchedule" + serviceAccountName: main-app-service-account + containers: + - name: main-app + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:production + imagePullPolicy: Always + command: ["/bin/bash", "-c", "bin/rackup -o 0.0.0.0"] + env: + - name: NEW_RELIC_APP_NAME + value: "Credential Engine Production" + ports: + - containerPort: 9292 + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config + resources: + requests: + cpu: "500m" + memory: "512Mi" + limits: + cpu: "1500m" + memory: "1536Mi" + +--- +apiVersion: v1 +kind: Service +metadata: + name: main-app + namespace: credreg-prod +spec: + type: ClusterIP + selector: + app: main-app + ports: + - protocol: TCP + port: 9292 + targetPort: 9292 + diff --git a/terraform/environments/eks/k8s-manifests-prod/app-hpa.yaml b/terraform/environments/eks/k8s-manifests-prod/app-hpa.yaml new file mode 100644 index 00000000..beb5af7d --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/app-hpa.yaml @@ -0,0 +1,41 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: main-app + namespace: credreg-prod +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: main-app + minReplicas: 2 + maxReplicas: 6 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 70 + +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: worker-app + namespace: credreg-prod +spec: + 
scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: worker-app + minReplicas: 2 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 70 + diff --git a/terraform/environments/eks/k8s-manifests-prod/app-ingress.yaml b/terraform/environments/eks/k8s-manifests-prod/app-ingress.yaml new file mode 100644 index 00000000..8d09a096 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/app-ingress.yaml @@ -0,0 +1,28 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: main-app + namespace: credreg-prod +spec: + ingressClassName: nginx + tls: + - hosts: + - registry-prod.credentialengineregistry.org + secretName: registry-tls-temp + # tls: + # - hosts: + # - credentialengineregistry.org + # secretName: registry-tls + rules: + - host: registry-prod.credentialengineregistry.org + # - host: credentialengineregistry.org + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: main-app + port: + number: 9292 + diff --git a/terraform/environments/eks/k8s-manifests-prod/app-namespace.yaml b/terraform/environments/eks/k8s-manifests-prod/app-namespace.yaml new file mode 100644 index 00000000..4b3e5359 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/app-namespace.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: credreg-prod + diff --git a/terraform/environments/eks/k8s-manifests-prod/app-secrets.yaml b/terraform/environments/eks/k8s-manifests-prod/app-secrets.yaml new file mode 100644 index 00000000..57d4b28d --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/app-secrets.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: app-secrets + namespace: credreg-prod +type: Opaque +stringData: + # Populate via External Secrets or CI; placeholders here + SECRET_KEY_BASE: "" + POSTGRESQL_ADDRESS: "" + POSTGRESQL_PASSWORD: "" + REDIS_URL: "" + diff --git a/terraform/environments/eks/k8s-manifests-prod/app-service-account.yaml b/terraform/environments/eks/k8s-manifests-prod/app-service-account.yaml new file mode 100644 index 00000000..6dc5ad89 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/app-service-account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: main-app-service-account + namespace: credreg-prod + annotations: + eks.amazonaws.com/role-arn: "arn:aws:iam::996810415034:role/ce-registry-eks-application-irsa-role" + diff --git a/terraform/environments/eks/k8s-manifests-prod/certificate.yaml b/terraform/environments/eks/k8s-manifests-prod/certificate.yaml new file mode 100644 index 00000000..14679255 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/certificate.yaml @@ -0,0 +1,26 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: registry-certificate-temp + namespace: credreg-prod +spec: + secretName: registry-tls-temp + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - registry-prod.credentialengineregistry.org + +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: registry-certificate + namespace: credreg-prod +spec: + secretName: registry-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - credentialengineregistry.org \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-prod/db-migrate-job.yaml b/terraform/environments/eks/k8s-manifests-prod/db-migrate-job.yaml new file mode 100644 index 00000000..2c398ca0 --- /dev/null 
+++ b/terraform/environments/eks/k8s-manifests-prod/db-migrate-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: db-migrate- + namespace: credreg-prod + labels: + app: main-app +spec: + backoffLimit: 1 + activeDeadlineSeconds: 900 + ttlSecondsAfterFinished: 600 + template: + metadata: + labels: + app: main-app + spec: + serviceAccountName: main-app-service-account + restartPolicy: Never + containers: + - name: db-migrate + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:production + imagePullPolicy: Always + command: ["/bin/bash","-lc","bundle exec rake db:migrate RACK_ENV=production"] + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config diff --git a/terraform/environments/eks/k8s-manifests-prod/debug-aws-pod.yaml b/terraform/environments/eks/k8s-manifests-prod/debug-aws-pod.yaml new file mode 100644 index 00000000..3a96f0d5 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/debug-aws-pod.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: debug-aws-cli + namespace: credreg-prod + labels: + app: debug-aws-cli +spec: + serviceAccountName: main-app-service-account + restartPolicy: Never + priorityClassName: prod-high + nodeSelector: + env: production + tolerations: + - key: "env" + operator: "Equal" + value: "production" + effect: "NoSchedule" + containers: + - name: awscli + image: public.ecr.aws/aws-cli/aws-cli:latest + imagePullPolicy: IfNotPresent + command: ["sh", "-lc", "echo Ready; sleep 3600"] + env: + - name: AWS_REGION + value: us-east-1 + - name: AWS_DEFAULT_REGION + value: us-east-1 + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "500m" + memory: "256Mi" + diff --git a/terraform/environments/eks/k8s-manifests-prod/elasticsearch-headless-svc.yaml b/terraform/environments/eks/k8s-manifests-prod/elasticsearch-headless-svc.yaml new file mode 100644 index 00000000..faeddb5d --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/elasticsearch-headless-svc.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch-discovery + namespace: credreg-prod + labels: + app: elasticsearch +spec: + clusterIP: None + publishNotReadyAddresses: true + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 + - name: transport + port: 9300 + targetPort: 9300 diff --git a/terraform/environments/eks/k8s-manifests-prod/elasticsearch-pvc.yaml b/terraform/environments/eks/k8s-manifests-prod/elasticsearch-pvc.yaml new file mode 100644 index 00000000..703ec9b7 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/elasticsearch-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: credreg-prod + name: elasticsearch-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + storageClassName: gp3 diff --git a/terraform/environments/eks/k8s-manifests-prod/elasticsearch-statefulset.yaml b/terraform/environments/eks/k8s-manifests-prod/elasticsearch-statefulset.yaml new file mode 100644 index 00000000..8c6cf22c --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/elasticsearch-statefulset.yaml @@ -0,0 +1,67 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: elasticsearch + namespace: credreg-prod + labels: + app: elasticsearch +spec: + serviceName: elasticsearch-discovery + replicas: 2 + selector: + matchLabels: + app: elasticsearch + template: + metadata: + labels: + app: elasticsearch + spec: + 
subdomain: elasticsearch-discovery + priorityClassName: prod-high + nodeSelector: + env: production + tolerations: + - key: "env" + operator: "Equal" + value: "production" + effect: "NoSchedule" + securityContext: + fsGroup: 1000 + runAsUser: 1000 + runAsGroup: 1000 + containers: + - name: elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:9.2.0 + ports: + - containerPort: 9200 + - containerPort: 9300 + resources: + requests: + cpu: "1000m" + memory: "6Gi" + limits: + cpu: "2000m" + memory: "8Gi" + env: + - name: ES_JAVA_OPTS + value: "-Xms4g -Xmx4g" + - name: cluster.name + value: "elasticsearch" + - name: xpack.security.enabled + value: "false" + - name: network.host + value: "0.0.0.0" + - name: discovery.seed_hosts + value: "elasticsearch-discovery" + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: gp3 + resources: + requests: + storage: 50Gi diff --git a/terraform/environments/eks/k8s-manifests-prod/elasticsearch-svc.yaml b/terraform/environments/eks/k8s-manifests-prod/elasticsearch-svc.yaml new file mode 100644 index 00000000..2d5a0a65 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/elasticsearch-svc.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch + namespace: credreg-prod + labels: + app: elasticsearch +spec: + type: ClusterIP + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 + diff --git a/terraform/environments/eks/k8s-manifests-prod/external-secrets-operator.yaml b/terraform/environments/eks/k8s-manifests-prod/external-secrets-operator.yaml new file mode 100644 index 00000000..bcf9344f --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/external-secrets-operator.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: app-secret + namespace: credreg-prod +spec: + refreshInterval: 1h + secretStoreRef: + name: aws-secret-manager + kind: ClusterSecretStore + target: + name: app-secrets + creationPolicy: Owner + dataFrom: + - extract: + key: credreg-secrets-eks-production diff --git a/terraform/environments/eks/k8s-manifests-prod/newrelic-apm-enable.yaml b/terraform/environments/eks/k8s-manifests-prod/newrelic-apm-enable.yaml new file mode 100644 index 00000000..935e4d2f --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/newrelic-apm-enable.yaml @@ -0,0 +1,22 @@ +apiVersion: newrelic.com/v1beta2 +kind: Instrumentation +metadata: + name: newrelic-instrumentation + namespace: newrelic +spec: + agent: + language: ruby + image: newrelic/newrelic-ruby-init:latest + + namespaceLabelSelector: + matchExpressions: + - key: "kubernetes.io/metadata.name" + operator: "In" + values: ["credreg-prod"] + + podLabelSelector: + matchExpressions: + - key: "app-lang" + operator: "In" + values: ["ruby"] + diff --git a/terraform/environments/eks/k8s-manifests-prod/redis-configmap.yaml b/terraform/environments/eks/k8s-manifests-prod/redis-configmap.yaml new file mode 100644 index 00000000..ab49d6a4 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/redis-configmap.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: redis-config + namespace: credreg-prod +data: + redis.conf: | + bind 0.0.0.0 + port 6379 + requirepass your_secure_password + appendonly yes + maxmemory 500mb + maxmemory-policy allkeys-lru + tcp-keepalive 300 + 
protected-mode yes diff --git a/terraform/environments/eks/k8s-manifests-prod/redis-deployment.yaml b/terraform/environments/eks/k8s-manifests-prod/redis-deployment.yaml new file mode 100644 index 00000000..1f11891c --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/redis-deployment.yaml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis + namespace: credreg-prod + labels: + app: redis +spec: + serviceName: redis-service + replicas: 1 # For production, use 3 with Redis Sentinel + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + priorityClassName: prod-high + nodeSelector: + env: sandbox + tolerations: + - key: "env" + operator: "Equal" + value: "sandbox" + effect: "NoSchedule" + containers: + - name: redis + image: redis:7.2-alpine # Official Redis image + command: ["redis-server", "/usr/local/etc/redis/redis.conf"] + ports: + - containerPort: 6379 + volumeMounts: + - name: redis-data + mountPath: /data + - name: redis-config + mountPath: /usr/local/etc/redis + resources: + requests: + cpu: "100m" + memory: "256Mi" + limits: + cpu: "500m" + memory: "1Gi" + livenessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 5 + periodSeconds: 5 + volumes: + - name: redis-config + configMap: + name: redis-config + volumeClaimTemplates: + - metadata: + name: redis-data + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: "gp2" # AWS EBS gp3 (adjust if needed) + resources: + requests: + storage: 10Gi + +--- +apiVersion: v1 +kind: Service +metadata: + name: redis + namespace: credreg-prod +spec: + clusterIP: None # Headless service for direct pod access + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis diff --git a/terraform/environments/eks/k8s-manifests-prod/worker-deployment.yaml b/terraform/environments/eks/k8s-manifests-prod/worker-deployment.yaml new file mode 100644 index 00000000..20918753 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-prod/worker-deployment.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: worker-app + namespace: credreg-prod + labels: + app: worker-app +spec: + replicas: 1 + selector: + matchLabels: + app: worker-app + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: worker-app + app-lang: ruby + spec: + priorityClassName: prod-high + nodeSelector: + env: production + tolerations: + - key: "env" + operator: "Equal" + value: "production" + effect: "NoSchedule" + serviceAccountName: main-app-service-account + containers: + - name: worker + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:production + imagePullPolicy: Always + env: + - name: NEW_RELIC_APP_NAME + value: "Credential Engine Production" + - name: PATH + value: "/app/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin" + command: ["/bin/bash","-lc"] + args: + - | + if [ -x ./bin/sidekiq ]; then + ./bin/sidekiq -r ./config/application.rb + else + bundle exec sidekiq -r ./config/application.rb + fi + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config + resources: + requests: + cpu: "400m" + memory: "256Mi" + limits: + cpu: "2000m" + memory: "2048Mi" + + diff --git a/terraform/environments/eks/k8s-manifests-sandbox/app-configmap.yaml b/terraform/environments/eks/k8s-manifests-sandbox/app-configmap.yaml new file mode 100644 index 
00000000..85f98be6 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/app-configmap.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: main-app-config + namespace: credreg-sandbox +data: + POSTGRESQL_DATABASE: credential_registry_sandbox + POSTGRESQL_USERNAME: credential_registry_sandbox + RACK_ENV: sandbox + DOCKER_ENV: "true" + ENVELOPE_GRAPHS_BUCKET: cer-envelope-graphs-sandb + ENVELOPE_DOWNLOADS_BUCKET: cer-envelope-downloads + IAM_COMMUNITY_ROLE_ADMIN: ROLE_ADMINISTRATOR + IAM_COMMUNITY_ROLE_READER: ROLE_READER + IAM_COMMUNITY_ROLE_PUBLISHER: ROLE_PUBLISHER + IAM_COMMUNITY_CLAIM_NAME: community_name + IAM_CLIENT_ID: RegistryAPI + IAM_URL: https://login.sandbox.credentialengine.org/realms/CE-Sandbox + AIRBRAKE_PROJECT_ID: '270205' + SIDEKIQ_CONCURRENCY: '10' + API_KEY_VALIDATION_ENDPOINT: https://sandbox.credentialengine.org/accountsAPI/Organization/ValidateCommunityAccess + ELASTICSEARCH_ADDRESS: http://elasticsearch:9200 diff --git a/terraform/environments/eks/k8s-manifests-sandbox/app-deployment.yaml b/terraform/environments/eks/k8s-manifests-sandbox/app-deployment.yaml new file mode 100644 index 00000000..c8265b87 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/app-deployment.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: main-app + namespace: credreg-sandbox + labels: + app: main +spec: + replicas: 1 # Adjust based on traffic + selector: + matchLabels: + app: main-app + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: main-app + app-lang: ruby + spec: + priorityClassName: sandbox-medium + nodeSelector: + env: sandbox + tolerations: + - key: "env" + operator: "Equal" + value: "sandbox" + effect: "NoSchedule" + serviceAccountName: main-app-service-account + # DB migrations are handled via a dedicated Job + containers: + - name: main-app + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:sandbox + imagePullPolicy: Always + command: ["/bin/bash", "-c", "bin/rackup -o 0.0.0.0"] + env: + - name: NEW_RELIC_APP_NAME + value: "Credential Engine Sandbox" + ports: + - containerPort: 9292 + envFrom: + - secretRef: + name: app-secrets # DB credentials, APP_KEY, etc. 
+ - configMapRef: + name: main-app-config + resources: + requests: + cpu: "500m" + memory: "256Mi" + limits: + cpu: "1000m" + memory: "1024Mi" + +--- +apiVersion: v1 +kind: Service +metadata: + name: main-app + namespace: credreg-sandbox +spec: + type: ClusterIP + selector: + app: main-app + ports: + - protocol: TCP + port: 9292 + targetPort: 9292 diff --git a/terraform/environments/eks/k8s-manifests-sandbox/app-hpa.yaml b/terraform/environments/eks/k8s-manifests-sandbox/app-hpa.yaml new file mode 100644 index 00000000..430647c4 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/app-hpa.yaml @@ -0,0 +1,27 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: main-app-hpa + namespace: credreg-sandbox + labels: + app: laravel +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: main-app + minReplicas: 1 + maxReplicas: 5 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 60 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 70 diff --git a/terraform/environments/eks/k8s-manifests-sandbox/app-ingress.yaml b/terraform/environments/eks/k8s-manifests-sandbox/app-ingress.yaml new file mode 100644 index 00000000..49fb51cb --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/app-ingress.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: main-app-ingress + annotations: + # NGINX Ingress Controller annotations + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" # HTTP → HTTPS + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + nginx.ingress.kubernetes.io/proxy-body-size: "10m" +spec: + ingressClassName: nginx + tls: + - hosts: + - sandbox.credentialengineregistry.org + secretName: sandbox-credentialengineregistry-org-tls + rules: + - host: sandbox.credentialengineregistry.org + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: main-app + port: + number: 9292 diff --git a/terraform/environments/eks/k8s-manifests-sandbox/app-namespace.yaml b/terraform/environments/eks/k8s-manifests-sandbox/app-namespace.yaml new file mode 100644 index 00000000..884c146a --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/app-namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: credreg-sandbox + labels: + name: main-app \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-sandbox/app-secrets.yaml b/terraform/environments/eks/k8s-manifests-sandbox/app-secrets.yaml new file mode 100644 index 00000000..31653072 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/app-secrets.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: main-app-secrets +type: Opaque +stringData: + POSTGRESQL_PASSWORD=[secret value] + SECRET_KEY_BASE=[openssl rand -hex 32] + POSTGRESQL_ADDRESS=[POSTGRESQL_ADDRESS] + SIDEKIQ_USERNAME=[SIDEKIQ_USERNAME] + SIDEKIQ_PASSWORD=[SIDEKIQ_PASSWORD] + REDIS_URL=[REDIS_URL] + AIRBRAKE_PROJECT_KEY=[AIRBRAKE_PROJECT_KEY] \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-sandbox/app-service-account.yaml b/terraform/environments/eks/k8s-manifests-sandbox/app-service-account.yaml new file mode 100644 index 00000000..286c697e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/app-service-account.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: main-app-service-account + 
annotations: + eks.amazonaws.com/role-arn: "arn:aws:iam::996810415034:role/ce-registry-eks-application-irsa-role" diff --git a/terraform/environments/eks/k8s-manifests-sandbox/certificate.yaml b/terraform/environments/eks/k8s-manifests-sandbox/certificate.yaml new file mode 100644 index 00000000..64425ad4 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/certificate.yaml @@ -0,0 +1,12 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: sandbox-credentialengineregistry-org-tls + namespace: credreg-sandbox +spec: + secretName: sandbox-credentialengineregistry-org-tls + dnsNames: + - sandbox.credentialengineregistry.org + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-sandbox/cluster-autoscaler.yaml b/terraform/environments/eks/k8s-manifests-sandbox/cluster-autoscaler.yaml new file mode 100644 index 00000000..fb8aba78 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/cluster-autoscaler.yaml @@ -0,0 +1,141 @@ +# Cluster Autoscaler manifest for the ${local.project_name}-${var.env} EKS cluster +# +# IMPORTANT: Replace with the actual IAM role +# ARN that Terraform outputs (cluster_autoscaler_irsa_role_arn) before applying +# this manifest. +# +# You can get the role ARN after running `terraform apply`: +# terraform -chdir=../../environments/dev output -raw cluster_autoscaler_irsa_role_arn +# and then update the annotation below accordingly. + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-autoscaler + namespace: kube-system + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::996810415034:role/ce-registry-eks-cluster-autoscaler-irsa-role + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["events", "endpoints"] + verbs: ["create", "patch"] + - apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods", "nodes", "services"] + verbs: ["watch", "list", "get", "patch", "update"] + # Allow autoscaler to patch node status (to add deletion candidate taints, etc.) 
+ - apiGroups: [""] + resources: ["nodes/status"] + verbs: ["patch", "update"] + - apiGroups: ["autoscaling.k8s.io"] + resources: ["*"] + verbs: ["*"] + - apiGroups: ["apps"] + resources: ["replicasets", "statefulsets", "daemonsets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["list", "watch", "get", "patch"] + # Additional resources required by autoscaler for status and discovery + - apiGroups: [""] + resources: ["replicationcontrollers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + # Added to support leader election using Leases in coordination.k8s.io + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["get", "list", "watch"] + # Storage resources + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"] + verbs: ["get", "list", "watch"] + # Namespaces list is needed for TopologySpreadConstraints & PDBs + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + app: cluster-autoscaler +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-autoscaler + template: + metadata: + labels: + app: cluster-autoscaler + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + spec: + serviceAccountName: cluster-autoscaler + containers: + - name: cluster-autoscaler + image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.4 + command: + - ./cluster-autoscaler + - --cluster-name=ce-registry-eks + - --cloud-provider=aws + - --scan-interval=10s + - --balance-similar-node-groups + - --skip-nodes-with-system-pods=false + - --skip-nodes-with-local-storage=false + - --aws-use-static-instance-list=true + - --expander=least-waste + # The line below ensures the autoscaler understands the min/max for the managed node group. 
+ - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ce-registry-eks + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs/ca-certificates.crt + readOnly: true + volumes: + - name: ssl-certs + hostPath: + path: /etc/ssl/certs/ca-bundle.crt + type: FileOrCreate diff --git a/terraform/environments/eks/k8s-manifests-sandbox/clusterissuer.yaml b/terraform/environments/eks/k8s-manifests-sandbox/clusterissuer.yaml new file mode 100644 index 00000000..0fad230e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/clusterissuer.yaml @@ -0,0 +1,14 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + email: ariel@learningtapestry.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-prod + solvers: + - dns01: + route53: + hostedZoneID: Z1N75467P1FUL5 diff --git a/terraform/environments/eks/k8s-manifests-sandbox/db-migrate-job.yaml b/terraform/environments/eks/k8s-manifests-sandbox/db-migrate-job.yaml new file mode 100644 index 00000000..7c89ec3e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/db-migrate-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: db-migrate- + namespace: credreg-sandbox + labels: + app: main-app +spec: + backoffLimit: 1 + activeDeadlineSeconds: 900 + ttlSecondsAfterFinished: 600 + template: + metadata: + labels: + app: main-app + spec: + serviceAccountName: main-app-service-account + restartPolicy: Never + containers: + - name: db-migrate + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:sandbox + imagePullPolicy: Always + command: ["/bin/bash","-lc","bundle exec rake db:migrate RACK_ENV=production"] + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config diff --git a/terraform/environments/eks/k8s-manifests-sandbox/debug-aws-pod.yaml b/terraform/environments/eks/k8s-manifests-sandbox/debug-aws-pod.yaml new file mode 100644 index 00000000..dd1b425e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/debug-aws-pod.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: debug-aws-cli + namespace: credreg-sandbox + labels: + app: debug-aws-cli +spec: + serviceAccountName: main-app-service-account + restartPolicy: Never + priorityClassName: sandbox-low + nodeSelector: + env: sandbox + tolerations: + - key: "env" + operator: "Equal" + value: "sandbox" + effect: "NoSchedule" + containers: + - name: awscli + image: public.ecr.aws/aws-cli/aws-cli:latest + imagePullPolicy: IfNotPresent + command: ["sh", "-lc", "echo Ready; sleep 3600"] + env: + - name: AWS_REGION + value: us-east-1 + - name: AWS_DEFAULT_REGION + value: us-east-1 + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "500m" + memory: "256Mi" + diff --git a/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-headless-svc.yaml b/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-headless-svc.yaml new file mode 100644 index 00000000..5294ceae --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-headless-svc.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch-discovery + namespace: credreg-sandbox + labels: + app: elasticsearch +spec: + clusterIP: None + publishNotReadyAddresses: true + selector: + app: elasticsearch + ports: + - 
name: http + port: 9200 + targetPort: 9200 + - name: transport + port: 9300 + targetPort: 9300 diff --git a/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-pvc.yaml b/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-pvc.yaml new file mode 100644 index 00000000..3df45680 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: credreg-sandbox + name: elasticsearch-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + storageClassName: gp3 diff --git a/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-statefulset.yaml b/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-statefulset.yaml new file mode 100644 index 00000000..8c21dc59 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-statefulset.yaml @@ -0,0 +1,73 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: elasticsearch + namespace: credreg-sandbox + labels: + app: elasticsearch +spec: + serviceName: elasticsearch-discovery + replicas: 2 + selector: + matchLabels: + app: elasticsearch + template: + metadata: + labels: + app: elasticsearch + spec: + subdomain: elasticsearch-discovery + priorityClassName: sandbox-medium + nodeSelector: + env: sandbox + tolerations: + - key: "env" + operator: "Equal" + value: "sandbox" + effect: "NoSchedule" + securityContext: + fsGroup: 1000 + runAsUser: 1000 + runAsGroup: 1000 + containers: + - name: elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:9.2.0 + ports: + - containerPort: 9200 + - containerPort: 9300 + resources: + requests: + cpu: "256m" + memory: "2Gi" + limits: + cpu: "1000m" + memory: "4Gi" + env: + - name: ES_JAVA_OPTS + value: "-Xms2g -Xmx2g" + - name: node.name + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: cluster.name + value: "elasticsearch" + - name: xpack.security.enabled + value: "false" + - name: network.host + value: "0.0.0.0" + - name: discovery.seed_hosts + value: "elasticsearch-0.elasticsearch-discovery,elasticsearch-1.elasticsearch-discovery" + - name: cluster.initial_master_nodes + value: "elasticsearch-0,elasticsearch-1" + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: gp3 + resources: + requests: + storage: 20Gi diff --git a/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-svc.yaml b/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-svc.yaml new file mode 100644 index 00000000..4772943b --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/elasticsearch-svc.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch + namespace: credreg-sandbox + labels: + app: elasticsearch +spec: + type: ClusterIP + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 + diff --git a/terraform/environments/eks/k8s-manifests-sandbox/external-secrets-operator.yaml b/terraform/environments/eks/k8s-manifests-sandbox/external-secrets-operator.yaml new file mode 100644 index 00000000..8496f9ad --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/external-secrets-operator.yaml @@ -0,0 +1,52 @@ + +--- +# 1. Namespace +apiVersion: v1 +kind: Namespace +metadata: + name: external-secrets + +--- +# 2. 
ServiceAccount with IRSA annotation +apiVersion: v1 +kind: ServiceAccount +metadata: + name: external-secrets + namespace: external-secrets + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::996810415034:role/ce-registry-eks-external-secrets-irsa-role + + +--- +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: aws-secret-manager +spec: + provider: + aws: + service: SecretsManager + region: us-east-1 + auth: + jwt: + serviceAccountRef: + name: external-secrets + namespace: external-secrets + +--- +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: app-secret + namespace: credreg-sandbox +spec: + refreshInterval: 1h + secretStoreRef: + name: aws-secret-manager + kind: ClusterSecretStore + target: + name: app-secrets + creationPolicy: Owner + dataFrom: + - extract: + key: credreg-secrets-eks-sandbox diff --git a/terraform/environments/eks/k8s-manifests-sandbox/external-secrets-values.yaml b/terraform/environments/eks/k8s-manifests-sandbox/external-secrets-values.yaml new file mode 100644 index 00000000..a1dbd08c --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/external-secrets-values.yaml @@ -0,0 +1,35 @@ +# Helm values for External Secrets Operator (ESO) +# ============================================== +# These values are consumed by the official Helm chart hosted at +# https://charts.external-secrets.io +# +# The file is parameterised with two placeholders that **must** be +# substituted before you run `helm upgrade --install`: +# +# – the IAM Role ARN that Terraform +# outputs as `external_secrets_irsa_role_arn`. +# – the AWS region where your cluster and +# Secrets Manager live (e.g. us-east-1). +# +# Either replace those strings manually or use `envsubst` as shown in +# the README. + +installCRDs: true + +serviceAccount: + name: external-secrets + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::996810415034:role/ce-registry-eks-external-secrets-irsa-role + +env: + AWS_REGION: us-east-1 + AWS_DEFAULT_REGION: us-east-1 + +# Default resource requests/limits are conservative but can be tuned. 
+resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi diff --git a/terraform/environments/eks/k8s-manifests-sandbox/newrelic-apm-enable.yaml b/terraform/environments/eks/k8s-manifests-sandbox/newrelic-apm-enable.yaml new file mode 100644 index 00000000..7c0015c7 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/newrelic-apm-enable.yaml @@ -0,0 +1,25 @@ + +apiVersion: newrelic.com/v1beta2 +kind: Instrumentation +metadata: + name: newrelic-instrumentation + namespace: newrelic +spec: + agent: + language: ruby + + image: newrelic/newrelic-ruby-init:latest + + # Select a namespace with a specific name by using "kubernetes.io/metadata.name" label + namespaceLabelSelector: + matchExpressions: + - key: "kubernetes.io/metadata.name" + operator: "In" + values: ["credreg-sandbox"] + + # Narrow to pods labeled as Ruby apps + podLabelSelector: + matchExpressions: + - key: "app-lang" + operator: "In" + values: ["ruby"] diff --git a/terraform/environments/eks/k8s-manifests-sandbox/opensearch-deployment.yaml b/terraform/environments/eks/k8s-manifests-sandbox/opensearch-deployment.yaml new file mode 100644 index 00000000..899b246e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/opensearch-deployment.yaml @@ -0,0 +1,80 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: credreg-sandbox + name: opensearch + labels: + app: opensearch +spec: + replicas: 1 + selector: + matchLabels: + app: opensearch + template: + metadata: + labels: + app: opensearch + spec: + securityContext: + fsGroup: 1000 # ensure mounted volume is writable by OpenSearch user + runAsUser: 1000 + runAsGroup: 1000 + containers: + - name: opensearch + image: opensearchproject/opensearch:3.3.1 + ports: + - containerPort: 9200 # OpenSearch HTTP port + resources: + requests: + cpu: "512m" + memory: "4096Mi" + limits: + cpu: "512m" + memory: "4096Mi" + env: + - name: OPENSEARCH_JAVA_OPTS + value: "-Xms2048m -Xmx2048m" + - name: DISABLE_INSTALL_DEMO_CONFIG + value: "true" + - name: cluster.name + value: "opensearch" + - name: bootstrap.memory_lock + value: "true" + - name: discovery.type + value: "single-node" + - name: DISABLE_SECURITY_PLUGIN + value: "true" + - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD + value: "password" + - name: network.host + value: "0.0.0.0" + - name: http.cors.enabled + value: "true" + - name: http.cors.allow-origin + value: "*" + - name: indices.query.bool.max_clause_count + value: "4096" + securityContext: + capabilities: + add: ["IPC_LOCK"] + volumeMounts: + - name: opensearch-data + mountPath: /usr/share/opensearch/data + restartPolicy: Always + volumes: + - name: opensearch-data + persistentVolumeClaim: + claimName: opensearch-data +--- +apiVersion: v1 +kind: Service +metadata: + name: opensearch +spec: + selector: + app: opensearch + ports: + - protocol: TCP + port: 9200 + targetPort: 9200 + type: ClusterIP diff --git a/terraform/environments/eks/k8s-manifests-sandbox/opensearch-pvc.yaml b/terraform/environments/eks/k8s-manifests-sandbox/opensearch-pvc.yaml new file mode 100644 index 00000000..ca6a1f9b --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/opensearch-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: credreg-sandbox + name: opensearch-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + storageClassName: gp2 diff --git a/terraform/environments/eks/k8s-manifests-sandbox/priorityclasses.yaml 
b/terraform/environments/eks/k8s-manifests-sandbox/priorityclasses.yaml new file mode 100644 index 00000000..d18850f3 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/priorityclasses.yaml @@ -0,0 +1,24 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: prod-high +value: 1000 +globalDefault: false +description: "High priority for production workloads" +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: sandbox-medium +value: 500 +globalDefault: false +description: "Medium priority for sandbox workloads" +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: sandbox-low +value: 100 +globalDefault: false +description: "Low priority for sandbox workloads" + diff --git a/terraform/environments/eks/k8s-manifests-sandbox/redis-configmap.yaml b/terraform/environments/eks/k8s-manifests-sandbox/redis-configmap.yaml new file mode 100644 index 00000000..c852bb70 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/redis-configmap.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: redis-config +data: + redis.conf: | + bind 0.0.0.0 + port 6379 + requirepass your_secure_password + appendonly yes + maxmemory 500mb + maxmemory-policy allkeys-lru + tcp-keepalive 300 + protected-mode yes diff --git a/terraform/environments/eks/k8s-manifests-sandbox/redis-deployment.yaml b/terraform/environments/eks/k8s-manifests-sandbox/redis-deployment.yaml new file mode 100644 index 00000000..96287391 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/redis-deployment.yaml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis + namespace: credreg-sandbox + labels: + app: redis +spec: + serviceName: redis-service + replicas: 1 # For production, use 3 with Redis Sentinel + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + priorityClassName: sandbox-medium + nodeSelector: + env: sandbox + tolerations: + - key: "env" + operator: "Equal" + value: "sandbox" + effect: "NoSchedule" + containers: + - name: redis + image: redis:7.2-alpine # Official Redis image + command: ["redis-server", "/usr/local/etc/redis/redis.conf"] + ports: + - containerPort: 6379 + volumeMounts: + - name: redis-data + mountPath: /data + - name: redis-config + mountPath: /usr/local/etc/redis + resources: + requests: + cpu: "100m" + memory: "256Mi" + limits: + cpu: "500m" + memory: "1Gi" + livenessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 5 + periodSeconds: 5 + volumes: + - name: redis-config + configMap: + name: redis-config + volumeClaimTemplates: + - metadata: + name: redis-data + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: "gp2" # AWS EBS gp3 (adjust if needed) + resources: + requests: + storage: 10Gi + +--- +apiVersion: v1 +kind: Service +metadata: + name: redis + namespace: credreg-sandbox +spec: + clusterIP: None # Headless service for direct pod access + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis diff --git a/terraform/environments/eks/k8s-manifests-sandbox/sync-es-graphs-job.yaml b/terraform/environments/eks/k8s-manifests-sandbox/sync-es-graphs-job.yaml new file mode 100644 index 00000000..20cd0671 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/sync-es-graphs-job.yaml @@ -0,0 +1,87 @@ +apiVersion: batch/v1 +kind: Job +metadata: + 
generateName: sync-es-graphs- + namespace: credreg-sandbox + labels: + app: main-app +spec: + backoffLimit: 0 + activeDeadlineSeconds: 43200 + ttlSecondsAfterFinished: 1800 + template: + metadata: + labels: + app: main-app + spec: + priorityClassName: prod-high + serviceAccountName: main-app-service-account + restartPolicy: Never + containers: + - name: sync-es-graphs + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:sandbox + imagePullPolicy: Always + resources: + requests: + cpu: "512m" + memory: "4096Mi" + limits: + cpu: "1024" + memory: "4096Mi" + command: + - /bin/bash + - -lc + - | + RACK_ENV=production bundle exec ruby - <<'RUBY' + require 'benchmark' + require './config/application' + require 'json' + $stdout.sync = true + errors = {} + processed = 0 + total = Envelope.not_deleted.count + puts "Starting ES sync for #{total} envelopes at #{Time.now.utc}" + puts Benchmark.measure { + Envelope.not_deleted.includes(:envelope_community).find_each do |envelope| + processed += 1 + begin + SyncEnvelopeGraphWithEs.index(envelope) + rescue => e + errors[envelope.id] = e.message + end + # Progress line with current error count every 100 records + puts "Progress: processed=#{processed}/#{total} errors=#{errors.size}" if (processed % 100).zero? + end + } + puts "Finished at #{Time.now.utc} — processed=#{processed}, errors=#{errors.size}" + begin + File.write('/tmp/errors.json', JSON.pretty_generate(errors)) + puts "Wrote /tmp/errors.json (#{errors.size} entries)" + rescue => fe + warn "Failed to write /tmp/errors.json: #{fe.class}: #{fe.message}" + end + # Try to upload errors file to S3 for later analysis + begin + require 'aws-sdk-s3' + bucket = ENV['S3_ERRORS_BUCKET'] || 'cer-envelope-graphs-sandbox' + key = ENV['S3_ERRORS_KEY'] || "errors/errors-#{Time.now.utc.strftime('%Y%m%dT%H%M%SZ')}.json" + region = ENV['AWS_REGION'] || 'us-east-1' + s3 = Aws::S3::Client.new(region: region) + s3.put_object(bucket: bucket, key: key, body: File.open('/tmp/errors.json', 'rb')) + puts "Uploaded /tmp/errors.json to s3://#{bucket}/#{key}" + rescue LoadError + warn 'aws-sdk-s3 gem not available; skipping S3 upload of errors.json' + rescue => se + warn "Failed to upload errors.json to S3: #{se.class}: #{se.message}" + end + unless errors.empty? + sample = errors.to_a.first(5).to_h + warn "Encountered #{errors.size} errors. 
Sample: #{sample.inspect}" + exit 1 + end + RUBY + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config diff --git a/terraform/environments/eks/k8s-manifests-sandbox/worker-deployment.yaml b/terraform/environments/eks/k8s-manifests-sandbox/worker-deployment.yaml new file mode 100644 index 00000000..029c2874 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-sandbox/worker-deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: worker-app + namespace: credreg-sandbox + labels: + app: worker-app +spec: + replicas: 1 + selector: + matchLabels: + app: worker-app + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: worker-app + app-lang: ruby + spec: + priorityClassName: sandbox-medium + nodeSelector: + env: sandbox + tolerations: + - key: "env" + operator: "Equal" + value: "sandbox" + effect: "NoSchedule" + serviceAccountName: main-app-service-account + containers: + - name: worker + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:sandbox + imagePullPolicy: Always + env: + - name: NEW_RELIC_APP_NAME + value: "Credential Engine Sandbox" + - name: PATH + value: "/app/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin" + command: ["/bin/bash","-lc"] + args: + - | + if [ -x ./bin/sidekiq ]; then + ./bin/sidekiq -r ./config/application.rb + else + bundle exec sidekiq -r ./config/application.rb + fi + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config + resources: + requests: + cpu: "400m" + memory: "256Mi" + limits: + cpu: "2000m" + memory: "2048Mi" diff --git a/terraform/environments/eks/k8s-manifests-staging/app-configmap.yaml b/terraform/environments/eks/k8s-manifests-staging/app-configmap.yaml new file mode 100644 index 00000000..59c98cad --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/app-configmap.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: main-app-config + namespace: credreg-staging +data: + POSTGRESQL_DATABASE: credential_registry_production + POSTGRESQL_USERNAME: credential_registry_production + RACK_ENV: staging + DOCKER_ENV: "true" + ENVELOPE_GRAPHS_BUCKET: cer-envelope-graphs-staging + ENVELOPE_DOWNLOADS_BUCKET: cer-envelope-downloads + IAM_COMMUNITY_ROLE_ADMIN: ROLE_ADMINISTRATOR + IAM_COMMUNITY_ROLE_READEE: ROLE_READER + IAM_COMMUNITY_ROLE_PUBLISHER: ROLE_PUBLISHER + IAM_COMMUNITY_CLAIM_NAME: community_name + IAM_CLIENT_ID: RegistryAPI + IAM_URL: https://test-ce-kc-002.credentialengine.org/realms/CE-Test + IAM_CLIENT: TestStagingRegistryAPI + AIRBRAKE_PROJECT_ID: '270205' + SIDEKIQ_CONCURRENCY: '10' + ELASTICSEARCH_ADDRESS: http://elasticsearch:9200 \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-staging/app-deployment.yaml b/terraform/environments/eks/k8s-manifests-staging/app-deployment.yaml new file mode 100644 index 00000000..f28d8473 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/app-deployment.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: main-app + namespace: credreg-staging + labels: + app: main +spec: + replicas: 1 # Adjust based on traffic + selector: + matchLabels: + app: main-app + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: main-app + app-lang: ruby + spec: + priorityClassName: staging-medium + nodeSelector: + env: staging + tolerations: + - key: "env" + operator: "Equal" + value: "staging" + effect: 
"NoSchedule" + serviceAccountName: main-app-service-account + # DB migrations are handled via a dedicated Job + containers: + - name: main-app + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:staging + imagePullPolicy: Always + command: ["/bin/bash", "-c", "bin/rackup -o 0.0.0.0"] + env: + - name: NEW_RELIC_APP_NAME + value: "Credential Engine Staging" + ports: + - containerPort: 9292 + envFrom: + - secretRef: + name: app-secrets # DB credentials, APP_KEY, etc. + - configMapRef: + name: main-app-config + resources: + requests: + cpu: "500m" + memory: "256Mi" + limits: + cpu: "1000m" + memory: "1024Mi" + +--- +apiVersion: v1 +kind: Service +metadata: + name: main-app + namespace: credreg-staging +spec: + type: ClusterIP + selector: + app: main-app + ports: + - protocol: TCP + port: 9292 + targetPort: 9292 diff --git a/terraform/environments/eks/k8s-manifests-staging/app-hpa.yaml b/terraform/environments/eks/k8s-manifests-staging/app-hpa.yaml new file mode 100644 index 00000000..261531df --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/app-hpa.yaml @@ -0,0 +1,27 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: main-app-hpa + namespace: credreg-staging + labels: + app: laravel +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: main-app + minReplicas: 1 + maxReplicas: 5 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 60 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 70 diff --git a/terraform/environments/eks/k8s-manifests-staging/app-ingress.yaml b/terraform/environments/eks/k8s-manifests-staging/app-ingress.yaml new file mode 100644 index 00000000..b070fb1e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/app-ingress.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: main-app-ingress + annotations: + # NGINX Ingress Controller annotations + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" # HTTP → HTTPS + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + nginx.ingress.kubernetes.io/proxy-body-size: "10m" +spec: + ingressClassName: nginx + tls: + - hosts: + - staging.credentialengineregistry.org + secretName: staging-credentialengineregistry-org-tls + rules: + - host: staging.credentialengineregistry.org + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: main-app + port: + number: 9292 diff --git a/terraform/environments/eks/k8s-manifests-staging/app-namespace.yaml b/terraform/environments/eks/k8s-manifests-staging/app-namespace.yaml new file mode 100644 index 00000000..6a69da5b --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/app-namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: credreg-staging + labels: + name: main-app \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-staging/app-secrets.yaml b/terraform/environments/eks/k8s-manifests-staging/app-secrets.yaml new file mode 100644 index 00000000..31653072 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/app-secrets.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: main-app-secrets +type: Opaque +stringData: + POSTGRESQL_PASSWORD=[secret value] + SECRET_KEY_BASE=[openssl rand -hex 32] + POSTGRESQL_ADDRESS=[POSTGRESQL_ADDRESS] + SIDEKIQ_USERNAME=[SIDEKIQ_USERNAME] + SIDEKIQ_PASSWORD=[SIDEKIQ_PASSWORD] + REDIS_URL=[REDIS_URL] + 
AIRBRAKE_PROJECT_KEY: "[AIRBRAKE_PROJECT_KEY]" \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-staging/app-service-account.yaml b/terraform/environments/eks/k8s-manifests-staging/app-service-account.yaml new file mode 100644 index 00000000..286c697e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/app-service-account.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: main-app-service-account + annotations: + eks.amazonaws.com/role-arn: "arn:aws:iam::996810415034:role/ce-registry-eks-application-irsa-role" diff --git a/terraform/environments/eks/k8s-manifests-staging/certificate.yaml b/terraform/environments/eks/k8s-manifests-staging/certificate.yaml new file mode 100644 index 00000000..7cf9fb0d --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/certificate.yaml @@ -0,0 +1,12 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: staging-credentialengineregistry-org-tls + namespace: credreg-staging +spec: + secretName: staging-credentialengineregistry-org-tls + dnsNames: + - staging.credentialengineregistry.org + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer \ No newline at end of file diff --git a/terraform/environments/eks/k8s-manifests-staging/cluster-autoscaler.yaml b/terraform/environments/eks/k8s-manifests-staging/cluster-autoscaler.yaml new file mode 100644 index 00000000..fb8aba78 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/cluster-autoscaler.yaml @@ -0,0 +1,141 @@ +# Cluster Autoscaler manifest for the ${local.project_name}-${var.env} EKS cluster +# +# IMPORTANT: Replace the role-arn annotation below with the actual IAM role +# ARN that Terraform outputs (cluster_autoscaler_irsa_role_arn) before applying +# this manifest. +# +# You can get the role ARN after running `terraform apply`: +# terraform -chdir=../../environments/dev output -raw cluster_autoscaler_irsa_role_arn +# and then update the annotation below accordingly. + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-autoscaler + namespace: kube-system + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::996810415034:role/ce-registry-eks-cluster-autoscaler-irsa-role + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["events", "endpoints"] + verbs: ["create", "patch"] + - apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods", "nodes", "services"] + verbs: ["watch", "list", "get", "patch", "update"] + # Allow autoscaler to patch node status (to add deletion candidate taints, etc.) 
+ - apiGroups: [""] + resources: ["nodes/status"] + verbs: ["patch", "update"] + - apiGroups: ["autoscaling.k8s.io"] + resources: ["*"] + verbs: ["*"] + - apiGroups: ["apps"] + resources: ["replicasets", "statefulsets", "daemonsets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["list", "watch", "get", "patch"] + # Additional resources required by autoscaler for status and discovery + - apiGroups: [""] + resources: ["replicationcontrollers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + # Added to support leader election using Leases in coordination.k8s.io + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["get", "list", "watch"] + # Storage resources + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"] + verbs: ["get", "list", "watch"] + # Namespaces list is needed for TopologySpreadConstraints & PDBs + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + app: cluster-autoscaler +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-autoscaler + template: + metadata: + labels: + app: cluster-autoscaler + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + spec: + serviceAccountName: cluster-autoscaler + containers: + - name: cluster-autoscaler + image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.4 + command: + - ./cluster-autoscaler + - --cluster-name=ce-registry-eks + - --cloud-provider=aws + - --scan-interval=10s + - --balance-similar-node-groups + - --skip-nodes-with-system-pods=false + - --skip-nodes-with-local-storage=false + - --aws-use-static-instance-list=true + - --expander=least-waste + # The line below ensures the autoscaler understands the min/max for the managed node group. 
+ - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ce-registry-eks + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs/ca-certificates.crt + readOnly: true + volumes: + - name: ssl-certs + hostPath: + path: /etc/ssl/certs/ca-bundle.crt + type: FileOrCreate diff --git a/terraform/environments/eks/k8s-manifests-staging/clusterissuer.yaml b/terraform/environments/eks/k8s-manifests-staging/clusterissuer.yaml new file mode 100644 index 00000000..0fad230e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/clusterissuer.yaml @@ -0,0 +1,14 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + email: ariel@learningtapestry.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-prod + solvers: + - dns01: + route53: + hostedZoneID: Z1N75467P1FUL5 diff --git a/terraform/environments/eks/k8s-manifests-staging/db-migrate-job.yaml b/terraform/environments/eks/k8s-manifests-staging/db-migrate-job.yaml new file mode 100644 index 00000000..1ff2d6fb --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/db-migrate-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: db-migrate- + namespace: credreg-staging + labels: + app: main-app +spec: + backoffLimit: 1 + activeDeadlineSeconds: 900 + ttlSecondsAfterFinished: 600 + template: + metadata: + labels: + app: main-app + spec: + serviceAccountName: main-app-service-account + restartPolicy: Never + containers: + - name: db-migrate + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:staging + imagePullPolicy: Always + command: ["/bin/bash","-lc","bundle exec rake db:migrate RACK_ENV=production"] + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config diff --git a/terraform/environments/eks/k8s-manifests-staging/debug-aws-pod.yaml b/terraform/environments/eks/k8s-manifests-staging/debug-aws-pod.yaml new file mode 100644 index 00000000..f1c4c60b --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/debug-aws-pod.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: debug-aws-cli + namespace: credreg-staging + labels: + app: debug-aws-cli +spec: + serviceAccountName: main-app-service-account + restartPolicy: Never + priorityClassName: staging-medium + nodeSelector: + env: staging + tolerations: + - key: "env" + operator: "Equal" + value: "staging" + effect: "NoSchedule" + containers: + - name: awscli + image: public.ecr.aws/aws-cli/aws-cli:latest + imagePullPolicy: IfNotPresent + command: ["sh", "-lc", "echo Ready; sleep 3600"] + env: + - name: AWS_REGION + value: us-east-1 + - name: AWS_DEFAULT_REGION + value: us-east-1 + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "500m" + memory: "256Mi" + diff --git a/terraform/environments/eks/k8s-manifests-staging/elasticsearch-headless-svc.yaml b/terraform/environments/eks/k8s-manifests-staging/elasticsearch-headless-svc.yaml new file mode 100644 index 00000000..ae2b6133 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/elasticsearch-headless-svc.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch-discovery + namespace: credreg-staging + labels: + app: elasticsearch +spec: + clusterIP: None + publishNotReadyAddresses: true + selector: + app: elasticsearch + ports: 
+ - name: http + port: 9200 + targetPort: 9200 + - name: transport + port: 9300 + targetPort: 9300 diff --git a/terraform/environments/eks/k8s-manifests-staging/elasticsearch-pvc.yaml b/terraform/environments/eks/k8s-manifests-staging/elasticsearch-pvc.yaml new file mode 100644 index 00000000..fb02982c --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/elasticsearch-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: credreg-staging + name: elasticsearch-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + storageClassName: gp3 diff --git a/terraform/environments/eks/k8s-manifests-staging/elasticsearch-statefulset.yaml b/terraform/environments/eks/k8s-manifests-staging/elasticsearch-statefulset.yaml new file mode 100644 index 00000000..b845c13d --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/elasticsearch-statefulset.yaml @@ -0,0 +1,67 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: elasticsearch + namespace: credreg-staging + labels: + app: elasticsearch +spec: + serviceName: elasticsearch-discovery + replicas: 2 + selector: + matchLabels: + app: elasticsearch + template: + metadata: + labels: + app: elasticsearch + spec: + subdomain: elasticsearch-discovery + priorityClassName: staging-medium + nodeSelector: + env: staging + tolerations: + - key: "env" + operator: "Equal" + value: "staging" + effect: "NoSchedule" + securityContext: + fsGroup: 1000 + runAsUser: 1000 + runAsGroup: 1000 + containers: + - name: elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:9.2.0 + ports: + - containerPort: 9200 + - containerPort: 9300 + resources: + requests: + cpu: "256m" + memory: "1Gi" + limits: + cpu: "1000m" + memory: "4Gi" + env: + - name: ES_JAVA_OPTS + value: "-Xms2g -Xmx2g" + - name: cluster.name + value: "elasticsearch" + - name: xpack.security.enabled + value: "false" + - name: network.host + value: "0.0.0.0" + - name: discovery.seed_hosts + value: "elasticsearch-discovery" + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: gp3 + resources: + requests: + storage: 20Gi diff --git a/terraform/environments/eks/k8s-manifests-staging/elasticsearch-svc.yaml b/terraform/environments/eks/k8s-manifests-staging/elasticsearch-svc.yaml new file mode 100644 index 00000000..809688ea --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/elasticsearch-svc.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch + namespace: credreg-staging + labels: + app: elasticsearch +spec: + type: ClusterIP + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 + diff --git a/terraform/environments/eks/k8s-manifests-staging/external-secrets-operator.yaml b/terraform/environments/eks/k8s-manifests-staging/external-secrets-operator.yaml new file mode 100644 index 00000000..4f186d8b --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/external-secrets-operator.yaml @@ -0,0 +1,52 @@ + +--- +# 1. Namespace +apiVersion: v1 +kind: Namespace +metadata: + name: external-secrets + +--- +# 2. 
ServiceAccount with IRSA annotation +apiVersion: v1 +kind: ServiceAccount +metadata: + name: external-secrets + namespace: external-secrets + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::996810415034:role/ce-registry-eks-external-secrets-irsa-role + + +--- +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: aws-secret-manager +spec: + provider: + aws: + service: SecretsManager + region: us-east-1 + auth: + jwt: + serviceAccountRef: + name: external-secrets + namespace: external-secrets + +--- +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: app-secret + namespace: credreg-staging +spec: + refreshInterval: 1h + secretStoreRef: + name: aws-secret-manager + kind: ClusterSecretStore + target: + name: app-secrets + creationPolicy: Owner + dataFrom: + - extract: + key: credreg-secrets-eks-staging diff --git a/terraform/environments/eks/k8s-manifests-staging/external-secrets-values.yaml b/terraform/environments/eks/k8s-manifests-staging/external-secrets-values.yaml new file mode 100644 index 00000000..a1dbd08c --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/external-secrets-values.yaml @@ -0,0 +1,35 @@ +# Helm values for External Secrets Operator (ESO) +# ============================================== +# These values are consumed by the official Helm chart hosted at +# https://charts.external-secrets.io +# +# The file is parameterised with two placeholders that **must** be +# substituted before you run `helm upgrade --install`: +# +# – the IAM Role ARN that Terraform +# outputs as `external_secrets_irsa_role_arn`. +# – the AWS region where your cluster and +# Secrets Manager live (e.g. us-east-1). +# +# Either replace those strings manually or use `envsubst` as shown in +# the README. + +installCRDs: true + +serviceAccount: + name: external-secrets + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::996810415034:role/ce-registry-eks-external-secrets-irsa-role + +env: + AWS_REGION: us-east-1 + AWS_DEFAULT_REGION: us-east-1 + +# Default resource requests/limits are conservative but can be tuned. 
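+# Illustrative install sketch for the substitution workflow described in the header above
+# (the chart repo and chart name are the official ESO ones; the Terraform output name, file
+# paths and release name are assumptions, and envsubst is only needed if the placeholder
+# form of this file is used rather than the hard-coded values):
+#   export AWS_REGION=us-east-1
+#   export EXTERNAL_SECRETS_ROLE_ARN="$(terraform output -raw external_secrets_irsa_role_arn)"
+#   envsubst < external-secrets-values.yaml > /tmp/eso-values.yaml
+#   helm repo add external-secrets https://charts.external-secrets.io
+#   helm upgrade --install external-secrets external-secrets/external-secrets \
+#     --namespace external-secrets --create-namespace -f /tmp/eso-values.yaml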
+resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi diff --git a/terraform/environments/eks/k8s-manifests-staging/img-inspect.yaml b/terraform/environments/eks/k8s-manifests-staging/img-inspect.yaml new file mode 100644 index 00000000..734bff2a --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/img-inspect.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: img-inspect + namespace: credreg-staging + labels: + app: img-inspect +spec: + restartPolicy: Never + serviceAccountName: main-app-service-account + containers: + - name: inspector + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:staging + imagePullPolicy: Always + command: ["/bin/bash","-lc","sleep infinity"] + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config + resources: + requests: + cpu: "100m" + memory: "128Mi" + limits: + cpu: "500m" + memory: "512Mi" diff --git a/terraform/environments/eks/k8s-manifests-staging/newrelic-apm-enable.yaml b/terraform/environments/eks/k8s-manifests-staging/newrelic-apm-enable.yaml new file mode 100644 index 00000000..0ff1637e --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/newrelic-apm-enable.yaml @@ -0,0 +1,21 @@ +apiVersion: newrelic.com/v1beta2 +kind: Instrumentation +metadata: + name: newrelic-instrumentation + namespace: newrelic +spec: + agent: + language: ruby + image: newrelic/newrelic-ruby-init:latest + + namespaceLabelSelector: + matchExpressions: + - key: "kubernetes.io/metadata.name" + operator: "In" + values: ["credreg-staging"] + + podLabelSelector: + matchExpressions: + - key: "app-lang" + operator: "In" + values: ["ruby"] diff --git a/terraform/environments/eks/k8s-manifests-staging/opensearch-deployment.yaml b/terraform/environments/eks/k8s-manifests-staging/opensearch-deployment.yaml new file mode 100644 index 00000000..59590209 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/opensearch-deployment.yaml @@ -0,0 +1,80 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: credreg-staging + name: opensearch + labels: + app: opensearch +spec: + replicas: 1 + selector: + matchLabels: + app: opensearch + template: + metadata: + labels: + app: opensearch + spec: + securityContext: + fsGroup: 1000 # ensure mounted volume is writable by OpenSearch user + runAsUser: 1000 + runAsGroup: 1000 + containers: + - name: opensearch + image: opensearchproject/opensearch:3.3.1 + ports: + - containerPort: 9200 # OpenSearch HTTP port + resources: + requests: + cpu: "512m" + memory: "4096Mi" + limits: + cpu: "512m" + memory: "4096Mi" + env: + - name: OPENSEARCH_JAVA_OPTS + value: "-Xms2048m -Xmx2048m" + - name: DISABLE_INSTALL_DEMO_CONFIG + value: "true" + - name: cluster.name + value: "opensearch" + - name: bootstrap.memory_lock + value: "true" + - name: discovery.type + value: "single-node" + - name: DISABLE_SECURITY_PLUGIN + value: "true" + - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD + value: "password" + - name: network.host + value: "0.0.0.0" + - name: http.cors.enabled + value: "true" + - name: http.cors.allow-origin + value: "*" + - name: indices.query.bool.max_clause_count + value: "4096" + securityContext: + capabilities: + add: ["IPC_LOCK"] + volumeMounts: + - name: opensearch-data + mountPath: /usr/share/opensearch/data + restartPolicy: Always + volumes: + - name: opensearch-data + persistentVolumeClaim: + claimName: opensearch-data +--- +apiVersion: v1 +kind: Service +metadata: + name: opensearch +spec: + selector: + app: 
opensearch + ports: + - protocol: TCP + port: 9200 + targetPort: 9200 + type: ClusterIP diff --git a/terraform/environments/eks/k8s-manifests-staging/opensearch-pvc.yaml b/terraform/environments/eks/k8s-manifests-staging/opensearch-pvc.yaml new file mode 100644 index 00000000..648817c3 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/opensearch-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: credreg-staging + name: opensearch-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + storageClassName: gp2 diff --git a/terraform/environments/eks/k8s-manifests-staging/priorityclasses.yaml b/terraform/environments/eks/k8s-manifests-staging/priorityclasses.yaml new file mode 100644 index 00000000..211a3510 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/priorityclasses.yaml @@ -0,0 +1,24 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: prod-high +value: 1000 +globalDefault: false +description: "High priority for production workloads" +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: staging-medium +value: 500 +globalDefault: false +description: "Medium priority for staging workloads" +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: sandbox-low +value: 100 +globalDefault: false +description: "Low priority for sandbox workloads" + diff --git a/terraform/environments/eks/k8s-manifests-staging/redis-configmap.yaml b/terraform/environments/eks/k8s-manifests-staging/redis-configmap.yaml new file mode 100644 index 00000000..c852bb70 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/redis-configmap.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: redis-config +data: + redis.conf: | + bind 0.0.0.0 + port 6379 + requirepass your_secure_password + appendonly yes + maxmemory 500mb + maxmemory-policy allkeys-lru + tcp-keepalive 300 + protected-mode yes diff --git a/terraform/environments/eks/k8s-manifests-staging/redis-deployment.yaml b/terraform/environments/eks/k8s-manifests-staging/redis-deployment.yaml new file mode 100644 index 00000000..6babdbf1 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/redis-deployment.yaml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis + namespace: credreg-staging + labels: + app: redis +spec: + serviceName: redis-service + replicas: 1 # For production, use 3 with Redis Sentinel + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + priorityClassName: staging-medium + nodeSelector: + env: staging + tolerations: + - key: "env" + operator: "Equal" + value: "staging" + effect: "NoSchedule" + containers: + - name: redis + image: redis:7.2-alpine # Official Redis image + command: ["redis-server", "/usr/local/etc/redis/redis.conf"] + ports: + - containerPort: 6379 + volumeMounts: + - name: redis-data + mountPath: /data + - name: redis-config + mountPath: /usr/local/etc/redis + resources: + requests: + cpu: "100m" + memory: "256Mi" + limits: + cpu: "500m" + memory: "1Gi" + livenessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 5 + periodSeconds: 5 + volumes: + - name: redis-config + configMap: + name: redis-config + volumeClaimTemplates: + - metadata: + name: redis-data + spec: + accessModes: ["ReadWriteOnce"] + 
storageClassName: "gp2" # AWS EBS gp3 (adjust if needed) + resources: + requests: + storage: 10Gi + +--- +apiVersion: v1 +kind: Service +metadata: + name: redis + namespace: credreg-staging +spec: + clusterIP: None # Headless service for direct pod access + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis diff --git a/terraform/environments/eks/k8s-manifests-staging/storageclass-gp3.yaml b/terraform/environments/eks/k8s-manifests-staging/storageclass-gp3.yaml new file mode 100644 index 00000000..74340d71 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/storageclass-gp3.yaml @@ -0,0 +1,11 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gp3 +provisioner: ebs.csi.aws.com +parameters: + type: gp3 +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true + diff --git a/terraform/environments/eks/k8s-manifests-staging/worker-deployment.yaml b/terraform/environments/eks/k8s-manifests-staging/worker-deployment.yaml new file mode 100644 index 00000000..879f2c16 --- /dev/null +++ b/terraform/environments/eks/k8s-manifests-staging/worker-deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: worker-app + namespace: credreg-staging + labels: + app: worker-app +spec: + replicas: 1 + selector: + matchLabels: + app: worker-app + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: worker-app + app-lang: ruby + spec: + priorityClassName: staging-medium + nodeSelector: + env: staging + tolerations: + - key: "env" + operator: "Equal" + value: "staging" + effect: "NoSchedule" + serviceAccountName: main-app-service-account + containers: + - name: worker + image: 996810415034.dkr.ecr.us-east-1.amazonaws.com/registry:staging + imagePullPolicy: Always + env: + - name: NEW_RELIC_APP_NAME + value: "Credential Engine Staging" + - name: PATH + value: "/app/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin" + command: ["/bin/bash","-lc"] + args: + - | + if [ -x ./bin/sidekiq ]; then + ./bin/sidekiq -r ./config/application.rb + else + bundle exec sidekiq -r ./config/application.rb + fi + envFrom: + - secretRef: + name: app-secrets + - configMapRef: + name: main-app-config + resources: + requests: + cpu: "200m" + memory: "256Mi" + limits: + cpu: "1000m" + memory: "1024Mi" diff --git a/terraform/environments/eks/main.tf b/terraform/environments/eks/main.tf new file mode 100644 index 00000000..364c8447 --- /dev/null +++ b/terraform/environments/eks/main.tf @@ -0,0 +1,223 @@ +locals { + project_name = "ce-registry" + common_tags = { + "project" = local.project_name + "environment" = var.env + } +} + + +module "vpc" { + source = "../../modules/vpc" + project_name = local.project_name + env = var.env + vpc_cidr = var.vpc_cidr + public_subnet_cidrs = var.public_subnet_cidrs + private_subnet_cidrs = var.private_subnet_cidrs + azs = var.azs + common_tags = local.common_tags +} + +## Staging RDS instance +module "rds-staging" { + source = "../../modules/rds" + enable_db_instance = false + project_name = local.project_name + security_group_description = "Allow inbound traffic from bastion" + env = var.env + vpc_id = module.vpc.vpc_id + vpc_cidr = var.vpc_cidr + subnet_ids = module.vpc.public_subnet_ids + db_name = var.db_name_staging + db_username = var.db_username_staging + instance_class = var.instance_class + common_tags = local.common_tags + ssm_db_password_arn = var.ssm_db_password_arn + rds_engine_version = var.rds_engine_version + 
allocated_storage = var.allocated_storage + # Allow destroying without snapshot for staging + skip_final_snapshot = true + deletion_protection = false + name_suffix = "-staging" +} + +module "rds-sandbox" { + source = "../../modules/rds" + enable_db_instance = false + project_name = local.project_name + security_group_description = "Allow inbound traffic from bastion" + env = var.env + vpc_id = module.vpc.vpc_id + vpc_cidr = var.vpc_cidr + subnet_ids = module.vpc.public_subnet_ids + db_name = var.db_name_sandbox + db_username = var.db_username_sandbox + instance_class = var.instance_class + common_tags = local.common_tags + ssm_db_password_arn = var.ssm_db_password_arn + rds_engine_version = var.rds_engine_version + allocated_storage = var.allocated_storage + # Allow destroying without snapshot for sandbox + skip_final_snapshot = true + deletion_protection = false + name_suffix = "-sandbox" +} +## Production RDS instance +# module "rds-production" { +# source = "../../modules/rds" +# project_name = local.project_name +# security_group_description = "Allow inbound traffic from bastion" +# env = var.env +# vpc_id = module.vpc.vpc_id +# vpc_cidr = var.vpc_cidr +# subnet_ids = module.vpc.public_subnet_ids +# db_name = var.db_name_prod +# db_username = var.db_username_prod +# instance_class = var.instance_class +# common_tags = local.common_tags +# ssm_db_password_arn = var.ssm_db_password_arn +# rds_engine_version = var.rds_engine_version +# allocated_storage = var.allocated_storage +# # Leave production safer by default; override if needed during teardown +# skip_final_snapshot = false +# deletion_protection = true +# name_suffix = "-prod" +# } + +module "ecr" { + source = "../../modules/ecr" + project_name = var.ecr_repository_name + env = var.env +} + +output "ecr_repository_url" { + value = module.ecr.repository_url +} + +output "cluster_autoscaler_irsa_role_arn" { + description = "IAM role ARN that the Cluster Autoscaler service account should assume via IRSA" + value = module.eks.cluster_autoscaler_irsa_role_arn +} + +module "eks" { + source = "../../modules/eks" + environment = var.env + cluster_name = "${local.project_name}-${var.env}" + cluster_version = var.cluster_version + private_subnets = module.vpc.private_subnet_ids + common_tags = local.common_tags + priv_ng_max_size = var.priv_ng_max_size + priv_ng_min_size = var.priv_ng_min_size + priv_ng_des_size = var.priv_ng_des_size + priv_ng_instance_type = var.priv_ng_instance_type + route53_hosted_zone_id = var.route53_hosted_zone_id ## For IRSA role and cert-manager issuance + app_namespace = var.app_namespace_staging + app_service_account = coalesce(var.app_service_account_staging, var.app_service_account) + app_namespace_sandbox = var.app_namespace_sandbox + app_service_account_sandbox = var.app_service_account_sandbox + app_namespace_prod = var.app_namespace_prod + app_service_account_prod = var.app_service_account_prod + # Env node group scaling + ng_staging_min_size = var.ng_staging_min_size + ng_staging_desired_size = var.ng_staging_desired_size + ng_staging_max_size = var.ng_staging_max_size + ng_sandbox_min_size = var.ng_sandbox_min_size + ng_sandbox_desired_size = var.ng_sandbox_desired_size + ng_sandbox_max_size = var.ng_sandbox_max_size + ng_prod_min_size = var.ng_prod_min_size + ng_prod_desired_size = var.ng_prod_desired_size + ng_prod_max_size = var.ng_prod_max_size +} + +module "application_secret" { + source = "../../modules/secrets" + + secret_name = "credreg-secrets-${var.env}-staging" + description = "credreg 
application secrets for the ${var.env} staging environment" + + secret_values = { + POSTGRESQL_PASSWORD = var.db_password_staging + SECRET_KEY_BASE = var.secret_key_base_staging + POSTGRESQL_ADDRESS = var.db_host_staging + REDIS_URL = var.redis_url_staging + SIDEKIQ_USERNAME = var.sidekiq_username_staging + SIDEKIQ_PASSWORD = var.sidekiq_password_staging + } + + tags = local.common_tags +} + +module "application_secret_sandbox" { + source = "../../modules/secrets" + + secret_name = "credreg-secrets-${var.env}-sandbox" + description = "credreg application secrets for the ${var.env} sandbox environment" + + secret_values = { + POSTGRESQL_PASSWORD = var.db_password_sandbox + SECRET_KEY_BASE = var.secret_key_base_sandbox + POSTGRESQL_ADDRESS = var.db_host_sandbox + REDIS_URL = var.redis_url_sandbox + SIDEKIQ_USERNAME = var.sidekiq_username_sandbox + SIDEKIQ_PASSWORD = var.sidekiq_password_sandbox + } + + tags = local.common_tags +} + +module "application_secret_prod" { + source = "../../modules/secrets" + + secret_name = "credreg-secrets-${var.env}-production" + description = "credreg application secrets for the ${var.env} production environment" + + secret_values = { + POSTGRESQL_PASSWORD = var.db_password_prod + SECRET_KEY_BASE = var.secret_key_base_prod + POSTGRESQL_ADDRESS = var.db_host_prod + REDIS_URL = var.redis_url_prod + SIDEKIQ_USERNAME = var.sidekiq_username_prod + SIDEKIQ_PASSWORD = var.sidekiq_password_prod + } + + tags = local.common_tags +} + +## Staging S3: Envelope Graphs (module) +module "envelope_graphs_s3_staging" { + source = "../../modules/envelope_graphs_s3" + bucket_name = var.envelope_graphs_bucket_name_staging + environment = "staging" + common_tags = local.common_tags +} + +output "cer_envelope_graphs_bucket_name" { + value = module.envelope_graphs_s3_staging.bucket_name + description = "Staging S3 bucket name for envelope graphs" +} + +## Sandbox S3: Envelope Graphs (module) +module "envelope_graphs_s3_sandbox" { + source = "../../modules/envelope_graphs_s3" + bucket_name = var.envelope_graphs_bucket_name_sandbox + environment = "sandbox" + common_tags = local.common_tags +} + +output "cer_envelope_graphs_bucket_name_sandbox" { + value = module.envelope_graphs_s3_sandbox.bucket_name + description = "Sandbox S3 bucket name for envelope graphs" +} + +## Production S3: Envelope Graphs (module) +module "envelope_graphs_s3_prod" { + source = "../../modules/envelope_graphs_s3" + bucket_name = var.envelope_graphs_bucket_name_prod + environment = "production" + common_tags = local.common_tags +} + +output "cer_envelope_graphs_bucket_name_prod" { + value = module.envelope_graphs_s3_prod.bucket_name + description = "Production S3 bucket name for envelope graphs" +} diff --git a/terraform/environments/eks/providers.tf b/terraform/environments/eks/providers.tf new file mode 100644 index 00000000..cdbfe290 --- /dev/null +++ b/terraform/environments/eks/providers.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 5.50" + } + } +} + +# Configure the AWS provider. Region can also be set via the AWS_REGION env var. 
+provider "aws" { + region = "us-east-1" +} diff --git a/terraform/environments/eks/skooner/certificate.yaml b/terraform/environments/eks/skooner/certificate.yaml new file mode 100644 index 00000000..e200766d --- /dev/null +++ b/terraform/environments/eks/skooner/certificate.yaml @@ -0,0 +1,13 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: skooner-certificate + namespace: kube-system +spec: + secretName: skooner-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - status.credentialengineregistry.org + diff --git a/terraform/environments/eks/skooner/skooner.yaml b/terraform/environments/eks/skooner/skooner.yaml new file mode 100644 index 00000000..9c9f6e55 --- /dev/null +++ b/terraform/environments/eks/skooner/skooner.yaml @@ -0,0 +1,88 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: skooner-sa + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: skooner-sa +subjects: + - kind: ServiceAccount + name: skooner-sa + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: skooner + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: skooner + template: + metadata: + labels: + k8s-app: skooner + spec: + serviceAccountName: skooner-sa + containers: + - name: skooner + image: ghcr.io/skooner-k8s/skooner:stable + ports: + - containerPort: 4654 + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 4654 + initialDelaySeconds: 30 + timeoutSeconds: 30 + nodeSelector: + 'kubernetes.io/os': linux + +--- +apiVersion: v1 +kind: Service +metadata: + name: skooner + namespace: kube-system +spec: + ports: + - port: 80 + targetPort: 4654 + selector: + k8s-app: skooner + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: skooner-ingress + namespace: kube-system +spec: + ingressClassName: nginx + tls: + - hosts: + - status.credentialengineregistry.org + secretName: skooner-tls + rules: + - host: status.credentialengineregistry.org + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: skooner + port: + number: 80 diff --git a/terraform/environments/eks/terraform.tfvars b/terraform/environments/eks/terraform.tfvars new file mode 100644 index 00000000..8f7c6ab0 --- /dev/null +++ b/terraform/environments/eks/terraform.tfvars @@ -0,0 +1,79 @@ +public_subnet_cidrs = ["10.19.1.0/24", "10.19.2.0/24"] +private_subnet_cidrs = ["10.19.3.0/24", "10.19.4.0/24"] +azs = ["us-east-1a", "us-east-1b"] +vpc_cidr = "10.19.0.0/16" +env = "eks" +instance_class = "db.t4g.medium" ## DB instance +db_name_sandbox = "ceregistrysandbox" +db_name_staging = "ceregistrystaging" +db_name_prod = "ceregistryprod" + +ssm_db_password_arn = "arn:aws:ssm:us-east-1:996810415034:parameter/ce-registry/rds/rds_db_password" +image_tag_prod = "production" +image_tag_staging = "staging" +image_tag_sandbox = "sandbox" + +rds_engine_version = "17.5" +allocated_storage = 40 +cluster_version = 1.33 + +db_username_sandbox = "ceregistrysandbox" +db_username_staging = "ceregistrystaging" +db_username_prod = "ceregistryprod" + +priv_ng_max_size = 10 +priv_ng_min_size = 0 +priv_ng_des_size = 2 ## this is irrelevant since the cluster uses the autoscaler to determine the appropriate value for it +priv_ng_instance_type = "t3.large" +route53_hosted_zone_id = "Z1N75467P1FUL5" + +# Env node group scaling +ng_staging_min_size = 1 
+ng_staging_desired_size = 1 +ng_staging_max_size = 4 +ng_sandbox_min_size = 1 +ng_sandbox_desired_size = 1 +ng_sandbox_max_size = 4 +ng_prod_min_size = 2 +ng_prod_desired_size = 2 +ng_prod_max_size = 4 + +ecr_repository_name = "registry" +# --------------------------------------------------------------------------- +# Sensitive values for the Registry application secret. Provide real values via +# secure means (e.g. CI secrets, SSM Parameter Store) before running +# `terraform apply`. +# --------------------------------------------------------------------------- + +db_password_staging = "CHANGEME-db-pass" +secret_key_base_staging = "CHANGEME" +db_host_staging = "CHANGEME" +redis_url_staging = "CHANGEME" +sidekiq_username_staging = "CHANGEME" +sidekiq_password_staging = "CHANGEME" + +db_password_sandbox = "CHANGEME-db-pass" +secret_key_base_sandbox = "CHANGEME" +db_host_sandbox = "CHANGEME" +redis_url_sandbox = "CHANGEME" +sidekiq_username_sandbox = "CHANGEME" +sidekiq_password_sandbox = "CHANGEME" + +db_password_prod = "CHANGEME-db-pass" +secret_key_base_prod = "CHANGEME" +db_host_prod = "CHANGEME" +redis_url_prod = "CHANGEME" +sidekiq_username_prod = "CHANGEME" +sidekiq_password_prod = "CHANGEME" + +app_namespace_sandbox = "credreg-sandbox" +app_namespace_staging = "credreg-staging" +app_namespace_prod = "credreg-prod" +app_service_account_staging = "main-app-service-account" +app_service_account_prod = "main-app-service-account" +app_service_account_sandbox = "main-app-service-account" + +# S3 buckets for envelope graphs +envelope_graphs_bucket_name_staging = "cer-envelope-graphs-staging" +envelope_graphs_bucket_name_sandbox = "cer-envelope-graphs-sandb" +envelope_graphs_bucket_name_prod = "cer-envelope-graphs-prod" diff --git a/terraform/environments/eks/variables.tf b/terraform/environments/eks/variables.tf new file mode 100644 index 00000000..66b3a2d3 --- /dev/null +++ b/terraform/environments/eks/variables.tf @@ -0,0 +1,323 @@ +variable "env" { + description = "Environment name" + type = string +} + +# VPC Variables +variable "vpc_cidr" { + description = "VPC CIDR" +} + +variable "public_subnet_cidrs" { + description = "Public subnet CIDR blocks" +} + +variable "private_subnet_cidrs" { + description = "Private subnet CIDR blocks" +} + +variable "azs" { + type = list(string) + description = "Availability zones" +} + +variable "instance_class" { + type = string + description = "RDS instance class" +} + +variable "db_name_sandbox" { + type = string + description = "Sandbox DB instance name" +} +variable "db_name_staging" { + type = string + description = "Staging DB instance name" +} +variable "db_name_prod" { + type = string + description = "Production DB instance name" +} + +variable "db_username_sandbox" { + type = string + description = "Sandbox Database master username" +} + +variable "db_username_staging" { + type = string + description = "Staging Database master username" +} +variable "db_username_prod" { + type = string + description = "Production Database master username" +} + +variable "ssm_db_password_arn" { + type = string + description = "ARN of the SSM parameter holding the RDS DB password" +} + +variable "image_tag_sandbox" { + type = string + description = "Sandbox Image tag to deploy from ECR" +} +variable "image_tag_staging" { + type = string + description = "Staging Image tag to deploy from ECR" +} +variable "image_tag_prod" { + type = string + description = "Production Image tag to deploy from ECR" +} +variable "rds_engine_version" { + type = string + description = "RDS engine version" +} + +variable 
"allocated_storage" { + type = number + description = "RDS Allocated storage" +} + +variable "cluster_version" { + type = string + description = "Kubernetes version" +} + +variable "priv_ng_max_size" { + type = number + description = "EKS node group max size" +} + +variable "priv_ng_min_size" { + type = number + description = "EKS node group min size" +} + +variable "priv_ng_des_size" { + type = number + description = "EKS node group desired size" +} + +variable "priv_ng_instance_type" { + type = string + description = "EKS node group instance type" +} + +# Scaling for environment node groups +variable "ng_staging_min_size" { + type = number + description = "Staging node group min size" +} + +variable "ng_staging_desired_size" { + type = number + description = "Staging node group desired size" +} + +variable "ng_staging_max_size" { + type = number + description = "Staging node group max size" +} + +variable "ng_sandbox_min_size" { + type = number + description = "Sandbox node group min size" +} + +variable "ng_sandbox_desired_size" { + type = number + description = "Sandbox node group desired size" +} + +variable "ng_sandbox_max_size" { + type = number + description = "Sandbox node group max size" +} + +variable "ng_prod_min_size" { + type = number + description = "Production node group min size" +} + +variable "ng_prod_desired_size" { + type = number + description = "Production node group desired size" +} + +variable "ng_prod_max_size" { + type = number + description = "Production node group max size" +} + +# --------------------------------------------------------------------------- +# Secret values for Laravel application (stored in AWS Secrets Manager) +# --------------------------------------------------------------------------- + +variable "db_password_sandbox" { + type = string + description = "Primary database password (sensitive)" + sensitive = true +} +variable "db_password_staging" { + type = string + description = "Primary database password (sensitive)" + sensitive = true +} + +variable "db_password_prod" { + type = string + description = "Primary database password (sensitive)" + sensitive = true +} + +variable "secret_key_base_sandbox" { + type = string + description = "secret key base (sensitive)" + sensitive = true +} + +variable "secret_key_base_staging" { + type = string + description = "secret key base (sensitive)" + sensitive = true +} +variable "secret_key_base_prod" { + type = string + description = "secret key base (sensitive)" + sensitive = true +} + +variable "db_host_sandbox" { + type = string + description = "DB host url (sensitive)" + sensitive = true +} +variable "db_host_staging" { + type = string + description = "DB host url (sensitive)" + sensitive = true +} + +variable "db_host_prod" { + type = string + description = "DB host url (sensitive)" + sensitive = true +} + +variable "redis_url_sandbox" { + type = string + description = "Redis host url (sensitive)" + sensitive = true +} +variable "redis_url_staging" { + type = string + description = "Redis host url (sensitive)" + sensitive = true +} + +variable "redis_url_prod" { + type = string + description = "Redis host url (sensitive)" + sensitive = true +} + +variable "sidekiq_username_sandbox" { + type = string + description = "Sidekiq UI username (sensitive)" + sensitive = true +} + +variable "sidekiq_username_staging" { + type = string + description = "Sidekiq UI username (sensitive)" + sensitive = true +} + +variable "sidekiq_username_prod" { + type = string + description = "Sidekiq UI username (sensitive)" 
+ sensitive = true +} + +variable "sidekiq_password_sandbox" { + type = string + description = "Sidekiq UI password (sensitive)" + sensitive = true +} + +variable "sidekiq_password_staging" { + type = string + description = "Sidekiq UI password (sensitive)" + sensitive = true +} + +variable "sidekiq_password_prod" { + type = string + description = "Sidekiq UI password (sensitive)" + sensitive = true +} + +variable "route53_hosted_zone_id" { + description = "Route 53 hosted zone ID" + type = string +} + +variable "app_namespace_sandbox" { + description = "Sandbox K8s application namespace" + type = string +} + +variable "app_namespace_staging" { + description = "Staging K8s application namespace" + type = string +} + +variable "app_namespace_prod" { + description = "Production K8s application namespace" + type = string +} +variable "app_service_account_staging" { + description = "Staging K8s application service account name" + type = string +} + +# Deprecated: keep for backward compatibility (prefer app_service_account_staging) +variable "app_service_account" { + description = "[DEPRECATED] Use app_service_account_staging" + type = string + default = null +} + +variable "app_service_account_sandbox" { + description = "Sandbox K8s application service account name" + type = string + default = null +} + +variable "app_service_account_prod" { + description = "Production K8s application service account name" + type = string + default = null +} + +variable "ecr_repository_name" { + description = "Name of the AWS ECR repository" + type = string +} + +variable "envelope_graphs_bucket_name_staging" { + description = "S3 bucket name for envelope graphs (staging)" + type = string +} + +variable "envelope_graphs_bucket_name_sandbox" { + description = "S3 bucket name for envelope graphs (sandbox)" + type = string +} + +variable "envelope_graphs_bucket_name_prod" { + description = "S3 bucket name for envelope graphs (production)" + type = string +} diff --git a/vendor/.gitkeep b/vendor/.gitkeep new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/vendor/.gitkeep @@ -0,0 +1 @@ + diff --git a/vendor/grape-middleware-logger b/vendor/grape-middleware-logger new file mode 160000 index 00000000..646dbfec --- /dev/null +++ b/vendor/grape-middleware-logger @@ -0,0 +1 @@ +Subproject commit 646dbfec4abbfa8605efa932195f3473f2f412ea
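A possible usage sketch for the staging migration Job defined in db-migrate-job.yaml above (the namespace and manifest path come from this patch; cluster access and kubeconfig setup are assumed). Because the Job uses generateName, it has to be created rather than applied:

    kubectl -n credreg-staging create -f terraform/environments/eks/k8s-manifests-staging/db-migrate-job.yaml
    kubectl -n credreg-staging wait --for=condition=complete job -l app=main-app --timeout=15m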