From 80b608a8cb19b57c6aa35222ed008ad64a414e2a Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sat, 8 Mar 2025 21:35:29 -0500 Subject: [PATCH 01/29] feat: rewrite everything Rewrite everything in order to make implementations much more consistent between assertions of different types. --- .golangci.yaml | 263 +++++++ .mise.toml | 4 + assertions.go | 45 ++ go.mod | 8 +- go.sum | 18 + internal/assertion/assertion.go | 54 ++ internal/assertion/assertion_options.go | 89 +++ internal/assertion/common_assertion.go | 133 ++++ internal/assertion/list_options.go | 27 + internal/assertionhelpers/assertionhelpers.go | 211 +++++ internal/crds/crds.go | 111 +++ internal/crds/crds_test.go | 105 +++ internal/crds/testdata/crd.yaml | 254 +++++++ internal/deployments/1deployment_test.go | 445 +++++++++++ internal/deployments/3deployment_test.go | 503 ++++++++++++ internal/deployments/deployments.go | 718 ++++++++++++++++++ internal/deployments/deployments_test.go | 85 +++ .../deployments/testdata/bad-deployment.yaml | 60 ++ internal/deployments/testdata/config.yaml | 30 + internal/deployments/testdata/deployment.yaml | 52 ++ internal/namespaces/1namespace_test.go | 198 +++++ internal/namespaces/3namespace_test.go | 184 +++++ internal/namespaces/namespaces.go | 180 +++++ internal/namespaces/namespaces_test.go | 30 + .../testdata/restricted-namespace.yaml | 8 + .../testdata/unrestricted-namespace.yaml | 7 + internal/pods/1pod_test.go | 131 ++++ internal/pods/3pod_test.go | 150 ++++ internal/pods/pods.go | 182 +++++ internal/pods/pods_test.go | 40 + internal/pods/testdata/config.yaml | 30 + internal/pods/testdata/deployment.yaml | 49 ++ internal/pods/testdata/ready-pod.yaml | 40 + internal/secrets/1secret_test.go | 117 +++ internal/secrets/3secret_test.go | 167 ++++ internal/secrets/secrets.go | 194 +++++ internal/secrets/secrets_test.go | 36 + internal/secrets/testdata/secret.yaml | 9 + internal/testhelpers/test_asserts.go | 48 ++ internal/testhelpers/testhelpers.go | 93 +++ kubeassert.go | 297 -------- 41 files changed, 5107 insertions(+), 298 deletions(-) create mode 100644 .golangci.yaml create mode 100644 .mise.toml create mode 100644 assertions.go create mode 100644 internal/assertion/assertion.go create mode 100644 internal/assertion/assertion_options.go create mode 100644 internal/assertion/common_assertion.go create mode 100644 internal/assertion/list_options.go create mode 100644 internal/assertionhelpers/assertionhelpers.go create mode 100644 internal/crds/crds.go create mode 100644 internal/crds/crds_test.go create mode 100644 internal/crds/testdata/crd.yaml create mode 100644 internal/deployments/1deployment_test.go create mode 100644 internal/deployments/3deployment_test.go create mode 100644 internal/deployments/deployments.go create mode 100644 internal/deployments/deployments_test.go create mode 100644 internal/deployments/testdata/bad-deployment.yaml create mode 100644 internal/deployments/testdata/config.yaml create mode 100644 internal/deployments/testdata/deployment.yaml create mode 100644 internal/namespaces/1namespace_test.go create mode 100644 internal/namespaces/3namespace_test.go create mode 100644 internal/namespaces/namespaces.go create mode 100644 internal/namespaces/namespaces_test.go create mode 100644 internal/namespaces/testdata/restricted-namespace.yaml create mode 100644 internal/namespaces/testdata/unrestricted-namespace.yaml create mode 100644 internal/pods/1pod_test.go create mode 100644 internal/pods/3pod_test.go create mode 100644 internal/pods/pods.go create mode 
100644 internal/pods/pods_test.go create mode 100644 internal/pods/testdata/config.yaml create mode 100644 internal/pods/testdata/deployment.yaml create mode 100644 internal/pods/testdata/ready-pod.yaml create mode 100644 internal/secrets/1secret_test.go create mode 100644 internal/secrets/3secret_test.go create mode 100644 internal/secrets/secrets.go create mode 100644 internal/secrets/secrets_test.go create mode 100644 internal/secrets/testdata/secret.yaml create mode 100644 internal/testhelpers/test_asserts.go create mode 100644 internal/testhelpers/testhelpers.go diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000..01049de --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,263 @@ +run: + timeout: 5m +output: + formats: + - format: colored-line-number +issues: + uniq-by-line: false + exclude-use-default: false + exclude: + - EXC0001 + - EXC0003 + - EXC0004 + - EXC0005 + - EXC0006 + - EXC0007 + - EXC0008 + - EXC0009 + - EXC0011 + - EXC0010 + - EXC0013 + exclude-rules: + - text: "import-shadowing: The name 'require' shadows an import name" + linters: + - revive + - text: should have a package comment + path: main.go + linters: + - revive + - text: package comment should be of the form "(.+)... + linters: + - stylecheck + - revive + - path: "(.+)_test.go" + linters: + - dupl + - funlen + - err113 +linters: + enable: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + - asasalint + - asciicheck + - bidichk + - bodyclose + - containedctx + - contextcheck + - cyclop + - decorder + - depguard + - dogsled + - dupl + - dupword + - durationcheck + - err113 + - errchkjson + - errname + - errorlint + - exhaustive + - forbidigo + - forcetypeassert + - funlen + - gci + - ginkgolinter + - gocheckcompilerdirectives + - gochecknoinits + - gochecksumtype + - gocognit + - goconst + - gocritic + - gocyclo + - godot + - gofmt + - gofumpt + - goheader + - goimports + - gomodguard + - goprintffuncname + - gosec + - gosmopolitan + - grouper + - importas + - inamedparam + - interfacebloat + - ireturn + - lll + - loggercheck + - maintidx + - makezero + - mirror + - misspell + - mnd + - musttag + - nakedret + - nestif + - nilerr + - nilnil + - nlreturn + - noctx + - nolintlint + - nosprintfhostport + - perfsprint + - prealloc + - predeclared + - promlinter + - protogetter + - reassign + - revive + - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck + - stylecheck + - tagalign + - tagliatelle + - testableexamples + - testifylint + - testpackage + - unconvert + - unparam + - usestdlibvars + - usetesting + - varnamelen + - whitespace + - wsl + +linters-settings: + depguard: + rules: + main: + list-mode: lax + deny: + - pkg: github.com/pkg/errors + desc: Should be replaced by stdlib errors package + gci: + sections: + - standard + - default + - localmodule + custom-order: true + decorder: + ignore-underscore-vars: true + disable-dec-order-check: false + disable-init-func-first-check: false + disable-dec-num-check: false + disable-type-dec-num-check: false + disable-const-dec-num-check: false + disable-var-dec-num-check: false + varnamelen: + # tc: test cases for table driven tests + # t: interface for mocks in tests + ignore-names: [tc, t] + ignore-decls: + - "w http.ResponseWriter" + - "r *http.Request" + - "wg sync.WaitGroup" + - "eg errgroup.Group" + - "ao workflow.ActivityOptions" + revive: + ignore-generated-header: true + rules: + - name: argument-limit + - name: atomic + - name: banned-characters + - name: bare-return + - name: blank-imports + - name: 
bool-literal-in-expr + - name: call-to-gc + - name: comment-spacings + - name: confusing-results + - name: constant-logical-expr + - name: context-as-argument + arguments: + - allowTypesBefore: "*testing.T,*github.com/user/repo/testing.Harness" + - name: context-keys-type + - name: datarace + - name: deep-exit + - name: defer + - name: dot-imports + - name: duplicated-imports + - name: early-return + arguments: + - "preserveScope" + - name: empty-block + - name: empty-lines + - name: enforce-map-style + arguments: + - "make" + - name: enforce-repeated-arg-type-style + arguments: + - "short" + - name: enforce-slice-style + arguments: + - "make" + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + arguments: + - "sayRepetitiveInsteadOfStutters" + - name: flag-parameter + - name: function-result-limit + - name: get-return + - name: identical-branches + - name: if-return + - name: increment-decrement + - name: indent-error-flow + arguments: + - "preserveScope" + - name: import-alias-naming + arguments: + - "^[a-z][a-z0-9]{0,}$" + - name: import-shadowing + - name: max-control-nesting + - name: modifies-parameter + - name: modifies-value-receiver + - name: nested-structs + - name: optimize-operands-order + - name: package-comments + - name: range + - name: range-val-in-closure + - name: range-val-address + - name: receiver-naming + - name: redundant-import-alias + - name: redefines-builtin-id + - name: string-of-int + - name: struct-tag + arguments: + - "json,inline" + - "bson,outline,gnu" + - name: superfluous-else + arguments: + - "preserveScope" + - name: time-equal + - name: time-naming + - name: var-declaration + - name: unconditional-recursion + - name: unexported-naming + - name: unexported-return + - name: unhandled-error + - name: unnecessary-stmt + - name: unreachable-code + - name: unused-parameter + arguments: + - allowRegex: "^_" + - name: unused-receiver + - name: useless-break + - name: waitgroup-by-value + wsl: + # Allow cuddling with logging calls + allow-cuddle-with-calls: ["Lock", "RLock", "Info", "Debug", "Warn", "Error"] + funlen: + ignore-comments: true + stylecheck: + checks: ["all", "-ST1003"] + usetesting: + os-setenv: true + os-temp-dir: true diff --git a/.mise.toml b/.mise.toml new file mode 100644 index 0000000..ace8672 --- /dev/null +++ b/.mise.toml @@ -0,0 +1,4 @@ +[tools] +go = "latest" +"go:github.com/mitranim/gow" = "latest" +"go:gotest.tools/gotestsum" = "latest" diff --git a/assertions.go b/assertions.go new file mode 100644 index 0000000..4a9ec2d --- /dev/null +++ b/assertions.go @@ -0,0 +1,45 @@ +package kubeassert + +import ( + "github.com/DWSR/kubeassert-go/internal/assertion" + "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/crds" + "github.com/DWSR/kubeassert-go/internal/deployments" + "github.com/DWSR/kubeassert-go/internal/namespaces" + "github.com/DWSR/kubeassert-go/internal/pods" + "github.com/DWSR/kubeassert-go/internal/secrets" +) + +type ( + Assertion = assertion.Assertion + DeploymentAssertion = deployments.DeploymentAssertion + NamespaceAssertion = namespaces.NamespaceAssertion + CRDAssertion = crds.CRDAssertion + PodAssertion = pods.PodAssertion +) + +var ( + WithLabels = assertion.WithResourceLabels + WithFields = assertion.WithResourceFields + WithInterval = assertion.WithInterval + WithTimeout = assertion.WithTimeout + WithBuilder = assertion.WithBuilder + WithRequireT = assertion.WithRequireT + WithNamespace = 
assertion.WithResourceNamespace + WithNamespaceFromEnv = assertion.WithResourceNamespaceFromTestEnv + WithResourceName = assertion.WithResourceName + WithSetup = assertion.WithSetup + WithTeardown = assertion.WithTeardown + + NewDeploymentAssertion = deployments.NewDeploymentAssertion + NewNamespaceAssertion = namespaces.NewNamespaceAssertion + NewCRDAssertion = crds.NewCRDAssertion + NewPodAssertion = pods.NewPodAssertion + NewSecretAssertion = secrets.NewSecretAssertion + + ApplyKustomization = assertionhelpers.ApplyKustomization + CreateResourceFromPath = assertionhelpers.CreateResourceFromPath + DeleteResourceFromPath = assertionhelpers.DeleteResourceFromPath + Sleep = assertionhelpers.Sleep + TestAssertions = assertionhelpers.TestAssertions +) diff --git a/go.mod b/go.mod index f90c977..0334bf8 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,15 @@ module github.com/DWSR/kubeassert-go go 1.24.0 require ( + github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.10.0 k8s.io/api v0.32.2 k8s.io/apiextensions-apiserver v0.32.2 k8s.io/apimachinery v0.32.2 k8s.io/client-go v0.32.2 sigs.k8s.io/e2e-framework v0.6.0 + sigs.k8s.io/kustomize/api v0.19.0 + sigs.k8s.io/kustomize/kyaml v0.19.0 ) require ( @@ -23,6 +26,7 @@ require ( github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -30,7 +34,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.9 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect @@ -44,6 +47,7 @@ require ( github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect @@ -54,7 +58,9 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/vladimirvivien/gexe v0.4.1 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/mod v0.21.0 // indirect diff --git a/go.sum b/go.sum index 97e6062..0e5e9cc 100644 --- a/go.sum +++ b/go.sum @@ -27,6 +27,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.2 
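For orientation, a minimal consumer-level sketch of the re-exported kubeassert API defined in assertions.go above. The consumer package name, namespace, Deployment name, and manifest path are placeholders; the kind-cluster setup mirrors the internal test suites later in this patch:

package example_test

import (
    "os"
    "testing"

    kubeassert "github.com/DWSR/kubeassert-go"
    "sigs.k8s.io/e2e-framework/pkg/env"
    "sigs.k8s.io/e2e-framework/pkg/envconf"
    "sigs.k8s.io/e2e-framework/pkg/envfuncs"
    "sigs.k8s.io/e2e-framework/support/kind"
)

var testEnv env.Environment

func TestMain(m *testing.M) {
    // One throwaway kind cluster for the package, as the internal suites do.
    clusterName := envconf.RandomName("kind", 16)
    testEnv = env.New().
        Setup(envfuncs.CreateCluster(kind.NewProvider(), clusterName)).
        Finish(envfuncs.DestroyCluster(clusterName))

    os.Exit(testEnv.Run(m))
}

func Test_Deployment(t *testing.T) {
    // An assertion is a fluent chain that is ultimately run as an e2e-framework feature.
    deploy := kubeassert.NewDeploymentAssertion(
        kubeassert.WithNamespace("example"),        // placeholder namespace
        kubeassert.WithResourceName("example-app"), // placeholder Deployment name
        kubeassert.WithSetup(
            // placeholder manifest path, assumed to create the Deployment above
            kubeassert.CreateResourceFromPath("./testdata/deployment.yaml"),
        ),
    ).Exists().IsAvailable()

    // TestAssertions converts the assertions to features and runs them against testEnv.
    kubeassert.TestAssertions(t, testEnv, deploy)
}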
h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -92,6 +94,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= @@ -117,6 +121,8 @@ github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUz github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -124,7 +130,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -134,6 +143,8 @@ github.com/vladimirvivien/gexe v0.4.1 h1:W9gWkp8vSPjDoXDu04Yp4KljpVMaSt8IQuHswLD github.com/vladimirvivien/gexe v0.4.1/go.mod h1:3gjgTqE2c0VyHnU5UOIwk7gyNzZDGulPb/DJPgcw64E= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -141,6 +152,8 @@ go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= @@ -266,6 +279,7 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/gotestsum v1.12.0 h1:CmwtaGDkHxrZm4Ib0Vob89MTfpc3GrEFMJKovliPwGk= gotest.tools/gotestsum v1.12.0/go.mod h1:fAvqkSptospfSbQw26CTYzNwnsE/ztqLeyhP0h67ARY= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw= k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y= @@ -289,6 +303,10 @@ sigs.k8s.io/e2e-framework v0.6.0 h1:p7hFzHnLKO7eNsWGI2AbC1Mo2IYxidg49BiT4njxkrM= sigs.k8s.io/e2e-framework v0.6.0/go.mod h1:IREnCHnKgRCioLRmNi0hxSJ1kJ+aAdjEKK/gokcZu4k= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= +sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= +sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= +sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/internal/assertion/assertion.go b/internal/assertion/assertion.go new file mode 100644 index 0000000..85089b5 --- /dev/null +++ b/internal/assertion/assertion.go @@ -0,0 +1,54 @@ +package assertion + +import ( + "context" + "time" + + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerywait "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +type ( + Assertion interface { + setLabels(assertLabels map[string]string) + + GetLabels() map[string]string + + setFields(assertFields map[string]string) + + GetFields() map[string]string + + setListOptionsFn(fn listOptionsFunc) + + ListOptions(cfg *envconf.Config) metav1.ListOptions + + setInterval(interval time.Duration) + + GetInterval() time.Duration + + setTimeout(timeout time.Duration) + + GetTimeout() time.Duration + + GetBuilder() 
*features.FeatureBuilder + + SetBuilder(builder *features.FeatureBuilder) + + GetRequireT() require.TestingT + + setRequireT(t require.TestingT) + + WaitForCondition(ctx context.Context, conditionFunc apimachinerywait.ConditionWithContextFunc) error + + clone() Assertion + + AsFeature() features.Feature + } +) + +func CloneAssertion(a Assertion) Assertion { + return a.clone() +} diff --git a/internal/assertion/assertion_options.go b/internal/assertion/assertion_options.go new file mode 100644 index 0000000..40b6dbd --- /dev/null +++ b/internal/assertion/assertion_options.go @@ -0,0 +1,89 @@ +package assertion + +import ( + "time" + + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/pkg/features" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" +) + +type AssertionOption func(Assertion) + +func WithResourceLabels(labels map[string]string) AssertionOption { + return func(a Assertion) { + a.setLabels(labels) + } +} + +func WithResourceFields(fields map[string]string) AssertionOption { + return func(a Assertion) { + a.setFields(fields) + } +} + +func WithInterval(interval time.Duration) AssertionOption { + return func(a Assertion) { + a.setInterval(interval) + } +} + +func WithTimeout(timeout time.Duration) AssertionOption { + return func(a Assertion) { + a.setTimeout(timeout) + } +} + +func WithBuilder(builder *features.FeatureBuilder) AssertionOption { + return func(a Assertion) { + a.SetBuilder(builder) + } +} + +func WithRequireT(requireT require.TestingT) AssertionOption { + return func(a Assertion) { + a.setRequireT(requireT) + } +} + +func WithResourceNamespace(namespaceName string) AssertionOption { + return func(a Assertion) { + newFields := a.GetFields() + newFields["metadata.namespace"] = namespaceName + a.setFields(newFields) + } +} + +func WithResourceNamespaceFromTestEnv() AssertionOption { + return func(a Assertion) { + a.setListOptionsFn(listOptionsWithNamespaceFromEnv) + } +} + +func WithResourceName(name string) AssertionOption { + return func(a Assertion) { + newFields := a.GetFields() + newFields["metadata.name"] = name + a.setFields(newFields) + } +} + +func WithSetup(steps ...e2etypes.StepFunc) AssertionOption { + return func(a Assertion) { + builder := a.GetBuilder() + for _, s := range steps { + builder = builder.Setup(s) + } + a.SetBuilder(builder) + } +} + +func WithTeardown(steps ...e2etypes.StepFunc) AssertionOption { + return func(a Assertion) { + builder := a.GetBuilder() + for _, s := range steps { + builder = builder.Teardown(s) + } + a.SetBuilder(builder) + } +} diff --git a/internal/assertion/common_assertion.go b/internal/assertion/common_assertion.go new file mode 100644 index 0000000..c68c255 --- /dev/null +++ b/internal/assertion/common_assertion.go @@ -0,0 +1,133 @@ +package assertion + +import ( + "context" + "time" + + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + apimachinerywait "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +type ( + commonAssertion struct { + builder *features.FeatureBuilder + interval time.Duration + assertFields map[string]string + assertLabels map[string]string + labelRequirements labels.Requirements + timeout time.Duration + requireT require.TestingT + listOptionsFn listOptionsFunc + } +) + +const ( + defaultTimeout = 30 * time.Second + defaultInterval = 1 * time.Second +) + +func (ca *commonAssertion) 
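As a small illustration of how the options above compose: WithResourceName and WithResourceNamespace write the well-known metadata keys into the same field map that WithResourceFields fills directly. A sketch of an example test that could sit next to these options (values are placeholders):

package assertion_test

import (
    "fmt"

    "github.com/DWSR/kubeassert-go/internal/assertion"
)

func ExampleWithResourceName() {
    // Both options merge into the assertion's field map.
    a := assertion.NewAssertion(
        assertion.WithResourceName("example"),      // placeholder resource name
        assertion.WithResourceNamespace("default"), // placeholder namespace
    )

    fmt.Println(a.GetFields())
    // Output: map[metadata.name:example metadata.namespace:default]
}

The merged map is what the list-options funcs below flatten into the field selector string used for the list call.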
setLabels(assertLabels map[string]string) { + ca.assertLabels = assertLabels +} + +func (ca *commonAssertion) GetLabels() map[string]string { + return ca.assertLabels +} + +func (ca *commonAssertion) setFields(assertFields map[string]string) { + ca.assertFields = assertFields +} + +func (ca *commonAssertion) GetFields() map[string]string { + return ca.assertFields +} + +func (ca *commonAssertion) setInterval(interval time.Duration) { + ca.interval = interval +} + +func (ca *commonAssertion) GetInterval() time.Duration { + return ca.interval +} + +func (ca *commonAssertion) setTimeout(timeout time.Duration) { + ca.timeout = timeout +} + +func (ca *commonAssertion) GetTimeout() time.Duration { + return ca.timeout +} + +func (ca *commonAssertion) SetBuilder(builder *features.FeatureBuilder) { + ca.builder = builder +} + +func (ca *commonAssertion) GetBuilder() *features.FeatureBuilder { + return ca.builder +} + +func (ca *commonAssertion) setRequireT(requireT require.TestingT) { + ca.requireT = requireT +} + +func (ca *commonAssertion) GetRequireT() require.TestingT { + return ca.requireT +} + +func (ca *commonAssertion) AsFeature() features.Feature { + return ca.builder.Feature() +} + +func (ca *commonAssertion) setListOptionsFn(fn listOptionsFunc) { + ca.listOptionsFn = fn +} + +func (ca *commonAssertion) ListOptions(cfg *envconf.Config) metav1.ListOptions { + return ca.listOptionsFn(ca, cfg) +} + +func (ca *commonAssertion) clone() Assertion { + return &commonAssertion{ + builder: ca.builder, + interval: ca.interval, + assertFields: ca.assertFields, + assertLabels: ca.assertLabels, + labelRequirements: ca.labelRequirements, + timeout: ca.timeout, + requireT: ca.requireT, + listOptionsFn: ca.listOptionsFn, + } +} + +func (ca *commonAssertion) WaitForCondition(ctx context.Context, conditionFunc apimachinerywait.ConditionWithContextFunc) error { + return wait.For( + conditionFunc, + wait.WithContext(ctx), + wait.WithTimeout(ca.timeout), + wait.WithInterval(ca.interval), + wait.WithImmediate(), + ) +} + +func NewAssertion(opts ...AssertionOption) Assertion { + assertion := commonAssertion{ + builder: features.New("default"), + assertFields: map[string]string{}, + assertLabels: map[string]string{}, + timeout: defaultTimeout, + interval: defaultInterval, + requireT: nil, + listOptionsFn: defaultListOptions, + } + + for _, opt := range opts { + opt(&assertion) + } + + return &assertion +} diff --git a/internal/assertion/list_options.go b/internal/assertion/list_options.go new file mode 100644 index 0000000..71e38c5 --- /dev/null +++ b/internal/assertion/list_options.go @@ -0,0 +1,27 @@ +package assertion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/e2e-framework/pkg/envconf" +) + +type listOptionsFunc func(*commonAssertion, *envconf.Config) metav1.ListOptions + +func defaultListOptions(ca *commonAssertion, cfg *envconf.Config) metav1.ListOptions { + return metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set(ca.assertLabels)).String(), + FieldSelector: fields.SelectorFromSet(fields.Set(ca.assertFields)).String(), + } +} + +func listOptionsWithNamespaceFromEnv(ca *commonAssertion, cfg *envconf.Config) metav1.ListOptions { + selectorFields := fields.Set(ca.assertFields) + selectorFields["metadata.namespace"] = cfg.Namespace() + + return metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set(ca.assertLabels)).String(), + FieldSelector: 
fields.SelectorFromSet(selectorFields).String(), + } +} diff --git a/internal/assertionhelpers/assertionhelpers.go b/internal/assertionhelpers/assertionhelpers.go new file mode 100644 index 0000000..eeee655 --- /dev/null +++ b/internal/assertionhelpers/assertionhelpers.go @@ -0,0 +1,211 @@ +package assertionhelpers + +import ( + "bytes" + "context" + "log/slog" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/restmapper" + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" + "sigs.k8s.io/kustomize/api/krusty" + kusttypes "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/filesys" + + "github.com/DWSR/kubeassert-go/internal/assertion" +) + +func CreateResourceFromPathWithNamespaceFromEnv(resourcePath string, decoderOpts ...decoder.DecodeOption) e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + nsName := cfg.Namespace() + + r, err := resources.New(cfg.Client().RESTConfig()) + require.NoError(t, err) + + file, err := os.Open(filepath.Clean(resourcePath)) + require.NoError(t, err) + defer func() { _ = file.Close() }() + + err = decoder.DecodeEach(ctx, file, decoder.CreateHandler(r), append(decoderOpts, decoder.MutateNamespace(nsName))...) + require.NoError(t, err) + + return ctx + } +} + +func CreateResourceFromPath(resourcePath string, decoderOpts ...decoder.DecodeOption) e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + r, err := resources.New(cfg.Client().RESTConfig()) + require.NoError(t, err) + + file, err := os.Open(filepath.Clean(resourcePath)) + require.NoError(t, err) + defer func() { _ = file.Close() }() + + err = decoder.DecodeEach(ctx, file, decoder.CreateHandler(r), decoderOpts...) + require.NoError(t, err) + + return ctx + } +} + +func DeleteResourceFromPath(resourcePath string, decoderOpts ...decoder.DecodeOption) e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + r, err := resources.New(cfg.Client().RESTConfig()) + require.NoError(t, err) + + file, err := os.Open(filepath.Clean(resourcePath)) + require.NoError(t, err) + defer func() { _ = file.Close() }() + + err = decoder.DecodeEach(ctx, file, decoder.DeleteHandler(r), decoderOpts...) 
+ require.NoError(t, err) + + return ctx + } +} + +func Sleep(sleepTime time.Duration) e2etypes.StepFunc { + return func(ctx context.Context, _ *testing.T, _ *envconf.Config) context.Context { + time.Sleep(sleepTime) + + return ctx + } +} + +func DynamicClientFromEnvconf(t require.TestingT, cfg *envconf.Config) *dynamic.DynamicClient { + klient, err := cfg.NewClient() + require.NoError(t, err) + + client, err := dynamic.NewForConfig(klient.RESTConfig()) + require.NoError(t, err) + + return client +} + +func RequireTIfNotNil(testingT *testing.T, requireT require.TestingT) require.TestingT { + if requireT != nil { + return requireT + } + + return testingT +} + +func TestAssertions(t *testing.T, testEnv env.Environment, assertions ...assertion.Assertion) { + tests := make([]e2etypes.Feature, 0, len(assertions)) + + for _, assertion := range assertions { + tests = append(tests, assertion.AsFeature()) + } + + testEnv.Test(t, tests...) +} + +func ApplyKustomization(kustDir string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + diskFS := filesys.MakeFsOnDisk() + opts := krusty.MakeDefaultOptions() + opts.PluginConfig.HelmConfig = kusttypes.HelmConfig{ + Enabled: true, + Command: "helm", + Debug: false, + } + opts.PluginConfig.FnpLoadingOptions.Network = true + opts.LoadRestrictions = kusttypes.LoadRestrictionsNone + opts.Reorder = krusty.ReorderOptionLegacy + kust := krusty.MakeKustomizer(opts) + + slog.Debug("rendering kustomization") + + resMap, err := kust.Run(diskFS, kustDir) + if err != nil { + return ctx, err + } + + slog.Debug("creating client") + + klient, err := cfg.NewClient() + if err != nil { + return ctx, err + } + + client, err := dynamic.NewForConfig(klient.RESTConfig()) + if err != nil { + return ctx, err + } + + slog.Debug("applying kustomization") + + for _, res := range resMap.Resources() { + // Do this inside the loop to account for new CRDs, etc. 
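RequireTIfNotNil above is what allows an assertion to report into an injected require.TestingT rather than the live *testing.T. One way to use that, sketched with a hypothetical recorder type (the bundled test helpers may capture failures differently), is to pass a capturing implementation through WithRequireT when a failure is the expected outcome:

// recordingT satisfies require.TestingT (Errorf plus FailNow) and only records
// that a failure happened instead of aborting the surrounding test. A fuller
// recorder would also halt the goroutine, which is what require expects FailNow
// to do.
type recordingT struct{ failed bool }

func (r *recordingT) Errorf(format string, args ...interface{}) { r.failed = true }

func (r *recordingT) FailNow() { r.failed = true }

// Passing the recorder means a timed-out WaitForCondition marks rec.failed
// rather than failing the real test:
//
//    rec := &recordingT{}
//    a := crds.NewCRDAssertion(
//        assertion.WithRequireT(rec),
//        assertion.WithTimeout(500*time.Millisecond),
//        assertion.WithInterval(100*time.Millisecond),
//        assertion.WithResourceName("does-not-exist.example.com"), // placeholder CRD name
//    ).Exists()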
that get applied + slog.Debug("creating resource mapper") + + discoveryClient, err := discovery.NewDiscoveryClientForConfig(klient.RESTConfig()) + if err != nil { + return ctx, err + } + + gr, err := restmapper.GetAPIGroupResources(discoveryClient) + if err != nil { + return ctx, err + } + + restMapper := restmapper.NewDiscoveryRESTMapper(gr) + + slog.Debug("transmuting resMap resource to unstructured") + yamlBytes, err := res.AsYAML() + if err != nil { + return ctx, err + } + + obj := &unstructured.Unstructured{} + + decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(yamlBytes), len(yamlBytes)) + if err := decoder.Decode(obj); err != nil { + return ctx, err + } + + gvk := obj.GroupVersionKind() + + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return ctx, err + } + + slog.Debug("applying resource", "kind", obj.GetKind(), "name", obj.GetName(), "gvr", mapping.Resource) + + var resourceClient dynamic.ResourceInterface + + switch mapping.Scope.Name() { + case meta.RESTScopeNameNamespace: + resourceClient = client.Resource(mapping.Resource).Namespace(obj.GetNamespace()) + case meta.RESTScopeNameRoot: + resourceClient = client.Resource(mapping.Resource) + } + + _, err = resourceClient.Apply(ctx, obj.GetName(), obj, metav1.ApplyOptions{ + Force: true, + FieldManager: "e2e-test", + }) + if err != nil { + return ctx, err + } + } + + return ctx, nil + } +} diff --git a/internal/crds/crds.go b/internal/crds/crds.go new file mode 100644 index 0000000..c496ae3 --- /dev/null +++ b/internal/crds/crds.go @@ -0,0 +1,111 @@ +package crds + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type CRDAssertion struct { + assertion.Assertion +} + +func (ca CRDAssertion) clone() CRDAssertion { + return CRDAssertion{ + Assertion: assertion.CloneAssertion(ca.Assertion), + } +} + +func (ca CRDAssertion) Exists() CRDAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, ca.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + pods, err := ca.getCRDs(ctx, t, cfg) + require.NoError(t, err) + + return len(pods.Items) == 1, nil + } + + require.NoError(t, ca.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := ca.clone() + res.SetBuilder(res.GetBuilder().Assess("exists", fn)) + + return res +} + +func (ca CRDAssertion) getCRDs(ctx context.Context, t require.TestingT, cfg *envconf.Config) (extv1.CustomResourceDefinitionList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var crdList extv1.CustomResourceDefinitionList + + list, err := client. + Resource(extv1.SchemeGroupVersion.WithResource("customresourcedefinitions")). 
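ApplyKustomization above returns an env.Func, so it chains directly into an environment's setup alongside the cluster provisioning funcs. Continuing the earlier consumer sketch, with a placeholder overlay directory:

// Continuing the earlier TestMain sketch; the overlay directory is a placeholder.
testEnv = env.New().
    Setup(
        envfuncs.CreateCluster(kind.NewProvider(), clusterName),
        // Renders the kustomization (Helm charts included, since the helm generator
        // is enabled) and server-side-applies each object with force and the
        // "e2e-test" field manager.
        kubeassert.ApplyKustomization("./deploy/overlays/e2e"),
    ).
    Finish(envfuncs.DestroyCluster(clusterName))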
+ List(ctx, ca.ListOptions(cfg)) + if err != nil { + return crdList, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &crdList) + if err != nil { + return crdList, err + } + + return crdList, nil +} + +func (ca CRDAssertion) HasVersion(crdVersion string) CRDAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, ca.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + crds, err := ca.getCRDs(ctx, t, cfg) + require.NoError(t, err) + + if len(crds.Items) != 1 { + return false, nil + } + + foundVersion := false + + for _, version := range crds.Items[0].Spec.Versions { + if version.Name == crdVersion { + foundVersion = true + break + } + } + + return foundVersion, nil + } + + require.NoError(t, ca.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := ca.clone() + res.SetBuilder(res.GetBuilder().Assess("hasVersion", fn)) + + return res +} + +func NewCRDAssertion(opts ...assertion.AssertionOption) CRDAssertion { + return CRDAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.AssertionOption{assertion.WithBuilder(features.New("CRD").WithLabel("type", "customresourcedefinition"))}, + opts..., + )..., + ), + } +} diff --git a/internal/crds/crds_test.go b/internal/crds/crds_test.go new file mode 100644 index 0000000..3c2f447 --- /dev/null +++ b/internal/crds/crds_test.go @@ -0,0 +1,105 @@ +package crds_test + +import ( + "log/slog" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + "sigs.k8s.io/e2e-framework/support/kind" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/crds" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +const crdPath = "./testdata/crd.yaml" + +var testEnv env.Environment + +func TestMain(m *testing.M) { + kindClusterName := envconf.RandomName("kind", 16) + + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))) + + testEnv = env.New(). + Setup( + envfuncs.CreateCluster(kind.NewProvider(), kindClusterName), + ). + Finish( + envfuncs.DestroyCluster(kindClusterName), + ) + + os.Exit(testEnv.Run(m)) +} + +func Test_CRD_Success(t *testing.T) { + asserts := []testhelpers.SuccessfulAssert{ + { + Name: "Exists", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return crds.NewCRDAssertion( + assertion.WithResourceName("orders.acme.cert-manager.io"), + assertion.WithSetup(helpers.CreateResourceFromPath(crdPath)), + assertion.WithTeardown( + helpers.DeleteResourceFromPath(crdPath), + helpers.Sleep(5*time.Second), + ), + ).Exists() + }, + }, + { + Name: "HasVersion", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return crds.NewCRDAssertion( + assertion.WithResourceName("orders.acme.cert-manager.io"), + assertion.WithSetup(helpers.CreateResourceFromPath(crdPath)), + assertion.WithTeardown( + helpers.DeleteResourceFromPath(crdPath), + helpers.Sleep(5*time.Second), + ), + ).Exists().HasVersion("v1") + }, + }, + } + + testhelpers.TestSuccessfulAsserts(t, testEnv, asserts...) 
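Exists and HasVersion above show the pattern every assertion in this rewrite follows: clone the assertion, append an Assess step to the feature builder, and poll the cluster through WaitForCondition. A hypothetical in-package sketch of adding another CRD check the same way (HasScope is not part of this patch; it is illustrative only):

// HasScope is NOT part of this patch; it sketches how a further check would be
// added with the same clone/Assess/WaitForCondition structure.
func (ca CRDAssertion) HasScope(scope extv1.ResourceScope) CRDAssertion {
    fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context {
        t := helpers.RequireTIfNotNil(testingT, ca.GetRequireT())
        conditionFunc := func(ctx context.Context) (bool, error) {
            crds, err := ca.getCRDs(ctx, t, cfg)
            require.NoError(t, err)

            return len(crds.Items) == 1 && crds.Items[0].Spec.Scope == scope, nil
        }

        require.NoError(t, ca.WaitForCondition(ctx, conditionFunc))

        return ctx
    }

    res := ca.clone()
    res.SetBuilder(res.GetBuilder().Assess("hasScope", fn))

    return res
}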
+} + +func Test_CRD_Fail(t *testing.T) { + asserts := []testhelpers.FailingAssert{ + { + Name: "Exists", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return crds.NewCRDAssertion( + assertion.WithRequireT(t), + assertion.WithInterval(100*time.Millisecond), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithResourceName("orders.acme.cert-manager.io"), + ).Exists() + }, + }, + { + Name: "HasVersion", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return crds.NewCRDAssertion( + assertion.WithRequireT(t), + assertion.WithInterval(100*time.Millisecond), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithResourceName("orders.acme.cert-manager.io"), + assertion.WithSetup(helpers.CreateResourceFromPath(crdPath)), + assertion.WithTeardown( + helpers.DeleteResourceFromPath(crdPath), + helpers.Sleep(5*time.Second), + ), + ).Exists().HasVersion("v1alpha1") + }, + }, + } + testhelpers.TestFailingAsserts(t, testEnv, asserts...) +} diff --git a/internal/crds/testdata/crd.yaml b/internal/crds/testdata/crd.yaml new file mode 100644 index 0000000..3d34adf --- /dev/null +++ b/internal/crds/testdata/crd.yaml @@ -0,0 +1,254 @@ +# from https://github.com/cert-manager/cert-manager/blob/ee6a12d57a8947e37368ac6ffeaed4868a744e00/deploy/crds/crd-orders.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: orders.acme.cert-manager.io +spec: + group: acme.cert-manager.io + names: + kind: Order + listKind: OrderList + plural: orders + singular: order + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + type: object + required: + - issuerRef + - request + properties: + commonName: + description: |- + CommonName is the common name as specified on the DER encoded CSR. + If specified, this value must also be present in `dnsNames` or `ipAddresses`. + This field must match the corresponding field on the DER encoded CSR. 
+ type: string + dnsNames: + description: |- + DNSNames is a list of DNS names that should be included as part of the Order + validation process. + This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + duration: + description: |- + Duration is the duration for the not after date for the requested certificate. + this is set on order creation as pe the ACME spec. + type: string + ipAddresses: + description: |- + IPAddresses is a list of IP addresses that should be included as part of the Order + validation process. + This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + issuerRef: + description: |- + IssuerRef references a properly configured ACME-type Issuer which should + be used to create this Order. + If the Issuer does not exist, processing will be retried. + If the Issuer is not an 'ACME' Issuer, an error will be returned and the + Order will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: |- + Certificate signing request bytes in DER encoding. + This will be used when finalizing the order. + This field must be set on the order. + type: string + format: byte + status: + type: object + properties: + authorizations: + description: |- + Authorizations contains data returned from the ACME server on what + authorizations must be completed in order to validate the DNS names + specified on the Order. + type: array + items: + description: |- + ACMEAuthorization contains data returned from the ACME server on an + authorization that must be completed in order validate a DNS name on an ACME + Order resource. + type: object + required: + - url + properties: + challenges: + description: |- + Challenges specifies the challenge types offered by the ACME server. + One of these challenge types will be selected when validating the DNS + name and an appropriate Challenge resource will be created to perform + the ACME challenge process. + type: array + items: + description: |- + Challenge specifies a challenge offered by the ACME server for an Order. + An appropriate Challenge resource can be created to perform the ACME + challenge process. + type: object + required: + - token + - type + - url + properties: + token: + description: |- + Token is the token that must be presented for this challenge. + This is used to compute the 'key' that must also be presented. + type: string + type: + description: |- + Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', + 'tls-sni-01', etc. + This is the raw value retrieved from the ACME server. + Only 'http-01' and 'dns-01' are supported by cert-manager, other values + will be ignored. + type: string + url: + description: |- + URL is the URL of this challenge. It can be used to retrieve additional + metadata about the Challenge from the ACME server. + type: string + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: |- + InitialState is the initial state of the ACME authorization when first + fetched from the ACME server. + If an Authorization is already 'valid', the Order controller will not + create a Challenge resource for the authorization. 
This will occur when + working with an ACME server that enables 'authz reuse' (such as Let's + Encrypt's production endpoint). + If not set and 'identifier' is set, the state is assumed to be pending + and a Challenge will be created. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: |- + Wildcard will be true if this authorization is for a wildcard DNS name. + If this is true, the identifier will be the *non-wildcard* version of + the DNS name. + For example, if '*.example.com' is the DNS name being validated, this + field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + certificate: + description: |- + Certificate is a copy of the PEM encoded certificate for this Order. + This field will be populated after the order has been successfully + finalized with the ACME server, and the order has transitioned to the + 'valid' state. + type: string + format: byte + failureTime: + description: |- + FailureTime stores the time that this order failed. + This is used to influence garbage collection and back-off. + type: string + format: date-time + finalizeURL: + description: |- + FinalizeURL of the Order. + This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: |- + Reason optionally provides more information about a why the order is in + the current state. + type: string + state: + description: |- + State contains the current state of this Order resource. + States 'success' and 'expired' are 'final' + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: |- + URL of the Order. + This will initially be empty when the resource is first created. + The Order controller will populate this field when the Order is first processed. + This field will be immutable after it is initially set. 
+ type: string + served: true + storage: true diff --git a/internal/deployments/1deployment_test.go b/internal/deployments/1deployment_test.go new file mode 100644 index 0000000..0ae1e39 --- /dev/null +++ b/internal/deployments/1deployment_test.go @@ -0,0 +1,445 @@ +package deployments_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/deployments" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +const ( + badDeploymentPath = "./testdata/bad-deployment.yaml" + deploymentPath = "./testdata/deployment.yaml" + configPath = "./testdata/config.yaml" +) + +func Test_1Deployment_Success(t *testing.T) { + asserts := []testhelpers.SuccessfulAssert{ + { + Name: "Exists_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists() + }, + }, + { + Name: "Exists_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists() + }, + }, + { + Name: "IsAvailable_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().IsAvailable() + }, + }, + { + Name: "IsAvailable_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().IsAvailable() + }, + }, + { + Name: "IsSystemClusterCritical_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().IsSystemClusterCritical() + }, + }, + { + Name: "IsSystemClusterCritical_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + 
helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().IsSystemClusterCritical() + }, + }, + { + Name: "HasNoCPULimits_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasNoCPULimits() + }, + }, + { + Name: "HasNoCPULimits_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasNoCPULimits() + }, + }, + { + Name: "HasMemoryLimitsEqualToRequests_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasMemoryLimitsEqualToRequests() + }, + }, + { + Name: "HasMemoryLimitsEqualToRequests_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasMemoryLimitsEqualToRequests() + }, + }, + { + Name: "HasMemoryLimits_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasMemoryLimits() + }, + }, + { + Name: "HasMemoryLimits_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasMemoryLimits() + }, + }, + { + Name: "HasMemoryRequests_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasMemoryRequests() + }, + }, + { + Name: 
"HasMemoryRequests_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasMemoryRequests() + }, + }, + { + Name: "HasCPURequests_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasCPURequests() + }, + }, + { + Name: "HasCPURequests_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + ), + ).Exists().HasCPURequests() + }, + }, + } + + testhelpers.TestSuccessfulAsserts(t, testEnv, asserts...) +} + +func Test_1Deployment_Fail(t *testing.T) { + asserts := []testhelpers.FailingAssert{ + { + Name: "Exists_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + ).Exists() + }, + }, + { + Name: "Exists_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + ).Exists() + }, + }, + { + Name: "IsAvailable_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + ), + ).Exists().IsAvailable() + }, + }, + { + Name: "IsAvailable_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deploymentPath), + ), + ).Exists().IsAvailable() + }, + }, + { + Name: "IsSystemClusterCritical_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( 
+ assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().IsSystemClusterCritical() + }, + }, + { + Name: "IsSystemClusterCritical_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().IsSystemClusterCritical() + }, + }, + { + Name: "HasNoCPULimits_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().HasNoCPULimits() + }, + }, + { + Name: "HasNoCPULimits_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().HasNoCPULimits() + }, + }, + { + Name: "HasMemoryLimitsEqualToRequests_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().HasMemoryLimitsEqualToRequests() + }, + }, + { + Name: "HasMemoryLimitsEqualToRequests_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().HasMemoryLimitsEqualToRequests() + }, + }, + { + Name: "HasMemoryLimits_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + 
assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().HasMemoryLimits() + }, + }, + { + Name: "HasMemoryLimits_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().HasMemoryLimits() + }, + }, + { + Name: "HasCPURequests_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "deployments_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().HasCPURequests() + }, + }, + { + Name: "HasCPURequests_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-deployment"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(badDeploymentPath), + ), + ).Exists().HasCPURequests() + }, + }, + } + + testhelpers.TestFailingAsserts(t, testEnv, asserts...) +} diff --git a/internal/deployments/3deployment_test.go b/internal/deployments/3deployment_test.go new file mode 100644 index 0000000..8daf49e --- /dev/null +++ b/internal/deployments/3deployment_test.go @@ -0,0 +1,503 @@ +package deployments_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/deployments" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +func Test_3Deployments_Success(t *testing.T) { + asserts := []testhelpers.SuccessfulAssert{ + { + Name: "ExactlyNExist", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3) + }, + }, + { + Name: "AtLeastNExist", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).AtLeastNExist(2) + }, + }, + { + Name: "ExactlyNAreAvailable", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + 
assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).ExactlyNAreAvailable(3) + }, + }, + { + Name: "AtLeastNAreAvailable", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).AtLeastNAreAvailable(2) + }, + }, + { + Name: "ExactlyNAreSystemClusterCritical", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).ExactlyNAreSystemClusterCritical(3) + }, + }, + { + Name: "AtLeastNAreSystemClusterCritical", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).AtLeastNAreSystemClusterCritical(2) + }, + }, + { + Name: "ExactlyNHaveNoCPULimits", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).ExactlyNHaveNoCPULimits(3) + }, + }, + { + Name: "AtLeastNHaveNoCPULimits", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).AtLeastNHaveNoCPULimits(2) + }, + }, + { + Name: "ExactlyNHaveMemoryLimitsEqualToRequests", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + 
append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).ExactlyNHaveMemoryLimitsEqualToRequests(3) + }, + }, + { + Name: "AtLeastNHaveMemoryLimitsEqualToRequests", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).AtLeastNHaveMemoryLimitsEqualToRequests(2) + }, + }, + { + Name: "ExactlyNHaveMemoryLimits", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).ExactlyNHaveMemoryLimits(3) + }, + }, + { + Name: "AtLeastNHaveMemoryLimits", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).AtLeastNHaveMemoryLimits(2) + }, + }, + { + Name: "ExactlyNHaveMemoryRequests", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).ExactlyNHaveMemoryRequests(3) + }, + }, + { + Name: "AtLeastNHaveMemoryRequests", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).AtLeastNHaveMemoryRequests(2) + }, + }, + { + Name: "ExactlyNHaveCPURequests", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + 
).ExactlyNExist(3).ExactlyNHaveCPURequests(3) + }, + }, + { + Name: "AtLeastNHaveCPURequests", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).AtLeastNHaveCPURequests(2) + }, + }, + } + + testhelpers.TestSuccessfulAsserts(t, testEnv, asserts...) +} + +func Test_3Deployments_Fail(t *testing.T) { + asserts := []testhelpers.FailingAssert{ + { + Name: "ExactlyNExist", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createBadDeploys(deployNames)...), + ).ExactlyNExist(2) + }, + }, + { + Name: "AtLeastNExist", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createBadDeploys(deployNames)...), + ).AtLeastNExist(4) + }, + }, + { + Name: "ExactlyNAreAvailable", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(15*time.Second), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup( + append([]e2etypes.StepFunc{helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath)}, + createGoodDeploys(deployNames)...)..., + ), + ).ExactlyNExist(3).ExactlyNAreAvailable(2) + }, + }, + { + Name: "AtLeastNAreAvailable", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(15*time.Second), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).AtLeastNAreAvailable(4) + }, + }, + { + Name: "ExactlyNAreSystemClusterCritical", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + 
).ExactlyNExist(3).ExactlyNAreSystemClusterCritical(2) + }, + }, + { + Name: "AtLeastNAreSystemClusterCritical", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).AtLeastNAreSystemClusterCritical(4) + }, + }, + { + Name: "ExactlyNHaveNoCPULimits", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).ExactlyNHaveNoCPULimits(2) + }, + }, + { + Name: "AtLeastNHaveNoCPULimits", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).AtLeastNHaveNoCPULimits(4) + }, + }, + { + Name: "ExactlyNHaveMemoryLimitsEqualToRequests", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).ExactlyNHaveMemoryLimitsEqualToRequests(2) + }, + }, + { + Name: "AtLeastNHaveMemoryLimitsEqualToRequests", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).AtLeastNHaveMemoryLimitsEqualToRequests(4) + }, + }, + { + Name: "ExactlyNHaveMemoryLimits", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + 
).ExactlyNExist(3).ExactlyNHaveMemoryLimits(2) + }, + }, + { + Name: "AtLeastNHaveMemoryLimits", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).AtLeastNHaveMemoryLimits(4) + }, + }, + { + Name: "ExactlyNHaveMemoryRequests", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).ExactlyNHaveMemoryRequests(2) + }, + }, + { + Name: "AtLeastNHaveMemoryRequests", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).AtLeastNHaveMemoryRequests(4) + }, + }, + { + Name: "ExactlyNHaveCPURequests", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).ExactlyNHaveCPURequests(2) + }, + }, + { + Name: "AtLeastNHaveCPURequests", + FailingAssert: func(t require.TestingT) assertion.Assertion { + deployNames := generateDeploymentNames() + + return deployments.NewDeploymentAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), + assertion.WithSetup(createGoodDeploys(deployNames)...), + ).ExactlyNExist(3).AtLeastNHaveCPURequests(4) + }, + }, + } + + testhelpers.TestFailingAsserts(t, testEnv, asserts...) 
+} diff --git a/internal/deployments/deployments.go b/internal/deployments/deployments.go new file mode 100644 index 0000000..8906bb1 --- /dev/null +++ b/internal/deployments/deployments.go @@ -0,0 +1,718 @@ +package deployments + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type DeploymentAssertion struct { + assertion.Assertion +} + +func (da DeploymentAssertion) clone() DeploymentAssertion { + return DeploymentAssertion{ + Assertion: assertion.CloneAssertion(da.Assertion), + } +} + +func (da DeploymentAssertion) Exists() DeploymentAssertion { + return da.ExactlyNExist(1) +} + +func (da DeploymentAssertion) ExactlyNExist(count int) DeploymentAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + return len(deploys.Items) == count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", fn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNExist(count int) DeploymentAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + return len(deploys.Items) >= count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", fn)) + + return res +} + +func (da DeploymentAssertion) getDeployments(ctx context.Context, t require.TestingT, cfg *envconf.Config) (appsv1.DeploymentList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var deploys appsv1.DeploymentList + + list, err := client. + Resource(appsv1.SchemeGroupVersion.WithResource("deployments")). 
+ List(ctx, da.ListOptions(cfg)) + if err != nil { + return deploys, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &deploys) + if err != nil { + return deploys, err + } + + return deploys, nil +} + +func (da DeploymentAssertion) IsAvailable() DeploymentAssertion { + return da.ExactlyNAreAvailable(1) +} + +func (da DeploymentAssertion) ExactlyNAreAvailable(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + availableCount := 0 + + for _, deploy := range deploys.Items { + for _, condition := range deploy.Status.Conditions { + if condition.Type == appsv1.DeploymentAvailable && condition.Status == corev1.ConditionTrue { + availableCount += 1 + } + } + } + + return availableCount == count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNAreAvailable", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNAreAvailable(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + availableCount := 0 + + for _, deploy := range deploys.Items { + for _, condition := range deploy.Status.Conditions { + if condition.Type == appsv1.DeploymentAvailable && condition.Status == corev1.ConditionTrue { + availableCount++ + } + } + } + + return availableCount >= count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNAreAvailable", stepFn)) + + return res +} + +func (da DeploymentAssertion) IsSystemClusterCritical() DeploymentAssertion { + return da.ExactlyNAreSystemClusterCritical(1) +} + +func (da DeploymentAssertion) ExactlyNAreSystemClusterCritical(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + systemClusterCriticalCount := 0 + + for _, deploy := range deploys.Items { + if deploy.Spec.Template.Spec.PriorityClassName == "system-cluster-critical" { + systemClusterCriticalCount++ + } + } + + return systemClusterCriticalCount == count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNAreSystemClusterCritical", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNAreSystemClusterCritical(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := 
func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + systemClusterCriticalCount := 0 + + for _, deploy := range deploys.Items { + if deploy.Spec.Template.Spec.PriorityClassName == "system-cluster-critical" { + systemClusterCriticalCount++ + } + } + + return systemClusterCriticalCount >= count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNAreSystemClusterCritical", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasNoCPULimits() DeploymentAssertion { + return da.ExactlyNHaveNoCPULimits(1) +} + +func (da DeploymentAssertion) ExactlyNHaveNoCPULimits(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasNoCPULimits := 0 + + for _, deploy := range deploys.Items { + allContainersHaveNoCPULimits := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + if !container.Resources.Limits.Cpu().IsZero() { + allContainersHaveNoCPULimits = false + + break + } + } + + if allContainersHaveNoCPULimits { + hasNoCPULimits++ + } + } + + return hasNoCPULimits == count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveNoCPULimits", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveNoCPULimits(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasNoCPULimits := 0 + + for _, deploy := range deploys.Items { + allContainersHaveNoCPULimits := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + if !container.Resources.Limits.Cpu().IsZero() { + allContainersHaveNoCPULimits = false + + break + } + } + + if allContainersHaveNoCPULimits { + hasNoCPULimits++ + } + } + + return hasNoCPULimits >= count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveNoCPULimits", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasMemoryLimitsEqualToRequests() DeploymentAssertion { + return da.ExactlyNHaveMemoryLimitsEqualToRequests(1) +} + +func (da DeploymentAssertion) ExactlyNHaveMemoryLimitsEqualToRequests(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasMemoryLimitsEqualToRequests := 0 + + for _, deploy := range deploys.Items { + 
allContainersHaveMemoryLimitsEqualToRequests := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + memoryRequests := container.Resources.Requests.Memory() + memoryLimits := container.Resources.Limits.Memory() + + if !cmp.Equal(memoryLimits, memoryRequests) { + allContainersHaveMemoryLimitsEqualToRequests = false + + break + } + } + + if allContainersHaveMemoryLimitsEqualToRequests { + hasMemoryLimitsEqualToRequests++ + } + } + + return hasMemoryLimitsEqualToRequests == count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryLimitsEqualToRequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveMemoryLimitsEqualToRequests(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasMemoryLimitsEqualToRequests := 0 + + for _, deploy := range deploys.Items { + allContainersHaveMemoryLimitsEqualToRequests := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + memoryRequests := container.Resources.Requests.Memory() + memoryLimits := container.Resources.Limits.Memory() + + if !cmp.Equal(memoryLimits, memoryRequests) { + allContainersHaveMemoryLimitsEqualToRequests = false + + break + } + } + + if allContainersHaveMemoryLimitsEqualToRequests { + hasMemoryLimitsEqualToRequests++ + } + } + + return hasMemoryLimitsEqualToRequests >= count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryLimitsEqualToRequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasMemoryLimits() DeploymentAssertion { + return da.ExactlyNHaveMemoryLimits(1) +} + +func (da DeploymentAssertion) ExactlyNHaveMemoryLimits(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasMemoryLimits := 0 + + for _, deploy := range deploys.Items { + allContainersHaveMemoryLimits := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + if container.Resources.Limits.Memory().IsZero() { + allContainersHaveMemoryLimits = false + + break + } + } + + if allContainersHaveMemoryLimits { + hasMemoryLimits++ + } + } + + return hasMemoryLimits == count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryLimits", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveMemoryLimits(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, 
err) + + if len(deploys.Items) < count { + return false, nil + } + + hasMemoryLimits := 0 + + for _, deploy := range deploys.Items { + allContainersHaveMemoryLimits := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + if container.Resources.Limits.Memory().IsZero() { + allContainersHaveMemoryLimits = false + + break + } + } + + if allContainersHaveMemoryLimits { + hasMemoryLimits++ + } + } + + return hasMemoryLimits >= count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryLimits", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasMemoryRequests() DeploymentAssertion { + return da.ExactlyNHaveMemoryRequests(1) +} + +func (da DeploymentAssertion) ExactlyNHaveMemoryRequests(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasMemoryRequests := 0 + + for _, deploy := range deploys.Items { + allContainersHaveMemoryRequests := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + if container.Resources.Requests.Memory().IsZero() { + allContainersHaveMemoryRequests = false + + break + } + } + + if allContainersHaveMemoryRequests { + hasMemoryRequests++ + } + } + + return hasMemoryRequests == count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryRequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveMemoryRequests(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasMemoryRequests := 0 + + for _, deploy := range deploys.Items { + allContainersHaveMemoryRequests := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + if container.Resources.Requests.Memory().IsZero() { + allContainersHaveMemoryRequests = false + + break + } + } + + if allContainersHaveMemoryRequests { + hasMemoryRequests++ + } + } + + return hasMemoryRequests >= count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryRequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasCPURequests() DeploymentAssertion { + return da.ExactlyNHaveCPURequests(1) +} + +func (da DeploymentAssertion) ExactlyNHaveCPURequests(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasCPURequests := 0 + + for _, deploy := range deploys.Items { + 
allContainersHaveCPURequests := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + if container.Resources.Requests.Cpu().IsZero() { + allContainersHaveCPURequests = false + + break + } + } + + if allContainersHaveCPURequests { + hasCPURequests++ + } + } + + return hasCPURequests == count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveCPURequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveCPURequests(count int) DeploymentAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + deploys, err := da.getDeployments(ctx, t, cfg) + require.NoError(t, err) + + if len(deploys.Items) < count { + return false, nil + } + + hasCPURequests := 0 + + for _, deploy := range deploys.Items { + allContainersHaveCPURequests := true + + for _, container := range deploy.Spec.Template.Spec.Containers { + if container.Resources.Requests.Cpu().IsZero() { + allContainersHaveCPURequests = false + + break + } + } + + if allContainersHaveCPURequests { + hasCPURequests++ + } + } + + return hasCPURequests >= count, nil + } + + require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveCPURequests", stepFn)) + + return res +} + +func NewDeploymentAssertion(opts ...assertion.AssertionOption) DeploymentAssertion { + return DeploymentAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.AssertionOption{assertion.WithBuilder(features.New("Deployment").WithLabel("type", "deployment"))}, + opts..., + )..., + ), + } +} diff --git a/internal/deployments/deployments_test.go b/internal/deployments/deployments_test.go new file mode 100644 index 0000000..779d4c3 --- /dev/null +++ b/internal/deployments/deployments_test.go @@ -0,0 +1,85 @@ +package deployments_test + +import ( + "log/slog" + "os" + "testing" + + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" + "sigs.k8s.io/e2e-framework/support/kind" + + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +var testEnv env.Environment + +func TestMain(m *testing.M) { + kindClusterName := envconf.RandomName("kind", 16) + + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))) + + testEnv = env.New(). + Setup( + envfuncs.CreateCluster(kind.NewProvider(), kindClusterName), + ). + BeforeEachFeature(testhelpers.CreateRandomNamespaceBeforeEachFeature()). + AfterEachFeature(testhelpers.DeleteRandomNamespaceAfterEachFeature()). 
+ Finish( + envfuncs.DestroyCluster(kindClusterName), + ) + + os.Exit(testEnv.Run(m)) +} + +func generateDeploymentNames() []string { + res := make([]string, 3) + + for i := range res { + res[i] = envconf.RandomName("test", 20) + } + + return res +} + +func createGoodDeploys(deploymentNames []string) []e2etypes.StepFunc { + if len(deploymentNames) == 0 { + panic("must supply deployment names") + } + + res := make([]e2etypes.StepFunc, len(deploymentNames)) + labelValue := deploymentNames[0] + + for i, deployName := range deploymentNames { + res[i] = helpers.CreateResourceFromPathWithNamespaceFromEnv( + deploymentPath, + testhelpers.MutateResourceName(deployName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": labelValue}), + ) + } + + return res +} + +func createBadDeploys(deploymentNames []string) []e2etypes.StepFunc { + if len(deploymentNames) == 0 { + panic("must supply deployment names") + } + + res := make([]e2etypes.StepFunc, len(deploymentNames)) + labelValue := deploymentNames[0] + + for i, deployName := range deploymentNames { + res[i] = helpers.CreateResourceFromPathWithNamespaceFromEnv( + badDeploymentPath, + testhelpers.MutateResourceName(deployName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": labelValue}), + ) + } + + return res +} diff --git a/internal/deployments/testdata/bad-deployment.yaml b/internal/deployments/testdata/bad-deployment.yaml new file mode 100644 index 0000000..dcf382e --- /dev/null +++ b/internal/deployments/testdata/bad-deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deployment + labels: + app.kubernetes.io/name: deployments_test + app.kubernetes.io/component: deployment +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: deployments_test + app.kubernetes.io/component: deployment + template: + metadata: + labels: + app.kubernetes.io/name: deployments_test + app.kubernetes.io/component: deployment + spec: + containers: + - name: test + image: docker.io/library/nginx:1.27.4-alpine-slim@sha256:b05aceb5ec1844435cae920267ff9949887df5b88f70e11d8b2871651a596612 + resources: + requests: + cpu: 100m + memory: 32Mi + limits: + memory: 32Mi + livenessProbe: + httpGet: + path: /healthz + port: 80 + initialDelaySeconds: 2 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /readyz + port: 80 + initialDelaySeconds: 3 + periodSeconds: 5 + volumeMounts: + - name: test-config + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + - name: test-2 + image: registry.k8s.io/pause:3.10@sha256:ee6521f290b2168b6e0935a181d4cff9be1ac3f505666ef0e3c98fae8199917a + resources: + limits: + cpu: 10m + memory: 16Mi + - name: test-3 + image: registry.k8s.io/pause:3.10@sha256:ee6521f290b2168b6e0935a181d4cff9be1ac3f505666ef0e3c98fae8199917a + priorityClassName: default + volumes: + - name: test-config + configMap: + name: test-config + items: + - key: default.conf + path: default.conf diff --git a/internal/deployments/testdata/config.yaml b/internal/deployments/testdata/config.yaml new file mode 100644 index 0000000..a388742 --- /dev/null +++ b/internal/deployments/testdata/config.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-config +data: + default.conf: | + server { + listen 80; + server_name localhost; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + + location = /healthz { + add_header 
'Content-Type' 'application/json'; + return 200 '{"status": "UP"}'; + } + + location = /readyz { + add_header 'Content-Type' 'application/json'; + return 200 '{"ready": true}'; + } + } diff --git a/internal/deployments/testdata/deployment.yaml b/internal/deployments/testdata/deployment.yaml new file mode 100644 index 0000000..160b304 --- /dev/null +++ b/internal/deployments/testdata/deployment.yaml @@ -0,0 +1,52 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deployment + labels: + app.kubernetes.io/name: deployments_test + app.kubernetes.io/component: deployment +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: deployments_test + app.kubernetes.io/component: deployment + template: + metadata: + labels: + app.kubernetes.io/name: deployments_test + app.kubernetes.io/component: deployment + spec: + containers: + - name: test + image: docker.io/library/nginx:1.27.4-alpine-slim@sha256:b05aceb5ec1844435cae920267ff9949887df5b88f70e11d8b2871651a596612 + resources: + requests: + cpu: 100m + memory: 32Mi + limits: + memory: 32Mi + livenessProbe: + httpGet: + path: /healthz + port: 80 + initialDelaySeconds: 2 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /readyz + port: 80 + initialDelaySeconds: 3 + periodSeconds: 5 + volumeMounts: + - name: test-config + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + priorityClassName: system-cluster-critical + volumes: + - name: test-config + configMap: + name: test-config + items: + - key: default.conf + path: default.conf diff --git a/internal/namespaces/1namespace_test.go b/internal/namespaces/1namespace_test.go new file mode 100644 index 0000000..6b3a168 --- /dev/null +++ b/internal/namespaces/1namespace_test.go @@ -0,0 +1,198 @@ +package namespaces_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/pkg/envconf" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/namespaces" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +func Test_1Namespace_Success(t *testing.T) { + asserts := []testhelpers.SuccessfulAssert{ + { + Name: "Exists_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + namespaceName := envconf.RandomName("test", 20) + + return namespaces.NewNamespaceAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + assertion.WithSetup( + helpers.CreateResourceFromPath( + "./testdata/restricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + ), + ), + assertion.WithTeardown( + helpers.DeleteResourceFromPath( + "./testdata/restricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + ), + ), + ).Exists() + }, + }, + { + Name: "Exists_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + namespaceName := envconf.RandomName("test", 20) + + return namespaces.NewNamespaceAssertion( + assertion.WithResourceName(namespaceName), + assertion.WithSetup( + helpers.CreateResourceFromPath( + "./testdata/restricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + ), + ), + assertion.WithTeardown( + helpers.DeleteResourceFromPath(
"./testdata/restricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + ), + ), + ).Exists() + }, + }, + { + Name: "IsRestricted_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + namespaceName := envconf.RandomName("test", 20) + + return namespaces.NewNamespaceAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + assertion.WithSetup( + helpers.CreateResourceFromPath( + "./testdata/restricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + ), + ), + assertion.WithTeardown( + helpers.DeleteResourceFromPath( + "./testdata/restricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + ), + ), + ).Exists().IsRestricted() + }, + }, + { + Name: "IsRestricted_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + namespaceName := envconf.RandomName("test", 20) + + return namespaces.NewNamespaceAssertion( + assertion.WithResourceName(namespaceName), + assertion.WithSetup( + helpers.CreateResourceFromPath( + "./testdata/restricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + ), + ), + assertion.WithTeardown( + helpers.DeleteResourceFromPath( + "./testdata/restricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + ), + ), + ).Exists().IsRestricted() + }, + }, + } + + testhelpers.TestSuccessfulAsserts(t, testEnv, asserts...) +} + +func Test_1Namespace_Fail(t *testing.T) { + asserts := []testhelpers.FailingAssert{ + { + Name: "Exists_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + namespaceName := envconf.RandomName("test", 20) + + return namespaces.NewNamespaceAssertion( + assertion.WithRequireT(t), + assertion.WithInterval(100*time.Millisecond), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + ).Exists() + }, + }, + { + Name: "Exists_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + namespaceName := envconf.RandomName("test", 20) + + return namespaces.NewNamespaceAssertion( + assertion.WithRequireT(t), + assertion.WithInterval(100*time.Millisecond), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithResourceName(namespaceName), + ).Exists() + }, + }, + { + Name: "IsRestricted_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + namespaceName := envconf.RandomName("test", 20) + + return namespaces.NewNamespaceAssertion( + assertion.WithRequireT(t), + assertion.WithInterval(100*time.Millisecond), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + assertion.WithSetup( + helpers.CreateResourceFromPath( + "./testdata/unrestricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + ), + ), + assertion.WithTeardown( + helpers.DeleteResourceFromPath( + "./testdata/unrestricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + ), + ), + ).Exists().IsRestricted() + }, + }, + { + Name: "IsRestricted_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + namespaceName := envconf.RandomName("test", 20) + + return namespaces.NewNamespaceAssertion( + 
assertion.WithRequireT(t), + assertion.WithInterval(100*time.Millisecond), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithResourceName(namespaceName), + assertion.WithSetup( + helpers.CreateResourceFromPath( + "./testdata/unrestricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": namespaceName}), + ), + ), + assertion.WithTeardown( + helpers.DeleteResourceFromPath( + "./testdata/unrestricted-namespace.yaml", + testhelpers.MutateResourceName(namespaceName), + ), + ), + ).Exists().IsRestricted() + }, + }, + } + + testhelpers.TestFailingAsserts(t, testEnv, asserts...) +} diff --git a/internal/namespaces/3namespace_test.go b/internal/namespaces/3namespace_test.go new file mode 100644 index 0000000..9390e8c --- /dev/null +++ b/internal/namespaces/3namespace_test.go @@ -0,0 +1,184 @@ +package namespaces_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/pkg/envconf" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/namespaces" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +func Test_3Namespace_Success(t *testing.T) { + asserts := []testhelpers.SuccessfulAssert{ + { + Name: "AtLeastNExist", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + namespaceNames := generateNamespaceNames() + + return namespaces.NewNamespaceAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceNames[0]}), + assertion.WithSetup(createNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + assertion.WithTeardown(deleteNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + ).AtLeastNExist(2) + }, + }, + { + Name: "ExactlyNExist", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + namespaceNames := generateNamespaceNames() + + return namespaces.NewNamespaceAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceNames[0]}), + assertion.WithSetup(createNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + assertion.WithTeardown(deleteNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + ).ExactlyNExist(3) + }, + }, + { + Name: "AtLeastNAreRestricted", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + namespaceNames := generateNamespaceNames() + + return namespaces.NewNamespaceAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceNames[0]}), + assertion.WithSetup(createNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + assertion.WithTeardown(deleteNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + ).AtLeastNExist(2).AtLeastNAreRestricted(2) + }, + }, + { + Name: "ExactlyNAreRestricted", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + namespaceNames := generateNamespaceNames() + + return namespaces.NewNamespaceAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceNames[0]}), + assertion.WithSetup(createNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + assertion.WithTeardown(deleteNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + 
).ExactlyNExist(3).ExactlyNAreRestricted(3) + }, + }, + } + + testhelpers.TestSuccessfulAsserts(t, testEnv, asserts...) +} + +func Test_3Namespace_Fail(t *testing.T) { + asserts := []testhelpers.FailingAssert{ + { + Name: "AtLeastNExist", + FailingAssert: func(t require.TestingT) assertion.Assertion { + namespaceNames := generateNamespaceNames() + + return namespaces.NewNamespaceAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceNames[0]}), + assertion.WithSetup(createNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + assertion.WithTeardown(deleteNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + ).AtLeastNExist(4) + }, + }, + { + Name: "ExactlyNExist", + FailingAssert: func(t require.TestingT) assertion.Assertion { + namespaceNames := generateNamespaceNames() + + return namespaces.NewNamespaceAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceNames[0]}), + assertion.WithSetup(createNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + assertion.WithTeardown(deleteNamespaces("./testdata/restricted-namespace.yaml", namespaceNames)...), + ).ExactlyNExist(2) + }, + }, + { + Name: "AtLeastNAreRestricted", + FailingAssert: func(t require.TestingT) assertion.Assertion { + namespaceNames := generateNamespaceNames() + + return namespaces.NewNamespaceAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceNames[0]}), + assertion.WithSetup(createNamespaces("./testdata/unrestricted-namespace.yaml", namespaceNames)...), + assertion.WithTeardown(deleteNamespaces("./testdata/unrestricted-namespace.yaml", namespaceNames)...), + ).AtLeastNExist(3).AtLeastNAreRestricted(3) + }, + }, + { + Name: "ExactlyNAreRestricted", + FailingAssert: func(t require.TestingT) assertion.Assertion { + namespaceNames := generateNamespaceNames() + + return namespaces.NewNamespaceAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": namespaceNames[0]}), + assertion.WithSetup(createNamespaces("./testdata/unrestricted-namespace.yaml", namespaceNames)...), + assertion.WithTeardown(deleteNamespaces("./testdata/unrestricted-namespace.yaml", namespaceNames)...), + ).ExactlyNExist(3).ExactlyNAreRestricted(3) + }, + }, + } + + testhelpers.TestFailingAsserts(t, testEnv, asserts...) 
+} + +func generateNamespaceNames() []string { + res := make([]string, 3) + + for i := range res { + res[i] = envconf.RandomName("test", 20) + } + + return res +} + +func createNamespaces(resourcePath string, namespaceNames []string) []e2etypes.StepFunc { + if len(namespaceNames) == 0 { + panic("must supply namespace names") + } + + res := make([]e2etypes.StepFunc, len(namespaceNames)) + labelValue := namespaceNames[0] + + for i, nsName := range namespaceNames { + res[i] = helpers.CreateResourceFromPath( + resourcePath, + testhelpers.MutateResourceName(nsName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": labelValue}), + ) + } + + return res +} + +func deleteNamespaces(resourcePath string, namespaceNames []string) []e2etypes.StepFunc { + if len(namespaceNames) == 0 { + panic("must supply namespace names") + } + + res := make([]e2etypes.StepFunc, len(namespaceNames)) + + for i, nsName := range namespaceNames { + res[i] = helpers.DeleteResourceFromPath( + resourcePath, + testhelpers.MutateResourceName(nsName), + ) + } + + return res +} diff --git a/internal/namespaces/namespaces.go b/internal/namespaces/namespaces.go new file mode 100644 index 0000000..e23dd46 --- /dev/null +++ b/internal/namespaces/namespaces.go @@ -0,0 +1,180 @@ +package namespaces + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type NamespaceAssertion struct { + assertion.Assertion +} + +const ( + podSecurityEnforceLabelKey = "pod-security.kubernetes.io/enforce" +) + +func (na NamespaceAssertion) clone() NamespaceAssertion { + return NamespaceAssertion{ + Assertion: assertion.CloneAssertion(na.Assertion), + } +} + +func (na NamespaceAssertion) getNamespaces(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.NamespaceList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var nsList corev1.NamespaceList + + list, err := client.Resource(corev1.SchemeGroupVersion.WithResource("namespaces")).List(ctx, na.ListOptions(cfg)) + if err != nil { + return nsList, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &nsList) + if err != nil { + return nsList, err + } + + return nsList, nil +} + +func (na NamespaceAssertion) Exists() NamespaceAssertion { + return na.ExactlyNExist(1) +} + +func (na NamespaceAssertion) ExactlyNExist(count int) NamespaceAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + nsList, err := na.getNamespaces(ctx, t, cfg) + require.NoError(t, err) + + return len(nsList.Items) == count, nil + } + + require.NoError(t, na.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := na.clone() + res.SetBuilder(res.GetBuilder().Assess("exists", fn)) + + return res +} + +func (na NamespaceAssertion) AtLeastNExist(count int) NamespaceAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + nsList, err := na.getNamespaces(ctx, t, cfg) + require.NoError(t, err) + + 
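+ // At-least semantics: the condition holds once the number of matching namespaces reaches count.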
return len(nsList.Items) >= count, nil + } + + require.NoError(t, na.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := na.clone() + res.SetBuilder(res.GetBuilder().Assess("exists", fn)) + + return res +} + +func (na NamespaceAssertion) AtLeastNAreRestricted(count int) NamespaceAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + nsList, err := na.getNamespaces(ctx, t, cfg) + require.NoError(t, err) + + if len(nsList.Items) < count { + return false, nil + } + + restrictedCount := 0 + + for _, ns := range nsList.Items { + nsLabels := ns.GetLabels() + + enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] + if ok && enforceLabel == "restricted" { + restrictedCount += 1 + } + } + + return restrictedCount >= count, nil + } + + require.NoError(t, na.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := na.clone() + res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", fn)) + + return res +} + +func (na NamespaceAssertion) IsRestricted() NamespaceAssertion { + return na.ExactlyNAreRestricted(1) +} + +func (na NamespaceAssertion) ExactlyNAreRestricted(count int) NamespaceAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + nsList, err := na.getNamespaces(ctx, t, cfg) + require.NoError(t, err) + + if len(nsList.Items) < count { + return false, nil + } + + restrictedCount := 0 + + for _, ns := range nsList.Items { + nsLabels := ns.GetLabels() + + enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] + if ok && enforceLabel == "restricted" { + restrictedCount += 1 + } + } + + return restrictedCount == count, nil + } + + require.NoError(t, na.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := na.clone() + res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", fn)) + + return res +} + +func NewNamespaceAssertion(opts ...assertion.AssertionOption) NamespaceAssertion { + return NamespaceAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.AssertionOption{assertion.WithBuilder(features.New("Namespace").WithLabel("type", "namespace"))}, + opts..., + )..., + ), + } +} diff --git a/internal/namespaces/namespaces_test.go b/internal/namespaces/namespaces_test.go new file mode 100644 index 0000000..8230502 --- /dev/null +++ b/internal/namespaces/namespaces_test.go @@ -0,0 +1,30 @@ +package namespaces_test + +import ( + "log/slog" + "os" + "testing" + + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + "sigs.k8s.io/e2e-framework/support/kind" +) + +var testEnv env.Environment + +func TestMain(m *testing.M) { + kindClusterName := envconf.RandomName("kind", 16) + + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))) + + testEnv = env.New(). + Setup( + envfuncs.CreateCluster(kind.NewProvider(), kindClusterName), + ). 
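+ // Destroy the kind cluster once all namespace features have finished.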
+ Finish( + envfuncs.DestroyCluster(kindClusterName), + ) + + os.Exit(testEnv.Run(m)) +} diff --git a/internal/namespaces/testdata/restricted-namespace.yaml b/internal/namespaces/testdata/restricted-namespace.yaml new file mode 100644 index 0000000..4f208d1 --- /dev/null +++ b/internal/namespaces/testdata/restricted-namespace.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: restricted-namespace + labels: + foo: bar + app.kubernetes.io/name: namespaces_test + pod-security.kubernetes.io/enforce: restricted diff --git a/internal/namespaces/testdata/unrestricted-namespace.yaml b/internal/namespaces/testdata/unrestricted-namespace.yaml new file mode 100644 index 0000000..59d7fc7 --- /dev/null +++ b/internal/namespaces/testdata/unrestricted-namespace.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: unrestricted-namespace + labels: + foo: bar + app.kubernetes.io/name: namespaces_test diff --git a/internal/pods/1pod_test.go b/internal/pods/1pod_test.go new file mode 100644 index 0000000..903d538 --- /dev/null +++ b/internal/pods/1pod_test.go @@ -0,0 +1,131 @@ +package pods_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/pods" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +func Test_1Pod_Success(t *testing.T) { + asserts := []testhelpers.SuccessfulAssert{ + { + Name: "Exists_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(readyPodPath), + ), + ).Exists() + }, + }, + { + Name: "Exists_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-pod"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(readyPodPath), + ), + ).Exists() + }, + }, + { + Name: "IsReady_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(readyPodPath), + ), + ).Exists().IsReady() + }, + }, + { + Name: "IsReady_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceName("test-pod"), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(readyPodPath), + ), + ).Exists().IsReady() + }, + }, + } + + testhelpers.TestSuccessfulAsserts(t, testEnv, asserts...) 
+} + +func Test_1Pod_Fail(t *testing.T) { + asserts := []testhelpers.FailingAssert{ + { + Name: "Exists_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithRequireT(t), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + ).Exists() + }, + }, + { + Name: "Exists_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithRequireT(t), + assertion.WithResourceName("test-pod"), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + ).Exists() + }, + }, + { + Name: "IsReady_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithRequireT(t), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(readyPodPath), + ), + ).Exists().IsReady() + }, + }, + { + Name: "IsReady_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithRequireT(t), + assertion.WithResourceName("test-pod"), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(readyPodPath), + ), + ).Exists().IsReady() + }, + }, + } + + testhelpers.TestFailingAsserts(t, testEnv, asserts...) 
+} diff --git a/internal/pods/3pod_test.go b/internal/pods/3pod_test.go new file mode 100644 index 0000000..ba0981c --- /dev/null +++ b/internal/pods/3pod_test.go @@ -0,0 +1,150 @@ +package pods_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/pods" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +func Test_3Pod_Success(t *testing.T) { + testCases := []struct { + name string + assertion pods.PodAssertion + }{ + { + name: "AtLeastNExist", + assertion: pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deployPath), + ), + ).AtLeastNExist(2), + }, + { + name: "ExactlyNExist", + assertion: pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deployPath), + ), + ).ExactlyNExist(3), + }, + { + name: "AtLeastNAreReady", + assertion: pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(deployPath), + ), + ).AtLeastNExist(2).AtLeastNAreReady(2), + }, + { + name: "ExactlyNAreReady", + assertion: pods.NewPodAssertion( + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(deployPath), + ), + ).ExactlyNExist(3).ExactlyNAreReady(3), + }, + } + + features := make([]features.Feature, 0) + + for _, a := range testCases { + features = append(features, a.assertion.AsFeature()) + } + + testEnv.TestInParallel(t, features...) 
+} + +func Test_3Pod_Fail(t *testing.T) { + testCases := []struct { + name string + failingAssertion func(require.TestingT) pods.PodAssertion + }{ + { + name: "AtLeastNExist", + failingAssertion: func(t require.TestingT) pods.PodAssertion { + return pods.NewPodAssertion( + assertion.WithRequireT(t), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deployPath), + ), + ).AtLeastNExist(4) + }, + }, + { + name: "ExactlyNExist", + failingAssertion: func(t require.TestingT) pods.PodAssertion { + return pods.NewPodAssertion( + assertion.WithRequireT(t), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deployPath), + ), + ).ExactlyNExist(2) + }, + }, + { + name: "AtLeastNAreReady", + failingAssertion: func(t require.TestingT) pods.PodAssertion { + return pods.NewPodAssertion( + assertion.WithRequireT(t), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithTimeout(10*time.Second), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(configPath), + helpers.CreateResourceFromPathWithNamespaceFromEnv(deployPath), + ), + ).AtLeastNExist(3).AtLeastNAreReady(4) + }, + }, + { + name: "ExactlyNAreReady", + failingAssertion: func(t require.TestingT) pods.PodAssertion { + return pods.NewPodAssertion( + assertion.WithRequireT(t), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "pods_test"}), + assertion.WithSetup( + helpers.CreateResourceFromPathWithNamespaceFromEnv(deployPath), + ), + ).ExactlyNExist(3).ExactlyNAreReady(3) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockT := &testhelpers.MockT{} + testEnv.Test(t, tc.failingAssertion(mockT).AsFeature()) + assert.True(t, mockT.Failed) + }) + } +} diff --git a/internal/pods/pods.go b/internal/pods/pods.go new file mode 100644 index 0000000..2b8b725 --- /dev/null +++ b/internal/pods/pods.go @@ -0,0 +1,182 @@ +package pods + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type PodAssertion struct { + assertion.Assertion +} + +func (pa PodAssertion) clone() PodAssertion { + return PodAssertion{ + Assertion: assertion.CloneAssertion(pa.Assertion), + } +} + +func (pa PodAssertion) Exists() PodAssertion { + return pa.ExactlyNExist(1) +} + +func (pa PodAssertion) ExactlyNExist(count int) PodAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + pods, 
err := pa.getPods(ctx, t, cfg) + require.NoError(t, err) + + return len(pods.Items) == count, nil + } + + require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := pa.clone() + res.SetBuilder(res.GetBuilder().Assess("exists", fn)) + + return res +} + +func (pa PodAssertion) AtLeastNExist(count int) PodAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + pods, err := pa.getPods(ctx, t, cfg) + require.NoError(t, err) + + return len(pods.Items) >= count, nil + } + + require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := pa.clone() + res.SetBuilder(res.GetBuilder().Assess("exists", fn)) + + return res +} + +// return default value instead of a nil pointer so that negative assertions (i.e. testing for false positives) can use +// a mock require.TestingT object. +func (pa PodAssertion) getPods(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.PodList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var pods corev1.PodList + + list, err := client. + Resource(corev1.SchemeGroupVersion.WithResource("pods")). + List(ctx, pa.ListOptions(cfg)) + if err != nil { + return pods, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &pods) + if err != nil { + return pods, err + } + + return pods, nil +} + +func (pa PodAssertion) IsReady() PodAssertion { + return pa.ExactlyNAreReady(1) +} + +func (pa PodAssertion) ExactlyNAreReady(count int) PodAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + pods, err := pa.getPods(ctx, t, cfg) + require.NoError(t, err) + + if len(pods.Items) < count { + return false, nil + } + + readyCount := 0 + + for _, pod := range pods.Items { + for _, cond := range pod.Status.Conditions { + if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { + readyCount += 1 + break + } + } + } + + return readyCount == count, nil + } + + require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := pa.clone() + res.SetBuilder(pa.GetBuilder().Assess("isReady", fn)) + + return res +} + +func (pa PodAssertion) AtLeastNAreReady(count int) PodAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + pods, err := pa.getPods(ctx, t, cfg) + require.NoError(t, err) + + if len(pods.Items) < count { + return false, nil + } + + readyCount := 0 + + for _, pod := range pods.Items { + for _, cond := range pod.Status.Conditions { + if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { + readyCount += 1 + break + } + } + } + + return readyCount >= count, nil + } + + require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := pa.clone() + res.SetBuilder(pa.GetBuilder().Assess("isReady", fn)) + + return res +} + +func NewPodAssertion(opts ...assertion.AssertionOption) PodAssertion { + return PodAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.AssertionOption{assertion.WithBuilder(features.New("Pod").WithLabel("type", "pod"))}, + 
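+ // The default builder option is prepended so that caller-supplied options are applied after it.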
opts..., + )..., + ), + } +} diff --git a/internal/pods/pods_test.go b/internal/pods/pods_test.go new file mode 100644 index 0000000..f8010aa --- /dev/null +++ b/internal/pods/pods_test.go @@ -0,0 +1,40 @@ +package pods_test + +import ( + "log/slog" + "os" + "testing" + + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + "sigs.k8s.io/e2e-framework/support/kind" + + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +const ( + readyPodPath = "./testdata/ready-pod.yaml" + configPath = "./testdata/config.yaml" + deployPath = "./testdata/deployment.yaml" +) + +var testEnv env.Environment + +func TestMain(m *testing.M) { + kindClusterName := envconf.RandomName("kind", 16) + + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))) + + testEnv = env.New(). + Setup( + envfuncs.CreateCluster(kind.NewProvider(), kindClusterName), + ). + BeforeEachFeature(testhelpers.CreateRandomNamespaceBeforeEachFeature()). + AfterEachFeature(testhelpers.DeleteRandomNamespaceAfterEachFeature()). + Finish( + envfuncs.DestroyCluster(kindClusterName), + ) + + os.Exit(testEnv.Run(m)) +} diff --git a/internal/pods/testdata/config.yaml b/internal/pods/testdata/config.yaml new file mode 100644 index 0000000..a388742 --- /dev/null +++ b/internal/pods/testdata/config.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-config +data: + default.conf: | + server { + listen 80; + server_name localhost; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + + location = /healthz { + add_header 'Content-Type' 'application/json'; + return 200 '{"status": "UP"}'; + } + + location = /readyz { + add_header 'Content-Type' 'application/json'; + return 200 '{"ready": true}'; + } + } diff --git a/internal/pods/testdata/deployment.yaml b/internal/pods/testdata/deployment.yaml new file mode 100644 index 0000000..70ec3ca --- /dev/null +++ b/internal/pods/testdata/deployment.yaml @@ -0,0 +1,49 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deploy +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: pods_test + app.kubernetes.io/component: deployment + template: + metadata: + labels: + app.kubernetes.io/name: pods_test + app.kubernetes.io/component: deployment + spec: + containers: + - name: test + image: docker.io/library/nginx:1.27.4-alpine-slim@sha256:b05aceb5ec1844435cae920267ff9949887df5b88f70e11d8b2871651a596612 + resources: + requests: + cpu: 10m + memory: 16Mi + limits: + cpu: 100m + memory: 16Mi + livenessProbe: + httpGet: + path: /healthz + port: 80 + initialDelaySeconds: 2 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /readyz + port: 80 + initialDelaySeconds: 3 + periodSeconds: 5 + volumeMounts: + - name: test-config + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + volumes: + - name: test-config + configMap: + name: test-config + items: + - key: default.conf + path: default.conf diff --git a/internal/pods/testdata/ready-pod.yaml b/internal/pods/testdata/ready-pod.yaml new file mode 100644 index 0000000..3a49b5d --- /dev/null +++ b/internal/pods/testdata/ready-pod.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pod + labels: + app.kubernetes.io/name: pods_test +spec: + containers: + - name: test + image: 
docker.io/library/nginx:1.27.4-alpine-slim@sha256:b05aceb5ec1844435cae920267ff9949887df5b88f70e11d8b2871651a596612 + resources: + requests: + cpu: 10m + memory: 16Mi + limits: + cpu: 100m + memory: 16Mi + livenessProbe: + httpGet: + path: /healthz + port: 80 + initialDelaySeconds: 2 + periodSeconds: 1 + readinessProbe: + httpGet: + path: /readyz + port: 80 + initialDelaySeconds: 2 + periodSeconds: 1 + volumeMounts: + - name: test-config + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + volumes: + - name: test-config + configMap: + name: test-config + items: + - key: default.conf + path: default.conf diff --git a/internal/secrets/1secret_test.go b/internal/secrets/1secret_test.go new file mode 100644 index 0000000..ee1fe90 --- /dev/null +++ b/internal/secrets/1secret_test.go @@ -0,0 +1,117 @@ +package secrets_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/secrets" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +func Test_1Secret_Success(t *testing.T) { + asserts := []testhelpers.SuccessfulAssert{ + { + Name: "Exists_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return secrets.NewSecretAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "secrets_test"}), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(helpers.CreateResourceFromPathWithNamespaceFromEnv(secretPath)), + ).Exists() + }, + }, + { + Name: "Exists_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return secrets.NewSecretAssertion( + assertion.WithResourceName("test-secret"), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(helpers.CreateResourceFromPathWithNamespaceFromEnv(secretPath)), + ).Exists() + }, + }, + { + Name: "HasContent_Labels", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return secrets.NewSecretAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "secrets_test"}), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(helpers.CreateResourceFromPathWithNamespaceFromEnv(secretPath)), + ).Exists().HasContent(map[string]string{"foo": "bar"}) + }, + }, + { + Name: "HasContent_Name", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + return secrets.NewSecretAssertion( + assertion.WithResourceName("test-secret"), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(helpers.CreateResourceFromPathWithNamespaceFromEnv(secretPath)), + ).Exists().HasContent(map[string]string{"foo": "bar", "bar": "baz"}) + }, + }, + } + + testhelpers.TestSuccessfulAsserts(t, testEnv, asserts...) 
+} + +func Test_1Secret_Fail(t *testing.T) { + asserts := []testhelpers.FailingAssert{ + { + Name: "Exists_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return secrets.NewSecretAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "secrets_test"}), + assertion.WithResourceNamespaceFromTestEnv(), + ).Exists() + }, + }, + { + Name: "Exists_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return secrets.NewSecretAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceName("test-secret"), + assertion.WithResourceNamespaceFromTestEnv(), + ).Exists() + }, + }, + { + Name: "HasContent_Labels", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return secrets.NewSecretAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "secrets_test"}), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(helpers.CreateResourceFromPathWithNamespaceFromEnv(secretPath)), + ).Exists().HasContent(map[string]string{"foo": "bar", "baz": "qux"}) + }, + }, + { + Name: "HasContent_Name", + FailingAssert: func(t require.TestingT) assertion.Assertion { + return secrets.NewSecretAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceName("test-secret"), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(helpers.CreateResourceFromPathWithNamespaceFromEnv(secretPath)), + ).Exists().HasContent(map[string]string{"baz": "qux"}) + }, + }, + } + + testhelpers.TestFailingAsserts(t, testEnv, asserts...) 
+} diff --git a/internal/secrets/3secret_test.go b/internal/secrets/3secret_test.go new file mode 100644 index 0000000..2d3b5dc --- /dev/null +++ b/internal/secrets/3secret_test.go @@ -0,0 +1,167 @@ +package secrets_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/pkg/envconf" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/secrets" + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +func Test_3Secret_Success(t *testing.T) { + asserts := []testhelpers.SuccessfulAssert{ + { + Name: "ExactlyNExist", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + secretNames := generateSecretNames() + + return secrets.NewSecretAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": secretNames[0]}), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(createSecrets(secretNames)...), + ).ExactlyNExist(3) + }, + }, + { + Name: "ExactlyNHaveContent", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + secretNames := generateSecretNames() + + return secrets.NewSecretAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": secretNames[0]}), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(createSecrets(secretNames)...), + ).ExactlyNExist(3).ExactlyNHaveContent(3, map[string]string{"foo": "bar"}) + }, + }, + { + Name: "AtLeastNExist", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + secretNames := generateSecretNames() + + return secrets.NewSecretAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": secretNames[0]}), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(createSecrets(secretNames)...), + ).AtLeastNExist(2) + }, + }, + { + Name: "AtLeastNHaveContent", + SuccessfulAssert: func(_ require.TestingT) assertion.Assertion { + secretNames := generateSecretNames() + + return secrets.NewSecretAssertion( + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": secretNames[0]}), + assertion.WithResourceNamespaceFromTestEnv(), + assertion.WithSetup(createSecrets(secretNames)...), + ).AtLeastNExist(2).AtLeastNHaveContent(2, map[string]string{"foo": "bar"}) + }, + }, + } + + testhelpers.TestSuccessfulAsserts(t, testEnv, asserts...) 
+} + +func Test_3Secret_Fail(t *testing.T) { + asserts := []testhelpers.FailingAssert{ + { + Name: "ExactlyNExist", + FailingAssert: func(t require.TestingT) assertion.Assertion { + secretNames := generateSecretNames() + + return secrets.NewSecretAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": secretNames[0]}), + assertion.WithSetup(createSecrets(secretNames)...), + assertion.WithResourceNamespaceFromTestEnv(), + ).ExactlyNExist(2) + }, + }, + { + Name: "AtLeastNExist", + FailingAssert: func(t require.TestingT) assertion.Assertion { + secretNames := generateSecretNames() + + return secrets.NewSecretAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": secretNames[0]}), + assertion.WithSetup(createSecrets(secretNames)...), + assertion.WithResourceNamespaceFromTestEnv(), + ).AtLeastNExist(4) + }, + }, + { + Name: "ExactlyNHaveContent", + FailingAssert: func(t require.TestingT) assertion.Assertion { + secretNames := generateSecretNames() + + return secrets.NewSecretAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": secretNames[0]}), + assertion.WithSetup(createSecrets(secretNames)...), + assertion.WithResourceNamespaceFromTestEnv(), + ).ExactlyNExist(3).ExactlyNHaveContent(2, map[string]string{"foo": "bar"}) + }, + }, + { + Name: "AtLeastNHaveContent", + FailingAssert: func(t require.TestingT) assertion.Assertion { + secretNames := generateSecretNames() + + return secrets.NewSecretAssertion( + assertion.WithRequireT(t), + assertion.WithTimeout(500*time.Millisecond), + assertion.WithInterval(100*time.Millisecond), + assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": secretNames[0]}), + assertion.WithSetup(createSecrets(secretNames)...), + assertion.WithResourceNamespaceFromTestEnv(), + ).ExactlyNExist(3).ExactlyNHaveContent(4, map[string]string{"foo": "bar"}) + }, + }, + } + + testhelpers.TestFailingAsserts(t, testEnv, asserts...) 
+} + +func generateSecretNames() []string { + res := make([]string, 3) + + for i := range res { + res[i] = envconf.RandomName("test", 20) + } + + return res +} + +func createSecrets(secretNames []string) []e2etypes.StepFunc { + if len(secretNames) == 0 { + panic("must supply secret names") + } + + res := make([]e2etypes.StepFunc, len(secretNames)) + labelValue := secretNames[0] + + for i, nsName := range secretNames { + res[i] = helpers.CreateResourceFromPathWithNamespaceFromEnv( + secretPath, + testhelpers.MutateResourceName(nsName), + decoder.MutateLabels(map[string]string{"app.kubernetes.io/name": labelValue}), + ) + } + + return res +} diff --git a/internal/secrets/secrets.go b/internal/secrets/secrets.go new file mode 100644 index 0000000..749981a --- /dev/null +++ b/internal/secrets/secrets.go @@ -0,0 +1,194 @@ +package secrets + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type SecretAssertion struct { + assertion.Assertion +} + +func (sa SecretAssertion) clone() SecretAssertion { + return SecretAssertion{ + Assertion: assertion.CloneAssertion(sa.Assertion), + } +} + +func (sa SecretAssertion) Exists() SecretAssertion { + return sa.ExactlyNExist(1) +} + +func (sa SecretAssertion) ExactlyNExist(count int) SecretAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + secrets, err := sa.getSecrets(ctx, t, cfg) + require.NoError(t, err) + + return len(secrets.Items) == count, nil + } + + require.NoError(t, sa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := sa.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", fn)) + + return res +} + +func (sa SecretAssertion) AtLeastNExist(count int) SecretAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + secrets, err := sa.getSecrets(ctx, t, cfg) + require.NoError(t, err) + + return len(secrets.Items) >= count, nil + } + + require.NoError(t, sa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := sa.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", fn)) + + return res +} + +func (sa SecretAssertion) getSecrets(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.SecretList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var secrets corev1.SecretList + + list, err := client. + Resource(corev1.SchemeGroupVersion.WithResource("secrets")). 
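+ // The assertion's ListOptions scopes the query; the unstructured result is converted into a SecretList below.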
+ List(ctx, sa.ListOptions(cfg)) + if err != nil { + return secrets, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &secrets) + if err != nil { + return secrets, err + } + + return secrets, nil +} + +func (sa SecretAssertion) HasContent(content map[string]string) SecretAssertion { + return sa.ExactlyNHaveContent(1, content) +} + +func (sa SecretAssertion) ExactlyNHaveContent(count int, content map[string]string) SecretAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + secrets, err := sa.getSecrets(ctx, t, cfg) + require.NoError(t, err) + + if len(secrets.Items) < count { + return false, nil + } + + haveContent := 0 + + for _, secret := range secrets.Items { + hasContent := true + + for key, value := range content { + secData, ok := secret.Data[key] + if !ok || string(secData) != value { + hasContent = false + + break + } + } + + if hasContent { + haveContent++ + } + } + + return haveContent == count, nil + } + + require.NoError(t, sa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := sa.clone() + res.SetBuilder(res.GetBuilder().Assess("hasContent", fn)) + + return res +} + +func (sa SecretAssertion) AtLeastNHaveContent(count int, content map[string]string) SecretAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + secrets, err := sa.getSecrets(ctx, t, cfg) + require.NoError(t, err) + + if len(secrets.Items) < count { + return false, nil + } + + haveContent := 0 + + for _, secret := range secrets.Items { + hasContent := true + + for key, value := range content { + secData, ok := secret.Data[key] + if !ok || string(secData) != value { + hasContent = false + + break + } + } + + if hasContent { + haveContent++ + } + } + + return haveContent >= count, nil + } + + require.NoError(t, sa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + + res := sa.clone() + res.SetBuilder(res.GetBuilder().Assess("hasContent", fn)) + + return res +} + +func NewSecretAssertion(opts ...assertion.AssertionOption) SecretAssertion { + return SecretAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.AssertionOption{assertion.WithBuilder(features.New("Secret").WithLabel("type", "secret"))}, + opts..., + )..., + ), + } +} diff --git a/internal/secrets/secrets_test.go b/internal/secrets/secrets_test.go new file mode 100644 index 0000000..7b43810 --- /dev/null +++ b/internal/secrets/secrets_test.go @@ -0,0 +1,36 @@ +package secrets_test + +import ( + "log/slog" + "os" + "testing" + + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + "sigs.k8s.io/e2e-framework/support/kind" + + "github.com/DWSR/kubeassert-go/internal/testhelpers" +) + +const secretPath = "./testdata/secret.yaml" + +var testEnv env.Environment + +func TestMain(m *testing.M) { + kindClusterName := envconf.RandomName("kind", 16) + + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))) + + testEnv = env.New(). + Setup( + envfuncs.CreateCluster(kind.NewProvider(), kindClusterName), + ). + BeforeEachFeature(testhelpers.CreateRandomNamespaceBeforeEachFeature()). 
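+ // Each feature runs against a freshly created random namespace that is deleted again afterwards.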
+ AfterEachFeature(testhelpers.DeleteRandomNamespaceAfterEachFeature()). + Finish( + envfuncs.DestroyCluster(kindClusterName), + ) + + os.Exit(testEnv.Run(m)) +} diff --git a/internal/secrets/testdata/secret.yaml b/internal/secrets/testdata/secret.yaml new file mode 100644 index 0000000..da9eb97 --- /dev/null +++ b/internal/secrets/testdata/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: test-secret + labels: + app.kubernetes.io/name: secrets_test +stringData: + foo: bar + bar: baz diff --git a/internal/testhelpers/test_asserts.go b/internal/testhelpers/test_asserts.go new file mode 100644 index 0000000..ba82e92 --- /dev/null +++ b/internal/testhelpers/test_asserts.go @@ -0,0 +1,48 @@ +package testhelpers + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" +) + +type ( + SuccessfulAssert struct { + Name string + SuccessfulAssert func(t require.TestingT) assertion.Assertion + } + + FailingAssert struct { + Name string + FailingAssert func(t require.TestingT) assertion.Assertion + } +) + +func TestSuccessfulAsserts(t *testing.T, testEnv env.Environment, asserts ...SuccessfulAssert) { + t.Helper() + + assertFeatures := make([]features.Feature, 0) + + for _, a := range asserts { + assertFeatures = append(assertFeatures, a.SuccessfulAssert(t).AsFeature()) + } + + testEnv.Test(t, assertFeatures...) +} + +func TestFailingAsserts(t *testing.T, testEnv env.Environment, asserts ...FailingAssert) { + t.Helper() + + for _, tc := range asserts { + t.Run(tc.Name, func(t *testing.T) { + mockT := &MockT{} + testEnv.Test(t, tc.FailingAssert(mockT).AsFeature()) + assert.True(t, mockT.Failed) + }) + } +} diff --git a/internal/testhelpers/testhelpers.go b/internal/testhelpers/testhelpers.go new file mode 100644 index 0000000..4d53b6f --- /dev/null +++ b/internal/testhelpers/testhelpers.go @@ -0,0 +1,93 @@ +package testhelpers + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" +) + +type ( + MockT struct { + Failed bool + } +) + +func (t *MockT) Errorf(_ string, _ ...interface{}) {} + +func (t *MockT) FailNow() { + t.Failed = true +} + +func CreateNamespaceBeforeEachFeature(namespaceName string) e2etypes.FeatureEnvFunc { + return func(ctx context.Context, cfg *envconf.Config, _ *testing.T, _ e2etypes.Feature) (context.Context, error) { + namespace := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}} + + client, err := cfg.NewClient() + if err != nil { + return ctx, err + } + + if err := client.Resources().Create(ctx, &namespace); err != nil { + return ctx, err + } + + cfg = cfg.WithNamespace(namespaceName) + + return context.WithValue(ctx, envfuncs.NamespaceContextKey(namespaceName), namespace), nil + } +} + +func DeleteNamespaceBeforeEachFeature(namespaceName string) e2etypes.FeatureEnvFunc { + return func(ctx context.Context, cfg *envconf.Config, _ *testing.T, _ e2etypes.Feature) (context.Context, error) { + var ns corev1.Namespace + + nsFromCtx := ctx.Value(envfuncs.NamespaceContextKey(namespaceName)) + if nsFromCtx == nil { + ns = corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: 
namespaceName}} + } else { + ns = nsFromCtx.(corev1.Namespace) + } + + client, err := cfg.NewClient() + if err != nil { + return ctx, err + } + + if err := client.Resources().Delete(ctx, &ns); err != nil { + return ctx, err + } + + cfg.WithNamespace("") + + return ctx, nil + } +} + +func CreateRandomNamespaceBeforeEachFeature() e2etypes.FeatureEnvFunc { + return func(ctx context.Context, cfg *envconf.Config, t *testing.T, feat e2etypes.Feature) (context.Context, error) { + return CreateNamespaceBeforeEachFeature(envconf.RandomName("test", 20))(ctx, cfg, t, feat) + } +} + +func DeleteRandomNamespaceAfterEachFeature() e2etypes.FeatureEnvFunc { + return func(ctx context.Context, cfg *envconf.Config, t *testing.T, feat e2etypes.Feature) (context.Context, error) { + nsName := cfg.Namespace() + + return DeleteNamespaceBeforeEachFeature(nsName)(ctx, cfg, t, feat) + } +} + +func MutateResourceName(resourceName string) decoder.DecodeOption { + return decoder.MutateOption(func(obj k8s.Object) error { + obj.SetName(resourceName) + + return nil + }) +} diff --git a/kubeassert.go b/kubeassert.go index ba561e4..891353a 100644 --- a/kubeassert.go +++ b/kubeassert.go @@ -3,312 +3,15 @@ package kubeassert import ( "context" "testing" - "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/dynamic" - "sigs.k8s.io/e2e-framework/klient/wait" - "sigs.k8s.io/e2e-framework/klient/wait/conditions" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" e2etypes "sigs.k8s.io/e2e-framework/pkg/types" ) -func NamespaceIsRestricted(namespaceName string) e2etypes.Feature { - return features.New("NamespaceIsRestricted"). - WithLabel("type", "namespace"). - AssessWithDescription( - "restrictedNamespace", - "Namespace should be restricted", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var ns corev1.Namespace - err := cfg.Client().Resources().Get(ctx, namespaceName, "", &ns) - require.NoError(t, err) - - nsLabels := ns.GetLabels() - - assert.Contains(t, nsLabels, "pod-security.kubernetes.io/enforce") - assert.Equal(t, "restricted", nsLabels["pod-security.kubernetes.io/enforce"]) - assert.Contains(t, nsLabels, "pod-security.kubernetes.io/audit") - assert.Equal(t, "restricted", nsLabels["pod-security.kubernetes.io/audit"]) - - return ctx - }). - Feature() -} - -func NamespaceExists(namespaceName string) e2etypes.Feature { - return features.New("NamespaceExists"). - WithLabel("type", "namespace"). - AssessWithDescription( - "namespaceExists", - "Namespace should exist", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - ns := &corev1.NamespaceList{ - Items: []corev1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}}}, - } - - err := wait.For( - conditions.New(cfg.Client().Resources()).ResourcesFound(ns), - wait.WithTimeout(3*time.Second), - wait.WithImmediate(), - ) - require.NoError(t, err) - - return ctx - }). - Feature() -} - -func DeploymentExists(namespaceName, deploymentName string) e2etypes.Feature { - return features.New("DeploymentExists"). - WithLabel("type", "deployment"). 
- AssessWithDescription( - "deploymentExists", - "Deployment should exist", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var dep appsv1.Deployment - - err := cfg.Client(). - Resources("deployments"). - WithNamespace(namespaceName). - Get(ctx, deploymentName, namespaceName, &dep) - require.NoError(t, err) - - return ctx - }). - Feature() -} - -func DeploymentAvailable(namespaceName, deploymentName string) e2etypes.Feature { - return features.New("DeploymentAvailable"). - WithLabel("type", "deployment"). - AssessWithDescription( - "deploymentAvailable", - "Deployment should be available", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - err := wait.For( - conditions.New(cfg.Client().Resources()).DeploymentAvailable(deploymentName, namespaceName), - wait.WithTimeout(2*time.Minute), - wait.WithImmediate(), - ) - require.NoError(t, err) - - return ctx - }). - Feature() -} - -func SecretExists(namespaceName, secretName string) e2etypes.Feature { - return features.New("SecretExists"). - WithLabel("type", "secret"). - AssessWithDescription( - "secretExists", - "Secret should exist", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var sec corev1.Secret - - err := cfg.Client(). - Resources("secrets"). - WithNamespace(namespaceName). - Get(ctx, secretName, namespaceName, &sec) - require.NoError(t, err) - - return ctx - }). - Feature() -} - -func SecretHasContent(namespaceName, secretName string, content map[string]string) e2etypes.Feature { - return features.New("SecretHasContent"). - WithLabel("type", "secret"). - AssessWithDescription( - "secretHasContent", - "Secret should have content", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var sec corev1.Secret - - err := cfg.Client(). - Resources("secrets"). - WithNamespace(namespaceName). - Get(ctx, secretName, namespaceName, &sec) - require.NoError(t, err) - - for k, v := range content { - secData, exists := sec.Data[k] - - require.True(t, exists) - assert.Equal(t, v, string(secData)) - } - - return ctx - }). - Feature() -} - -func DeploymentIsSystemClusterCritical(namespaceName, deploymentName string) e2etypes.Feature { - return features.New("DeploymentIsSystemClusterCritical"). - WithLabel("type", "deployment"). - AssessWithDescription( - "deploymentIsSystemClusterCritical", - "Deployment should be system-cluster-critical priority", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var deploy appsv1.Deployment - - err := cfg.Client(). - Resources("deployments"). - WithNamespace(namespaceName). - Get(ctx, deploymentName, namespaceName, &deploy) - require.NoError(t, err) - - assert.Equal(t, "system-cluster-critical", deploy.Spec.Template.Spec.PriorityClassName) - - return ctx - }). - Feature() -} - -func DeploymentHasNoCPULimits(namespaceName, deploymentName string) e2etypes.Feature { - return features.New("DeploymentHasNoCPULimits"). - WithLabel("type", "deployment"). - AssessWithDescription( - "deploymentHasNoCPULimits", - "Deployment should have no CPU limits", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var deploy appsv1.Deployment - - err := cfg.Client(). - Resources("deployments"). - WithNamespace(namespaceName). 
- Get(ctx, deploymentName, namespaceName, &deploy) - require.NoError(t, err) - - for _, container := range deploy.Spec.Template.Spec.Containers { - assert.True(t, container.Resources.Limits.Cpu().IsZero()) - } - - return ctx - }). - Feature() -} - -func DeploymentHasMemoryLimitsEqualToRequests(namespaceName, deploymentName string) e2etypes.Feature { - return features.New("DeploymentHasMemoryLimitsEqualToRequests"). - WithLabel("type", "deployment"). - AssessWithDescription( - "deploymentHasMemoryLimitsEqualToRequests", - "Deployment should have memory limits equal to requests", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var deploy appsv1.Deployment - - err := cfg.Client(). - Resources("deployments"). - WithNamespace(namespaceName). - Get(ctx, deploymentName, namespaceName, &deploy) - require.NoError(t, err) - - for _, container := range deploy.Spec.Template.Spec.Containers { - assert.NotNil(t, container.Resources.Limits.Memory()) - assert.Equal(t, container.Resources.Requests.Memory(), container.Resources.Limits.Memory()) - } - - return ctx - }). - Feature() -} - -func DeploymentHasMemoryRequests(namespaceName, deploymentName string) e2etypes.Feature { - return features.New("DeploymentHasMemoryRequests"). - WithLabel("type", "deployment"). - AssessWithDescription( - "deploymentHasMemoryRequests", - "Deployment should have memory requests", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var deploy appsv1.Deployment - - err := cfg.Client(). - Resources("deployments"). - WithNamespace(namespaceName). - Get(ctx, deploymentName, namespaceName, &deploy) - require.NoError(t, err) - - for _, container := range deploy.Spec.Template.Spec.Containers { - assert.False(t, container.Resources.Requests.Memory().IsZero()) - } - - return ctx - }). - Feature() -} - -func DeploymentHasCPURequests(namespaceName, deploymentName string) e2etypes.Feature { - return features.New("DeploymentHasCPURequests"). - WithLabel("type", "deployment"). - AssessWithDescription( - "deploymentHasCPURequests", - "Deployment should have CPU requests", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var deploy appsv1.Deployment - - err := cfg.Client(). - Resources("deployments"). - WithNamespace(namespaceName). - Get(ctx, deploymentName, namespaceName, &deploy) - require.NoError(t, err) - - for _, container := range deploy.Spec.Template.Spec.Containers { - assert.False(t, container.Resources.Requests.Cpu().IsZero()) - } - - return ctx - }). - Feature() -} - -func CRDExists(crdName, crdVersion string) e2etypes.Feature { - return features.New("CRDExists"). - WithLabel("type", "crd"). - AssessWithDescription( - "crdExists", - "CRD should exist", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var crd extv1.CustomResourceDefinition - - klient, err := cfg.NewClient() - require.NoError(t, err) - - client, err := dynamic.NewForConfig(klient.RESTConfig()) - require.NoError(t, err) - - unstructuredCRD, err := client. - Resource(extv1.SchemeGroupVersion.WithResource("customresourcedefinitions")). - Get(ctx, crdName, metav1.GetOptions{}) - require.NoError(t, err) - - err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredCRD.UnstructuredContent(), &crd) - require.NoError(t, err) - - foundVersion := false - for _, v := range crd.Spec.Versions { - if crdVersion == v.Name { - foundVersion = true - } - } - - assert.True(t, foundVersion) - - return ctx - }). 
- Feature() -} - func PodDisruptionBudgetExists(namespaceName, pdbName string) e2etypes.Feature { return features.New("PodDisruptionBudgetExists"). WithLabel("type", "pdb"). From 8d8b36803c7c9b13a55f57c651b1b7301e89555a Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sat, 8 Mar 2025 21:44:04 -0500 Subject: [PATCH 02/29] use matrix for tests --- .github/workflows/pull_request.yml | 2 +- .github/workflows/push-main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 93dc6c1..74d1875 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -32,7 +32,7 @@ jobs: tests: name: Tests - uses: DWSR/actions/.github/workflows/go-test-single.yml@main + uses: DWSR/actions/.github/workflows/go-test-matrix.yml@main autoapprove-dependabot: name: Auto-approve Dependabot PRs diff --git a/.github/workflows/push-main.yml b/.github/workflows/push-main.yml index 8f51e5c..74a51a8 100644 --- a/.github/workflows/push-main.yml +++ b/.github/workflows/push-main.yml @@ -32,4 +32,4 @@ jobs: tests: name: Tests - uses: DWSR/actions/.github/workflows/go-test-single.yml@main + uses: DWSR/actions/.github/workflows/go-test-matrix.yml@main From ae054035753ea11456082f83221c30bb4f592f97 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 15:37:39 -0400 Subject: [PATCH 03/29] wip --- assertions.go | 45 -------------------- internal/pdbs/pdbs.go | 65 ++++++++++++++++++++++++++++ kubeassert.go | 98 +++++++++++++++++-------------------------- 3 files changed, 104 insertions(+), 104 deletions(-) delete mode 100644 assertions.go create mode 100644 internal/pdbs/pdbs.go diff --git a/assertions.go b/assertions.go deleted file mode 100644 index 4a9ec2d..0000000 --- a/assertions.go +++ /dev/null @@ -1,45 +0,0 @@ -package kubeassert - -import ( - "github.com/DWSR/kubeassert-go/internal/assertion" - "github.com/DWSR/kubeassert-go/internal/assertionhelpers" - "github.com/DWSR/kubeassert-go/internal/crds" - "github.com/DWSR/kubeassert-go/internal/deployments" - "github.com/DWSR/kubeassert-go/internal/namespaces" - "github.com/DWSR/kubeassert-go/internal/pods" - "github.com/DWSR/kubeassert-go/internal/secrets" -) - -type ( - Assertion = assertion.Assertion - DeploymentAssertion = deployments.DeploymentAssertion - NamespaceAssertion = namespaces.NamespaceAssertion - CRDAssertion = crds.CRDAssertion - PodAssertion = pods.PodAssertion -) - -var ( - WithLabels = assertion.WithResourceLabels - WithFields = assertion.WithResourceFields - WithInterval = assertion.WithInterval - WithTimeout = assertion.WithTimeout - WithBuilder = assertion.WithBuilder - WithRequireT = assertion.WithRequireT - WithNamespace = assertion.WithResourceNamespace - WithNamespaceFromEnv = assertion.WithResourceNamespaceFromTestEnv - WithResourceName = assertion.WithResourceName - WithSetup = assertion.WithSetup - WithTeardown = assertion.WithTeardown - - NewDeploymentAssertion = deployments.NewDeploymentAssertion - NewNamespaceAssertion = namespaces.NewNamespaceAssertion - NewCRDAssertion = crds.NewCRDAssertion - NewPodAssertion = pods.NewPodAssertion - NewSecretAssertion = secrets.NewSecretAssertion - - ApplyKustomization = assertionhelpers.ApplyKustomization - CreateResourceFromPath = assertionhelpers.CreateResourceFromPath - DeleteResourceFromPath = assertionhelpers.DeleteResourceFromPath - Sleep = assertionhelpers.Sleep - TestAssertions = assertionhelpers.TestAssertions -) diff --git a/internal/pdbs/pdbs.go 
b/internal/pdbs/pdbs.go new file mode 100644 index 0000000..e78e815 --- /dev/null +++ b/internal/pdbs/pdbs.go @@ -0,0 +1,65 @@ +package pdbs + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + policyv1 "k8s.io/api/policy/v1" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" +) + +func PodDisruptionBudgetExists(namespaceName, pdbName string) e2etypes.Feature { + return features.New("PodDisruptionBudgetExists"). + WithLabel("type", "pdb"). + AssessWithDescription( + "pdbExists", + "PDB should exist", + func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + var pdb policyv1.PodDisruptionBudget + + err := cfg.Client(). + Resources("poddisruptionbudgets"). + WithNamespace(namespaceName). + Get(ctx, pdbName, namespaceName, &pdb) + require.NoError(t, err) + + return ctx + }). + Feature() +} + +func PodDisruptionBudgetTargetsDeployment(namespaceName, pdbName, deployName string) e2etypes.Feature { + return features.New("PodDisruptionBudgetTargetsDeployment"). + WithLabel("type", "pdb"). + AssessWithDescription( + "pdbTargetsDeployment", + "PDB should target deployment", + func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + var pdb policyv1.PodDisruptionBudget + var deploy appsv1.Deployment + + err := cfg.Client(). + Resources("poddisruptionbudgets"). + WithNamespace(namespaceName). + Get(ctx, pdbName, namespaceName, &pdb) + require.NoError(t, err) + + err = cfg.Client(). + Resources("deployments"). + WithNamespace(namespaceName). + Get(ctx, deployName, namespaceName, &deploy) + require.NoError(t, err) + + for labelKey, labelValue := range pdb.Spec.Selector.MatchLabels { + require.Equal(t, deploy.Spec.Selector.MatchLabels, labelKey) + require.Equal(t, deploy.Spec.Selector.MatchLabels[labelKey], labelValue) + } + + return ctx + }). + Feature() +} diff --git a/kubeassert.go b/kubeassert.go index 891353a..4a9ec2d 100644 --- a/kubeassert.go +++ b/kubeassert.go @@ -1,65 +1,45 @@ package kubeassert import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - policyv1 "k8s.io/api/policy/v1" - "sigs.k8s.io/e2e-framework/pkg/envconf" - "sigs.k8s.io/e2e-framework/pkg/features" - e2etypes "sigs.k8s.io/e2e-framework/pkg/types" + "github.com/DWSR/kubeassert-go/internal/assertion" + "github.com/DWSR/kubeassert-go/internal/assertionhelpers" + "github.com/DWSR/kubeassert-go/internal/crds" + "github.com/DWSR/kubeassert-go/internal/deployments" + "github.com/DWSR/kubeassert-go/internal/namespaces" + "github.com/DWSR/kubeassert-go/internal/pods" + "github.com/DWSR/kubeassert-go/internal/secrets" ) -func PodDisruptionBudgetExists(namespaceName, pdbName string) e2etypes.Feature { - return features.New("PodDisruptionBudgetExists"). - WithLabel("type", "pdb"). - AssessWithDescription( - "pdbExists", - "PDB should exist", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var pdb policyv1.PodDisruptionBudget - - err := cfg.Client(). - Resources("poddisruptionbudgets"). - WithNamespace(namespaceName). - Get(ctx, pdbName, namespaceName, &pdb) - require.NoError(t, err) - - return ctx - }). - Feature() -} - -func PodDisruptionBudgetTargetsDeployment(namespaceName, pdbName, deployName string) e2etypes.Feature { - return features.New("PodDisruptionBudgetTargetsDeployment"). - WithLabel("type", "pdb"). 
- AssessWithDescription( - "pdbTargetsDeployment", - "PDB should target deployment", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var pdb policyv1.PodDisruptionBudget - var deploy appsv1.Deployment - - err := cfg.Client(). - Resources("poddisruptionbudgets"). - WithNamespace(namespaceName). - Get(ctx, pdbName, namespaceName, &pdb) - require.NoError(t, err) - - err = cfg.Client(). - Resources("deployments"). - WithNamespace(namespaceName). - Get(ctx, deployName, namespaceName, &deploy) - require.NoError(t, err) - - for labelKey, labelValue := range pdb.Spec.Selector.MatchLabels { - require.Equal(t, deploy.Spec.Selector.MatchLabels, labelKey) - require.Equal(t, deploy.Spec.Selector.MatchLabels[labelKey], labelValue) - } +type ( + Assertion = assertion.Assertion + DeploymentAssertion = deployments.DeploymentAssertion + NamespaceAssertion = namespaces.NamespaceAssertion + CRDAssertion = crds.CRDAssertion + PodAssertion = pods.PodAssertion +) - return ctx - }). - Feature() -} +var ( + WithLabels = assertion.WithResourceLabels + WithFields = assertion.WithResourceFields + WithInterval = assertion.WithInterval + WithTimeout = assertion.WithTimeout + WithBuilder = assertion.WithBuilder + WithRequireT = assertion.WithRequireT + WithNamespace = assertion.WithResourceNamespace + WithNamespaceFromEnv = assertion.WithResourceNamespaceFromTestEnv + WithResourceName = assertion.WithResourceName + WithSetup = assertion.WithSetup + WithTeardown = assertion.WithTeardown + + NewDeploymentAssertion = deployments.NewDeploymentAssertion + NewNamespaceAssertion = namespaces.NewNamespaceAssertion + NewCRDAssertion = crds.NewCRDAssertion + NewPodAssertion = pods.NewPodAssertion + NewSecretAssertion = secrets.NewSecretAssertion + + ApplyKustomization = assertionhelpers.ApplyKustomization + CreateResourceFromPath = assertionhelpers.CreateResourceFromPath + DeleteResourceFromPath = assertionhelpers.DeleteResourceFromPath + Sleep = assertionhelpers.Sleep + TestAssertions = assertionhelpers.TestAssertions +) From 65ce3c697a533ee23ed5dcdde7bfa7003bb39e77 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 15:50:42 -0400 Subject: [PATCH 04/29] wip --- kubeassert.go | 1 - 1 file changed, 1 deletion(-) diff --git a/kubeassert.go b/kubeassert.go index 4a9ec2d..d330af1 100644 --- a/kubeassert.go +++ b/kubeassert.go @@ -11,7 +11,6 @@ import ( ) type ( - Assertion = assertion.Assertion DeploymentAssertion = deployments.DeploymentAssertion NamespaceAssertion = namespaces.NamespaceAssertion CRDAssertion = crds.CRDAssertion From 1ba923ee10fb27df7540609034d92014c8e23fc2 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 15:55:03 -0400 Subject: [PATCH 05/29] wip --- internal/pdbs/pdbs.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/internal/pdbs/pdbs.go b/internal/pdbs/pdbs.go index e78e815..0ea76d3 100644 --- a/internal/pdbs/pdbs.go +++ b/internal/pdbs/pdbs.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/DWSR/kubeassert-go/internal/assertion" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" policyv1 "k8s.io/api/policy/v1" @@ -12,6 +13,16 @@ import ( e2etypes "sigs.k8s.io/e2e-framework/pkg/types" ) +type PDBAssertion struct { + assertion.Assertion +} + +func (pa PDBAssertion) clone() PDBAssertion { + return PDBAssertion{ + Assertion: assertion.CloneAssertion(pa.Assertion), + } +} + func PodDisruptionBudgetExists(namespaceName, pdbName string) e2etypes.Feature { return features.New("PodDisruptionBudgetExists"). 
WithLabel("type", "pdb"). From e8f1eaceb0e9f55977aca8a82b9eebbb256f40f5 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 15:59:31 -0400 Subject: [PATCH 06/29] wip --- internal/pdbs/pdbs.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/internal/pdbs/pdbs.go b/internal/pdbs/pdbs.go index 0ea76d3..d1722dc 100644 --- a/internal/pdbs/pdbs.go +++ b/internal/pdbs/pdbs.go @@ -74,3 +74,14 @@ func PodDisruptionBudgetTargetsDeployment(namespaceName, pdbName, deployName str }). Feature() } + +func NewPDBAssertion(opts ...assertion.AssertionOption) PDBAssertion { + return PDBAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.AssertionOption{assertion.WithBuilder(features.New("CRD").WithLabel("type", "poddisruptionbudget"))}, + opts..., + )..., + ), + } +} From 9d09425dcce19cf5e9c1b16e06750ce41957088d Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 16:20:49 -0400 Subject: [PATCH 07/29] wip --- internal/pdbs/pdbs.go | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/internal/pdbs/pdbs.go b/internal/pdbs/pdbs.go index d1722dc..ae64796 100644 --- a/internal/pdbs/pdbs.go +++ b/internal/pdbs/pdbs.go @@ -4,13 +4,16 @@ import ( "context" "testing" - "github.com/DWSR/kubeassert-go/internal/assertion" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" e2etypes "sigs.k8s.io/e2e-framework/pkg/types" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) type PDBAssertion struct { @@ -23,6 +26,29 @@ func (pa PDBAssertion) clone() PDBAssertion { } } +func (pa PDBAssertion) getPDBs( + ctx context.Context, + t require.TestingT, + cfg *envconf.Config, +) (policyv1.PodDisruptionBudgetList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var pdbList policyv1.PodDisruptionBudgetList + + list, err := client.Resource(policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets")). + List(ctx, pa.ListOptions(cfg)) + if err != nil { + return pdbList, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &pdbList) + if err != nil { + return pdbList, err + } + + return pdbList, nil +} + func PodDisruptionBudgetExists(namespaceName, pdbName string) e2etypes.Feature { return features.New("PodDisruptionBudgetExists"). WithLabel("type", "pdb"). 
From eb817f27ba0686386d9c3e9464a20df915ba4d35 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 16:25:31 -0400 Subject: [PATCH 08/29] wip --- internal/pdbs/pdbs.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/internal/pdbs/pdbs.go b/internal/pdbs/pdbs.go index ae64796..1bc8686 100644 --- a/internal/pdbs/pdbs.go +++ b/internal/pdbs/pdbs.go @@ -49,6 +49,27 @@ func (pa PDBAssertion) getPDBs( return pdbList, nil } +func (pa PDBAssertion) ExactlyNExist(count int) PDBAssertion { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) + + conditionFunc := func(ctx context.Context) (bool, error) { + pdbs, err := pa.getPDBs(ctx, t, cfg) + require.NoError(t, err) + + return len(pdbs.Items) == count, nil + } + + require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + + return ctx + } + res := pa.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn)) + + return res +} + func PodDisruptionBudgetExists(namespaceName, pdbName string) e2etypes.Feature { return features.New("PodDisruptionBudgetExists"). WithLabel("type", "pdb"). From 9d5513692578a896538fdbe40f92f51c2de540c0 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 16:31:11 -0400 Subject: [PATCH 09/29] wip --- internal/pdbs/pdbs.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/pdbs/pdbs.go b/internal/pdbs/pdbs.go index 1bc8686..a0d20b8 100644 --- a/internal/pdbs/pdbs.go +++ b/internal/pdbs/pdbs.go @@ -49,6 +49,10 @@ func (pa PDBAssertion) getPDBs( return pdbList, nil } +func (pa PDBAssertion) Exists() PDBAssertion { + return pa.ExactlyNExist(1) +} + func (pa PDBAssertion) ExactlyNExist(count int) PDBAssertion { stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) From b15e1051cac22aad9ab31e2d298b60de88679e07 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 16:36:36 -0400 Subject: [PATCH 10/29] wip --- internal/deployments/3deployment_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/deployments/3deployment_test.go b/internal/deployments/3deployment_test.go index 8daf49e..7437a9a 100644 --- a/internal/deployments/3deployment_test.go +++ b/internal/deployments/3deployment_test.go @@ -293,7 +293,7 @@ func Test_3Deployments_Fail(t *testing.T) { return deployments.NewDeploymentAssertion( assertion.WithRequireT(t), - assertion.WithTimeout(15*time.Second), + assertion.WithTimeout(20*time.Second), assertion.WithResourceNamespaceFromTestEnv(), assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), assertion.WithSetup( @@ -310,7 +310,7 @@ func Test_3Deployments_Fail(t *testing.T) { return deployments.NewDeploymentAssertion( assertion.WithRequireT(t), - assertion.WithTimeout(15*time.Second), + assertion.WithTimeout(20*time.Second), assertion.WithResourceNamespaceFromTestEnv(), assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), assertion.WithSetup(createGoodDeploys(deployNames)...), From 6294409bbc7328ef573102ebf413eb1350692bef Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 19:50:07 -0400 Subject: [PATCH 11/29] wip --- internal/deployments/3deployment_test.go | 6 +++--- internal/deployments/deployments.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/internal/deployments/3deployment_test.go b/internal/deployments/3deployment_test.go index 7437a9a..034d7ef 100644 --- a/internal/deployments/3deployment_test.go +++ b/internal/deployments/3deployment_test.go @@ -343,7 +343,7 @@ func Test_3Deployments_Fail(t *testing.T) { assertion.WithInterval(100*time.Millisecond), assertion.WithResourceNamespaceFromTestEnv(), assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), - assertion.WithSetup(createGoodDeploys(deployNames)...), + assertion.WithSetup(createBadDeploys(deployNames)...), ).ExactlyNExist(3).AtLeastNAreSystemClusterCritical(4) }, }, @@ -358,8 +358,8 @@ func Test_3Deployments_Fail(t *testing.T) { assertion.WithInterval(100*time.Millisecond), assertion.WithResourceNamespaceFromTestEnv(), assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": deployNames[0]}), - assertion.WithSetup(createGoodDeploys(deployNames)...), - ).ExactlyNExist(3).ExactlyNHaveNoCPULimits(2) + assertion.WithSetup(createBadDeploys(deployNames)...), + ).ExactlyNExist(3).ExactlyNHaveNoCPULimits(3) }, }, { diff --git a/internal/deployments/deployments.go b/internal/deployments/deployments.go index 8906bb1..1391158 100644 --- a/internal/deployments/deployments.go +++ b/internal/deployments/deployments.go @@ -104,7 +104,7 @@ func (da DeploymentAssertion) ExactlyNAreAvailable(count int) DeploymentAssertio deploys, err := da.getDeployments(ctx, t, cfg) require.NoError(t, err) - if len(deploys.Items) < count { + if len(deploys.Items) != count { return false, nil } From d0210c362a500556a882271f267fb2b4e783eafb Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 20:11:37 -0400 Subject: [PATCH 12/29] wip --- internal/pdbs/pdbs.go | 62 ++----------------------------------------- 1 file changed, 2 insertions(+), 60 deletions(-) diff --git a/internal/pdbs/pdbs.go b/internal/pdbs/pdbs.go index a0d20b8..b361d49 100644 --- a/internal/pdbs/pdbs.go +++ b/internal/pdbs/pdbs.go @@ -5,12 +5,10 @@ import ( "testing" "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" - e2etypes "sigs.k8s.io/e2e-framework/pkg/types" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" @@ -50,10 +48,6 @@ func (pa PDBAssertion) getPDBs( } func (pa PDBAssertion) Exists() PDBAssertion { - return pa.ExactlyNExist(1) -} - -func (pa PDBAssertion) ExactlyNExist(count int) PDBAssertion { stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) @@ -61,7 +55,7 @@ func (pa PDBAssertion) ExactlyNExist(count int) PDBAssertion { pdbs, err := pa.getPDBs(ctx, t, cfg) require.NoError(t, err) - return len(pdbs.Items) == count, nil + return len(pdbs.Items) == 1, nil } require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) @@ -69,63 +63,11 @@ func (pa PDBAssertion) ExactlyNExist(count int) PDBAssertion { return ctx } res := pa.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn)) + res.SetBuilder(res.GetBuilder().Assess("exists", stepFn)) return res } -func PodDisruptionBudgetExists(namespaceName, pdbName string) e2etypes.Feature { - return features.New("PodDisruptionBudgetExists"). - WithLabel("type", "pdb"). 
- AssessWithDescription( - "pdbExists", - "PDB should exist", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var pdb policyv1.PodDisruptionBudget - - err := cfg.Client(). - Resources("poddisruptionbudgets"). - WithNamespace(namespaceName). - Get(ctx, pdbName, namespaceName, &pdb) - require.NoError(t, err) - - return ctx - }). - Feature() -} - -func PodDisruptionBudgetTargetsDeployment(namespaceName, pdbName, deployName string) e2etypes.Feature { - return features.New("PodDisruptionBudgetTargetsDeployment"). - WithLabel("type", "pdb"). - AssessWithDescription( - "pdbTargetsDeployment", - "PDB should target deployment", - func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var pdb policyv1.PodDisruptionBudget - var deploy appsv1.Deployment - - err := cfg.Client(). - Resources("poddisruptionbudgets"). - WithNamespace(namespaceName). - Get(ctx, pdbName, namespaceName, &pdb) - require.NoError(t, err) - - err = cfg.Client(). - Resources("deployments"). - WithNamespace(namespaceName). - Get(ctx, deployName, namespaceName, &deploy) - require.NoError(t, err) - - for labelKey, labelValue := range pdb.Spec.Selector.MatchLabels { - require.Equal(t, deploy.Spec.Selector.MatchLabels, labelKey) - require.Equal(t, deploy.Spec.Selector.MatchLabels[labelKey], labelValue) - } - - return ctx - }). - Feature() -} - func NewPDBAssertion(opts ...assertion.AssertionOption) PDBAssertion { return PDBAssertion{ Assertion: assertion.NewAssertion( From c1c69c9ff45cf8d308fe33f6eee423eecbda1487 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 20:47:56 -0400 Subject: [PATCH 13/29] wip --- internal/assertion/assertion.go | 56 +++++++++++++------ internal/assertion/assertion_options.go | 25 +++++---- internal/assertion/common_assertion.go | 28 +++------- internal/assertion/list_options.go | 2 +- internal/assertionhelpers/assertionhelpers.go | 22 +++++++- internal/crds/crds.go | 10 ++-- internal/deployments/deployments.go | 38 ++++++------- internal/namespaces/namespaces.go | 14 ++--- internal/pdbs/pdbs.go | 8 +-- internal/pods/3pod_test.go | 4 +- internal/pods/pods.go | 14 ++--- internal/secrets/secrets.go | 14 ++--- 12 files changed, 131 insertions(+), 104 deletions(-) diff --git a/internal/assertion/assertion.go b/internal/assertion/assertion.go index 85089b5..b30a6dc 100644 --- a/internal/assertion/assertion.go +++ b/internal/assertion/assertion.go @@ -1,54 +1,74 @@ +// The assertion package provides common functionality used to define assertions against a set of one or more +// Kubernetes resources. package assertion import ( - "context" "time" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apimachinerywait "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" ) type ( + // Assertion is an interface for a generic assertion (har) about the state of one or more resources in a + // Kubernetes cluster. It is embedded into other assertion types to provide common functionality. Assertion interface { - setLabels(assertLabels map[string]string) + optionSetters - GetLabels() map[string]string + clone() Assertion - setFields(assertFields map[string]string) + // GetLabels returns the labels (i.e. metadata.labels) used to select resources for the assertion. + GetLabels() map[string]string + // GetFields returns the fields used to select resources for the assertion. 
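The resource-specific types elsewhere in this series (PDBAssertion, CRDAssertion, PodAssertion, SecretAssertion) all wrap this interface the same way. As a sketch of that pattern for a type the series does not cover: the ConfigMapAssertion name, resource kind, and builder label below are illustrative assumptions, and only the shape (embed the interface, copy through assertion.Clone, prepend a default builder in the constructor) is taken from the diff itself.

package configmaps

import (
	"sigs.k8s.io/e2e-framework/pkg/features"

	"github.com/DWSR/kubeassert-go/internal/assertion"
)

// ConfigMapAssertion is a hypothetical wrapper that embeds the shared Assertion
// interface, mirroring PDBAssertion and SecretAssertion.
type ConfigMapAssertion struct {
	assertion.Assertion
}

// clone copies the assertion so chained methods never mutate their receiver.
func (ca ConfigMapAssertion) clone() ConfigMapAssertion {
	return ConfigMapAssertion{Assertion: assertion.Clone(ca.Assertion)}
}

// NewConfigMapAssertion prepends a default FeatureBuilder; callers can still
// override it with assertion.WithBuilder.
func NewConfigMapAssertion(opts ...assertion.Option) ConfigMapAssertion {
	return ConfigMapAssertion{
		Assertion: assertion.NewAssertion(
			append(
				[]assertion.Option{assertion.WithBuilder(features.New("ConfigMap").WithLabel("type", "configmap"))},
				opts...,
			)...,
		),
	}
}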
GetFields() map[string]string - setListOptionsFn(fn listOptionsFunc) - + // GetListOptions returns the ListOptions used to when listing resources from the API server. ListOptions(cfg *envconf.Config) metav1.ListOptions - setInterval(interval time.Duration) - + // GetInterval returns the interval used when polling for the assertion to be true. GetInterval() time.Duration - setTimeout(timeout time.Duration) - + // GetTimeout returns the timeout used when polling for the assertion to be true. GetTimeout() time.Duration + // GetBuilder returns the features.FeatureBuilder used to build the e2e-framework Feature. GetBuilder() *features.FeatureBuilder + // SetBuilder sets the features.FeatureBuilder used to build the e2e-framework Feature. SetBuilder(builder *features.FeatureBuilder) + // GetRequireT returns the require.TestingT used to report test failures. This is largely intended for testing + // as it enables detection of failing require/assert statements. GetRequireT() require.TestingT + } + // private methods to enable the use of assertion options without accidentally exposing the functionality outside of + // the package. + optionSetters interface { + setLabels(assertLabels map[string]string) + setFields(assertFields map[string]string) + setListOptionsFn(fn listOptionsFunc) + setInterval(interval time.Duration) + setTimeout(timeout time.Duration) setRequireT(t require.TestingT) - - WaitForCondition(ctx context.Context, conditionFunc apimachinerywait.ConditionWithContextFunc) error - - clone() Assertion - - AsFeature() features.Feature } ) -func CloneAssertion(a Assertion) Assertion { +// Clone clones an assertion. This is done this way instead of providing an exported Clone method in order +// to avoid having the Clone method exported on all assertion types. +// +//nolint:ireturn +func Clone(a Assertion) Assertion { return a.clone() } + +// AsFeature returns an e2e-framework Feature based on the supplied Assertion. This can be used to integrate Assertions +// into existing e2e-framework tests. +// +//nolint:ireturn +func AsFeature(assert Assertion) features.Feature { + return assert.GetBuilder().Feature() +} diff --git a/internal/assertion/assertion_options.go b/internal/assertion/assertion_options.go index 40b6dbd..2dc5e76 100644 --- a/internal/assertion/assertion_options.go +++ b/internal/assertion/assertion_options.go @@ -8,45 +8,46 @@ import ( e2etypes "sigs.k8s.io/e2e-framework/pkg/types" ) -type AssertionOption func(Assertion) +// Option is a function that configures one or more facets of an Assertion. 
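Because an Option is just a function over the Assertion interface, callers can bundle the options they repeat across tests into a preset. A minimal sketch, assuming a hypothetical withDefaults helper and illustrative label values; the individual options and the chained DeploymentAssertion methods are the ones defined in this change.

package deployments_test

import (
	"testing"
	"time"

	"github.com/DWSR/kubeassert-go/internal/assertion"
	"github.com/DWSR/kubeassert-go/internal/deployments"
)

// withDefaults bundles the options these tests tend to repeat: a short poll
// interval, a timeout, and namespace resolution from the test environment.
func withDefaults(t *testing.T) []assertion.Option {
	return []assertion.Option{
		assertion.WithRequireT(t),
		assertion.WithTimeout(20 * time.Second),
		assertion.WithInterval(100 * time.Millisecond),
		assertion.WithResourceNamespaceFromTestEnv(),
	}
}

// exampleAssertion builds a chained deployment assertion on top of the preset.
func exampleAssertion(t *testing.T) deployments.DeploymentAssertion {
	opts := append(
		withDefaults(t),
		assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/name": "example"}),
	)

	return deployments.NewDeploymentAssertion(opts...).ExactlyNExist(3).AtLeastNAreAvailable(2)
}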
+type Option func(Assertion) -func WithResourceLabels(labels map[string]string) AssertionOption { +func WithResourceLabels(labels map[string]string) Option { return func(a Assertion) { a.setLabels(labels) } } -func WithResourceFields(fields map[string]string) AssertionOption { +func WithResourceFields(fields map[string]string) Option { return func(a Assertion) { a.setFields(fields) } } -func WithInterval(interval time.Duration) AssertionOption { +func WithInterval(interval time.Duration) Option { return func(a Assertion) { a.setInterval(interval) } } -func WithTimeout(timeout time.Duration) AssertionOption { +func WithTimeout(timeout time.Duration) Option { return func(a Assertion) { a.setTimeout(timeout) } } -func WithBuilder(builder *features.FeatureBuilder) AssertionOption { +func WithBuilder(builder *features.FeatureBuilder) Option { return func(a Assertion) { a.SetBuilder(builder) } } -func WithRequireT(requireT require.TestingT) AssertionOption { +func WithRequireT(requireT require.TestingT) Option { return func(a Assertion) { a.setRequireT(requireT) } } -func WithResourceNamespace(namespaceName string) AssertionOption { +func WithResourceNamespace(namespaceName string) Option { return func(a Assertion) { newFields := a.GetFields() newFields["metadata.namespace"] = namespaceName @@ -54,13 +55,13 @@ func WithResourceNamespace(namespaceName string) AssertionOption { } } -func WithResourceNamespaceFromTestEnv() AssertionOption { +func WithResourceNamespaceFromTestEnv() Option { return func(a Assertion) { a.setListOptionsFn(listOptionsWithNamespaceFromEnv) } } -func WithResourceName(name string) AssertionOption { +func WithResourceName(name string) Option { return func(a Assertion) { newFields := a.GetFields() newFields["metadata.name"] = name @@ -68,7 +69,7 @@ func WithResourceName(name string) AssertionOption { } } -func WithSetup(steps ...e2etypes.StepFunc) AssertionOption { +func WithSetup(steps ...e2etypes.StepFunc) Option { return func(a Assertion) { builder := a.GetBuilder() for _, s := range steps { @@ -78,7 +79,7 @@ func WithSetup(steps ...e2etypes.StepFunc) AssertionOption { } } -func WithTeardown(steps ...e2etypes.StepFunc) AssertionOption { +func WithTeardown(steps ...e2etypes.StepFunc) Option { return func(a Assertion) { builder := a.GetBuilder() for _, s := range steps { diff --git a/internal/assertion/common_assertion.go b/internal/assertion/common_assertion.go index c68c255..68961ac 100644 --- a/internal/assertion/common_assertion.go +++ b/internal/assertion/common_assertion.go @@ -1,14 +1,11 @@ package assertion import ( - "context" "time" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - apimachinerywait "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/e2e-framework/klient/wait" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" ) @@ -75,14 +72,11 @@ func (ca *commonAssertion) setRequireT(requireT require.TestingT) { ca.requireT = requireT } +//nolint:ireturn func (ca *commonAssertion) GetRequireT() require.TestingT { return ca.requireT } -func (ca *commonAssertion) AsFeature() features.Feature { - return ca.builder.Feature() -} - func (ca *commonAssertion) setListOptionsFn(fn listOptionsFunc) { ca.listOptionsFn = fn } @@ -91,6 +85,7 @@ func (ca *commonAssertion) ListOptions(cfg *envconf.Config) metav1.ListOptions { return ca.listOptionsFn(ca, cfg) } +//nolint:ireturn func (ca *commonAssertion) clone() Assertion { return &commonAssertion{ builder: ca.builder, @@ -104,21 
+99,14 @@ func (ca *commonAssertion) clone() Assertion { } } -func (ca *commonAssertion) WaitForCondition(ctx context.Context, conditionFunc apimachinerywait.ConditionWithContextFunc) error { - return wait.For( - conditionFunc, - wait.WithContext(ctx), - wait.WithTimeout(ca.timeout), - wait.WithInterval(ca.interval), - wait.WithImmediate(), - ) -} - -func NewAssertion(opts ...AssertionOption) Assertion { +// NewAssertion creates a new Assertion with the provided options. +// +//nolint:ireturn +func NewAssertion(opts ...Option) Assertion { assertion := commonAssertion{ builder: features.New("default"), - assertFields: map[string]string{}, - assertLabels: map[string]string{}, + assertFields: make(map[string]string), + assertLabels: make(map[string]string), timeout: defaultTimeout, interval: defaultInterval, requireT: nil, diff --git a/internal/assertion/list_options.go b/internal/assertion/list_options.go index 71e38c5..cc45828 100644 --- a/internal/assertion/list_options.go +++ b/internal/assertion/list_options.go @@ -9,7 +9,7 @@ import ( type listOptionsFunc func(*commonAssertion, *envconf.Config) metav1.ListOptions -func defaultListOptions(ca *commonAssertion, cfg *envconf.Config) metav1.ListOptions { +func defaultListOptions(ca *commonAssertion, _ *envconf.Config) metav1.ListOptions { return metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set(ca.assertLabels)).String(), FieldSelector: fields.SelectorFromSet(fields.Set(ca.assertFields)).String(), diff --git a/internal/assertionhelpers/assertionhelpers.go b/internal/assertionhelpers/assertionhelpers.go index eeee655..3f81b45 100644 --- a/internal/assertionhelpers/assertionhelpers.go +++ b/internal/assertionhelpers/assertionhelpers.go @@ -13,12 +13,14 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + apimachinerywait "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/restmapper" "sigs.k8s.io/e2e-framework/klient/decoder" "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/klient/wait" "sigs.k8s.io/e2e-framework/pkg/env" "sigs.k8s.io/e2e-framework/pkg/envconf" e2etypes "sigs.k8s.io/e2e-framework/pkg/types" @@ -108,8 +110,8 @@ func RequireTIfNotNil(testingT *testing.T, requireT require.TestingT) require.Te func TestAssertions(t *testing.T, testEnv env.Environment, assertions ...assertion.Assertion) { tests := make([]e2etypes.Feature, 0, len(assertions)) - for _, assertion := range assertions { - tests = append(tests, assertion.AsFeature()) + for _, assert := range assertions { + tests = append(tests, assertion.AsFeature(assert)) } testEnv.Test(t, tests...) @@ -209,3 +211,19 @@ func ApplyKustomization(kustDir string) env.Func { return ctx, nil } } + +// WaitForCondition waits for a conditionFunc to be satisfied (i.e. return true) based on the timeout and interval set +// on the Assertion. 
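A sketch of how a custom check could reuse this helper outside the built-in methods: build a condition function, poll it with WaitForCondition, and attach the step through the assertion's builder. The AllHaveAnnotation method and the annotation key are assumptions for illustration; getDeployments, RequireTIfNotNil, clone, and GetBuilder().Assess are the pieces the rest of this change already uses.

package deployments

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"sigs.k8s.io/e2e-framework/pkg/envconf"

	helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers"
)

// AllHaveAnnotation is a hypothetical assertion that waits until every matching
// Deployment carries the given annotation key.
func (da DeploymentAssertion) AllHaveAnnotation(key string) DeploymentAssertion {
	stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context {
		t := helpers.RequireTIfNotNil(testingT, da.GetRequireT())

		conditionFunc := func(ctx context.Context) (bool, error) {
			deploys, err := da.getDeployments(ctx, t, cfg)
			require.NoError(t, err)

			for _, deploy := range deploys.Items {
				if _, ok := deploy.GetAnnotations()[key]; !ok {
					return false, nil
				}
			}

			return len(deploys.Items) > 0, nil
		}

		require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc))

		return ctx
	}

	res := da.clone()
	res.SetBuilder(res.GetBuilder().Assess("allHaveAnnotation", stepFn))

	return res
}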
+func WaitForCondition( + ctx context.Context, + assert assertion.Assertion, + conditionFunc apimachinerywait.ConditionWithContextFunc, +) error { + return wait.For( + conditionFunc, + wait.WithContext(ctx), + wait.WithTimeout(assert.GetTimeout()), + wait.WithInterval(assert.GetInterval()), + wait.WithImmediate(), + ) +} diff --git a/internal/crds/crds.go b/internal/crds/crds.go index c496ae3..dca376b 100644 --- a/internal/crds/crds.go +++ b/internal/crds/crds.go @@ -20,7 +20,7 @@ type CRDAssertion struct { func (ca CRDAssertion) clone() CRDAssertion { return CRDAssertion{ - Assertion: assertion.CloneAssertion(ca.Assertion), + Assertion: assertion.Clone(ca.Assertion), } } @@ -34,7 +34,7 @@ func (ca CRDAssertion) Exists() CRDAssertion { return len(pods.Items) == 1, nil } - require.NoError(t, ca.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, ca, conditionFunc)) return ctx } @@ -88,7 +88,7 @@ func (ca CRDAssertion) HasVersion(crdVersion string) CRDAssertion { return foundVersion, nil } - require.NoError(t, ca.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, ca, conditionFunc)) return ctx } @@ -99,11 +99,11 @@ func (ca CRDAssertion) HasVersion(crdVersion string) CRDAssertion { return res } -func NewCRDAssertion(opts ...assertion.AssertionOption) CRDAssertion { +func NewCRDAssertion(opts ...assertion.Option) CRDAssertion { return CRDAssertion{ Assertion: assertion.NewAssertion( append( - []assertion.AssertionOption{assertion.WithBuilder(features.New("CRD").WithLabel("type", "customresourcedefinition"))}, + []assertion.Option{assertion.WithBuilder(features.New("CRD").WithLabel("type", "customresourcedefinition"))}, opts..., )..., ), diff --git a/internal/deployments/deployments.go b/internal/deployments/deployments.go index 1391158..6c9a566 100644 --- a/internal/deployments/deployments.go +++ b/internal/deployments/deployments.go @@ -22,7 +22,7 @@ type DeploymentAssertion struct { func (da DeploymentAssertion) clone() DeploymentAssertion { return DeploymentAssertion{ - Assertion: assertion.CloneAssertion(da.Assertion), + Assertion: assertion.Clone(da.Assertion), } } @@ -40,7 +40,7 @@ func (da DeploymentAssertion) ExactlyNExist(count int) DeploymentAssertion { return len(deploys.Items) == count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -61,7 +61,7 @@ func (da DeploymentAssertion) AtLeastNExist(count int) DeploymentAssertion { return len(deploys.Items) >= count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -121,7 +121,7 @@ func (da DeploymentAssertion) ExactlyNAreAvailable(count int) DeploymentAssertio return availableCount == count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -157,7 +157,7 @@ func (da DeploymentAssertion) AtLeastNAreAvailable(count int) DeploymentAssertio return availableCount >= count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -195,7 +195,7 @@ func (da DeploymentAssertion) ExactlyNAreSystemClusterCritical(count int) Deploy return systemClusterCriticalCount == count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, 
helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -229,7 +229,7 @@ func (da DeploymentAssertion) AtLeastNAreSystemClusterCritical(count int) Deploy return systemClusterCriticalCount >= count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -277,7 +277,7 @@ func (da DeploymentAssertion) ExactlyNHaveNoCPULimits(count int) DeploymentAsser return hasNoCPULimits == count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -321,7 +321,7 @@ func (da DeploymentAssertion) AtLeastNHaveNoCPULimits(count int) DeploymentAsser return hasNoCPULimits >= count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -372,7 +372,7 @@ func (da DeploymentAssertion) ExactlyNHaveMemoryLimitsEqualToRequests(count int) return hasMemoryLimitsEqualToRequests == count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -419,7 +419,7 @@ func (da DeploymentAssertion) AtLeastNHaveMemoryLimitsEqualToRequests(count int) return hasMemoryLimitsEqualToRequests >= count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -467,7 +467,7 @@ func (da DeploymentAssertion) ExactlyNHaveMemoryLimits(count int) DeploymentAsse return hasMemoryLimits == count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -511,7 +511,7 @@ func (da DeploymentAssertion) AtLeastNHaveMemoryLimits(count int) DeploymentAsse return hasMemoryLimits >= count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -559,7 +559,7 @@ func (da DeploymentAssertion) ExactlyNHaveMemoryRequests(count int) DeploymentAs return hasMemoryRequests == count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -603,7 +603,7 @@ func (da DeploymentAssertion) AtLeastNHaveMemoryRequests(count int) DeploymentAs return hasMemoryRequests >= count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -651,7 +651,7 @@ func (da DeploymentAssertion) ExactlyNHaveCPURequests(count int) DeploymentAsser return hasCPURequests == count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -695,7 +695,7 @@ func (da DeploymentAssertion) AtLeastNHaveCPURequests(count int) DeploymentAsser return hasCPURequests >= count, nil } - require.NoError(t, da.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) return ctx } @@ -706,11 +706,11 @@ func (da DeploymentAssertion) AtLeastNHaveCPURequests(count int) DeploymentAsser return res } -func NewDeploymentAssertion(opts ...assertion.AssertionOption) DeploymentAssertion { +func NewDeploymentAssertion(opts ...assertion.Option) DeploymentAssertion { return 
DeploymentAssertion{ Assertion: assertion.NewAssertion( append( - []assertion.AssertionOption{assertion.WithBuilder(features.New("Deployment").WithLabel("type", "deployment"))}, + []assertion.Option{assertion.WithBuilder(features.New("Deployment").WithLabel("type", "deployment"))}, opts..., )..., ), diff --git a/internal/namespaces/namespaces.go b/internal/namespaces/namespaces.go index e23dd46..b8d3d33 100644 --- a/internal/namespaces/namespaces.go +++ b/internal/namespaces/namespaces.go @@ -24,7 +24,7 @@ const ( func (na NamespaceAssertion) clone() NamespaceAssertion { return NamespaceAssertion{ - Assertion: assertion.CloneAssertion(na.Assertion), + Assertion: assertion.Clone(na.Assertion), } } @@ -60,7 +60,7 @@ func (na NamespaceAssertion) ExactlyNExist(count int) NamespaceAssertion { return len(nsList.Items) == count, nil } - require.NoError(t, na.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) return ctx } @@ -81,7 +81,7 @@ func (na NamespaceAssertion) AtLeastNExist(count int) NamespaceAssertion { return len(nsList.Items) >= count, nil } - require.NoError(t, na.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) return ctx } @@ -117,7 +117,7 @@ func (na NamespaceAssertion) AtLeastNAreRestricted(count int) NamespaceAssertion return restrictedCount >= count, nil } - require.NoError(t, na.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) return ctx } @@ -157,7 +157,7 @@ func (na NamespaceAssertion) ExactlyNAreRestricted(count int) NamespaceAssertion return restrictedCount == count, nil } - require.NoError(t, na.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) return ctx } @@ -168,11 +168,11 @@ func (na NamespaceAssertion) ExactlyNAreRestricted(count int) NamespaceAssertion return res } -func NewNamespaceAssertion(opts ...assertion.AssertionOption) NamespaceAssertion { +func NewNamespaceAssertion(opts ...assertion.Option) NamespaceAssertion { return NamespaceAssertion{ Assertion: assertion.NewAssertion( append( - []assertion.AssertionOption{assertion.WithBuilder(features.New("Namespace").WithLabel("type", "namespace"))}, + []assertion.Option{assertion.WithBuilder(features.New("Namespace").WithLabel("type", "namespace"))}, opts..., )..., ), diff --git a/internal/pdbs/pdbs.go b/internal/pdbs/pdbs.go index b361d49..93f997d 100644 --- a/internal/pdbs/pdbs.go +++ b/internal/pdbs/pdbs.go @@ -20,7 +20,7 @@ type PDBAssertion struct { func (pa PDBAssertion) clone() PDBAssertion { return PDBAssertion{ - Assertion: assertion.CloneAssertion(pa.Assertion), + Assertion: assertion.Clone(pa.Assertion), } } @@ -58,7 +58,7 @@ func (pa PDBAssertion) Exists() PDBAssertion { return len(pdbs.Items) == 1, nil } - require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) return ctx } @@ -68,11 +68,11 @@ func (pa PDBAssertion) Exists() PDBAssertion { return res } -func NewPDBAssertion(opts ...assertion.AssertionOption) PDBAssertion { +func NewPDBAssertion(opts ...assertion.Option) PDBAssertion { return PDBAssertion{ Assertion: assertion.NewAssertion( append( - []assertion.AssertionOption{assertion.WithBuilder(features.New("CRD").WithLabel("type", "poddisruptionbudget"))}, + []assertion.Option{assertion.WithBuilder(features.New("CRD").WithLabel("type", "poddisruptionbudget"))}, opts..., )..., ), diff --git 
a/internal/pods/3pod_test.go b/internal/pods/3pod_test.go index ba0981c..1817a54 100644 --- a/internal/pods/3pod_test.go +++ b/internal/pods/3pod_test.go @@ -66,7 +66,7 @@ func Test_3Pod_Success(t *testing.T) { features := make([]features.Feature, 0) for _, a := range testCases { - features = append(features, a.assertion.AsFeature()) + features = append(features, assertion.AsFeature(a.assertion)) } testEnv.TestInParallel(t, features...) @@ -143,7 +143,7 @@ func Test_3Pod_Fail(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { mockT := &testhelpers.MockT{} - testEnv.Test(t, tc.failingAssertion(mockT).AsFeature()) + testEnv.Test(t, assertion.AsFeature(tc.failingAssertion(mockT))) assert.True(t, mockT.Failed) }) } diff --git a/internal/pods/pods.go b/internal/pods/pods.go index 2b8b725..c25886a 100644 --- a/internal/pods/pods.go +++ b/internal/pods/pods.go @@ -20,7 +20,7 @@ type PodAssertion struct { func (pa PodAssertion) clone() PodAssertion { return PodAssertion{ - Assertion: assertion.CloneAssertion(pa.Assertion), + Assertion: assertion.Clone(pa.Assertion), } } @@ -38,7 +38,7 @@ func (pa PodAssertion) ExactlyNExist(count int) PodAssertion { return len(pods.Items) == count, nil } - require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) return ctx } @@ -59,7 +59,7 @@ func (pa PodAssertion) AtLeastNExist(count int) PodAssertion { return len(pods.Items) >= count, nil } - require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) return ctx } @@ -122,7 +122,7 @@ func (pa PodAssertion) ExactlyNAreReady(count int) PodAssertion { return readyCount == count, nil } - require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) return ctx } @@ -159,7 +159,7 @@ func (pa PodAssertion) AtLeastNAreReady(count int) PodAssertion { return readyCount >= count, nil } - require.NoError(t, pa.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) return ctx } @@ -170,11 +170,11 @@ func (pa PodAssertion) AtLeastNAreReady(count int) PodAssertion { return res } -func NewPodAssertion(opts ...assertion.AssertionOption) PodAssertion { +func NewPodAssertion(opts ...assertion.Option) PodAssertion { return PodAssertion{ Assertion: assertion.NewAssertion( append( - []assertion.AssertionOption{assertion.WithBuilder(features.New("Pod").WithLabel("type", "pod"))}, + []assertion.Option{assertion.WithBuilder(features.New("Pod").WithLabel("type", "pod"))}, opts..., )..., ), diff --git a/internal/secrets/secrets.go b/internal/secrets/secrets.go index 749981a..6a07c60 100644 --- a/internal/secrets/secrets.go +++ b/internal/secrets/secrets.go @@ -20,7 +20,7 @@ type SecretAssertion struct { func (sa SecretAssertion) clone() SecretAssertion { return SecretAssertion{ - Assertion: assertion.CloneAssertion(sa.Assertion), + Assertion: assertion.Clone(sa.Assertion), } } @@ -38,7 +38,7 @@ func (sa SecretAssertion) ExactlyNExist(count int) SecretAssertion { return len(secrets.Items) == count, nil } - require.NoError(t, sa.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, sa, conditionFunc)) return ctx } @@ -59,7 +59,7 @@ func (sa SecretAssertion) AtLeastNExist(count int) SecretAssertion { return len(secrets.Items) >= count, nil } - require.NoError(t, sa.WaitForCondition(ctx, conditionFunc)) + 
require.NoError(t, helpers.WaitForCondition(ctx, sa, conditionFunc)) return ctx } @@ -127,7 +127,7 @@ func (sa SecretAssertion) ExactlyNHaveContent(count int, content map[string]stri return haveContent == count, nil } - require.NoError(t, sa.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, sa, conditionFunc)) return ctx } @@ -171,7 +171,7 @@ func (sa SecretAssertion) AtLeastNHaveContent(count int, content map[string]stri return haveContent >= count, nil } - require.NoError(t, sa.WaitForCondition(ctx, conditionFunc)) + require.NoError(t, helpers.WaitForCondition(ctx, sa, conditionFunc)) return ctx } @@ -182,11 +182,11 @@ func (sa SecretAssertion) AtLeastNHaveContent(count int, content map[string]stri return res } -func NewSecretAssertion(opts ...assertion.AssertionOption) SecretAssertion { +func NewSecretAssertion(opts ...assertion.Option) SecretAssertion { return SecretAssertion{ Assertion: assertion.NewAssertion( append( - []assertion.AssertionOption{assertion.WithBuilder(features.New("Secret").WithLabel("type", "secret"))}, + []assertion.Option{assertion.WithBuilder(features.New("Secret").WithLabel("type", "secret"))}, opts..., )..., ), From 75cecd51789cec78da5498e94e411de93868ed34 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 20:53:49 -0400 Subject: [PATCH 14/29] wip --- internal/assertion/assertion_options.go | 28 +++++++++++++++++++------ internal/testhelpers/test_asserts.go | 10 +++++++-- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/internal/assertion/assertion_options.go b/internal/assertion/assertion_options.go index 2dc5e76..f567c10 100644 --- a/internal/assertion/assertion_options.go +++ b/internal/assertion/assertion_options.go @@ -11,42 +11,49 @@ import ( // Option is a function that configures one or more facets of an Assertion. type Option func(Assertion) +// WithResourceLabels sets the labels to be used when selecting resources for the assertion. func WithResourceLabels(labels map[string]string) Option { return func(a Assertion) { a.setLabels(labels) } } +// WithResourceFields sets the fields to be used when selecting resources for the assertion. func WithResourceFields(fields map[string]string) Option { return func(a Assertion) { a.setFields(fields) } } +// WithInterval sets the interval used when polling for the assertion to be true. func WithInterval(interval time.Duration) Option { return func(a Assertion) { a.setInterval(interval) } } +// WithTimeout sets the timeout used when polling for the assertion to be true. func WithTimeout(timeout time.Duration) Option { return func(a Assertion) { a.setTimeout(timeout) } } +// WithBuilder sets the features.FeatureBuilder used to build the e2e-framework Feature. func WithBuilder(builder *features.FeatureBuilder) Option { return func(a Assertion) { a.SetBuilder(builder) } } +// WithRequireT sets the require.TestingT used to report test failures. func WithRequireT(requireT require.TestingT) Option { return func(a Assertion) { a.setRequireT(requireT) } } +// WithResourceNamespace sets the namespace to be used when selecting resources for the assertion. func WithResourceNamespace(namespaceName string) Option { return func(a Assertion) { newFields := a.GetFields() @@ -55,12 +62,15 @@ func WithResourceNamespace(namespaceName string) Option { } } +// WithResourceNamespaceFromTestEnv sets the namespace to be used when selecting resources for the assertion to the +// namespace set in the test environment. 
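To make the difference between the two namespace options concrete: WithResourceNamespace pins the namespace into the field selector when the assertion is built, while WithResourceNamespaceFromTestEnv resolves the namespace from the envconf.Config each time the list runs, which pairs with helpers that create a fresh namespace per feature. The function names, secret name, and label values below are illustrative.

package secrets_test

import (
	"github.com/DWSR/kubeassert-go/internal/assertion"
	"github.com/DWSR/kubeassert-go/internal/secrets"
)

// prodSecrets always lists Secrets in the fixed "prod" namespace.
func prodSecrets() secrets.SecretAssertion {
	return secrets.NewSecretAssertion(
		assertion.WithResourceNamespace("prod"),
		assertion.WithResourceName("registry-credentials"),
	).Exists()
}

// scratchSecrets resolves the namespace from the test environment at list time.
func scratchSecrets() secrets.SecretAssertion {
	return secrets.NewSecretAssertion(
		assertion.WithResourceNamespaceFromTestEnv(),
		assertion.WithResourceLabels(map[string]string{"app.kubernetes.io/part-of": "example"}),
	).AtLeastNExist(2)
}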
func WithResourceNamespaceFromTestEnv() Option { return func(a Assertion) { a.setListOptionsFn(listOptionsWithNamespaceFromEnv) } } +// WithResourceName sets the name (i.e. metadata.name) to be used when selecting resources for the assertion. func WithResourceName(name string) Option { return func(a Assertion) { newFields := a.GetFields() @@ -69,22 +79,28 @@ func WithResourceName(name string) Option { } } +// WithSetup adds setup steps to the assertion. func WithSetup(steps ...e2etypes.StepFunc) Option { - return func(a Assertion) { - builder := a.GetBuilder() + return func(assert Assertion) { + builder := assert.GetBuilder() + for _, s := range steps { builder = builder.Setup(s) } - a.SetBuilder(builder) + + assert.SetBuilder(builder) } } +// WithTeardown adds teardown steps to the assertion. func WithTeardown(steps ...e2etypes.StepFunc) Option { - return func(a Assertion) { - builder := a.GetBuilder() + return func(assert Assertion) { + builder := assert.GetBuilder() + for _, s := range steps { builder = builder.Teardown(s) } - a.SetBuilder(builder) + + assert.SetBuilder(builder) } } diff --git a/internal/testhelpers/test_asserts.go b/internal/testhelpers/test_asserts.go index ba82e92..33300ff 100644 --- a/internal/testhelpers/test_asserts.go +++ b/internal/testhelpers/test_asserts.go @@ -12,36 +12,42 @@ import ( ) type ( + // SuccessfulAssert is a struct that contains the name of the test and a function that returns an assertion.Assertion + // that should be true or pass. SuccessfulAssert struct { Name string SuccessfulAssert func(t require.TestingT) assertion.Assertion } + // FailingAssert is a struct that contains the name of the test and a function that returns an assertion.Assertion + // that should be false or fail. FailingAssert struct { Name string FailingAssert func(t require.TestingT) assertion.Assertion } ) +// TestSuccessfulAsserts is a helper function that runs a series of successful asserts. func TestSuccessfulAsserts(t *testing.T, testEnv env.Environment, asserts ...SuccessfulAssert) { t.Helper() assertFeatures := make([]features.Feature, 0) for _, a := range asserts { - assertFeatures = append(assertFeatures, a.SuccessfulAssert(t).AsFeature()) + assertFeatures = append(assertFeatures, assertion.AsFeature(a.SuccessfulAssert(t))) } testEnv.Test(t, assertFeatures...) } +// TestFailingAsserts is a helper function that runs a series of failing asserts. func TestFailingAsserts(t *testing.T, testEnv env.Environment, asserts ...FailingAssert) { t.Helper() for _, tc := range asserts { t.Run(tc.Name, func(t *testing.T) { mockT := &MockT{} - testEnv.Test(t, tc.FailingAssert(mockT).AsFeature()) + testEnv.Test(t, assertion.AsFeature(tc.FailingAssert(mockT))) assert.True(t, mockT.Failed) }) } From 58fde7265d3ee66eb3f8e7f85114dfeda56b8402 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 21:02:23 -0400 Subject: [PATCH 15/29] wip --- internal/testhelpers/testhelpers.go | 36 +++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/internal/testhelpers/testhelpers.go b/internal/testhelpers/testhelpers.go index 4d53b6f..482330f 100644 --- a/internal/testhelpers/testhelpers.go +++ b/internal/testhelpers/testhelpers.go @@ -1,3 +1,5 @@ +// testhelpers contains helper functions specifically for testing assertion functionality. Any code that could be +// useful to consumers of kubeassert should go in the assertionhelpers package. 
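A sketch of how a resource package's tests can drive the SuccessfulAssert/FailingAssert helpers, assuming a package-level testEnv initialized in a TestMain like the existing tests use (one such TestMain is sketched after the namespace helpers below); case names and counts are illustrative.

package pods_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"sigs.k8s.io/e2e-framework/pkg/env"

	"github.com/DWSR/kubeassert-go/internal/assertion"
	"github.com/DWSR/kubeassert-go/internal/pods"
	"github.com/DWSR/kubeassert-go/internal/testhelpers"
)

// testEnv stands in for the package-level environment the existing pod tests
// already initialize in TestMain.
var testEnv env.Environment

func Test_Pods_WithHelpers(t *testing.T) {
	testhelpers.TestSuccessfulAsserts(t, testEnv,
		testhelpers.SuccessfulAssert{
			Name: "oneReadyPod",
			SuccessfulAssert: func(rt require.TestingT) assertion.Assertion {
				return pods.NewPodAssertion(
					assertion.WithRequireT(rt),
					assertion.WithResourceNamespaceFromTestEnv(),
				).ExactlyNExist(1).ExactlyNAreReady(1)
			},
		},
	)

	testhelpers.TestFailingAsserts(t, testEnv,
		testhelpers.FailingAssert{
			Name: "tooManyPods",
			FailingAssert: func(rt require.TestingT) assertion.Assertion {
				return pods.NewPodAssertion(
					assertion.WithRequireT(rt),
					assertion.WithTimeout(10*time.Second),
					assertion.WithInterval(100*time.Millisecond),
					assertion.WithResourceNamespaceFromTestEnv(),
				).ExactlyNExist(10)
			},
		},
	)
}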
package testhelpers import ( @@ -14,17 +16,25 @@ import ( ) type ( + // MockT implements the require.TestingT interface and enables the detection of failing assert/require statements. + // This enables testing assertions for expected failures. MockT struct { Failed bool } ) -func (t *MockT) Errorf(_ string, _ ...interface{}) {} +const randomNamespaceNameLength = 20 +// Errorf is a no-op function that satisfies the require.TestingT interface. +func (*MockT) Errorf(_ string, _ ...interface{}) {} + +// FailNow sets the Failed field to true, indicating that a failing assertion was detected. func (t *MockT) FailNow() { t.Failed = true } +// CreateNamespaceBeforeEachFeature is a FeatureEnvFunc that creates a namespace. This helps run +// Features in isolation without requiring that features handle their own setup or teardown. func CreateNamespaceBeforeEachFeature(namespaceName string) e2etypes.FeatureEnvFunc { return func(ctx context.Context, cfg *envconf.Config, _ *testing.T, _ e2etypes.Feature) (context.Context, error) { namespace := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}} @@ -38,21 +48,29 @@ func CreateNamespaceBeforeEachFeature(namespaceName string) e2etypes.FeatureEnvF return ctx, err } - cfg = cfg.WithNamespace(namespaceName) + _ = cfg.WithNamespace(namespaceName) return context.WithValue(ctx, envfuncs.NamespaceContextKey(namespaceName), namespace), nil } } +// DeleteNamespaceBeforeEachFeature is a FeatureEnvFunc that deletes a namespace. This helps run +// Features in isolation without requiring that features handle their own setup or teardown. func DeleteNamespaceBeforeEachFeature(namespaceName string) e2etypes.FeatureEnvFunc { return func(ctx context.Context, cfg *envconf.Config, _ *testing.T, _ e2etypes.Feature) (context.Context, error) { - var ns corev1.Namespace + var namespace corev1.Namespace nsFromCtx := ctx.Value(envfuncs.NamespaceContextKey(namespaceName)) if nsFromCtx == nil { - ns = corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}} + namespace = corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}} } else { - ns = nsFromCtx.(corev1.Namespace) + var ok bool + + namespace, ok = nsFromCtx.(corev1.Namespace) + + if !ok { + panic("namespace is not of type corev1.Namespace") + } } client, err := cfg.NewClient() @@ -60,7 +78,7 @@ func DeleteNamespaceBeforeEachFeature(namespaceName string) e2etypes.FeatureEnvF return ctx, err } - if err := client.Resources().Delete(ctx, &ns); err != nil { + if err := client.Resources().Delete(ctx, &namespace); err != nil { return ctx, err } @@ -70,12 +88,15 @@ func DeleteNamespaceBeforeEachFeature(namespaceName string) e2etypes.FeatureEnvF } } +// CreateRandomNamespaceBeforeEachFeature is a FeatureEnvFunc that creates a namespace with a random name. func CreateRandomNamespaceBeforeEachFeature() e2etypes.FeatureEnvFunc { return func(ctx context.Context, cfg *envconf.Config, t *testing.T, feat e2etypes.Feature) (context.Context, error) { - return CreateNamespaceBeforeEachFeature(envconf.RandomName("test", 20))(ctx, cfg, t, feat) + return CreateNamespaceBeforeEachFeature(envconf.RandomName("test", randomNamespaceNameLength))(ctx, cfg, t, feat) } } +// DeleteRandomNamespaceAfterEachFeature is a FeatureEnvFunc that deletes the namespace set in the test environment +// after each feature. 
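A minimal TestMain sketch showing where these FeatureEnvFuncs hook in. The env.New, BeforeEachFeature, AfterEachFeature, and Run calls are the upstream e2e-framework entry points as I understand them, and cluster/kubeconfig wiring is assumed to be handled the same way the existing package tests handle it; treat this as a sketch rather than the repository's actual TestMain.

package secrets_test

import (
	"os"
	"testing"

	"sigs.k8s.io/e2e-framework/pkg/env"

	"github.com/DWSR/kubeassert-go/internal/testhelpers"
)

var testEnv env.Environment

func TestMain(m *testing.M) {
	// Cluster/kubeconfig setup is assumed to happen here as in the existing
	// tests; only the per-feature namespace lifecycle is shown.
	testEnv = env.New()

	testEnv.BeforeEachFeature(testhelpers.CreateRandomNamespaceBeforeEachFeature())
	testEnv.AfterEachFeature(testhelpers.DeleteRandomNamespaceAfterEachFeature())

	os.Exit(testEnv.Run(m))
}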
func DeleteRandomNamespaceAfterEachFeature() e2etypes.FeatureEnvFunc { return func(ctx context.Context, cfg *envconf.Config, t *testing.T, feat e2etypes.Feature) (context.Context, error) { nsName := cfg.Namespace() @@ -84,6 +105,7 @@ func DeleteRandomNamespaceAfterEachFeature() e2etypes.FeatureEnvFunc { } } +// MutateResourceName is a DecodeOption that mutates the name of a resource. func MutateResourceName(resourceName string) decoder.DecodeOption { return decoder.MutateOption(func(obj k8s.Object) error { obj.SetName(resourceName) From 8bfd1b01028263d4e24b9e5b2c18d994bf223711 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Sun, 9 Mar 2025 21:06:52 -0400 Subject: [PATCH 16/29] wip --- internal/secrets/secrets.go | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/internal/secrets/secrets.go b/internal/secrets/secrets.go index 6a07c60..4e229f0 100644 --- a/internal/secrets/secrets.go +++ b/internal/secrets/secrets.go @@ -14,6 +14,8 @@ import ( helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) +// SecretAssertion is a wrapper around the assertion.Assertion type and provides a set of assertions for Kubernetes +// Secrets. type SecretAssertion struct { assertion.Assertion } @@ -24,12 +26,14 @@ func (sa SecretAssertion) clone() SecretAssertion { } } +// Exists asserts that exactly one Secret exists in the cluster that matches the provided options. func (sa SecretAssertion) Exists() SecretAssertion { return sa.ExactlyNExist(1) } +// ExactlyNExist asserts that exactly N Secrets exist in the cluster that match the provided options. func (sa SecretAssertion) ExactlyNExist(count int) SecretAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) conditionFunc := func(ctx context.Context) (bool, error) { secrets, err := sa.getSecrets(ctx, t, cfg) @@ -44,13 +48,14 @@ func (sa SecretAssertion) ExactlyNExist(count int) SecretAssertion { } res := sa.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", fn)) + res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn)) return res } +// AtLeastNExist asserts that at least N Secrets exist in the cluster that match the provided options. 
func (sa SecretAssertion) AtLeastNExist(count int) SecretAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) conditionFunc := func(ctx context.Context) (bool, error) { secrets, err := sa.getSecrets(ctx, t, cfg) @@ -65,12 +70,16 @@ func (sa SecretAssertion) AtLeastNExist(count int) SecretAssertion { } res := sa.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", fn)) + res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", stepFn)) return res } -func (sa SecretAssertion) getSecrets(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.SecretList, error) { +func (sa SecretAssertion) getSecrets( + ctx context.Context, + t require.TestingT, + cfg *envconf.Config, +) (corev1.SecretList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) var secrets corev1.SecretList @@ -90,18 +99,22 @@ func (sa SecretAssertion) getSecrets(ctx context.Context, t require.TestingT, cf return secrets, nil } +// HasContent asserts that exactly one Secret in the cluster contains the provided content. This match is not exclusive +// meaning that the Secret can contain additional content. func (sa SecretAssertion) HasContent(content map[string]string) SecretAssertion { return sa.ExactlyNHaveContent(1, content) } +// ExactlyNHaveContent asserts that exactly N Secrets in the cluster contain the provided content. This match is not +// exclusive meaning that the Secrets can contain additional content. func (sa SecretAssertion) ExactlyNHaveContent(count int, content map[string]string) SecretAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) conditionFunc := func(ctx context.Context) (bool, error) { secrets, err := sa.getSecrets(ctx, t, cfg) require.NoError(t, err) - if len(secrets.Items) < count { + if len(secrets.Items) != count { return false, nil } @@ -133,13 +146,15 @@ func (sa SecretAssertion) ExactlyNHaveContent(count int, content map[string]stri } res := sa.clone() - res.SetBuilder(res.GetBuilder().Assess("hasContent", fn)) + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveContent", stepFn)) return res } +// AtLeastNHaveContent asserts that at least N Secrets in the cluster contain the provided content. This match is not +// exclusive meaning that the Secrets can contain additional content. func (sa SecretAssertion) AtLeastNHaveContent(count int, content map[string]string) SecretAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) conditionFunc := func(ctx context.Context) (bool, error) { secrets, err := sa.getSecrets(ctx, t, cfg) @@ -177,11 +192,12 @@ func (sa SecretAssertion) AtLeastNHaveContent(count int, content map[string]stri } res := sa.clone() - res.SetBuilder(res.GetBuilder().Assess("hasContent", fn)) + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveContent", stepFn)) return res } +// NewSecretAssertion creates a new SecretAssertion with the provided options. 
func NewSecretAssertion(opts ...assertion.Option) SecretAssertion { return SecretAssertion{ Assertion: assertion.NewAssertion( From 7900a8d796d57985e8e88c897143e79c84ea61ae Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Tue, 11 Mar 2025 00:13:18 -0400 Subject: [PATCH 17/29] wip --- internal/assertionhelpers/assertionhelpers.go | 80 +++++++- internal/pods/assertion.go | 99 ++++++++++ internal/pods/pods.go | 152 +++------------ internal/secrets/assertion.go | 96 +++++++++ internal/secrets/secrets.go | 183 ++++-------------- internal/secrets/secrets_test.go | 1 + 6 files changed, 338 insertions(+), 273 deletions(-) create mode 100644 internal/pods/assertion.go create mode 100644 internal/secrets/assertion.go diff --git a/internal/assertionhelpers/assertionhelpers.go b/internal/assertionhelpers/assertionhelpers.go index 3f81b45..4f2b711 100644 --- a/internal/assertionhelpers/assertionhelpers.go +++ b/internal/assertionhelpers/assertionhelpers.go @@ -23,6 +23,7 @@ import ( "sigs.k8s.io/e2e-framework/klient/wait" "sigs.k8s.io/e2e-framework/pkg/env" "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" e2etypes "sigs.k8s.io/e2e-framework/pkg/types" "sigs.k8s.io/kustomize/api/krusty" kusttypes "sigs.k8s.io/kustomize/api/types" @@ -31,18 +32,71 @@ import ( "github.com/DWSR/kubeassert-go/internal/assertion" ) -func CreateResourceFromPathWithNamespaceFromEnv(resourcePath string, decoderOpts ...decoder.DecodeOption) e2etypes.StepFunc { +type ( + // IntCompareFunc is a function that compares two integers and returns a boolean. + IntCompareFunc func(int, int) bool + + // ConditionFunc is a function that returns a boolean based on a condition being satisfied. + ConditionFunc = apimachinerywait.ConditionWithContextFunc + + // ConditionFuncFactory is a function that returns a ConditionFunc. + ConditionFuncFactory = func( + require.TestingT, assertion.Assertion, *envconf.Config, int, IntCompareFunc, IntCompareFunc, + ) ConditionFunc + + // StepFunc is a function that performs a step in a test. + StepFunc = features.Func +) + +var ( + // IntCompareFuncLessThan is a function that compares two integers and returns true if the first integer is less + // than the second. + IntCompareFuncLessThan IntCompareFunc = func(a, b int) bool { return a < b } + + // IntCompareFuncLessThanOrEqualTo is a function that compares two integers and returns true if the first integer is + // less than or equal to the second. + IntCompareFuncLessThanOrEqualTo IntCompareFunc = func(a, b int) bool { return a <= b } + + // IntCompareFuncEqualTo is a function that compares two integers and returns true if the first integer is equal to + // the second. + IntCompareFuncEqualTo IntCompareFunc = func(a, b int) bool { return a == b } + + // IntCompareFuncGreaterThan is a function that compares two integers and returns true if the first integer is + // greater than the second. + IntCompareFuncGreaterThan IntCompareFunc = func(a, b int) bool { return a > b } + + // IntCompareFuncGreaterThanOrEqualTo is a function that compares two integers and returns true if the first integer + // is greater than or equal to the second. + IntCompareFuncGreaterThanOrEqualTo IntCompareFunc = func(a, b int) bool { return a >= b } + + // IntCompareFuncNotEqualTo is a function that compares two integers and returns true if the first integer is not + // equal to the second. 
+ IntCompareFuncNotEqualTo IntCompareFunc = func(a, b int) bool { return a != b } +) + +// CreateResourceFromPathWithNamespaceFromEnv creates a resource from a file at the provided path and sets the +// resource's namespace to the one provided in the environment configuration. +func CreateResourceFromPathWithNamespaceFromEnv( + resourcePath string, + decoderOpts ...decoder.DecodeOption, +) e2etypes.StepFunc { return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { nsName := cfg.Namespace() - r, err := resources.New(cfg.Client().RESTConfig()) + res, err := resources.New(cfg.Client().RESTConfig()) require.NoError(t, err) file, err := os.Open(filepath.Clean(resourcePath)) require.NoError(t, err) + defer func() { _ = file.Close() }() - err = decoder.DecodeEach(ctx, file, decoder.CreateHandler(r), append(decoderOpts, decoder.MutateNamespace(nsName))...) + err = decoder.DecodeEach( + ctx, + file, + decoder.CreateHandler(res), + append(decoderOpts, decoder.MutateNamespace(nsName))..., + ) require.NoError(t, err) return ctx @@ -169,6 +223,7 @@ func ApplyKustomization(kustDir string) env.Func { restMapper := restmapper.NewDiscoveryRESTMapper(gr) slog.Debug("transmuting resMap resource to unstructured") + yamlBytes, err := res.AsYAML() if err != nil { return ctx, err @@ -227,3 +282,22 @@ func WaitForCondition( wait.WithImmediate(), ) } + +// AsStepFunc returns a StepFunc that waits for a condition to be satisfied based on the provided ConditionFuncFactory. +// The count parameter is used to determine the number of resources that should satisfy the condition. The itemCountFn +// parameter is used to evalute the number of resources under consideration to satisfy the condition. The resultFn +// is used to evaluate the number of items that satisfy the condition relative to the count. 
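// For example, the resource packages below build their assertions by pairing a
// ConditionFuncFactory with these comparison functions. "Exactly N exist" needs only an item
// count check, while "at least N are ready" first keeps waiting when too few items exist and
// then compares the ready count (sa, pa, exist() and areReady() are identifiers from the
// secrets and pods packages later in this change, shown here only to illustrate the call shape):
//
//    // exactly N exist: the item count must equal count; no per-item result comparison is needed.
//    existsStep := helpers.AsStepFunc(sa, exist(), count, helpers.IntCompareFuncEqualTo, nil)
//
//    // at least N ready: keep waiting while fewer than count Pods exist, then require that
//    // the number of ready Pods is greater than or equal to count.
//    readyStep := helpers.AsStepFunc(pa, areReady(), count,
//        helpers.IntCompareFuncLessThan, helpers.IntCompareFuncGreaterThanOrEqualTo)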
+func AsStepFunc( + assert assertion.Assertion, + conditionFactory ConditionFuncFactory, + count int, + itemCountFn, resultFn IntCompareFunc, +) StepFunc { + return func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := RequireTIfNotNil(testingT, assert.GetRequireT()) + + require.NoError(t, WaitForCondition(ctx, assert, conditionFactory(t, assert, cfg, count, itemCountFn, resultFn))) + + return ctx + } +} diff --git a/internal/pods/assertion.go b/internal/pods/assertion.go new file mode 100644 index 0000000..25b43e1 --- /dev/null +++ b/internal/pods/assertion.go @@ -0,0 +1,99 @@ +package pods + +import ( + "context" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type PodAssertion struct { + assertion.Assertion +} + +func (pa PodAssertion) clone() PodAssertion { + return PodAssertion{ + Assertion: assertion.Clone(pa.Assertion), + } +} + +func (pa PodAssertion) Exists() PodAssertion { + return pa.ExactlyNExist(1) +} + +func (pa PodAssertion) ExactlyNExist(count int) PodAssertion { + stepFn := helpers.AsStepFunc(pa, exist(), count, helpers.IntCompareFuncEqualTo, nil) + + res := pa.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn)) + + return res +} + +func (pa PodAssertion) AtLeastNExist(count int) PodAssertion { + stepFn := helpers.AsStepFunc(pa, exist(), count, helpers.IntCompareFuncGreaterThanOrEqualTo, nil) + + res := pa.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", stepFn)) + + return res +} + +// return default value instead of a nil pointer so that negative assertions (i.e. testing for false positives) can use +// a mock require.TestingT object. +func (pa PodAssertion) getPods(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.PodList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var pods corev1.PodList + + list, err := client. + Resource(corev1.SchemeGroupVersion.WithResource("pods")). 
+ List(ctx, pa.ListOptions(cfg)) + if err != nil { + return pods, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &pods) + if err != nil { + return pods, err + } + + return pods, nil +} + +func (pa PodAssertion) IsReady() PodAssertion { + return pa.ExactlyNAreReady(1) +} + +func (pa PodAssertion) ExactlyNAreReady(count int) PodAssertion { + stepFn := helpers.AsStepFunc(pa, areReady(), count, helpers.IntCompareFuncNotEqualTo, helpers.IntCompareFuncEqualTo) + res := pa.clone() + res.SetBuilder(pa.GetBuilder().Assess("exactlyNAreReady", stepFn)) + + return res +} + +func (pa PodAssertion) AtLeastNAreReady(count int) PodAssertion { + stepFn := helpers.AsStepFunc(pa, areReady(), count, helpers.IntCompareFuncLessThan, helpers.IntCompareFuncGreaterThanOrEqualTo) + res := pa.clone() + res.SetBuilder(pa.GetBuilder().Assess("atLeastNAreReady", stepFn)) + + return res +} + +func NewPodAssertion(opts ...assertion.Option) PodAssertion { + return PodAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.Option{assertion.WithBuilder(features.New("Pod").WithLabel("type", "pod"))}, + opts..., + )..., + ), + } +} diff --git a/internal/pods/pods.go b/internal/pods/pods.go index c25886a..2f0c7f2 100644 --- a/internal/pods/pods.go +++ b/internal/pods/pods.go @@ -2,84 +2,27 @@ package pods import ( "context" - "testing" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" - "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) -type PodAssertion struct { - assertion.Assertion -} - -func (pa PodAssertion) clone() PodAssertion { - return PodAssertion{ - Assertion: assertion.Clone(pa.Assertion), - } -} - -func (pa PodAssertion) Exists() PodAssertion { - return pa.ExactlyNExist(1) -} - -func (pa PodAssertion) ExactlyNExist(count int) PodAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - pods, err := pa.getPods(ctx, t, cfg) - require.NoError(t, err) - - return len(pods.Items) == count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) - - return ctx - } - - res := pa.clone() - res.SetBuilder(res.GetBuilder().Assess("exists", fn)) - - return res -} - -func (pa PodAssertion) AtLeastNExist(count int) PodAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - pods, err := pa.getPods(ctx, t, cfg) - require.NoError(t, err) - - return len(pods.Items) >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) - - return ctx - } - - res := pa.clone() - res.SetBuilder(res.GetBuilder().Assess("exists", fn)) - - return res -} - // return default value instead of a nil pointer so that negative assertions (i.e. testing for false positives) can use // a mock require.TestingT object. 
-func (pa PodAssertion) getPods(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.PodList, error) { +func getPods(ctx context.Context, t require.TestingT, cfg *envconf.Config, listOpts metav1.ListOptions) (corev1.PodList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) var pods corev1.PodList list, err := client. Resource(corev1.SchemeGroupVersion.WithResource("pods")). - List(ctx, pa.ListOptions(cfg)) + List(ctx, listOpts) if err != nil { return pods, err } @@ -92,56 +35,36 @@ func (pa PodAssertion) getPods(ctx context.Context, t require.TestingT, cfg *env return pods, nil } -func (pa PodAssertion) IsReady() PodAssertion { - return pa.ExactlyNAreReady(1) -} - -func (pa PodAssertion) ExactlyNAreReady(count int) PodAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - pods, err := pa.getPods(ctx, t, cfg) +func exist() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, _ helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + secrets, err := getPods(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(pods.Items) < count { - return false, nil - } - - readyCount := 0 - - for _, pod := range pods.Items { - for _, cond := range pod.Status.Conditions { - if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { - readyCount += 1 - break - } - } - } - - return readyCount == count, nil + return itemCountFn(len(secrets.Items), count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) - - return ctx } - - res := pa.clone() - res.SetBuilder(pa.GetBuilder().Assess("isReady", fn)) - - return res } -func (pa PodAssertion) AtLeastNAreReady(count int) PodAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - pods, err := pa.getPods(ctx, t, cfg) +func areReady() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + pods, err := getPods(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(pods.Items) < count { + if itemCountFn(len(pods.Items), count) { return false, nil } @@ -150,33 +73,14 @@ func (pa PodAssertion) AtLeastNAreReady(count int) PodAssertion { for _, pod := range pods.Items { for _, cond := range pod.Status.Conditions { if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { - readyCount += 1 + readyCount++ + break } } } - return readyCount >= count, nil + return resultFn(readyCount, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) - - return ctx - } - - res := pa.clone() - res.SetBuilder(pa.GetBuilder().Assess("isReady", fn)) - - return res -} - -func NewPodAssertion(opts ...assertion.Option) PodAssertion { - return PodAssertion{ - Assertion: assertion.NewAssertion( - append( - []assertion.Option{assertion.WithBuilder(features.New("Pod").WithLabel("type", "pod"))}, - opts..., - )..., - ), } } diff --git 
a/internal/secrets/assertion.go b/internal/secrets/assertion.go new file mode 100644 index 0000000..8dd5d8c --- /dev/null +++ b/internal/secrets/assertion.go @@ -0,0 +1,96 @@ +package secrets + +import ( + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +// SecretAssertion is a wrapper around the assertion.Assertion type and provides a set of assertions for Kubernetes +// Secrets. +type SecretAssertion struct { + assertion.Assertion +} + +func (sa SecretAssertion) clone() SecretAssertion { + return SecretAssertion{ + Assertion: assertion.Clone(sa.Assertion), + } +} + +// Exists asserts that exactly one Secret exists in the cluster that matches the provided options. +func (sa SecretAssertion) Exists() SecretAssertion { + return sa.ExactlyNExist(1) +} + +// ExactlyNExist asserts that exactly N Secrets exist in the cluster that match the provided options. +func (sa SecretAssertion) ExactlyNExist(count int) SecretAssertion { + stepFn := helpers.AsStepFunc(sa, exist(), count, helpers.IntCompareFuncEqualTo, nil) + + res := sa.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn)) + + return res +} + +// AtLeastNExist asserts that at least N Secrets exist in the cluster that match the provided options. +func (sa SecretAssertion) AtLeastNExist(count int) SecretAssertion { + stepFn := helpers.AsStepFunc(sa, exist(), count, helpers.IntCompareFuncGreaterThanOrEqualTo, nil) + + res := sa.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", stepFn)) + + return res +} + +// HasContent asserts that exactly one Secret in the cluster contains the provided content. This match is not exclusive +// meaning that the Secret can contain additional content. +func (sa SecretAssertion) HasContent(content map[string]string) SecretAssertion { + return sa.ExactlyNHaveContent(1, content) +} + +// ExactlyNHaveContent asserts that exactly N Secrets in the cluster contain the provided content. This match is not +// exclusive meaning that the Secrets can contain additional content. +func (sa SecretAssertion) ExactlyNHaveContent(count int, content map[string]string) SecretAssertion { + stepFn := helpers.AsStepFunc( + sa, + haveContent(content), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + res := sa.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveContent", stepFn)) + + return res +} + +// AtLeastNHaveContent asserts that at least N Secrets in the cluster contain the provided content. This match is not +// exclusive meaning that the Secrets can contain additional content. +func (sa SecretAssertion) AtLeastNHaveContent(count int, content map[string]string) SecretAssertion { + stepFn := helpers.AsStepFunc( + sa, + haveContent(content), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) + + res := sa.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveContent", stepFn)) + + return res +} + +// NewSecretAssertion creates a new SecretAssertion with the provided options. 
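// A usage sketch from a test in this module, combining the assertion options and step helpers
// shown earlier in this change (the secret name and data key/value are placeholders; t and
// testEnv are the test's *testing.T and env.Environment):
//
//    secretOK := secrets.NewSecretAssertion(
//        assertion.WithResourceName("my-secret"),
//        assertion.WithResourceNamespaceFromTestEnv(),
//        assertion.WithSetup(helpers.CreateResourceFromPathWithNamespaceFromEnv("./testdata/secret.yaml")),
//    ).Exists().HasContent(map[string]string{"username": "admin"})
//
//    helpers.TestAssertions(t, testEnv, secretOK)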
+func NewSecretAssertion(opts ...assertion.Option) SecretAssertion { + return SecretAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.Option{assertion.WithBuilder(features.New("Secret").WithLabel("type", "secret"))}, + opts..., + )..., + ), + } +} diff --git a/internal/secrets/secrets.go b/internal/secrets/secrets.go index 4e229f0..a00f159 100644 --- a/internal/secrets/secrets.go +++ b/internal/secrets/secrets.go @@ -1,84 +1,39 @@ +// secrets contains assertions for Kubernetes Secrets. package secrets import ( "context" - "testing" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" - "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) -// SecretAssertion is a wrapper around the assertion.Assertion type and provides a set of assertions for Kubernetes -// Secrets. -type SecretAssertion struct { - assertion.Assertion -} - -func (sa SecretAssertion) clone() SecretAssertion { - return SecretAssertion{ - Assertion: assertion.Clone(sa.Assertion), - } -} - -// Exists asserts that exactly one Secret exists in the cluster that matches the provided options. -func (sa SecretAssertion) Exists() SecretAssertion { - return sa.ExactlyNExist(1) -} - -// ExactlyNExist asserts that exactly N Secrets exist in the cluster that match the provided options. -func (sa SecretAssertion) ExactlyNExist(count int) SecretAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - secrets, err := sa.getSecrets(ctx, t, cfg) - require.NoError(t, err) - - return len(secrets.Items) == count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, sa, conditionFunc)) - - return ctx - } +func secretHasContent(secret corev1.Secret, content map[string]string) bool { + hasContent := true - res := sa.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn)) + for key, value := range content { + secData, ok := secret.Data[key] + if !ok || string(secData) != value { + hasContent = false - return res -} - -// AtLeastNExist asserts that at least N Secrets exist in the cluster that match the provided options. -func (sa SecretAssertion) AtLeastNExist(count int) SecretAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - secrets, err := sa.getSecrets(ctx, t, cfg) - require.NoError(t, err) - - return len(secrets.Items) >= count, nil + break } - - require.NoError(t, helpers.WaitForCondition(ctx, sa, conditionFunc)) - - return ctx } - res := sa.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", stepFn)) - - return res + return hasContent } -func (sa SecretAssertion) getSecrets( +func getSecrets( ctx context.Context, t require.TestingT, cfg *envconf.Config, + listOpts metav1.ListOptions, ) (corev1.SecretList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) @@ -86,7 +41,7 @@ func (sa SecretAssertion) getSecrets( list, err := client. Resource(corev1.SchemeGroupVersion.WithResource("secrets")). 
- List(ctx, sa.ListOptions(cfg)) + List(ctx, listOpts) if err != nil { return secrets, err } @@ -99,112 +54,48 @@ func (sa SecretAssertion) getSecrets( return secrets, nil } -// HasContent asserts that exactly one Secret in the cluster contains the provided content. This match is not exclusive -// meaning that the Secret can contain additional content. -func (sa SecretAssertion) HasContent(content map[string]string) SecretAssertion { - return sa.ExactlyNHaveContent(1, content) -} - -// ExactlyNHaveContent asserts that exactly N Secrets in the cluster contain the provided content. This match is not -// exclusive meaning that the Secrets can contain additional content. -func (sa SecretAssertion) ExactlyNHaveContent(count int, content map[string]string) SecretAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - secrets, err := sa.getSecrets(ctx, t, cfg) +func exist() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, _ helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + secrets, err := getSecrets(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(secrets.Items) != count { - return false, nil - } - - haveContent := 0 - - for _, secret := range secrets.Items { - hasContent := true - - for key, value := range content { - secData, ok := secret.Data[key] - if !ok || string(secData) != value { - hasContent = false - - break - } - } - - if hasContent { - haveContent++ - } - } - - return haveContent == count, nil + return itemCountFn(len(secrets.Items), count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, sa, conditionFunc)) - - return ctx } - - res := sa.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveContent", stepFn)) - - return res } -// AtLeastNHaveContent asserts that at least N Secrets in the cluster contain the provided content. This match is not -// exclusive meaning that the Secrets can contain additional content. 
-func (sa SecretAssertion) AtLeastNHaveContent(count int, content map[string]string) SecretAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, sa.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - secrets, err := sa.getSecrets(ctx, t, cfg) +func haveContent(content map[string]string) helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + secrets, err := getSecrets(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(secrets.Items) < count { + if itemCountFn(len(secrets.Items), count) { return false, nil } haveContent := 0 for _, secret := range secrets.Items { - hasContent := true - - for key, value := range content { - secData, ok := secret.Data[key] - if !ok || string(secData) != value { - hasContent = false - - break - } - } - - if hasContent { + if secretHasContent(secret, content) { haveContent++ } } - return haveContent >= count, nil + return resultFn(haveContent, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, sa, conditionFunc)) - - return ctx - } - - res := sa.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveContent", stepFn)) - - return res -} - -// NewSecretAssertion creates a new SecretAssertion with the provided options. -func NewSecretAssertion(opts ...assertion.Option) SecretAssertion { - return SecretAssertion{ - Assertion: assertion.NewAssertion( - append( - []assertion.Option{assertion.WithBuilder(features.New("Secret").WithLabel("type", "secret"))}, - opts..., - )..., - ), } } diff --git a/internal/secrets/secrets_test.go b/internal/secrets/secrets_test.go index 7b43810..995fd76 100644 --- a/internal/secrets/secrets_test.go +++ b/internal/secrets/secrets_test.go @@ -13,6 +13,7 @@ import ( "github.com/DWSR/kubeassert-go/internal/testhelpers" ) +//nolint:gosec const secretPath = "./testdata/secret.yaml" var testEnv env.Environment From b436fab4a181e46600c8a338e3cc83d014df6d83 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Tue, 11 Mar 2025 00:21:45 -0400 Subject: [PATCH 18/29] wip --- internal/assertionhelpers/assertionhelpers.go | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/internal/assertionhelpers/assertionhelpers.go b/internal/assertionhelpers/assertionhelpers.go index 4f2b711..14dd063 100644 --- a/internal/assertionhelpers/assertionhelpers.go +++ b/internal/assertionhelpers/assertionhelpers.go @@ -1,3 +1,4 @@ +// assertionhelpers contains functionality that aids in the creation and use of assertions. package assertionhelpers import ( @@ -103,38 +104,43 @@ func CreateResourceFromPathWithNamespaceFromEnv( } } +// CreateResourceFromPath creates a resource from a file at the provided path. func CreateResourceFromPath(resourcePath string, decoderOpts ...decoder.DecodeOption) e2etypes.StepFunc { return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - r, err := resources.New(cfg.Client().RESTConfig()) + res, err := resources.New(cfg.Client().RESTConfig()) require.NoError(t, err) file, err := os.Open(filepath.Clean(resourcePath)) require.NoError(t, err) + defer func() { _ = file.Close() }() - err = decoder.DecodeEach(ctx, file, decoder.CreateHandler(r), decoderOpts...) 
+ err = decoder.DecodeEach(ctx, file, decoder.CreateHandler(res), decoderOpts...) require.NoError(t, err) return ctx } } +// DeleteResourceFromPath deletes a resource from a file at the provided path. func DeleteResourceFromPath(resourcePath string, decoderOpts ...decoder.DecodeOption) e2etypes.StepFunc { return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - r, err := resources.New(cfg.Client().RESTConfig()) + res, err := resources.New(cfg.Client().RESTConfig()) require.NoError(t, err) file, err := os.Open(filepath.Clean(resourcePath)) require.NoError(t, err) + defer func() { _ = file.Close() }() - err = decoder.DecodeEach(ctx, file, decoder.DeleteHandler(r), decoderOpts...) + err = decoder.DecodeEach(ctx, file, decoder.DeleteHandler(res), decoderOpts...) require.NoError(t, err) return ctx } } +// Sleep returns a StepFunc that sleeps for the provided duration. func Sleep(sleepTime time.Duration) e2etypes.StepFunc { return func(ctx context.Context, _ *testing.T, _ *envconf.Config) context.Context { time.Sleep(sleepTime) @@ -143,6 +149,7 @@ func Sleep(sleepTime time.Duration) e2etypes.StepFunc { } } +// DynamicClientFromEnvconf creates a dynamic client from the environment configuration. func DynamicClientFromEnvconf(t require.TestingT, cfg *envconf.Config) *dynamic.DynamicClient { klient, err := cfg.NewClient() require.NoError(t, err) @@ -153,6 +160,10 @@ func DynamicClientFromEnvconf(t require.TestingT, cfg *envconf.Config) *dynamic. return client } +// RequireTIfNotNil returns the require.TestingT object if it is not nil, otherwise it returns the provided testing.T +// object. This is primarily intended to enable injection of a mock for testing assertion code fails as expected. +// +//nolint:ireturn func RequireTIfNotNil(testingT *testing.T, requireT require.TestingT) require.TestingT { if requireT != nil { return requireT @@ -161,6 +172,7 @@ func RequireTIfNotNil(testingT *testing.T, requireT require.TestingT) require.Te return testingT } +// TestAssertions tests the provided assertions. func TestAssertions(t *testing.T, testEnv env.Environment, assertions ...assertion.Assertion) { tests := make([]e2etypes.Feature, 0, len(assertions)) @@ -171,6 +183,11 @@ func TestAssertions(t *testing.T, testEnv env.Environment, assertions ...asserti testEnv.Test(t, tests...) } +// ApplyKustomization applies a kustomization at the provided directory. 
+// +// TODO: refactor this function to be more testable and also simpler +// +//nolint:funlen,cyclop func ApplyKustomization(kustDir string) env.Func { return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { diskFS := filesys.MakeFsOnDisk() From 0ec9a1db7542807169c65d94bdee619e1fb948bb Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Tue, 11 Mar 2025 00:39:34 -0400 Subject: [PATCH 19/29] wip --- internal/pods/assertion.go | 44 +++++++++++++------------------------- internal/pods/pods.go | 8 ++++++- 2 files changed, 22 insertions(+), 30 deletions(-) diff --git a/internal/pods/assertion.go b/internal/pods/assertion.go index 25b43e1..e0032a4 100644 --- a/internal/pods/assertion.go +++ b/internal/pods/assertion.go @@ -1,18 +1,13 @@ package pods import ( - "context" - - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) +// PodAssertion is a wrapper around the assertion.Assertion type and provides a set of assertions for Kubernetes Pods. type PodAssertion struct { assertion.Assertion } @@ -23,10 +18,12 @@ func (pa PodAssertion) clone() PodAssertion { } } +// Exists asserts that exactly one Pod exists in the cluster that matches the provided options. func (pa PodAssertion) Exists() PodAssertion { return pa.ExactlyNExist(1) } +// ExactlyNExist asserts that exactly N Pods exist in the cluster that match the provided options. func (pa PodAssertion) ExactlyNExist(count int) PodAssertion { stepFn := helpers.AsStepFunc(pa, exist(), count, helpers.IntCompareFuncEqualTo, nil) @@ -36,6 +33,7 @@ func (pa PodAssertion) ExactlyNExist(count int) PodAssertion { return res } +// AtLeastNExist asserts that at least N Pods exist in the cluster that match the provided options. func (pa PodAssertion) AtLeastNExist(count int) PodAssertion { stepFn := helpers.AsStepFunc(pa, exist(), count, helpers.IntCompareFuncGreaterThanOrEqualTo, nil) @@ -45,32 +43,12 @@ func (pa PodAssertion) AtLeastNExist(count int) PodAssertion { return res } -// return default value instead of a nil pointer so that negative assertions (i.e. testing for false positives) can use -// a mock require.TestingT object. -func (pa PodAssertion) getPods(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.PodList, error) { - client := helpers.DynamicClientFromEnvconf(t, cfg) - - var pods corev1.PodList - - list, err := client. - Resource(corev1.SchemeGroupVersion.WithResource("pods")). - List(ctx, pa.ListOptions(cfg)) - if err != nil { - return pods, err - } - - err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &pods) - if err != nil { - return pods, err - } - - return pods, nil -} - +// IsReady asserts that exactly one Pod is ready in the cluster that matches the provided options. func (pa PodAssertion) IsReady() PodAssertion { return pa.ExactlyNAreReady(1) } +// ExactlyNAreReady asserts that exactly N Pods are ready in the cluster that match the provided options. 
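// Put together, a Pod readiness assertion in a test reads like the sketch below (the replica
// count is illustrative, and t/testEnv are the usual *testing.T and env.Environment):
//
//    podsReady := pods.NewPodAssertion(
//        assertion.WithResourceNamespaceFromTestEnv(),
//    ).AtLeastNExist(2).AtLeastNAreReady(2)
//
//    helpers.TestAssertions(t, testEnv, podsReady)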
func (pa PodAssertion) ExactlyNAreReady(count int) PodAssertion { stepFn := helpers.AsStepFunc(pa, areReady(), count, helpers.IntCompareFuncNotEqualTo, helpers.IntCompareFuncEqualTo) res := pa.clone() @@ -79,14 +57,22 @@ func (pa PodAssertion) ExactlyNAreReady(count int) PodAssertion { return res } +// AtLeastNAreReady asserts that at least N Pods are ready in the cluster that match the provided options. func (pa PodAssertion) AtLeastNAreReady(count int) PodAssertion { - stepFn := helpers.AsStepFunc(pa, areReady(), count, helpers.IntCompareFuncLessThan, helpers.IntCompareFuncGreaterThanOrEqualTo) + stepFn := helpers.AsStepFunc( + pa, + areReady(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) res := pa.clone() res.SetBuilder(pa.GetBuilder().Assess("atLeastNAreReady", stepFn)) return res } +// NewPodAssertion creates a new PodAssertion with the provided options. func NewPodAssertion(opts ...assertion.Option) PodAssertion { return PodAssertion{ Assertion: assertion.NewAssertion( diff --git a/internal/pods/pods.go b/internal/pods/pods.go index 2f0c7f2..30a6ae7 100644 --- a/internal/pods/pods.go +++ b/internal/pods/pods.go @@ -1,3 +1,4 @@ +// pods contains assertions for Kubernetes Pods. package pods import ( @@ -15,7 +16,12 @@ import ( // return default value instead of a nil pointer so that negative assertions (i.e. testing for false positives) can use // a mock require.TestingT object. -func getPods(ctx context.Context, t require.TestingT, cfg *envconf.Config, listOpts metav1.ListOptions) (corev1.PodList, error) { +func getPods( + ctx context.Context, + t require.TestingT, + cfg *envconf.Config, + listOpts metav1.ListOptions, +) (corev1.PodList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) var pods corev1.PodList From f247f30b011bdd82213e4be55b6fcb023d2ac1cc Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Tue, 11 Mar 2025 08:32:19 -0400 Subject: [PATCH 20/29] wip --- internal/namespaces/assertion.go | 156 +++++++++++++++++++++++++++++ internal/namespaces/namespaces.go | 159 +++--------------------------- 2 files changed, 170 insertions(+), 145 deletions(-) create mode 100644 internal/namespaces/assertion.go diff --git a/internal/namespaces/assertion.go b/internal/namespaces/assertion.go new file mode 100644 index 0000000..bbf8dd4 --- /dev/null +++ b/internal/namespaces/assertion.go @@ -0,0 +1,156 @@ +package namespaces + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type NamespaceAssertion struct { + assertion.Assertion +} + +const ( + podSecurityEnforceLabelKey = "pod-security.kubernetes.io/enforce" +) + +func (na NamespaceAssertion) clone() NamespaceAssertion { + return NamespaceAssertion{ + Assertion: assertion.Clone(na.Assertion), + } +} + +func (na NamespaceAssertion) getNamespaces(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.NamespaceList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var nsList corev1.NamespaceList + + list, err := client.Resource(corev1.SchemeGroupVersion.WithResource("namespaces")).List(ctx, na.ListOptions(cfg)) + if err != nil { + return nsList, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), 
&nsList) + if err != nil { + return nsList, err + } + + return nsList, nil +} + +func (na NamespaceAssertion) Exists() NamespaceAssertion { + return na.ExactlyNExist(1) +} + +func (na NamespaceAssertion) ExactlyNExist(count int) NamespaceAssertion { + stepFn := helpers.AsStepFunc(na, exist(), count, helpers.IntCompareFuncEqualTo, nil) + + res := na.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn)) + + return res +} + +func (na NamespaceAssertion) AtLeastNExist(count int) NamespaceAssertion { + stepFn := helpers.AsStepFunc(na, exist(), count, helpers.IntCompareFuncGreaterThanOrEqualTo, nil) + + res := na.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", stepFn)) + + return res +} + +func (na NamespaceAssertion) AtLeastNAreRestricted(count int) NamespaceAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + nsList, err := na.getNamespaces(ctx, t, cfg) + require.NoError(t, err) + + if len(nsList.Items) < count { + return false, nil + } + + restrictedCount := 0 + + for _, ns := range nsList.Items { + nsLabels := ns.GetLabels() + + enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] + if ok && enforceLabel == "restricted" { + restrictedCount += 1 + } + } + + return restrictedCount >= count, nil + } + + require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) + + return ctx + } + + res := na.clone() + res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", fn)) + + return res +} + +func (na NamespaceAssertion) IsRestricted() NamespaceAssertion { + return na.ExactlyNAreRestricted(1) +} + +func (na NamespaceAssertion) ExactlyNAreRestricted(count int) NamespaceAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + nsList, err := na.getNamespaces(ctx, t, cfg) + require.NoError(t, err) + + if len(nsList.Items) < count { + return false, nil + } + + restrictedCount := 0 + + for _, ns := range nsList.Items { + nsLabels := ns.GetLabels() + + enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] + if ok && enforceLabel == "restricted" { + restrictedCount += 1 + } + } + + return restrictedCount == count, nil + } + + require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) + + return ctx + } + + res := na.clone() + res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", fn)) + + return res +} + +func NewNamespaceAssertion(opts ...assertion.Option) NamespaceAssertion { + return NamespaceAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.Option{assertion.WithBuilder(features.New("Namespace").WithLabel("type", "namespace"))}, + opts..., + )..., + ), + } +} diff --git a/internal/namespaces/namespaces.go b/internal/namespaces/namespaces.go index b8d3d33..a5026d9 100644 --- a/internal/namespaces/namespaces.go +++ b/internal/namespaces/namespaces.go @@ -2,38 +2,23 @@ package namespaces import ( "context" - "testing" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" - "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) -type 
NamespaceAssertion struct { - assertion.Assertion -} - -const ( - podSecurityEnforceLabelKey = "pod-security.kubernetes.io/enforce" -) - -func (na NamespaceAssertion) clone() NamespaceAssertion { - return NamespaceAssertion{ - Assertion: assertion.Clone(na.Assertion), - } -} - -func (na NamespaceAssertion) getNamespaces(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.NamespaceList, error) { +func getNamespaces(ctx context.Context, t require.TestingT, cfg *envconf.Config, listOpts metav1.ListOptions) (corev1.NamespaceList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) var nsList corev1.NamespaceList - list, err := client.Resource(corev1.SchemeGroupVersion.WithResource("namespaces")).List(ctx, na.ListOptions(cfg)) + list, err := client.Resource(corev1.SchemeGroupVersion.WithResource("namespaces")).List(ctx, listOpts) if err != nil { return nsList, err } @@ -46,135 +31,19 @@ func (na NamespaceAssertion) getNamespaces(ctx context.Context, t require.Testin return nsList, nil } -func (na NamespaceAssertion) Exists() NamespaceAssertion { - return na.ExactlyNExist(1) -} - -func (na NamespaceAssertion) ExactlyNExist(count int) NamespaceAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - nsList, err := na.getNamespaces(ctx, t, cfg) - require.NoError(t, err) - - return len(nsList.Items) == count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) - - return ctx - } - - res := na.clone() - res.SetBuilder(res.GetBuilder().Assess("exists", fn)) - - return res -} - -func (na NamespaceAssertion) AtLeastNExist(count int) NamespaceAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - nsList, err := na.getNamespaces(ctx, t, cfg) - require.NoError(t, err) - - return len(nsList.Items) >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) - - return ctx - } - - res := na.clone() - res.SetBuilder(res.GetBuilder().Assess("exists", fn)) - - return res -} - -func (na NamespaceAssertion) AtLeastNAreRestricted(count int) NamespaceAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - nsList, err := na.getNamespaces(ctx, t, cfg) - require.NoError(t, err) - - if len(nsList.Items) < count { - return false, nil - } - - restrictedCount := 0 - - for _, ns := range nsList.Items { - nsLabels := ns.GetLabels() - - enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] - if ok && enforceLabel == "restricted" { - restrictedCount += 1 - } - } - - return restrictedCount >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) - - return ctx - } - - res := na.clone() - res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", fn)) - - return res -} - -func (na NamespaceAssertion) IsRestricted() NamespaceAssertion { - return na.ExactlyNAreRestricted(1) -} - -func (na NamespaceAssertion) ExactlyNAreRestricted(count int) NamespaceAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, 
na.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - nsList, err := na.getNamespaces(ctx, t, cfg) +func exist() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, _ helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + secrets, err := getNamespaces(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(nsList.Items) < count { - return false, nil - } - - restrictedCount := 0 - - for _, ns := range nsList.Items { - nsLabels := ns.GetLabels() - - enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] - if ok && enforceLabel == "restricted" { - restrictedCount += 1 - } - } - - return restrictedCount == count, nil + return itemCountFn(len(secrets.Items), count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) - - return ctx - } - - res := na.clone() - res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", fn)) - - return res -} - -func NewNamespaceAssertion(opts ...assertion.Option) NamespaceAssertion { - return NamespaceAssertion{ - Assertion: assertion.NewAssertion( - append( - []assertion.Option{assertion.WithBuilder(features.New("Namespace").WithLabel("type", "namespace"))}, - opts..., - )..., - ), } } From d8223f3a009ff5330e8442b842c7d4340250a255 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Wed, 12 Mar 2025 23:06:48 -0400 Subject: [PATCH 21/29] wip --- internal/namespaces/assertion.go | 117 +++++++----------------------- internal/namespaces/namespaces.go | 40 +++++++++- internal/pdbs/assertion.go | 40 ++++++++++ internal/pdbs/pdbs.go | 56 ++++---------- 4 files changed, 123 insertions(+), 130 deletions(-) create mode 100644 internal/pdbs/assertion.go diff --git a/internal/namespaces/assertion.go b/internal/namespaces/assertion.go index bbf8dd4..af4b2a8 100644 --- a/internal/namespaces/assertion.go +++ b/internal/namespaces/assertion.go @@ -1,19 +1,13 @@ package namespaces import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) +// NamespaceAssertion is an assertion for Kubernetes Namespaces. type NamespaceAssertion struct { assertion.Assertion } @@ -28,28 +22,12 @@ func (na NamespaceAssertion) clone() NamespaceAssertion { } } -func (na NamespaceAssertion) getNamespaces(ctx context.Context, t require.TestingT, cfg *envconf.Config) (corev1.NamespaceList, error) { - client := helpers.DynamicClientFromEnvconf(t, cfg) - - var nsList corev1.NamespaceList - - list, err := client.Resource(corev1.SchemeGroupVersion.WithResource("namespaces")).List(ctx, na.ListOptions(cfg)) - if err != nil { - return nsList, err - } - - err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &nsList) - if err != nil { - return nsList, err - } - - return nsList, nil -} - +// Exists asserts that exactly one Namespace exists. func (na NamespaceAssertion) Exists() NamespaceAssertion { return na.ExactlyNExist(1) } +// ExactlyNExist asserts that exactly N Namespaces exist. 
func (na NamespaceAssertion) ExactlyNExist(count int) NamespaceAssertion { stepFn := helpers.AsStepFunc(na, exist(), count, helpers.IntCompareFuncEqualTo, nil) @@ -59,6 +37,7 @@ func (na NamespaceAssertion) ExactlyNExist(count int) NamespaceAssertion { return res } +// AtLeastNExist asserts that at least N Namespaces exist. func (na NamespaceAssertion) AtLeastNExist(count int) NamespaceAssertion { stepFn := helpers.AsStepFunc(na, exist(), count, helpers.IntCompareFuncGreaterThanOrEqualTo, nil) @@ -68,82 +47,42 @@ func (na NamespaceAssertion) AtLeastNExist(count int) NamespaceAssertion { return res } -func (na NamespaceAssertion) AtLeastNAreRestricted(count int) NamespaceAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - nsList, err := na.getNamespaces(ctx, t, cfg) - require.NoError(t, err) - - if len(nsList.Items) < count { - return false, nil - } - - restrictedCount := 0 - - for _, ns := range nsList.Items { - nsLabels := ns.GetLabels() - - enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] - if ok && enforceLabel == "restricted" { - restrictedCount += 1 - } - } - - return restrictedCount >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) - - return ctx - } - - res := na.clone() - res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", fn)) - - return res -} - +// IsRestricted asserts that exactly one Namespace uses the default "restricted" pod security standard. func (na NamespaceAssertion) IsRestricted() NamespaceAssertion { return na.ExactlyNAreRestricted(1) } +// ExactlyNAreRestricted asserts that exactly N Namespaces use the default "restricted" pod security standard. func (na NamespaceAssertion) ExactlyNAreRestricted(count int) NamespaceAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, na.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - nsList, err := na.getNamespaces(ctx, t, cfg) - require.NoError(t, err) - - if len(nsList.Items) < count { - return false, nil - } - - restrictedCount := 0 - - for _, ns := range nsList.Items { - nsLabels := ns.GetLabels() - - enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] - if ok && enforceLabel == "restricted" { - restrictedCount += 1 - } - } - - return restrictedCount == count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, na, conditionFunc)) + stepFn := helpers.AsStepFunc( + na, + areRestricted(), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + res := na.clone() + res.SetBuilder(na.GetBuilder().Assess("exactlyNAreRestricted", stepFn)) - return ctx - } + return res +} +// AtLeastNAreRestricted asserts that at least N Namespaces use the default "restricted" pod security standard. +func (na NamespaceAssertion) AtLeastNAreRestricted(count int) NamespaceAssertion { + stepFn := helpers.AsStepFunc( + na, + areRestricted(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) res := na.clone() - res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", fn)) + res.SetBuilder(na.GetBuilder().Assess("atLeastNAreRestricted", stepFn)) return res } +// NewNamespaceAssertion creates a new NamespaceAssertion. 
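// A usage sketch: assert that a Namespace exists and enforces the "restricted" Pod Security
// Standard, i.e. that it carries the pod-security.kubernetes.io/enforce=restricted label
// (the namespace name is illustrative):
//
//    nsRestricted := namespaces.NewNamespaceAssertion(
//        assertion.WithResourceName("team-a"),
//    ).Exists().IsRestricted()
//
//    helpers.TestAssertions(t, testEnv, nsRestricted)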
func NewNamespaceAssertion(opts ...assertion.Option) NamespaceAssertion { return NamespaceAssertion{ Assertion: assertion.NewAssertion( diff --git a/internal/namespaces/namespaces.go b/internal/namespaces/namespaces.go index a5026d9..7cdee81 100644 --- a/internal/namespaces/namespaces.go +++ b/internal/namespaces/namespaces.go @@ -1,3 +1,4 @@ +// namespace contains assertions for Kubernetes Namespaces. package namespaces import ( @@ -13,7 +14,12 @@ import ( helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) -func getNamespaces(ctx context.Context, t require.TestingT, cfg *envconf.Config, listOpts metav1.ListOptions) (corev1.NamespaceList, error) { +func getNamespaces( + ctx context.Context, + t require.TestingT, + cfg *envconf.Config, + listOpts metav1.ListOptions, +) (corev1.NamespaceList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) var nsList corev1.NamespaceList @@ -47,3 +53,35 @@ func exist() helpers.ConditionFuncFactory { } } } + +func areRestricted() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + nsList, err := getNamespaces(ctx, t, cfg, assert.ListOptions(cfg)) + require.NoError(t, err) + + if itemCountFn(len(nsList.Items), count) { + return false, nil + } + + restrictedCount := 0 + + for _, namespace := range nsList.Items { + nsLabels := namespace.GetLabels() + + enforceLabel, ok := nsLabels[podSecurityEnforceLabelKey] + if ok && enforceLabel == "restricted" { + restrictedCount++ + } + } + + return resultFn(restrictedCount, count), nil + } + } +} diff --git a/internal/pdbs/assertion.go b/internal/pdbs/assertion.go new file mode 100644 index 0000000..248bba3 --- /dev/null +++ b/internal/pdbs/assertion.go @@ -0,0 +1,40 @@ +package pdbs + +import ( + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type PDBAssertion struct { + assertion.Assertion +} + +func (pa PDBAssertion) clone() PDBAssertion { + return PDBAssertion{ + Assertion: assertion.Clone(pa.Assertion), + } +} + +// Exists asserts that exactly one PodDisruptionBudget exists in the cluster that matches the provided options. +func (pa PDBAssertion) Exists() PDBAssertion { + stepFn := helpers.AsStepFunc(pa, exist(), 1, helpers.IntCompareFuncEqualTo, nil) + + res := pa.clone() + res.SetBuilder(res.GetBuilder().Assess("exists", stepFn)) + + return res +} + +// NewPDBAssertion creates a new PDBAssertion with the provided options. +func NewPDBAssertion(opts ...assertion.Option) PDBAssertion { + return PDBAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.Option{assertion.WithBuilder(features.New("PDB").WithLabel("type", "poddisruptionbudget"))}, + opts..., + )..., + ), + } +} diff --git a/internal/pdbs/pdbs.go b/internal/pdbs/pdbs.go index 93f997d..eca29ec 100644 --- a/internal/pdbs/pdbs.go +++ b/internal/pdbs/pdbs.go @@ -1,40 +1,31 @@ +// pdbs contains assertions for Kubernetes PodDisruptionBudgets. 
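// A usage sketch for the PDB assertions (the PodDisruptionBudget name is illustrative; t and
// testEnv are the test's *testing.T and env.Environment):
//
//    pdbExists := pdbs.NewPDBAssertion(
//        assertion.WithResourceName("my-app"),
//        assertion.WithResourceNamespaceFromTestEnv(),
//    ).Exists()
//
//    helpers.TestAssertions(t, testEnv, pdbExists)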
package pdbs import ( "context" - "testing" "github.com/stretchr/testify/require" policyv1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" - "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) -type PDBAssertion struct { - assertion.Assertion -} - -func (pa PDBAssertion) clone() PDBAssertion { - return PDBAssertion{ - Assertion: assertion.Clone(pa.Assertion), - } -} - -func (pa PDBAssertion) getPDBs( +func getPDBs( ctx context.Context, t require.TestingT, cfg *envconf.Config, + listOpts metav1.ListOptions, ) (policyv1.PodDisruptionBudgetList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) var pdbList policyv1.PodDisruptionBudgetList list, err := client.Resource(policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets")). - List(ctx, pa.ListOptions(cfg)) + List(ctx, listOpts) if err != nil { return pdbList, err } @@ -47,34 +38,19 @@ func (pa PDBAssertion) getPDBs( return pdbList, nil } -func (pa PDBAssertion) Exists() PDBAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, pa.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - pdbs, err := pa.getPDBs(ctx, t, cfg) +func exist() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, _ helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + pdbs, err := getPDBs(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - return len(pdbs.Items) == 1, nil + return itemCountFn(len(pdbs.Items), count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, pa, conditionFunc)) - - return ctx - } - res := pa.clone() - res.SetBuilder(res.GetBuilder().Assess("exists", stepFn)) - - return res -} - -func NewPDBAssertion(opts ...assertion.Option) PDBAssertion { - return PDBAssertion{ - Assertion: assertion.NewAssertion( - append( - []assertion.Option{assertion.WithBuilder(features.New("CRD").WithLabel("type", "poddisruptionbudget"))}, - opts..., - )..., - ), } } From 761265a33666078604c9d668dda0ad338f99364a Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Wed, 12 Mar 2025 23:54:19 -0400 Subject: [PATCH 22/29] wip --- internal/deployments/assertion.go | 307 ++++++++++++++ internal/deployments/deployments.go | 622 +++++----------------------- 2 files changed, 417 insertions(+), 512 deletions(-) create mode 100644 internal/deployments/assertion.go diff --git a/internal/deployments/assertion.go b/internal/deployments/assertion.go new file mode 100644 index 0000000..9a3d1cf --- /dev/null +++ b/internal/deployments/assertion.go @@ -0,0 +1,307 @@ +package deployments + +import ( + "context" + + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +// DeploymentAssertion is a wrapper around assertion.Assertion that provides a set of assertion functions for +// Deployments. 
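The PodDisruptionBudget code now follows the same file split as the other resources: assertion.go holds the exported, chainable methods and pdbs.go holds the package-level client access plus the condition factories. As an illustration of how that split is meant to grow, the count assertions the other resources already expose would be added to internal/pdbs/assertion.go like this (hypothetical methods, not part of this patch, mirroring the Deployment versions):

// ExactlyNExist would assert that exactly N PodDisruptionBudgets match the
// provided options. Hypothetical extension shown for illustration only.
func (pa PDBAssertion) ExactlyNExist(count int) PDBAssertion {
	stepFn := helpers.AsStepFunc(pa, exist(), count, helpers.IntCompareFuncEqualTo, nil)

	res := pa.clone()
	res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn))

	return res
}

// AtLeastNExist would assert that at least N PodDisruptionBudgets match the
// provided options. Hypothetical extension shown for illustration only.
func (pa PDBAssertion) AtLeastNExist(count int) PDBAssertion {
	stepFn := helpers.AsStepFunc(pa, exist(), count, helpers.IntCompareFuncGreaterThanOrEqualTo, nil)

	res := pa.clone()
	res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", stepFn))

	return res
}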
+type DeploymentAssertion struct { + assertion.Assertion +} + +func (da DeploymentAssertion) clone() DeploymentAssertion { + return DeploymentAssertion{ + Assertion: assertion.Clone(da.Assertion), + } +} + +// Exists asserts that exactly one Deployment exists in the cluster that matches the provided options. +func (da DeploymentAssertion) Exists() DeploymentAssertion { + return da.ExactlyNExist(1) +} + +// ExactlyNExist asserts that exactly N Deployments exist in the cluster that match the provided options. +func (da DeploymentAssertion) ExactlyNExist(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc(da, exist(), count, helpers.IntCompareFuncEqualTo, nil) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", stepFn)) + + return res +} + +// AtLeastNExist asserts that at least N Deployments exist in the cluster that match the provided options. +func (da DeploymentAssertion) AtLeastNExist(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc(da, exist(), count, helpers.IntCompareFuncGreaterThanOrEqualTo, nil) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", stepFn)) + + return res +} + +// IsAvailable asserts that exactly one Deployment is available in the cluster that matches the provided options. +func (da DeploymentAssertion) IsAvailable() DeploymentAssertion { + return da.ExactlyNAreAvailable(1) +} + +// ExactlyNAreAvailable asserts that exactly N Deployments are available in the cluster that match the provided options. +func (da DeploymentAssertion) ExactlyNAreAvailable(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + areAvailable(), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNAreAvailable", stepFn)) + + return res +} + +// AtLeastNAreAvailable asserts that at least N Deployments are available in the cluster that match the provided options. +func (da DeploymentAssertion) AtLeastNAreAvailable(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + areAvailable(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNAreAvailable", stepFn)) + + return res +} + +// IsSystemClusterCritical asserts that exactly one Deployment is system cluster critical in the cluster that matches the +// provided options. +func (da DeploymentAssertion) IsSystemClusterCritical() DeploymentAssertion { + return da.ExactlyNAreSystemClusterCritical(1) +} + +// ExactlyNAreSystemClusterCritical asserts that exactly N Deployments are system cluster critical in the cluster that +// match the provided options. 
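Because every method clones the assertion and appends one assessment to the feature builder, checks chain without mutating the original value. A usage sketch follows; the function name is hypothetical, the environment is assumed to come from the e2e-framework setup used by the tests in this series, and scoping options (label selector, namespace, name) are omitted because their option names are not shown in this diff.

package deployments_test

import (
	"testing"

	"sigs.k8s.io/e2e-framework/pkg/env"

	"github.com/DWSR/kubeassert-go/internal/assertion"
	"github.com/DWSR/kubeassert-go/internal/deployments"
)

// runDeploymentChecks chains several assertions and runs them as a single
// feature. Passing the embedded Assertion to AsFeature mirrors how the tests
// in this series convert assertions to features; the exact parameter type of
// AsFeature is an assumption.
func runDeploymentChecks(t *testing.T, testEnv env.Environment) {
	deployAssert := deployments.NewDeploymentAssertion().
		Exists().
		IsAvailable().
		IsSystemClusterCritical()

	testEnv.Test(t, assertion.AsFeature(deployAssert.Assertion))
}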
+func (da DeploymentAssertion) ExactlyNAreSystemClusterCritical(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + areSystemClusterCritical(), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNAreSystemClusterCritical", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNAreSystemClusterCritical(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + areSystemClusterCritical(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNAreSystemClusterCritical", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasNoCPULimits() DeploymentAssertion { + return da.ExactlyNHaveNoCPULimits(1) +} + +func (da DeploymentAssertion) ExactlyNHaveNoCPULimits(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveNoCPULimits(), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveNoCPULimits", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveNoCPULimits(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveNoCPULimits(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveNoCPULimits", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasMemoryLimitsEqualToRequests() DeploymentAssertion { + return da.ExactlyNHaveMemoryLimitsEqualToRequests(1) +} + +func (da DeploymentAssertion) ExactlyNHaveMemoryLimitsEqualToRequests(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveMemoryLimitsEqualToRequests(), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryLimitsEqualToRequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveMemoryLimitsEqualToRequests(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveMemoryLimitsEqualToRequests(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryLimitsEqualToRequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasMemoryLimits() DeploymentAssertion { + return da.ExactlyNHaveMemoryLimits(1) +} + +func (da DeploymentAssertion) ExactlyNHaveMemoryLimits(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveMemoryLimits(), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryLimits", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveMemoryLimits(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveMemoryLimits(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryLimits", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasMemoryRequests() DeploymentAssertion { + return da.ExactlyNHaveMemoryRequests(1) +} + +func (da DeploymentAssertion) ExactlyNHaveMemoryRequests(count int) 
DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveMemoryRequests(), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryRequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveMemoryRequests(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveMemoryRequests(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryRequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) HasCPURequests() DeploymentAssertion { + return da.ExactlyNHaveCPURequests(1) +} + +func (da DeploymentAssertion) ExactlyNHaveCPURequests(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveCPURequests(), + count, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveCPURequests", stepFn)) + + return res +} + +func (da DeploymentAssertion) AtLeastNHaveCPURequests(count int) DeploymentAssertion { + stepFn := helpers.AsStepFunc( + da, + haveCPURequests(), + count, + helpers.IntCompareFuncLessThan, + helpers.IntCompareFuncGreaterThanOrEqualTo, + ) + + res := da.clone() + res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveCPURequests", stepFn)) + + return res +} + +func NewDeploymentAssertion(opts ...assertion.Option) DeploymentAssertion { + return DeploymentAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.Option{assertion.WithBuilder(features.New("Deployment").WithLabel("type", "deployment"))}, + opts..., + )..., + ), + } +} diff --git a/internal/deployments/deployments.go b/internal/deployments/deployments.go index 6c9a566..da9c406 100644 --- a/internal/deployments/deployments.go +++ b/internal/deployments/deployments.go @@ -2,84 +2,32 @@ package deployments import ( "context" - "testing" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" - "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) -type DeploymentAssertion struct { - assertion.Assertion -} - -func (da DeploymentAssertion) clone() DeploymentAssertion { - return DeploymentAssertion{ - Assertion: assertion.Clone(da.Assertion), - } -} - -func (da DeploymentAssertion) Exists() DeploymentAssertion { - return da.ExactlyNExist(1) -} - -func (da DeploymentAssertion) ExactlyNExist(count int) DeploymentAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) - require.NoError(t, err) - - return len(deploys.Items) == count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNExist", fn)) - - return res -} - -func (da DeploymentAssertion) AtLeastNExist(count int) DeploymentAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := 
helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) - require.NoError(t, err) - - return len(deploys.Items) >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNExist", fn)) - - return res -} - -func (da DeploymentAssertion) getDeployments(ctx context.Context, t require.TestingT, cfg *envconf.Config) (appsv1.DeploymentList, error) { +func getDeployments( + ctx context.Context, + t require.TestingT, + cfg *envconf.Config, + listOpts metav1.ListOptions, +) (appsv1.DeploymentList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) var deploys appsv1.DeploymentList list, err := client. Resource(appsv1.SchemeGroupVersion.WithResource("deployments")). - List(ctx, da.ListOptions(cfg)) + List(ctx, listOpts) if err != nil { return deploys, err } @@ -92,61 +40,42 @@ func (da DeploymentAssertion) getDeployments(ctx context.Context, t require.Test return deploys, nil } -func (da DeploymentAssertion) IsAvailable() DeploymentAssertion { - return da.ExactlyNAreAvailable(1) -} - -func (da DeploymentAssertion) ExactlyNAreAvailable(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) +func exist() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, _ helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + deployments, err := getDeployments(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(deploys.Items) != count { - return false, nil - } - - availableCount := 0 - - for _, deploy := range deploys.Items { - for _, condition := range deploy.Status.Conditions { - if condition.Type == appsv1.DeploymentAvailable && condition.Status == corev1.ConditionTrue { - availableCount += 1 - } - } - } - - return availableCount == count, nil + return itemCountFn(len(deployments.Items), count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNAreAvailable", stepFn)) - - return res } -func (da DeploymentAssertion) AtLeastNAreAvailable(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) +func areAvailable() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + deployments, err := getDeployments(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(deploys.Items) < count { + if itemCountFn(len(deployments.Items), count) { return false, nil } availableCount := 0 - for _, deploy := range deploys.Items { + for _, deploy := range deployments.Items { for _, condition := range 
deploy.Status.Conditions { if condition.Type == appsv1.DeploymentAvailable && condition.Status == corev1.ConditionTrue { availableCount++ @@ -154,155 +83,59 @@ func (da DeploymentAssertion) AtLeastNAreAvailable(count int) DeploymentAssertio } } - return availableCount >= count, nil + return resultFn(availableCount, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNAreAvailable", stepFn)) - - return res -} - -func (da DeploymentAssertion) IsSystemClusterCritical() DeploymentAssertion { - return da.ExactlyNAreSystemClusterCritical(1) } -func (da DeploymentAssertion) ExactlyNAreSystemClusterCritical(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) +func areSystemClusterCritical() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + deployments, err := getDeployments(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(deploys.Items) < count { + if itemCountFn(len(deployments.Items), count) { return false, nil } systemClusterCriticalCount := 0 - for _, deploy := range deploys.Items { + for _, deploy := range deployments.Items { if deploy.Spec.Template.Spec.PriorityClassName == "system-cluster-critical" { systemClusterCriticalCount++ } } - return systemClusterCriticalCount == count, nil + return resultFn(systemClusterCriticalCount, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNAreSystemClusterCritical", stepFn)) - - return res } -func (da DeploymentAssertion) AtLeastNAreSystemClusterCritical(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) +func haveNoCPULimits() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + deployments, err := getDeployments(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(deploys.Items) < count { - return false, nil - } - - systemClusterCriticalCount := 0 - - for _, deploy := range deploys.Items { - if deploy.Spec.Template.Spec.PriorityClassName == "system-cluster-critical" { - systemClusterCriticalCount++ - } - } - - return systemClusterCriticalCount >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNAreSystemClusterCritical", stepFn)) - - return res -} - -func (da DeploymentAssertion) HasNoCPULimits() DeploymentAssertion { - return da.ExactlyNHaveNoCPULimits(1) -} - -func (da DeploymentAssertion) ExactlyNHaveNoCPULimits(count int) 
DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) - require.NoError(t, err) - - if len(deploys.Items) < count { - return false, nil - } - - hasNoCPULimits := 0 - - for _, deploy := range deploys.Items { - allContainersHaveNoCPULimits := true - - for _, container := range deploy.Spec.Template.Spec.Containers { - if !container.Resources.Limits.Cpu().IsZero() { - allContainersHaveNoCPULimits = false - - break - } - } - - if allContainersHaveNoCPULimits { - hasNoCPULimits++ - } - } - - return hasNoCPULimits == count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveNoCPULimits", stepFn)) - - return res -} - -func (da DeploymentAssertion) AtLeastNHaveNoCPULimits(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) - require.NoError(t, err) - - if len(deploys.Items) < count { + if itemCountFn(len(deployments.Items), count) { return false, nil } hasNoCPULimits := 0 - for _, deploy := range deploys.Items { + for _, deploy := range deployments.Items { allContainersHaveNoCPULimits := true for _, container := range deploy.Spec.Template.Spec.Containers { @@ -318,86 +151,30 @@ func (da DeploymentAssertion) AtLeastNHaveNoCPULimits(count int) DeploymentAsser } } - return hasNoCPULimits >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveNoCPULimits", stepFn)) - - return res -} - -func (da DeploymentAssertion) HasMemoryLimitsEqualToRequests() DeploymentAssertion { - return da.ExactlyNHaveMemoryLimitsEqualToRequests(1) -} - -func (da DeploymentAssertion) ExactlyNHaveMemoryLimitsEqualToRequests(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) - require.NoError(t, err) - - if len(deploys.Items) < count { - return false, nil - } - - hasMemoryLimitsEqualToRequests := 0 - - for _, deploy := range deploys.Items { - allContainersHaveMemoryLimitsEqualToRequests := true - - for _, container := range deploy.Spec.Template.Spec.Containers { - memoryRequests := container.Resources.Requests.Memory() - memoryLimits := container.Resources.Limits.Memory() - - if !cmp.Equal(memoryLimits, memoryRequests) { - allContainersHaveMemoryLimitsEqualToRequests = false - - break - } - } - - if allContainersHaveMemoryLimitsEqualToRequests { - hasMemoryLimitsEqualToRequests++ - } - } - - return hasMemoryLimitsEqualToRequests == count, nil + return resultFn(hasNoCPULimits, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryLimitsEqualToRequests", stepFn)) - - return res } -func (da DeploymentAssertion) 
AtLeastNHaveMemoryLimitsEqualToRequests(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) +func haveMemoryLimitsEqualToRequests() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + deployments, err := getDeployments(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(deploys.Items) < count { + if itemCountFn(len(deployments.Items), count) { return false, nil } hasMemoryLimitsEqualToRequests := 0 - for _, deploy := range deploys.Items { + for _, deploy := range deployments.Items { allContainersHaveMemoryLimitsEqualToRequests := true for _, container := range deploy.Spec.Template.Spec.Containers { @@ -416,39 +193,30 @@ func (da DeploymentAssertion) AtLeastNHaveMemoryLimitsEqualToRequests(count int) } } - return hasMemoryLimitsEqualToRequests >= count, nil + return resultFn(hasMemoryLimitsEqualToRequests, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryLimitsEqualToRequests", stepFn)) - - return res } -func (da DeploymentAssertion) HasMemoryLimits() DeploymentAssertion { - return da.ExactlyNHaveMemoryLimits(1) -} - -func (da DeploymentAssertion) ExactlyNHaveMemoryLimits(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) +func haveMemoryLimits() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + deployments, err := getDeployments(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(deploys.Items) < count { + if itemCountFn(len(deployments.Items), count) { return false, nil } hasMemoryLimits := 0 - for _, deploy := range deploys.Items { + for _, deploy := range deployments.Items { allContainersHaveMemoryLimits := true for _, container := range deploy.Spec.Template.Spec.Containers { @@ -464,83 +232,30 @@ func (da DeploymentAssertion) ExactlyNHaveMemoryLimits(count int) DeploymentAsse } } - return hasMemoryLimits == count, nil + return resultFn(hasMemoryLimits, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryLimits", stepFn)) - - return res } -func (da DeploymentAssertion) AtLeastNHaveMemoryLimits(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) +func haveMemoryRequests() helpers.ConditionFuncFactory { + return func( + t 
require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + deployments, err := getDeployments(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(deploys.Items) < count { - return false, nil - } - - hasMemoryLimits := 0 - - for _, deploy := range deploys.Items { - allContainersHaveMemoryLimits := true - - for _, container := range deploy.Spec.Template.Spec.Containers { - if container.Resources.Limits.Memory().IsZero() { - allContainersHaveMemoryLimits = false - - break - } - } - - if allContainersHaveMemoryLimits { - hasMemoryLimits++ - } - } - - return hasMemoryLimits >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryLimits", stepFn)) - - return res -} - -func (da DeploymentAssertion) HasMemoryRequests() DeploymentAssertion { - return da.ExactlyNHaveMemoryRequests(1) -} - -func (da DeploymentAssertion) ExactlyNHaveMemoryRequests(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) - require.NoError(t, err) - - if len(deploys.Items) < count { + if itemCountFn(len(deployments.Items), count) { return false, nil } hasMemoryRequests := 0 - for _, deploy := range deploys.Items { + for _, deploy := range deployments.Items { allContainersHaveMemoryRequests := true for _, container := range deploy.Spec.Template.Spec.Containers { @@ -556,127 +271,30 @@ func (da DeploymentAssertion) ExactlyNHaveMemoryRequests(count int) DeploymentAs } } - return hasMemoryRequests == count, nil + return resultFn(hasMemoryRequests, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveMemoryRequests", stepFn)) - - return res } -func (da DeploymentAssertion) AtLeastNHaveMemoryRequests(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) +func haveCPURequests() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, resultFn helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + deployments, err := getDeployments(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(deploys.Items) < count { - return false, nil - } - - hasMemoryRequests := 0 - - for _, deploy := range deploys.Items { - allContainersHaveMemoryRequests := true - - for _, container := range deploy.Spec.Template.Spec.Containers { - if container.Resources.Requests.Memory().IsZero() { - allContainersHaveMemoryRequests = false - - break - } - } - - if allContainersHaveMemoryRequests { - hasMemoryRequests++ - } - } - - return hasMemoryRequests >= count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - 
res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveMemoryRequests", stepFn)) - - return res -} - -func (da DeploymentAssertion) HasCPURequests() DeploymentAssertion { - return da.ExactlyNHaveCPURequests(1) -} - -func (da DeploymentAssertion) ExactlyNHaveCPURequests(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) - require.NoError(t, err) - - if len(deploys.Items) < count { - return false, nil - } - - hasCPURequests := 0 - - for _, deploy := range deploys.Items { - allContainersHaveCPURequests := true - - for _, container := range deploy.Spec.Template.Spec.Containers { - if container.Resources.Requests.Cpu().IsZero() { - allContainersHaveCPURequests = false - - break - } - } - - if allContainersHaveCPURequests { - hasCPURequests++ - } - } - - return hasCPURequests == count, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("exactlyNHaveCPURequests", stepFn)) - - return res -} - -func (da DeploymentAssertion) AtLeastNHaveCPURequests(count int) DeploymentAssertion { - stepFn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, da.GetRequireT()) - - conditionFunc := func(ctx context.Context) (bool, error) { - deploys, err := da.getDeployments(ctx, t, cfg) - require.NoError(t, err) - - if len(deploys.Items) < count { + if itemCountFn(len(deployments.Items), count) { return false, nil } hasCPURequests := 0 - for _, deploy := range deploys.Items { + for _, deploy := range deployments.Items { allContainersHaveCPURequests := true for _, container := range deploy.Spec.Template.Spec.Containers { @@ -692,27 +310,7 @@ func (da DeploymentAssertion) AtLeastNHaveCPURequests(count int) DeploymentAsser } } - return hasCPURequests >= count, nil + return resultFn(hasCPURequests, count), nil } - - require.NoError(t, helpers.WaitForCondition(ctx, da, conditionFunc)) - - return ctx - } - - res := da.clone() - res.SetBuilder(res.GetBuilder().Assess("atLeastNHaveCPURequests", stepFn)) - - return res -} - -func NewDeploymentAssertion(opts ...assertion.Option) DeploymentAssertion { - return DeploymentAssertion{ - Assertion: assertion.NewAssertion( - append( - []assertion.Option{assertion.WithBuilder(features.New("Deployment").WithLabel("type", "deployment"))}, - opts..., - )..., - ), } } From c273eb94c6e2fc9db4b565245c4440a2a81d249e Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Wed, 12 Mar 2025 23:54:46 -0400 Subject: [PATCH 23/29] wip --- internal/deployments/assertion.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/internal/deployments/assertion.go b/internal/deployments/assertion.go index 9a3d1cf..17377a7 100644 --- a/internal/deployments/assertion.go +++ b/internal/deployments/assertion.go @@ -1,12 +1,6 @@ package deployments import ( - "context" - - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" From cb4f8339ff752e8ad42b101f4659a29d6184f10b Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Thu, 13 Mar 2025 00:01:44 -0400 Subject: [PATCH 24/29] wip --- 
internal/deployments/assertion.go | 37 ++++++++++++++++++++++++++--- internal/deployments/deployments.go | 1 + 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/internal/deployments/assertion.go b/internal/deployments/assertion.go index 17377a7..d802a33 100644 --- a/internal/deployments/assertion.go +++ b/internal/deployments/assertion.go @@ -65,7 +65,8 @@ func (da DeploymentAssertion) ExactlyNAreAvailable(count int) DeploymentAssertio return res } -// AtLeastNAreAvailable asserts that at least N Deployments are available in the cluster that match the provided options. +// AtLeastNAreAvailable asserts that at least N Deployments are available in the cluster that match the provided +// options. func (da DeploymentAssertion) AtLeastNAreAvailable(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -81,8 +82,8 @@ func (da DeploymentAssertion) AtLeastNAreAvailable(count int) DeploymentAssertio return res } -// IsSystemClusterCritical asserts that exactly one Deployment is system cluster critical in the cluster that matches the -// provided options. +// IsSystemClusterCritical asserts that exactly one Deployment is system cluster critical in the cluster that matches +// the provided options. func (da DeploymentAssertion) IsSystemClusterCritical() DeploymentAssertion { return da.ExactlyNAreSystemClusterCritical(1) } @@ -104,6 +105,8 @@ func (da DeploymentAssertion) ExactlyNAreSystemClusterCritical(count int) Deploy return res } +// AtLeastNAreSystemClusterCritical asserts that at least N Deployments are system cluster critical in the cluster that +// match the provided options. func (da DeploymentAssertion) AtLeastNAreSystemClusterCritical(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -119,10 +122,13 @@ func (da DeploymentAssertion) AtLeastNAreSystemClusterCritical(count int) Deploy return res } +// HasNoCPULimits asserts that exactly one Deployment has no CPU limits in the cluster that match the provided options. func (da DeploymentAssertion) HasNoCPULimits() DeploymentAssertion { return da.ExactlyNHaveNoCPULimits(1) } +// ExactlyNHaveNoCPULimits asserts that exactly N Deployments have no CPU limits in the cluster that match the provided +// options. func (da DeploymentAssertion) ExactlyNHaveNoCPULimits(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -138,6 +144,8 @@ func (da DeploymentAssertion) ExactlyNHaveNoCPULimits(count int) DeploymentAsser return res } +// AtLeastNHaveNoCPULimits asserts that at least N Deployments have no CPU limits in the cluster that match the provided +// options. func (da DeploymentAssertion) AtLeastNHaveNoCPULimits(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -153,10 +161,14 @@ func (da DeploymentAssertion) AtLeastNHaveNoCPULimits(count int) DeploymentAsser return res } +// HasMemoryLimitsEqualToRequests asserts that exactly one Deployment has memory limits set equal to requests in the +// cluster that match the provided options. func (da DeploymentAssertion) HasMemoryLimitsEqualToRequests() DeploymentAssertion { return da.ExactlyNHaveMemoryLimitsEqualToRequests(1) } +// ExactlyNHaveMemoryLimitsEqualToRequests asserts that exactly N Deployments have memory limits set equal to requests +// in the cluster that match the provided options. 
func (da DeploymentAssertion) ExactlyNHaveMemoryLimitsEqualToRequests(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -172,6 +184,8 @@ func (da DeploymentAssertion) ExactlyNHaveMemoryLimitsEqualToRequests(count int) return res } +// AtLeastNHaveMemoryLimitsEqualToRequests asserts that at least N Deployments have memory limits set equal to requests +// in the cluster that match the provided options. func (da DeploymentAssertion) AtLeastNHaveMemoryLimitsEqualToRequests(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -187,10 +201,13 @@ func (da DeploymentAssertion) AtLeastNHaveMemoryLimitsEqualToRequests(count int) return res } +// HasMemoryLimits asserts that exactly one Deployment has memory limits in the cluster that match the provided options. func (da DeploymentAssertion) HasMemoryLimits() DeploymentAssertion { return da.ExactlyNHaveMemoryLimits(1) } +// ExactlyNHaveMemoryLimits asserts that exactly N Deployments have memory limits in the cluster that match the provided +// options. func (da DeploymentAssertion) ExactlyNHaveMemoryLimits(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -206,6 +223,8 @@ func (da DeploymentAssertion) ExactlyNHaveMemoryLimits(count int) DeploymentAsse return res } +// AtLeastNHaveMemoryLimits asserts that at least N Deployments have memory limits in the cluster that match the +// provided options. func (da DeploymentAssertion) AtLeastNHaveMemoryLimits(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -221,10 +240,14 @@ func (da DeploymentAssertion) AtLeastNHaveMemoryLimits(count int) DeploymentAsse return res } +// HasMemoryRequests asserts that exactly one Deployment has memory requests in the cluster that match the provided +// options. func (da DeploymentAssertion) HasMemoryRequests() DeploymentAssertion { return da.ExactlyNHaveMemoryRequests(1) } +// ExactlyNHaveMemoryRequests asserts that exactly N Deployments have memory requests in the cluster that match the +// provided options. func (da DeploymentAssertion) ExactlyNHaveMemoryRequests(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -240,6 +263,8 @@ func (da DeploymentAssertion) ExactlyNHaveMemoryRequests(count int) DeploymentAs return res } +// AtLeastNHaveMemoryRequests asserts that at least N Deployments have memory requests in the cluster that match the +// provided options. func (da DeploymentAssertion) AtLeastNHaveMemoryRequests(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -255,10 +280,13 @@ func (da DeploymentAssertion) AtLeastNHaveMemoryRequests(count int) DeploymentAs return res } +// HasCPURequests asserts that exactly one Deployment has CPU requests in the cluster that match the provided options. func (da DeploymentAssertion) HasCPURequests() DeploymentAssertion { return da.ExactlyNHaveCPURequests(1) } +// ExactlyNHaveCPURequests asserts that exactly N Deployments have CPU requests in the cluster that match the provided +// options. func (da DeploymentAssertion) ExactlyNHaveCPURequests(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -274,6 +302,8 @@ func (da DeploymentAssertion) ExactlyNHaveCPURequests(count int) DeploymentAsser return res } +// AtLeastNHaveCPURequests asserts that at least N Deployments have CPU requests in the cluster that match the provided +// options. 
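Taken together, the limit and request assertions encode a common resource policy: memory requests present and equal to limits, CPU requests present, CPU limits absent. A sketch of that combination, under the same assumptions as the earlier deployment example (hypothetical function name, scoping options omitted):

package deployments_test

import (
	"testing"

	"sigs.k8s.io/e2e-framework/pkg/env"

	"github.com/DWSR/kubeassert-go/internal/assertion"
	"github.com/DWSR/kubeassert-go/internal/deployments"
)

// assertResourcePolicy expresses a "guaranteed memory, burstable CPU" policy
// using the chainable helpers documented above.
func assertResourcePolicy(t *testing.T, testEnv env.Environment) {
	policy := deployments.NewDeploymentAssertion().
		HasMemoryRequests().
		HasMemoryLimitsEqualToRequests().
		HasCPURequests().
		HasNoCPULimits()

	testEnv.Test(t, assertion.AsFeature(policy.Assertion))
}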
func (da DeploymentAssertion) AtLeastNHaveCPURequests(count int) DeploymentAssertion { stepFn := helpers.AsStepFunc( da, @@ -289,6 +319,7 @@ func (da DeploymentAssertion) AtLeastNHaveCPURequests(count int) DeploymentAsser return res } +// NewDeploymentAssertion creates a new DeploymentAssertion with the provided options. func NewDeploymentAssertion(opts ...assertion.Option) DeploymentAssertion { return DeploymentAssertion{ Assertion: assertion.NewAssertion( diff --git a/internal/deployments/deployments.go b/internal/deployments/deployments.go index da9c406..e836023 100644 --- a/internal/deployments/deployments.go +++ b/internal/deployments/deployments.go @@ -1,3 +1,4 @@ +// deployments package contains assertions for Kubernetes Deployments. package deployments import ( From 9866875f9a7933dbf9979340a52ebc52d9848cd2 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Thu, 13 Mar 2025 00:14:07 -0400 Subject: [PATCH 25/29] wip --- internal/crds/assertion.go | 100 +++++++++++++++++++++++++++++++++++++ internal/crds/crds.go | 99 +++++++++++++++--------------------- 2 files changed, 139 insertions(+), 60 deletions(-) create mode 100644 internal/crds/assertion.go diff --git a/internal/crds/assertion.go b/internal/crds/assertion.go new file mode 100644 index 0000000..befa33d --- /dev/null +++ b/internal/crds/assertion.go @@ -0,0 +1,100 @@ +package crds + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + "github.com/DWSR/kubeassert-go/internal/assertion" + helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" +) + +type CRDAssertion struct { + assertion.Assertion +} + +func (ca CRDAssertion) clone() CRDAssertion { + return CRDAssertion{ + Assertion: assertion.Clone(ca.Assertion), + } +} + +func (ca CRDAssertion) Exists() CRDAssertion { + stepFn := helpers.AsStepFunc(ca, exist(), 1, helpers.IntCompareFuncEqualTo, nil) + + res := ca.clone() + res.SetBuilder(res.GetBuilder().Assess("exists", stepFn)) + + return res +} + +func (ca CRDAssertion) getCRDs(ctx context.Context, t require.TestingT, cfg *envconf.Config) (extv1.CustomResourceDefinitionList, error) { + client := helpers.DynamicClientFromEnvconf(t, cfg) + + var crdList extv1.CustomResourceDefinitionList + + list, err := client. + Resource(extv1.SchemeGroupVersion.WithResource("customresourcedefinitions")). 
+ List(ctx, ca.ListOptions(cfg)) + if err != nil { + return crdList, err + } + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &crdList) + if err != nil { + return crdList, err + } + + return crdList, nil +} + +func (ca CRDAssertion) HasVersion(crdVersion string) CRDAssertion { + fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { + t := helpers.RequireTIfNotNil(testingT, ca.GetRequireT()) + conditionFunc := func(ctx context.Context) (bool, error) { + crds, err := ca.getCRDs(ctx, t, cfg) + require.NoError(t, err) + + if len(crds.Items) != 1 { + return false, nil + } + + foundVersion := false + + for _, version := range crds.Items[0].Spec.Versions { + if version.Name == crdVersion { + foundVersion = true + + break + } + } + + return foundVersion, nil + } + + require.NoError(t, helpers.WaitForCondition(ctx, ca, conditionFunc)) + + return ctx + } + + res := ca.clone() + res.SetBuilder(res.GetBuilder().Assess("hasVersion", fn)) + + return res +} + +func NewCRDAssertion(opts ...assertion.Option) CRDAssertion { + return CRDAssertion{ + Assertion: assertion.NewAssertion( + append( + []assertion.Option{assertion.WithBuilder(features.New("CRD").WithLabel("type", "customresourcedefinition"))}, + opts..., + )..., + ), + } +} diff --git a/internal/crds/crds.go b/internal/crds/crds.go index dca376b..47948d5 100644 --- a/internal/crds/crds.go +++ b/internal/crds/crds.go @@ -5,7 +5,9 @@ import ( "testing" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" @@ -14,45 +16,19 @@ import ( helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) -type CRDAssertion struct { - assertion.Assertion -} - -func (ca CRDAssertion) clone() CRDAssertion { - return CRDAssertion{ - Assertion: assertion.Clone(ca.Assertion), - } -} - -func (ca CRDAssertion) Exists() CRDAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, ca.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - pods, err := ca.getCRDs(ctx, t, cfg) - require.NoError(t, err) - - return len(pods.Items) == 1, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, ca, conditionFunc)) - - return ctx - } - - res := ca.clone() - res.SetBuilder(res.GetBuilder().Assess("exists", fn)) - - return res -} - -func (ca CRDAssertion) getCRDs(ctx context.Context, t require.TestingT, cfg *envconf.Config) (extv1.CustomResourceDefinitionList, error) { +func getCRDs( + ctx context.Context, + t require.TestingT, + cfg *envconf.Config, + listOpts metav1.ListOptions, +) (extv1.CustomResourceDefinitionList, error) { client := helpers.DynamicClientFromEnvconf(t, cfg) var crdList extv1.CustomResourceDefinitionList list, err := client. Resource(extv1.SchemeGroupVersion.WithResource("customresourcedefinitions")). 
- List(ctx, ca.ListOptions(cfg)) + List(ctx, listOpts) if err != nil { return crdList, err } @@ -65,47 +41,50 @@ func (ca CRDAssertion) getCRDs(ctx context.Context, t require.TestingT, cfg *env return crdList, nil } -func (ca CRDAssertion) HasVersion(crdVersion string) CRDAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, ca.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - crds, err := ca.getCRDs(ctx, t, cfg) +func exist() helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, _ helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + crdList, err := getCRDs(ctx, t, cfg, assert.ListOptions(cfg)) require.NoError(t, err) - if len(crds.Items) != 1 { + return itemCountFn(len(crdList.Items), count), nil + } + } +} + +func hasVersion(crdVersion string) helpers.ConditionFuncFactory { + return func( + t require.TestingT, + assert assertion.Assertion, + cfg *envconf.Config, + count int, + itemCountFn, _ helpers.IntCompareFunc, + ) helpers.ConditionFunc { + return func(ctx context.Context) (bool, error) { + crdList, err := getCRDs(ctx, t, cfg, assert.ListOptions(cfg)) + require.NoError(t, err) + + if itemCountFn(len(crdList.Items), count) { return false, nil } foundVersion := false - for _, version := range crds.Items[0].Spec.Versions { + for _, version := range crdList.Items[0].Spec.Versions { if version.Name == crdVersion { foundVersion = true + break } } return foundVersion, nil } - - require.NoError(t, helpers.WaitForCondition(ctx, ca, conditionFunc)) - - return ctx - } - - res := ca.clone() - res.SetBuilder(res.GetBuilder().Assess("hasVersion", fn)) - - return res -} - -func NewCRDAssertion(opts ...assertion.Option) CRDAssertion { - return CRDAssertion{ - Assertion: assertion.NewAssertion( - append( - []assertion.Option{assertion.WithBuilder(features.New("CRD").WithLabel("type", "customresourcedefinition"))}, - opts..., - )..., - ), } } From a9ccd30aae5a69930d3f007b70dce9ee3ee7ca64 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Thu, 13 Mar 2025 00:15:45 -0400 Subject: [PATCH 26/29] wip --- internal/crds/assertion.go | 32 ++------------------------------ internal/crds/crds.go | 3 --- 2 files changed, 2 insertions(+), 33 deletions(-) diff --git a/internal/crds/assertion.go b/internal/crds/assertion.go index befa33d..a5b1f47 100644 --- a/internal/crds/assertion.go +++ b/internal/crds/assertion.go @@ -2,7 +2,6 @@ package crds import ( "context" - "testing" "github.com/stretchr/testify/require" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -54,36 +53,9 @@ func (ca CRDAssertion) getCRDs(ctx context.Context, t require.TestingT, cfg *env } func (ca CRDAssertion) HasVersion(crdVersion string) CRDAssertion { - fn := func(ctx context.Context, testingT *testing.T, cfg *envconf.Config) context.Context { - t := helpers.RequireTIfNotNil(testingT, ca.GetRequireT()) - conditionFunc := func(ctx context.Context) (bool, error) { - crds, err := ca.getCRDs(ctx, t, cfg) - require.NoError(t, err) - - if len(crds.Items) != 1 { - return false, nil - } - - foundVersion := false - - for _, version := range crds.Items[0].Spec.Versions { - if version.Name == crdVersion { - foundVersion = true - - break - } - } - - return foundVersion, nil - } - - require.NoError(t, helpers.WaitForCondition(ctx, ca, conditionFunc)) - - return ctx - } - 
+ stepFn := helpers.AsStepFunc(ca, hasVersion(crdVersion), 1, helpers.IntCompareFuncEqualTo, nil) res := ca.clone() - res.SetBuilder(res.GetBuilder().Assess("hasVersion", fn)) + res.SetBuilder(res.GetBuilder().Assess("hasVersion", stepFn)) return res } diff --git a/internal/crds/crds.go b/internal/crds/crds.go index 47948d5..88df27f 100644 --- a/internal/crds/crds.go +++ b/internal/crds/crds.go @@ -2,15 +2,12 @@ package crds import ( "context" - "testing" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/e2e-framework/pkg/envconf" - "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" From 2e845358556f37ea06324351bbfd32bc591b9c1d Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Thu, 13 Mar 2025 11:12:33 -0400 Subject: [PATCH 27/29] wip --- internal/crds/assertion.go | 2 +- internal/crds/crds.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/crds/assertion.go b/internal/crds/assertion.go index a5b1f47..39ec689 100644 --- a/internal/crds/assertion.go +++ b/internal/crds/assertion.go @@ -53,7 +53,7 @@ func (ca CRDAssertion) getCRDs(ctx context.Context, t require.TestingT, cfg *env } func (ca CRDAssertion) HasVersion(crdVersion string) CRDAssertion { - stepFn := helpers.AsStepFunc(ca, hasVersion(crdVersion), 1, helpers.IntCompareFuncEqualTo, nil) + stepFn := helpers.AsStepFunc(ca, hasVersion(crdVersion), 1, helpers.IntCompareFuncNotEqualTo, helpers.IntCompareFuncEqualTo) res := ca.clone() res.SetBuilder(res.GetBuilder().Assess("hasVersion", stepFn)) diff --git a/internal/crds/crds.go b/internal/crds/crds.go index 88df27f..fdbe3ec 100644 --- a/internal/crds/crds.go +++ b/internal/crds/crds.go @@ -1,3 +1,4 @@ +// crds contains assertions for CustomResourceDefinitions. package crds import ( From fdae2ff51b753fe0e8f21c788335f70782974e67 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Thu, 13 Mar 2025 11:14:32 -0400 Subject: [PATCH 28/29] wip --- internal/crds/assertion.go | 38 +++++++++++--------------------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/internal/crds/assertion.go b/internal/crds/assertion.go index 39ec689..cb70f97 100644 --- a/internal/crds/assertion.go +++ b/internal/crds/assertion.go @@ -1,18 +1,13 @@ package crds import ( - "context" - - "github.com/stretchr/testify/require" - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/features" "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) +// CRDAssertion is an assertion for CustomResourceDefinitions. type CRDAssertion struct { assertion.Assertion } @@ -23,6 +18,7 @@ func (ca CRDAssertion) clone() CRDAssertion { } } +// Exists asserts that exactly one CRD exists that matches the provided options. 
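With the list call and condition factories moved into crds.go, HasVersion composes the same way as the count assertions. A usage sketch under the same assumptions as the earlier examples (hypothetical function name; the CRD would normally be selected by name through an option from internal/assertion, omitted here because the option name is not part of this diff):

package crds_test

import (
	"testing"

	"sigs.k8s.io/e2e-framework/pkg/env"

	"github.com/DWSR/kubeassert-go/internal/assertion"
	"github.com/DWSR/kubeassert-go/internal/crds"
)

// assertCRDServesV1 checks that exactly one matching CustomResourceDefinition
// exists and that it serves the "v1" version.
func assertCRDServesV1(t *testing.T, testEnv env.Environment) {
	crdAssert := crds.NewCRDAssertion().
		Exists().
		HasVersion("v1")

	testEnv.Test(t, assertion.AsFeature(crdAssert.Assertion))
}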
func (ca CRDAssertion) Exists() CRDAssertion { stepFn := helpers.AsStepFunc(ca, exist(), 1, helpers.IntCompareFuncEqualTo, nil) @@ -32,34 +28,22 @@ func (ca CRDAssertion) Exists() CRDAssertion { return res } -func (ca CRDAssertion) getCRDs(ctx context.Context, t require.TestingT, cfg *envconf.Config) (extv1.CustomResourceDefinitionList, error) { - client := helpers.DynamicClientFromEnvconf(t, cfg) - - var crdList extv1.CustomResourceDefinitionList - - list, err := client. - Resource(extv1.SchemeGroupVersion.WithResource("customresourcedefinitions")). - List(ctx, ca.ListOptions(cfg)) - if err != nil { - return crdList, err - } - - err = runtime.DefaultUnstructuredConverter.FromUnstructured(list.UnstructuredContent(), &crdList) - if err != nil { - return crdList, err - } - - return crdList, nil -} - +// HasVersion asserts that exactly one CRD that matches the supplied options has the supplied version. func (ca CRDAssertion) HasVersion(crdVersion string) CRDAssertion { - stepFn := helpers.AsStepFunc(ca, hasVersion(crdVersion), 1, helpers.IntCompareFuncNotEqualTo, helpers.IntCompareFuncEqualTo) + stepFn := helpers.AsStepFunc( + ca, + hasVersion(crdVersion), + 1, + helpers.IntCompareFuncNotEqualTo, + helpers.IntCompareFuncEqualTo, + ) res := ca.clone() res.SetBuilder(res.GetBuilder().Assess("hasVersion", stepFn)) return res } +// NewCRDAssertion creates a new CRDAssertion with the supplied options. func NewCRDAssertion(opts ...assertion.Option) CRDAssertion { return CRDAssertion{ Assertion: assertion.NewAssertion( From 63acca8d673f3c44e4a7fd8c3e78cd96309dc496 Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Thu, 13 Mar 2025 11:48:58 -0400 Subject: [PATCH 29/29] wip --- internal/namespaces/1namespace_test.go | 7 ++++--- internal/pdbs/assertion.go | 1 + internal/pods/3pod_test.go | 6 +++--- kubeassert.go | 5 +++++ 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/internal/namespaces/1namespace_test.go b/internal/namespaces/1namespace_test.go index 6b3a168..82cff6a 100644 --- a/internal/namespaces/1namespace_test.go +++ b/internal/namespaces/1namespace_test.go @@ -4,13 +4,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "github.com/DWSR/kubeassert-go/internal/assertion" helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" "github.com/DWSR/kubeassert-go/internal/namespaces" "github.com/DWSR/kubeassert-go/internal/testhelpers" - "github.com/stretchr/testify/require" - "sigs.k8s.io/e2e-framework/klient/decoder" - "sigs.k8s.io/e2e-framework/pkg/envconf" ) func Test_1Namespace_Success(t *testing.T) { diff --git a/internal/pdbs/assertion.go b/internal/pdbs/assertion.go index 248bba3..9669bee 100644 --- a/internal/pdbs/assertion.go +++ b/internal/pdbs/assertion.go @@ -7,6 +7,7 @@ import ( helpers "github.com/DWSR/kubeassert-go/internal/assertionhelpers" ) +// PDBAssertion is a wrapper around assertion.Assertion that provides additional functionality for PodDisruptionBudgets. 
type PDBAssertion struct { assertion.Assertion } diff --git a/internal/pods/3pod_test.go b/internal/pods/3pod_test.go index 1817a54..da6ac1e 100644 --- a/internal/pods/3pod_test.go +++ b/internal/pods/3pod_test.go @@ -63,13 +63,13 @@ func Test_3Pod_Success(t *testing.T) { }, } - features := make([]features.Feature, 0) + tests := make([]features.Feature, 0) for _, a := range testCases { - features = append(features, assertion.AsFeature(a.assertion)) + tests = append(tests, assertion.AsFeature(a.assertion)) } - testEnv.TestInParallel(t, features...) + testEnv.TestInParallel(t, tests...) } func Test_3Pod_Fail(t *testing.T) { diff --git a/kubeassert.go b/kubeassert.go index d330af1..689b378 100644 --- a/kubeassert.go +++ b/kubeassert.go @@ -1,3 +1,7 @@ +// kubeassert provides a set of assertion helpers for testing Kubernetes resources. It extends the e2e-framework +// package with a set of predefined assertions (e.g. a Deployment is ready) as well as additional helper functionality. +// +//revive:disable:exported package kubeassert import ( @@ -15,6 +19,7 @@ type ( NamespaceAssertion = namespaces.NamespaceAssertion CRDAssertion = crds.CRDAssertion PodAssertion = pods.PodAssertion + SecretAssertion = secrets.SecretAssertion ) var (
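The pod test refactor above shows the intended end-to-end consumption pattern: build assertions, convert each one to a feature, and hand the slice to the shared environment. A sketch of that harness (hypothetical function name; how testEnv is constructed, for example via the testhelpers package, is outside this diff, and passing the embedded Assertion to AsFeature is an assumption):

package kubeassert_test

import (
	"testing"

	"sigs.k8s.io/e2e-framework/pkg/env"
	"sigs.k8s.io/e2e-framework/pkg/features"

	"github.com/DWSR/kubeassert-go/internal/assertion"
	"github.com/DWSR/kubeassert-go/internal/deployments"
	"github.com/DWSR/kubeassert-go/internal/namespaces"
)

// runAll mirrors the internal pod tests: collect features and run them in
// parallel against a shared test environment.
func runAll(t *testing.T, testEnv env.Environment) {
	tests := []features.Feature{
		assertion.AsFeature(deployments.NewDeploymentAssertion().Exists().IsAvailable().Assertion),
		assertion.AsFeature(namespaces.NewNamespaceAssertion().Exists().IsRestricted().Assertion),
	}

	testEnv.TestInParallel(t, tests...)
}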