diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 00000000..41c332d2 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,75 @@ +# Copilot Instructions — kube-workshop + +## Project Overview + +An Eleventy v3 static site generating a hands-on Kubernetes (AKS) workshop. Content is authored in Markdown under `content/`, built to `_site/`, and deployed to GitHub Pages. The workshop walks developers through deploying a multi-tier app (Postgres → API → Frontend) on AKS. + +The intent of this project is to provide a comprehensive, step-by-step learning experience for developers new to Kubernetes, with a focus on practical application and real-world scenarios. The content is structured into sections that cover everything from cluster setup to advanced operations, with a mix of explanations, code snippets, and exercises. + +## Build & Dev Commands + +- `npm start` — dev server with hot reload at `http://localhost:8080` +- `npm run build` — production build to `_site/` +- `npm run lint` — auto-format Markdown with Prettier (`--write`) +- `npm run lint:check` — CI formatting check (runs in GitHub Actions) +- `npm run clean` — remove `_site/` + +## Content Authoring + +### Frontmatter (required on every section page) + +```yaml +--- +tags: section # "section" (main flow 00–09), "extra" (bonus 10–12), or "alternative" (e.g. 
09a) +index: 4 # Numeric order, matches directory prefix +title: Deploying The Backend +summary: One-line description for the home page listing +layout: default.njk # Always this value +icon: 🚀 # Single emoji shown in sidebar and headings +--- +``` + +### Directory & File Conventions + +- Section directories: `content/{NN}-{slug}/index.md` (zero-padded two-digit prefix) +- Supporting files (YAML manifests, `.sql`, `.png`, `.sh`, `.svg`) go alongside `index.md` — Eleventy copies them via passthrough +- YAML manifests use `__ACR_NAME__` as a user-replaceable placeholder +- The home page `content/index.md` has only `title` and `layout` (no tags/index/icon/summary) + +### Content Patterns + +- Raw HTML is enabled in Markdown (`html: true` in markdown-it config) +- Use `
`/`` for collapsible solution/cheat blocks containing YAML code +- Use `markdown-it-attrs` syntax (`{.class #id}`) for adding attributes to elements +- External links auto-open in new tabs (custom markdown-it plugin) +- Prefix external doc links with 📚 emoji, e.g. `[📚 Kubernetes Docs: Deployments](...)` +- Use emojis as sub-section visual markers (🔨, 🧪, 🌡️, etc.) +- Prettier config: 120-char width, `proseWrap: "always"` — run `npm run lint` before committing + +### Navigation + +- `tags: section` pages get automatic prev/next links and appear in sidebar +- `tags: extra` pages appear in a separate sidebar group below a divider +- `tags: alternative` pages are not auto-listed — link to them manually from related sections + +## Key Files + +- `eleventy.config.js` — Eleventy plugins (syntax highlight, markdown-it-attrs), passthrough copy rules, custom filters (`zeroPad`, `cssmin`), external links plugin +- `content/_includes/default.njk` — Single layout template with sidebar nav, theme toggle, prev/next footer +- `content/_includes/main.css` / `main.js` — Inlined (not linked) into the template via Nunjucks `{% include %}` + `cssmin` filter +- `content/.prettierrc` — Prettier config (`printWidth: 120`, `proseWrap: "always"`) +- `gitops/` — Kustomize manifests used by the GitOps/Flux section (section 11); contains `base/`, `apps/`, `disabled/` directories + +## CI/CD + +Single workflow `.github/workflows/ci-build-deploy.yaml`: + +1. **lint** job — `npm run lint:check` on all pushes/PRs to `main` +2. 
**deploy** job — builds site and deploys to GitHub Pages (only on `main` branch) + +## Gotchas + +- Never edit files in `_site/` — it's a generated output directory +- The `archive/k3s/` directory contains a deprecated K3S workshop path — don't update it +- Collections are sorted by `index` field, not directory name — keep them in sync +- CSS/JS are inlined into the HTML template, not served as separate static files diff --git a/.github/workflows/ci-build-deploy.yaml b/.github/workflows/ci-build-deploy.yaml index 86562887..6c3c00e1 100644 --- a/.github/workflows/ci-build-deploy.yaml +++ b/.github/workflows/ci-build-deploy.yaml @@ -21,12 +21,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: - node-version: "22" + node-version: "lts/*" - name: Install dependencies run: npm ci diff --git a/LICENSE b/LICENSE index 058df877..2e7395f4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2025 Ben Coleman +Copyright 2026 Ben Coleman Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/content/00-pre-reqs/index.md b/content/00-pre-reqs/index.md index 90a09d1d..47beb3a5 100644 --- a/content/00-pre-reqs/index.md +++ b/content/00-pre-reqs/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: ⚒️ --- -# {{ icon }} Workshop Pre Requisites +# {{ icon }} {{ title }} As this is an entirely hands on workshop, you will need several things before you can start: @@ -115,7 +115,6 @@ Double check that everything is installed and working correctly 
with: ```bash # Verify Azure CLI and Helm are working -# Try commands with tab completion az helm ``` @@ -167,7 +166,7 @@ RES_GROUP="kube-workshop" REGION="westeurope" AKS_NAME="__change_me__" ACR_NAME="__change_me__" -KUBE_VERSION="1.27.1" +KUBE_VERSION="1.33.6" ``` > New versions of Kubernetes are released all the time, and eventually older versions are removed from Azure. Rather diff --git a/content/00-pre-reqs/vars.sh.sample b/content/00-pre-reqs/vars.sh.sample index 3878ccb9..fd37372c 100644 --- a/content/00-pre-reqs/vars.sh.sample +++ b/content/00-pre-reqs/vars.sh.sample @@ -2,4 +2,8 @@ RES_GROUP="kube-workshop" REGION="westeurope" AKS_NAME="__change_me__" ACR_NAME="__change_me__" # NOTE: Can not contain underscores or hyphens -KUBE_VERSION="1.33" \ No newline at end of file +KUBE_VERSION="1.33.6" + +# Do not edit below this line + +echo -e "Using variables: Region=$REGION\nResource Group=$RES_GROUP\nAKS Name=$AKS_NAME\nACR Name=$ACR_NAME\nKubernetes Version=$KUBE_VERSION" \ No newline at end of file diff --git a/content/01-cluster/index.md b/content/01-cluster/index.md index 4991443e..4a247087 100644 --- a/content/01-cluster/index.md +++ b/content/01-cluster/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: 🚀 --- -# {{ icon }} Deploying Kubernetes +# {{ icon }} {{ title }} Deploying AKS and Kubernetes can be extremely complex, with many networking, compute and other aspects to consider. However for the purposes of this workshop, a default and basic cluster can be deployed very quickly. @@ -65,7 +65,11 @@ This should take around 5 minutes to complete, and creates a new AKS cluster wit - Two small B-Series _Nodes_ in a single node pool. _Nodes_ are what your workloads will be running on. This is about as small and cheap as you can go and still have cluster that is useful for learning and experimentation. -- Basic 'Kubenet' networking, which creates an Azure network and subnet etc for us. 
+ - It's quite possible the subscription you are using has limits or controls on what VM sizes can be used, if you get + an error about the VM size not being available try changing to a different size, e.g. `Standard_D4ds_v5`. +- It will use 'Azure CNI Overlay' networking, which creates an Azure network and subnet etc for us, we don't have to + worry about any of the underlying network configuration, and it will just work with Azure services. + [See docs if you wish to learn more about this topic](https://docs.microsoft.com/azure/aks/operator-best-practices-network) [See docs if you wish to learn more about this topic](https://docs.microsoft.com/azure/aks/operator-best-practices-network) - Local cluster admin account, with RBAC enabled, this means we don't need to worry about setting up users or assigning roles etc. diff --git a/content/02-container-registry/index.md b/content/02-container-registry/index.md index 558a5399..0d965252 100644 --- a/content/02-container-registry/index.md +++ b/content/02-container-registry/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: 📦 --- -# {{ icon }} Container Registry & Images +# {{ icon }} {{ title }} We will deploy & use a private registry to hold the application container images. This is not strictly necessary as we could pull the images directly from the public, however using a private registry is a more realistic approach. @@ -22,8 +22,7 @@ Deploying a new ACR is very simple: ```bash az acr create --name $ACR_NAME --resource-group $RES_GROUP \ ---sku Standard \ ---admin-enabled true +--sku Standard ``` > When you pick a name for the resource with `$ACR_NAME`, this has to be **globally unique**, and not contain any @@ -102,9 +101,10 @@ will need to proceed to the alternative approach below. ## 🔌 Connect AKS to ACR - Alternative Workaround -If you do not have 'Owner' permissions in Azure, you will need to fall back to an alternative approach. 
This involves -two things: +If you do not have 'Owner' permissions in Azure (to the resource group you are using), you will need to fall back to an +alternative approach. This involves two things: +- Enable password authentication by running `az acr update --name $ACR_NAME --admin-enabled true` - Adding an _Secret_ to the cluster containing the credentials to pull images from the ACR. - Including a reference to this _Secret_ in every _Deployment_ you create or update the _ServiceAccount_ used by the _Pods_ to reference this _Secret_. diff --git a/content/03-the-application/index.md b/content/03-the-application/index.md index 308b21e7..886770dc 100644 --- a/content/03-the-application/index.md +++ b/content/03-the-application/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: ❇️ --- -# {{ icon }} Overview Of The Application +# {{ icon }} {{ title }} This section simply serves as an introduction to the application, there are no tasks to be carried out. diff --git a/content/04-deployment/index.md b/content/04-deployment/index.md index 2f0abd53..250fa2bc 100644 --- a/content/04-deployment/index.md +++ b/content/04-deployment/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: 🚀 --- -# {{ icon }} Deploying The Backend +# {{ icon }} {{ title }} We'll deploy the app piece by piece, and at first we'll deploy & configure things in a sub-optimal way. This is in order to explore the Kubernetes concepts and show their purpose. Then we'll iterate and improve towards the final diff --git a/content/05-network-basics/index.md b/content/05-network-basics/index.md index 59f7aaeb..de04284b 100644 --- a/content/05-network-basics/index.md +++ b/content/05-network-basics/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: 🌐 --- -# {{ icon }} Basic Networking +# {{ icon }} {{ title }} Pods are both ephemeral and "mortal", they should be considered effectively transient. 
Kubernetes can terminate and reschedule pods for a whole range of reasons, including rolling updates, hitting resource limits, scaling up & down and @@ -17,6 +17,12 @@ directly (e.g. by name or IP address). Kubernetes solves this with _Services_, which act as a network abstraction over a group of pods, and have their own independent and more stable life cycle. We can use them to greatly improve what we've deployed. +Networking in Kubernetes is a complex topic, and could be the subject of an entire workshop on its own. For now we will +cover just enough to get our app working, and in the next part we will look at how to expose the frontend to the +internet. + +[📚 Kubernetes Docs: Cluster Networking](https://kubernetes.io/docs/concepts/cluster-administration/networking/) + ## 🧩 Deploy PostgreSQL Service Now to put a _Service_ in front of the PostgreSQL pod, if you want to create the service YAML yourself, you can refer to diff --git a/content/06-frontend/index.md b/content/06-frontend/index.md index 76fcb773..914178d1 100644 --- a/content/06-frontend/index.md +++ b/content/06-frontend/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: 💻 --- -# {{ icon }} Adding The Frontend +# {{ icon }} {{ title }} We've ignored the frontend until this point, with the API and DB in place we are finally ready to deploy it. We need to use a _Deployment_ and _Service_ just as before (you might be starting to see a pattern!). 
We can pick up the pace a diff --git a/content/07-improvements/api-deployment.yaml b/content/07-improvements/api-deployment.yaml index f018ace1..f92663a3 100644 --- a/content/07-improvements/api-deployment.yaml +++ b/content/07-improvements/api-deployment.yaml @@ -28,7 +28,6 @@ spec: cpu: 50m memory: 50Mi limits: - cpu: 100m memory: 128Mi readinessProbe: diff --git a/content/07-improvements/frontend-deployment.yaml b/content/07-improvements/frontend-deployment.yaml index a8a2e600..a49fd901 100644 --- a/content/07-improvements/frontend-deployment.yaml +++ b/content/07-improvements/frontend-deployment.yaml @@ -28,7 +28,6 @@ spec: cpu: 50m memory: 50Mi limits: - cpu: 100m memory: 128Mi readinessProbe: diff --git a/content/07-improvements/index.md b/content/07-improvements/index.md index 143b9ab7..f6ce75e4 100644 --- a/content/07-improvements/index.md +++ b/content/07-improvements/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: ✨ --- -# {{ icon }} Path to Production Readiness +# {{ icon }} {{ title }} We've cut several corners so far in order to simplify things and introduce concepts one at a time, now it is time to make some improvements. What constitutes best practice is a moving target, and often subjective, but there are some @@ -24,10 +24,14 @@ can do this two ways: - **Resource requests**: Used by the Kubernetes scheduler to help assign _Pods_ to a node with sufficient resources. This is only used when starting & scheduling pods, and not enforced after they start. - **Resource limits**: _Pods_ will be prevented from using more resources than their assigned limits. These limits are - enforced and can result in a _Pod_ being terminated. It's highly recommended to set limits to prevent one workload - from monopolizing cluster resources and starving other workloads. + enforced and can result in a _Pod_ being terminated. 
-It's worth reading the offical docs especially on the units & specifiers used for memory and CPU, which can feel a +It's highly recommended to set **memory limits** to prevent one workload from monopolizing cluster resources and +starving other workloads. However, **CPU limits are widely considered harmful** and can cause more problems than they +solve, so we'll only set CPU requests, and not limits. You can do some further reading on this topic if you like, but +we'll skip the reasoning for this in the interest of time. + +It's worth reading the official docs especially on the units & specifiers used for memory and CPU, which can feel a little unintuitive at first. [📚 Kubernetes Docs: Resource Management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) @@ -42,7 +46,6 @@ resources: cpu: 50m memory: 50Mi limits: - cpu: 100m memory: 128Mi ``` @@ -53,7 +56,6 @@ resources: cpu: 50m memory: 100Mi limits: - cpu: 100m memory: 512Mi ``` diff --git a/content/07-improvements/postgres-deployment.yaml b/content/07-improvements/postgres-deployment.yaml index 28fbdbf9..87536468 100644 --- a/content/07-improvements/postgres-deployment.yaml +++ b/content/07-improvements/postgres-deployment.yaml @@ -39,7 +39,6 @@ spec: cpu: 50m memory: 100Mi limits: - cpu: 100m memory: 512Mi readinessProbe: diff --git a/content/08-more-improvements/index.md b/content/08-more-improvements/index.md index 77a54d99..e4f44a32 100644 --- a/content/08-more-improvements/index.md +++ b/content/08-more-improvements/index.md @@ -1,13 +1,13 @@ --- tags: section index: 8 -title: Production Readiness Continued +title: Production Readiness (Cont.) summary: More recommended practices; ConfigMaps & Volumes layout: default.njk icon: 🏆 --- -# {{ icon }} Production Readiness Continued +# {{ icon }} {{ title }} We're not done improving things yet! This section is a continuation of the previous one, where we will further enhance our deployment by adding a few more important features. 
Using _ConfigMaps_ and volumes, we'll continue stepping towards @@ -48,23 +48,24 @@ kubectl create configmap nanomon-sql-init --from-file=nanomon_init.sql > Like every object in Kubernetes, ConfigMaps can also be created with a YAML manifest, but when working with external > files/scripts etc, kubectl is your only real option. -There are three mains ways to use a _ConfigMap_ in with a _Pod_: as container command and args, as environment -variables, or as files in a volume. In this section we'll use the volume method. +There are two main ways to use a _ConfigMap_ in with a _Pod_: as environment variables, or as (virtual) files in a +volume. In this section we'll use the volume method. Which means we need to explain volumes and volume mounts first, +before we can use the _ConfigMap_. ## 💾 Volumes & Volume Mounts A Volume in Kubernetes is a directory that is accessible to containers in a pod. Volumes are used to persist data, share data between containers, and manage configuration. When it comes to persisting data and storage in Kubernetes, it's a -stageringly complex & deep topic. However volumes can also be used to easily provide a container with access to +staggeringly complex & deep topic. However volumes can also be used to easily provide a container with access to configuration files, via a _ConfigMap_. [📚 Kubernetes Docs: Volumes](https://kubernetes.io/docs/concepts/storage/volumes/) There's always two parts to using a volume: -1. Define the volume in the _Pod_ spec, and specify the source of the volume. -2. Define a volume mount in the container spec, which references the volume, and specifies the filesystem path inside - the container where the volume should be mounted. +1. Define the **volume** in the _Pod_ spec, and specify the source of the volume, there are many types of sources. +2. Define a **volume mount** in the container spec, which references the volume, and specifies the filesystem path + inside the container where the volume should be mounted. 
Update the Postgres deployment manifest to include the volume and volume mount, as follows: @@ -86,12 +87,12 @@ volumeMounts: readOnly: true ``` -Hey, what's this `/docker-entrypoint-initdb.d` path? Is this some Kubernetes thing? No, this is a special directory in -the official Postgres image. Any `*.sql` or `*.sh` files found in this directory when the container starts will be -automatically executed by the Postgres entrypoint script. This is a really useful feature of the official Postgres -image, and is why we don't need to create our own custom Postgres image. +Hey, what's this `/docker-entrypoint-initdb.d` path? Is this some Kubernetes thing? No, this is a special directory (yes +it looks like a file but is actually a directory) used by the official Postgres image. Any `*.sql` or `*.sh` files found +in this directory when the container starts will be automatically executed when the container is initialized. This is a +really useful feature of the official Postgres image, and is why we don't need to create our own custom Postgres image. -The last thing to do is to update the Postgres container spec to use the official Postgres image, hosted publically on +The last thing to do is to update the Postgres container spec to use the official Postgres image, hosted publicly on Dockerhub, rather than our custom one. 
Change the image line to: ```yaml diff --git a/content/08-more-improvements/postgres-deployment.yaml b/content/08-more-improvements/postgres-deployment.yaml index 2e4b4e4c..6c0443ff 100644 --- a/content/08-more-improvements/postgres-deployment.yaml +++ b/content/08-more-improvements/postgres-deployment.yaml @@ -44,7 +44,6 @@ spec: cpu: 50m memory: 100Mi limits: - cpu: 100m memory: 512Mi readinessProbe: diff --git a/content/08-more-improvements/runner-deployment.yaml b/content/08-more-improvements/runner-deployment.yaml index 28470f65..bf1a435f 100644 --- a/content/08-more-improvements/runner-deployment.yaml +++ b/content/08-more-improvements/runner-deployment.yaml @@ -25,7 +25,6 @@ spec: cpu: 50m memory: 50Mi limits: - cpu: 100m memory: 128Mi env: diff --git a/content/09-helm-ingress/frontend-deployment.yaml b/content/09-helm-ingress/frontend-deployment.yaml index f154cf4d..4fc488d8 100644 --- a/content/09-helm-ingress/frontend-deployment.yaml +++ b/content/09-helm-ingress/frontend-deployment.yaml @@ -28,7 +28,6 @@ spec: cpu: 50m memory: 50Mi limits: - cpu: 100m memory: 128Mi readinessProbe: diff --git a/content/09-helm-ingress/index.md b/content/09-helm-ingress/index.md index f77d325d..4434f89a 100644 --- a/content/09-helm-ingress/index.md +++ b/content/09-helm-ingress/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: 🌎 --- -# {{ icon }} Helm & Ingress +# {{ icon }} {{ title }} 🔥 At this point in the workshop you have a choice: @@ -61,8 +61,7 @@ either applications written and developed in house, or external 3rd party softwa - Helm charts support dynamic parameters called _values_. Charts expose a set of default _values_ through their `values.yaml` file, and these _values_ can be set and over-ridden at _release_ time. - The use of _values_ is critical for automated deployments and CI/CD. -- Charts can referenced through the local filesystem, or in a remote repository called a _chart repository_. 
The can - also be kept in a container registry but that is an advanced and experimental topic. +- Charts can referenced through the local filesystem, or in a remote repository called a _chart repository_. We'll add the Helm chart repository for the ingress we will be deploying, this is done with the `helm repo` command. This is a public repo & chart of the extremely popular NGINX ingress controller (more on that below). diff --git a/content/09a-helm-gateway-api/frontend-deployment.yaml b/content/09a-helm-gateway-api/frontend-deployment.yaml index f154cf4d..4fc488d8 100644 --- a/content/09a-helm-gateway-api/frontend-deployment.yaml +++ b/content/09a-helm-gateway-api/frontend-deployment.yaml @@ -28,7 +28,6 @@ spec: cpu: 50m memory: 50Mi limits: - cpu: 100m memory: 128Mi readinessProbe: diff --git a/content/09a-helm-gateway-api/index.md b/content/09a-helm-gateway-api/index.md index 8f2c6bd4..986e0897 100644 --- a/content/09a-helm-gateway-api/index.md +++ b/content/09a-helm-gateway-api/index.md @@ -7,7 +7,7 @@ layout: default.njk icon: 🌎 --- -# 🌎 Helm & Gateway API +# {{ icon }} {{ title }} 🔥 This section is an alternative to the [Helm & Ingress section](../09-helm-ingress/), but instead of covering the legacy Ingress API, it uses the newer Gateway API which is still evolving, but represents the future of L4/L7 routing in @@ -45,7 +45,7 @@ Before we can use the Gateway API in our cluster and create instances of the res Resource Definitions_ (CRDs) which define the new resources. This is done with a single command: ```bash -kubectl kustomize "https://github.com/nginx/nginx-gateway-fabric/config/crd/gateway-api/standard?ref=v2.1.0" | kubectl apply -f - +kubectl apply --server-side -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.1/standard-install.yaml ``` ## 🗃️ Namespaces @@ -85,8 +85,7 @@ either applications written and developed in house, or external 3rd party softwa - Helm charts support dynamic parameters called _values_. 
Charts expose a set of default _values_ through their `values.yaml` file, and these _values_ can be set and over-ridden at _release_ time. - The use of _values_ is critical for automated deployments and CI/CD. -- Charts can referenced through the local filesystem, or in a remote repository called a _chart repository_. The can - also be kept in a container registry but that is an advanced and experimental topic. +- Charts can referenced through the local filesystem, or in a remote repository called a _chart repository_ ## 🚪 Deploying the NGINX Gateway diff --git a/content/10-extra-advanced/index.md b/content/10-extra-advanced/index.md index 02a7ff0e..6da2ba12 100644 --- a/content/10-extra-advanced/index.md +++ b/content/10-extra-advanced/index.md @@ -1,13 +1,13 @@ --- -tags: extra index: 10 title: Scaling & Stateful Workloads summary: Scaling (manual & auto), stateful workloads, persitent volumes, plus more Helm. layout: default.njk -icon: 🤯 +icon: ⚖️ +tags: section --- -# 🤯 Scaling, Stateful Workloads & Helm +# {{ icon }} {{ title }} This final section touches on some slightly more advanced and optional concepts we've skipped over. They aren't required to get a basic app up & running, but generally come up in practice and real world use of Kubernetes. @@ -51,7 +51,7 @@ modify the number of replicas dynamically. To set up an _Horizontal Pod Autoscaler_ you can give it a deployment and some simple targets, as follows: ```bash -kubectl autoscale deployment nanomon-api --cpu="50%" --min=2 --max=10 +kubectl autoscale deployment nanomon-api --cpu-percent=50 --min=2 --max=10 ```
@@ -59,7 +59,7 @@ kubectl autoscale deployment nanomon-api --cpu="50%" --min=2 --max=10 ```yaml kind: HorizontalPodAutoscaler -apiVersion: autoscaling/v1 +apiVersion: autoscaling/v2 metadata: name: nanomon-api spec: @@ -69,7 +69,13 @@ spec: apiVersion: apps/v1 kind: Deployment name: nanomon-api - targetCPUUtilizationPercentage: 50 + metrics: + - resource: + name: cpu + target: + averageUtilization: 50 + type: Utilization + type: Resource ```
@@ -92,7 +98,7 @@ chmod +x hey_linux_amd64 After about 1~2 mins you should see new API pods being created. Once the `hey` command completes and the load stops, it will probably be around ~5 mins before the pods scale back down to their original number. The command -`kubectl describe hpa` is useful and will show you the current status of the autoscaler. +`kubectl describe hpa` is very useful and will show you the current status of the autoscaler. ## 🛢️ Improving The PostgreSQL Backend @@ -111,13 +117,13 @@ _StatefulSets_ which greatly helps with the complexities of running multiple sta ⚠️ But wait _StatefulSets_ are not a magic wand! Any stateful workload such as a database **still needs to be made aware** it is running in multiple places and handle the data synchronization/replication. This can be setup for -PostgreSQL, but is deemed too complex for this workshop. +PostgreSQL, but is deemed too complex for this workshop, so we'll have to settle for a single instance in this case. -However we can address the issue of data persistence. +However we can at least address the issue of data persistence. 🧪 **Optional Experiment**: Try using the app and adding a monitor, then run `kubectl delete pod {postgres-pod-name}` You will see that Kubernetes immediately restarts it. However when the app recovers and reconnects to the DB (which -might take a few seconds), you will see the data you created is gone. +might take a few seconds), you will see the data you created is gone! To resolve the data persistence issues, we need do three things: @@ -198,7 +204,6 @@ spec: cpu: 50m memory: 100Mi limits: - cpu: 100m memory: 512Mi readinessProbe: @@ -226,8 +231,8 @@ new `postgres-statefulset.yaml` file. Some comments: - Running `kubectl get pv,pvc` you will see the new _PersistentVolume_ and _PersistentVolumeClaim_ that have been created. 
The _Pod_ might take a little while to start while the volume is created, and is "bound" to the _Pod_ -If you repeat the pod deletion experiment above, you should see that the data is maintained after you delete the -`postgres-0` pod and it restarts. +If you repeat the pod deletion experiment above (note, the name now will be `postgres-0` as StatefulSet pods have stable +names), you should see that the data is maintained after you delete the `postgres-0` pod and it restarts. ## 💥 Installing The App with Helm diff --git a/content/10-extra-advanced/postgres-statefulset.yaml b/content/10-extra-advanced/postgres-statefulset.yaml index 7cdbc6a4..d5459b3a 100644 --- a/content/10-extra-advanced/postgres-statefulset.yaml +++ b/content/10-extra-advanced/postgres-statefulset.yaml @@ -55,7 +55,6 @@ spec: cpu: 50m memory: 100Mi limits: - cpu: 100m memory: 512Mi readinessProbe: diff --git a/content/11-observability/index.md b/content/11-observability/index.md new file mode 100644 index 00000000..31ddc367 --- /dev/null +++ b/content/11-observability/index.md @@ -0,0 +1,224 @@ +--- +tags: extra +index: 11 +title: Observability & Monitoring +summary: Monitor & observe Kubernetes and applications using tools like Prometheus and Grafana. +layout: default.njk +icon: 📊 +--- + +# {{ icon }} {{ title }} + +In this section, which is completely optional, you'll learn about observability and monitoring in Kubernetes. We'll +cover how to set up Prometheus and Grafana to collect and visualize metrics from your cluster and applications. These +tools are not part of the core Kubernetes platform, but are widely used in the ecosystem for monitoring and +observability. + +## 🔥 What Is Prometheus? + +Prometheus is an open-source monitoring and alerting toolkit, originally built at SoundCloud and now a graduated project +of the Cloud Native Computing Foundation (CNCF). It has become the de-facto standard for monitoring Kubernetes clusters +and the workloads running on them. 
+ +[📚 Prometheus Docs: Overview](https://prometheus.io/docs/introduction/overview/) + +Key concepts to understand: + +- **Pull-based model**: Unlike traditional monitoring systems that rely on agents pushing data, Prometheus _scrapes_ + (pulls) metrics from HTTP endpoints exposed by your applications and infrastructure. This is a fundamentally different + approach that simplifies configuration and discovery. +- **Time-series data**: All data is stored as time-series, identified by a metric name and a set of key-value labels. + For example `http_requests_total{method="GET", status="200"}` is a time series tracking HTTP GET requests with a 200 + status code. +- **PromQL**: Prometheus has its own powerful query language called PromQL, used to select and aggregate time-series + data. It's used for building dashboards, alerts, and ad-hoc queries. +- **Service discovery**: Prometheus integrates with Kubernetes natively and can automatically discover pods, services, + and nodes to scrape, without you needing to manually configure each target. + +In a Kubernetes context, many components already expose metrics in a Prometheus-compatible format out of the box, +including the Kubernetes API server, kubelet, kube-state-metrics, and more. This means you get a wealth of cluster-level +metrics with very little effort. + +## 🚀 Installing Prometheus With Helm + +We'll use Helm (which we introduced back in section 9 - [link here](../09-helm-ingress) to install the +[kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack). 
+This is a popular community Helm chart that bundles together: + +- **Prometheus** — the metrics collection and storage engine +- **Grafana** — a powerful dashboarding and visualization tool +- **Alertmanager** — handles routing and managing alerts +- **kube-state-metrics** — exposes metrics about the state of Kubernetes objects +- **node-exporter** — exposes hardware and OS-level metrics from each node + +This "batteries included" approach saves a lot of setup time and is widely used in production clusters. + +First, add the Helm chart repository and update: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + +helm repo update +``` + +Create a namespace for the monitoring stack: + +```bash +kubectl create namespace monitoring +``` + +Now install the chart. We'll pass a few values to make it easier to access Prometheus and Grafana during this workshop: + +```bash +helm install kube-mon prometheus-community/kube-prometheus-stack \ + --namespace monitoring \ + --set grafana.adminPassword=workshopAdmin \ + --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \ + --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false +``` + +> The two `NilUsesHelmValues` settings tell Prometheus to discover _all_ ServiceMonitors and PodMonitors in the cluster, +> not just those created by this Helm release. This makes it much easier to add monitoring for your own applications +> later. + +This will take a minute or two to get everything up and running. Check the status of the pods: + +```bash +kubectl get pods -n monitoring +``` + +You should see several pods spinning up, including Prometheus, Grafana, Alertmanager, kube-state-metrics, and +node-exporter pods. Wait until all pods show `Running` and are ready. + +## 🔎 Exploring The Prometheus UI + +Prometheus has a built-in web UI that lets you run queries and explore metrics. 
To access it, we'll use +`kubectl port-forward` to create a local tunnel to the Prometheus service: + +```bash +kubectl port-forward -n monitoring svc/kube-mon-kube-prometheus-s-prometheus 9090:9090 +``` + +> Leave this running in a terminal and open a new terminal for further commands. If the service name doesn't match, you +> can find the correct name with `kubectl get svc -n monitoring | grep prometheus` + +Now open your browser and navigate to `http://localhost:9090`. You should see the Prometheus web interface. + +Let's try a few PromQL queries to explore what's available. Paste these into the query box and click "Execute": + +**Number of running pods per namespace:** + +```promql +count by (namespace) (kube_pod_status_phase{phase="Running"}) +``` + +**CPU usage across all nodes:** + +```promql +rate(node_cpu_seconds_total{mode!="idle"}[5m]) +``` + +**Total memory usage as a percentage per node:** + +```promql +100 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100) +``` + +🧪 **Experiment**: Click on the "Graph" tab after running a query to see a time-series visualization. Try changing the +time range to see how metrics have changed since you deployed the monitoring stack. + +You can also explore the "Status > Targets" page to see all of the endpoints Prometheus is currently scraping. You +should see your Kubernetes components (API server, kubelet, etc.) listed as active targets. + +## 📊 Grafana Dashboards + +While the Prometheus UI is great for ad-hoc queries, Grafana is where you'll spend most of your time when it comes to +visualizing metrics. The kube-prometheus-stack chart comes pre-loaded with a rich set of dashboards for monitoring your +cluster. 
+ +Set up port forwarding to Grafana: + +```bash +kubectl port-forward -n monitoring svc/kube-mon-grafana 3000:80 +``` + +Open `http://localhost:3000` in your browser and log in with: + +- **Username**: `admin` +- **Password**: `workshopAdmin` + +Once logged in, click on the hamburger menu (☰) and navigate to **Dashboards**. You'll find a collection of +pre-installed dashboards organized into folders. Some highlights worth exploring: + +- **Kubernetes / Compute Resources / Cluster** — A high-level overview of CPU and memory usage across your entire + cluster. +- **Kubernetes / Compute Resources / Namespace (Pods)** — Drill into a specific namespace to see resource usage per pod. + Try selecting the `default` namespace to see your NanoMon application pods. +- **Kubernetes / Networking / Cluster** — Network traffic and bandwidth metrics across your cluster. +- **Node Exporter / Nodes** — Hardware-level metrics from your cluster nodes: CPU, memory, disk, and network. + +🧪 **Experiment**: Open the "Kubernetes / Compute Resources / Namespace (Pods)" dashboard and select the `default` +namespace. Can you identify your NanoMon API pods? What does their CPU and memory usage look like? + +## 💾 Data Sources + +How does Grafana know where to get the metrics from? In Grafana, you configure "data sources" that tell it how to +connect to sources of data. In our case, we have a Prometheus data source that points to the Prometheus instance we +installed. This was automatically set up for us by the Helm chart, but it's useful to understand how it works. + +To see the data source configuration, open the side menu (☰) and go to **Configuration > Data Sources**. Click on the +"Prometheus" data source to see its settings. The URL should be set to +`http://kube-mon-kube-prometheus-s-prometheus.monitoring.svc:9090`, which is the internal address of the Prometheus +service within the cluster. This allows Grafana to query Prometheus directly from within the cluster. 
+ +## 👀 Creating Custom Dashboards + +The pre-installed dashboards are great for general cluster monitoring, but one of the most powerful features of Grafana +is the ability to create your own custom dashboards. This allows you to visualize the specific metrics that are most +relevant to your applications and use cases. + +Let's build a simple dashboard that shows network traffic flowing into the NanoMon API pods. + +1. Click on the "+" icon in the side menu and select **Dashboard**. +2. Click **Add visualization** and choose **Prometheus** as the data source. +3. In the query editor at the bottom, switch to the "Code" mode (toggle in the top-right of the query editor) and enter + the following PromQL query: + +```promql +rate(container_network_receive_bytes_total{namespace="default", pod=~".*api.*"}[5m]) +``` + +This query uses `container_network_receive_bytes_total`, a metric that is already being collected for every container in +the cluster. It calculates the per-second rate (which is what the `rate()` function does) of bytes received by pods +matching `.*api.*` in the `default` namespace over a 5-minute window. + +4. In the panel options on the right sidebar: + - Set the **Title** to something like "NanoMon API Network Traffic". + - Under **Graph styles**, ensure the **Style** is set to "Lines" for a time-series line graph. +5. Click **Apply** in the top-right to save the panel. + +You should now see a line graph showing the rate of network traffic into your API pods. Generate some traffic by +visiting the NanoMon frontend and clicking around to see the graph respond. + +You can continue adding more panels to the dashboard — try adding panels for CPU or memory usage per pod. When you're +done, click the 💾 save icon at the top of the dashboard and give it a name. 
+ +🧪 **Experiment**: Try adding a second panel with the query +`rate(container_cpu_usage_seconds_total{namespace="default", pod=~".*api.*"}[5m])` to track CPU usage of the API pods +alongside network traffic. + +Also try changing the visualization type (it's in the top right when editing a panel) to a bar graph or gauge to see how +it looks. Grafana's flexibility allows you to create dashboards that are tailored to your specific monitoring needs. + +## 🧹 Cleaning Up + +The monitoring stack uses a fair amount of resources. If you want to remove it to free up cluster resources: + +```bash +helm uninstall kube-mon --namespace monitoring +kubectl delete namespace monitoring +``` + +> Note: Helm uninstall won't remove the CRDs (Custom Resource Definitions) that were created. These are harmless but if +> you want a completely clean cluster you can remove them with +> `kubectl delete crd -l app.kubernetes.io/part-of=kube-prometheus-stack` diff --git a/content/12-cicd-actions/index.md b/content/12-cicd-actions/index.md index d1f04756..f2dd08a2 100644 --- a/content/12-cicd-actions/index.md +++ b/content/12-cicd-actions/index.md @@ -1,13 +1,13 @@ --- tags: extra index: 12 -title: DevOps & CI/CD with Kubernetes +title: DevOps & CI/CD summary: How to manage CI/CD pipelines using Github Actions. layout: default.njk icon: 🏗️ --- -# {{ icon }} DevOps & CI/CD with Kubernetes +# {{ icon }} {{ title }} This is an optional section detailing how to set up a continuous integration (CI) and continuous deployment (CD) pipeline, which will deploy to Kubernetes using Helm. 
diff --git a/content/11-gitops-flux/base/deployment.yaml b/content/13-gitops-flux/base/deployment.yaml similarity index 93% rename from content/11-gitops-flux/base/deployment.yaml rename to content/13-gitops-flux/base/deployment.yaml index a90bb7e5..36b9f74f 100644 --- a/content/11-gitops-flux/base/deployment.yaml +++ b/content/13-gitops-flux/base/deployment.yaml @@ -17,6 +17,5 @@ spec: resources: limits: memory: "128Mi" - cpu: "500m" ports: - containerPort: 80 diff --git a/content/11-gitops-flux/base/kustomization.yaml b/content/13-gitops-flux/base/kustomization.yaml similarity index 100% rename from content/11-gitops-flux/base/kustomization.yaml rename to content/13-gitops-flux/base/kustomization.yaml diff --git a/content/11-gitops-flux/gitops.png b/content/13-gitops-flux/gitops.png similarity index 100% rename from content/11-gitops-flux/gitops.png rename to content/13-gitops-flux/gitops.png diff --git a/content/11-gitops-flux/index.md b/content/13-gitops-flux/index.md similarity index 95% rename from content/11-gitops-flux/index.md rename to content/13-gitops-flux/index.md index 9522230d..3ccaa1e4 100644 --- a/content/11-gitops-flux/index.md +++ b/content/13-gitops-flux/index.md @@ -1,6 +1,6 @@ --- tags: extra -index: 11 +index: 13 title: GitOps & Flux summary: Introduction to Kustomize and deploying apps through GitOps with Flux. layout: default.njk @@ -9,7 +9,10 @@ icon: 🧬 # {{ icon }} GitOps & Flux -This is an advanced optional section going into two topics; Kustomize and also GitOps, using FluxCD. +This is an advanced and highly optional section going into two topics; Kustomize and also GitOps, using FluxCD. These +are highly specialized topics, adopted by some teams and organizations, but not universally used. They are also quite +complex topics, so we won't go into too much depth, but this will give you a good introduction to the concepts and how +they work in practice. 
## 🪓 Kustomize @@ -52,7 +55,6 @@ spec: resources: limits: memory: "128Mi" - cpu: "500m" ports: - containerPort: 80 ``` @@ -97,8 +99,9 @@ spec: containers: - name: webserver resources: - limits: - cpu: 330m + requests: + cpu: 50m + memory: 50Mi env: - name: SOME_ENV_VAR value: Hello! @@ -192,8 +195,8 @@ key part of the GitOps methodology to have a single source of truth. ### 💽 Install Flux into AKS [Flux is available as an AKS Extension](https://docs.microsoft.com/en-us/azure/azure-arc/kubernetes/tutorial-use-gitops-flux2) -which is intended to simplify installing Flux into your cluster & configuring it. As of Jan 2022, it requires some -extensions to the Azure CLI to be installed first. +which is intended to simplify installing Flux into your cluster & configuring it, however it requires some extensions to +the Azure CLI to be installed first. Add the CLI extensions with: diff --git a/content/11-gitops-flux/overlay/kustomization.yaml b/content/13-gitops-flux/overlay/kustomization.yaml similarity index 100% rename from content/11-gitops-flux/overlay/kustomization.yaml rename to content/13-gitops-flux/overlay/kustomization.yaml diff --git a/content/11-gitops-flux/overlay/override.yaml b/content/13-gitops-flux/overlay/override.yaml similarity index 81% rename from content/11-gitops-flux/overlay/override.yaml rename to content/13-gitops-flux/overlay/override.yaml index c32e0e58..d9011c79 100644 --- a/content/11-gitops-flux/overlay/override.yaml +++ b/content/13-gitops-flux/overlay/override.yaml @@ -10,8 +10,9 @@ spec: containers: - name: webserver resources: - limits: - cpu: 330m + requests: + cpu: 50m + memory: 50Mi env: - name: SOME_ENV_VAR value: Hello! diff --git a/content/14-nodes/cluster-architecture.drawio.svg b/content/14-nodes/cluster-architecture.drawio.svg new file mode 100644 index 00000000..ffcde419 --- /dev/null +++ b/content/14-nodes/cluster-architecture.drawio.svg @@ -0,0 +1,637 @@ + + + + + + + + + + +
+
+
+ ☁️ Control Plane (Managed by Azure) +
+
+
+
+ + ☁️ Control Plane (Managed by Azure) + +
+
+
+ + + + + + + +
+
+
+ 🌐 API Server +
+
+
+
+ + 🌐 API Server + +
+
+
+ + + + + + + +
+
+
+ 💾 etcd +
+
+
+
+ + 💾 etcd + +
+
+
+ + + + + + + +
+
+
+ 📋 Scheduler +
+
+
+
+ + 📋 Scheduler + +
+
+
+ + + + + + + +
+
+
+ 🔄 Controller Mgr +
+
+
+
+ + 🔄 Controller Mgr + +
+
+
+ + + + + + + +
+
+
+ ...and more... +
+
+
+
+ + ...and more... + +
+
+
+ + + + + + + +
+
+
+ ⚙️ System Node Pool +
+
+
+
+ + ⚙️ System Node Pool + +
+
+
+ + + + + + + +
+
+
+ 📦 User Node Pool +
+
+
+
+ + 📦 User Node Pool + +
+
+
+ + + + + + + +
+
+
+ Node: user-0001 +
+
+
+
+ + Node: user-0001 + +
+
+
+ + + + + + + +
+
+
+ 🟢 frontend +
+
+
+
+ + 🟢 frontend + +
+
+
+ + + + + + + +
+
+
+ 🟢 nanomon-api +
+
+
+
+ + 🟢 nanomon-api + +
+
+
+ + + + + + + pod + + + + + + + + + + pod + + + + + + + + + + + + + +
+
+
+ Node: user-0002 +
+
+
+
+ + Node: user-0002 + +
+
+
+ + + + + + + +
+
+
+ 🟢 nanomon-api +
+
+
+
+ + 🟢 nanomon-api + +
+
+
+ + + + + + + +
+
+
+ 🟢 postgres +
+
+
+
+ + 🟢 postgres + +
+
+
+ + + + + + + pod + + + + + + + + + + pod + + + + + + + + + + + + + + + + + + + + + +
+
+
+ Node: sys-0001 +
+
+
+
+ + Node: sys-0001 + +
+
+
+ + + + + + + +
+
+
+ Node: sys-0002 +
+
+
+
+ + Node: sys-0002 + +
+
+
+ + + + + + + +
+
+
+ 📡 kube-proxy +
+
+
+
+ + 📡 kube-proxy + +
+
+
+ + + + + + + +
+
+
+ 🔤 CoreDNS +
+
+
+
+ + 🔤 CoreDNS + +
+
+
+ + + + + + + +
+
+
+ 💿 CSI Drivers +
+
+
+
+ + 💿 CSI Drivers + +
+
+
+ + + + + + + +
+
+
+ 📡 kube-proxy +
+
+
+
+ + 📡 kube-proxy + +
+
+
+ + + + + + + +
+
+
+ 🔤 CoreDNS +
+
+
+
+ + 🔤 CoreDNS + +
+
+
+ + + + + + + +
+
+
+ 💿 CSI Drivers +
+
+
+
+ + 💿 CSI Drivers + +
+
+
+ + + + + + + +
+
+
+ ...and more... +
+
+
+
+ + ...and more... + +
+
+
+ + + + + + + +
+
+
+ ...and more... +
+
+
+
+ + ...and more... + +
+
+
+ + + + + + + pod + + + + + + + + + + pod + + + + + + + + + + pod + + + + + + + + + + pod + + + + + + + + + + pod + + + + + + + + + + pod + + + + + + + + + +
+ + + + + Text is not SVG - cannot display + + + +
\ No newline at end of file diff --git a/content/14-nodes/index.md b/content/14-nodes/index.md new file mode 100644 index 00000000..c551b985 --- /dev/null +++ b/content/14-nodes/index.md @@ -0,0 +1,411 @@ +--- +tags: extra +index: 14 +title: Nodes & Scheduling +summary: A look at the underlying nodes that run workloads, and how to control pod scheduling +layout: default.njk +icon: ⚙️ +--- + +# {{ icon }} {{ title }} + +In this section we'll take a look at the the nodes that run our workloads. This is not strictly necessary to know in +order to deploy and run applications, but it is useful to understand the fundamentals of how Kubernetes works under the +hood, and it will give you a better understanding of the cluster and how to troubleshoot it when things go wrong. + +This section is a little more Azure & AKS specific, as we'll be taking about nodepools and some specifics of how AKS +manages nodes. However the concepts of nodes, labels, selectors, taints, and tolerations are all fundamental Kubernetes +concepts that apply to any cluster, regardless of where it's running. + +In Kubernetes, the term "node" refers to a machine in the cluster, you might also see them referred to as "worker nodes" +or "agent nodes". + +> Note. We won't be going into cluster level networking, i.e. how nodes and pods communicate with each other, VNets or +> how services route traffic to pods, otherwise this would be a 2 week deep dive! If you are really interested in that, +> check out [Kubernetes networking concepts](https://kubernetes.io/docs/concepts/cluster-administration/networking/). +> For AKS specific networking, check out the +> [AKS CNI networking overview](https://learn.microsoft.com/en-gb/azure/aks/concepts-network-cni-overview) + +## 🏗️ Cluster Architecture Overview + +Every Kubernetes cluster consists of two main parts: + +- **Control plane**: The "brains" of the cluster. 
It manages the overall state of the cluster, schedules workloads, and + responds to events (like a pod crashing). In AKS this is fully managed by Azure — you don't see or pay for the VMs + running it. The control plane includes components like the API server, etcd (the cluster database), the scheduler, and + controller manager. +- **Worker nodes**: These are the VMs (or physical machines) where your application _Pods_ actually run. In AKS, these + are Azure Virtual Machines that you _do_ pay for, and they are organized into **node pools** which are backed with + [Azure VM Scale Sets](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview). + +When you created your AKS cluster back in section 1, you specified `--node-count 2`, which created a single node pool +with two worker nodes. The control plane was provisioned for you transparently by Azure. + +The following diagram shows a high-level view of a Kubernetes cluster architecture, with the control plane, system node +pool, and user node pools. Don't worry about understanding every component in the diagram, just get a sense of how the +control plane manages the nodes and how the system node pool runs critical cluster infrastructure while user node pools +run your application workloads. + +Kubernetes cluster architecture diagram showing the control plane, system node pool, and user node pool + +[📚 Kubernetes Docs: Cluster Architecture](https://kubernetes.io/docs/concepts/architecture/) + +## 🔍 Exploring Nodes + +Let's start by listing the nodes in the cluster: + +```bash +kubectl get nodes -o wide +``` + +This will show you the nodes with additional detail including the OS image, kernel version, container runtime, and +internal IP addresses. You should see two nodes, both with a status of `Ready`. + +To get much more detailed information about a specific node, use `describe`: + +```bash +kubectl describe node +``` + +This command outputs a wealth of information. 
Some key sections to look at: + +- **Labels**: Metadata attached to the node. AKS automatically adds labels such as the node pool name, OS, VM size, and + availability zone. Labels are critical for scheduling decisions. +- **Conditions**: Shows the health status of the node — whether it has sufficient memory, disk space, and if it's ready + to accept pods. +- **Capacity vs Allocatable**: The total resources on the node (capacity) versus what's actually available for your + workloads (allocatable). The difference is reserved for the OS and Kubernetes system components like the kubelet. +- **Non-terminated Pods**: A list of every pod running on that node, including system pods in the `kube-system` + namespace. +- **Allocated resources**: A summary of how much CPU and memory has been _requested_ by pods on that node, and how much + of the node's capacity is committed. + +🧪 **Experiment**: Run `kubectl describe node` on one of your nodes and look at the "Allocated resources" section. How +much of the node's CPU and memory is being used by requests? If you scaled up a deployment to many replicas, what would +happen when the node runs out of allocatable resources? + +## 📦 Node Components + +Each worker node runs a few essential components that keep it functioning as part of the cluster. The most important is +the **kubelet** — the primary agent on each node that communicates with the control plane and ensures containers are +running. It runs as a system service directly on the node (not as a pod), so you won't see it in `kubectl` output. The +**container runtime** (`containerd` on AKS) is the software that actually runs the containers. + +Beyond those invisible node-level services, AKS deploys a number of system pods into the `kube-system` namespace. 
Let's +take a look: + +```bash +kubectl get pods -n kube-system -o wide +``` + +You'll see quite a few pods here, some of the notable ones include: + +- **kube-proxy** — Manages network rules on each node, enabling _Service_ routing to the correct pods. +- **CoreDNS** — Provides DNS resolution within the cluster, so pods can find _Services_ by name (e.g. + `postgres.default.svc.cluster.local`). +- **CSI drivers** (e.g. `csi-azuredisk`, `csi-azurefile`) — Container Storage Interface drivers that allow pods to use + Azure Disks and Azure Files as persistent volumes. +- **cloud-node-manager** — An Azure-specific component that keeps the Kubernetes node objects in sync with the + underlying Azure VMs. +- **metrics-server** — Collects resource usage data from the kubelets, which powers `kubectl top`. + +Don't worry about understanding all of these — the key point is that a lot of infrastructure runs in the background to +keep your cluster operational, and much of it is visible in the `kube-system` namespace. + +## 🏊 Adding A Second Node Pool + +To really explore node scheduling and placement, it helps to have more than one node pool. Let's add a second small pool +called `extra` with a single node. This will give us a concrete target for node selectors, taints, and other scheduling +features we'll explore in this section. + +```bash +az aks nodepool add \ + --resource-group $RES_GROUP \ + --cluster-name $AKS_NAME \ + --name extra \ + --node-count 1 \ + --node-vm-size Standard_B2ms \ + --labels workload=extra +``` + +This will take a couple of minutes. Once it completes, verify the new node has joined the cluster: + +```bash +kubectl get nodes -o wide +``` + +You should now see three nodes — two from your original `nodepool1` and one from the new `extra` pool. Note down the +name of the node in the `extra` pool, you'll need it later. 
You can easily identify it with: + +```bash +kubectl get nodes -l agentpool=extra +``` + +> Adding a node pool will increase your Azure costs. Remember to remove it when you're done with this section using: +> `az aks nodepool delete --resource-group $RES_GROUP --cluster-name $AKS_NAME --name extra` + +## 🏷️ Labels & Selectors + +Until now we've been deploying our workloads without any control over which nodes they run on — the Kubernetes scheduler +has been placing them wherever it sees fit based on resource availability. This is fine for many workloads, but +sometimes you want more control over where your pods run. This is where **labels** and **selectors** come in. + +Nodes use labels extensively, and understanding them is key to controlling where your workloads run. Let's see what +labels are on your nodes: + +```bash +kubectl get nodes --show-labels +``` + +The output will be quite verbose! Some important labels that AKS sets automatically include: + +- `kubernetes.io/os` — The operating system (typically `linux`). +- `node.kubernetes.io/instance-type` — The Azure VM size, e.g. `Standard_B2ms`. +- `topology.kubernetes.io/zone` — The Azure availability zone, if your cluster uses them. +- `agentpool` — The name of the AKS node pool. + +Notice the `agentpool` label — your original nodes will show `agentpool=nodepool1` while the new node shows +`agentpool=extra`. You should also see the custom label `workload=extra` on the new node, which we set when creating the +pool. + +These labels become very powerful when combined with **node selectors** or **node affinity** rules in your pod specs, +which let you control which nodes a pod can be scheduled on. + +## 🎯 Node Selectors & Affinity + +The simplest way to influence pod scheduling is with a `nodeSelector`. This is added to your pod template spec and tells +the scheduler to only place the pod on nodes matching specific labels. + +Let's try this with the `extra` node pool we just created. 
We can target it using the `agentpool` label that AKS +automatically sets, or the custom `workload` label we added. Let's use our custom label: + +Edit the deployment manifest for your API and add a `nodeSelector`: + +```yaml +spec: + # Extra stuff omitted for brevity + spec: + # Place this just above the containers: section + nodeSelector: + workload: extra +``` + +Now apply the updated manifest with `kubectl apply -f` and watch what happens to the pods: + +```bash +kubectl get pods -l app=nanomon-api -o wide +``` + +You should see all the API pods running on the `extra` node. Remove the `nodeSelector` and reapply to restore normal +scheduling. + +For more sophisticated scheduling, Kubernetes offers **node affinity**, which provides richer matching expressions +including "preferred" (soft) and "required" (hard) rules. + +[📚 Kubernetes Docs: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) + +Here's an example of a preferred node affinity that _tries_ to schedule pods on the `extra` pool, but doesn't fail if +the node is unavailable. You don't need to update your manifest to test this — just read through the example to +understand how it works: + +```yaml +spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: agentpool + operator: In + values: + - extra + containers: + - name: nanomon-api + image: __ACR_NAME__.azurecr.io/nanomon-api:latest +``` + +The difference between "preferred" and "required" is important — a required rule +(`requiredDuringSchedulingIgnoredDuringExecution`) will cause pods to remain in a `Pending` state if no matching node is +available, while a preferred rule will fall back to any available node. + +## 🪣 Taints & Tolerations + +Taints and tolerations work alongside node selectors, but in the _opposite direction_. 
While node selectors attract pods +to certain nodes, **taints** are used to _repel_ pods from nodes unless they explicitly **tolerate** the taint. + +A taint is applied to a node and has three parts: a key, a value, and an effect. The effect can be: + +- `NoSchedule` — New pods without a matching toleration will not be scheduled on this node. +- `PreferNoSchedule` — The scheduler will _try_ to avoid placing pods here, but it's not guaranteed. +- `NoExecute` — Existing pods without a matching toleration will be evicted from the node. + +Let's use our `extra` node pool to see this in action. First, taint the `extra` node so that normal pods are repelled +from it: + +```bash +kubectl taint nodes -l agentpool=extra dedicated=special:NoSchedule +``` + +> Here we use `-l agentpool=extra` to target the node by label rather than by name, which is often more convenient. + +Now scale up a deployment and see what happens: + +```bash +kubectl scale deployment nanomon-api --replicas 6 +kubectl get pods -l app=nanomon-api -o wide +``` + +Wait why aren't any pods starting they are all pending! Well we still have the node selector in the manifest that is +forcing all the pods to be scheduled on the `extra` node, but now we have a taint on that node that is preventing any +pods from being scheduled there. So we have a scheduling conflict — the node selector says "schedule here" but the taint +says "don't schedule here". The result is that the pods remain in a `Pending` state indefinitely. 
+ +You could remove the node selector to allow the pods to be scheduled on the other nodes, but let's instead add a +toleration to allow the pods to be scheduled on the tainted node: + +```yaml +spec: + # Extra stuff omitted for brevity + spec: + tolerations: + - key: "dedicated" + operator: "Equal" + value: "special" + effect: "NoSchedule" + # Lines below remain unchanged + nodeSelector: + workload: extra +``` + +Note that we combine the toleration with a `nodeSelector` — the toleration _allows_ the pod to run on the tainted node, +but doesn't _force_ it there. The `nodeSelector` handles the placement. + +[📚 Kubernetes Docs: Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) + +When you're done experimenting, remove the taint and scale back down: + +```bash +kubectl taint nodes -l agentpool=extra dedicated=special:NoSchedule- +kubectl scale deployment nanomon-api --replicas 2 +``` + +The trailing `-` on the taint command removes it, which is easy to be mistaken for a typo, so be careful! + +## 📊 Resource Monitoring + +Back in section 7 we set resource requests and limits on our pods. But how do we see _actual_ resource usage on the +nodes? The `kubectl top` command gives us a quick view: + +```bash +# Show resource usage per node +kubectl top nodes + +# Show resource usage per pod +kubectl top pods +``` + +This shows the real-time CPU and memory consumption. Comparing these values with the node's allocatable resources (from +`kubectl describe node`) gives you a good sense of how much headroom you have. + +> If `kubectl top` returns an error, it means the metrics server isn't installed. In AKS, this is typically enabled by +> default, but you can install it with +> `kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml` + +## 🛠️ Node Maintenance & Cordoning + +Sometimes you need to take a node out of service for maintenance, upgrades, or troubleshooting. 
This sort of cluster +level operation is not common for application developers, but an awareness of how it works is useful for understanding +cluster operations and troubleshooting. + +There are three main activities for managing node availability: + +- **Cordoning a node**: This marks the node as unschedulable, preventing new pods from being scheduled on it, but does + not affect existing pods. `kubectl cordon ` +- **Draining a node**: This evicts all pods from the node and marks it as unschedulable. This is used for maintenance or + decommissioning. `kubectl drain --ignore-daemonsets` +- **Uncordoning a node**: This marks a previously cordoned or drained node as schedulable again, allowing pods to be + scheduled on it. `kubectl uncordon ` + +## 🔁 DaemonSets + +You may have noticed the `--ignore-daemonsets` flag in the drain command above, and wondered what a DaemonSet is. If you +looked at the system pods in `kube-system` earlier, you might also have noticed that some pods (like `kube-proxy` and +the CSI drivers) have one instance running on _every_ node. That's because they are managed by a _DaemonSet_. + +A _DaemonSet_ is a workload resource (like a _Deployment_) but instead of running a set number of replicas, it ensures +that a copy of a pod runs on **every node** in the cluster. When a new node is added, the DaemonSet automatically +schedules a pod onto it. When a node is removed, the pod is cleaned up. You don't specify a `replicas` count — the +number of pods is determined by the number of nodes. + +This makes DaemonSets ideal for node-level infrastructure concerns such as: + +- Log collection agents (e.g. Fluentd, Fluent Bit) +- Monitoring and metrics exporters (e.g. Prometheus node-exporter) +- Network plugins and proxies (e.g. kube-proxy) +- Storage drivers (e.g. 
CSI node plugins) + +You can see the DaemonSets running in your cluster with: + +```bash +kubectl get daemonsets -A +``` + +Notice how the `DESIRED` and `CURRENT` columns match for each DaemonSet — that's telling you every node has its required +pod running. DaemonSets can also use node selectors to target only a subset of nodes if needed. + +[📚 Kubernetes Docs: DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) + +## 🏊 Node Pools In Practice + +We've already been using node pools throughout this section — the `extra` pool we created earlier is a great example. In +AKS, nodes are organized into **node pools**, groups of nodes with the same VM size and configuration. + +In production, it's common to have several node pools with different characteristics: + +- A **system pool** with small VMs for running critical cluster infrastructure (CoreDNS, kube-proxy, etc.). You should + not run your application workloads on this pool — if your application were to misbehave, it could impact the stability + of the entire cluster. +- A **general pool** with mid-sized VMs for typical application workloads. +- A **compute pool** with large or GPU-equipped VMs for data processing or machine learning. +- A **spot pool** using Azure Spot VMs for fault-tolerant batch workloads at reduced cost. + +You can manage your node pools with the Azure CLI, for example listing the pools in your cluster: + +```bash +az aks nodepool list --resource-group $RES_GROUP --cluster-name $AKS_NAME -o table +``` + +You should see both `nodepool1` and `extra` listed. The combination of node pools with the labels, selectors, taints, +and tolerations we explored above gives you fine-grained control over workload placement. 
+ +[📚 AKS Docs: Node Pools](https://learn.microsoft.com/azure/aks/create-node-pools) + +## 🧹 Cleanup + +If you added the `extra` node pool during this section, now is a good time to remove it to avoid unnecessary Azure +costs: + +```bash +az aks nodepool delete --resource-group $RES_GROUP --cluster-name $AKS_NAME --name extra --no-wait +``` + +The `--no-wait` flag returns immediately while the deletion happens in the background. Your pods will be rescheduled +onto the remaining nodes automatically. + +If you still have your API pods pinned to the `extra` node with a `nodeSelector`, you'll notice your API pods are now in +a `Pending` state after the node pool is deleted. Remove the `nodeSelector` from your manifest and reapply to restore +normal scheduling. + +## 🧠 Key Takeaways + +Understanding nodes and cluster architecture might seem like "infrastructure plumbing", but it's knowledge that pays off +when things go wrong. Here's a quick summary of what we covered: + +- The cluster is split into a **control plane** (managed by AKS) and **worker nodes** (your VMs). +- Nodes run the **kubelet**, **kube-proxy**, and a **container runtime**. +- **Node pools** in AKS let you run heterogeneous hardware in the same cluster. +- **Labels**, **node selectors**, and **affinity** rules let you control pod placement. +- **Taints and tolerations** let you reserve or restrict nodes. +- **Cordoning and draining** safely remove nodes from service. +- **Resource monitoring** with `kubectl top` helps you understand utilization. diff --git a/content/15-operations/index.md b/content/15-operations/index.md new file mode 100644 index 00000000..8cc959ad --- /dev/null +++ b/content/15-operations/index.md @@ -0,0 +1,118 @@ +--- +tags: extra +index: 15 +title: Operations Cheat Sheet +summary: This cheat sheet provides a quick reference to essential operations. 
+layout: default.njk +icon: 💊 +--- + +# 💊 Operations Cheat Sheet + +This cheat sheet is broken in to sections covering different aspects of Kubernetes operations, from basic commands to +more advanced topics. Use it as a quick reference guide when working with your cluster. + +## Investigate & Debug + +| Operation | Command | +| ------------------------------------ | ----------------------------------------------------------------------- | +| Pod logs | `kubectl logs ` | +| See where a pod is running | `kubectl get pods -o wide` | +| Follow & watch pod logs | `kubectl logs -f ` | +| Pod events & details | `kubectl describe pod ` | +| Node status | `kubectl get nodes` | +| Resource usage | `kubectl top nodes` and `kubectl top pods` | +| Exec into pods | `kubectl exec -it -- /bin/bash` | +| Debug a pod with ephemeral container | `kubectl debug -it --image=alpine --target=` | +| Run a shell inside a debug pod | `kubectl run --rm -it debug --image=alpine --restart=Never -- sh` | +| View & watch cluster events | `kubectl get events -w` | +| Logs from a previous/crashed pod | `kubectl logs --previous` | +| Find non-running pods | `kubectl get pods --field-selector=status.phase!=Running` | + +## Remediate & Manage + +| Operation | Command | +| -------------------- | ----------------------------------------------------------------------------- | +| Restart a pod | `kubectl delete pod ` (it will be recreated by the deployment) | +| Restart a deployment | `kubectl rollout restart deployment ` | +| Scale a deployment | `kubectl scale deployment --replicas=` | +| Update an image | `kubectl set image deployment/ =` | +| Apply a manifest | `kubectl apply -f ` | +| Edit a resource live | `kubectl edit ` | + +## Network & Services + +| Operation | Command | +| ------------------------------ | ----------------------------------------------------------------------------------------------------- | +| Port forward to a pod | `kubectl port-forward :` | +| Get service details | `kubectl 
describe service ` | +| Get ingress details | `kubectl describe ingress ` | +| Test service connectivity | `kubectl run --rm -it --image=alpine test-conn -- sh -c "apk add curl && curl :"` | +| Check endpoints for a service | `kubectl get endpointslice` | +| Port forward to a service | `kubectl port-forward svc/ :` | +| DNS lookup from inside cluster | `kubectl run --rm -it dns-test --image=busybox --restart=Never -- nslookup ` | +| View network policies | `kubectl get networkpolicy` | + +## Advanced Operations + +| Operation | Command | +| ---------------- | -------------------------------------------------------------------------------- | +| Taint a node | `kubectl taint nodes key=value:NoSchedule` | +| Tolerate a taint | Add `tolerations` to your pod spec to allow it to be scheduled on tainted nodes. | +| Cordon a node | `kubectl cordon ` (mark node as unschedulable) | +| Drain a node | `kubectl drain --ignore-daemonsets` (safely evict pods from a node) | +| Uncordon a node | `kubectl uncordon ` (mark node as schedulable again) | +| Remove a taint | `kubectl taint nodes key=value:NoSchedule-` (note the trailing `-`) | + +## Rollbacks & History + +| Operation | Command | +| ---------------------------- | ------------------------------------------------------------------- | +| View rollout history | `kubectl rollout history deployment ` | +| Rollback to previous version | `kubectl rollout undo deployment ` | +| Rollback to specific version | `kubectl rollout undo deployment --to-revision=2` | +| Check rollout status | `kubectl rollout status deployment ` | + +## Configuration & Secrets + +| Operation | Command | +| ------------------------------ | -------------------------------------------------------------------------- | +| List configmaps | `kubectl get configmaps` | +| View a configmap | `kubectl describe configmap ` | +| List secrets | `kubectl get secrets` | +| Decode a secret value | `kubectl get secret -o jsonpath='{.data.}' \| base64 --decode` | +| Create 
a secret from literals | `kubectl create secret generic --from-literal=key=value` | +| Create a configmap from a file | `kubectl create configmap --from-file=` | + +## Resource Inspection + +| Operation | Command | +| ---------------------------------- | ------------------------------------------------------------ | +| Get resource as YAML | `kubectl get -o yaml` | +| Get resource as JSON | `kubectl get -o json` | +| Extract a field with JSONPath | `kubectl get pods -o jsonpath='{.items[*].metadata.name}'` | +| Diff live state vs. local manifest | `kubectl diff -f ` | +| Show resource in a specific ns | `kubectl get pods -n ` | +| Show resources in all namespaces | `kubectl get pods --all-namespaces` or `kubectl get pods -A` | + +## Context & Namespace Management + +| Operation | Command | +| --------------------- | -------------------------------------------------------------- | +| View current context | `kubectl config current-context` | +| Set default namespace | `kubectl config set-context --current --namespace=` | +| List all contexts | `kubectl config get-contexts` | +| Switch context | `kubectl config use-context ` | +| View cluster info | `kubectl cluster-info` | +| List all namespaces | `kubectl get namespaces` | + +## Cleanup + +| Operation | Command | +| --------------------------------- | ---------------------------------------------------------- | +| Delete a resource | `kubectl delete ` | +| Delete resources from a manifest | `kubectl delete -f ` | +| Force delete a stuck pod | `kubectl delete pod --grace-period=0 --force` | +| Delete all pods in a namespace | `kubectl delete pods --all -n ` | +| Remove completed/failed jobs | `kubectl delete jobs --field-selector status.successful=1` | +| Prune resources not in a manifest | `kubectl apply -f --prune -l ` | diff --git a/content/99-kind/api-deployment.yaml b/content/99-kind/api-deployment.yaml new file mode 100644 index 00000000..28f5ea91 --- /dev/null +++ b/content/99-kind/api-deployment.yaml @@ -0,0 
+1,47 @@ +kind: Deployment +apiVersion: apps/v1 + +metadata: + name: nanomon-api + +spec: + replicas: 2 + selector: + matchLabels: + app: nanomon-api + template: + metadata: + labels: + app: nanomon-api + spec: + containers: + - name: api-container + + image: ghcr.io/benc-uk/nanomon-api:latest + imagePullPolicy: Always + + ports: + - containerPort: 8000 + + resources: + requests: + cpu: 50m + memory: 50Mi + limits: + memory: 128Mi + + readinessProbe: + httpGet: + port: 8000 + path: /api/health + initialDelaySeconds: 5 + periodSeconds: 10 + + env: + - name: POSTGRES_DSN + value: "host=database user=nanomon dbname=nanomon sslmode=disable" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: database-creds + key: password diff --git a/content/99-kind/api-service.yaml b/content/99-kind/api-service.yaml new file mode 100644 index 00000000..10b4900a --- /dev/null +++ b/content/99-kind/api-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 + +metadata: + name: api + +spec: + type: ClusterIP + selector: + app: nanomon-api + ports: + - protocol: TCP + port: 80 + targetPort: 8000 diff --git a/content/99-kind/cluster.yaml b/content/99-kind/cluster.yaml new file mode 100644 index 00000000..91c671e4 --- /dev/null +++ b/content/99-kind/cluster.yaml @@ -0,0 +1,16 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + extraPortMappings: + - containerPort: 30000 + hostPort: 30000 + protocol: TCP + - containerPort: 30001 + hostPort: 30001 + protocol: TCP + - containerPort: 30002 + hostPort: 30002 + protocol: TCP + - role: worker + - role: worker diff --git a/content/99-kind/frontend-deployment.yaml b/content/99-kind/frontend-deployment.yaml new file mode 100644 index 00000000..22a803ac --- /dev/null +++ b/content/99-kind/frontend-deployment.yaml @@ -0,0 +1,42 @@ +kind: Deployment +apiVersion: apps/v1 + +metadata: + name: nanomon-frontend + +spec: + replicas: 1 + selector: + matchLabels: + app: nanomon-frontend + template: + 
metadata: + labels: + app: nanomon-frontend + spec: + containers: + - name: frontend-container + + image: ghcr.io/benc-uk/nanomon-frontend:latest + imagePullPolicy: Always + + ports: + - containerPort: 8001 + + resources: + requests: + cpu: 50m + memory: 50Mi + limits: + memory: 128Mi + + readinessProbe: + httpGet: + port: 8001 + path: / + initialDelaySeconds: 5 + periodSeconds: 10 + + env: + - name: API_ENDPOINT + value: /api diff --git a/content/99-kind/frontend-service.yaml b/content/99-kind/frontend-service.yaml new file mode 100644 index 00000000..a7b72fb4 --- /dev/null +++ b/content/99-kind/frontend-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 + +metadata: + name: frontend + +spec: + type: ClusterIP + selector: + app: nanomon-frontend + ports: + - protocol: TCP + port: 80 + targetPort: 8001 diff --git a/content/99-kind/index.md b/content/99-kind/index.md new file mode 100644 index 00000000..6a3386a2 --- /dev/null +++ b/content/99-kind/index.md @@ -0,0 +1,194 @@ +--- +tags: ignore +index: 99 +title: Using Kind instead of Azure Kubernetes Service +summary: This page provides notes and modifications for using Kind for this workshop, instead of AKS +layout: default.njk +icon: 📌 +--- + +# {{ icon }} {{ title }} + +## Introduction + +If you don't have access to a cloud environment or prefer to do your development and testing locally, you can use +[Kind (Kubernetes IN Docker)](https://kind.sigs.k8s.io/) to create a local Kubernetes cluster. Kind runs Kubernetes +clusters in Docker containers, making it an excellent tool for local development and testing. However there are some +differences and modifications needed when using Kind compared to a cloud-based Kubernetes cluster, especially in the +context of this workshop. Rather than add a lot of specific instructions to the main workshop content, this page +provides notes and modifications for using Kind as a local Kubernetes cluster for development and testing. 
+ +## Setup and Usage + +The installation of Kind is straightforward, and the documentation is quite good. However, there are a few things to +keep in mind when using Kind for local Kubernetes development. + +1. See [Kind's official documentation](https://kind.sigs.k8s.io/) for installation instructions and usage details. You + will need Docker or Podman installed and set up on your machine before you begin. +1. When creating a Kind cluster, you will need to specify port mappings to access your services from outside the + cluster. Create a YAML file (e.g., `kind-config.yaml`) with the following content: + +```yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + # NodePorts to allow external access + - role: control-plane + extraPortMappings: + - containerPort: 30000 + hostPort: 30000 + protocol: TCP + - containerPort: 30001 + hostPort: 30001 + protocol: TCP + # Three worker nodes, these aren't real nodes! + - role: worker + - role: worker + - role: worker +``` + +1. Then create the cluster using this configuration: + +```bash +kind create cluster --config kind-config.yaml +``` + +This should create a Kind cluster with the specified port mappings, allowing you to access services on ports 30000 and +30001 from your host machine. + +## Workshop Modifications + +If you want to use Kind for this workshop, there are a few modifications you will need to make to the instructions and +manifests. We'll not provision any resources in the cloud or Azure so some of the steps will be skipped, and some of the +manifests will need to be modified to work with Kind. 
+ +### General Modifications + +We will not use our own container registry, so for any image references in the manifests we'll use the public images +published on GitHub Container Registry instead of the ones from Azure Container Registry, as follows: + +- API: `ghcr.io/benc-uk/nanomon-api:latest` +- Frontend: `ghcr.io/benc-uk/nanomon-frontend:latest` +- Runner: `ghcr.io/benc-uk/nanomon-runner:latest` +- Preconfigured PostgreSQL: `ghcr.io/benc-uk/nanomon-postgres:latest` + +### Section 01 & 02 - Cluster and Registry Setup + +Skip these sections as we won't be provisioning any resources in the cloud or Azure. Instead, we'll create a local Kind +cluster and use public images from GitHub Container Registry. + +### Section 05 - Network Basics + +External load balancers are hard to get working in Kind, so instead of using a `LoadBalancer` service type for the API, +we will use `NodePort` and access it via localhost and the mapped port. + +> What is a `NodePort` type of _Service_? A `NodePort` service exposes the service on a static port on each node in the +> cluster. This means that you can access the service from outside the cluster by sending requests to any node's IP +> address and the specified `NodePort`. When working in the cloud you rarely if ever use `NodePort` services, but in a +> local development environment like Kind, it's a common way to access services running in the cluster. 
+ +When creating the _Service_ for the API, modify the manifest to use `NodePort` instead of `LoadBalancer`, and specify a +port that matches the port mapping we set up in the Kind cluster configuration (e.g., 30000): + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: api +spec: + type: NodePort + selector: + app: nanomon-api + ports: + - protocol: TCP + port: 8000 + targetPort: 8000 + nodePort: 30000 +``` + +### Section 06 - Frontend Deployment + +When deploying the frontend, do the same thing as with the API, use `NodePort` instead of `LoadBalancer` and specify a +different port that matches the port mapping in the Kind cluster configuration (e.g., 30001): + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: frontend +spec: + type: NodePort + selector: + app: nanomon-frontend + ports: + - protocol: TCP + port: 8001 + targetPort: 8001 + nodePort: 30001 +``` + +- Instead of using `__API_EXTERNAL_IP__` in the frontend configuration, you use `localhost:30000` to access the API from + the frontend. +- Instead of using an external IP address to access the frontend, you can access it at `http://localhost:30001` from + your host machine. + +### Section 09 - Ingress + +- When deploying Nginx Ingress Controller, you can still use Helm but we need to use a `NodePort` _Service_ instead of + `LoadBalancer`, and we won't have an external IP address. Instead, we'll access the ingress controller using + `localhost` and the assigned `NodePort`. + +```bash +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm install ingress-nginx ingress-nginx/ingress-nginx \ + --set controller.service.type=NodePort \ + --set controller.service.nodePorts.http=30000 \ + --namespace ingress +``` + +### Section 09a - Gateway API + +- Be sure to delete the API and frontend services created in previous sections, before creating the Gateway API resources, + as they will conflict with the Gateway API controller's own services. 
`kubectl delete svc api frontend -n default` + should do the trick. +- When deploying the Gateway API controller, we will use a `NodePort` _Service_ and access it via `localhost` and the + assigned `NodePort`. Note we use the same `NodePort` values as we did in sections 05 and 06 which is why we need to + delete the previous services, otherwise there will be conflicts. + +```bash +helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --namespace nginx-gateway \ + --set nginx.service.type=NodePort \ + --set-json 'nginx.service.nodePorts=[{"port":30000,"listenerPort":80}, {"port":30001,"listenerPort":8443}]' +``` + +- You can now recreate the _Service_'s for the API and frontend as `ClusterIP` type, as described in 'Reconfiguring The + App' + +### Section 10 - Extra Advanced + +The part on persisting data with Azure Disk won't be relevant for Kind, but you can still use a `PersistentVolume` and +`PersistentVolumeClaim` with a local storage class. + +The only difference is that instead of using `default` storage class, you will need to use `standard`, so in the +StatefulSet manifest for PostgreSQL, modify the `storageClassName` to `standard`. + +### Section 11 - Observability + +No modification is needed for this section, and surprisingly the kube-prometheus-stack can be deployed in Kind without +problems. + +### Section 12 - CI/CD with GitHub Actions + +Getting GitHub Actions to deploy to a local Kind cluster is clearly a non-starter — you will not be able to do much with +this section. However, you can still build and push your images to GitHub Container Registry, and deploy them to your +cluster manually. + +### Section 13 - GitOps & Flux + +The section on Kustomize can be followed; however, installation and use of Flux is not really feasible in a local Kind +cluster, so you can skip that part. 
+ +### Section 14 - Nodes + +Kind runs Kubernetes clusters in Docker containers, so you won't have access to the underlying nodes in the same way you +would with a cloud-based Kubernetes cluster. However, you can still play with labelling nodes diff --git a/content/99-kind/ingress.yaml b/content/99-kind/ingress.yaml new file mode 100644 index 00000000..ca9afa84 --- /dev/null +++ b/content/99-kind/ingress.yaml @@ -0,0 +1,29 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress + +metadata: + name: nanomon + labels: + name: nanomon + +spec: + host: + ingressClassName: nginx + rules: + - http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: frontend + port: + number: 80 + + - pathType: Prefix + path: "/api" + backend: + service: + name: api + port: + number: 80 diff --git a/content/99-kind/postgres-service.yaml b/content/99-kind/postgres-service.yaml new file mode 100644 index 00000000..12771ecb --- /dev/null +++ b/content/99-kind/postgres-service.yaml @@ -0,0 +1,15 @@ +kind: Service +apiVersion: v1 + +metadata: + # We purposefully pick a different name for the service from the deployment + name: database + +spec: + type: ClusterIP + selector: + app: postgres + ports: + - protocol: TCP + port: 5432 + targetPort: 5432 diff --git a/content/99-kind/postgres-statefulset.yaml b/content/99-kind/postgres-statefulset.yaml new file mode 100644 index 00000000..9c8ca9db --- /dev/null +++ b/content/99-kind/postgres-statefulset.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: StatefulSet + +metadata: + name: postgres + +spec: + serviceName: postgres + replicas: 1 + selector: + matchLabels: + app: postgres + + volumeClaimTemplates: + - metadata: + name: postgres-pvc + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: standard + resources: + requests: + storage: 500M + + template: + metadata: + labels: + app: postgres + + spec: + volumes: + - name: initdb-vol + configMap: + name: nanomon-sql-init + + containers: + - name: postgres + image: postgres:17 
+ + ports: + - containerPort: 5432 + + env: + - name: POSTGRES_DB + value: "nanomon" + - name: POSTGRES_USER + value: "nanomon" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: database-creds + key: password + + resources: + requests: + cpu: 50m + memory: 100Mi + limits: + memory: 512Mi + + readinessProbe: + exec: + command: ["pg_isready", "-U", "nanomon"] + initialDelaySeconds: 5 + periodSeconds: 10 + + volumeMounts: + - name: initdb-vol + mountPath: /docker-entrypoint-initdb.d + readOnly: true + - name: postgres-pvc + mountPath: /var/lib/postgresql/data + subPath: data diff --git a/content/99-kind/runner-deployment.yaml b/content/99-kind/runner-deployment.yaml new file mode 100644 index 00000000..4faa49dd --- /dev/null +++ b/content/99-kind/runner-deployment.yaml @@ -0,0 +1,37 @@ +kind: Deployment +apiVersion: apps/v1 + +metadata: + name: nanomon-runner + +spec: + replicas: 1 + selector: + matchLabels: + app: nanomon-runner + template: + metadata: + labels: + app: nanomon-runner + spec: + containers: + - name: runner-container + + image: ghcr.io/benc-uk/nanomon-runner:latest + imagePullPolicy: Always + + resources: + requests: + cpu: 50m + memory: 50Mi + limits: + memory: 128Mi + + env: + - name: POSTGRES_DSN + value: "host=database user=nanomon dbname=nanomon sslmode=disable" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: database-creds + key: password diff --git a/content/99-kind/setup.sh b/content/99-kind/setup.sh new file mode 100644 index 00000000..c9cdcf9c --- /dev/null +++ b/content/99-kind/setup.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +DIR=$(dirname "$(readlink -f "$0")") +curl -Ssl https://kube-workshop.benco.io/08-more-improvements/nanomon_init.sql -o /tmp/nanomon_init.sql +kubectl create configmap nanomon-sql-init --from-file=/tmp/nanomon_init.sql +kubectl create secret generic database-creds --from-literal password='kindaSecret123!' 
+ +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm install ingress-nginx ingress-nginx/ingress-nginx \ + --set controller.service.type=NodePort \ + --set controller.service.nodePorts.http=30000 \ + --namespace ingress-nginx --create-namespace + +kubectl apply --namespace default -f $DIR/ \ No newline at end of file diff --git a/content/_includes/main.css b/content/_includes/main.css index 170eb10c..f008bd83 100644 --- a/content/_includes/main.css +++ b/content/_includes/main.css @@ -371,6 +371,17 @@ footer { transition: none !important; } +td { + padding: 10px; + border: 1px solid var(--border-color); + transition: border-color 0.3s ease; +} + +/* remove cell spacing */ +table { + border-collapse: collapse; +} + /* Mobile responsive styles */ @media (max-width: 768px) { body { diff --git a/content/_includes/main.js b/content/_includes/main.js index ca7af33a..e2096dc9 100644 --- a/content/_includes/main.js +++ b/content/_includes/main.js @@ -53,11 +53,11 @@ function updateThemeButtonText() { const currentTheme = html.getAttribute("data-theme"); if (currentTheme === "dark") { - themeToggle.textContent = "Light"; + themeToggle.textContent = "Dark"; } else if (currentTheme === "light") { - themeToggle.textContent = "Auto"; + themeToggle.textContent = "Light"; } else { - themeToggle.textContent = "Dark"; + themeToggle.textContent = "Auto"; } } diff --git a/content/index.md b/content/index.md index 454984fe..fb909dfa 100644 --- a/content/index.md +++ b/content/index.md @@ -26,7 +26,10 @@ code, and working files for all of the sections. ## Azure Kubernetes Service (AKS) -You'll be using AKS to learn how to work with Kubernetes running as a managed service in Azure. +The main workshop has been built around using Azure Kubernetes Service (AKS) as the Kubernetes environment, almost none +of the content is specific to AKS, and you can follow along with any Kubernetes cluster. 
However, if you want to follow +along with the main content, you will need access to an Azure subscription to create the AKS cluster and other +resources. Workshop sections & topics: @@ -40,7 +43,7 @@ Workshop sections & topics: {%- endfor -%} -> Some familarity with Azure is required for sections 1 and 2, but after that the focus is on Kubernetes itself. +> Some familiarity with Azure is required for sections 1 and 2, but after that the focus is on Kubernetes itself. ### 🍵 Optional Sections @@ -57,6 +60,19 @@ attempted, and they do not run in order. {%- endfor -%} +### 📌 Using 'Kind' Instead of AKS + +The workshop was designed with AKS in mind, but if you don't have access to Azure or prefer to do your development and +testing locally, you can use [Kind (Kubernetes IN Docker)](https://kind.sigs.k8s.io/) to create a local Kubernetes +cluster. Kind runs Kubernetes clusters locally in Docker containers, making it an excellent tool for local development +and testing. + +Below is a link to notes and modifications for if you want to try this path. 
It's not a section in the main flow of the +workshop, but rather a separate page with notes and alternative instructions for each of the main sections of the +workshop (above) + +- [Using Kind instead of Azure Kubernetes Service](99-kind) + ### 📖 Extra Reading & Teach Yourself Exercises A very brief list of potential topics and Kubernetes features you may want to look at after finishing: diff --git a/eleventy.config.js b/eleventy.config.js index 19cbe40d..41a4be9d 100644 --- a/eleventy.config.js +++ b/eleventy.config.js @@ -16,6 +16,19 @@ export default function (eleventyConfig) { return String(num).padStart(places, "0"); }); + // Custom sorted collections to ensure correct ordering by index + eleventyConfig.addCollection("section", function (collectionApi) { + return collectionApi + .getFilteredByTag("section") + .sort((a, b) => a.data.index - b.data.index); + }); + + eleventyConfig.addCollection("extra", function (collectionApi) { + return collectionApi + .getFilteredByTag("extra") + .sort((a, b) => a.data.index - b.data.index); + }); + let options = { html: true, }; diff --git a/package-lock.json b/package-lock.json index ddb4abc5..4037a622 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,28 +12,28 @@ "@11ty/eleventy": "^3.1.2", "@11ty/eleventy-plugin-syntaxhighlight": "^5.0.2", "clean-css": "^5.3.3", - "markdown-it": "^14.1.0", + "markdown-it": "^14.1.1", "markdown-it-attrs": "^4.3.1", - "prettier": "^3.6.2" + "prettier": "^3.8.1" } }, "node_modules/@11ty/dependency-tree": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@11ty/dependency-tree/-/dependency-tree-4.0.0.tgz", - "integrity": "sha512-PTOnwM8Xt+GdJmwRKg4pZ8EKAgGoK7pedZBfNSOChXu8MYk2FdEsxdJYecX4t62owpGw3xK60q9TQv/5JI59jw==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@11ty/dependency-tree/-/dependency-tree-4.0.2.tgz", + "integrity": "sha512-RTF6VTZHatYf7fSZBUN3RKwiUeJh5dhWV61gDPrHhQF2/gzruAkYz8yXuvGLx3w3ZBKreGrR+MfYpSVkdbdbLA==", "license": "MIT", 
"dependencies": { "@11ty/eleventy-utils": "^2.0.1" } }, "node_modules/@11ty/dependency-tree-esm": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@11ty/dependency-tree-esm/-/dependency-tree-esm-2.0.0.tgz", - "integrity": "sha512-+4ySOON4aEAiyAGuH6XQJtxpGSpo6nibfG01krgix00sqjhman2+UaDUopq6Ksv8/jBB3hqkhsHe3fDE4z8rbA==", + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@11ty/dependency-tree-esm/-/dependency-tree-esm-2.0.4.tgz", + "integrity": "sha512-MYKC0Ac77ILr1HnRJalzKDlb9Z8To3kXQCltx299pUXXUFtJ1RIONtULlknknqW8cLe19DLVgmxVCtjEFm7h0A==", "license": "MIT", "dependencies": { - "@11ty/eleventy-utils": "^2.0.1", - "acorn": "^8.14.0", + "@11ty/eleventy-utils": "^2.0.7", + "acorn": "^8.15.0", "dependency-graph": "^1.0.0", "normalize-path": "^3.0.0" } @@ -163,18 +163,6 @@ "url": "https://opencollective.com/11ty" } }, - "node_modules/@11ty/eleventy/node_modules/entities": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", - "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, "node_modules/@11ty/lodash-custom": { "version": "4.17.21", "resolved": "https://registry.npmjs.org/@11ty/lodash-custom/-/lodash-custom-4.17.21.tgz", @@ -189,9 +177,9 @@ } }, "node_modules/@11ty/posthtml-urls": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@11ty/posthtml-urls/-/posthtml-urls-1.0.1.tgz", - "integrity": "sha512-6EFN/yYSxC/OzYXpq4gXDyDMlX/W+2MgCvvoxf11X1z76bqkqFJ8eep5RiBWfGT5j0323a1pwpelcJJdR46MCw==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@11ty/posthtml-urls/-/posthtml-urls-1.0.2.tgz", + "integrity": "sha512-0vaV3Wt0surZ+oS1VdKKe0axeeupuM+l7W/Z866WFQwF+dGg2Tc/nmhk/5l74/Y55P8KyImnLN9CdygNw2huHg==", "license": "MIT", "dependencies": { "evaluate-value": "^2.0.0", @@ 
-204,9 +192,9 @@ } }, "node_modules/@11ty/recursive-copy": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@11ty/recursive-copy/-/recursive-copy-4.0.2.tgz", - "integrity": "sha512-174nFXxL/6KcYbLYpra+q3nDbfKxLxRTNVY1atq2M1pYYiPfHse++3IFNl8mjPFsd7y2qQjxLORzIjHMjL3NDQ==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@11ty/recursive-copy/-/recursive-copy-4.0.3.tgz", + "integrity": "sha512-SX48BTLEGX8T/OsKWORsHAAeiDsbFl79Oa/0Wg/mv/d27b7trCVZs7fMHvpSgDvZz/fZqx5rDk8+nx5oyT7xBw==", "license": "ISC", "dependencies": { "errno": "^1.0.0", @@ -486,9 +474,9 @@ "license": "MIT" }, "node_modules/debug": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", - "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -600,9 +588,9 @@ } }, "node_modules/entities": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", - "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", "license": "BSD-2-Clause", "engines": { "node": ">=0.12" @@ -732,17 +720,17 @@ } }, "node_modules/finalhandler": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", - "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "version": "1.3.2", + "resolved": 
"https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz", + "integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==", "license": "MIT", "dependencies": { "debug": "2.6.9", "encodeurl": "~2.0.0", "escape-html": "~1.0.3", - "on-finished": "2.4.1", + "on-finished": "~2.4.1", "parseurl": "~1.3.3", - "statuses": "2.0.1", + "statuses": "~2.0.2", "unpipe": "~1.0.0" }, "engines": { @@ -824,9 +812,9 @@ } }, "node_modules/gray-matter/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "license": "MIT", "dependencies": { "argparse": "^1.0.7", @@ -877,19 +865,23 @@ } }, "node_modules/http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", "license": "MIT", "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" }, "engines": { "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/inherits": { @@ -999,9 +991,9 @@ } }, "node_modules/js-yaml": { - "version": "4.1.0", - 
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "license": "MIT", "dependencies": { "argparse": "^2.0.1" @@ -1047,9 +1039,9 @@ } }, "node_modules/liquidjs": { - "version": "10.21.1", - "resolved": "https://registry.npmjs.org/liquidjs/-/liquidjs-10.21.1.tgz", - "integrity": "sha512-NZXmCwv3RG5nire3fmIn9HsOyJX3vo+ptp0yaXUHAMzSNBhx74Hm+dAGJvscUA6lNqbLuYfXgNavRQ9UbUJhQQ==", + "version": "10.24.0", + "resolved": "https://registry.npmjs.org/liquidjs/-/liquidjs-10.24.0.tgz", + "integrity": "sha512-TAUNAdgwaAXjjcUFuYVJm9kOVH7zc0mTKxsG9t9Lu4qdWjB2BEblyVIYpjWcmJLMGgiYqnGNJjpNMHx0gp/46A==", "license": "MIT", "dependencies": { "commander": "^10.0.0" @@ -1059,7 +1051,7 @@ "liquidjs": "bin/liquid.js" }, "engines": { - "node": ">=14" + "node": ">=16" }, "funding": { "type": "opencollective", @@ -1073,18 +1065,18 @@ "license": "MIT" }, "node_modules/luxon": { - "version": "3.7.1", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.7.1.tgz", - "integrity": "sha512-RkRWjA926cTvz5rAb1BqyWkKbbjzCGchDUIKMCUvNi17j6f6j8uHGDV82Aqcqtzd+icoYpELmG3ksgGiFNNcNg==", + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.7.2.tgz", + "integrity": "sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==", "license": "MIT", "engines": { "node": ">=12" } }, "node_modules/markdown-it": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.0.tgz", - "integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", + "version": "14.1.1", + "resolved": 
"https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.1.tgz", + "integrity": "sha512-BuU2qnTti9YKgK5N+IeMubp14ZUKUUw7yeJbkjtosvHiP0AZ5c8IAgEMk79D0eC8F23r4Ac/q8cAIFdm2FtyoA==", "license": "MIT", "dependencies": { "argparse": "^2.0.1", @@ -1110,6 +1102,18 @@ "markdown-it": ">= 9.0.0" } }, + "node_modules/markdown-it/node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, "node_modules/maximatch": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/maximatch/-/maximatch-0.1.0.tgz", @@ -1153,15 +1157,19 @@ } }, "node_modules/mime-types": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.1.tgz", - "integrity": "sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", "license": "MIT", "dependencies": { "mime-db": "^1.54.0" }, "engines": { - "node": ">= 0.6" + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/minimatch": { @@ -1201,9 +1209,9 @@ "license": "BSD-3-Clause" }, "node_modules/morphdom": { - "version": "2.7.7", - "resolved": "https://registry.npmjs.org/morphdom/-/morphdom-2.7.7.tgz", - "integrity": "sha512-04GmsiBcalrSCNmzfo+UjU8tt3PhZJKzcOy+r1FlGA7/zri8wre3I1WkYN9PT3sIeIKfW9bpyElA+VzOg2E24g==", + "version": "2.7.8", + "resolved": "https://registry.npmjs.org/morphdom/-/morphdom-2.7.8.tgz", + "integrity": 
"sha512-D/fR4xgGUyVRbdMGU6Nejea1RFzYxYtyurG4Fbv2Fi/daKlWKuXGLOdXtl+3eIwL110cI2hz1ZojGICjjFLgTg==", "license": "MIT" }, "node_modules/ms": { @@ -1315,9 +1323,9 @@ } }, "node_modules/posthtml": { - "version": "0.16.6", - "resolved": "https://registry.npmjs.org/posthtml/-/posthtml-0.16.6.tgz", - "integrity": "sha512-JcEmHlyLK/o0uGAlj65vgg+7LIms0xKXe60lcDOTU7oVX/3LuEuLwrQpW3VJ7de5TaFKiW4kWkaIpJL42FEgxQ==", + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/posthtml/-/posthtml-0.16.7.tgz", + "integrity": "sha512-7Hc+IvlQ7hlaIfQFZnxlRl0jnpWq2qwibORBhQYIb0QbNtuicc5ZxvKkVT71HJ4Py1wSZ/3VR1r8LfkCtoCzhw==", "license": "MIT", "dependencies": { "posthtml-parser": "^0.11.0", @@ -1364,9 +1372,9 @@ } }, "node_modules/prettier": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", - "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.1.tgz", + "integrity": "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==", "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" @@ -1449,9 +1457,9 @@ } }, "node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", "license": "ISC", "bin": { "semver": "bin/semver.js" @@ -1467,25 +1475,29 @@ "license": "MIT" }, "node_modules/send": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/send/-/send-1.2.0.tgz", - "integrity": 
"sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz", + "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", "license": "MIT", "dependencies": { - "debug": "^4.3.5", + "debug": "^4.4.3", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", - "http-errors": "^2.0.0", - "mime-types": "^3.0.1", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", - "statuses": "^2.0.1" + "statuses": "^2.0.2" }, "engines": { "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/setprototypeof": { @@ -1540,9 +1552,9 @@ } }, "node_modules/statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", "license": "MIT", "engines": { "node": ">= 0.8" @@ -1558,13 +1570,13 @@ } }, "node_modules/tinyglobby": { - "version": "0.2.14", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.14.tgz", - "integrity": "sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==", + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", "license": "MIT", "dependencies": { - "fdir": "^6.4.4", - "picomatch": "^4.0.2" + "fdir": "^6.5.0", + "picomatch": "^4.0.3" }, "engines": { 
"node": ">=12.0.0" @@ -1616,9 +1628,9 @@ "license": "MIT" }, "node_modules/ws": { - "version": "8.18.3", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", - "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "version": "8.19.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.19.0.tgz", + "integrity": "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==", "license": "MIT", "engines": { "node": ">=10.0.0" diff --git a/package.json b/package.json index 63cd53d0..231b84c6 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "kube-workshop", - "version": "2.0.0", - "description": "A workshop covering the basics of Kubernetes, using Azure Kubernetes Service (AKS) as the platform.", + "version": "2.0.1", + "description": "A workshop to learn Kubernetes, with hands-on labs and exercises to enable developers to get up and running", "author": "Ben Coleman", "license": "MIT", "type": "module", @@ -16,8 +16,8 @@ "@11ty/eleventy": "^3.1.2", "@11ty/eleventy-plugin-syntaxhighlight": "^5.0.2", "clean-css": "^5.3.3", - "markdown-it": "^14.1.0", + "markdown-it": "^14.1.1", "markdown-it-attrs": "^4.3.1", - "prettier": "^3.6.2" + "prettier": "^3.8.1" } -} +} \ No newline at end of file