diff --git a/.github/workflows/test-fastpaths.yaml b/.github/workflows/test-fastpaths.yaml index d7a88bd1d7..db919f3399 100644 --- a/.github/workflows/test-fastpaths.yaml +++ b/.github/workflows/test-fastpaths.yaml @@ -3,7 +3,7 @@ name: Test - Fastpaths on: workflow_dispatch: schedule: - - cron: "0 17 * * 5" + - cron: "0 7 * * 5" permissions: id-token: write diff --git a/Makefile b/Makefile index 726b654547..131f0c9346 100644 --- a/Makefile +++ b/Makefile @@ -4,15 +4,28 @@ environment='' shell_command='' shell_simple_command='' glob='-' +cluster='all' .PHONY: install install: yarn install +.PHONY: build +build: install + yarn build + +.PHONY: warning +warning: + @echo "Note: 'make serve' now does a full static build. For dev mode, use 'make start' instead." + .PHONY: serve -serve: install +serve: warning build yarn serve +.PHONY: start +start: install + yarn start + .PHONY: tf-fmt tf-fmt: cd ./terraform && terraform fmt --recursive @@ -37,13 +50,17 @@ reset-environment: delete-environment: bash hack/shell.sh $(environment) delete-environment +.PHONY: pre-provision +pre-provision: + bash hack/pre-provision-resources.sh $(environment) $(action) + .PHONY: create-infrastructure create-infrastructure: - bash hack/create-infrastructure.sh $(environment) + bash hack/create-infrastructure.sh $(environment) $(cluster) .PHONY: destroy-infrastructure destroy-infrastructure: - bash hack/destroy-infrastructure.sh $(environment) + bash hack/destroy-infrastructure.sh $(environment) $(cluster) .PHONY: deploy-ide deploy-ide: @@ -55,4 +72,5 @@ destroy-ide: .PHONY: lint lint: - yarn lint \ No newline at end of file + yarn lint + diff --git a/cluster/eksctl/access-entries.yaml b/cluster/eksctl/access-entries.yaml new file mode 100644 index 0000000000..acadf890e9 --- /dev/null +++ b/cluster/eksctl/access-entries.yaml @@ -0,0 +1,6 @@ + accessEntries: + - principalARN: ${RESOURCE_CODEBUILD_ROLE_ARN} + accessPolicies: + - policyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" + accessScope: + type: cluster diff --git a/cluster/eksctl/cluster-auto.yaml b/cluster/eksctl/cluster-auto.yaml new file mode 100644 index 0000000000..44d2fc0788 --- /dev/null +++ b/cluster/eksctl/cluster-auto.yaml @@ -0,0 +1,25 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +availabilityZones: + - ${AWS_REGION}a + - ${AWS_REGION}b + - ${AWS_REGION}c +metadata: + name: ${EKS_CLUSTER_AUTO_NAME} + region: ${AWS_REGION} + version: "1.33" + tags: + karpenter.sh/discovery: ${EKS_CLUSTER_AUTO_NAME} + created-by: eks-workshop-v2 + env: ${EKS_CLUSTER_AUTO_NAME} +vpc: + cidr: 10.43.0.0/16 + clusterEndpoints: + privateAccess: true + publicAccess: true +autoModeConfig: + enabled: true + nodePools: [general-purpose, system] +accessConfig: + authenticationMode: API + bootstrapClusterCreatorAdminPermissions: true diff --git a/cluster/eksctl/cluster.yaml b/cluster/eksctl/cluster.yaml index 196674cf47..9690b0418e 100644 --- a/cluster/eksctl/cluster.yaml +++ b/cluster/eksctl/cluster.yaml @@ -40,4 +40,7 @@ remoteNetworkConfig: remoteNodeNetworks: - cidrs: ["10.52.0.0/16"] remotePodNetworks: - - cidrs: ["10.53.0.0/16"] \ No newline at end of file + - cidrs: ["10.53.0.0/16"] +accessConfig: + authenticationMode: API + bootstrapClusterCreatorAdminPermissions: true diff --git a/docs/style_guide.md b/docs/style_guide.md index 2d8e787c1b..36492e2f50 100644 --- a/docs/style_guide.md +++ b/docs/style_guide.md @@ -64,6 +64,14 @@ sidebar_custom_props: { "explore": "https://" } --- ``` +To mark your module as optional: +``` +--- +... 
+sidebar_custom_props: { "optional": "true" } +--- +``` + ### Navigating the AWS console There are instances where the user needs to navigate to specific screens in the AWS console. It is preferable to provide a link to the exact screen if possible, or a close as can be done. diff --git a/hack/create-infrastructure.sh b/hack/create-infrastructure.sh index 14775e0afb..46d6a9341c 100644 --- a/hack/create-infrastructure.sh +++ b/hack/create-infrastructure.sh @@ -1,9 +1,13 @@ #!/bin/bash environment=$1 +cluster=${2:-all} +export USE_CURRENT_USER=${USE_CURRENT_USER:-1} # We don't want to change the ARN in exec +echo "Creating infrastructure for environment ${environment} and cluster ${cluster}" set -Eeuo pipefail set -u +set -x SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) @@ -13,12 +17,30 @@ bash $SCRIPT_DIR/update-iam-role.sh $environment sleep 5 +pids=() + cluster_exists=0 aws eks describe-cluster --name "${EKS_CLUSTER_NAME}" &> /dev/null || cluster_exists=$? -if [ $cluster_exists -eq 0 ]; then +if [ $cluster_exists -ne 0 ] && [[ "$cluster" == "standard" || "$cluster" == "all" ]]; then + echo "Creating cluster ${EKS_CLUSTER_NAME}" + bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster.yaml /cluster/eksctl/access-entries.yaml | envsubst | eksctl create cluster -f -' & + pids+=($!) +else echo "Cluster ${EKS_CLUSTER_NAME} already exists" +fi + +auto_cluster_exists=0 +aws eks describe-cluster --name "${EKS_CLUSTER_AUTO_NAME}" &> /dev/null || auto_cluster_exists=$? + +if [ $auto_cluster_exists -ne 0 ] && [[ "$cluster" == "auto" || "$cluster" == "all" ]]; then + echo "Creating auto mode cluster ${EKS_CLUSTER_AUTO_NAME}" + bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster-auto.yaml /cluster/eksctl/access-entries.yaml | envsubst | eksctl create cluster -f -' & + pids+=($!) 
else - echo "Creating cluster ${EKS_CLUSTER_NAME}" - bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster.yaml | envsubst | eksctl create cluster -f -' -fi \ No newline at end of file + echo "Auto mode cluster ${EKS_CLUSTER_AUTO_NAME} already exists" +fi + +for pid in "${pids[@]}"; do + wait "$pid" || exit 1 +done \ No newline at end of file diff --git a/hack/deploy-ide-cfn.sh b/hack/deploy-ide-cfn.sh index 5ba31f903e..4464926e3a 100644 --- a/hack/deploy-ide-cfn.sh +++ b/hack/deploy-ide-cfn.sh @@ -10,13 +10,21 @@ outfile=$(mktemp) bash $SCRIPT_DIR/build-ide-cfn.sh $outfile +REPOSITORY_OWNER=${REPOSITORY_OWNER:-"aws-samples"} +REPOSITORY_NAME=${REPOSITORY_NAME:-"eks-workshop-v2"} +REPOSITORY_REF=${REPOSITORY_REF:-"main"} + source $SCRIPT_DIR/lib/resolve-source-ip.sh STACK_NAME="$EKS_CLUSTER_NAME-cfn" aws cloudformation deploy --stack-name "$STACK_NAME" \ --capabilities CAPABILITY_NAMED_IAM --disable-rollback --template-file $outfile \ - --parameter-overrides InboundCIDR="$INBOUND_CIDRS" + --parameter-overrides \ + RepositoryOwner="$REPOSITORY_OWNER" \ + RepositoryName="$REPOSITORY_NAME" \ + RepositoryRef="$REPOSITORY_REF" \ + InboundCIDR="$INBOUND_CIDRS" if [ -z "$CI" ]; then IDE_URL=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ @@ -28,4 +36,4 @@ if [ -z "$CI" ]; then echo "" echo "IDE URL: $IDE_URL" echo "IDE Password: $IDE_PASSWORD" -fi \ No newline at end of file +fi diff --git a/hack/destroy-infrastructure.sh b/hack/destroy-infrastructure.sh index 4597dcd6fa..01642ba57d 100644 --- a/hack/destroy-infrastructure.sh +++ b/hack/destroy-infrastructure.sh @@ -1,24 +1,43 @@ #!/bin/bash environment=$1 +cluster=${2:-all} +echo "Destroying infrastructure for environment ${environment} and cluster ${cluster}" set -Eeuo pipefail set -u SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) - +export USE_CURRENT_USER=${USE_CURRENT_USER:-1}; source $SCRIPT_DIR/lib/common-env.sh cluster_exists=0 aws eks describe-cluster --name "${EKS_CLUSTER_NAME}" &> /dev/null || cluster_exists=$? -if [ $cluster_exists -eq 0 ]; then +if [ $cluster_exists -eq 0 ] && [[ "$cluster" == "standard" || "$cluster" == "all" ]]; then echo "Deleting cluster ${EKS_CLUSTER_NAME}" bash $SCRIPT_DIR/shell.sh "${environment}" 'delete-environment' || true + bash $SCRIPT_DIR/exec.sh "${environment}" 'eksctl delete cluster --name ${EKS_CLUSTER_NAME} --region ${AWS_REGION} --wait --force --disable-nodegroup-eviction --timeout 45m'& +else + echo "Cluster ${EKS_CLUSTER_NAME} does not exist or skipped" +fi - bash $SCRIPT_DIR/exec.sh "${environment}" 'eksctl delete cluster --name ${EKS_CLUSTER_NAME} --region ${AWS_REGION} --wait --force --disable-nodegroup-eviction --timeout 45m' +export EKS_CLUSTER_AUTO_NAME="${EKS_CLUSTER_AUTO_NAME}" +auto_cluster_exists=0 +aws eks describe-cluster --name "${EKS_CLUSTER_AUTO_NAME}" &> /dev/null || auto_cluster_exists=$? + +if [ $auto_cluster_exists -eq 0 ] && [[ "$cluster" == "auto" || "$cluster" == "all" ]]; then + echo "Deleting auto mode cluster ${EKS_CLUSTER_AUTO_NAME}" + #bash $SCRIPT_DIR/shell.sh "${environment}" 'delete-environment' || true # Needed ? 
+ bash $SCRIPT_DIR/exec.sh "${environment}" 'eksctl delete cluster --name ${EKS_CLUSTER_AUTO_NAME} --region ${AWS_REGION} --wait --force --disable-nodegroup-eviction --timeout 45m' else - echo "Cluster ${EKS_CLUSTER_NAME} does not exist" + echo "Auto mode cluster ${EKS_CLUSTER_AUTO_NAME} does not exist or skipped" fi -aws cloudformation delete-stack --stack-name ${EKS_CLUSTER_NAME}-ide-role || true \ No newline at end of file +wait + +# Only delete ide-role if all clusters are deleted +if [ "$cluster" == "all" ]; then + aws cloudformation delete-stack --stack-name ${EKS_CLUSTER_NAME}-ide-role || true + echo "Deleted role" +fi \ No newline at end of file diff --git a/hack/exec.sh b/hack/exec.sh index ea785e5b06..31c0a926d8 100644 --- a/hack/exec.sh +++ b/hack/exec.sh @@ -19,9 +19,19 @@ container_image='eks-workshop-environment' (cd $SCRIPT_DIR/../lab && $CONTAINER_CLI build -q -t $container_image .) -if [ -z "$SKIP_CREDENTIALS" ]; then + +if [ "${SKIP_CREDENTIALS:-0}" = "0" ] && [ "${USE_CURRENT_USER:-0}" = "0" ]; then + echo "Passing temp AWS credentials" source $SCRIPT_DIR/lib/generate-aws-creds.sh +elif [ "${USE_CURRENT_USER:-0}" != "0" ]; then + if [ -z "$AWS_ACCESS_KEY_ID" ]; then + echo "No AWS_ACCESS_KEY_ID found, please check your AWS credentials" + exit 1 + fi + echo "Using USE_CURRENT_USER" + aws_credential_args="-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY -e AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN:-}" else + echo "Using DEFAULT no credentials passed" aws_credential_args="" fi @@ -31,5 +41,6 @@ $CONTAINER_CLI run --rm \ -v $SCRIPT_DIR/../manifests:/manifests \ -v $SCRIPT_DIR/../cluster:/cluster \ --entrypoint /bin/bash \ - -e 'EKS_CLUSTER_NAME' -e 'AWS_REGION' -e 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' \ + -e "RESET_NO_DELETE=true" \ + -e 'EKS_CLUSTER_NAME' -e 'EKS_CLUSTER_AUTO_NAME' -e 'AWS_REGION' -e 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' -e RESOURCE_CODEBUILD_ROLE_ARN \ $aws_credential_args $container_image -c "$shell_command" \ No newline at end of file diff --git a/hack/lib/common-env.sh b/hack/lib/common-env.sh index 02ca65dad3..b029b3b69f 100644 --- a/hack/lib/common-env.sh +++ b/hack/lib/common-env.sh @@ -1,9 +1,13 @@ environment=${environment:-""} +set -x + if [ -z "$environment" ]; then export EKS_CLUSTER_NAME="eks-workshop" + export EKS_CLUSTER_AUTO_NAME="eks-workshop-auto" else export EKS_CLUSTER_NAME="eks-workshop-${environment}" + export EKS_CLUSTER_AUTO_NAME="eks-workshop-${environment}-auto" fi AWS_REGION=${AWS_REGION:-""} @@ -15,6 +19,8 @@ if [ -z "$AWS_REGION" ]; then fi SKIP_CREDENTIALS=${SKIP_CREDENTIALS:-""} +USE_CURRENT_USER=${USE_CURRENT_USER:-""} +AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-""} # We check the access key if [ -z "$SKIP_CREDENTIALS" ]; then ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) @@ -23,4 +29,9 @@ if [ -z "$SKIP_CREDENTIALS" ]; then IDE_ROLE_ARN="arn:aws:iam::${ACCOUNT_ID}:role/${IDE_ROLE_NAME}" fi +# Set RESOURCE_CODEBUILD_ROLE_ARN if not already provided (e.g. 
by Workshop Studio) +if [ -z "${RESOURCE_CODEBUILD_ROLE_ARN:-}" ]; then + export RESOURCE_CODEBUILD_ROLE_ARN="${IDE_ROLE_ARN:-}" +fi + export DOCKER_CLI_HINTS="false" \ No newline at end of file diff --git a/hack/pre-provision-resources.sh b/hack/pre-provision-resources.sh index 78752f8174..f3080b0e7f 100644 --- a/hack/pre-provision-resources.sh +++ b/hack/pre-provision-resources.sh @@ -1,45 +1,61 @@ #!/bin/bash -environment=$1 -action=$2 +set -e -if [ -z $action ]; then - action="plan" -fi +environment=$1 +action=${2:-"plan"} SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) source $SCRIPT_DIR/lib/common-env.sh -terraform_dir="${SCRIPT_DIR}/../terraform-resources" -manifests_dir="${SCRIPT_DIR}/../manifests" +# Allow overriding paths from outside (e.g. buildspec) +terraform_dir="${TERRAFORM_PREPROVISION_DIR:-${SCRIPT_DIR}/../terraform-resources}" +manifests_dir="${MANIFESTS_DIR:-${SCRIPT_DIR}/../manifests}" mkdir -p "$terraform_dir" conf_dir="$terraform_dir/conf" rm -rf $conf_dir - mkdir -p "$conf_dir" -cat << EOF > $conf_dir/backend_override.tf -terraform { +# Backend configuration: S3 or local +if [ -n "${TF_STATE_S3_BUCKET:-}" ]; then + cat << EOF > $conf_dir/backend_override.tf +terraform { + backend "s3" {} +} +EOF + backend_init_args="--backend-config=bucket=${TF_STATE_S3_BUCKET} --backend-config=key=terraform.tfstate --backend-config=region=${AWS_REGION}" +else + cat << EOF > $conf_dir/backend_override.tf +terraform { backend "local" { path = "../terraform.tfstate" } } EOF + backend_init_args="" +fi cp $manifests_dir/.workshop/terraform/base.tf $conf_dir/base.tf find $manifests_dir/modules -type d -name "preprovision" -print0 | while read -d $'\0' file do - target=$(echo $file | md5sum | cut -f1 -d" ") + md5=$(echo ${file#"$manifests_dir/modules/"} | md5sum | cut -f1 -d" " | cut -d'/' -f1 | rev) + first_path=$(echo ${file#"$manifests_dir/modules/"} | cut -d'/' -f1,2 | tr '/' '_') + target="${first_path}-$md5" + cp -R $file $conf_dir/$target cat << EOF > $conf_dir/$target.tf module "gen-$target" { source = "./$target" + providers = { + helm.auto_mode = helm.auto_mode + kubernetes.auto_mode = kubernetes.auto_mode + } eks_cluster_id = local.eks_cluster_id tags = local.tags @@ -47,12 +63,13 @@ module "gen-$target" { EOF done -terraform -chdir="${conf_dir}" init +ls -la $conf_dir -approve_args='' +terraform -chdir="${conf_dir}" init $backend_init_args +approve_args='' if [[ "$action" != 'plan' ]]; then approve_args='--auto-approve' fi -terraform -chdir="${conf_dir}" "$action" -var="eks_cluster_id=$EKS_CLUSTER_NAME" $approve_args \ No newline at end of file +terraform -chdir="${conf_dir}" "$action" -var="eks_cluster_id=$EKS_CLUSTER_NAME" $approve_args diff --git a/hack/run-tests.sh b/hack/run-tests.sh index 7672ec69de..40fc767f24 100755 --- a/hack/run-tests.sh +++ b/hack/run-tests.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -x environment=$1 module=$2 @@ -39,7 +39,16 @@ container_image='eks-workshop-test' (cd $SCRIPT_DIR/../testing && $CONTAINER_CLI build -q -t $container_image .) 
-source $SCRIPT_DIR/lib/generate-aws-creds.sh +if [ -n "${USE_CURRENT_USER:-}" ]; then + if [ -z "$AWS_ACCESS_KEY_ID" ]; then + echo "No AWS_ACCESS_KEY_ID found" + exit 1 + fi + echo "Using current user credentials" + aws_credential_args="-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY -e AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN:-}" +else + source $SCRIPT_DIR/lib/generate-aws-creds.sh +fi BACKGROUND=${BACKGROUND:-""} @@ -78,17 +87,13 @@ source $SCRIPT_DIR/lib/resolve-source-ip.sh echo "Running test suite..." -# get current IDs -USER_ID=$(id -u) -GROUP_ID=$(id -g) - exit_code=0 $CONTAINER_CLI run $background_args $dns_args \ --name $container_name \ -v $SCRIPT_DIR/../website/docs:/content \ -v $SCRIPT_DIR/../manifests:/eks-workshop/manifests \ - -e 'EKS_CLUSTER_NAME' -e 'AWS_REGION' -e 'RESOURCES_PRECREATED' -e 'INBOUND_CIDRS' \ + -e 'EKS_CLUSTER_NAME' -e 'EKS_CLUSTER_AUTO_NAME' -e 'AWS_REGION' -e 'RESOURCES_PRECREATED' -e 'INBOUND_CIDRS' \ $aws_credential_args $container_image -g "${actual_glob}" --hook-timeout 3600 --timeout 3600 $output_args ${AWS_EKS_WORKSHOP_TEST_FLAGS} || exit_code=$? if [ $exit_code -eq 0 ]; then diff --git a/hack/shell.sh b/hack/shell.sh index 9702c12cd3..76c55c286b 100644 --- a/hack/shell.sh +++ b/hack/shell.sh @@ -42,6 +42,7 @@ source $SCRIPT_DIR/lib/resolve-source-ip.sh $CONTAINER_CLI run --rm $interactive_args $dns_args \ -v $SCRIPT_DIR/../manifests:/eks-workshop/manifests \ -v $SCRIPT_DIR/../cluster:/cluster \ - -e 'EKS_CLUSTER_NAME' -e 'AWS_REGION' -e 'INBOUND_CIDRS' \ + -e "RESET_NO_DELETE=true" \ + -e 'EKS_CLUSTER_NAME' -e 'EKS_CLUSTER_AUTO_NAME' -e 'AWS_REGION' -e 'INBOUND_CIDRS' \ -p 8889:8889 \ $aws_credential_args $container_image $shell_command \ No newline at end of file diff --git a/hack/validate-terraform.sh b/hack/validate-terraform.sh index 068126058a..a7d0f5ad75 100755 --- a/hack/validate-terraform.sh +++ b/hack/validate-terraform.sh @@ -7,6 +7,8 @@ environment=$1 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) terraform_dir="$(mktemp -d)" +echo $terraform_dir + manifests_dir="${SCRIPT_DIR}/../manifests" conf_dir="$terraform_dir/conf" @@ -17,12 +19,18 @@ cp $manifests_dir/.workshop/terraform/base.tf $conf_dir/base.tf find $manifests_dir/modules -type d -name "terraform" -print0 | while read -d $'\0' file do - target=$(echo $file | md5sum | cut -f1 -d" ") + md5=$(echo ${file#"$manifests_dir/modules/"} | md5sum | cut -f1 -d" " | cut -d'/' -f1 | rev) # In case of non-unique + first_path=$(echo ${file#"$manifests_dir/modules/"} | cut -d'/' -f1,2 | tr '/' '_') + target="${first_path}-$md5" cp -R $file $conf_dir/$target cat << EOF > $conf_dir/$target.tf module "gen-$target" { source = "./$target" + providers = { + helm.auto_mode = helm.auto_mode + kubernetes.auto_mode = kubernetes.auto_mode + } eks_cluster_id = local.eks_cluster_id eks_cluster_version = local.eks_cluster_version @@ -35,6 +43,12 @@ module "gen-$target" { EOF done +cleanup() { + echo "" + echo "Terraform directory: ${conf_dir}" +} +trap cleanup EXIT + terraform -chdir="${conf_dir}" init -backend=false echo "" diff --git a/lab/bin/reset-environment b/lab/bin/reset-environment index e78f68da12..9dbebfe974 100644 --- a/lab/bin/reset-environment +++ b/lab/bin/reset-environment @@ -1,5 +1,7 @@ #!/bin/bash +set -e + GREEN='\033[1;32m' RED='\033[1;31m' PURPLE='\033[1;35m' @@ -39,8 +41,11 @@ if [ -z "$EKS_DEFAULT_MNG_MIN" ]; then fi rm -f /home/ec2-user/.kube/config 
+EKS_CLUSTER_AUTO_NAME=${EKS_CLUSTER_AUTO_NAME:-"eks-workshop-auto"} +export EKS_CLUSTER_AUTO_NAME -aws eks update-kubeconfig --name $EKS_CLUSTER_NAME --alias default --user-alias default +aws eks update-kubeconfig --name $EKS_CLUSTER_AUTO_NAME --alias eks-workshop-auto 2>/dev/null || true +aws eks update-kubeconfig --name $EKS_CLUSTER_NAME --alias default --user-alias default 2>/dev/null || true module=$1 @@ -64,10 +69,14 @@ mkdir -p /eks-workshop logmessage "🚀 We're preparing your environment for the next lab, sit tight!" REPOSITORY_REF=${REPOSITORY_REF:-""} +RESET_NO_DELETE=${RESET_NO_DELETE:-""} -if [ ! -z "${REPOSITORY_REF}" ]; then +if [ ! -z "${REPOSITORY_REF}" ] && [ -z "${RESET_NO_DELETE}" ]; then rm -f /home/ec2-user/environment/eks-workshop - rm -rf $manifests_path + if [ -z "${RESET_NO_DELETE}" ]; then + rm -rf $manifests_path + mkdir -p $manifests_path + fi rm -rf $repository_path logmessage "📦 Refreshing copy of workshop repository from GitHub..." @@ -76,7 +85,12 @@ if [ ! -z "${REPOSITORY_REF}" ]; then logmessage "" - cp -R $repository_path/manifests $manifests_path + # In containers we cannot delete the base path so we need to copy inside + for dir in $repository_path/manifests/*/ $repository_path/manifests/.*/; do + [ -d "$dir" ] || continue + [[ "$dir" == */. || "$dir" == */.. ]] && continue + cp -R "$dir" $manifests_path/ + done ln -s $manifests_path /home/ec2-user/environment/eks-workshop fi @@ -89,7 +103,106 @@ if [ ! -z "$module" ]; then curl --get -s --data-urlencode "lab=$module" --data-urlencode "account_id=$AWS_ACCOUNT_ID" $ANALYTICS_ENDPOINT || true fi - if [ $module = "introduction/getting-started" ]; then + if [[ "$module" = "fastpaths/"* ]]; then + kubectl config use-context eks-workshop-auto + + # getting-started teaches deployment from scratch — skip everything + if [[ "$module" = "fastpaths/getting-started" ]]; then + exit + fi + + # Run cleanup from previous fastpaths module if it exists + if [ -f "/eks-workshop/hooks/cleanup.sh" ]; then + logmessage "\n🧹 Cleaning up previous lab..." + bash /eks-workshop/hooks/cleanup.sh || true + rm -f /eks-workshop/hooks/cleanup.sh + fi + + # Only delete StatefulSets if EBS lab modified them (StorageClass ebs-sc exists) + # Otherwise the base app apply is idempotent and StatefulSets don't need recreation + if kubectl get storageclass ebs-sc &>/dev/null 2>&1; then + kubectl delete statefulset -l app.kubernetes.io/created-by=eks-workshop -A --ignore-not-found 2>/dev/null || true + sleep 5 + while kubectl get statefulset -l app.kubernetes.io/created-by=eks-workshop -A --no-headers 2>/dev/null | grep -q .; do + sleep 2 + done + fi + + # Deploy base application and run one-time preprovision in parallel + logmessage "\n📦 Deploying base application..." + kubectl apply -k $base_path + + # One-time preprovision: install KEDA, fluent-bit, external-secrets etc. + TF_PID="" + if ! kubectl get configmap fastpaths-provisioned -n kube-system &>/dev/null; then + logmessage "\n🔧 First time setup: provisioning fastpaths infrastructure (this only runs once)..." 
+ + rm -rf /eks-workshop/terraform + mkdir -p /eks-workshop/terraform + cp -R $manifests_path/.workshop/terraform/* /eks-workshop/terraform + rm -f /eks-workshop/terraform/lab-fastpaths.tf + + # Copy lab files BEFORE destroy so it can clean up partial state + # from a previously interrupted apply + mkdir -p /eks-workshop/terraform/lab + cp -R $manifests_path/modules/fastpaths/developers/.workshop/terraform/* /eks-workshop/terraform/lab + cp $manifests_path/.workshop/terraform/lab-fastpaths.tf /eks-workshop/terraform/lab.tf + + mkdir -p /eks-workshop/terraform-data + export TF_DATA_DIR="/eks-workshop/terraform-data" + export TF_VAR_eks_cluster_id="$EKS_CLUSTER_NAME" + export TF_VAR_eks_cluster_auto_id="$EKS_CLUSTER_AUTO_NAME" + export TF_VAR_resources_precreated="false" + + tf_dir=$(realpath --relative-to="$PWD" '/eks-workshop/terraform') + + terraform -chdir="$tf_dir" init -upgrade + terraform -chdir="$tf_dir" destroy --auto-approve + + terraform -chdir="$tf_dir" apply --auto-approve & + TF_PID=$! + fi + + logmessage "\n⏳ Waiting for application to become ready..." + sleep 10 + kubectl wait --for=condition=available --timeout=480s deployments -l app.kubernetes.io/created-by=eks-workshop -A + kubectl wait --for=condition=Ready --timeout=480s pods -l app.kubernetes.io/created-by=eks-workshop -A + + # Restart deployments to pick up any configmap changes from the base app restore + kubectl rollout restart deployment/carts -n carts + kubectl rollout restart deployment/catalog -n catalog + kubectl rollout restart deployment/orders -n orders + kubectl rollout status deployment/carts -n carts --timeout=120s + kubectl rollout status deployment/catalog -n catalog --timeout=120s + kubectl rollout status deployment/orders -n orders --timeout=120s + + if [ -n "$TF_PID" ]; then + logmessage "\n⏳ Waiting for infrastructure provisioning to complete..." + wait $TF_PID + TF_EXIT=$? + if [ $TF_EXIT -ne 0 ]; then + logmessage "\n${RED}Error:${NC} Infrastructure provisioning failed" + exit $TF_EXIT + fi + kubectl create configmap fastpaths-provisioned -n kube-system --from-literal=provisioned=true + logmessage "\n✅ Fastpaths infrastructure provisioned!" + fi + + # Save cleanup hook for this module's path (developer or operator) + rm -rf /eks-workshop/hooks + # Map content path to manifests path (developer->developers, operator->operators) + fastpaths_top=$(echo "$module" | cut -d'/' -f1-2) + module_path="$manifests_path/modules/$fastpaths_top" + # Try plural form if singular doesn't exist + if [ ! -d "$module_path/.workshop" ]; then + module_path="${module_path}s" + fi + if [ -f "$module_path/.workshop/cleanup.sh" ]; then + mkdir -p /eks-workshop/hooks + cp "$module_path/.workshop/cleanup.sh" /eks-workshop/hooks + fi + + logmessage "\n✅ Environment is ${GREEN}ready${NC}!" exit fi fi @@ -104,31 +217,52 @@ fi kubectl delete pod load-generator --ignore-not-found -kubectl delete namespace other --ignore-not-found +# Delete namespace with timeout and handle metrics-server discovery failures +kubectl delete namespace other --ignore-not-found --timeout=60s || { + logmessage "⚠️ ${RED}Warning:${NC} Namespace deletion timed out or failed, forcing cleanup..." 
+ + # Remove finalizers to force deletion + kubectl get namespace other -o json 2>/dev/null | \ + jq '.spec.finalizers = []' | \ + kubectl replace --raw "/api/v1/namespaces/other/finalize" -f - 2>/dev/null || \ + kubectl patch namespace other -p '{"metadata":{"finalizers":[]}}' --type=merge 2>/dev/null || true + + # Wait a bit and verify deletion + sleep 5 + if kubectl get namespace other &>/dev/null; then + logmessage "⚠️ ${RED}Warning:${NC} Namespace 'other' still exists but continuing..." + fi +} + +if [[ $module = introduction/basics* || $module = introduction/getting-started* ]]; then + kubectl delete -k $base_path --ignore-not-found +else + kubectl apply -k $base_path --prune --all \ + --prune-allowlist=autoscaling/v1/HorizontalPodAutoscaler \ + --prune-allowlist=core/v1/Service \ + --prune-allowlist=core/v1/ConfigMap \ + --prune-allowlist=apps/v1/Deployment \ + --prune-allowlist=apps/v1/StatefulSet \ + --prune-allowlist=core/v1/ServiceAccount \ + --prune-allowlist=core/v1/Secret \ + --prune-allowlist=core/v1/PersistentVolumeClaim \ + --prune-allowlist=scheduling.k8s.io/v1/PriorityClass \ + --prune-allowlist=networking.k8s.io/v1/Ingress -kubectl apply -k $base_path --prune --all \ - --prune-allowlist=autoscaling/v1/HorizontalPodAutoscaler \ - --prune-allowlist=core/v1/Service \ - --prune-allowlist=core/v1/ConfigMap \ - --prune-allowlist=apps/v1/Deployment \ - --prune-allowlist=apps/v1/StatefulSet \ - --prune-allowlist=core/v1/ServiceAccount \ - --prune-allowlist=core/v1/Secret \ - --prune-allowlist=core/v1/PersistentVolumeClaim \ - --prune-allowlist=scheduling.k8s.io/v1/PriorityClass \ - --prune-allowlist=networking.k8s.io/v1/Ingress + logmessage "\n⏳ Waiting for application to become ready..." -logmessage "\n⏳ Waiting for application to become ready..." + sleep 10 -sleep 10 + kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A + kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A -kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A -kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A +fi # Addons rm -rf /eks-workshop/terraform mkdir -p /eks-workshop/terraform cp -R $manifests_path/.workshop/terraform/* /eks-workshop/terraform +rm -f /eks-workshop/terraform/lab-fastpaths.tf # Use a separate directory for .terraform directory # Allows us to wipe the TF configuration every time but retain providers installed @@ -149,7 +283,14 @@ terraform -chdir="$tf_dir" destroy --auto-approve rm -rf /eks-workshop/hooks +# Cleanup done, let's install the right item + if [ ! 
-z "$module" ]; then + if [ $module = "introduction/getting-started" ]; then + # This module doesn't need anything installed + exit + fi + module_path="$manifests_path/modules/$module" if [ -f "$module_path/.workshop/cleanup.sh" ]; then @@ -239,10 +380,12 @@ if [ $EXIT_CODE -ne 0 ]; then fi # Recycle workload pods in case stateful pods got restarted -kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernetes.io/component=service -A +kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernetes.io/component=service -A --ignore-not-found # Wait for the workload pods previously recycled -kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A +if kubectl get pods -A -l app.kubernetes.io/created-by=eks-workshop 2>/dev/null | grep -q .; then + kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A +fi # Finished logmessage "\n✅ Environment is ${GREEN}ready${NC}!" diff --git a/lab/iam/policies/base.yaml b/lab/iam/policies/base.yaml index c4152c8e4d..ff57b3e62f 100644 --- a/lab/iam/policies/base.yaml +++ b/lab/iam/policies/base.yaml @@ -10,13 +10,7 @@ Statement: - Effect: Allow Action: - cloudformation:CreateStack - Resource: - - !Sub arn:aws:cloudformation:${AWS::Region}:${AWS::AccountId}:stack/eksctl-${Env}* - Condition: - "Null": - cloudformation:RoleARN: "true" - - Effect: Allow - Action: + - cloudformation:CreateChangeSet - cloudformation:UpdateTerminationProtection - cloudformation:DeleteStack Resource: diff --git a/lab/scripts/entrypoint.sh b/lab/scripts/entrypoint.sh index fd7608a737..6cca2bb8c8 100644 --- a/lab/scripts/entrypoint.sh +++ b/lab/scripts/entrypoint.sh @@ -6,6 +6,11 @@ bash /tmp/setup.sh ln -s /eks-workshop/manifests /home/ec2-user/environment/eks-workshop +# We do auto mode first so we default to standard +if [ ! -z "$EKS_CLUSTER_AUTO_NAME" ]; then + aws eks update-kubeconfig --name $EKS_CLUSTER_AUTO_NAME +fi + if [ ! -z "$EKS_CLUSTER_NAME" ]; then aws eks update-kubeconfig --name $EKS_CLUSTER_NAME fi diff --git a/lab/scripts/setup.sh b/lab/scripts/setup.sh index da7fd3add4..097a388b74 100644 --- a/lab/scripts/setup.sh +++ b/lab/scripts/setup.sh @@ -21,6 +21,7 @@ export AWS_PAGER="" export AWS_REGION="${AWS_REGION}" export AWS_ACCOUNT_ID="${AWS_ACCOUNT_ID}" export EKS_CLUSTER_NAME="${EKS_CLUSTER_NAME}" +export EKS_CLUSTER_AUTO_NAME="${EKS_CLUSTER_AUTO_NAME}" export EKS_DEFAULT_MNG_NAME="default" export EKS_DEFAULT_MNG_MIN=3 export EKS_DEFAULT_MNG_MAX=6 @@ -29,21 +30,42 @@ EOT touch ~/.bashrc.d/workshop-env.bash +REPOSITORY_OWNER=${REPOSITORY_OWNER:-"aws-samples"} +REPOSITORY_NAME=${REPOSITORY_NAME:-"eks-workshop-v2"} +REPOSITORY_REF=${REPOSITORY_REF:-"main"} + cat << EOT > /home/ec2-user/.bashrc.d/aliases.bash -function prepare-environment() { +function prepare-environment() { + start_time=\$(date +%s) + + if [[ "\$1" == fastpaths/* ]]; then + cluster_name="\$EKS_CLUSTER_AUTO_NAME" + create_cmd="create-cluster-auto" + else + cluster_name="\$EKS_CLUSTER_NAME" + create_cmd="create-cluster" + fi + + if ! aws eks describe-cluster --name "\$cluster_name" --no-cli-pager &>/dev/null; then + echo "Error: EKS cluster '\$cluster_name' does not exist." + echo "Please create it first by running: \$create_cmd" + return 1 + fi + bash /usr/local/bin/reset-environment \$1 exit_code=\$? 
source ~/.bashrc.d/workshop-env.bash + echo "Execution time: \$((\$(date +%s) - start_time)) seconds" return \$exit_code } function use-cluster() { bash /usr/local/bin/use-cluster \$1; source ~/.bashrc.d/env.bash; } -function create-cluster() { URL=https://raw.githubusercontent.com/${REPOSITORY_OWNER}/${REPOSITORY_NAME}/refs/heads/${REPOSITORY_REF}/cluster/eksctl/cluster.yaml; echo "Creating cluster with eksctl from $URL"; curl -fsSL $URL | envsubst | eksctl create cluster -f -; } +function create-cluster() { URL=https://raw.githubusercontent.com/\${REPOSITORY_OWNER}/\${REPOSITORY_NAME}/refs/heads/\${REPOSITORY_REF}/cluster/eksctl/cluster.yaml; echo "Creating cluster with eksctl from \$URL"; curl -fsSL \$URL | envsubst | eksctl create cluster -f -; } +function create-cluster-auto() { URL=https://raw.githubusercontent.com/\${REPOSITORY_OWNER}/\${REPOSITORY_NAME}/refs/heads/\${REPOSITORY_REF}/cluster/eksctl/cluster-auto.yaml; echo "Creating cluster with eksctl from \$URL"; curl -fsSL \$URL | envsubst | eksctl create cluster -f -; } +function delete-cluster() { URL=https://raw.githubusercontent.com/\${REPOSITORY_OWNER}/\${REPOSITORY_NAME}/refs/heads/\${REPOSITORY_REF}/cluster/eksctl/cluster.yaml; echo "Deleting cluster with eksctl from \$URL"; curl -fsSL \$URL | envsubst | eksctl delete cluster -f -; } +function delete-cluster-auto() { URL=https://raw.githubusercontent.com/\${REPOSITORY_OWNER}/\${REPOSITORY_NAME}/refs/heads/\${REPOSITORY_REF}/cluster/eksctl/cluster-auto.yaml; echo "Deleting cluster with eksctl from \$URL"; curl -fsSL \$URL | envsubst | eksctl delete cluster -f -; } EOT -REPOSITORY_OWNER=${REPOSITORY_OWNER:-"aws-samples"} -REPOSITORY_NAME=${REPOSITORY_NAME:-"eks-workshop-v2"} - if [ ! -z "$REPOSITORY_REF" ]; then cat << EOT > ~/.bashrc.d/repository.bash export REPOSITORY_OWNER='${REPOSITORY_OWNER}' @@ -70,4 +92,4 @@ echo "export INBOUND_CIDRS='${INBOUND_CIDRS}'" > ~/.bashrc.d/inbound-cidr.bash /usr/local/bin/kubectl completion bash > ~/.bashrc.d/kubectl_completion.bash echo "alias k=kubectl" >> ~/.bashrc.d/kubectl_completion.bash -echo "complete -F __start_kubectl k" >> ~/.bashrc.d/kubectl_completion.bash \ No newline at end of file +echo "complete -F __start_kubectl k" >> ~/.bashrc.d/kubectl_completion.bash diff --git a/manifests/.workshop/terraform/base.tf b/manifests/.workshop/terraform/base.tf index 6bc1724b77..f4b8d33863 100644 --- a/manifests/.workshop/terraform/base.tf +++ b/manifests/.workshop/terraform/base.tf @@ -45,6 +45,13 @@ variable "resources_precreated" { default = false } +# tflint-ignore: terraform_unused_declarations +variable "eks_cluster_auto_id" { + description = "EKS Auto Mode cluster name" + type = string + default = "eks-workshop-auto" +} + # tflint-ignore: terraform_unused_declarations variable "inbound_cidrs" { description = "CIDR range to allowlist for inbound traffic" @@ -56,12 +63,31 @@ data "aws_partition" "current" {} data "aws_caller_identity" "current" {} data "aws_region" "current" {} +data "aws_eks_clusters" "available" {} + data "aws_eks_cluster" "eks_cluster" { - name = var.eks_cluster_id + count = local.standard_cluster_exists ? 1 : 0 + name = var.eks_cluster_id } data "aws_eks_cluster_auth" "this" { - name = var.eks_cluster_id + count = local.standard_cluster_exists ? 
1 : 0 + name = var.eks_cluster_id +} + +locals { + standard_cluster_exists = contains(data.aws_eks_clusters.available.names, var.eks_cluster_id) + auto_cluster_exists = contains(data.aws_eks_clusters.available.names, var.eks_cluster_auto_id) +} + +data "aws_eks_cluster" "eks_cluster_auto" { + count = local.auto_cluster_exists ? 1 : 0 + name = var.eks_cluster_auto_id +} + +data "aws_eks_cluster_auth" "this_auto" { + count = local.auto_cluster_exists ? 1 : 0 + name = var.eks_cluster_auto_id } provider "aws" { @@ -71,25 +97,41 @@ provider "aws" { } provider "kubernetes" { - host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority[0].data) - token = data.aws_eks_cluster_auth.this.token + host = try(data.aws_eks_cluster.eks_cluster[0].endpoint, "https://localhost") + cluster_ca_certificate = try(base64decode(data.aws_eks_cluster.eks_cluster[0].certificate_authority[0].data), "") + token = try(data.aws_eks_cluster_auth.this[0].token, "") +} + +provider "kubernetes" { + alias = "auto_mode" + host = try(data.aws_eks_cluster.eks_cluster_auto[0].endpoint, "https://localhost") + cluster_ca_certificate = try(base64decode(data.aws_eks_cluster.eks_cluster_auto[0].certificate_authority[0].data), "") + token = try(data.aws_eks_cluster_auth.this_auto[0].token, "") +} + +provider "helm" { + kubernetes { + host = try(data.aws_eks_cluster.eks_cluster[0].endpoint, "https://localhost") + cluster_ca_certificate = try(base64decode(data.aws_eks_cluster.eks_cluster[0].certificate_authority[0].data), "") + token = try(data.aws_eks_cluster_auth.this[0].token, "") + } } provider "helm" { + alias = "auto_mode" kubernetes { - host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority[0].data) - token = data.aws_eks_cluster_auth.this.token + host = try(data.aws_eks_cluster.eks_cluster_auto[0].endpoint, "https://localhost") + cluster_ca_certificate = try(base64decode(data.aws_eks_cluster.eks_cluster_auto[0].certificate_authority[0].data), "") + token = try(data.aws_eks_cluster_auth.this_auto[0].token, "") } } provider "kubectl" { apply_retry_count = 10 - host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority[0].data) + host = try(data.aws_eks_cluster.eks_cluster[0].endpoint, "https://localhost") + cluster_ca_certificate = try(base64decode(data.aws_eks_cluster.eks_cluster[0].certificate_authority[0].data), "") load_config_file = false - token = data.aws_eks_cluster_auth.this.token + token = try(data.aws_eks_cluster_auth.this[0].token, "") } locals { @@ -98,12 +140,12 @@ locals { env = var.eks_cluster_id } - eks_cluster_id = data.aws_eks_cluster.eks_cluster.id - eks_oidc_issuer_url = replace(data.aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer, "https://", "") - eks_cluster_endpoint = data.aws_eks_cluster.eks_cluster.endpoint - eks_cluster_version = data.aws_eks_cluster.eks_cluster.version - eks_oidc_provider_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${local.eks_oidc_issuer_url}" - cluster_security_group_id = data.aws_eks_cluster.eks_cluster.vpc_config[0].cluster_security_group_id + eks_cluster_id = try(data.aws_eks_cluster.eks_cluster[0].id, var.eks_cluster_id) + eks_oidc_issuer_url = try(replace(data.aws_eks_cluster.eks_cluster[0].identity[0].oidc[0].issuer, "https://", ""), "") + eks_cluster_endpoint = 
try(data.aws_eks_cluster.eks_cluster[0].endpoint, "") + eks_cluster_version = try(data.aws_eks_cluster.eks_cluster[0].version, "") + eks_oidc_provider_arn = try("arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${local.eks_oidc_issuer_url}", "") + cluster_security_group_id = try(data.aws_eks_cluster.eks_cluster[0].vpc_config[0].cluster_security_group_id, "") addon_context = { aws_caller_identity_account_id = data.aws_caller_identity.current.account_id diff --git a/manifests/.workshop/terraform/lab-fastpaths.tf b/manifests/.workshop/terraform/lab-fastpaths.tf new file mode 100644 index 0000000000..d650b5072a --- /dev/null +++ b/manifests/.workshop/terraform/lab-fastpaths.tf @@ -0,0 +1,21 @@ +module "lab" { + source = "./lab" + + providers = { + helm.auto_mode = helm.auto_mode + kubernetes.auto_mode = kubernetes.auto_mode + } + + eks_cluster_id = local.eks_cluster_id + eks_cluster_auto_id = var.eks_cluster_auto_id + eks_cluster_version = local.eks_cluster_version + cluster_security_group_id = local.cluster_security_group_id + addon_context = local.addon_context + tags = local.tags + resources_precreated = var.resources_precreated + inbound_cidrs = var.inbound_cidrs +} + +locals { + environment_variables = try(module.lab.environment_variables, []) +} diff --git a/manifests/modules/autoscaling/compute/karpenter/automode/scale/deployment.yaml b/manifests/modules/autoscaling/compute/karpenter/automode/scale/deployment.yaml new file mode 100644 index 0000000000..9867507c69 --- /dev/null +++ b/manifests/modules/autoscaling/compute/karpenter/automode/scale/deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: inflate + namespace: other +spec: + replicas: 0 + selector: + matchLabels: + app: inflate + template: + metadata: + labels: + app: inflate + spec: + terminationGracePeriodSeconds: 0 + containers: + - name: inflate + image: public.ecr.aws/eks-distro/kubernetes/pause:3.2 + resources: + requests: + memory: 1Gi diff --git a/manifests/modules/autoscaling/compute/karpenter/automode/scale/kustomization.yaml b/manifests/modules/autoscaling/compute/karpenter/automode/scale/kustomization.yaml new file mode 100644 index 0000000000..9c2d28b0c9 --- /dev/null +++ b/manifests/modules/autoscaling/compute/karpenter/automode/scale/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - deployment.yaml diff --git a/manifests/modules/fastpaths/developers/.workshop/cleanup.sh b/manifests/modules/fastpaths/developers/.workshop/cleanup.sh new file mode 100644 index 0000000000..5f6c636f9c --- /dev/null +++ b/manifests/modules/fastpaths/developers/.workshop/cleanup.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +set -e + +echo "Cleaning up developer essentials resources..." 
+ +# Delete load generator if running +kubectl delete pod load-generator --ignore-not-found + +# Delete ingress resources +kubectl delete ingress --all -A --ignore-not-found +kubectl delete ingressclass eks-auto-alb --ignore-not-found + +# Delete KEDA ScaledObjects and HPA +kubectl delete scaledobject --all -n ui --ignore-not-found 2>/dev/null || true +kubectl delete hpa --all -n ui --ignore-not-found 2>/dev/null || true + +# Delete EBS storage class and modified StatefulSets + PVCs +kubectl delete statefulset -l app.kubernetes.io/created-by=eks-workshop -A --ignore-not-found +kubectl delete pvc --all -A --ignore-not-found +kubectl delete storageclass ebs-sc --ignore-not-found + +# Delete pod identity associations for carts +for assoc in $(aws eks list-pod-identity-associations --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --namespace carts --query 'associations[].associationId' --output text 2>/dev/null); do + aws eks delete-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --association-id $assoc 2>/dev/null || true +done + +# Delete pod identity associations for keda (created by install-keda.md) +for assoc in $(aws eks list-pod-identity-associations --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --namespace keda --query 'associations[].associationId' --output text 2>/dev/null); do + aws eks delete-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --association-id $assoc 2>/dev/null || true +done + +# Delete network policies +kubectl delete networkpolicy --all -A --ignore-not-found 2>/dev/null || true + +echo "Cleanup complete!" diff --git a/manifests/modules/fastpaths/developers/.workshop/terraform/main.tf b/manifests/modules/fastpaths/developers/.workshop/terraform/main.tf new file mode 100644 index 0000000000..f7d1c3e1ef --- /dev/null +++ b/manifests/modules/fastpaths/developers/.workshop/terraform/main.tf @@ -0,0 +1,14 @@ +module "preprovision" { + source = "./preprovision" + count = var.resources_precreated ? 
0 : 1 + + providers = { + helm.auto_mode = helm.auto_mode + kubernetes.auto_mode = kubernetes.auto_mode + } + + eks_cluster_id = var.eks_cluster_id + eks_cluster_auto_id = var.eks_cluster_auto_id + tags = var.tags + inbound_cidrs = var.inbound_cidrs +} diff --git a/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/eks-auto.tf b/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/eks-auto.tf new file mode 100644 index 0000000000..416f4dd38c --- /dev/null +++ b/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/eks-auto.tf @@ -0,0 +1,29 @@ + +data "aws_eks_cluster" "eks_cluster_auto" { + name = var.eks_cluster_auto_id +} + +data "aws_eks_cluster_auth" "this_auto" { + name = var.eks_cluster_auto_id +} + +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} +data "aws_availability_zones" "available" { + state = "available" +} + +data "aws_region" "current" {} + +# Helm provider configuration for EKS +terraform { + required_version = ">= 1.3" + + required_providers { + helm = { + source = "hashicorp/helm" + version = "2.17.0" + configuration_aliases = [helm.auto_mode] + } + } +} diff --git a/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/fluent-bit.tf b/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/fluent-bit.tf new file mode 100644 index 0000000000..ac064fa9d4 --- /dev/null +++ b/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/fluent-bit.tf @@ -0,0 +1,132 @@ +resource "random_string" "fluentbit_log_group" { + length = 6 + special = false +} + +locals { + cw_log_group_name = "/${var.eks_cluster_auto_id}/worker-fluentbit-logs-${random_string.fluentbit_log_group.result}" +} + +# Create CloudWatch log group for FluentBit +resource "aws_cloudwatch_log_group" "fluentbit" { + name = local.cw_log_group_name + retention_in_days = 7 + tags = var.tags +} + +# IAM role for FluentBit with CloudWatch write permissions using Pod Identity +resource "aws_iam_role" "auto_fluentbit" { + name_prefix = "${var.eks_cluster_auto_id}-fb-" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "sts:AssumeRole", + "sts:TagSession" + ] + Effect = "Allow" + Principal = { + Service = "pods.eks.amazonaws.com" + } + }] + }) + + tags = var.tags +} + +# IAM policy for FluentBit CloudWatch log write access +resource "aws_iam_policy" "auto_fluentbit_cloudwatch" { + name_prefix = "${var.eks_cluster_auto_id}-fb-pol-" + description = "CloudWatch Logs policy for FluentBit" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams" + ] + Resource = "arn:${data.aws_partition.current.partition}:logs:*:${data.aws_caller_identity.current.account_id}:log-group:/${var.eks_cluster_auto_id}/*" + } + ] + }) + + tags = var.tags +} + +# Attach CloudWatch policy to FluentBit role +resource "aws_iam_role_policy_attachment" "auto_fluentbit_cloudwatch" { + policy_arn = aws_iam_policy.auto_fluentbit_cloudwatch.arn + role = aws_iam_role.auto_fluentbit.name +} + +# EKS Pod Identity Association for FluentBit +resource "aws_eks_pod_identity_association" "fluentbit" { + cluster_name = var.eks_cluster_auto_id + namespace = "amazon-cloudwatch" + service_account = "aws-for-fluent-bit" + role_arn = aws_iam_role.auto_fluentbit.arn + + depends_on = [ + aws_iam_role.auto_fluentbit + ] +} + +# 
Helm release for AWS for FluentBit (Pod Identity compatible) +resource "helm_release" "aws_for_fluent_bit" { + name = "aws-for-fluent-bit" + repository = "https://aws.github.io/eks-charts" + chart = "aws-for-fluent-bit" + namespace = "amazon-cloudwatch" + version = "0.1.35" + provider = helm.auto_mode + + create_namespace = true + + set { + name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + value = aws_iam_role.auto_fluentbit.arn + } + set { + name = "cloudWatchLogs.enabled" + value = "true" + } + set { + name = "cloudWatchLogs.region" + value = data.aws_region.current.id + } + set { + name = "cloudWatchLogs.logGroupName" + value = aws_cloudwatch_log_group.fluentbit.name + } + set { + name = "firehose.enabled" + value = "false" + } + set { + name = "kinesis.enabled" + value = "false" + } + set { + name = "image.tag" + value = "2.32.5.20250327" + } + + depends_on = [ + aws_cloudwatch_log_group.fluentbit, + aws_eks_pod_identity_association.fluentbit + ] +} + +output "environment_variables" { + description = "Environment variables to be added to the IDE shell" + value = { + CLOUDWATCH_LOG_GROUP_NAME = aws_cloudwatch_log_group.fluentbit.name + } +} diff --git a/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/keda.tf b/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/keda.tf new file mode 100644 index 0000000000..6fcab9515c --- /dev/null +++ b/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/keda.tf @@ -0,0 +1,77 @@ +variable "keda_chart_version" { + description = "The chart version of keda to use" + type = string + # renovate-helm: depName=keda registryUrl=https://kedacore.github.io/charts + default = "2.18.0" +} + +resource "aws_iam_role" "keda_auto" { + name = "${var.eks_cluster_auto_id}-keda" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "sts:AssumeRole", + "sts:TagSession" + ] + Effect = "Allow" + Principal = { + Service = "pods.eks.amazonaws.com" + } + }] + }) + + tags = var.tags +} + +resource "aws_iam_role_policy_attachment" "keda_auto" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/CloudWatchReadOnlyAccess" + role = aws_iam_role.keda_auto.name +} + +# EKS Pod Identity Association for FluentBit +resource "aws_eks_pod_identity_association" "keda_auto" { + cluster_name = var.eks_cluster_auto_id + namespace = "keda" + service_account = "keda-operator" + role_arn = aws_iam_role.keda_auto.arn +} + +resource "kubernetes_manifest" "ui_alb" { + count = 0 # Created in exposing workloads with Ingress + provider = kubernetes.auto_mode + manifest = { + "apiVersion" = "networking.k8s.io/v1" + "kind" = "Ingress" + "metadata" = { + "name" = "ui_keda" + "namespace" = "ui" + "annotations" = { + "alb.ingress.kubernetes.io/scheme" = "internet-facing" + "alb.ingress.kubernetes.io/target-type" = "ip" + "alb.ingress.kubernetes.io/healthcheck-path" = "/actuator/health/liveness" + "alb.ingress.kubernetes.io/inbound-cidrs" = var.inbound_cidrs + } + } + "spec" = { + ingressClassName = "eks-auto-alb", + "rules" = [{ + "http" = { + paths = [{ + path = "/" + pathType = "Prefix" + "backend" = { + service = { + name = "ui" + port = { + number = 80 + } + } + } + }] + } + }] + } + } +} diff --git a/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/pod-identity.tf b/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/pod-identity.tf new file mode 100644 index 0000000000..f9aced1ef1 --- /dev/null +++ 
b/manifests/modules/fastpaths/developers/.workshop/terraform/preprovision/pod-identity.tf @@ -0,0 +1,167 @@ +# Pod Identity resources for carts/DynamoDB (from operator preprovision) + +resource "aws_dynamodb_table" "carts" { + name = "${var.eks_cluster_auto_id}-carts" + hash_key = "id" + billing_mode = "PAY_PER_REQUEST" + stream_enabled = true + stream_view_type = "NEW_AND_OLD_IMAGES" + + server_side_encryption { + enabled = true + kms_key_arn = aws_kms_key.cmk_dynamodb.arn + } + + attribute { + name = "id" + type = "S" + } + + attribute { + name = "customerId" + type = "S" + } + + global_secondary_index { + name = "idx_global_customerId" + hash_key = "customerId" + projection_type = "ALL" + } + + tags = var.tags +} + +module "iam_assumable_role_carts" { + source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role" + version = "5.60.0" + create_role = true + role_requires_mfa = false + role_name = "${var.eks_cluster_auto_id}-carts-dynamo" + trusted_role_services = ["pods.eks.amazonaws.com"] + custom_role_policy_arns = [aws_iam_policy.carts_dynamo.arn] + trusted_role_actions = ["sts:AssumeRole", "sts:TagSession"] + + tags = var.tags +} + +resource "aws_iam_policy" "carts_dynamo" { + name = "${var.eks_cluster_auto_id}-carts-dynamo" + path = "/" + description = "Dynamo policy for carts application" + + policy = </dev/null || true + +# Delete SecretProviderClass and ClusterSecretStore +kubectl delete secretproviderclass --all -A --ignore-not-found 2>/dev/null || true +kubectl delete clustersecretstore --all --ignore-not-found 2>/dev/null || true +kubectl delete externalsecret --all -A --ignore-not-found 2>/dev/null || true + +# Delete pod identity associations for carts +echo "Deleting carts pod identity associations for cluster ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto}..." +for assoc in $(aws eks list-pod-identity-associations --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --namespace carts --query 'associations[].associationId' --output text 2>&1); do + echo " Deleting association $assoc" + aws eks delete-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --association-id $assoc 2>&1 || echo " Failed to delete $assoc" +done + +# Delete pod identity associations for keda (created by install-keda.md) +echo "Deleting keda pod identity associations for cluster ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto}..." +for assoc in $(aws eks list-pod-identity-associations --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --namespace keda --query 'associations[].associationId' --output text 2>&1); do + echo " Deleting association $assoc" + aws eks delete-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --association-id $assoc 2>&1 || echo " Failed to delete $assoc" +done + +# Delete modified StatefulSets + PVCs (from any EBS changes) +kubectl delete statefulset -l app.kubernetes.io/created-by=eks-workshop -A --ignore-not-found +kubectl delete pvc --all -A --ignore-not-found +kubectl delete storageclass ebs-sc --ignore-not-found + +echo "Cleanup complete!" 
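The developers and operators cleanup scripts above duplicate the same list-and-delete loop for EKS Pod Identity associations in the carts and keda namespaces. Below is a minimal sketch of a shared helper that both scripts could source; the function name and the idea of a shared library file are assumptions, and only the two AWS CLI calls themselves come from this PR.

```bash
#!/bin/bash
# Hypothetical shared helper for the fastpaths cleanup scripts (not part of this PR).
# It wraps the list/delete loop that both cleanup.sh files currently repeat.

delete_pod_identity_associations() {
  local cluster_name="$1"
  local namespace="$2"
  echo "Deleting ${namespace} pod identity associations for cluster ${cluster_name}..."
  for assoc in $(aws eks list-pod-identity-associations \
      --cluster-name "$cluster_name" \
      --namespace "$namespace" \
      --query 'associations[].associationId' \
      --output text 2>/dev/null); do
    echo "  Deleting association $assoc"
    aws eks delete-pod-identity-association \
      --cluster-name "$cluster_name" \
      --association-id "$assoc" 2>/dev/null || echo "  Failed to delete $assoc"
  done
}

# Usage mirroring the carts and keda cleanup performed above:
delete_pod_identity_associations "${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto}" carts
delete_pod_identity_associations "${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto}" keda
```

Factoring the loop out this way would also make it straightforward to cover additional namespaces in later fastpaths modules.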
diff --git a/manifests/modules/fastpaths/operators/external-secrets/cluster-secret-store.yaml b/manifests/modules/fastpaths/operators/external-secrets/cluster-secret-store.yaml new file mode 100644 index 0000000000..8a70b01356 --- /dev/null +++ b/manifests/modules/fastpaths/operators/external-secrets/cluster-secret-store.yaml @@ -0,0 +1,9 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: "cluster-secret-store" +spec: + provider: + aws: + service: SecretsManager + region: $AWS_REGION diff --git a/manifests/modules/fastpaths/operators/network-policies/allow-ui-egress.yaml b/manifests/modules/fastpaths/operators/network-policies/allow-ui-egress.yaml new file mode 100644 index 0000000000..b8e54df93f --- /dev/null +++ b/manifests/modules/fastpaths/operators/network-policies/allow-ui-egress.yaml @@ -0,0 +1,23 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: ui + name: allow-ui-egress +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: ui + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 172.20.0.10/32 + - namespaceSelector: + matchLabels: + podSelector: + matchLabels: + app.kubernetes.io/component: service + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system diff --git a/manifests/modules/fastpaths/operators/network-policies/vpc-cni-policies.yaml b/manifests/modules/fastpaths/operators/network-policies/vpc-cni-policies.yaml new file mode 100644 index 0000000000..edc33fc333 --- /dev/null +++ b/manifests/modules/fastpaths/operators/network-policies/vpc-cni-policies.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: amazon-vpc-cni + namespace: kube-system +data: + enable-network-policy-controller: "true" diff --git a/manifests/modules/introduction/basics/.workshop/cleanup.sh b/manifests/modules/introduction/basics/.workshop/cleanup.sh new file mode 100644 index 0000000000..2f50b4cda7 --- /dev/null +++ b/manifests/modules/introduction/basics/.workshop/cleanup.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +set -e + +echo "Cleaning up Kubernetes Basics module resources..." + +# Clean up pods +echo "Cleaning up pods..." +kubectl delete pod ui-pod -n ui --ignore-not-found=true +kubectl delete pod test-pod --ignore-not-found=true + +# Clean up secrets +echo "Cleaning up secrets..." +kubectl delete secret catalog-db -n catalog --ignore-not-found=true + +# Clean up daemonsets +echo "Cleaning up daemonsets..." +kubectl delete daemonset log-collector -n kube-system --ignore-not-found=true + +# Clean up jobs and cronjobs +echo "Cleaning up jobs and cronjobs..." +kubectl delete job data-processor -n catalog --ignore-not-found=true +kubectl delete cronjob catalog-cleanup -n catalog --ignore-not-found=true +kubectl delete job manual-cleanup -n catalog --ignore-not-found=true + +# Delete any jobs that start with catalog-cleanup (created by CronJob) +kubectl get jobs -n catalog -o name 2>/dev/null | grep "job/catalog-cleanup" | xargs -r kubectl delete -n catalog --ignore-not-found=true + +# Clean up namespaces (do this last as it will clean up any remaining resources) +echo "Cleaning up namespaces..." +kubectl delete namespace ui --ignore-not-found=true +kubectl delete namespace catalog --ignore-not-found=true + +echo "Kubernetes Basics module cleanup completed." 
diff --git a/manifests/modules/introduction/basics/configmaps/kustomization.yaml b/manifests/modules/introduction/basics/configmaps/kustomization.yaml new file mode 100644 index 0000000000..44d8b488f3 --- /dev/null +++ b/manifests/modules/introduction/basics/configmaps/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../../../base-application/ui diff --git a/manifests/modules/introduction/basics/configmaps/ui-pod-with-config.yaml b/manifests/modules/introduction/basics/configmaps/ui-pod-with-config.yaml new file mode 100644 index 0000000000..6f5551cc87 --- /dev/null +++ b/manifests/modules/introduction/basics/configmaps/ui-pod-with-config.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: ui-pod + namespace: ui + labels: + app.kubernetes.io/name: ui + app.kubernetes.io/component: service + app.kubernetes.io/created-by: eks-workshop +spec: + containers: + - name: ui + image: public.ecr.aws/aws-containers/retail-store-sample-ui:0.4.0 + ports: + - containerPort: 8080 + envFrom: + - configMapRef: + name: ui + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" diff --git a/manifests/modules/introduction/basics/daemonsets/log-collector.yaml b/manifests/modules/introduction/basics/daemonsets/log-collector.yaml new file mode 100644 index 0000000000..0a8c733f78 --- /dev/null +++ b/manifests/modules/introduction/basics/daemonsets/log-collector.yaml @@ -0,0 +1,41 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: log-collector + namespace: kube-system + labels: + app.kubernetes.io/name: log-collector + app.kubernetes.io/created-by: eks-workshop +spec: + selector: + matchLabels: + app: log-collector + template: + metadata: + labels: + app: log-collector + spec: + containers: + - name: fluentd + image: public.ecr.aws/aws-observability/aws-for-fluent-bit:stable + volumeMounts: + - name: varlog + mountPath: /var/log + readOnly: true + - name: containers + mountPath: /var/lib/docker/containers + readOnly: true + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + volumes: + - name: varlog + hostPath: + path: /var/log + - name: containers + hostPath: + path: /var/lib/docker/containers diff --git a/manifests/modules/introduction/basics/jobs/catalog-cleanup.yaml b/manifests/modules/introduction/basics/jobs/catalog-cleanup.yaml new file mode 100644 index 0000000000..b507b9598f --- /dev/null +++ b/manifests/modules/introduction/basics/jobs/catalog-cleanup.yaml @@ -0,0 +1,52 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: catalog-cleanup + namespace: catalog + labels: + app.kubernetes.io/name: catalog-cleanup + app.kubernetes.io/created-by: eks-workshop +spec: + schedule: "*/1 * * * *" # Every 1 minute for demo purposes + timeZone: "UTC" + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + metadata: + labels: + app: catalog-cleanup + spec: + restartPolicy: OnFailure + containers: + - name: cleanup + image: busybox:1.36 + command: + - /bin/sh + - -c + - | + echo "Starting cleanup job at $(date)" + echo "Checking for temporary files..." + + # Simulate finding and cleaning up files + echo "Found 3 temporary files to clean up:" + echo " - /tmp/cache_file_1.tmp" + echo " - /tmp/cache_file_2.tmp" + echo " - /tmp/old_log.log" + + # Simulate cleanup process + sleep 3 + echo "Cleaning up temporary files..." 
+ sleep 2 + echo "Temporary files removed successfully" + + echo "Cleanup completed at $(date)" + echo "Next cleanup scheduled in 1 minute" + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 100m + memory: 128Mi diff --git a/manifests/modules/introduction/basics/jobs/data-processing-job.yaml b/manifests/modules/introduction/basics/jobs/data-processing-job.yaml new file mode 100644 index 0000000000..510c52e0ae --- /dev/null +++ b/manifests/modules/introduction/basics/jobs/data-processing-job.yaml @@ -0,0 +1,55 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-processor + namespace: catalog + labels: + app.kubernetes.io/name: data-processor + app.kubernetes.io/created-by: eks-workshop +spec: + completions: 1 + parallelism: 1 + backoffLimit: 3 + template: + metadata: + labels: + app: data-processor + spec: + restartPolicy: Never + containers: + - name: processor + image: busybox:1.36 + command: + - /bin/sh + - -c + - | + echo "Starting data processing job..." + echo "Processing catalog data files..." + + # Simulate processing multiple files + for i in $(seq 1 5); do + echo "Processing file $i/5..." + sleep 2 + echo "File $i processed successfully" + done + + echo "Generating summary report..." + cat > /tmp/processing-report.txt << EOF + Data Processing Report + ===================== + Job: data-processor + Date: $(date) + Files processed: 5 + Status: Completed successfully + EOF + + echo "Report generated:" + cat /tmp/processing-report.txt + echo "Data processing job completed successfully!" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi diff --git a/manifests/modules/introduction/basics/namespaces/namespace.yaml b/manifests/modules/introduction/basics/namespaces/namespace.yaml new file mode 100644 index 0000000000..3c16236b55 --- /dev/null +++ b/manifests/modules/introduction/basics/namespaces/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ui + labels: + app.kubernetes.io/created-by: eks-workshop diff --git a/manifests/modules/introduction/basics/pods/ui-pod.yaml b/manifests/modules/introduction/basics/pods/ui-pod.yaml new file mode 100644 index 0000000000..b207ed72b5 --- /dev/null +++ b/manifests/modules/introduction/basics/pods/ui-pod.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: ui-pod + namespace: ui + labels: + app.kubernetes.io/name: ui + app.kubernetes.io/component: service +spec: + containers: + - name: ui + image: public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1 + ports: + - name: http + containerPort: 8080 + protocol: TCP + env: + - name: JAVA_OPTS + value: -XX:MaxRAMPercentage=75.0 -Djava.security.egd=file:/dev/urandom + resources: + requests: + cpu: 250m + memory: 1.5Gi + limits: + memory: 1.5Gi diff --git a/manifests/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml b/manifests/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml new file mode 100644 index 0000000000..1b2368ea82 --- /dev/null +++ b/manifests/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: catalog-pod + namespace: catalog + labels: + app.kubernetes.io/name: catalog + app.kubernetes.io/component: service + app.kubernetes.io/created-by: eks-workshop +spec: + containers: + - name: catalog + image: public.ecr.aws/aws-containers/retail-store-sample-catalog:1.2.1 + ports: + - containerPort: 8080 + envFrom: + - configMapRef: + name: catalog + - secretRef: + name: catalog-db + 
resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" diff --git a/manifests/modules/introduction/basics/secrets/kustomization.yaml b/manifests/modules/introduction/basics/secrets/kustomization.yaml new file mode 100644 index 0000000000..5ae93b2ed0 --- /dev/null +++ b/manifests/modules/introduction/basics/secrets/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../../../base-application/catalog diff --git a/manifests/modules/introduction/basics/services/deployment.yaml b/manifests/modules/introduction/basics/services/deployment.yaml new file mode 100644 index 0000000000..e201e7a6cc --- /dev/null +++ b/manifests/modules/introduction/basics/services/deployment.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ui + labels: + app.kubernetes.io/created-by: eks-workshop + app.kubernetes.io/type: app +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: ui + app.kubernetes.io/instance: ui + app.kubernetes.io/component: service + template: + spec: + containers: + - name: ui + env: + - name: MANAGEMENT_INFO_ENV_ENABLED + value: "true" + - name: INFO_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + diff --git a/manifests/modules/introduction/basics/services/kustomization.yaml b/manifests/modules/introduction/basics/services/kustomization.yaml new file mode 100644 index 0000000000..33a9c05913 --- /dev/null +++ b/manifests/modules/introduction/basics/services/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../../../base-application/ui +patches: + - path: deployment.yaml diff --git a/manifests/modules/observability/kubecost/.workshop/terraform/vars.tf b/manifests/modules/observability/kubecost/.workshop/terraform/vars.tf index 6576fd0ebd..758e44b750 100644 --- a/manifests/modules/observability/kubecost/.workshop/terraform/vars.tf +++ b/manifests/modules/observability/kubecost/.workshop/terraform/vars.tf @@ -38,7 +38,7 @@ variable "kubecost_chart_version" { description = "The chart version of kubecost to use" type = string # renovate-helm: depName=cost-analyzer registryUrl=https://kubecost.github.io/cost-analyzer - default = "2.8.4" + default = "2.8.6" } # tflint-ignore: terraform_unused_declarations diff --git a/manifests/modules/security/eks-pod-identity/dynamo/config.properties b/manifests/modules/security/eks-pod-identity/dynamo/config.properties index 59afe48619..9c242fc080 100644 --- a/manifests/modules/security/eks-pod-identity/dynamo/config.properties +++ b/manifests/modules/security/eks-pod-identity/dynamo/config.properties @@ -1,2 +1,3 @@ +AWS_REGION=${AWS_REGION} RETAIL_CART_PERSISTENCE_PROVIDER=dynamodb -RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME=${CARTS_DYNAMODB_TABLENAME} \ No newline at end of file +RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME=${CARTS_DYNAMODB_TABLENAME} diff --git a/manifests/modules/security/secrets-manager/secret-provider-class.yaml b/manifests/modules/security/secrets-manager/secret-provider-class.yaml index cd3c711753..ae16380b15 100644 --- a/manifests/modules/security/secrets-manager/secret-provider-class.yaml +++ b/manifests/modules/security/secrets-manager/secret-provider-class.yaml @@ -14,6 +14,7 @@ spec: objectAlias: username - path: password objectAlias: password + usePodIdentity: "true" secretObjects: - secretName: catalog-secret type: Opaque diff --git 
a/manifests/modules/troubleshooting/alb/.workshop/terraform/template/other_issue.json b/manifests/modules/troubleshooting/alb/.workshop/terraform/template/other_issue.json index 4503e4e9ba..e9e0a807b2 100644 --- a/manifests/modules/troubleshooting/alb/.workshop/terraform/template/other_issue.json +++ b/manifests/modules/troubleshooting/alb/.workshop/terraform/template/other_issue.json @@ -127,6 +127,8 @@ { "Effect": "Allow", "Action": [ + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancer", "elasticloadbalancing:CreateListener", "elasticloadbalancing:DeleteListener", "elasticloadbalancing:CreateRule", diff --git a/netlify.toml b/netlify.toml index 32d9f2b38d..10b7ad1165 100644 --- a/netlify.toml +++ b/netlify.toml @@ -3,7 +3,7 @@ command = "bash website/netlify-build.sh" - environment = { NODE_VERSION = "18.13.0" } + environment = { NODE_VERSION = "20.18.0" } [context.production.environment] ENABLE_INDEX = "1" \ No newline at end of file diff --git a/package.json b/package.json index fabeafd55b..15e514046e 100644 --- a/package.json +++ b/package.json @@ -9,7 +9,9 @@ "releaser" ], "scripts": { - "serve": "yarn workspace website start", + "build": "LAB_TIMES_ENABLED=true yarn workspace website build", + "serve": "LAB_TIMES_ENABLED=true yarn workspace website serve", + "start": "LAB_TIMES_ENABLED=true yarn workspace website start", "spelling:check": "yarn cspell 'website/docs/**/*.md'", "links:check": "markdown-link-check -q -c link-check-config.json website/docs/**/*.md", "format:check": "prettier -c .", @@ -23,12 +25,14 @@ "devDependencies": { "@aws/toolkit-md": "^0.1.6", "cspell": "^9.0.0", + "linkinator": "^7.0.0", "lint-staged": "^16.0.0", "markdown-link-check": "3.14.2", "markdownlint-cli2": "^0.18.0", "npm-run-all2": "^8.0.0", "prettier": "^3.2.5", - "prettier-plugin-sh": "^0.18.0" + "prettier-plugin-sh": "^0.18.0", + "puppeteer": "^23.0.0" }, "lint-staged": { "*.{js,jsx,ts,tsx}": [ diff --git a/testing/run.sh b/testing/run.sh index 661b9d021c..ee4e95d95f 100644 --- a/testing/run.sh +++ b/testing/run.sh @@ -4,6 +4,10 @@ set -e bash /entrypoint.sh +# When running tests, manifests are volume-mounted from the host. +# Clear REPOSITORY_REF so reset-environment skips git clone. +echo 'export REPOSITORY_REF=""' > /home/ec2-user/.bashrc.d/repository.bash + cat << EOT > /tmp/wrapper.sh #!/bin/bash diff --git a/website/docs/_partials/setup/aws-event-setup.mdx b/website/docs/_partials/setup/aws-event-setup.mdx new file mode 100644 index 0000000000..7d9a65f23d --- /dev/null +++ b/website/docs/_partials/setup/aws-event-setup.mdx @@ -0,0 +1,37 @@ +By participating in this workshop, you will be provided with an AWS account to use to complete the lab material. Connect to the portal by browsing to [https://catalog.workshops.aws/](https://catalog.workshops.aws/). Click on **Get Started**. + +![Workshop Studio Home](/docs/introduction/setup/workshop-studio-home.webp) + +You will be prompted to sign in. Select the option **Email One-Time Password(OTP)**. + +Workshop Studio Sign in + +Enter your email address and press **Send passcode**, which will send a one-time passcode to your inbox. When the email arrives, enter the passcode and log in. + +Your instructor should have provided you with an **Event access code** prior to starting these exercises. Enter the provided code in the text box and click **Next**. + +![Event Code](/docs/introduction/setup/event-code.webp) + +Read and accept the Terms and Conditions and click **Join event** to continue. 
+ +![Review and Join](/docs/introduction/setup/review-and-join.webp) + +You will be presented with your personal dashboard. Select the **Open AWS Console** button to be taken to your AWS account console: + +![Open Console](/docs/introduction/setup/openconsole.webp) + +Next, return to the personal dashboard page and scroll down to the **Event Outputs** section. Copy the URL from the **IdeUrl** field and open it in a new browser tab: + +![Cloud9 Link](/docs/introduction/setup/workshop-studio-06.png) + +You will be prompted for a password: + +![IDE Password](/docs/introduction/setup/visual-studio-01.png) + +Enter the value from the **IdePassword** field from the outputs and the web IDE will load. + +![Code-server login screen](/docs/introduction/setup/vscode-splash.webp) + +Press **Get started** to access the workshop splash page: + +![Get Started](/docs/introduction/setup/workshop-event-page.webp) diff --git a/website/docs/_partials/setup/ide-cleanup.mdx b/website/docs/_partials/setup/ide-cleanup.mdx new file mode 100644 index 0000000000..a23b36a8b9 --- /dev/null +++ b/website/docs/_partials/setup/ide-cleanup.mdx @@ -0,0 +1,4 @@ +Once you have completed the cleanup of the EKS cluster, you can remove the IDE CloudFormation stack. + +Navigate to the [CloudFormation console](https://console.aws.amazon.com/cloudformation/home) and select the `eks-workshop-ide` stack, then click the **Delete** button: + diff --git a/website/docs/fastpaths/_category_.json b/website/docs/fastpaths/_category_.json new file mode 100644 index 0000000000..a02bd41c18 --- /dev/null +++ b/website/docs/fastpaths/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Learn with EKS Auto Mode", + "position": 100 +} diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/index.md b/website/docs/fastpaths/developer/amazon-eks-pod-identity/index.md new file mode 100644 index 0000000000..ef4ca4d311 --- /dev/null +++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/index.md @@ -0,0 +1,17 @@ +--- +title: "Accessing AWS APIs securely from workloads" +sidebar_position: 50 +description: "Manage AWS credentials for your applications running on Amazon Elastic Kubernetes Service with EKS Pod Identity." +--- + +:::tip What's been set up for you +Your Amazon EKS Auto Mode cluster includes: + +- An Amazon DynamoDB table for the carts service +- An IAM role configured for the carts workload to access DynamoDB + +::: + +Applications in a Pod's containers can use a supported AWS SDK or the AWS CLI to make API requests to AWS services using AWS Identity and Access Management (IAM) permissions. For example, applications may need to upload files to an S3 bucket or query a DynamoDB table, and in order to do so, they must sign their AWS API requests with AWS credentials. [EKS Pod Identities](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 Instance Profiles provide credentials to instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance's role, you can associate an IAM role with a Kubernetes Service Account and configure your Pods to use it. Check out EKS documentation [here](https://docs.aws.amazon.com/eks/latest/userguide/pod-id-minimum-sdk.html) for the exact list of SDK versions supported. + +In this module, we'll reconfigure one of the sample application components to leverage the AWS API and provide it with the appropriate privileges. 
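+
+To make the model concrete before we start, the sketch below shows the kind of check you could run once an association is in place. The namespace and workload names are placeholders, and it assumes the container image bundles the AWS CLI, which is not the case for every component in the sample application:
+
+```bash test=false
+# Hypothetical example: confirm which IAM identity a Pod resolves once its
+# Service Account has an EKS Pod Identity association. The assumed role in
+# the output should be the associated role, not the node's instance role.
+$ kubectl -n <namespace> exec deploy/<workload> -- aws sts get-caller-identity
+```
+
+We'll apply this pattern to the `carts` component in the following sections.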
diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/introduction.md b/website/docs/fastpaths/developer/amazon-eks-pod-identity/introduction.md
new file mode 100644
index 0000000000..28f9a2eead
--- /dev/null
+++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/introduction.md
@@ -0,0 +1,25 @@
+---
+title: "Introduction"
+sidebar_position: 31
+---
+
+The `carts` component of our architecture uses Amazon DynamoDB as its storage backend, which is a common use case you'll find when integrating non-relational databases with Amazon EKS. Currently, the carts API is deployed with a [lightweight version of Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html) running as a container in the EKS cluster.
+
+You can see this by running the following command:
+
+```bash wait=30
+$ kubectl -n carts get pod
+NAME                              READY   STATUS    RESTARTS   AGE
+carts-5d7fc9d8f-xm4hs             1/1     Running   0          14m
+carts-dynamodb-698674dcc6-hw2bg   1/1     Running   0          14m
+```
+
+In the output above, the Pod `carts-dynamodb-698674dcc6-hw2bg` is our lightweight DynamoDB service. We can verify our `carts` application is using this by inspecting its environment:
+
+```bash timeout=180
+$ kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=service -n carts --timeout=120s
+$ kubectl -n carts exec deployment/carts -- env | grep RETAIL_CART_PERSISTENCE_DYNAMODB_ENDPOINT
+RETAIL_CART_PERSISTENCE_DYNAMODB_ENDPOINT=http://carts-dynamodb:8000
+```
+
+While this approach can be useful for testing, we want to migrate our application to use the fully managed Amazon DynamoDB service to take full advantage of the scale and reliability it offers. In the following sections, we'll reconfigure our application to use Amazon DynamoDB and implement EKS Pod Identity to provide secure access to AWS services.
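+
+If you want to poke at this in-cluster DynamoDB instance yourself, one optional approach is sketched below. It assumes the AWS CLI is available in your IDE environment and that the service listens on port 8000, as the endpoint above suggests:
+
+```bash test=false
+# Forward the in-cluster DynamoDB service to your local machine
+$ kubectl -n carts port-forward svc/carts-dynamodb 8000:8000 &
+
+# List tables against the local endpoint rather than the AWS DynamoDB API
+$ aws dynamodb list-tables --endpoint-url http://localhost:8000
+
+# Stop the port-forward when you're done
+$ kill %1
+```
+
+Since this instance only exists inside the cluster, any data it holds is as ephemeral as the Pod itself, which is another reason to move to the managed service.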
diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-enable-dynamo.sh b/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-enable-dynamo.sh new file mode 100644 index 0000000000..448c50be19 --- /dev/null +++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-enable-dynamo.sh @@ -0,0 +1,18 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 10 + + if [[ $TEST_OUTPUT != *"timed out waiting"* ]]; then + echo "Failed to match expected output" + echo $TEST_OUTPUT + + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh b/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh new file mode 100644 index 0000000000..1f27c488de --- /dev/null +++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh @@ -0,0 +1,13 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 10 + + kubectl wait --for=condition=available --timeout=120s deployment/carts -n carts +} + +"$@" diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-pod-logs.sh b/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-pod-logs.sh new file mode 100644 index 0000000000..6f08dc41b0 --- /dev/null +++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/tests/hook-pod-logs.sh @@ -0,0 +1,41 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + echo "=== DEBUG: Checking carts pods ===" + kubectl get pods -n carts -l app.kubernetes.io/component=service -o wide 2>&1 || true + echo "=== DEBUG: Carts configmap ===" + kubectl -n carts get cm carts -o jsonpath='{.data}' 2>&1 || true + echo "" + echo "=== DEBUG: Pod identity associations ===" + aws eks list-pod-identity-associations --cluster-name ${EKS_CLUSTER_AUTO_NAME} --namespace carts 2>&1 || true + + # Wait for the carts pod to crash and restart at least once + echo "Waiting for carts pod to crash and restart..." + + for i in $(seq 1 36); do + RESTARTS=$(kubectl get pods -n carts -l app.kubernetes.io/component=service --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1:].status.containerStatuses[0].restartCount}' 2>/dev/null || echo "0") + if [ "$RESTARTS" -gt 0 ] 2>/dev/null; then + LATEST_POD=$(kubectl get pods -n carts -l app.kubernetes.io/component=service --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1:].metadata.name}') + LOG_OUTPUT=$(kubectl logs -n carts -p "$LATEST_POD" 2>/dev/null || true) + if [[ "$LOG_OUTPUT" == *"Unable to load credentials"* ]]; then + echo "Found expected credential error after $i attempts (restarts=$RESTARTS)" + return 0 + fi + fi + echo "Attempt $i: restarts=$RESTARTS, waiting..." 
+ sleep 10 + done + + echo "=== DEBUG: Final pod state ===" + kubectl get pods -n carts -l app.kubernetes.io/component=service -o wide 2>&1 || true + kubectl describe pods -n carts -l app.kubernetes.io/component=service 2>&1 | tail -30 || true + + echo "Failed to find expected credential error after 360s" + exit 1 +} + +"$@" diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/understanding.md b/website/docs/fastpaths/developer/amazon-eks-pod-identity/understanding.md new file mode 100644 index 0000000000..a990c0969c --- /dev/null +++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/understanding.md @@ -0,0 +1,18 @@ +--- +title: "Understanding Pod IAM" +sidebar_position: 33 +--- + +The first place to look for the issue is the logs of the `carts` service: + +```bash hook=pod-logs timeout=480 +$ LATEST_POD=$(kubectl get pods -n carts -l app.kubernetes.io/component=service --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1:].metadata.name}') +sleep 60 +kubectl logs -n carts -p $LATEST_POD +[...] +software.amazon.awssdk.core.exception.SdkClientException: Unable to load credentials from any of the providers in the chain AwsCredentialsProviderChain(credentialsProviders=[SystemPropertyCredentialsProvider(), EnvironmentVariableCredentialsProvider(), WebIdentityTokenCredentialsProvider(), ProfileCredentialsProvider(profileName=default, profileFile=ProfileFile(sections=[])), ContainerCredentialsProvider(), InstanceProfileCredentialsProvider()]) : [SystemPropertyCredentialsProvider(): Unable to load credentials from system settings. Access key must be specified either via environment variable (AWS_ACCESS_KEY_ID) or system property (aws.accessKeyId)., EnvironmentVariableCredentialsProvider(): Unable to load credentials from system settings. Access key must be specified either via environment variable (AWS_ACCESS_KEY_ID) or system property (aws.accessKeyId)., WebIdentityTokenCredentialsProvider(): Either the environment variable AWS_WEB_IDENTITY_TOKEN_FILE or the javaproperty aws.webIdentityTokenFile must be set., ProfileCredentialsProvider(profileName=default, profileFile=ProfileFile(sections=[])): Profile file contained no credentials for profile 'default': ProfileFile(sections=[]), ContainerCredentialsProvider(): Cannot fetch credentials from container - neither AWS_CONTAINER_CREDENTIALS_FULL_URI or AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variables are set., InstanceProfileCredentialsProvider(): Failed to load credentials from IMDS.] +``` + +The application is generating an error which indicates that the Pod cannot load AWS credentials to access DynamoDB. This is happening because by default, when no IAM roles or policies are linked to our Pod via EKS Pod Identity, the application cannot obtain credentials to make AWS API calls. + +One approach would be to expand the IAM permissions of the node IAM role, but this would allow any Pod running on those instances to access our DynamoDB table. This violates the principle of least privilege and is not a security best practice. Instead, we'll use EKS Pod Identity to provide the specific permissions required by the `carts` application at the Pod level, ensuring fine-grained access control. 
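+
+Before moving on, it can also help to confirm which Kubernetes Service Account the `carts` workload runs under, since that is the identity we'll bind an IAM role to in the next step. A quick way to check is sketched below; the output should be `carts`:
+
+```bash test=false
+# Print the Service Account used by the carts Deployment's Pod template
+$ kubectl -n carts get deployment carts \
+  -o jsonpath='{.spec.template.spec.serviceAccountName}'
+carts
+```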
diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/use-pod-identity.md b/website/docs/fastpaths/developer/amazon-eks-pod-identity/use-pod-identity.md
new file mode 100644
index 0000000000..808ac4aea0
--- /dev/null
+++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/use-pod-identity.md
@@ -0,0 +1,105 @@
+---
+title: "Using EKS Pod Identity"
+sidebar_position: 34
+hide_table_of_contents: true
+---
+
+With Amazon EKS Auto Mode, the EKS Pod Identity Agent is already included and managed by AWS in the control plane. You can verify Pod Identity is available by checking for existing pod identity associations:
+
+```bash
+$ aws eks list-pod-identity-associations --cluster-name $EKS_CLUSTER_AUTO_NAME --namespace carts
+{
+  "associations": []
+}
+```
+
+An IAM role, which provides the required permissions for the `carts` service to read and write to the DynamoDB table, was created when the Auto Mode cluster was set up. You can view the policy as shown below:
+
+```bash
+$ aws iam get-policy-version \
+  --version-id v1 \
+  --policy-arn arn:aws:iam::${AWS_ACCOUNT_ID}:policy/${EKS_CLUSTER_AUTO_NAME}-carts-dynamo \
+  --query 'PolicyVersion.Document' | jq .
+{
+  "Statement": [
+    {
+      "Action": "dynamodb:*",
+      "Effect": "Allow",
+      "Resource": [
+        "arn:aws:dynamodb:us-west-2:267912352941:table/eks-workshop-auto-carts",
+        "arn:aws:dynamodb:us-west-2:267912352941:table/eks-workshop-auto-carts/index/*"
+      ],
+      "Sid": "AllAPIActionsOnCart"
+    }
+  ],
+  "Version": "2012-10-17"
+}
+```
+
+The role has also been configured with the appropriate trust relationship, which allows the EKS Service Principal to assume this role for Pod Identity. You can view it with the command below:
+
+```bash
+$ aws iam get-role \
+  --query 'Role.AssumeRolePolicyDocument' \
+  --role-name ${EKS_CLUSTER_AUTO_NAME}-carts-dynamo | jq .
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Service": "pods.eks.amazonaws.com"
+      },
+      "Action": [
+        "sts:AssumeRole",
+        "sts:TagSession"
+      ]
+    }
+  ]
+}
+```
+
+Next, we will use the Amazon EKS Pod Identity feature to associate an AWS IAM role with the Kubernetes Service Account that will be used by our deployment. To create the association, run the following command:
+
+```bash wait=30
+$ aws eks create-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME} \
+  --role-arn arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EKS_CLUSTER_AUTO_NAME}-carts-dynamo \
+  --namespace carts --service-account carts | jq .
+{
+  "association": {
+    "clusterName": "eks-workshop-auto",
+    "namespace": "carts",
+    "serviceAccount": "carts",
+    "roleArn": "arn:aws:iam::267912352941:role/eks-workshop-auto-carts-dynamo",
+    "associationArn": "arn:aws:eks:us-west-2:267912352941:podidentityassociation/eks-workshop-auto/a-yg5uoymvtfgdg5tcj",
+    "associationId": "a-yg5uoymvtfgdg5tcj",
+    "tags": {},
+    "createdAt": "2025-10-11T01:13:27.763000+00:00",
+    "modifiedAt": "2025-10-11T01:13:27.763000+00:00",
+    "disableSessionTags": false
+  }
+}
+```
+
+All that's left is to verify that the `carts` Deployment is using the `carts` Service Account:
+
+```bash
+$ kubectl -n carts describe deployment carts | grep 'Service Account'
+  Service Account:  carts
+```
+
+With the Service Account verified, let's recycle the `carts` Pods:
+
+```bash hook=enable-pod-identity hookTimeout=430
+$ kubectl -n carts rollout restart deployment/carts
+deployment.apps/carts restarted
+```
+
+Let's check the status of the Pods to confirm they have rolled out successfully:
+
+```bash timeout=360
+$ kubectl -n carts rollout status deployment/carts --timeout=300s
+Waiting for deployment "carts" rollout to finish: 1 old replicas are pending termination...
+deployment "carts" successfully rolled out
+```
+
+In the next section, we'll verify that the DynamoDB permission issue we encountered earlier has been resolved for the carts application.
diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/using-dynamo.md b/website/docs/fastpaths/developer/amazon-eks-pod-identity/using-dynamo.md
new file mode 100644
index 0000000000..340df24579
--- /dev/null
+++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/using-dynamo.md
@@ -0,0 +1,77 @@
+---
+title: "Using Amazon DynamoDB"
+sidebar_position: 32
+---
+
+The first step in this process is to reconfigure the carts service to use a DynamoDB table that has already been created for us. The application loads most of its configuration from a ConfigMap. Let's take a look at it:
+
+```bash
+$ kubectl -n carts get -o yaml cm carts | yq
+apiVersion: v1
+data:
+  AWS_ACCESS_KEY_ID: key
+  AWS_SECRET_ACCESS_KEY: secret
+  RETAIL_CART_PERSISTENCE_DYNAMODB_CREATE_TABLE: "true"
+  RETAIL_CART_PERSISTENCE_DYNAMODB_ENDPOINT: http://carts-dynamodb:8000
+  RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME: Items
+  RETAIL_CART_PERSISTENCE_PROVIDER: dynamodb
+kind: ConfigMap
+metadata:
+  name: carts
+  namespace: carts
+```
+
+The following kustomization overwrites the ConfigMap, removing the DynamoDB endpoint configuration, which tells the SDK to use the real DynamoDB service instead of our test Pod. It also sets the name of the DynamoDB table that has already been created for us, which is pulled from the environment variable `RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME`.
+ +```kustomization +modules/security/eks-pod-identity/dynamo/kustomization.yaml +ConfigMap/carts +``` + +Let's set the DynamoDB table name and run Kustomize to use the real DynamoDB service: + +```bash +$ export CARTS_DYNAMODB_TABLENAME=${EKS_CLUSTER_AUTO_NAME}-carts && echo $CARTS_DYNAMODB_TABLENAME +eks-workshop-auto-carts +$ kubectl kustomize ~/environment/eks-workshop/modules/security/eks-pod-identity/dynamo \ + | envsubst | kubectl apply -f- +``` + +This will overwrite our ConfigMap with new values: + +```bash +$ kubectl -n carts get cm carts -o yaml | yq +apiVersion: v1 +data: + AWS_REGION: us-west-2 + RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME: eks-workshop-auto-carts + RETAIL_CART_PERSISTENCE_PROVIDER: dynamodb +kind: ConfigMap +metadata: + labels: + app: carts + name: carts + namespace: carts +``` + +Now, we need to recycle all the carts pods to pick up our new ConfigMap contents: + +```bash expectError=true hook=enable-dynamo +$ kubectl rollout restart -n carts deployment/carts +deployment.apps/carts restarted +$ kubectl rollout status -n carts deployment/carts --timeout=20s +Waiting for deployment "carts" rollout to finish: 1 old replicas are pending termination... +error: timed out waiting for the condition +``` + +It looks like our change failed to deploy properly. We can confirm this by looking at the Pods: + +```bash +$ kubectl -n carts get pod +NAME READY STATUS RESTARTS AGE +carts-5d486d7cf7-8qxf9 1/1 Running 0 5m49s +carts-df76875ff-7jkhr 0/1 CrashLoopBackOff 3 (36s ago) 2m2s +carts-dynamodb-698674dcc6-hw2bg 1/1 Running 0 20m +``` + +What's gone wrong? diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/verifying-dynamo.md b/website/docs/fastpaths/developer/amazon-eks-pod-identity/verifying-dynamo.md new file mode 100644 index 0000000000..fabdffb145 --- /dev/null +++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/verifying-dynamo.md @@ -0,0 +1,39 @@ +--- +title: "Verifying DynamoDB access" +sidebar_position: 35 +--- + +Now, with the `carts` Service Account associated with the authorized IAM role, the `carts` Pod has permission to access the DynamoDB table. Access the web store again and navigate to the shopping cart. + +```bash +$ ALB_HOSTNAME=$(kubectl get ingress ui-auto -n ui -o yaml | yq .status.loadBalancer.ingress[0].hostname) +$ echo "http://$ALB_HOSTNAME" +http://k8s-ui-ui-a9797f0f61.elb.us-west-2.amazonaws.com +``` + +The `carts` Pod is able to reach the DynamoDB service and the shopping cart is now accessible! + +![Cart](/img/sample-app-screens/shopping-cart.webp) + +:::caution +If you see an error loading the application, make sure you have restarted `carts` Pods at the end of the [previous section](./use-pod-identity.md). +::: + +After the AWS IAM role is associated with the Service Account, any newly created Pods using that Service Account will be intercepted by the [EKS Pod Identity webhook](https://github.com/aws/amazon-eks-pod-identity-webhook). This webhook runs on the Amazon EKS cluster's control plane and is fully managed by AWS. 
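+
+If you want to double-check the association that drives this behavior, you can list it again; a quick sketch is below, and the association details in your output will differ:
+
+```bash test=false
+# The association created in the previous section should now be listed
+$ aws eks list-pod-identity-associations --cluster-name $EKS_CLUSTER_AUTO_NAME --namespace carts
+```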
+Take a closer look at the new `carts` Pod to see the new environment variables:
+
+```bash
+$ kubectl -n carts exec deployment/carts -- env | grep AWS
+AWS_STS_REGIONAL_ENDPOINTS=regional
+AWS_DEFAULT_REGION=us-west-2
+AWS_REGION=us-west-2
+AWS_CONTAINER_CREDENTIALS_FULL_URI=http://169.254.170.23/v1/credentials
+AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE=/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token
+```
+
+Notable points about these environment variables:
+
+- `AWS_DEFAULT_REGION` - The Region is set automatically to match our EKS cluster
+- `AWS_STS_REGIONAL_ENDPOINTS` - Regional STS endpoints are configured to avoid putting too much pressure on the global endpoint in `us-east-1`
+- `AWS_CONTAINER_CREDENTIALS_FULL_URI` - This variable tells AWS SDKs how to obtain credentials using the [HTTP credential provider](https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html). This means that EKS Pod Identity does not need to inject credentials via something like an `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` pair, and instead the SDKs can have temporary credentials vended to them via the EKS Pod Identity mechanism. You can read more about how this functions in the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html).
+
+You have successfully configured Pod Identity in your application.
diff --git a/website/docs/fastpaths/developer/ebs/deployment-with-ebs.md b/website/docs/fastpaths/developer/ebs/deployment-with-ebs.md
new file mode 100644
index 0000000000..98ea9f9e95
--- /dev/null
+++ b/website/docs/fastpaths/developer/ebs/deployment-with-ebs.md
@@ -0,0 +1,194 @@
+---
+title: Using persistent EBS volumes
+sidebar_position: 20
+---
+
+Now let's update the catalog MySQL database to use persistent EBS storage. With EKS Auto Mode, the EBS CSI Driver is already installed and managed by AWS.
+
+## Create the StorageClass
+
+The StorageClass defines how EKS Auto Mode will provision EBS volumes. While EKS Auto Mode includes the EBS CSI Driver, you need to create a StorageClass that references `ebs.csi.eks.amazonaws.com` to use the storage capability.
+
+::yaml{file="manifests/modules/fastpaths/developers/ebs/storageclass.yaml" paths="provisioner,parameters.type"}
+
+1. `provisioner: ebs.csi.eks.amazonaws.com` - Uses EKS Auto Mode's built-in EBS CSI Driver
+2. `type: gp3` - Specifies the EBS volume type
+
+Apply the StorageClass:
+
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/modules/fastpaths/developers/ebs/storageclass.yaml
+```
+
+## Update the catalog MySQL database
+
+Since many StatefulSet fields, including `volumeClaimTemplates`, cannot be modified, we'll need to delete and recreate the catalog service with the new storage configuration.
+
+First, delete the current catalog MySQL StatefulSet:
+
+```bash wait=10
+$ kubectl delete -n catalog statefulset catalog-mysql
+```
+
+Now recreate it with persistent storage enabled. The updated StatefulSet includes a `volumeClaimTemplates` section:
+
+::yaml{file="manifests/modules/fastpaths/developers/ebs/statefulset-mysql.yaml" paths="spec.volumeClaimTemplates.0.spec.storageClassName,spec.volumeClaimTemplates.0.spec.accessModes,spec.volumeClaimTemplates.0.spec.resources"}
+
+1. The `storageClassName` specifies the ebs-sc StorageClass for dynamic provisioning
+2. The `accessModes` specifies ReadWriteOnce, allowing the volume to be mounted by a single node
+3.
We are requesting a 30GB EBS volume
+
+Apply the configuration, and restart the catalog pod to ensure the database is initialized:
+
+```bash timeout=180
+$ kubectl apply -k ~/environment/eks-workshop/modules/fastpaths/developers/ebs
+$ kubectl rollout restart deployment/catalog -n catalog # Force catalog to push the DB structure
+```
+
+## Verify the PersistentVolumeClaim
+
+The recreated catalog MySQL StatefulSet now has an associated PersistentVolumeClaim.
+
+```bash
+$ kubectl describe statefulset -n catalog catalog-mysql
+Name:               catalog-mysql
+Namespace:          catalog
+...
+  Containers:
+   mysql:
+    Image:      public.ecr.aws/docker/library/mysql:8.0
+    Port:       3306/TCP
+    Mounts:
+      /var/lib/mysql from data (rw)
+Volume Claims:
+  Name:          data
+  StorageClass:
+  Labels:
+  Annotations:
+  Capacity:      30Gi
+  Access Modes:  [ReadWriteOnce]
+```
+
+Examine the PersistentVolumeClaim (PVC) that was created:
+
+```bash
+$ kubectl get pvc -n catalog
+NAME                   STATUS   VOLUME          CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+data-catalog-mysql-0   Bound    pvc-abc123...   30Gi       RWO            ebs-sc         2m
+```
+
+Inspect the PVC details:
+
+```bash
+$ kubectl describe pvc -n catalog data-catalog-mysql-0
+Name:          data-catalog-mysql-0
+Namespace:     catalog
+StorageClass:  ebs-sc
+Status:        Bound
+Volume:        pvc-abc123...
+Annotations:   pv.kubernetes.io/bind-completed: yes
+               pv.kubernetes.io/bound-by-controller: yes
+               volume.beta.kubernetes.io/storage-provisioner: ebs.csi.aws.com
+Capacity:      30Gi
+Access Modes:  RWO
+VolumeMode:    Filesystem
+Used By:       catalog-mysql-0
+```
+
+The PVC is bound to a PersistentVolume (PV), provisioned using **ebs.csi.aws.com** with 30Gi capacity.
+
+## Inspect the PersistentVolume (PV)
+
+```bash
+$ kubectl describe pv $(kubectl get pvc -n catalog data-catalog-mysql-0 -o jsonpath="{.spec.volumeName}")
+Name:              pvc-abc123...
+Annotations:       pv.kubernetes.io/provisioned-by: ebs.csi.aws.com
+StorageClass:      ebs-sc
+Status:            Bound
+Claim:             catalog/data-catalog-mysql-0
+Reclaim Policy:    Delete
+Access Modes:      RWO
+VolumeMode:        Filesystem
+Capacity:          30Gi
+Node Affinity:
+  Required Terms:
+    Term 0:        topology.kubernetes.io/zone in [us-west-2a]
+Source:
+    Type:              CSI (a Container Storage Interface (CSI) volume source)
+    Driver:            ebs.csi.aws.com
+    FSType:            ext4
+    VolumeHandle:      vol-0abc123...
+    ReadOnly:          false
+```
+
+The **VolumeHandle** references the Amazon EBS Volume ID. The **Node Affinity** ensures the pod is scheduled in the same Availability Zone as the EBS volume.
+
+## Verify the EBS volume
+
+Get the EBS Volume ID:
+
+```bash
+$ MYSQL_PV_NAME=$(kubectl get pvc -n catalog data-catalog-mysql-0 -o jsonpath="{.spec.volumeName}")
+$ MYSQL_EBS_VOL_ID=$(kubectl get pv $MYSQL_PV_NAME -o jsonpath="{.spec.csi.volumeHandle}")
+$ echo "EBS Volume ID: $MYSQL_EBS_VOL_ID"
+```
+
+Display the EBS volume details:
+
+```bash
+$ aws ec2 describe-volumes --volume-ids $MYSQL_EBS_VOL_ID | jq .
+```
+
+The volume uses gp3 storage with encryption enabled.
+
+## Test data persistence
+
+Let's verify that data persists across pod restarts. First, wait for the pod to be ready:
+
+```bash timeout=420
+$ kubectl wait --for=condition=Ready -n catalog pod/catalog-mysql-0 --timeout=360s
+```
+
+Create a test file in the MySQL data directory:
+
+```bash
+$ kubectl exec -n catalog catalog-mysql-0 -- bash -c "echo 123 > /var/lib/mysql/test.txt"
+```
+
+Verify the test file was created:
+
+```bash
+$ kubectl exec -n catalog catalog-mysql-0 -- ls -larth /var/lib/mysql/ | grep -i test
+-rw-r--r--.
1 root root 4 Oct 11 00:39 test.txt +``` + +Now delete the pod to simulate a failure: + +```bash +$ kubectl delete pod -n catalog catalog-mysql-0 +``` + +Wait for the StatefulSet controller to automatically recreate the pod: + +```bash +$ kubectl wait --for=condition=Ready -n catalog pod/catalog-mysql-0 --timeout=120s +``` + +Verify the test file still exists after the pod restart: + +```bash +$ kubectl exec -n catalog catalog-mysql-0 -- cat /var/lib/mysql/test.txt +123 +``` + +Success! The test file persisted across the pod restart because it's stored on the EBS volume, not in the pod's ephemeral storage. Amazon EBS is storing the data and keeping it safe and available within an AWS availability zone. + +## Summary + +In this section, we: + +- Updated the catalog MySQL database to use persistent EBS storage +- Verified that the EBS volume was created correctly +- Tested data persistence across pod restarts + +With EKS Auto Mode, the EBS CSI Driver is pre-installed and managed, making it simple to provision persistent block storage for your stateful workloads. diff --git a/website/docs/fastpaths/developer/ebs/existing-architecture.md b/website/docs/fastpaths/developer/ebs/existing-architecture.md new file mode 100644 index 0000000000..4647970c25 --- /dev/null +++ b/website/docs/fastpaths/developer/ebs/existing-architecture.md @@ -0,0 +1,33 @@ +--- +title: Current storage configuration +sidebar_position: 10 +--- + +Let's examine how the catalog MySQL database currently stores its data. The catalog service uses MySQL as its backend database, and we'll check its current storage configuration. + +First, let's look at the StatefulSet for the catalog MySQL database: + +```bash +$ kubectl describe statefulset -n catalog catalog-mysql +Name: catalog-mysql +Namespace: catalog +[...] + Containers: + mysql: + Image: public.ecr.aws/docker/library/mysql:8.0 + Port: 3306/TCP + Mounts: + /var/lib/mysql from data (rw) + Volumes: + data: + Type: EmptyDir (a temporary directory that shares a pod's lifetime) +[...] +``` + +The StatefulSet currently uses an [EmptyDir volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) that exists only for the Pod's lifetime. This means: + +- When the Pod is terminated, all database data is permanently lost +- The database starts fresh with each pod restart +- There's no data persistence across pod lifecycle events + +This is not suitable for a production database. In the next section, we'll configure persistent storage using Amazon EBS to ensure our database data survive Pod restarts and failures. diff --git a/website/docs/fastpaths/developer/ebs/index.md b/website/docs/fastpaths/developer/ebs/index.md new file mode 100644 index 0000000000..a70ffb9e9d --- /dev/null +++ b/website/docs/fastpaths/developer/ebs/index.md @@ -0,0 +1,21 @@ +--- +title: Adding workload storage with EBS +sidebar_position: 40 +description: "Persistent block storage for workloads on Amazon Elastic Kubernetes Service with Amazon Elastic Block Store." +--- + +:::tip What's been set up for you +Your Amazon EKS Auto Mode cluster includes the **Amazon EBS CSI Driver**, which enables dynamic provisioning of persistent block storage volumes. +::: + +[Amazon Elastic Block Store](https://docs.aws.amazon.com/ebs/latest/userguide/what-is-ebs.html) (Amazon EBS) provides persistent block storage volumes for use with Amazon EC2 and Amazon EKS. EBS volumes are highly available and reliable storage that can be attached to running instances in the same Availability Zone. 
+ +With Amazon EKS Auto Mode, the EBS CSI Driver comes pre-installed and managed by AWS, eliminating the need for manual installation and configuration. + +In this lab, you will: + +- Learn about persistent block storage with EBS +- Configure the catalog MySQL database to use persistent EBS volumes +- Verify data persistence across pod restarts + +This hands-on experience will demonstrate how to effectively use Amazon EBS with EKS Auto Mode for persistent storage solutions. diff --git a/website/docs/fastpaths/developer/ebs/tests/hook-efs-deployment.sh b/website/docs/fastpaths/developer/ebs/tests/hook-efs-deployment.sh new file mode 100644 index 0000000000..74b7cac66d --- /dev/null +++ b/website/docs/fastpaths/developer/ebs/tests/hook-efs-deployment.sh @@ -0,0 +1,23 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 60 + + EXIT_CODE=0 + + timeout -s TERM 60 bash -c \ + 'while [[ $(kubectl get pod -l app.kubernetes.io/name=ui -n ui -o json | jq -r ".items | length") -lt 2 ]];\ + do sleep 30;\ + done' || EXIT_CODE=$? + + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "UI service did not deploy in 60 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/ebs/tests/hook-placeholder.sh b/website/docs/fastpaths/developer/ebs/tests/hook-placeholder.sh new file mode 100644 index 0000000000..cd58bf2e1c --- /dev/null +++ b/website/docs/fastpaths/developer/ebs/tests/hook-placeholder.sh @@ -0,0 +1,21 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + export ui_endpoint=$(kubectl get ingress ui -n ui -o yaml | yq .status.loadBalancer.ingress[0].hostname) + + if [ -z "$ui_endpoint" ]; then + >&2 echo "Failed to retrieve LB hostname" + exit 1 + fi + + if [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${ui_endpoint}/assets/img/products/placeholder.jpg)" != "200" ]]; then + >&2 echo "Expected placeholder image not available" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/ebs/tests/hook-sample-images.sh b/website/docs/fastpaths/developer/ebs/tests/hook-sample-images.sh new file mode 100644 index 0000000000..323e30452c --- /dev/null +++ b/website/docs/fastpaths/developer/ebs/tests/hook-sample-images.sh @@ -0,0 +1,15 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + if [[ $TEST_OUTPUT != *"1ca35e86-4b4c-4124-b6b5-076ba4134d0d.jpg"* ]]; then + >&2 echo "Failed to match expected output" + echo $TEST_OUTPUT + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/index.md b/website/docs/fastpaths/developer/index.md new file mode 100644 index 0000000000..5ed3182b36 --- /dev/null +++ b/website/docs/fastpaths/developer/index.md @@ -0,0 +1,31 @@ +--- +title: "Developer Essentials" +sidebar_position: 50 +sidebar_custom_props: { "module": true } +--- + +# Developer Essentials + +::required-time + +:::tip Before you start +This fast path uses a dedicated Amazon EKS Auto Mode cluster. Amazon EKS Auto Mode extends AWS management of Kubernetes clusters beyond the cluster itself, managing infrastructure that enables smooth operation of your workloads including compute autoscaling, networking, load balancing, DNS, and block storage. + +Prepare your environment for this lab: + +```bash timeout=600 +$ prepare-environment fastpaths/developer +``` +::: + +Welcome to the EKS Workshop Developer Essentials! This is a collection of labs optimized for developers to learn the features of Amazon EKS most commonly required when deploying workloads. 
+ +In this learning path, you'll learn: + +- Deploying and managing containerized applications on EKS +- Working with persistent storage using Amazon EBS +- Implementing autoscaling for your workloads +- Exposing applications with load balancers and Ingress +- Using AWS services like DynamoDB with EKS Pod Identity + +Let's get started! diff --git a/website/docs/fastpaths/developer/ingress/adding-ingress.md b/website/docs/fastpaths/developer/ingress/adding-ingress.md new file mode 100644 index 0000000000..e2b8e01e59 --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/adding-ingress.md @@ -0,0 +1,151 @@ +--- +title: "Creating the Ingress" +sidebar_position: 20 +--- + +:::info AWS Load Balancer Controller +The AWS Load Balancer Controller is included with Amazon EKS Auto Mode and runs in the control plane. It will automatically provision AWS load balancers when you create Ingress resources. +::: + +Currently there are no Ingress resources in our cluster, which you can check with the following command: + +```bash expectError=true +$ kubectl get ingress -n ui +No resources found in ui namespace. +``` + +First, we need to configure an IngressClass and IngressClassParams: + +::yaml{file="manifests/modules/fastpaths/developers/ingress/adding-ingress/ingressclass.yaml" paths="0.spec.controller,0.spec.parameters,1.spec"} + +1. The `controller` field must be set to `eks.amazonaws.com/alb` to target the Auto Mode ALB capability +2. The `parameters` section references an IngressClassParams resource with `apiGroup: eks.amazonaws.com` +3. The IngressClassParams defines AWS-specific configuration like the load balancer scheme and target type + +Using this IngressClass we will configure an Ingress: + +::yaml{file="manifests/modules/fastpaths/developers/ingress/adding-ingress/ingress.yaml" paths="kind,spec.ingressClassName,spec.rules"} + +1. Use an `Ingress` kind +2. The `ingressClassName` references our Auto Mode IngressClass +3. The rules section routes all HTTP requests where the path starts with `/` to the Kubernetes service called `ui` on port 80 + +:::info +With EKS Auto Mode, ALB configuration via annotations is not supported. Configuration must be done in the IngressClassParams. 
+::: + +Let's apply those configurations: + +```bash timeout=180 hook=add-ingress hookTimeout=660 +$ kubectl kustomize ~/environment/eks-workshop/modules/fastpaths/developers/ingress/adding-ingress | envsubst | kubectl apply -f - +``` + +Let's inspect the Ingress object created: + +```bash +$ kubectl get ingress ui-auto -n ui +NAME CLASS HOSTS ADDRESS PORTS AGE +ui-auto eks-auto-alb * k8s-ui-uiauto-6cd0ef095e-78768930.us-west-2.elb.amazonaws.com 80 5s +``` + +The ALB will take several minutes to provision and register its targets so take some time to take a closer look at the ALB provisioned for this Ingress to see how it's configured: + +```bash +$ aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-uiauto`) == `true`]' +[ + { + "LoadBalancerArn": "arn:aws:elasticloadbalancing:us-west-2:1234567890:loadbalancer/app/k8s-ui-uiauto-cb8129ddff/f62a7bc03db28e7c", + "DNSName": "k8s-ui-ui-cb8129ddff-1888909706.us-west-2.elb.amazonaws.com", + "CanonicalHostedZoneId": "Z1H1FL5HABSF5", + "CreatedTime": "2022-09-30T03:40:00.950000+00:00", + "LoadBalancerName": "k8s-ui-ui-cb8129ddff", + "Scheme": "internet-facing", + "VpcId": "vpc-0851f873025a2ece5", + "State": { + "Code": "active" + }, + "Type": "application", + "AvailabilityZones": [ + { + "ZoneName": "us-west-2b", + "SubnetId": "subnet-00415f527bbbd999b", + "LoadBalancerAddresses": [] + }, + { + "ZoneName": "us-west-2a", + "SubnetId": "subnet-0264d4b9985bd8691", + "LoadBalancerAddresses": [] + }, + { + "ZoneName": "us-west-2c", + "SubnetId": "subnet-05cda6deed7f3da65", + "LoadBalancerAddresses": [] + } + ], + "SecurityGroups": [ + "sg-0f8e704ee37512eb2", + "sg-02af06ec605ef8777" + ], + "IpAddressType": "ipv4" + } +] +``` + +What does this tell us? + +- The ALB is accessible over the public internet +- It uses the public subnets in our VPC + +Inspect the targets in the target group that was created by the controller: + +```bash +$ ALB_ARN=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-uiauto`) == `true`].LoadBalancerArn' | jq -r '.[0]') +$ TARGET_GROUP_ARN=$(aws elbv2 describe-target-groups --load-balancer-arn $ALB_ARN | jq -r '.TargetGroups[0].TargetGroupArn') +$ aws elbv2 describe-target-health --target-group-arn $TARGET_GROUP_ARN +{ + "TargetHealthDescriptions": [ + { + "Target": { + "Id": "10.42.180.183", + "Port": 8080, + "AvailabilityZone": "us-west-2c" + }, + "HealthCheckPort": "8080", + "TargetHealth": { + "State": "healthy" + } + } + ] +} +``` + +Since we specified using IP mode in our Ingress object, the target is registered using the IP address of the `ui` pod and the port on which it serves traffic. + +You can also inspect the ALB and its target groups in the console by clicking this link: + + + +:::caution +If you face issues opening the console using this button, you might not have an active session for the AWS console. To fix this, please go to the home page of the workshop and click on the link named `Open AWS console` under `AWS account access` section of the left navigation menu. 
+::: + +Get the URL from the Ingress resource: + +```bash +$ ADDRESS=$(kubectl get ingress -n ui ui-auto -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") +$ echo "http://${ADDRESS}" +http://k8s-ui-uiauto-cb8129ddff-1888909706.us-west-2.elb.amazonaws.com +``` + +To wait until the load balancer has finished provisioning you can run this command: + +```bash timeout=600 +$ curl --head -X GET --retry 30 --retry-all-errors --retry-delay 15 --connect-timeout 30 --max-time 60 \ + -k $(kubectl get ingress -n ui ui-auto -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") +``` + +And access it in your web browser. You will see the UI from the web store displayed and will be able to navigate around the site as a user. + + + + diff --git a/website/docs/fastpaths/developer/ingress/index.md b/website/docs/fastpaths/developer/ingress/index.md new file mode 100644 index 0000000000..7aeff03ca6 --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/index.md @@ -0,0 +1,16 @@ +--- +title: "Exposing workloads with Ingress" +chapter: true +sidebar_position: 20 +description: "Expose HTTP and HTTPS routes to the outside world using Ingress API on Amazon Elastic Kubernetes Service." +--- + +:::tip What's been set up for you +Your Amazon EKS Auto Mode cluster includes the **AWS Load Balancer Controller**, which manages AWS Elastic Load Balancers for Kubernetes Ingress resources. +::: + +Right now our web store application is not exposed to the outside world, so there's no way for users to access it. Although there are many microservices in our web store workload, only the `ui` application needs to be available to end users. This is because the `ui` application will perform all communication to the other backend services using internal Kubernetes networking. + +Kubernetes Ingress is an API resource that allows you to manage external or internal HTTP(S) access to Kubernetes services running in a cluster. Amazon Elastic Load Balancing Application Load Balancer (ALB) is a popular AWS service that load balances incoming traffic at the application layer (layer 7) across multiple targets, such as Amazon EC2 instances, in a region. ALB supports multiple features including host or path based routing, TLS (Transport Layer Security) termination, WebSockets, HTTP/2, AWS WAF (Web Application Firewall) integration, integrated access logs, and health checks. + +In this lab exercise, we'll expose our sample application using an ALB with the Kubernetes ingress model. diff --git a/website/docs/fastpaths/developer/ingress/tests/hook-add-ingress.sh b/website/docs/fastpaths/developer/ingress/tests/hook-add-ingress.sh new file mode 100644 index 0000000000..ec76dfc9a4 --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/tests/hook-add-ingress.sh @@ -0,0 +1,32 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 20 + + export ui_endpoint=$(kubectl get ingress -n ui ui-auto -o json | jq -r '.status.loadBalancer.ingress[0].hostname') + + if [ -z "$ui_endpoint" ]; then + >&2 echo "Failed to retrieve hostname from Ingress" + exit 1 + fi + + EXIT_CODE=0 + + timeout -s TERM 400 bash -c \ + 'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${ui_endpoint}/home)" != "200" ]];\ + do sleep 20;\ + done' || EXIT_CODE=$? 
+ + echo "Timeout completed" + + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "Ingress did not become available after 400 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/ingress/tests/hook-dns-curl.sh b/website/docs/fastpaths/developer/ingress/tests/hook-dns-curl.sh new file mode 100644 index 0000000000..beed10e525 --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/tests/hook-dns-curl.sh @@ -0,0 +1,15 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + if [[ $TEST_OUTPUT != *"HTTP/1.1 200 OK"* ]]; then + >&2 echo "Failed to match expected output" + echo $TEST_OUTPUT + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/ingress/tests/hook-dns-logs.sh b/website/docs/fastpaths/developer/ingress/tests/hook-dns-logs.sh new file mode 100644 index 0000000000..f02fef2cb9 --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/tests/hook-dns-logs.sh @@ -0,0 +1,15 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + if [[ $TEST_OUTPUT != *"Desired change: CREATE ui.retailstore.com"* ]]; then + >&2 echo "Failed to match expected output" + echo $TEST_OUTPUT + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/ingress/tests/hook-multiple-ingress.sh b/website/docs/fastpaths/developer/ingress/tests/hook-multiple-ingress.sh new file mode 100644 index 0000000000..bfc5010e9d --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/tests/hook-multiple-ingress.sh @@ -0,0 +1,37 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 60 + + kubectl get ingress -A + + export catalog_endpoint=$(kubectl get ingress -n catalog catalog-multi -o json | jq -r '.status.loadBalancer.ingress[0].hostname') + + if [ -z "$catalog_endpoint" ]; then + >&2 echo "Failed to retrieve hostname from Ingress" + exit 1 + fi + + EXIT_CODE=0 + + timeout -s TERM 400 bash -c \ + 'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${catalog_endpoint}/catalog/products)" != "200" ]];\ + do sleep 20;\ + done' || EXIT_CODE=$? + + echo "Timeout completed" + + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "Ingress did not become available after 400 seconds" + echo "Was checking $catalog_endpoint" + echo "" + kubectl get ingress -A + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/keda/configure-keda.md b/website/docs/fastpaths/developer/keda/configure-keda.md new file mode 100644 index 0000000000..eedc952eb1 --- /dev/null +++ b/website/docs/fastpaths/developer/keda/configure-keda.md @@ -0,0 +1,30 @@ +--- +title: "Configure KEDA" +sidebar_position: 10 +--- + +When installed, KEDA creates several custom resources. One of those resources, a `ScaledObject`, enables you to map an external event source to a Deployment or StatefulSet for scaling. In this lab, we'll create a `ScaledObject` that targets the `ui` Deployment and scales this workload based on the `RequestCountPerTarget` metric in CloudWatch. + +::yaml{file="manifests/modules/autoscaling/workloads/keda/scaledobject/scaledobject.yaml" paths="spec.scaleTargetRef,spec.minReplicaCount,spec.maxReplicaCount,spec.triggers"} + +1. This is the resource KEDA will scale. The `name` is the name of the deployment you are targeting and your `ScaledObject` must be in the same namespace as the Deployment +2. The minimum number of replicas that KEDA will scale the deployment to +3. The maximum number of replicas that KEDA will scale the deployment to +4. 
The `expression` uses [CloudWatch Metrics Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch-metrics-insights-querylanguage.html) syntax to select your target metric. When the `targetMetricValue` is exceeded, KEDA will scale out the workload to support the increased load. In our case, if the `RequestCountPerTarget` is greater than 100, KEDA will scale the deployment. + +More details on the AWS CloudWatch scaler can be found [here](https://keda.sh/docs/scalers/aws-cloudwatch/). + +First we need to gather some information about the Application Load Balancer (ALB) and Target Group that were created as part of the lab pre-requisites. + +```bash +$ export ALB_ARN=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-uiauto-`) == `true`]' | jq -r .[0].LoadBalancerArn) +$ export ALB_ID=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-uiauto-`) == `true`]' | jq -r .[0].LoadBalancerArn | awk -F "loadbalancer/" '{print $2}') +$ export TARGETGROUP_ID=$(aws elbv2 describe-target-groups --load-balancer-arn $ALB_ARN | jq -r '.TargetGroups[0].TargetGroupArn' | awk -F ":" '{print $6}') +``` + +Now we can use those values to update the configuration of our `ScaledObject` and create the resource in the cluster. + +```bash +$ kubectl kustomize ~/environment/eks-workshop/modules/autoscaling/workloads/keda/scaledobject \ + | envsubst | kubectl apply -f- +``` diff --git a/website/docs/fastpaths/developer/keda/index.md b/website/docs/fastpaths/developer/keda/index.md new file mode 100644 index 0000000000..58a6cf52e8 --- /dev/null +++ b/website/docs/fastpaths/developer/keda/index.md @@ -0,0 +1,19 @@ +--- +title: "Autoscaling applications" +chapter: true +sidebar_position: 80 +description: "Automatically scale workloads on Amazon Elastic Kubernetes Service with KEDA" +--- + +:::tip What's been set up for you +After the Amazon EKS Auto Mode cluster was created, an IAM role was configured for the KEDA Operator +::: + +Autoscaling monitors your workloads and automatically adjusts capacity to maintain steady, predictable performance while also optimizing for cost. When using Kubernetes there are two main relevant mechanisms which can be used to scale automatically: + +- **Compute:** As pods are scaled the underlying compute in a Kubernetes cluster must also adapt by adjusting the number or size of worker nodes used to run the Pods. +- **Pods:** Since pods are used to run workloads in a Kubernetes cluster, scaling a workload is primarily done by scaling Pods either horizontally or vertically in response to scenarios such as changes in load on a given application. + +In this lab, we'll look at using the [Kubernetes Event-Driven Autoscaler (KEDA)](https://keda.sh/) to scale pods in a deployment. There is also another option for that purpose, Horizontal Pod Autoscaler (HPA), which can be used to horizontally scale pods based on average CPU utilization. But sometimes workloads need to scale based on external events or metrics. For that, KEDA provides the capability to scale your workload based on events from various event sources, such as the queue length in Amazon SQS or other metrics in CloudWatch. KEDA supports 60+ [scalers](https://keda.sh/docs/scalers/) for various metrics systems, databases, messaging systems, and more. + +KEDA is a lightweight workload that can be deployed into a Kubernetes cluster using a Helm chart. 
KEDA works with standard Kubernetes components like the Horizontal Pod Autoscaler to scale a Deployment or StatefulSet. With KEDA, you selectively choose the workloads you want to scale with these various event sources. diff --git a/website/docs/fastpaths/developer/keda/install-keda.md b/website/docs/fastpaths/developer/keda/install-keda.md new file mode 100644 index 0000000000..ec129ba88d --- /dev/null +++ b/website/docs/fastpaths/developer/keda/install-keda.md @@ -0,0 +1,56 @@ +--- +title: "Installing KEDA" +sidebar_position: 5 +--- + +First let's install KEDA using Helm. An IAM role with permissions to access metric data within CloudWatch was created when the Auto Mode cluster was set up. + +With Amazon EKS Auto Mode, we'll use EKS Pod Identity instead of IRSA. Let's create the Pod Identity association: + +```bash wait=10 +$ export KEDA_ROLE_ARN=arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EKS_CLUSTER_AUTO_NAME}-keda +$ aws eks create-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME} \ + --role-arn ${KEDA_ROLE_ARN} \ + --namespace keda --service-account keda-operator | jq . +``` + +Now install KEDA: + +```bash timeout=300 +$ export KEDA_CHART_VERSION=$(grep -oP 'default\s*=\s*"\K[^"]+' ~/environment/eks-workshop/modules/autoscaling/workloads/keda/.workshop/terraform/vars.tf | tail -1) +$ helm repo add kedacore https://kedacore.github.io/charts +$ helm upgrade --install keda kedacore/keda \ + --version "${KEDA_CHART_VERSION}" \ + --namespace keda \ + --create-namespace \ + --wait +Release "keda" does not exist. Installing it now. +NAME: keda +LAST DEPLOYED: [...] +NAMESPACE: kube-system +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +[...] +``` + +After the Helm install, KEDA will be running as several deployments in the keda namespace: + +```bash +$ kubectl rollout restart deployment/keda-operator -n keda +$ kubectl rollout status deployment/keda-operator -n keda --timeout=120s +$ kubectl get deployment -n keda +NAME READY UP-TO-DATE AVAILABLE AGE +keda-admission-webhooks 1/1 1 1 105s +keda-operator 1/1 1 1 105s +keda-operator-metrics-apiserver 1/1 1 1 105s +``` + +Each KEDA deployment performs a different key role: + +1. Agent (keda-operator) - controls the scaling of the workload +2. Metrics (keda-operator-metrics-server) - acts as a Kubernetes metrics server, providing access to external metrics +3. Admission Webhooks (keda-admission-webhooks) - validates resource configuration to prevent misconfiguration (ex. multiple ScaledObjects targeting the same workload) + +Now we can move on to configuring KEDA to scale our workload. diff --git a/website/docs/fastpaths/developer/keda/test-keda.md b/website/docs/fastpaths/developer/keda/test-keda.md new file mode 100644 index 0000000000..649f0fc7fa --- /dev/null +++ b/website/docs/fastpaths/developer/keda/test-keda.md @@ -0,0 +1,62 @@ +--- +title: "Generate load" +sidebar_position: 20 +--- + +To observe KEDA scale the deployment in response to the KEDA `ScaledObject` we have configured, we need to generate some load on our application. We'll do that by calling the home page of the workload with [hey](https://github.com/rakyll/hey). 
+ +The command below will run the load generator with: + +- 3 workers running concurrently +- Sending 5 queries per second each +- Running for a maximum of 10 minutes + +```bash hook=keda-pod-scaleout hookTimeout=660 wait=300 +$ export ALB_HOSTNAME=$(kubectl get ingress ui-auto -n ui -o yaml | yq .status.loadBalancer.ingress[0].hostname) +$ kubectl run load-generator \ + --image=williamyeh/hey:latest \ + --restart=Never -- -c 3 -q 5 -z 10m http://$ALB_HOSTNAME/home +``` + +Based on the `ScaledObject`, KEDA creates an HPA resource and provides the required metrics to allow the HPA to scale the workload. Now that we have requests hitting our application we can watch the HPA resource to follow its progress: + +```bash test=false +$ kubectl get hpa keda-hpa-ui-hpa -n ui --watch +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +keda-hpa-ui-hpa Deployment/ui 7/100 (avg) 1 10 1 7m58s +keda-hpa-ui-hpa Deployment/ui 778/100 (avg) 1 10 1 8m33s +keda-hpa-ui-hpa Deployment/ui 194500m/100 (avg) 1 10 4 8m48s +keda-hpa-ui-hpa Deployment/ui 97250m/100 (avg) 1 10 8 9m3s +keda-hpa-ui-hpa Deployment/ui 625m/100 (avg) 1 10 8 9m18s +keda-hpa-ui-hpa Deployment/ui 91500m/100 (avg) 1 10 8 9m33s +keda-hpa-ui-hpa Deployment/ui 92125m/100 (avg) 1 10 8 9m48s +keda-hpa-ui-hpa Deployment/ui 750m/100 (avg) 1 10 8 10m +keda-hpa-ui-hpa Deployment/ui 102625m/100 (avg) 1 10 8 10m +keda-hpa-ui-hpa Deployment/ui 113625m/100 (avg) 1 10 8 11m +keda-hpa-ui-hpa Deployment/ui 90900m/100 (avg) 1 10 10 11m +keda-hpa-ui-hpa Deployment/ui 91500m/100 (avg) 1 10 10 12m +``` + +Once you're satisfied with the autoscaling behavior, you can end the watch with `Ctrl+C` and stop the load generator like so: + +```bash +$ kubectl delete pod load-generator --ignore-not-found +``` + +As the load generator terminates, notice that the HPA will slowly bring the replica count to min number based on its configuration. + +You can also view the load test results in the CloudWatch console: + + + +To reproduce this graph in your account, from the cloudwatch metrics console, you'll need to add 2 metrics and configure accordingly the graph: + +1. Under **Metrics**, ensure you're in the same region as your cluster. +1. Under **ApplicationELB > Per AppELB Metrics, per TG Metrics**, select `RequestCount` and `RequestCountPerTarget` +1. Click on the **Graphed Metrics (2)** tab, for each metrics + 1. Change **Statistic** from `Average` to `Sum` + 1. Change **Period** from `5 minutes` to `1 minute` + +From the results you can see that initially all of the load was handled by a single pod, but as KEDA begins to scale the workload the requests are distributed across the additional pods added to the workload as they become valid target in the load balancer target group. If you let the load-generator pod run for the full 10 minutes, you'll see results similar to this. + +![Insights](/img/keda/keda-cloudwatch.png) diff --git a/website/docs/fastpaths/developer/keda/tests/hook-keda-pod-scaleout.sh b/website/docs/fastpaths/developer/keda/tests/hook-keda-pod-scaleout.sh new file mode 100644 index 0000000000..8200f555ae --- /dev/null +++ b/website/docs/fastpaths/developer/keda/tests/hook-keda-pod-scaleout.sh @@ -0,0 +1,21 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + EXIT_CODE=0 + + timeout -s TERM 600 bash -c \ + 'while [[ $(kubectl get pod -l app.kubernetes.io/instance=ui -n ui -o json | jq -r ".items | length") -lt 2 ]];\ + do sleep 30;\ + done' || EXIT_CODE=$? 
+ + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "Pods did not scale within 600 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/keda/tests/hook-validate-ingress.sh b/website/docs/fastpaths/developer/keda/tests/hook-validate-ingress.sh new file mode 100644 index 0000000000..e573766a3e --- /dev/null +++ b/website/docs/fastpaths/developer/keda/tests/hook-validate-ingress.sh @@ -0,0 +1,32 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 20 + + export ui_endpoint=$(kubectl -n kube-system get ingress -n ui ui -o json | jq -r '.status.loadBalancer.ingress[0].hostname') + + if [ -z "$ui_endpoint" ]; then + >&2 echo "Failed to retrieve hostname from Ingress" + exit 1 + fi + + EXIT_CODE=0 + + timeout -s TERM 400 bash -c \ + 'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${ui_endpoint}/home)" != "200" ]];\ + do sleep 20;\ + done' || EXIT_CODE=$? + + echo "Timeout completed" + + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "Ingress did not become available after 400 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/pod-logging/fluent-bit-cloudwatch.md b/website/docs/fastpaths/developer/pod-logging/fluent-bit-cloudwatch.md new file mode 100644 index 0000000000..37ff3647c9 --- /dev/null +++ b/website/docs/fastpaths/developer/pod-logging/fluent-bit-cloudwatch.md @@ -0,0 +1,70 @@ +--- +title: "Verifying the logs in CloudWatch" +sidebar_position: 40 +pagination_next: fastpaths/explore/index +--- + +In this lab exercise, we'll see how to check the Kubernetes pod logs forwarded by the Fluent Bit agent deployed on each node to Amazon CloudWatch Logs. The deployed application components write logs to `stdout`, which are saved in the `/var/log/containers/*.log` path on each node. + +First, lets recycle the pods for the `ui` component to make sure fresh logs are written since we enabled Fluent Bit. + +```bash timeout=180 +$ kubectl delete pod -n ui --all +$ kubectl rollout status deployment/ui \ + -n ui --timeout 120s +deployment "ui" successfully rolled out +``` + +Meanwhile, if you check the Fluent Bit DaemonSet logs, you will observe that a new log stream is created under the existing log group for the `ui` component. + +```bash hook=pods-log +$ kubectl logs daemonset.apps/aws-for-fluent-bit -n amazon-cloudwatch +... +[2025/04/15 12:40:10] [ info] [filter:kubernetes:kubernetes.0] token updated +[2025/04/15 12:40:10] [ info] [input:tail:tail.0] inotify_fs_add(): inode=16895961 watch_fd=12 name=/var/log/containers/ui-8564fc5cfb-qb7td_ui_ui-4ace14944409ee785708c9031b4c2243bfa065ffe0cd320e219131aa33541a1e.log +[2025/04/15 12:40:11] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] Creating log stream ui-8564fc5cfb-qb7td.ui in log group /aws/eks/fluentbit-cloudwatch/workload/ui +[2025/04/15 12:40:11] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] Created log stream ui-8564fc5cfb-qb7td.ui + +``` + +Now we can check that our `ui` component is creating logs by directly using `kubectl logs`: + +```bash +$ kubectl logs -n ui deployment/ui +Picked up JAVA_TOOL_OPTIONS: -javaagent:/opt/aws-opentelemetry-agent.jar +OpenJDK 64-Bit Server VM warning: Sharing is only supported for boot loader classes because bootstrap classpath has been appended +[otel.javaagent 2023-07-03 23:39:18:499 +0000] [main] INFO io.opentelemetry.javaagent.tooling.VersionLogger - opentelemetry-javaagent - version: 1.24.0-aws + + . 
____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v3.0.6) + +2023-07-03T23:39:20.472Z INFO 1 --- [ main] c.a.s.u.UiApplication : Starting UiApplication v0.0.1-SNAPSHOT using Java 17.0.7 with PID 1 (/app/app.jar started by appuser in /app) +2023-07-03T23:39:20.488Z INFO 1 --- [ main] c.a.s.u.UiApplication : No active profile set, falling back to 1 default profile: "default" +2023-07-03T23:39:24.985Z WARN 1 --- [ main] o.s.b.a.e.EndpointId : Endpoint ID 'fail-cart' contains invalid characters, please migrate to a valid format. +2023-07-03T23:39:25.132Z INFO 1 --- [ main] o.s.b.a.e.w.EndpointLinksResolver : Exposing 15 endpoint(s) beneath base path '/actuator' +2023-07-03T23:39:25.567Z INFO 1 --- [ main] o.s.b.w.e.n.NettyWebServer : Netty started on port 8080 +2023-07-03T23:39:25.599Z INFO 1 --- [ main] c.a.s.u.UiApplication : Started UiApplication in 5.877 seconds (process running for 7.361) +``` + +Open the CloudWatch Logs console to check these logs are appearing: + + + +Filter for **fluentbit-cloudwatch** to find the log groups created by Fluent Bit: + +![CloudWatch Log Group](/img/fastpaths/developer/pod-logging/log-group.webp) + +Select `/eks-workshop-auto/worker-fluentbit-logs-*` to view the log streams, each one corresponds to an individual pod: + +![CloudWatch Log Stream](/img/fastpaths/developer/pod-logging/log-streams.webp) + +You can expand one of the log entries to see the full JSON payload: + +![Pod logs](/img/fastpaths/developer/pod-logging/logs.webp) + +This concludes the EKS workshop's fast path for developers. diff --git a/website/docs/fastpaths/developer/pod-logging/fluentbit-setup.md b/website/docs/fastpaths/developer/pod-logging/fluentbit-setup.md new file mode 100644 index 0000000000..9b1d2db125 --- /dev/null +++ b/website/docs/fastpaths/developer/pod-logging/fluentbit-setup.md @@ -0,0 +1,118 @@ +--- +title: "Using Fluent Bit" +sidebar_position: 30 +--- + +In this lab, we will configure [Fluent Bit](https://fluentbit.io/) logging agent as a DaemonSet in the EKS Auto Mode cluster. + +Fluent Bit is a lightweight log processor and forwarder that allows you to collect data and logs from different sources, enrich them with filters and send them to multiple destinations like CloudWatch, Kinesis Data Firehose, Kinesis Data Streams and Amazon OpenSearch Service. + +AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. The [AWS for Fluent Bit](https://github.com/aws/aws-for-fluent-bit) image is available on the [Amazon ECR Public Gallery](https://gallery.ecr.aws/aws-observability/aws-for-fluent-bit). + +Fluent Bit can be used to ship logs to various destinations. However, in this lab, we will see how it is leveraged to ship the container logs to CloudWatch. + +![Fluent-bit Architecture](/img/fastpaths/developer/pod-logging/fluentbit-architecture.png) + +In the following section, we will see how to validate Fluent Bit agent is already running as a DaemonSet to send the containers / Pods logs to CloudWatch Logs. Read more about how to [deploy Fluent Bit to send logs from containers to CloudWatch Logs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-logs-FluentBit.html#Container-Insights-FluentBit-troubleshoot). + +First, we can validate the resources created for Fluent Bit by entering the following command. 
Each node should have one Pod: + +```bash hook=get-all +$ kubectl get all -n amazon-cloudwatch -l app.kubernetes.io/name=aws-for-fluent-bit +NAME READY STATUS RESTARTS AGE +pod/aws-for-fluent-bit-jg4jr 1/1 Running 0 94s +pod/aws-for-fluent-bit-lvp9f 1/1 Running 0 95s +pod/aws-for-fluent-bit-q959s 1/1 Running 0 94s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/aws-for-fluent-bit ClusterIP 172.16.41.165 2020/TCP 96s + +NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +daemonset.apps/aws-for-fluent-bit 3 3 3 3 3 96s +``` + +The ConfigMap for `aws-for-fluent-bit` is configured to stream the contents of files in the directory `/var/log/containers/*.log` from each node to the CloudWatch log group `/eks-workshop/worker-fluentbit-logs`. + +```bash hook=desc-cm +$ kubectl describe configmap -n amazon-cloudwatch -l app.kubernetes.io/name=aws-for-fluent-bit +Name: aws-for-fluent-bit +Namespace: kube-system +Labels: app.kubernetes.io/instance=aws-for-fluent-bit + app.kubernetes.io/managed-by=Helm + app.kubernetes.io/name=aws-for-fluent-bit + app.kubernetes.io/version=2.31.12.20231011 + helm.sh/chart=aws-for-fluent-bit-0.1.32 +Annotations: meta.helm.sh/release-name: aws-for-fluent-bit + meta.helm.sh/release-namespace: kube-system + +Data +==== +fluent-bit.conf: +---- +[SERVICE] + HTTP_Server On + HTTP_Listen 0.0.0.0 + HTTP_PORT 2020 + Health_Check On + HC_Errors_Count 5 + HC_Retry_Failure_Count 5 + HC_Period 5 + + Parsers_File /fluent-bit/parsers/parsers.conf +[INPUT] + Name tail + Tag kube.* + Path /var/log/containers/*.log + DB /var/log/flb_kube.db + multiline.parser docker, cri + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 +[FILTER] + Name kubernetes + Match kube.* + Kube_URL https://kubernetes.default.svc.cluster.local:443 + Merge_Log On + Merge_Log_Key data + Keep_Log On + K8S-Logging.Parser On + K8S-Logging.Exclude On + Buffer_Size 32k +[OUTPUT] + Name cloudwatch_logs + Match * + region us-west-2 + log_group_name /aws/eks/eks-workshop/aws-fluentbit-logs-20250415195811907400000002 + log_stream_prefix fluentbit- +... +``` + +Use the `kubectl logs` command to check the Fluent Bit Pod logs, where you will observe new CloudWatch Log groups and streams are created for the services. + +```bash hook=pods-log +$ kubectl logs daemonset.apps/aws-for-fluent-bit -n amazon-cloudwatch + +Found 3 pods, using pod/aws-for-fluent-bit-4mnbw +AWS for Fluent Bit Container Image Version 2.28.4 +Fluent Bit v1.9.9 +* Copyright (C) 2015-2022 The Fluent Bit Authors +* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd +* https://fluentbit.io + +[2025/04/14 16:15:40] [ info] [fluent bit] version=1.9.9, commit=5fcfe330e5, pid=1 +[2025/04/14 16:15:40] [ info] [storage] version=1.3.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128 +[2025/04/14 16:15:40] [ info] [cmetrics] version=0.3.7 +... +[2025/04/14 16:15:40] [ info] [filter:kubernetes:kubernetes.0] connectivity OK +[2025/04/14 16:15:40] [ info] [sp] stream processor started +[2025/04/14 16:15:40] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] worker #0 started +... +[2025/04/14 16:16:01] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] Creating log stream ui-8564fc5cfb-54llk.ui in log group /aws/eks/fluentbit-cloudwatch/workload/ui +[2025/04/14 16:16:01] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] Log Group /aws/eks/fluentbit-cloudwatch/workload/ui not found. Will attempt to create it. 
+[2025/04/14 16:16:01] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] Creating log group /aws/eks/fluentbit-cloudwatch/workload/ui +[2025/04/14 16:16:01] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] Created log group /aws/eks/fluentbit-cloudwatch/workload/ui +[2025/04/14 16:16:01] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] Creating log stream ui-8564fc5cfb-54llk.ui in log group /aws/eks/fluentbit-cloudwatch/workload/ui +[2025/04/14 16:16:01] [ info] [output:cloudwatch_logs:cloudwatch_logs.0] Created log stream ui-8564fc5cfb-54llk.ui +``` + +In the next lab, we will verify these logs in CloudWatch. diff --git a/website/docs/fastpaths/developer/pod-logging/index.md b/website/docs/fastpaths/developer/pod-logging/index.md new file mode 100644 index 0000000000..8edd5b780f --- /dev/null +++ b/website/docs/fastpaths/developer/pod-logging/index.md @@ -0,0 +1,19 @@ +--- +title: "Accessing workload logs" +sidebar_position: 90 +description: "Capture workload logs from pods running on Amazon Elastic Kubernetes Service." +--- + +:::tip What's been set up for you +Your Amazon EKS Auto Mode cluster is configured with Fluent Bit log collection agent. +::: + +According to the [Twelve-Factor App manifesto](https://12factor.net/), which provides the gold standard for architecting modern applications, containerized applications should output their [logs to stdout and stderr](https://12factor.net/logs). This is also considered the best practice in Kubernetes. + +Application logs are developers' best friends when they need to debug application behavior. However, Kubernetes doesn’t provide a native solution to collect and store logs out of the box. It just configures the container runtime to save logs in JSON format on the local filesystem. Container runtime – like Docker – redirects containers' `stdout` and `stderr` streams to a logging driver. In Kubernetes, container logs are written to `/var/log/pods/*.log` on the node. These logs can be accessed using `kubectl logs myapp` command, where `myapp` is a pod or a deployment running in the cluster. But accessing logs in this manner is not scalable in production. For that, we need a cluster-wide log collector system like Fluent Bit that can tail these log files on the node and ship logs to a log retention and searching system like CloudWatch. These log collector systems usually run as DaemonSets on worker nodes. + +In this lab, we'll show how a log agent, Fluent Bit, can be set up to collect application logs from nodes in EKS and send them to CloudWatch Logs. + +:::info +If you are using the CDK Observability Accelerator then check out the [AWS for Fluent Bit Addon](https://aws-quickstart.github.io/cdk-eks-blueprints/addons/aws-for-fluent-bit/). AWS for FluentBit addon can be configured to forward logs to multiple AWS destinations including CloudWatch, Amazon Kinesis, and AWS OpenSearch. 
+::: diff --git a/website/docs/fastpaths/developer/pod-logging/tests/hook-desc-cm.sh b/website/docs/fastpaths/developer/pod-logging/tests/hook-desc-cm.sh new file mode 100644 index 0000000000..1716b1d806 --- /dev/null +++ b/website/docs/fastpaths/developer/pod-logging/tests/hook-desc-cm.sh @@ -0,0 +1,17 @@ +set -e + +before() { + echo "noop" +} + +after() { + sleep 10 + echo "get fluent-bit ds" + if [[ $TEST_OUTPUT != *"app.kubernetes.io/name=aws-for-fluent-bit"* ]]; then + echo "Failed to match expected output" + echo $TEST_OUTPUT + exit 1 + fi +} + +"$@" \ No newline at end of file diff --git a/website/docs/fastpaths/developer/pod-logging/tests/hook-get-all.sh b/website/docs/fastpaths/developer/pod-logging/tests/hook-get-all.sh new file mode 100644 index 0000000000..3b4f40915e --- /dev/null +++ b/website/docs/fastpaths/developer/pod-logging/tests/hook-get-all.sh @@ -0,0 +1,17 @@ +set -e + +before() { + echo "noop" +} + +after() { + sleep 10 + echo "get fluent-bit ds" + if [[ $TEST_OUTPUT != *"daemonset.apps/aws-for-fluent-bit"* ]]; then + echo "Failed to match expected output" + echo $TEST_OUTPUT + exit 1 + fi +} + +"$@" \ No newline at end of file diff --git a/website/docs/fastpaths/developer/pod-logging/tests/hook-pods-log.sh b/website/docs/fastpaths/developer/pod-logging/tests/hook-pods-log.sh new file mode 100644 index 0000000000..30c42ce9ad --- /dev/null +++ b/website/docs/fastpaths/developer/pod-logging/tests/hook-pods-log.sh @@ -0,0 +1,26 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + # Check all fluent-bit pods for "Created log stream" messages + # Some pods may not have created streams yet depending on which node they're on + for i in $(seq 1 6); do + for pod in $(kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=aws-for-fluent-bit -o jsonpath='{.items[*].metadata.name}'); do + LOG_OUTPUT=$(kubectl logs -n amazon-cloudwatch "$pod" 2>/dev/null || true) + if [[ "$LOG_OUTPUT" == *"Created log stream"* ]]; then + echo "Found 'Created log stream' in pod $pod" + return 0 + fi + done + echo "Attempt $i: 'Created log stream' not found in any pod, waiting..." + sleep 10 + done + + echo "Failed to find 'Created log stream' in any fluent-bit pod" + exit 1 +} + +"$@" diff --git a/website/docs/fastpaths/developer/pod-logging/tests/hook-suite.sh b/website/docs/fastpaths/developer/pod-logging/tests/hook-suite.sh new file mode 100644 index 0000000000..d437bae9e5 --- /dev/null +++ b/website/docs/fastpaths/developer/pod-logging/tests/hook-suite.sh @@ -0,0 +1,11 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + echo "Pod logging lab complete" +} + +"$@" diff --git a/website/docs/fastpaths/developer/tests/hook-suite.sh b/website/docs/fastpaths/developer/tests/hook-suite.sh new file mode 100644 index 0000000000..1da2c6c38a --- /dev/null +++ b/website/docs/fastpaths/developer/tests/hook-suite.sh @@ -0,0 +1,11 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + echo "Developer essentials complete" +} + +"$@" diff --git a/website/docs/fastpaths/explore/index.md b/website/docs/fastpaths/explore/index.md new file mode 100644 index 0000000000..539588d5bc --- /dev/null +++ b/website/docs/fastpaths/explore/index.md @@ -0,0 +1,13 @@ +--- +title: "Explore" +sidebar_position: 80 +pagination_prev: null +--- + +Congratulations, you're done with the learning paths! Don't forget to leave us your feedback through the application! 
+ +If you have some time left, why not explore additional content in our [modular workshop modules](/docs/introduction)? These cover deeper topics like networking, security, observability, and more. Don't forget to run the `prepare-environment` command at the start of each module. + +import HomepageModuleLink from "@site/src/components/HomepageModuleLink"; + + diff --git a/website/docs/fastpaths/getting-started/about.md b/website/docs/fastpaths/getting-started/about.md new file mode 100644 index 0000000000..7c20d9e61a --- /dev/null +++ b/website/docs/fastpaths/getting-started/about.md @@ -0,0 +1,58 @@ +--- +title: Application architecture +sidebar_position: 10 +--- + +Most of the labs in this workshop use a common sample application to provide actual container components that we can work on during the exercises. The sample application models a simple web store application, where customers can browse a catalog, add items to their cart and complete an order through the checkout process. + + + + + +The application has several components and dependencies: + + + +| Component | Description | +| --------- | ----------- | +| UI | Provides the front end user interface and aggregates API calls to the various other services. | +| Catalog | API for product listings and details | +| Cart | API for customer shopping carts | +| Checkout | API to orchestrate the checkout process | +| Orders | API to receive and process customer orders | + +Initially, we'll deploy the application in a manner that is self-contained in the Amazon EKS cluster, without using any AWS services like load balancers or a managed database. Over the course of the labs we'll leverage different features of EKS to take advantage of broader AWS services and features for our retail store. + +You can find the full source code for the sample application on [GitHub](https://github.com/aws-containers/retail-store-sample-app). + +## Container images + +Each component is packaged as a container image and published to Amazon ECR Public: + +| Component | ECR Public repository | Dockerfile | +| --------- | --------------------- | ---------- | +| UI | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-ui) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/ui/Dockerfile) | +| Catalog | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-catalog) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/catalog/Dockerfile) | +| Cart | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-cart) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/cart/Dockerfile) | +| Checkout | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-checkout) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/checkout/Dockerfile) | +| Orders | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-orders) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/orders/Dockerfile) | + +## Kubernetes architecture + +Let's explore how the **catalog** component maps to Kubernetes resources: + + + +There are a number of things to consider in this diagram: + +- The application that provides the catalog API runs as a [Pod](https://kubernetes.io/docs/concepts/workloads/pods/), which is the smallest deployable unit in Kubernetes. Application Pods will run the container images we outlined in the previous section. 
+- The Pods that run for the catalog component are created by a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) which may manage one or more "replicas" of the catalog Pod, allowing it to scale horizontally. +- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/) is an abstract way to expose an application running as a set of Pods, and this allows our catalog API to be called by other components inside the Kubernetes cluster. Each Service is given its own DNS entry. +- We're starting this workshop with a MySQL database that runs inside our Kubernetes cluster as a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), which is designed to manage stateful workloads. +- All of these Kubernetes constructs are grouped in their own dedicated catalog Namespace. Each of the application components has its own Namespace. + +Each of the components in the microservices architecture is conceptually similar to the catalog, using Deployments to manage application workload Pods and Services to route traffic to those Pods. If we expand out our view of the architecture we can consider how traffic is routed throughout the broader system: + + + +The **ui** component receives HTTP requests from, for example, a user's browser. It then makes HTTP requests to other API components in the architecture to fulfill that request and returns a response to the user. Each of the downstream components may have their own data stores or other infrastructure. The Namespaces are a logical grouping of the resources for each microservice and also act as a soft isolation boundary, which can be used to effectively implement controls using Kubernetes RBAC and Network Policies. diff --git a/website/docs/fastpaths/getting-started/finish.md b/website/docs/fastpaths/getting-started/finish.md new file mode 100644 index 0000000000..dea7f30437 --- /dev/null +++ b/website/docs/fastpaths/getting-started/finish.md @@ -0,0 +1,112 @@ +--- +title: Other components +sidebar_position: 50 +pagination_next: null +--- + +In this lab exercise, we'll deploy the rest of the sample application efficiently using the power of Kustomize. The following kustomization file shows how you can reference other kustomizations and deploy multiple components together: + +```file +manifests/base-application/kustomization.yaml +``` + +:::tip +Notice that the catalog API is in this kustomization, didn't we already deploy it? + +Because Kubernetes uses a declarative mechanism we can apply the manifests for the catalog API again and expect that because all of the resources are already created Kubernetes will take no action. +::: + +Apply this kustomization to our cluster to deploy the rest of the components: + +```bash wait=10 +$ kubectl apply -k ~/environment/eks-workshop/base-application +``` + +:::info +As you deploy additional workloads, EKS Auto Mode will automatically provision additional compute instances as needed to accommodate the new Pods. +::: + +Watch as EKS Auto Mode provisions a node for your workload. You'll see EKS Auto Mode provision a second node in the general-purpose node pool for our applications. It will also consolidate the system node as there is capacity to move the pods around. + +```bash timeout=180 test=false +$ kubectl get nodes --watch +... +NAME STATUS ROLES AGE VERSION +i-082b0e8be0994671a NotReady 1s v1.33.4-eks-e386d34 +... 
+i-082b0e8be0994671a Ready 2s v1.33.4-eks-e386d34 +``` + +Depending on when you run the previous command, you may see a node in either `NotReady` or `Ready` status. However, you should see the new node with the lowest age in any case. Press `Ctrl+C` to stop watching once you see the node appear. The Pods will now be running: + +Kubernetes uses labels for many purposes, for example the nodes have a label that indicates their NodePool, you can inspect them via this command: +```bash +$ kubectl get nodes -o json | jq -c '.items[] | {name: .metadata.name, nodepool: .metadata.labels."karpenter.sh/nodepool"}' +{"name":"i-082b0e8be0994671a","nodepool":"general-purpose"} +{"name":"i-0af75b7f0f828f36c","nodepool":"general-purpose"} +``` + + +After this is complete, we can use `kubectl wait` to make sure all the components have started before we proceed: + +```bash timeout=200 +$ kubectl wait --for=condition=Ready --timeout=180s pods \ + -l app.kubernetes.io/created-by=eks-workshop -A +``` + +We'll now have a Namespace for each of our application components: + +```bash +$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop +NAME STATUS AGE +carts Active 62s +catalog Active 7m17s +checkout Active 62s +orders Active 62s +other Active 62s +ui Active 62s +``` + +We can also see all of resources created for the components: + +```bash +$ kubectl get all -l app.kubernetes.io/created-by=eks-workshop -A +NAMESPACE NAME READY STATUS RESTARTS AGE +carts pod/carts-68d496fff8-h2w84 1/1 Running 1 (75s ago) 89s +carts pod/carts-dynamodb-995f7768c-s6wv2 1/1 Running 0 89s +catalog pod/catalog-5fdcc8c65-rrcbh 1/1 Running 3 (68s ago) 89s +catalog pod/catalog-mysql-0 1/1 Running 0 88s +checkout pod/checkout-5b885fb57c-8bkf2 1/1 Running 0 89s +checkout pod/checkout-redis-69cb79ff4d-vxjlh 1/1 Running 0 89s +orders pod/orders-74f89d6dbd-pw58j 1/1 Running 0 88s +orders pod/orders-postgresql-0 1/1 Running 0 88s +ui pod/ui-5989474687-tqps9 1/1 Running 0 88s + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +carts service/carts ClusterIP 172.20.64.186 80/TCP 89s +carts service/carts-dynamodb ClusterIP 172.20.187.59 8000/TCP 89s +catalog service/catalog ClusterIP 172.20.242.75 80/TCP 89s +catalog service/catalog-mysql ClusterIP 172.20.4.209 3306/TCP 89s +... +``` + +The sample application is now deployed and ready to provide a foundation for us to use in the rest of the labs in this workshop! + +## Next Steps + +Now that we have deployed our sample application, pick one of the two options to define your learning journey. + + diff --git a/website/docs/fastpaths/getting-started/first.md b/website/docs/fastpaths/getting-started/first.md new file mode 100644 index 0000000000..d744e4bf4b --- /dev/null +++ b/website/docs/fastpaths/getting-started/first.md @@ -0,0 +1,146 @@ +--- +title: Deploying our first component +sidebar_position: 40 +--- + +The sample application is composed of a set of Kubernetes manifests organized in a way that can be easily applied with Kustomize. Kustomize is an open-source tool also provided as a native feature of the `kubectl` CLI. This workshop uses Kustomize to apply changes to Kubernetes manifests, making it easier to understand changes to manifest files without needing to manually edit YAML. As we work through the various modules of this workshop, we'll incrementally apply overlays and patches with Kustomize. 
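+
+If you'd like to see exactly what a kustomization generates before anything is applied, you can render it locally with `kubectl kustomize`. This is purely illustrative and makes no changes to the cluster; the output is the fully rendered YAML for that component (here using the catalog component we'll deploy shortly):
+
+```bash test=false
+$ kubectl kustomize ~/environment/eks-workshop/base-application/catalog
+```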
+ +The easiest way to browse the YAML manifests for the sample application and the modules in this workshop is using the file browser in the IDE: + +![IDE files](/img/fastpaths/getting-started/ide-initial.webp) + +Expanding the `eks-workshop` and then `base-application` items will allow you to browse the manifests that make up the initial state of the sample application: + +![IDE files base](/img/fastpaths/getting-started/ide-base.webp) + +The structure consists of a directory for each application component that was outlined in the **Sample application** section. + +The `modules` directory contains sets of manifests that we will apply to the cluster throughout the subsequent lab exercises: + +![IDE files modules](/img/fastpaths/getting-started/ide-modules.webp) + +Before we do anything, let's inspect the current Namespaces in our EKS cluster: + +```bash +$ kubectl get namespaces +NAME STATUS AGE +default Active 30h +kube-node-lease Active 30h +kube-public Active 30h +kube-system Active 30h +``` + +All of the entries listed are Namespaces for system components. We'll use [Kubernetes labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to filter the Namespaces down to only those we've created: + +```bash +$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop +No resources found +``` + +The first thing we'll do is deploy the catalog component by itself. The manifests for this component can be found in `~/environment/eks-workshop/base-application/catalog`. + +```bash +$ ls ~/environment/eks-workshop/base-application/catalog +configMap.yaml +deployment.yaml +kustomization.yaml +namespace.yaml +secrets.yaml +service-mysql.yaml +service.yaml +serviceAccount.yaml +statefulset-mysql.yaml +``` + +These manifests include the Deployment for the catalog API which expresses the desired state of the catalog API component: + +::yaml{file="manifests/base-application/catalog/deployment.yaml" paths="spec.replicas,spec.template.metadata.labels,spec.template.spec.containers.0.image,spec.template.spec.containers.0.ports,spec.template.spec.containers.0.livenessProbe,spec.template.spec.containers.0.resources"} + +1. Run a single replica +2. Apply labels to the Pods so other resources can refer to them +3. Use the `public.ecr.aws/aws-containers/retail-store-sample-catalog` container image +4. Expose the container on port 8080 named `http` +5. Run [probes/healthchecks](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) against the `/health` path +6. [Requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) a specific amount of CPU and memory so the Kubernetes scheduler can place it on a node with enough available resources + +The manifests also include the Service used by other components to access the catalog API: + +::yaml{file="manifests/base-application/catalog/service.yaml" paths="spec.ports,spec.selector"} + +1. Exposes itself on port 80 and targets the `http` port exposed by the Deployment, which translates to port 8080 +2. 
Selects catalog Pods using labels that match what we expressed in the Deployment above + +Let's create the catalog component: + +```bash +$ kubectl apply -k ~/environment/eks-workshop/base-application/catalog +namespace/catalog created +serviceaccount/catalog created +configmap/catalog created +secret/catalog-db created +service/catalog created +service/catalog-mysql created +deployment.apps/catalog created +statefulset.apps/catalog-mysql created +``` + +:::info EKS Auto Mode Compute Provisioning +When you deploy workloads to Amazon EKS Auto Mode, the cluster automatically provisions EC2 instances to run your Pods. Let's observe this process in real-time. +::: + + + +```bash +$ kubectl get pod -n catalog +NAME READY STATUS RESTARTS AGE +catalog-5fdcc8c65-jkg9f 1/1 Running 2 (87s ago) 2m6s +catalog-mysql-0 1/1 Running 0 2m5s +``` + + + +If the pods aren't ready yet, we can use [kubectl wait](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#wait) for them to become ready. + +```bash timeout=200 +$ kubectl wait --for=condition=Ready pods --all -n catalog --timeout=180s +``` + +Now that the Pods are running, we can [check their logs](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#logs), for example the catalog API: + +:::tip +You can ["follow" the kubectl logs output](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) by using the '-f' option with the command. (Use CTRL-C to stop following the output) +::: + +```bash +$ kubectl logs -n catalog deployment/catalog +``` + +Kubernetes also allows us to easily scale the number of catalog Pods horizontally: + +```bash +$ kubectl scale -n catalog --replicas 3 deployment/catalog +deployment.apps/catalog scaled +$ kubectl wait --for=condition=Ready pods --all -n catalog --timeout=180s +``` + +:::info Auto Mode Automatic Scaling +EKS Auto Mode automatically scales compute capacity to match your workload demands. If you scale to more replicas than the current node can handle, Auto Mode will provision additional nodes automatically. The cluster continuously optimizes node placement and capacity based on your Pod resource requirements. +::: + +The manifests we applied also create a Service for each of our application and MySQL Pods that can be used by other components in the cluster to connect: + +```bash +$ kubectl get svc -n catalog +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +catalog ClusterIP 172.20.83.84 80/TCP 2m48s +catalog-mysql ClusterIP 172.20.181.252 3306/TCP 2m48s +``` + +These Services are internal to the cluster, so we cannot access them from the Internet or even the VPC. However, we can use [exec](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/) to access an existing Pod in the EKS cluster to check the catalog API is working: + +```bash timeout=180 +$ kubectl -n catalog exec -i \ + deployment/catalog -- curl catalog.catalog.svc/catalog/products | jq . +``` + +You should receive back a JSON payload with product information. Congratulations, you've just deployed your first microservice to Kubernetes with EKS! diff --git a/website/docs/fastpaths/getting-started/index.md b/website/docs/fastpaths/getting-started/index.md new file mode 100644 index 0000000000..493c59c64f --- /dev/null +++ b/website/docs/fastpaths/getting-started/index.md @@ -0,0 +1,20 @@ +--- +title: Getting started +sidebar_position: 40 +description: "Deploy sample retail application in EKS." 
+sidebar_custom_props: { "module": true } +--- + +:::tip Before you start +This fast path uses a dedicated Amazon EKS Auto Mode cluster. Amazon EKS Auto Mode extends AWS management of Kubernetes clusters beyond the cluster itself, managing infrastructure that enables smooth operation of your workloads including compute autoscaling, networking, load balancing, DNS, and block storage. + +Prepare your environment for this lab: + +```bash +$ prepare-environment fastpaths/getting-started +``` +::: + +Welcome to the first hands-on lab in the EKS workshop. The goal of this exercise is to familiarize ourselves with the sample application we'll use for many of the coming lab exercises and in doing so touch on some basic concepts related to deploying workloads to EKS. We'll explore the architecture of the application and deploy out the components to our EKS cluster. + +Let's deploy your first workload to the EKS cluster in your lab environment and explore! diff --git a/website/docs/fastpaths/index.md b/website/docs/fastpaths/index.md new file mode 100644 index 0000000000..388cbc6f14 --- /dev/null +++ b/website/docs/fastpaths/index.md @@ -0,0 +1,22 @@ +--- +title: "Introduction" +sidebar_position: 10 +--- + +# Learn with Amazon EKS Auto Mode + +Streamlined, role-based learning experiences that get you hands-on with Amazon EKS quickly. Unlike the comprehensive workshop that covers all EKS features, Auto Mode Paths focus on the most essential capabilities for specific roles like developers and operators. + +## Choose Your Journey + +![Fast Path Options](/img/fastpaths/fast-path-options.png) + +Follow these steps to get started: + +1. **[Setup](/docs/fastpaths/setup)** your environment — whether you're at an AWS event or running this in your own account, follow the setup guide to get your IDE and cluster ready. +2. Learn how to navigate the instructions in the lab using the [Navigating the Labs](/docs/fastpaths/navigating-labs) module. +3. Based on your role and interest, either choose the EKS developer or the EKS operator path for further learning. + +Powered by Amazon EKS Auto Mode, these paths minimize infrastructure setup and management, letting you focus on learning core EKS concepts and deploying workloads faster. Perfect for workshops, events, or self-paced learning when you want immediate hands-on experience. + +Let's get your environment set up! \ No newline at end of file diff --git a/website/docs/fastpaths/navigating-labs.md b/website/docs/fastpaths/navigating-labs.md new file mode 100644 index 0000000000..18dc3dc168 --- /dev/null +++ b/website/docs/fastpaths/navigating-labs.md @@ -0,0 +1,102 @@ +--- +title: Navigating the Labs +sidebar_position: 30 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Let’s review how to navigate this website and the content provided. + +## Structure + +The content of this workshop is made up of: + +1. Individual lab exercises +2. Supporting content that explains concepts related to the labs + +The lab exercises are designed in a way that you can run any modules as a self-contained exercise. Lab exercises will be displayed in the sidebar to the left and are designated by the `LAB` icon. + +## Opening the IDE + +If you're **at an AWS event**, open the IDE from the *Event Outputs* section at the bottom of the Workshop Studio start page. 
+ +Event Outputs copy/paste + +If you're running **in your own account**, find the `IdeUrl` in your CloudFormation stack's Outputs tab — see the [setup guide](/docs/fastpaths/setup/your-account) for details. + +## Starting a Lab + +:::caution +Each lab has a "BEFORE YOU START" section with a `prepare-environment` command you need to run first. Always start from that page — jumping into the middle of a lab will cause unpredictable behavior. +::: + +## Tips + +### Copy/Paste Permission +Depending on your browser, you may need to copy/paste content differently into the Code Server terminal. + + + + The first time you try to paste content in the terminal, you will see a browser pop-up that looks like this: + + Chrome copy/paste + + Click the **Allow** button to enable this functionality. After this, subsequent copy/paste will be straightforward. For this workshop, we recommend using Google Chrome if possible. + + + Every time you try to paste content in the terminal, you will see a small button as shown in the following screenshot adjacent to your mouse pointer. You will need to click on it to actually paste the copied content. + + Firefox/Safari copy/paste + + Additionally, you may also see the following pop-up box on the bottom-right corner of your editor window, which you may close and ignore. + + Firefox/Safari copy/paste + + + +### Terminal commands + +Most of the interaction you will do in this workshop will be done with terminal commands, which you can either manually type or copy/paste to the IDE terminal. You will see terminal commands displayed like this: + +```bash test=false +$ echo "This is an example command" +``` + +Hover your mouse over `echo "This is an example command"` and click to copy that command to your clipboard. + +You will also come across commands with sample output like this: + +```bash test=false +$ date +Fri Aug 30 12:25:58 MDT 2024 +``` + +Using the 'click to copy' function will only copy the command and ignore the sample output. + +Another pattern used in the content is presenting several commands in a single terminal: + +```bash test=false +$ echo "This is an example command" +This is an example command +$ date +Fri Aug 30 12:26:58 MDT 2024 +``` + +In this case you can either copy each command individually or copy all of the commands using the clipboard icon in the top right corner of the terminal window. Give it a shot! + +### Using Kustomize + +[Kustomize](https://kustomize.io/) allows you to manage Kubernetes manifest files using declarative "kustomization" files. It provides the ability to express "base" manifests for your Kubernetes resources and then apply changes using composition, customization and easily making cross-cutting changes across many resources. + +In this workshop, you will see the following two types of commands involving Kustomize. + +1. `kubectl kustomize some-deployment.yaml` - This command **generates** the customized version of the yaml using Kustomize configuration. It does not deploy the resource. + +1. `kubectl apply -k some-deployment.yaml` - This command directly **applies** the customized version of the yaml using Kustomize configuration and deploys the resource. + +You can learn more about Kustomize at https://kustomize.io/. + +## Next Steps + +Now that you're familiar with the format of this workshop, head over to [Getting Started](/docs/fastpaths/getting-started). 
\ No newline at end of file diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/index.md b/website/docs/fastpaths/operator/amazon-eks-pod-identity/index.md new file mode 100644 index 0000000000..54fda98ca1 --- /dev/null +++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/index.md @@ -0,0 +1,17 @@ +--- +title: "Accessing AWS APIs securely from workloads" +sidebar_position: 60 +description: "Manage AWS credentials for your applications running on Amazon Elastic Kubernetes Service with EKS Pod Identity." +--- + +:::tip What's been set up for you +Your Amazon EKS Auto Mode cluster includes: + +- An Amazon DynamoDB table for the carts service +- An IAM role configured for the carts workload to access DynamoDB + +::: + +Applications in a Pod's containers can use a supported AWS SDK or the AWS CLI to make API requests to AWS services using AWS Identity and Access Management (IAM) permissions. For example, applications may need to upload files to an S3 bucket or query a DynamoDB table, and in order to do so, they must sign their AWS API requests with AWS credentials. [EKS Pod Identities](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 Instance Profiles provide credentials to instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance's role, you can associate an IAM role with a Kubernetes Service Account and configure your Pods to use it. Check out EKS documentation [here](https://docs.aws.amazon.com/eks/latest/userguide/pod-id-minimum-sdk.html) for the exact list of SDK versions supported. + +In this module, we'll reconfigure one of the sample application components to leverage the AWS API and provide it with the appropriate privileges. diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/introduction.md b/website/docs/fastpaths/operator/amazon-eks-pod-identity/introduction.md new file mode 100644 index 0000000000..28f9a2eead --- /dev/null +++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/introduction.md @@ -0,0 +1,25 @@ +--- +title: "Introduction" +sidebar_position: 31 +--- + +The `carts` component of our architecture uses Amazon DynamoDB as its storage backend, which is a common use-case you'll find for non-relational databases integration with Amazon EKS. Currently, the carts API is deployed with a [lightweight version of Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html) running as a container in the EKS cluster. + +You can see this by running the following command: + +```bash wait=30 +$ kubectl -n carts get pod +NAME READY STATUS RESTARTS AGE +carts-5d7fc9d8f-xm4hs 1/1 Running 0 14m +carts-dynamodb-698674dcc6-hw2bg 1/1 Running 0 14m +``` + +In the output above, the Pod `carts-dynamodb-698674dcc6-hw2bg` is our lightweight DynamoDB service. We can verify our `carts` application is using this by inspecting its environment: + +```bash timeout=180 +$ kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=service -n carts --timeout=120s +$ kubectl -n carts exec deployment/carts -- env | grep RETAIL_CART_PERSISTENCE_DYNAMODB_ENDPOINT +RETAIL_CART_PERSISTENCE_DYNAMODB_ENDPOINT=http://carts-dynamodb:8000 +``` + +While this approach can be useful for testing, we want to migrate our application to use the fully managed Amazon DynamoDB service to take full advantage of the scale and reliability it offers. 
In the following sections, we'll reconfigure our application to use Amazon DynamoDB and implement EKS Pod Identity to provide secure access to AWS services. diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-enable-dynamo.sh b/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-enable-dynamo.sh new file mode 100644 index 0000000000..448c50be19 --- /dev/null +++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-enable-dynamo.sh @@ -0,0 +1,18 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 10 + + if [[ $TEST_OUTPUT != *"timed out waiting"* ]]; then + echo "Failed to match expected output" + echo $TEST_OUTPUT + + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh b/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh new file mode 100644 index 0000000000..1f27c488de --- /dev/null +++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh @@ -0,0 +1,13 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 10 + + kubectl wait --for=condition=available --timeout=120s deployment/carts -n carts +} + +"$@" diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-pod-logs.sh b/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-pod-logs.sh new file mode 100644 index 0000000000..6f08dc41b0 --- /dev/null +++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/tests/hook-pod-logs.sh @@ -0,0 +1,41 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + echo "=== DEBUG: Checking carts pods ===" + kubectl get pods -n carts -l app.kubernetes.io/component=service -o wide 2>&1 || true + echo "=== DEBUG: Carts configmap ===" + kubectl -n carts get cm carts -o jsonpath='{.data}' 2>&1 || true + echo "" + echo "=== DEBUG: Pod identity associations ===" + aws eks list-pod-identity-associations --cluster-name ${EKS_CLUSTER_AUTO_NAME} --namespace carts 2>&1 || true + + # Wait for the carts pod to crash and restart at least once + echo "Waiting for carts pod to crash and restart..." + + for i in $(seq 1 36); do + RESTARTS=$(kubectl get pods -n carts -l app.kubernetes.io/component=service --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1:].status.containerStatuses[0].restartCount}' 2>/dev/null || echo "0") + if [ "$RESTARTS" -gt 0 ] 2>/dev/null; then + LATEST_POD=$(kubectl get pods -n carts -l app.kubernetes.io/component=service --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1:].metadata.name}') + LOG_OUTPUT=$(kubectl logs -n carts -p "$LATEST_POD" 2>/dev/null || true) + if [[ "$LOG_OUTPUT" == *"Unable to load credentials"* ]]; then + echo "Found expected credential error after $i attempts (restarts=$RESTARTS)" + return 0 + fi + fi + echo "Attempt $i: restarts=$RESTARTS, waiting..." 
+ sleep 10 + done + + echo "=== DEBUG: Final pod state ===" + kubectl get pods -n carts -l app.kubernetes.io/component=service -o wide 2>&1 || true + kubectl describe pods -n carts -l app.kubernetes.io/component=service 2>&1 | tail -30 || true + + echo "Failed to find expected credential error after 360s" + exit 1 +} + +"$@" diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/understanding.md b/website/docs/fastpaths/operator/amazon-eks-pod-identity/understanding.md new file mode 100644 index 0000000000..a990c0969c --- /dev/null +++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/understanding.md @@ -0,0 +1,18 @@ +--- +title: "Understanding Pod IAM" +sidebar_position: 33 +--- + +The first place to look for the issue is the logs of the `carts` service: + +```bash hook=pod-logs timeout=480 +$ LATEST_POD=$(kubectl get pods -n carts -l app.kubernetes.io/component=service --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1:].metadata.name}') +sleep 60 +kubectl logs -n carts -p $LATEST_POD +[...] +software.amazon.awssdk.core.exception.SdkClientException: Unable to load credentials from any of the providers in the chain AwsCredentialsProviderChain(credentialsProviders=[SystemPropertyCredentialsProvider(), EnvironmentVariableCredentialsProvider(), WebIdentityTokenCredentialsProvider(), ProfileCredentialsProvider(profileName=default, profileFile=ProfileFile(sections=[])), ContainerCredentialsProvider(), InstanceProfileCredentialsProvider()]) : [SystemPropertyCredentialsProvider(): Unable to load credentials from system settings. Access key must be specified either via environment variable (AWS_ACCESS_KEY_ID) or system property (aws.accessKeyId)., EnvironmentVariableCredentialsProvider(): Unable to load credentials from system settings. Access key must be specified either via environment variable (AWS_ACCESS_KEY_ID) or system property (aws.accessKeyId)., WebIdentityTokenCredentialsProvider(): Either the environment variable AWS_WEB_IDENTITY_TOKEN_FILE or the javaproperty aws.webIdentityTokenFile must be set., ProfileCredentialsProvider(profileName=default, profileFile=ProfileFile(sections=[])): Profile file contained no credentials for profile 'default': ProfileFile(sections=[]), ContainerCredentialsProvider(): Cannot fetch credentials from container - neither AWS_CONTAINER_CREDENTIALS_FULL_URI or AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variables are set., InstanceProfileCredentialsProvider(): Failed to load credentials from IMDS.] +``` + +The application is generating an error which indicates that the Pod cannot load AWS credentials to access DynamoDB. This is happening because by default, when no IAM roles or policies are linked to our Pod via EKS Pod Identity, the application cannot obtain credentials to make AWS API calls. + +One approach would be to expand the IAM permissions of the node IAM role, but this would allow any Pod running on those instances to access our DynamoDB table. This violates the principle of least privilege and is not a security best practice. Instead, we'll use EKS Pod Identity to provide the specific permissions required by the `carts` application at the Pod level, ensuring fine-grained access control. 
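+
+Pod Identity ties an IAM role to a specific Kubernetes Service Account, so before we create that association in the next step it's worth confirming which Service Account the `carts` Pods run under. This is just an illustrative check:
+
+```bash test=false
+$ kubectl -n carts get deployment carts -o jsonpath='{.spec.template.spec.serviceAccountName}'
+carts
+```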
diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/use-pod-identity.md b/website/docs/fastpaths/operator/amazon-eks-pod-identity/use-pod-identity.md new file mode 100644 index 0000000000..7e10e0ab9a --- /dev/null +++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/use-pod-identity.md @@ -0,0 +1,101 @@ +--- +title: "Using EKS Pod Identity" +sidebar_position: 34 +hide_table_of_contents: true +--- + +With Amazon EKS Auto Mode, the EKS Pod Identity Agent is already included and managed by AWS in the control plane. You can verify Pod Identity is available by checking for existing pod identity associations: + +```bash +$ aws eks list-pod-identity-associations --cluster-name $EKS_CLUSTER_AUTO_NAME --namespace carts +{ + "associations": [] +} +``` + +An IAM role, which provides the required permissions for the `carts` service to read and write to the DynamoDB table, was created when the Auto Mode cluster was set up. You can view the policy as shown below: + +```bash +$ aws iam get-policy-version \ + --version-id v1 --policy-arn \ + --query 'PolicyVersion.Document' \ + arn:aws:iam::${AWS_ACCOUNT_ID}:policy/${EKS_CLUSTER_AUTO_NAME}-carts-dynamo | jq . +{ + "Statement": [ + { + "Action": "dynamodb:*", + "Effect": "Allow", + "Resource": [ + "arn:aws:dynamodb:us-west-2:267912352941:table/eks-workshop-auto-carts", + "arn:aws:dynamodb:us-west-2:267912352941:table/eks-workshop-auto-carts/index/*" + ], + "Sid": "AllAPIActionsOnCart" + } + ], + "Version": "2012-10-17" +} +``` + +The role has also been configured with the appropriate trust relationship, which allows the EKS Service Principal to assume this role for Pod Identity. You can view it with the command below: + +```bash +$ aws iam get-role \ + --query 'Role.AssumeRolePolicyDocument' \ + --role-name ${EKS_CLUSTER_AUTO_NAME}-carts-dynamo | jq . +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "pods.eks.amazonaws.com" + }, + "Action": [ + "sts:AssumeRole", + "sts:TagSession" + ] + } + ] +} +``` + +Next, we will use Amazon EKS Pod Identity feature to associate an AWS IAM role with the Kubernetes Service Account that will be used by our deployment. To create the association, run the following command: + +```bash wait=30 +$ aws eks create-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME} \ + --role-arn arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EKS_CLUSTER_AUTO_NAME}-carts-dynamo \ + --namespace carts --service-account carts | jq . 
+{
+    "association": {
+        "clusterName": "eks-workshop-auto",
+        "namespace": "carts",
+        "serviceAccount": "carts",
+        "roleArn": "arn:aws:iam::267912352941:role/eks-workshop-auto-carts-dynamo",
+        "associationArn": "arn:aws:eks:us-west-2:267912352941:podidentityassociation/eks-workshop-auto/a-yg5uoymvtfgdg5tcj",
+        "associationId": "a-yg5uoymvtfgdg5tcj",
+        "tags": {},
+        "createdAt": "2025-10-11T01:13:27.763000+00:00",
+        "modifiedAt": "2025-10-11T01:13:27.763000+00:00",
+        "disableSessionTags": false
+    }
+}
+```
+
+All that's left is to verify that the `carts` Deployment is using the `carts` Service Account:
+
+```bash
+$ kubectl -n carts describe deployment carts | grep 'Service Account'
+  Service Account:  carts
+```
+
+With the Service Account verified, let's recycle the `carts` Pods:
+
+```bash hook=enable-pod-identity hookTimeout=430 timeout=360
+$ kubectl -n carts rollout restart deployment/carts
+deployment.apps/carts restarted
+$ kubectl -n carts rollout status deployment/carts --timeout=300s
+Waiting for deployment "carts" rollout to finish: 1 old replicas are pending termination...
+deployment "carts" successfully rolled out
+```
+
+In the next section, we'll verify that the DynamoDB permission issue we encountered has been resolved for the carts application.
diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/using-dynamo.md b/website/docs/fastpaths/operator/amazon-eks-pod-identity/using-dynamo.md
new file mode 100644
index 0000000000..340df24579
--- /dev/null
+++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/using-dynamo.md
@@ -0,0 +1,77 @@
+---
+title: "Using Amazon DynamoDB"
+sidebar_position: 32
+---
+
+The first step in this process is to re-configure the carts service to use a DynamoDB table that has already been created for us. The application loads most of its configuration from a ConfigMap. Let's take a look at it:
+
+```bash
+$ kubectl -n carts get -o yaml cm carts | yq
+apiVersion: v1
+data:
+  AWS_ACCESS_KEY_ID: key
+  AWS_SECRET_ACCESS_KEY: secret
+  RETAIL_CART_PERSISTENCE_DYNAMODB_CREATE_TABLE: "true"
+  RETAIL_CART_PERSISTENCE_DYNAMODB_ENDPOINT: http://carts-dynamodb:8000
+  RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME: Items
+  RETAIL_CART_PERSISTENCE_PROVIDER: dynamodb
+kind: ConfigMap
+metadata:
+  name: carts
+  namespace: carts
+```
+
+The following kustomization overwrites the ConfigMap, removing the DynamoDB endpoint configuration. This tells the SDK to use the real DynamoDB service instead of our test Pod. It also sets `RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME` to the name of the DynamoDB table that has already been created for us, which is injected via the `CARTS_DYNAMODB_TABLENAME` environment variable we'll set next.
+ +```kustomization +modules/security/eks-pod-identity/dynamo/kustomization.yaml +ConfigMap/carts +``` + +Let's set the DynamoDB table name and run Kustomize to use the real DynamoDB service: + +```bash +$ export CARTS_DYNAMODB_TABLENAME=${EKS_CLUSTER_AUTO_NAME}-carts && echo $CARTS_DYNAMODB_TABLENAME +eks-workshop-auto-carts +$ kubectl kustomize ~/environment/eks-workshop/modules/security/eks-pod-identity/dynamo \ + | envsubst | kubectl apply -f- +``` + +This will overwrite our ConfigMap with new values: + +```bash +$ kubectl -n carts get cm carts -o yaml | yq +apiVersion: v1 +data: + AWS_REGION: us-west-2 + RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME: eks-workshop-auto-carts + RETAIL_CART_PERSISTENCE_PROVIDER: dynamodb +kind: ConfigMap +metadata: + labels: + app: carts + name: carts + namespace: carts +``` + +Now, we need to recycle all the carts pods to pick up our new ConfigMap contents: + +```bash expectError=true hook=enable-dynamo +$ kubectl rollout restart -n carts deployment/carts +deployment.apps/carts restarted +$ kubectl rollout status -n carts deployment/carts --timeout=20s +Waiting for deployment "carts" rollout to finish: 1 old replicas are pending termination... +error: timed out waiting for the condition +``` + +It looks like our change failed to deploy properly. We can confirm this by looking at the Pods: + +```bash +$ kubectl -n carts get pod +NAME READY STATUS RESTARTS AGE +carts-5d486d7cf7-8qxf9 1/1 Running 0 5m49s +carts-df76875ff-7jkhr 0/1 CrashLoopBackOff 3 (36s ago) 2m2s +carts-dynamodb-698674dcc6-hw2bg 1/1 Running 0 20m +``` + +What's gone wrong? diff --git a/website/docs/fastpaths/operator/amazon-eks-pod-identity/verifying-dynamo.md b/website/docs/fastpaths/operator/amazon-eks-pod-identity/verifying-dynamo.md new file mode 100644 index 0000000000..c694dbff8d --- /dev/null +++ b/website/docs/fastpaths/operator/amazon-eks-pod-identity/verifying-dynamo.md @@ -0,0 +1,36 @@ +--- +title: "Verifying DynamoDB access" +sidebar_position: 35 +pagination_next: fastpaths/explore/index +--- + +Now, with the `carts` Service Account associated with the authorized IAM role, the `carts` Pod has permission to access the DynamoDB table. Access the web store again and navigate to the shopping cart. + +```bash +$ LB_HOSTNAME=$(kubectl get svc ui-nlb-auto -n ui -o yaml | yq .status.loadBalancer.ingress[0].hostname) +$ echo "http://$LB_HOSTNAME" +http://k8s-ui-uinlbaut-a9797f0f61.elb.us-west-2.amazonaws.com +``` + +The `carts` Pod is able to reach the DynamoDB service and the shopping cart is now accessible! + +![Cart](/img/sample-app-screens/shopping-cart.webp) + +After the AWS IAM role is associated with the Service Account, any newly created Pods using that Service Account will be intercepted by the [EKS Pod Identity webhook](https://github.com/aws/amazon-eks-pod-identity-webhook). This webhook runs on the Amazon EKS cluster's control plane and is fully managed by AWS. 
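+
+If you want to confirm that the association created earlier is in place before inspecting the Pod, you can list it again; treat this as an illustrative check, since the association details will differ in your account:
+
+```bash test=false
+$ aws eks list-pod-identity-associations --cluster-name $EKS_CLUSTER_AUTO_NAME --namespace carts --service-account carts
+```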
Take a closer look at the new `carts` Pod to see the new environment variables: + +```bash +$ kubectl -n carts exec deployment/carts -- env | grep AWS +AWS_STS_REGIONAL_ENDPOINTS=regional +AWS_DEFAULT_REGION=us-west-2 +AWS_REGION=us-west-2 +AWS_CONTAINER_CREDENTIALS_FULL_URI=http://169.254.170.23/v1/credentials +AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE=/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token +``` + +Notable points about these environment variables: + +- `AWS_DEFAULT_REGION` - The region is set automatically to the same as our EKS cluster +- `AWS_STS_REGIONAL_ENDPOINTS` - Regional STS endpoints are configured to avoid putting too much pressure on the global endpoint in `us-east-1` +- `AWS_CONTAINER_CREDENTIALS_FULL_URI` - This variable tells AWS SDKs how to obtain credentials using the [HTTP credential provider](https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html). This means that EKS Pod Identity does not need to inject credentials via something like an `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` pair, and instead the SDKs can have temporary credentials vended to them via the EKS Pod Identity mechanism. You can read more about how this functions in the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html). + +You have successfully configured Pod Identity in your application. diff --git a/website/docs/fastpaths/operator/index.md b/website/docs/fastpaths/operator/index.md new file mode 100644 index 0000000000..fb34583b3c --- /dev/null +++ b/website/docs/fastpaths/operator/index.md @@ -0,0 +1,30 @@ +--- +title: "Operator Essentials" +sidebar_position: 60 +sidebar_custom_props: { "module": true } +--- + +# Operator Essentials + +::required-time + +:::tip Before you start +This fast path uses a dedicated Amazon EKS Auto Mode cluster. Amazon EKS Auto Mode extends AWS management of Kubernetes clusters beyond the cluster itself, managing infrastructure that enables smooth operation of your workloads including compute autoscaling, networking, load balancing, DNS, and block storage. + +Prepare your environment for this lab: + +```bash timeout=600 +$ prepare-environment fastpaths/operator +``` +::: + +Welcome to the EKS Workshop Operator Essentials! This is a collection of labs optimized for operators to learn the features of Amazon EKS most commonly required when operating EKS clusters. + +Throughout this series of exercises, you'll learn: + +- Configuring cluster autoscaling with Karpenter +- Implementing network policies for secure Pod-to-Pod traffic +- Working with secrets in EKS +- Using AWS services like DynamoDB with EKS Pod Identity + +Let's get started! \ No newline at end of file diff --git a/website/docs/fastpaths/operator/karpenter/consolidation.md b/website/docs/fastpaths/operator/karpenter/consolidation.md new file mode 100644 index 0000000000..fd1d9dd5a1 --- /dev/null +++ b/website/docs/fastpaths/operator/karpenter/consolidation.md @@ -0,0 +1,117 @@ +--- +title: "Disruption (Consolidation)" +sidebar_position: 50 +--- + +Karpenter automatically discovers nodes that are eligible for disruption and spins up replacements when needed. This can happen for three different reasons: + +- **Expiration**: By default, Karpenter automatically expires instances after 720h (30 days), forcing a recycle allowing nodes to be kept up to date. 
+- **Drift**: Karpenter detects changes in configuration (such as the `NodePool` or `NodeClass`) and applies the necessary changes
+- **Consolidation**: A critical feature for operating compute in a cost-effective manner, Karpenter will optimize our cluster's compute on an on-going basis. For example, if workloads are running on under-utilized compute instances, it will consolidate them onto fewer instances.
+
+Disruption is configured through the `disruption` block in a `NodePool`. You can see below the portion of the `general-purpose` NodePool configuration that Auto Mode has set up for you.
+
+```yaml
+  disruption:
+    budgets:
+    - nodes: 10%
+    consolidateAfter: 30s
+    consolidationPolicy: WhenEmptyOrUnderutilized
+```
+
+1. `budgets` limits the percentage of nodes that can be disrupted at the same time, minimizing any adverse impact on your workload.
+2. `consolidateAfter` specifies a wait time before initiating the consolidation process.
+3. The `WhenEmptyOrUnderutilized` policy enables Karpenter to replace nodes when they are either empty or underutilized.
+
+You can inspect the disruption configuration of the NodePool using the following command:
+
+```bash
+$ kubectl get nodepools general-purpose -o yaml | yq .spec.disruption
+```
+
+The `consolidationPolicy` value of `WhenEmptyOrUnderutilized` will consolidate nodes to optimize packing after `consolidateAfter` (30s here), with a budget that allows 10% of the nodes to be replaced at a time. Other values are possible, for example `consolidationPolicy` can also be set to `WhenEmpty`, which restricts disruption to nodes that contain no workload pods. Learn more about disruption in the [Karpenter docs](https://karpenter.sh/docs/concepts/disruption/#consolidation).
+
+Scaling out infrastructure is only one side of the equation for operating compute infrastructure in a cost-effective manner. We also need to be able to optimize on an on-going basis so that, for example, workloads running on under-utilized compute instances are compacted onto fewer instances. This improves the overall efficiency of how we run workloads on the compute, resulting in less overhead and lower costs.
+
+Let's explore how to trigger automatic consolidation when `disruption` is set to `consolidationPolicy: WhenEmptyOrUnderutilized`:
+
+1. Scale the `inflate` workload from 5 to 12 replicas, triggering Karpenter to provision additional capacity
+2. Scale the workload back down to 5 replicas
+3. Observe Karpenter consolidating the compute
+
+Scale our `inflate` workload again to consume more resources:
+
+```bash timeout=240
+$ kubectl scale -n other deployment/inflate --replicas 12
+$ kubectl rollout status -n other deployment/inflate --timeout=180s
+```
+
+This changes the total memory request for this deployment to around 12Gi, which when adjusted to account for the roughly 600Mi reserved for the kubelet on each node means that this will fit on 2 instances of type `m5.large`:
+
+```bash
+$ kubectl get nodes -L beta.kubernetes.io/instance-type -L kubernetes.io/arch -L kubernetes.io/os --sort-by=.metadata.creationTimestamp
+NAME                  STATUS   ROLES   AGE     VERSION               INSTANCE-TYPE   ARCH    OS
+i-07fd006840ed07309   Ready            20h     v1.33.4-eks-e386d34   c6a.large       amd64   linux
+i-0e209b70f1d2dfae0   Ready            17h     v1.33.4-eks-e386d34   c6a.large       amd64   linux
+i-0a78dba9f62f5e0e4   Ready            90m     v1.33.4-eks-e386d34   m5a.large       amd64   linux
+i-076a7c45e5f8e5f11   Ready            7m12s   v1.33.4-eks-e386d34   m5a.large       amd64   linux
+```
+
+Next, scale the number of replicas back down to 5:
+
+```bash wait=90
+$ kubectl scale -n other deployment/inflate --replicas 5
+```
+
+We can check the Karpenter events to get an idea of what actions it took in response to scaling the deployment in. Wait about 5-10 seconds before running the following command:
+
+```bash hook=grep
+$ kubectl events | grep -i 'disruption'
+
+3m39s  Normal  DisruptionBlocked            nodeclaim/general-purpose-5c74h   Node is nominated for a pending pod
+3m42s  Normal  DisruptionLaunching          nodeclaim/general-purpose-l6dpl   Launching NodeClaim: Underutilized
+3m42s  Normal  DisruptionWaitingReadiness   nodeclaim/general-purpose-l6dpl   Waiting on readiness to continue disruption
+3m39s  Normal  DisruptionBlocked            nodeclaim/general-purpose-l6dpl   Nodeclaim does not have an associated node
+18m    Normal  DisruptionBlocked            nodeclaim/general-purpose-m6gjm   Nodeclaim does not have an associated node
+4m38s  Normal  DisruptionBlocked            nodeclaim/general-purpose-m6gjm   Node is nominated for a pending pod
+3m20s  Normal  DisruptionTerminating        nodeclaim/general-purpose-m6gjm   Disrupting NodeClaim: Underutilized
+2m29s  Normal  DisruptionBlocked            nodeclaim/general-purpose-m6gjm   Node is deleting or marked for deletion
+4m38s  Normal  DisruptionTerminating        nodeclaim/general-purpose-nhtc7   Disrupting NodeClaim: Underutilized
+4m28s  Normal  DisruptionBlocked            nodeclaim/general-purpose-nhtc7   Node is deleting or marked for deletion
+4m38s  Normal  DisruptionBlocked            node/i-076a7c45e5f8e5f11          Node is nominated for a pending pod
+3m20s  Normal  DisruptionTerminating        node/i-076a7c45e5f8e5f11          Disrupting Node: Underutilized
+2m29s  Normal  DisruptionBlocked            node/i-076a7c45e5f8e5f11          Node is deleting or marked for deletion
+3m39s  Normal  DisruptionBlocked            node/i-0a78dba9f62f5e0e4          Node is nominated for a pending pod
+3m19s  Normal  DisruptionBlocked            node/i-0e1f072dc32194cc7          Node is nominated for a pending pod
+4m38s  Normal  DisruptionTerminating        node/i-0e209b70f1d2dfae0          Disrupting Node: Underutilized
+4m28s  Normal  DisruptionBlocked            node/i-0e209b70f1d2dfae0          Node is deleting or marked for deletion
+```
+
+The output will show Karpenter identifying specific nodes to cordon, drain and then terminate. The Kubernetes scheduler places any pods from those nodes onto the remaining capacity, and we can now see fewer nodes in the cluster:
+ +```bash +$ kubectl get nodes -L beta.kubernetes.io/instance-type -L kubernetes.io/arch -L kubernetes.io/os --sort-by=.metadata.creationTimestamp + +NAME STATUS ROLES AGE VERSION INSTANCE-TYPE ARCH OS +i-07fd006840ed07309 Ready 21h v1.33.4-eks-e386d34 c6a.large amd64 linux +i-0a78dba9f62f5e0e4 Ready 104m v1.33.4-eks-e386d34 m5a.large amd64 linux +i-0e1f072dc32194cc7 Ready 6m4s v1.33.4-eks-e386d34 c6a.large amd64 linux +``` + +Karpenter can also further consolidate if a node can be replaced with a cheaper variant in response to workload changes. This can be demonstrated by scaling the `inflate` deployment replicas down to 1, with a total memory request of around 1Gi: + +```bash wait=60 +$ kubectl scale -n other deployment/inflate --replicas 1 +``` + +We can check the Karpenter logs and see what actions the controller took in response: + +```bash test=false +$ kubectl events | grep -i 'disruption' +``` + +The output will show Karpenter consolidating the workloads by removing underutilized nodes in the NodePool. + +This concludes the introduction to EKS Auto Mode's autoscaling capabilities. Though we used the default NodePool and NodeClass configuration that Auto Mode provides, you may also configure custom NodePool and NodeClass resources in your cluster to fit your specific needs. diff --git a/website/docs/fastpaths/operator/karpenter/index.md b/website/docs/fastpaths/operator/karpenter/index.md new file mode 100644 index 0000000000..9c9eb98b24 --- /dev/null +++ b/website/docs/fastpaths/operator/karpenter/index.md @@ -0,0 +1,27 @@ +--- +title: "Autoscaling with EKS Auto Mode" +sidebar_position: 20 +description: "Automatically manage compute for Amazon Elastic Kubernetes Service with EKS Auto Mode." +--- + +:::tip What's been set up for you +Your Amazon EKS Auto Mode cluster includes fully-managed autoscaling powered by **Karpenter**, which enables automatic compute scaling out of the box. +::: + +In this lab, we'll explore how EKS Auto Mode provides automatic compute scaling for your cluster. Auto Mode includes fully-managed [Karpenter](https://github.com/aws/karpenter) functionality as part of a comprehensive suite of managed features that minimize operational burden. The autoscaling capability is designed to provide the right compute resources to match your application's needs in seconds, not minutes, by observing the aggregate resource requests of unschedulable pods and making decisions to launch and terminate nodes to minimize scheduling latencies. + + + +EKS Auto Mode's autoscaling works by: + +- Watching for pods that the Kubernetes scheduler has marked as unschedulable +- Evaluating scheduling constraints (resource requests, node selectors, affinities, tolerations, and topology spread constraints) requested by the pods +- Provisioning nodes that meet the requirements of the pods +- Scheduling the pods to run on the new nodes +- Removing the nodes when the nodes are no longer needed + +:::info +With EKS Auto Mode, Karpenter is fully managed by AWS and runs off-cluster. Unlike self-managed Karpenter, you don't need to deploy, scale, or upgrade Karpenter pods. All operational aspects are handled by AWS, while you retain control over NodePool and NodeClass configurations. +::: + +Since Auto Mode provides fully-managed autoscaling, we can move directly to configuring NodePools to control how nodes are provisioned for your workloads. 
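+
+As a quick sanity check before moving on, you can list the NodePools in the cluster. Assuming the default Auto Mode configuration, you should see at least the built-in `general-purpose` NodePool that we'll work with in this lab:
+
+```bash test=false
+$ kubectl get nodepools
+```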
diff --git a/website/docs/fastpaths/operator/karpenter/node-provisioning.md b/website/docs/fastpaths/operator/karpenter/node-provisioning.md
new file mode 100644
index 0000000000..8bffb6fd6b
--- /dev/null
+++ b/website/docs/fastpaths/operator/karpenter/node-provisioning.md
@@ -0,0 +1,115 @@
+---
+title: "Automatic Node Provisioning"
+sidebar_position: 40
+---
+
+We'll start putting Karpenter to work by examining how it can dynamically provision appropriately sized EC2 instances depending on the needs of pods that cannot be scheduled at any given time. This can reduce the amount of unused compute resources in an EKS cluster.
+
+The NodePool inspected in the previous section expressed specific instance families that Karpenter was allowed to use. They were:
+
+| Instance families | Generation | OS    | Architecture |
+| ----------------- | ---------- | ----- | ------------ |
+| `c`, `m`, `r`     | >4         | Linux | amd64        |
+
+This broad configuration provides a wide range of choices to Karpenter for selecting a right-sized instance based on the requirements.
+
+Let's create some Pods and see how Auto Mode's Karpenter-based autoscaling adapts. Currently there should be a couple of nodes available that are managed by Karpenter:
+
+```bash
+$ kubectl get node -l karpenter.sh/nodepool=general-purpose
+
+NAME                  STATUS   ROLES   AGE   VERSION
+i-07fd006840ed07309   Ready            17h   v1.33.4-eks-e386d34
+i-0e209b70f1d2dfae0   Ready            14h   v1.33.4-eks-e386d34
+```
+
+We'll use the following Deployment to trigger Karpenter to scale out:
+
+::yaml{file="manifests/modules/autoscaling/compute/karpenter/automode/scale/deployment.yaml" paths="spec.replicas,spec.template.spec.containers.0.image,spec.template.spec.containers.0.resources"}
+
+1. Initially specifies 0 replicas; we'll scale it up later
+2. Uses a simple `pause` container image
+3. Requests `1Gi` of memory for each pod
+
+:::info What's a pause container?
+You'll notice in this example we're using the image:
+
+`public.ecr.aws/eks-distro/kubernetes/pause`
+
+This is a small container that will consume no real resources and starts quickly, which makes it great for demonstrating scaling scenarios. We'll be using this for many of the examples in this particular lab.
+:::
+
+Apply this deployment:
+
+```bash
+$ kubectl apply -k ~/environment/eks-workshop/modules/autoscaling/compute/karpenter/automode/scale
+deployment.apps/inflate created
+```
+
+Now let's deliberately scale this deployment to demonstrate that Karpenter is making optimized decisions. Since we've requested 1Gi of memory, if we scale the deployment to 5 replicas it will request a total of 5Gi of memory.
+
+Before we proceed, which instance from the table above do you think Karpenter will end up provisioning? Which instance type would you want it to choose?
+
+Scale the deployment:
+
+```bash
+$ kubectl scale -n other deployment/inflate --replicas 5
+```
+
+Because this operation creates one or more new EC2 instances it will take a while. You can use `kubectl` to wait until it's done with this command:
+
+```bash timeout=200
+$ kubectl rollout status -n other deployment/inflate --timeout=180s
+```
+
+Let's now check the actions taken by Karpenter by listing the related events. Wait 5-10 seconds for the events to appear.
+
+```bash wait=10
+$ kubectl events | grep -i 'NodeClaim'
+```
+
+You should see output showing a new node being launched.
+
+```
+2m55s  Normal  Launched            nodeclaim/general-purpose-5c74h   Status condition transitioned, Type: Launched, Status: Unknown -> True, Reason: Launched
+2m52s  Normal  DisruptionBlocked   nodeclaim/general-purpose-5c74h   Nodeclaim does not have an associated node
+2m39s  Normal  Registered          nodeclaim/general-purpose-5c74h   Status condition transitioned, Type: Registered, Status: Unknown -> True, Reason: Registered
+2m36s  Normal  Initialized         nodeclaim/general-purpose-5c74h   Status condition transitioned, Type: Initialized, Status: Unknown -> True, Reason: Initialized
+2m36s  Normal  Ready               nodeclaim/general-purpose-5c74h   Status condition transitioned, Type: Ready, Status: Unknown -> True, Reason: Ready
+12m    Normal  Unconsolidatable    nodeclaim/general-purpose-nhtc7   Can't replace with a cheaper node
+```
+
+Karpenter finds the most suitable instance type that is big enough to accommodate all the Pods waiting to be scheduled while also being the lowest in cost.
+
+:::info
+There are certain cases where an instance type other than the lowest-priced one might be selected, for example if the cheapest instance type has no remaining capacity available in the region you're working in.
+:::
+
+Let's list all the available nodes in the cluster again:
+
+```bash
+$ kubectl get nodes \
+  -L beta.kubernetes.io/instance-type \
+  -L kubernetes.io/arch \
+  -L kubernetes.io/os \
+  --sort-by=.metadata.creationTimestamp
+
+NAME                  STATUS   ROLES   AGE   VERSION               INSTANCE-TYPE   ARCH    OS
+i-07fd006840ed07309   Ready            20h   v1.33.4-eks-e386d34   c6a.large       amd64   linux
+i-0e209b70f1d2dfae0   Ready            17h   v1.33.4-eks-e386d34   c6a.large       amd64   linux
+i-0a78dba9f62f5e0e4   Ready            60m   v1.33.4-eks-e386d34   m5a.large       amd64   linux
+```
+
+You can see that the last node added to the pool matches the `NodePool` configuration table shown earlier on this page.
+
+Karpenter keeps track of the nodes it manages through a Kubernetes-native object called a NodeClaim. Since it's a regular Kubernetes object, you can inspect its configuration as well:
+
+```bash
+$ kubectl get nodeclaims.karpenter.sh -o wide
+NAME                    TYPE        CAPACITY    ZONE         NODE                  READY   AGE     IMAGEID                 ID                                       NODEPOOL          NODECLASS   DRIFTED
+general-purpose-dh59z   m5a.large   on-demand   us-west-2b   i-0d3ed392f96f22793   True    5m58s   ami-00e71b7a43dd16dec   aws:///us-west-2b/i-0d3ed392f96f22793   general-purpose   default
+general-purpose-mw4sf   c6a.large   on-demand   us-west-2a   i-0078b61779fc13053   True    30h     ami-00e71b7a43dd16dec   aws:///us-west-2a/i-0078b61779fc13053   general-purpose   default
+general-purpose-wp7wg   c6a.large   on-demand   us-west-2c   i-0c1ceaeeb6ed1bfb6   True    8m5s    ami-00e71b7a43dd16dec   aws:///us-west-2c/i-0c1ceaeeb6ed1bfb6   general-purpose   default
+```
+
+This simple example illustrates how EKS Auto Mode's Karpenter-based autoscaling can dynamically select the right instance type based on the resource requirements of the workloads that require compute capacity. This differs fundamentally from a model oriented around node pools, such as Cluster Autoscaler, where the instance types within a single node group must have consistent CPU and memory characteristics.
diff --git a/website/docs/fastpaths/operator/karpenter/setup-provisioner.md b/website/docs/fastpaths/operator/karpenter/setup-provisioner.md
new file mode 100644
index 0000000000..300e2f2fea
--- /dev/null
+++ b/website/docs/fastpaths/operator/karpenter/setup-provisioner.md
@@ -0,0 +1,113 @@
+---
+title: "Inspect Karpenter configuration"
+sidebar_position: 30
+---
+
+EKS Auto Mode provides fully managed Karpenter functionality out of the box.
+Karpenter configuration comes in the form of a `NodePool` CRD (Custom Resource Definition). A single Karpenter `NodePool` is capable of handling many different pod shapes. Karpenter makes scheduling and provisioning decisions based on pod attributes such as labels and affinity. A cluster may have more than one `NodePool`, but for the moment we'll use the default node pools that Auto Mode configures for you.
+
+One of the main objectives of Karpenter is to simplify the management of capacity. If you're familiar with other auto scaling solutions, you may have noticed that Karpenter takes a different approach, referred to as **group-less auto scaling**. Other solutions have traditionally used the concept of a **node group** as the element of control that defines the characteristics of the capacity provided (e.g. On-Demand, EC2 Spot, GPU nodes) and that controls the desired scale of the group in the cluster. In AWS, node groups are implemented with [Auto Scaling groups](https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html). Karpenter allows us to avoid the complexity that arises from managing multiple types of applications with different compute needs.
+
+We'll start by inspecting the existing resources used by Karpenter. First we'll check out the default `NodePool` that defines general capacity requirements:
+
+```bash
+$ kubectl get nodepools general-purpose -o yaml
+
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+  annotations:
+    karpenter.sh/nodepool-hash: "4012513481623584108"
+    karpenter.sh/nodepool-hash-version: v3
+  generation: 1
+  labels:
+    app.kubernetes.io/managed-by: eks
+  name: general-purpose
+  resourceVersion: "57384"
+spec:
+  disruption:
+    budgets:
+    - nodes: 10%
+    consolidateAfter: 30s
+    consolidationPolicy: WhenEmptyOrUnderutilized
+  template:
+    metadata: {}
+    spec:
+      expireAfter: 336h
+      nodeClassRef:
+        group: eks.amazonaws.com
+        kind: NodeClass
+        name: default
+      requirements:
+      - key: karpenter.sh/capacity-type
+        operator: In
+        values:
+        - on-demand
+      - key: eks.amazonaws.com/instance-category
+        operator: In
+        values:
+        - c
+        - m
+        - r
+      - key: eks.amazonaws.com/instance-generation
+        operator: Gt
+        values:
+        - "4"
+      - key: kubernetes.io/arch
+        operator: In
+        values:
+        - amd64
+      - key: kubernetes.io/os
+        operator: In
+        values:
+        - linux
+      terminationGracePeriod: 24h0m0s
+```
+
+In addition to this default `NodePool` resource, you may also create custom `NodePool` resources to specify different isolation and infrastructure requirements for your workloads. Here are some key points to note about this configuration:
+
+1. The [NodePool CRD](https://karpenter.sh/docs/concepts/nodepools/) supports defining node properties like instance type, architecture and zone as `requirements`. You can learn which other properties are [available here](https://karpenter.sh/docs/concepts/scheduling/#selecting-nodes).
+2. In this configuration, `karpenter.sh/capacity-type` limits Karpenter to provisioning On-Demand instances, while `eks.amazonaws.com/instance-category` and `eks.amazonaws.com/instance-generation` limit the allowed instance families to `c`, `m` and `r` types newer than generation 4, running on `amd64` Linux nodes.
+3. A `NodePool` can define a limit on the amount of CPU and memory managed by it.
Once this limit is reached, Karpenter will not provision additional capacity associated with that particular `NodePool`, providing a cap on the total compute. + +In addition to `NodePool`, Karpenter also has one more important resource, a `NodeClass`. You can see a `NodeClass` referenced in the previous `NodePool` configuration under `nodeClassRef`. This `NodeClass` is also pre-provisioned by EKS Auto Mode. Here is the configuration of the same. + +```bash +$ kubectl get nodeclass default -o yaml + +apiVersion: eks.amazonaws.com/v1 +kind: NodeClass +metadata: + annotations: + eks.amazonaws.com/nodeclass-hash: "495408067366721138" + eks.amazonaws.com/nodeclass-hash-version: v2 + finalizers: + - eks.amazonaws.com/termination + generation: 1 + labels: + app.kubernetes.io/managed-by: eks + name: default + resourceVersion: "304263" +spec: + ephemeralStorage: + iops: 3000 + size: 80Gi + throughput: 125 + networkPolicy: DefaultAllow + networkPolicyEventLogs: Disabled + role: eks-workshop-auto-auto-node + securityGroupSelectorTerms: + - id: sg-0c70efd097a74a4cf + snatPolicy: Random + subnetSelectorTerms: + - id: subnet-096bfe6623a87be3f + - id: subnet-09e84ab4eee5d16bb + - id: subnet-02a87ab5b226b952d +``` + +1. The `role` attribute assigns the IAM role that will be applied to the EC2 instance provisioned by Karpenter +2. The `subnetSelectorTerms` can be used to look up the subnets where Karpenter should launch the EC2 instances. +3. The `securityGroupSelectorTerms` accomplishes the same function for the security group that will be attached to the EC2 instances. + +With all these resources managed by EKS Auto Mode, Karpenter has the basic requirements it needs to start provisioning capacity for our cluster. + +Let's do some hands-on to see how it works. diff --git a/website/docs/fastpaths/operator/karpenter/tests/hook-grep.sh b/website/docs/fastpaths/operator/karpenter/tests/hook-grep.sh new file mode 100644 index 0000000000..a7484c0c66 --- /dev/null +++ b/website/docs/fastpaths/operator/karpenter/tests/hook-grep.sh @@ -0,0 +1,14 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + if [[ -z "$TEST_OUTPUT" ]]; then + echo "Failed to find a disruption log. Expected at least one." + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/operator/karpenter/tests/hook-suite.sh b/website/docs/fastpaths/operator/karpenter/tests/hook-suite.sh new file mode 100644 index 0000000000..be7e7d6d69 --- /dev/null +++ b/website/docs/fastpaths/operator/karpenter/tests/hook-suite.sh @@ -0,0 +1,26 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + # Clean up inflate deployment from karpenter lab + kubectl delete deployment inflate -n other --ignore-not-found + + # Wait for Karpenter consolidation to settle and pods to stabilize + # Retry the wait since pods may be evicted during consolidation + sleep 120 + for i in $(seq 1 3); do + if kubectl wait --for=condition=Ready --timeout=120s pods -l app.kubernetes.io/created-by=eks-workshop -A 2>/dev/null; then + echo "All pods ready" + return 0 + fi + echo "Attempt $i: some pods not ready, waiting for consolidation..." 
+ sleep 30 + done + # Final attempt without error suppression + kubectl wait --for=condition=Ready --timeout=120s pods -l app.kubernetes.io/created-by=eks-workshop -A +} + +"$@" diff --git a/website/docs/fastpaths/operator/network-policies/egress.md b/website/docs/fastpaths/operator/network-policies/egress.md new file mode 100644 index 0000000000..ecc2b196b6 --- /dev/null +++ b/website/docs/fastpaths/operator/network-policies/egress.md @@ -0,0 +1,91 @@ +--- +title: "Implementing Egress Controls" +sidebar_position: 70 +--- + + + +As shown in the above architecture diagram, the 'ui' component is the front-facing app. So we can start implementing our network controls for the 'ui' component by defining a network policy that will block all egress traffic from the 'ui' namespace. + +::yaml{file="manifests/modules/networking/network-policies/apply-network-policies/default-deny.yaml" paths="spec.podSelector,spec.policyTypes"} + +1. The empty selector `{}` matches all pods +2. The `Egress` policy type controls outbound traffic from pods + +> **Note** : There is no namespace specified in the network policy, as it is a generic policy that can potentially be applied to any namespace in our cluster. + +```bash wait=45 +$ kubectl apply -n ui -f ~/environment/eks-workshop/modules/networking/network-policies/apply-network-policies/default-deny.yaml +``` + +Now let us try accessing the 'catalog' component from the 'ui' component, + +```bash expectError=true +$ kubectl exec deployment/ui -n ui -- curl http://catalog.catalog/health --connect-timeout 5 + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- 0:00:03 --:--:-- 0 +curl: (28) Resolving timed out after 5000 milliseconds +command terminated with exit code 28 +``` + +On execution of the curl command, the output displayed should have the below statement, which shows that the 'ui' component now cannot directly communicate with the 'catalog' component. + +```text +curl: (28) Resolving timed out after 5000 milliseconds +``` + +Implementing the above policy will also cause the sample application to no longer function properly as 'ui' component requires access to the 'catalog' service and other service components. To define an effective egress policy for 'ui' component requires understanding the network dependencies for the component. + +The 'ui' service will need the following three egress network connectivity to function properly. + +1. Ability to communicate with all the other services, such as 'catalog', 'orders', etc. +2. Ability to access cluster-wide common tools in the cluster system namespaces like `kube-system`. +3. Ability to access the kube-dns service to resolve DNS name. For your EKS Auto Mode clusters, this IP is `172.20.0.10/32`. The following configuration enables this connectivity. + +The network policy below was designed with the above requirements in mind. + +::yaml{file="manifests/modules/fastpaths/operators/network-policies/allow-ui-egress.yaml" paths="spec.egress.0.to.0,spec.egress.0.to.1,spec.egress.0.to.2"} + +1. The first egress rule focuses on allowing egress traffic to DNS server for domain name resolution of internal services. +2. The first egress rule focuses on allowing egress traffic to all `service` components such as 'catalog', 'orders' etc. (without providing access to the database components), along with the `namespaceSelector`, which allows for egress traffic to any namespace as long as the pod labels match `app.kubernetes.io/component: service`. 
+3. The second egress rule focuses on allowing egress traffic to all components in the `kube-system` namespace, which enables other key communications with the components in the system namespace. + +Let's apply this additional policy: + +```bash wait=45 +$ kubectl apply -f ~/environment/eks-workshop/modules/fastpaths/operators/network-policies/allow-ui-egress.yaml +``` + +Now, we can test to see if we can connect to 'catalog' service: + +```bash +$ kubectl exec deployment/ui -n ui -- curl -s http://catalog.catalog/health | yq +OK +``` + +As you can see from the outputs, we can now connect to the 'catalog' service but not the database since it does not have the `app.kubernetes.io/component: service` label: + +```bash expectError=true +$ kubectl exec deployment/ui -n ui -- curl -v telnet://catalog-mysql.catalog:3306 --connect-timeout 5 + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- 0:00:05 --:--:-- 0 +* Failed to connect to catalog-mysql.catalog port 3306 after 5000 ms: Timeout was reached +* Closing connection 0 +curl: (28) Failed to connect to catalog-mysql.catalog port 3306 after 5000 ms: Timeout was reached +command terminated with exit code 28 +``` + +Similarly, we can test to see if we are able to connect to other services like the 'order' service, which we should be able to. However, any calls to the internet or other third-party services should be blocked. + +```bash expectError=true +$ kubectl exec deployment/ui -n ui -- curl -v www.google.com --connect-timeout 5 + Trying XXX.XXX.XXX.XXX:80... +* Trying [XXXX:XXXX:XXXX:XXXX::XXXX]:80... +* Immediate connect fail for XXXX:XXXX:XXXX:XXXX::XXXX: Network is unreachable +curl: (28) Failed to connect to www.google.com port 80 after 5001 ms: Timeout was reached +command terminated with exit code 28 +``` + +Now that we have defined an effective egress policy for 'ui' component, let us focus on the catalog service and database components to implement a network policy to control traffic to the 'catalog' namespace. diff --git a/website/docs/fastpaths/operator/network-policies/index.md b/website/docs/fastpaths/operator/network-policies/index.md new file mode 100644 index 0000000000..2a811123cf --- /dev/null +++ b/website/docs/fastpaths/operator/network-policies/index.md @@ -0,0 +1,26 @@ +--- +title: "Enabling secure Pod-to-Pod communication" +sidebar_position: 30 +description: "Restrict network traffic to and from pods in Amazon Elastic Kubernetes Service with network policies." +--- + +By default, Kubernetes allows all pods to freely communicate with each other with no restrictions. Kubernetes Network Policies enable you to define and enforce rules on the flow of traffic between pods, namespaces, and IP blocks (CIDR ranges). They act as a virtual firewall, allowing you to segment and secure your cluster by specifying ingress (incoming) and egress (outgoing) network traffic rules based on various criteria such as pod labels, namespaces, IP addresses, and ports. + +Below is an example network policy with an explanation of some key elements: + +::yaml{file="manifests/modules/networking/network-policies/apply-network-policies/example-network-policy.yaml" paths="metadata,spec.podSelector,spec.policyTypes,spec.ingress,spec.egress" title="example-network-policy.yaml"} + +1. Similar to other Kubernetes objects, `metadata` allows you to specify the name and namespace for the given network policy +2. 
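+
+Before moving on, it can be useful to list the policies now applied in the 'ui' namespace and review their rules. The policy names depend on the manifests applied above, so treat this as an illustrative check:
+
+```bash test=false
+$ kubectl -n ui get networkpolicy
+$ kubectl -n ui describe networkpolicy
+```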
`spec.podSelector` allows for the selection of specific pods based on their labels within the namespace to which the given network policy will be applied. If an empty pod selector or matchLabels is specified in the specification, then the policy will be applied to all the pods within the namespace. +3. `spec.policyTypes` specifies whether the policy will be applied to ingress traffic, egress traffic, or both for the selected pods. If you do not specify this field, then the default behavior is to apply the network policy to ingress traffic only, unless the network policy has an egress section, in which case the network policy will be applied to both ingress and egress traffic. +4. `ingress` allows for ingress rules to be configured that specify from which pods (`podSelector`), namespace (`namespaceSelector`), or CIDR range (`ipBlock`) traffic is allowed to the selected pods and which port or port range can be used. If a port or port range is not specified, any port can be used for communication. +5. `egress` allows for egress rules to be configured that specify to which pods (`podSelector`), namespace (`namespaceSelector`), or CIDR range (`ipBlock`) traffic is allowed from the selected pods and which port or port range can be used. If a port or port range is not specified, any port can be used for communication. + +For more information about what capabilities are allowed or restricted for Kubernetes network policies, refer to the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/network-policies/). + +In addition to network policies, Amazon VPC CNI in IPv4 mode offers a powerful feature known as "Security Groups for Pods." This feature enables you to use Amazon EC2 security groups to define comprehensive rules governing inbound and outbound network traffic to and from the pods deployed on your nodes. While there is overlap in capabilities between security groups for pods and network policies, there are some key differences. + +- Security groups allow control of ingress and egress traffic to CIDR ranges, whereas network policies allow control of ingress and egress traffic to pods, namespaces as well as CIDR ranges. +- Security groups allow control of ingress and egress traffic from other security groups, which is not available for network policies. + +Amazon EKS strongly recommends employing network policies in conjunction with security groups to restrict network communication between pods, thus reducing the attack surface and minimizing potential vulnerabilities. diff --git a/website/docs/fastpaths/operator/network-policies/ingress.md b/website/docs/fastpaths/operator/network-policies/ingress.md new file mode 100644 index 0000000000..9485737401 --- /dev/null +++ b/website/docs/fastpaths/operator/network-policies/ingress.md @@ -0,0 +1,142 @@ +--- +title: "Implementing Ingress Controls" +sidebar_position: 80 +--- + + + +As shown in the architecture diagram, the 'catalog' namespace receives traffic only from the 'ui' namespace and from no other namespace. Also, the 'catalog' database component can only receive traffic from the 'catalog' service component. + +We can start implementing the above network rules using an ingress network policy that will control traffic to the 'catalog' namespace. + +Before applying the policy, the 'catalog' service can be accessed by both the 'ui' component: + +```bash timeout=180 +$ kubectl exec deployment/ui -n ui -- curl -v catalog.catalog/health --connect-timeout 5 + Trying XXX.XXX.XXX.XXX:80... 
+* Connected to catalog.catalog (XXX.XXX.XXX.XXX) port 80 (#0) +> GET /health HTTP/1.1 +> Host: catalog.catalog +> User-Agent: curl/7.88.1 +> Accept: */* +> +< HTTP/1.1 200 OK +... +``` + +As well as the 'orders' component: + +```bash +$ kubectl exec deployment/orders -n orders -- curl -v catalog.catalog/health --connect-timeout 5 + Trying XXX.XXX.XXX.XXX:80... +* Connected to catalog.catalog (XXX.XXX.XXX.XXX) port 80 (#0) +> GET /health HTTP/1.1 +> Host: catalog.catalog +> User-Agent: curl/7.88.1 +> Accept: */* +> +< HTTP/1.1 200 OK +... +``` + +Now, we'll define a network policy that will allow traffic to the 'catalog' service component only from the 'ui' component: + +::yaml{file="manifests/modules/networking/network-policies/apply-network-policies/allow-catalog-ingress-webservice.yaml" paths="spec.podSelector,spec.ingress.0.from.0"} + +1. The `podSelector` targets pods with labels `app.kubernetes.io/name: catalog` and `app.kubernetes.io/component: service` +2. This `ingress.from` configuration allows inbound connections only from pods running in the `ui` namespace identified by `kubernetes.io/metadata.name: ui` with label `app.kubernetes.io/name: ui` + +Let's apply the policy: + +```bash wait=45 +$ kubectl apply -f ~/environment/eks-workshop/modules/networking/network-policies/apply-network-policies/allow-catalog-ingress-webservice.yaml +``` + +Now, we can validate the policy by confirming that we can still access the 'catalog' component from the 'ui': + +```bash +$ kubectl exec deployment/ui -n ui -- curl -v catalog.catalog/health --connect-timeout 5 + Trying XXX.XXX.XXX.XXX:80... +* Connected to catalog.catalog (XXX.XXX.XXX.XXX) port 80 (#0) +> GET /health HTTP/1.1 +> Host: catalog.catalog +> User-Agent: curl/7.88.1 +> Accept: */* +> +< HTTP/1.1 200 OK +... +``` + +But not from the 'orders' component: + +```bash expectError=true +$ kubectl exec deployment/orders -n orders -- curl -v catalog.catalog/health --connect-timeout 5 +* Trying XXX.XXX.XXX.XXX:80... +* ipv4 connect timeout after 4999ms, move on! +* Failed to connect to catalog.catalog port 80 after 5001 ms: Timeout was reached +* Closing connection 0 +curl: (28) Failed to connect to catalog.catalog port 80 after 5001 ms: Timeout was reached +... +``` + +As you could see from the above outputs, only the 'ui' component is able to communicate with the 'catalog' service component, and the 'orders' service component is not able to. + +But this still leaves the 'catalog' database component open, so let us implement a network policy to ensure only the 'catalog' service component can communicate with the 'catalog' database component. + +::yaml{file="manifests/modules/networking/network-policies/apply-network-policies/allow-catalog-ingress-db.yaml" paths="spec.podSelector,spec.ingress.0.from.0"} + +1. The `podSelector` targets pods with labels `app.kubernetes.io/name: catalog` and `app.kubernetes.io/component: mysql` +2. The `ingress.from` allows inbound connections only from pods with labels `app.kubernetes.io/name: catalog` and `app.kubernetes.io/component: service` + +Lets apply the policy: + +```bash wait=45 +$ kubectl apply -f ~/environment/eks-workshop/modules/networking/network-policies/apply-network-policies/allow-catalog-ingress-db.yaml +``` + +Let us validate the network policy by confirming we cannot connect to the 'catalog' database from the 'orders' component: + +```bash expectError=true +$ kubectl exec deployment/orders -n orders -- curl -v catalog-mysql.catalog:3306 --connect-timeout 5 +* Trying XXX.XXX.XXX.XXX:3306... 
+* ipv4 connect timeout after 4999ms, move on!
+* Failed to connect to catalog-mysql.catalog port 3306 after 5001 ms: Timeout was reached
+* Closing connection 0
+curl: (28) Failed to connect to catalog-mysql.catalog port 3306 after 5001 ms: Timeout was reached
+command terminated with exit code 28
+...
+```
+
+It's important to note that the network policy doesn't rely on IP addresses. You can try restarting the 'catalog' pod and confirm that you can still connect:
+
+```bash timeout=180
+$ kubectl rollout restart deployment/catalog -n catalog
+$ kubectl rollout status deployment/catalog -n catalog --timeout=2m
+```
+
+Now, let's check if we can connect to the 'catalog-mysql' database from a 'catalog' pod:
+
+```bash
+$ kubectl exec deployment/catalog -n catalog -- curl -v catalog-mysql.catalog:3306 --connect-timeout 5 --http0.9
+  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
+                                 Dload  Upload   Total   Spent    Left  Speed
+  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0* Host catalog-mysql.catalog:3306 was resolved.
+* IPv6: (none)
+* IPv4: 172.20.233.240
+*   Trying 172.20.233.240:3306...
+* Connected to catalog-mysql.catalog (172.20.233.240) port 3306
+* using HTTP/1.x
+> GET / HTTP/1.1
+> Host: catalog-mysql.catalog:3306
+> User-Agent: curl/8.11.1
+> Accept: */*
+>
+* Request completely sent off
+{ [5 bytes data]
+100   115    0   115    0     0  20901      0 --:--:-- --:--:-- --:--:-- 23000
+* shutting down connection #0
+```
+
+As you can see from the above outputs, only the 'catalog' service component is able to communicate with the 'catalog' database component.
+
+Now that we have implemented an effective ingress policy for the 'catalog' namespace, we can extend the same logic to other namespaces and components in the sample application, thereby greatly reducing the attack surface for the sample application and increasing network security.
diff --git a/website/docs/fastpaths/operator/network-policies/setup.md b/website/docs/fastpaths/operator/network-policies/setup.md
new file mode 100644
index 0000000000..3e1aa373df
--- /dev/null
+++ b/website/docs/fastpaths/operator/network-policies/setup.md
@@ -0,0 +1,44 @@
+---
+title: "Lab setup"
+sidebar_position: 60
+---
+
+In this lab, we are going to implement network policies for the sample application deployed in the lab cluster. The sample application component architecture is shown below.
+
+
+
+Each component in the sample application is implemented in its own namespace. For example, the **'ui'** component is deployed in the **'ui'** namespace, whereas the **'catalog'** web service and **'catalog'** MySQL database are deployed in the **'catalog'** namespace.
+
+Currently, no network policies are defined, and any component in the sample application can communicate with any other component or any external service. For example, the 'catalog' component can directly communicate with the 'checkout' component. We can validate this using the command below:
+
+```bash
+$ kubectl exec deployment/catalog -n catalog -- curl -s http://checkout.checkout/health | jq
+{
+  "status": "ok",
+  "info": {
+    "chaos": {
+      "status": "up"
+    }
+  },
+  "error": {},
+  "details": {
+    "chaos": {
+      "status": "up"
+    }
+  }
+}
+```
+
+Let's make the required configuration changes in our EKS Auto Mode cluster to enable network policies. To do that, we'll create a ConfigMap for the VPC Container Network Interface (CNI) plugin that provides networking for the cluster.
+ +::yaml{file="manifests/modules/fastpaths/operators/network-policies/vpc-cni-policies.yaml" paths="data.enable-network-policy-controller"} + +1. This will enable the network policy controller in the vpc-cni plugin + +Apply this configuration: + +```bash timeout=180 +$ kubectl apply -f ~/environment/eks-workshop/modules/fastpaths/operators/network-policies/vpc-cni-policies.yaml +``` + +Let's now implement some network rules so we can better control the network traffic flow for the sample application. diff --git a/website/docs/fastpaths/operator/network-policies/tests/hook-suite.sh b/website/docs/fastpaths/operator/network-policies/tests/hook-suite.sh new file mode 100644 index 0000000000..6f199947c6 --- /dev/null +++ b/website/docs/fastpaths/operator/network-policies/tests/hook-suite.sh @@ -0,0 +1,19 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + # Clean up network policies applied during the lab + kubectl delete networkpolicy --all -n ui --ignore-not-found + kubectl delete networkpolicy --all -n catalog --ignore-not-found + + # Restart affected pods to clear any cached connection state + kubectl rollout restart deployment/catalog -n catalog + kubectl rollout restart deployment/ui -n ui + kubectl rollout status deployment/catalog -n catalog --timeout=120s + kubectl rollout status deployment/ui -n ui --timeout=120s +} + +"$@" diff --git a/website/docs/fastpaths/operator/secrets-manager/ascp.md b/website/docs/fastpaths/operator/secrets-manager/ascp.md new file mode 100644 index 0000000000..7fa9b399bd --- /dev/null +++ b/website/docs/fastpaths/operator/secrets-manager/ascp.md @@ -0,0 +1,47 @@ +--- +title: "AWS Secrets and Configuration Provider (ASCP)" +sidebar_position: 422 +--- + +For this workshop, we pre-configured the AWS Secrets and Configuration Provider (ASCP) into your EKS cluster. + +Let's validate that the addons were deployed correctly. + +First, check the Secret Store CSI driver `DaemonSet` and its `Pods`: + +```bash +$ kubectl -n kube-system get daemonsets,pods -l app=secrets-store-csi-driver +NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +daemonset.apps/csi-secrets-store-secrets-store-csi-driver 3 3 3 3 3 kubernetes.io/os=linux 3m57s + +NAME READY STATUS RESTARTS AGE +pod/csi-secrets-store-secrets-store-csi-driver-bzddm 3/3 Running 0 3m57s +``` + +Next, check the CSI Secrets Store Provider for AWS driver `DaemonSet` and its `Pods`: + +```bash +$ kubectl -n kube-system get daemonset,pods -l "app=secrets-store-csi-driver-provider-aws" +NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +daemonset.apps/secrets-store-csi-driver-provider-aws 3 3 3 3 3 kubernetes.io/os=linux 2m3s + +NAME READY STATUS RESTARTS AGE +pod/secrets-store-csi-driver-provider-aws-4jf8f 1/1 Running 0 2m2s +``` + +To provide access to secrets stored in AWS Secrets Manager via the CSI driver, you'll need a `SecretProviderClass` - a namespaced custom resource that provides driver configurations and parameters matching the information in AWS Secrets Manager. + +::yaml{file="manifests/modules/security/secrets-manager/secret-provider-class.yaml" paths="spec.provider,spec.parameters.objects,spec.secretObjects.0"} + +1. `provider: aws` specifies AWS Secrets Store CSI driver +2. `parameters.objects` defines the AWS `secretsmanager` source secret named `$SECRET_NAME` and uses [jmesPath](https://jmespath.org/) to extract specific `username` and `password` fields into named aliases for Kubernetes consumption +3. 
`secretObjects` creates a standard `Opaque` Kubernetes secret named `catalog-secret` that maps the extracted `username` and `password` fields to secret keys + +Let's create this resource: + +```bash +$ cat ~/environment/eks-workshop/modules/security/secrets-manager/secret-provider-class.yaml \ + | envsubst | kubectl apply -f - +``` + +The Secret Store CSI Driver acts as an intermediary between Kubernetes and external secrets providers like AWS Secrets Manager. When configured with a SecretProviderClass, it can both mount secrets as files in Pod volumes and create synchronized Kubernetes Secret objects, providing flexibility in how applications consume these secrets. diff --git a/website/docs/fastpaths/operator/secrets-manager/create-secret.md b/website/docs/fastpaths/operator/secrets-manager/create-secret.md new file mode 100644 index 0000000000..7b28d29c44 --- /dev/null +++ b/website/docs/fastpaths/operator/secrets-manager/create-secret.md @@ -0,0 +1,41 @@ +--- +title: "Storing secrets in AWS Secrets Manager" +sidebar_position: 421 +--- + +Let's begin by creating a secret in AWS Secrets Manager using the AWS CLI. We'll create a secret that contains JSON-encoded credentials with username and password values: + +```bash +$ export SECRET_SUFFIX=$(openssl rand -hex 4) +$ export SECRET_NAME="$EKS_CLUSTER_AUTO_NAME-catalog-secret-${SECRET_SUFFIX}" +$ aws secretsmanager create-secret --name "$SECRET_NAME" \ + --secret-string '{"username":"catalog", "password":"dYmNfWV4uEvTzoFu"}' --region $AWS_REGION | jq +{ + "ARN": "arn:aws:secretsmanager:us-west-2:1234567890:secret:eks-workshop-catalog-secret-WDD8yS", + "Name": "eks-workshop-catalog-secret-WDD8yS", + "VersionId": "7e0b352d-6666-4444-aaaa-cec1f1d2df1b" +} +``` + +:::note +We're generating a unique suffix for our secret name using `openssl` to ensure it doesn't conflict with any existing secrets in your account. +::: + +You can verify that the secret was created successfully by checking either the [AWS Secrets Manager Console](https://console.aws.amazon.com/secretsmanager/listsecrets) or using the AWS CLI. Let's use the CLI to examine the secret's metadata: + +```bash +$ aws secretsmanager describe-secret --secret-id "$SECRET_NAME" | jq +{ + "ARN": "arn:aws:secretsmanager:us-west-2:1234567890:secret:eks-workshop-catalog-secret-WDD8yS", + "Name": "eks-workshop-catalog-secret-WDD8yS", + "LastChangedDate": "2023-10-10T20:44:51.882000+00:00", + "VersionIdsToStages": { + "94d1fe43-87f5-42fb-bf28-f6b090f0ca44": [ + "AWSCURRENT" + ] + }, + "CreatedDate": "2023-10-10T20:44:51.439000+00:00" +} +``` + +Now that we've successfully created a secret in AWS Secrets Manager, we'll use it in our Kubernetes applications in the next sections. diff --git a/website/docs/fastpaths/operator/secrets-manager/external-secrets.md b/website/docs/fastpaths/operator/secrets-manager/external-secrets.md new file mode 100644 index 0000000000..9e4328e571 --- /dev/null +++ b/website/docs/fastpaths/operator/secrets-manager/external-secrets.md @@ -0,0 +1,135 @@ +--- +title: "External Secrets Operator" +sidebar_position: 424 +--- + +Now let's explore integrating with AWS Secrets Manager using the External Secrets operator. 
This has already been installed in our EKS cluster: + +```bash wait=30 +$ kubectl -n external-secrets get pods +NAME READY STATUS RESTARTS AGE +external-secrets-6d95d66dc8-5trlv 1/1 Running 0 7m +external-secrets-cert-controller-774dff987b-krnp7 1/1 Running 0 7m +external-secrets-webhook-6565844f8f-jxst8 1/1 Running 0 7m +$ kubectl -n external-secrets get sa +NAME SECRETS AGE +default 0 7m +external-secrets-sa 0 7m +``` + +The operator uses a ServiceAccount named `external-secrets-sa` which is tied to an IAM role via [EKS Pod Identities](../amazon-eks-pod-identity/), providing access to AWS Secrets Manager for retrieving secrets: + +We need to create a `ClusterSecretStore` resource - this is a cluster-wide SecretStore that can be referenced by ExternalSecrets from any namespace. Let's inspect the file we will use to create this `ClusterSecretStore`: + +::yaml{file="manifests/modules/fastpaths/operators/external-secrets/cluster-secret-store.yaml" paths="spec.provider.aws.service,spec.provider.aws.region"} + +1. Set `service: SecretsManager` to use AWS Secrets Manager as the secret source +2. Use the `$AWS_REGION` environment variable to specify the AWS region where secrets are stored + +:::note +With EKS Pod Identites there is no need for the auth section here as the ServiceAccount authenticate via the Pod Identity Association linking the service account `external-secrets-sa` to an IAM role with AWS Secrets Manager permissions +::: + +Let's use this file to create the ClusterSecretStore resource. + +```bash timeout=300 +$ kubectl wait --for=condition=available deployment/external-secrets-webhook -n external-secrets --timeout=240s +$ cat ~/environment/eks-workshop/modules/fastpaths/operators/external-secrets/cluster-secret-store.yaml \ + | envsubst | kubectl apply -f - +``` + +Next, we'll create an `ExternalSecret` that defines what data should be fetched from AWS Secrets Manager and how it should be transformed into a Kubernetes Secret. We'll then update our `catalog` Deployment to use these credentials: + +```kustomization +modules/security/secrets-manager/external-secrets/kustomization.yaml +Deployment/catalog +ExternalSecret/catalog-external-secret +``` + +```bash timeout=180 +$ kubectl kustomize ~/environment/eks-workshop/modules/security/secrets-manager/external-secrets/ \ + | envsubst | kubectl apply -f- +$ kubectl rollout status -n catalog deployment/catalog --timeout=120s +``` + +Let's examine our new `ExternalSecret` resource: + +```bash +$ kubectl -n catalog get externalsecrets.external-secrets.io +NAME STORE REFRESH INTERVAL STATUS READY +catalog-external-secret cluster-secret-store 1h SecretSynced True +``` + +The `SecretSynced` status indicates successful synchronization from AWS Secrets Manager. Let's look at the resource specifications: + +```bash +$ kubectl -n catalog get externalsecrets.external-secrets.io catalog-external-secret -o yaml | yq '.spec' +dataFrom: + - extract: + conversionStrategy: Default + decodingStrategy: None + key: eks-workshop-catalog-secret-WDD8yS +refreshInterval: 1h +secretStoreRef: + kind: ClusterSecretStore + name: cluster-secret-store +target: + creationPolicy: Owner + deletionPolicy: Retain +``` + +The configuration references our AWS Secrets Manager secret via the `key` parameter and the `ClusterSecretStore` we created earlier. The `refreshInterval` of 1 hour determines how often the secret values are synchronized. 
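+
+If a secret ever fails to sync, the status conditions on the ExternalSecret are a good first place to look. The following is an illustrative check only; the exact fields shown may vary between External Secrets Operator versions:
+
+```bash test=false
+$ kubectl -n catalog get externalsecrets.external-secrets.io catalog-external-secret -o yaml | yq '.status.conditions'
+```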
+ +When we create an ExternalSecret, it automatically creates a corresponding Kubernetes secret: + +```bash +$ kubectl -n catalog get secrets +NAME TYPE DATA AGE +catalog-db Opaque 2 21h +catalog-external-secret Opaque 2 1m +catalog-secret Opaque 2 5h40m +``` + +This secret is owned by the External Secrets Operator: + +```bash +$ kubectl -n catalog get secret catalog-external-secret -o yaml | yq '.metadata.ownerReferences' +- apiVersion: external-secrets.io/v1beta1 + blockOwnerDeletion: true + controller: true + kind: ExternalSecret + name: catalog-external-secret + uid: b8710001-366c-44c2-8e8d-462d85b1b8d7 +``` + +We can verify our `catalog` pod is using the new secret values: + +```bash +$ kubectl -n catalog get pods +NAME READY STATUS RESTARTS AGE +catalog-777c4d5dc8-lmf6v 1/1 Running 0 1m +catalog-mysql-0 1/1 Running 0 24h +$ kubectl -n catalog get deployment catalog -o yaml | yq '.spec.template.spec.containers[] | .env' +- name: RETAIL_CATALOG_PERSISTENCE_USER + valueFrom: + secretKeyRef: + key: username + name: catalog-external-secret +- name: RETAIL_CATALOG_PERSISTENCE_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: catalog-external-secret +``` + +### Conclusion + +There is no single "best" choice between **AWS Secrets and Configuration Provider (ASCP)** and **External Secrets Operator (ESO)** for managing AWS Secrets Manager secrets. + +Each tool has distinct advantages: + +- **ASCP** can mount secrets directly from AWS Secrets Manager as volumes, avoiding exposure as environment variables, though this requires volume management. + +- **ESO** simplifies Kubernetes Secrets lifecycle management and offers cluster-wide SecretStore capability, but doesn't support volume mounting. + +Your specific use case should drive the decision, and using both tools can provide maximum flexibility and security in secrets management. diff --git a/website/docs/fastpaths/operator/secrets-manager/index.md b/website/docs/fastpaths/operator/secrets-manager/index.md new file mode 100644 index 0000000000..44ff84971b --- /dev/null +++ b/website/docs/fastpaths/operator/secrets-manager/index.md @@ -0,0 +1,23 @@ +--- +title: "Managing secrets with AWS Secrets Manager" +sidebar_position: 40 +description: "Provide sensitive configuration like credentials to applications running on Amazon Elastic Kubernetes Service with AWS Secrets Manager." +--- + +:::tip What's been set up for you +Your Amazon EKS Auto Mode cluster is configured with the following components. + +- Kubernetes Secrets Store CSI Driver +- AWS Secrets and Configuration Provider +- External Secrets Operator +::: + +[AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) is a service that enables you to easily rotate, manage, and retrieve sensitive data including credentials, API keys, and certificates. Using the [AWS Secrets and Configuration Provider (ASCP)](https://github.com/aws/secrets-store-csi-driver-provider-aws) with the [Kubernetes Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/), you can mount secrets stored in Secrets Manager as volumes in Kubernetes Pods. + +ASCP allows workloads running on Amazon EKS to access secrets stored in Secrets Manager through fine-grained access control using IAM roles and policies. When a Pod requests access to a secret, ASCP retrieves the Pod's identity, exchanges it for an IAM role, assumes that role, and then retrieves only the secrets authorized for that role from Secrets Manager. 
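+
+For illustration, the IAM policy attached to that role is typically scoped down to only the secrets a workload needs. A minimal sketch using the AWS CLI, where the role name, policy name, and secret ARN are placeholders rather than values created by this workshop:
+
+```bash test=false
+$ aws iam put-role-policy --role-name my-ascp-pod-role \
+  --policy-name catalog-secret-read \
+  --policy-document '{
+    "Version": "2012-10-17",
+    "Statement": [{
+      "Effect": "Allow",
+      "Action": ["secretsmanager:GetSecretValue", "secretsmanager:DescribeSecret"],
+      "Resource": "arn:aws:secretsmanager:us-west-2:111122223333:secret:eks-workshop-catalog-secret-*"
+    }]
+  }'
+```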
+ +An alternative approach for integrating AWS Secrets Manager with Kubernetes is through [External Secrets](https://external-secrets.io/). This operator synchronizes secrets from AWS Secrets Manager into Kubernetes Secrets, managing the entire lifecycle through an abstraction layer. It automatically injects values from Secrets Manager into Kubernetes Secrets. + +Both approaches support automatic secret rotation through Secrets Manager. When using External Secrets, you can configure a refresh interval to poll for updates, while the Secrets Store CSI Driver provides a rotation reconciler feature to ensure Pods always have the latest secret values. + +In the following sections, we'll explore practical examples of managing secrets using both AWS Secrets Manager with ASCP and External Secrets. diff --git a/website/docs/fastpaths/operator/secrets-manager/mounting-secrets.md b/website/docs/fastpaths/operator/secrets-manager/mounting-secrets.md new file mode 100644 index 0000000000..3f7f06f854 --- /dev/null +++ b/website/docs/fastpaths/operator/secrets-manager/mounting-secrets.md @@ -0,0 +1,130 @@ +--- +title: "Mounting AWS Secrets Manager secret on Kubernetes Pod" +sidebar_position: 423 +--- + +Now that we have a secret stored in AWS Secrets Manager and synchronized with a Kubernetes Secret, let's mount it inside the Pod. First, we should examine the `catalog` Deployment and the existing Secrets in the `catalog` namespace. + +Currently, the `catalog` Deployment accesses database credentials from the `catalog-db` secret via environment variables: + +- `RETAIL_CATALOG_PERSISTENCE_USER` +- `RETAIL_CATALOG_PERSISTENCE_PASSWORD` + +This is done by referencing a Secret with `envFrom`: + +```bash +$ kubectl -n catalog get deployment catalog -o yaml | yq '.spec.template.spec.containers[] | .envFrom' + +- configMapRef: + name: catalog +- secretRef: + name: catalog-db +``` + +The `catalog` Deployment currently has no additional `volumes` or `volumeMounts` except for an `emptyDir` mounted at `/tmp`: + +```bash +$ kubectl -n catalog get deployment catalog -o yaml | yq '.spec.template.spec.volumes' +- emptyDir: + medium: Memory + name: tmp-volume +$ kubectl -n catalog get deployment catalog -o yaml | yq '.spec.template.spec.containers[] | .volumeMounts' +- mountPath: /tmp + name: tmp-volume +``` + +Let's modify the `catalog` Deployment to use the secret stored in AWS Secrets Manager as the source for credentials: + +```kustomization +modules/security/secrets-manager/mounting-secrets/kustomization.yaml +Deployment/catalog +``` + +We'll mount the AWS Secrets Manager secret using the CSI driver with the SecretProviderClass we validated earlier at the `/etc/catalog-secret` mountPath inside the Pod. This will trigger AWS Secrets Manager to synchronize the stored secret contents with Amazon EKS and create a Kubernetes Secret that can be consumed as environment variables in the Pod. + +```bash timeout=180 +$ kubectl kustomize ~/environment/eks-workshop/modules/security/secrets-manager/mounting-secrets/ \ + | envsubst | kubectl apply -f- +$ kubectl rollout status -n catalog deployment/catalog --timeout=120s +``` + +Let's verify the changes made in the `catalog` namespace. 
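+
+Before inspecting the Deployment itself, you can confirm that the `SecretProviderClass` we created earlier is still present in the namespace, since the new volume references it by name:
+
+```bash test=false
+$ kubectl -n catalog get secretproviderclasses.secrets-store.csi.x-k8s.io
+```
+
+The output should list the `catalog-spc` resource that the volume configuration below points to.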
+ +The Deployment now has a new `volume` and corresponding `volumeMount` that uses the CSI Secret Store Driver and is mounted at `/etc/catalog-secret`: + +```bash +$ kubectl -n catalog get deployment catalog -o yaml | yq '.spec.template.spec.volumes' +- csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: catalog-spc + name: catalog-secret +- emptyDir: + medium: Memory + name: tmp-volume +$ kubectl -n catalog get deployment catalog -o yaml | yq '.spec.template.spec.containers[] | .volumeMounts' +- mountPath: /etc/catalog-secret + name: catalog-secret + readOnly: true +- mountPath: /tmp + name: tmp-volume +``` + +Mounted Secrets provide a secure way to access sensitive information as files inside the Pod's container filesystem. This approach offers several benefits including not exposing secret values as environment variables and automatic updates when the source Secret is modified. + +Let's examine the contents of the mounted Secret inside the Pod: + +```bash +$ kubectl -n catalog exec deployment/catalog -- ls /etc/catalog-secret/ +eks-workshop-auto-catalog-secret-WDD8yS +password +username +$ kubectl -n catalog exec deployment/catalog -- cat /etc/catalog-secret/${SECRET_NAME} | jq +{"username":"catalog", "password":"dYmNfWV4uEvTzoFu"} +$ kubectl -n catalog exec deployment/catalog -- cat /etc/catalog-secret/username | yq +catalog +$ kubectl -n catalog exec deployment/catalog -- cat /etc/catalog-secret/password | yq +dYmNfWV4uEvTzoFu +``` + +:::info +When mounting secrets from AWS Secrets Manager using the CSI driver, three files are created in the mountPath: + +1. A file with the name of your AWS secret containing the complete JSON value +2. Individual files for each key extracted via jmesPath expressions as defined in the SecretProviderClass + +::: + +The environment variables are now sourced from the newly created `catalog-secret`, which was automatically created by the SecretProviderClass via the CSI Secret Store driver: + +```bash +$ kubectl -n catalog get deployment catalog -o yaml | yq '.spec.template.spec.containers[] | .env' +- name: RETAIL_CATALOG_PERSISTENCE_USER + valueFrom: + secretKeyRef: + key: username + name: catalog-secret +- name: RETAIL_CATALOG_PERSISTENCE_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: catalog-secret +$ kubectl -n catalog get secrets +NAME TYPE DATA AGE +catalog-db Opaque 2 15h +catalog-secret Opaque 2 43s +``` + +We can confirm the environment variables are set correctly in the running pod: + +```bash +$ kubectl -n catalog exec -ti deployment/catalog -- env | grep PERSISTENCE +RETAIL_CATALOG_PERSISTENCE_ENDPOINT=catalog-mysql:3306 +RETAIL_CATALOG_PERSISTENCE_PASSWORD=dYmNfWV4uEvTzoFu +RETAIL_CATALOG_PERSISTENCE_PROVIDER=mysql +RETAIL_CATALOG_PERSISTENCE_DB_NAME=catalog +RETAIL_CATALOG_PERSISTENCE_USER=catalog +``` + +We now have a Kubernetes Secret fully integrated with AWS Secrets Manager that can leverage secret rotation, a best practice for secrets management. When a secret is rotated or updated in AWS Secrets Manager, we can roll out a new version of the Deployment allowing the CSI Secret Store driver to synchronize the Kubernetes Secret contents with the updated value. 
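+
+For example, a minimal way to pick up a rotated value without editing any manifests is to restart the Deployment, which re-mounts the CSI volume and refreshes the synced Kubernetes Secret:
+
+```bash test=false
+$ kubectl -n catalog rollout restart deployment/catalog
+$ kubectl -n catalog rollout status deployment/catalog --timeout=120s
+```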
diff --git a/website/docs/fastpaths/operator/secrets-manager/tests/hook-install.sh b/website/docs/fastpaths/operator/secrets-manager/tests/hook-install.sh new file mode 100644 index 0000000000..2a4aef296f --- /dev/null +++ b/website/docs/fastpaths/operator/secrets-manager/tests/hook-install.sh @@ -0,0 +1,12 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 120 + kubectl wait --for condition=established --timeout=120s crd secretproviderclasses.secrets-store.csi.x-k8s.io +} + +"$@" diff --git a/website/docs/fastpaths/operator/secrets-manager/tests/hook-suite.sh b/website/docs/fastpaths/operator/secrets-manager/tests/hook-suite.sh new file mode 100644 index 0000000000..704bb2247b --- /dev/null +++ b/website/docs/fastpaths/operator/secrets-manager/tests/hook-suite.sh @@ -0,0 +1,27 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + # Clean up secrets lab artifacts + kubectl delete secretproviderclass catalog-spc -n catalog --ignore-not-found + kubectl delete externalsecret catalog-external-secret -n catalog --ignore-not-found + kubectl delete clustersecretstore cluster-secret-store --ignore-not-found 2>/dev/null || true + + # Delete the test secret from Secrets Manager + if [ -n "${SECRET_NAME:-}" ]; then + aws secretsmanager delete-secret --secret-id "$SECRET_NAME" --force-delete-without-recovery 2>/dev/null || true + fi + + # Restore catalog to base state + kubectl apply -k ~/environment/eks-workshop/base-application/catalog + kubectl rollout status deployment/catalog -n catalog --timeout=120s + + # Wait for all workshop pods to stabilize before next lab + sleep 30 + kubectl wait --for=condition=Ready --timeout=300s pods -l app.kubernetes.io/created-by=eks-workshop -A +} + +"$@" diff --git a/website/docs/security/kyverno/assets/.gitkeep b/website/docs/fastpaths/setup/.notest similarity index 100% rename from website/docs/security/kyverno/assets/.gitkeep rename to website/docs/fastpaths/setup/.notest diff --git a/website/docs/fastpaths/setup/aws-event.md b/website/docs/fastpaths/setup/aws-event.md new file mode 100644 index 0000000000..9a3999b805 --- /dev/null +++ b/website/docs/fastpaths/setup/aws-event.md @@ -0,0 +1,44 @@ +--- +title: At an AWS event +sidebar_position: 20 +--- + +By participating in this workshop, you will be provided with an AWS account to use to complete the lab material. Connect to the portal by browsing to [https://catalog.workshops.aws/](https://catalog.workshops.aws/). Click on **Get Started**. + +![Workshop Studio Home](/docs/introduction/setup/workshop-studio-home.webp) + +You will be prompted to sign in. Select the option **Email One-Time Password(OTP)**. + +![Workshop Studio Sign in](/docs/introduction/setup/ws-studio-login.webp) + +Enter your email address and press **Send passcode**, which will send a one-time passcode to your inbox. When the email arrives, enter the passcode and log in. + +Your instructor should have provided you with an **Event access code** prior to starting these exercises. Enter the provided code in the text box and click **Next**. + +![Event Code](/docs/introduction/setup/event-code.webp) + +Read and accept the Terms and Conditions and click **Join event** to continue. + +![Review and Join](/docs/introduction/setup/review-and-join.webp) + +You will be presented with your personal dashboard. 
Select the **Open AWS Console** button to be taken to your AWS account console: + +![Open Console](/docs/introduction/setup/openconsole.webp) + +Next, return to the personal dashboard page and scroll down to the **Event Outputs** section. Copy the URL from the **IdeUrl** field and open it in a new browser tab: + +![Cloud9 Link](/docs/introduction/setup/workshop-studio-06.png) + +You will be prompted for a password: + +![IDE Password](/docs/introduction/setup/visual-studio-01.png) + +Enter the value from the **IdePassword** field from the outputs and the web IDE will load. + +![Code-server login screen](/docs/introduction/setup/vscode-splash.webp) + +Press **Get started** to access the workshop splash page: + +![Get Started](/docs/introduction/setup/workshop-event-page.webp) + +You can now proceed to the [Navigating the labs section](/docs/fastpaths/navigating-labs). diff --git a/website/docs/fastpaths/setup/index.md b/website/docs/fastpaths/setup/index.md new file mode 100644 index 0000000000..96bc98fa31 --- /dev/null +++ b/website/docs/fastpaths/setup/index.md @@ -0,0 +1,17 @@ +--- +title: Setup +sidebar_position: 20 +--- + +This section outlines how to set up the environment to run the labs in the workshop. + +You have two options for accessing the workshop environment: + +1. [At an AWS event](./aws-event.md) - For participants attending an AWS-hosted workshop event +2. [In your own account](./your-account/index.md) - For running the workshop in your personal AWS account + +:::info +Each option provides detailed instructions for getting access to the required resources and tools for completing the workshop exercises. +::: + +Choose the option that best matches your situation to proceed with setting up your environment. diff --git a/website/docs/fastpaths/setup/your-account/cleanup.md b/website/docs/fastpaths/setup/your-account/cleanup.md new file mode 100644 index 0000000000..82bb8c604b --- /dev/null +++ b/website/docs/fastpaths/setup/your-account/cleanup.md @@ -0,0 +1,27 @@ +--- +title: Cleaning up +sidebar_position: 90 +--- + +:::caution + +Make sure you have run the respective clean up instructions for the mechanism you used to provision the lab EKS cluster before proceeding: + +- [eksctl](./using-eksctl.md) +- [Terraform](./using-terraform.md) + +::: + +This section outlines how to clean up the IDE we've used to run the labs. + +First, open CloudShell in the region where you deployed the CloudFormation stack: + + + +Then run the following command to delete the CloudFormation stack: + +```bash test=false +$ aws cloudformation delete-stack --stack-name eks-workshop-ide +``` + +Once the stack is deleted, all resources associated with the IDE will be removed from your AWS account, preventing further charges. diff --git a/website/docs/fastpaths/setup/your-account/index.md b/website/docs/fastpaths/setup/your-account/index.md new file mode 100644 index 0000000000..308154cf42 --- /dev/null +++ b/website/docs/fastpaths/setup/your-account/index.md @@ -0,0 +1,67 @@ +--- +title: In your AWS account +sidebar_position: 30 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::danger Warning +Provisioning this workshop environment in your AWS account will create resources and **there will be cost associated with them**. The cleanup section provides a guide to remove them, preventing further charges. +::: + +This section outlines how to set up the environment to run the labs in your own AWS account. 
+ +The first step is to create an IDE with the provided CloudFormation templates. Use the AWS CloudFormation quick-create links below to launch the desired template in the appropriate AWS region. + +| Region | Link | +| ---------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `us-west-2` | [Launch](https://us-west-2.console.aws.amazon.com/cloudformation/home#/stacks/quickcreate?templateUrl=https://ws-assets-prod-iad-r-pdx-f3b3f9f1a7d6a3d0.s3.us-west-2.amazonaws.com/39146514-f6d5-41cb-86ef-359f9d2f7265/eks-workshop-vscode-cfn.yaml&stackName=eks-workshop-ide¶m_RepositoryRef=VAR::MANIFESTS_REF) | +| `eu-west-1` | [Launch](https://eu-west-1.console.aws.amazon.com/cloudformation/home#/stacks/quickcreate?templateUrl=https://ws-assets-prod-iad-r-dub-85e3be25bd827406.s3.eu-west-1.amazonaws.com/39146514-f6d5-41cb-86ef-359f9d2f7265/eks-workshop-vscode-cfn.yaml&stackName=eks-workshop-ide¶m_RepositoryRef=VAR::MANIFESTS_REF) | +| `ap-southeast-1` | [Launch](https://ap-southeast-1.console.aws.amazon.com/cloudformation/home#/stacks/quickcreate?templateUrl=https://ws-assets-prod-iad-r-sin-694a125e41645312.s3.ap-southeast-1.amazonaws.com/39146514-f6d5-41cb-86ef-359f9d2f7265/eks-workshop-vscode-cfn.yaml&stackName=eks-workshop-ide¶m_RepositoryRef=VAR::MANIFESTS_REF) | + +These instructions have been tested in the AWS regions listed above and are not guaranteed to work in others without modification. + +:::warning + +The nature of the workshop material means that the IDE EC2 instance requires broad IAM permissions in your account, for example creating IAM roles. Before continuing please review the IAM permissions that will be provided to the IDE instance in the CloudFormation template. + +We are continuously working to optimize the IAM permissions. Please raise a [GitHub issue](https://github.com/aws-samples/eks-workshop-v2/issues) with any suggestions for improvement. + +::: + +Scroll to the bottom of the screen and acknowledge the IAM notice: + +acknowledge IAM + +Then click the **Create stack** button: + +Create Stack + +The CloudFormation stack will take roughly 5 minutes to deploy, and once completed you can retrieve information required to continue from the **Outputs** tab: + +cloudformation outputs + +The `IdeUrl` output contains the URL to enter in your browser to access the IDE. The `IdePasswordSecret` contains a link to an AWS Secrets Manager secret that contains a generated password for the IDE. + +To retrieve the password open the `IdePasswordSecret` URL and click the **Retrieve** button: + +secretsmanager retrieve + +The password will then be available for you to copy: + +password in Secrets Manager + +Open the IDE URL provided and you will be prompted for the password: + +IDE password prompt + +After submitting your password you will be presented with the initial IDE screen: + +IDE initial screen + +The next step is to create an EKS cluster to perform the lab exercises in. Please follow one of the guides below to provision a cluster that meets the requirements for these labs: + +- **(Recommended)** [eksctl](./using-eksctl.md) +- (Coming soon!) [Terraform](./using-terraform.md), interested? Let us know in the [GitHub repository](https://github.com/aws-samples/eks-workshop-v2/issues) +- (Coming soon!) 
CDK diff --git a/website/docs/fastpaths/setup/your-account/using-eksctl.md b/website/docs/fastpaths/setup/your-account/using-eksctl.md new file mode 100644 index 0000000000..9def0d67e6 --- /dev/null +++ b/website/docs/fastpaths/setup/your-account/using-eksctl.md @@ -0,0 +1,60 @@ +--- +title: Using eksctl +sidebar_position: 20 +pagination_next: fastpaths/navigating-labs +--- + +This section outlines how to build a cluster for the lab exercises using the [eksctl tool](https://eksctl.io/). This is the easiest way to get started, and is recommended for most learners. + +The `eksctl` utility has been pre-installed in your IDE environment, so we can immediately create the cluster. This is the configuration that will be used to build the cluster: + +::yaml{file="manifests/../cluster/eksctl/cluster-auto.yaml" paths="availabilityZones,metadata.name,autoModeConfig.nodePools" title="cluster.yaml"} + +1. Create a VPC across three availability zones +2. Create an EKS cluster, named `eks-workshop-auto` by default +3. Enable EKS Auto Mode built-in node pools + + +Apply the configuration file like so: + +```bash +$ export EKS_CLUSTER_AUTO_NAME=eks-workshop-auto +$ curl -fsSL https://raw.githubusercontent.com/VAR::MANIFESTS_OWNER/VAR::MANIFESTS_REPOSITORY/VAR::MANIFESTS_REF/cluster/eksctl/cluster-auto.yaml | \ +envsubst | eksctl create cluster -f - +``` + +This process will take approximately 20 minutes to complete. + +## Next Steps + +Now that the cluster is ready, head to the Navigating the labs section to get started. + +import Link from '@docusaurus/Link'; + +Continue to Navigating the Labs → + +

+ +--- + +## Cleaning Up (after you're done with the entire Workshop) + +:::tip +The following demonstrates how to clean up resources once you are done using the EKS cluster. Completing these steps will prevent further charges to your AWS account. +::: + +Before deleting the IDE environment, clean up the cluster that we set up in previous steps. + +First, use `delete-environment` to ensure that the sample application and any left-over lab infrastructure is removed: + +```bash +$ delete-environment +``` + +Next, delete the cluster with `eksctl`: + +```bash +$ eksctl delete cluster $EKS_CLUSTER_AUTO_NAME --wait +``` + +You can now proceed to [cleaning](./cleanup.md) up the IDE. diff --git a/website/docs/fastpaths/setup/your-account/using-terraform.md b/website/docs/fastpaths/setup/your-account/using-terraform.md new file mode 100644 index 0000000000..023fa7a736 --- /dev/null +++ b/website/docs/fastpaths/setup/your-account/using-terraform.md @@ -0,0 +1,11 @@ +--- +title: Using Terraform +sidebar_position: 30 +pagination_next: null +pagination_prev: null +--- + +:::danger +Creating the EKS Auto Mode cluster with Terraform is currently not supported. Please raise any interest in the [GitHub repository](https://github.com/aws-samples/eks-workshop-v2/issues). +::: + diff --git a/website/docs/introduction/basics/configuration/configmaps/index.md b/website/docs/introduction/basics/configuration/configmaps/index.md new file mode 100644 index 0000000000..210a5313ce --- /dev/null +++ b/website/docs/introduction/basics/configuration/configmaps/index.md @@ -0,0 +1,119 @@ +--- +title: ConfigMaps +sidebar_position: 10 +--- + +# ConfigMaps + +**ConfigMaps** allow you to decouple configuration artifacts from image content to keep containerized applications portable. They store non-confidential data in key-value pairs and can be consumed by pods as environment variables, command-line arguments, or configuration files. + +ConfigMaps provide: +- **Configuration Management:** Store application configuration separately from code +- **Environment Flexibility:** Use different configurations for different environments +- **Runtime Updates:** Update configuration without rebuilding container images +- **Portability:** Keep applications portable across different environments + +In this lab, you'll learn about ConfigMaps by creating one for our retail store's UI component and seeing how it connects to backend services. + +### Creating ConfigMap + +Let's create a ConfigMap for our retail store's UI component. The UI needs to know where to find the backend services: + +::yaml{file="manifests/base-application/ui/configMap.yaml" paths="kind,metadata.name,data" title="ui-configmap.yaml"} + +1. `kind: ConfigMap`: Tells Kubernetes what type of resource to create +2. `metadata.name`: Unique identifier for this ConfigMap within the namespace +4. 
`data`: Key-value pairs containing the configuration data + +Apply the ConfigMap configuration: +```bash +$ kubectl apply -k ~/environment/eks-workshop/modules/introduction/basics/configmaps/ +``` + +### Exploring ConfigMap + +Now let's examine the ConfigMap we just created: + +```bash +$ kubectl get configmaps -n ui +NAME DATA AGE +kube-root-ca.crt 1 2m51s +ui 4 2m50s +``` + +Get detailed information about the ConfigMap: +```bash +$ kubectl describe configmap ui -n ui +Name: ui +Namespace: ui +Labels: +Annotations: + +Data +==== +RETAIL_UI_ENDPOINTS_CARTS: +---- +http://carts.carts.svc:80 + +RETAIL_UI_ENDPOINTS_CATALOG: +---- +http://catalog.catalog.svc:80 + +RETAIL_UI_ENDPOINTS_CHECKOUT: +---- +http://checkout.checkout.svc:80 + +RETAIL_UI_ENDPOINTS_ORDERS: +---- +http://orders.orders.svc:80 + + +BinaryData +==== + +Events: +``` + +This shows: +- **Data section** - The key-value pairs stored in the ConfigMap +- **Labels** - Metadata tags for organization +- **Annotations** - Additional metadata + +### Using ConfigMaps in Pods + +Now let's create a pod that uses our ConfigMap. We'll update our UI pod to use the configuration: + +::yaml{file="manifests/modules/introduction/basics/configmaps/ui-pod-with-config.yaml" paths="spec.containers.0.envFrom" title="ui-pod-with-config.yaml"} + +1. `envFrom.configMapRef`: Loads all key-value pairs from the ConfigMap as environment variables + +Apply the updated pod configuration: +```bash hook=ready +$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/configmaps/ui-pod-with-config.yaml +``` + +### Testing the Configuration + +Let's verify that our pod can now access the configuration: + +```bash +$ kubectl exec -n ui ui-pod -- env | grep RETAIL_UI_ENDPOINTS_CATALOG +RETAIL_UI_ENDPOINTS_CATALOG=http://catalog.catalog.svc:80 +``` + +You can also see all the ConfigMap environment variables: +```bash +$ kubectl exec -n ui ui-pod -- env | grep RETAIL_UI +RETAIL_UI_ENDPOINTS_CATALOG=http://catalog.catalog.svc:80 +RETAIL_UI_ENDPOINTS_CARTS=http://carts.carts.svc:80 +RETAIL_UI_ENDPOINTS_ORDERS=http://orders.orders.svc:80 +RETAIL_UI_ENDPOINTS_CHECKOUT=http://checkout.checkout.svc:80 +``` + +## Key Points to Remember + +* ConfigMaps store non-confidential configuration data +* They decouple configuration from container images +* Can be consumed as environment variables or mounted as files +* Allow the same image to work across different environments +* Have a 1MB size limit per ConfigMap diff --git a/website/docs/introduction/basics/configuration/configmaps/tests/hook-ready.sh b/website/docs/introduction/basics/configuration/configmaps/tests/hook-ready.sh new file mode 100644 index 0000000000..d765f6fbc6 --- /dev/null +++ b/website/docs/introduction/basics/configuration/configmaps/tests/hook-ready.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + echo "Waiting for UI pod to be ready..." + kubectl wait --for=condition=ready pod/ui-pod -n ui --timeout=300s + + echo "Verifying ConfigMap is accessible..." 
+ + # Check that ConfigMap exists + kubectl get configmap ui -n ui + + # Verify the pod has the environment variable from ConfigMap + env_var=$(kubectl exec -n ui ui-pod -- env | grep RETAIL_UI_ENDPOINTS_CATALOG || echo "") + if [ -z "$env_var" ]; then + echo "Error: RETAIL_UI_ENDPOINTS_CATALOG environment variable not found" + echo "Available environment variables:" + kubectl exec -n ui ui-pod -- env | grep RETAIL_UI || echo "No RETAIL_UI variables found" + exit 1 + fi + + echo "Found environment variable: $env_var" + echo "ConfigMap test completed successfully" +} + +"$@" diff --git a/website/docs/introduction/basics/configuration/index.md b/website/docs/introduction/basics/configuration/index.md new file mode 100644 index 0000000000..649bd36d89 --- /dev/null +++ b/website/docs/introduction/basics/configuration/index.md @@ -0,0 +1,67 @@ +--- +title: Configuration +sidebar_position: 50 +--- + +# Configuration + +Applications often require configuration data - from environment-specific settings like API endpoints to sensitive credentials like database passwords. Kubernetes provides two core resources to manage configuration data: + +**ConfigMaps** - for non-confidentail configuration data +**Secrets** - for sensitive information like passwords, tokens, and certificates + +Modern applications run across multiple environments and often scale dynamically. + +Kubernetes configuration resources make this easy by allowing you to: +- **Separate configuration from code** — so you can deploy the same container everywhere +- **Use environment-specific settings** without modifying application images +- **Update configuration at runtime** without restarting or rebuilding images +- **Enhance security** by limiting access to sensitive values +- **Improve portability** across clusters and cloud providers + +## ConfigMaps vs Secrets + +| Category | ConfigMaps | Secrets | +| ------------------ | ------------------------------------------ | --------------------------------------------- | +| **Purpose** | Store non-confidential configuration | Store sensitive data | +| **Examples** | API endpoints, feature flags, config files | Passwords, tokens, certificates | +| **Data format** | Plain text | Base64 encoded | +| **Visibility** | Readable by all with access | Access restricted via RBAC | +| **Security level** | Low | High | + +## When to Use Each + +**Use ConfigMaps for:** +- Application settings and feature flags +- Service URLs and API endpoints +- Configuration files (`nginx.conf`, `application.yaml`) +- Environment-specific parameters + +**Use Secrets for:** +- Database credentials +- API keys and tokens +- TLS certificates and private keys +- Container registry credentials + +## Configuration Patterns + +Both ConfigMaps and Secrets can be consumed by pods in multiple ways: + +- **Environment variables:** Inject configuration as environment variables +- **Volume mounts:** Mount configuration as files in the container filesystem +- **Command-line arguments:** Pass configuration as arguments to container commands + +## Explore Configuration Management + +Learn how to manage both types of configuration data: + +- **[ConfigMaps](./configmaps)** - Store and manage non-confidential configuration data +- **[Secrets](./secrets)** - Securely handle sensitive information like passwords and certificates + +## Key Points to Remember + +* ConfigMaps handle non-confidential configuration data +* Secrets securely store sensitive information +* Both decouple configuration from application code +* Choose the right 
resource based on data sensitivity +* Both support multiple consumption patterns (env vars, files, args) diff --git a/website/docs/introduction/basics/configuration/secrets/index.md b/website/docs/introduction/basics/configuration/secrets/index.md new file mode 100644 index 0000000000..19ba42aec2 --- /dev/null +++ b/website/docs/introduction/basics/configuration/secrets/index.md @@ -0,0 +1,152 @@ +--- +title: Secrets +sidebar_position: 20 +--- + +# Secrets + +**Secrets** are used to store and manage sensitive information such as passwords, OAuth tokens, SSH keys, and TLS certificates. They provide a more secure way to handle confidential data compared to putting it directly in pod specifications or container images. + +Secrets provide: +- **Security:** Store sensitive data separately from application code +- **Access Control:** Control which pods and users can access sensitive information +- **Encryption:** Data is base64 encoded and can be encrypted at rest +- **Flexibility:** Use secrets as environment variables, files, or for image pulls + +In this lab, you'll learn about Secrets by creating database credentials for our retail store's catalog service and seeing how pods securely access this sensitive information. + +### Creating Your First Secret + +Let's create a Secret for our retail store's catalog service. The catalog needs database credentials to connect to its MySQL database: + +::yaml{file="manifests/base-application/catalog/secrets.yaml" paths="kind,metadata.name,data" title="catalog-secret.yaml"} + +1. `kind: Secret`: Tells Kubernetes what type of resource to create +2. `metadata.name`: Unique identifier for this Secret within the namespace +5. `data`: Key-value pairs containing sensitive data (base64 encoded) + +Apply the Secret configuration: +```bash +$ kubectl apply -k ~/environment/eks-workshop/modules/introduction/basics/secrets +``` + +### Exploring Your Secret + +Now let's examine the Secret we just created: + +```bash +$ kubectl get secrets -n catalog +NAME TYPE DATA AGE +catalog-db Opaque 2 30s +``` + +Get detailed information about the Secret: +```bash +$ kubectl describe secret -n catalog catalog-db +Name: catalog-db +Namespace: catalog +Labels: +Annotations: + +Type: Opaque + +Data +==== +RETAIL_CATALOG_PERSISTENCE_PASSWORD: 16 bytes +RETAIL_CATALOG_PERSISTENCE_USER: 7 bytes +``` + +This shows: +- **Type** - The kind of secret (Opaque for general use) +- **Data** - Number of key-value pairs (values are hidden for security) +- **Labels** - Metadata tags for organization + +Notice that the actual values are not displayed for security reasons. To see the base64 encoded data: +```bash +$ kubectl get secret catalog-db -n catalog -o yaml +apiVersion: v1 +data: + RETAIL_CATALOG_PERSISTENCE_PASSWORD: ZFltTmZXVjR1RXZUem9GdQ== + RETAIL_CATALOG_PERSISTENCE_USER: Y2F0YWxvZw== +kind: Secret +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"RETAIL_CATALOG_PERSISTENCE_PASSWORD":"ZFltTmZXVjR1RXZUem9GdQ==","RETAIL_CATALOG_PERSISTENCE_USER":"Y2F0YWxvZw=="},"kind":"Secret","metadata":{"annotations":{},"name":"catalog-db","namespace":"catalog"}} + creationTimestamp: "2025-10-05T17:52:34Z" + name: catalog-db + namespace: catalog + resourceVersion: "902820" + uid: 726e4fef-f82b-4a7e-a063-f72f18a941cd +type: Opaque +``` + +You'll see the data is base64 encoded. 
To decode a value: +```bash +$ kubectl get secret catalog-db -n catalog -o jsonpath='{.data.RETAIL_CATALOG_PERSISTENCE_USER}' | base64 --decode +catalog +``` + +### Using Secrets in Pods + +Now let's create a pod that uses our Secret. We'll update our catalog pod to use the database credentials: + +::yaml{file="manifests/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml" paths="kind,metadata.name,spec.containers,spec.containers.0.envFrom" title="catalog-pod-with-secret.yaml"} + +The key differences here are: +- `envFrom.configMapRef`: Loads all key-value pairs from a ConfigMap as environment variables +- `envFrom.secretRef`: Loads all key-value pairs from a Secret as environment variables +- This approach automatically makes all Secret data available without mapping individual keys + +Apply the updated pod configuration: +```bash +$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml +``` + +### Testing the Secret Access + +Let's verify that our pod can access the secret values: + +```bash hook=ready +$ kubectl exec -n catalog catalog-pod -- env | grep RETAIL_CATALOG_PERSISTENCE_USER +RETAIL_CATALOG_PERSISTENCE_USER=catalog_user +``` + +You can also see all catalog-related environment variables: +```bash +$ kubectl exec -n catalog catalog-pod -- env | grep RETAIL_CATALOG +RETAIL_CATALOG_PERSISTENCE_PROVIDER=mysql +RETAIL_CATALOG_PERSISTENCE_ENDPOINT=catalog-mysql:3306 +RETAIL_CATALOG_PERSISTENCE_DB_NAME=catalog +RETAIL_CATALOG_PERSISTENCE_USER=catalog_user +RETAIL_CATALOG_PERSISTENCE_PASSWORD=dYmNfWV4uEvTzoFu +``` + +:::warning +In production, avoid printing passwords to logs or console output. This is shown here for educational purposes only. +::: + +## Secrets vs ConfigMaps + +| Secrets | ConfigMaps | +|---------|------------| +| Sensitive data (passwords, tokens) | Non-confidential data | +| Base64 encoded + additional security | Base64 encoded for storage | +| Values hidden in kubectl output | Visible in plain text | +| Credentials, certificates, keys | Configuration files, environment variables | + +## Advanced Secrets Management + +While Kubernetes Secrets provide basic security for sensitive data, production environments often require more sophisticated secrets management solutions. For enhanced security features like automatic rotation, fine-grained access control, and integration with external secret stores, explore: + +**[AWS Secrets Manager Integration](../../../../security/secrets-management/secrets-manager/)** - Learn how to integrate AWS Secrets Manager with your EKS cluster for enterprise-grade secrets management with automatic rotation and centralized control. + +## Key Points to Remember + +* Secrets store sensitive data separately from application code +* Values are base64 encoded and can be encrypted at rest +* Secret values are hidden in kubectl describe output for security +* Can be consumed as environment variables or mounted as files +* Use ConfigMaps for non-sensitive configuration data +* For production workloads, consider advanced solutions like AWS Secrets Manager + diff --git a/website/docs/introduction/basics/configuration/secrets/tests/hook-ready.sh b/website/docs/introduction/basics/configuration/secrets/tests/hook-ready.sh new file mode 100644 index 0000000000..8c46169084 --- /dev/null +++ b/website/docs/introduction/basics/configuration/secrets/tests/hook-ready.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -Eeuo pipefail + +before() { + echo "Waiting for catalog pod to be ready..." 
+ kubectl wait --for=condition=ready pod/catalog-pod -n catalog --timeout=300s +} + +after() { + echo "noop" +} + +"$@" diff --git a/website/docs/introduction/basics/index.md b/website/docs/introduction/basics/index.md new file mode 100644 index 0000000000..7f0d380739 --- /dev/null +++ b/website/docs/introduction/basics/index.md @@ -0,0 +1,87 @@ +--- +title: Kubernetes Basics +sidebar_position: 60 +sidebar_custom_props: { "module": true } +description: "Learn fundamental Kubernetes concepts including architecture, Helm, and Kustomize." +--- + +# Kubernetes Concepts + +Before diving into hands-on labs, it's important to understand **how Kubernetes works** and **the tools you'll use** throughout this workshop. This section introduces the core architecture, key components, and deployment tools that form the foundation of your EKS learning journey. + +:::tip Before you start +Prepare your environment for this section: + +```bash timeout=300 wait=10 +$ prepare-environment introduction/basics +``` + +::: + +## Kubernetes Architecture Overview + +Kubernetes follows a **control plane–worker node architecture**, where the **control plane** manages the cluster and **worker nodes** run your workloads. + +![Kubernetes Cluster Architecture](https://kubernetes.io/images/docs/kubernetes-cluster-architecture.svg) +*Figure: Simplified Kubernetes cluster architecture.* + +### Control Plane Components + +The control plane makes global decisions about the cluster and ensures the system’s desired state. + +- **API Server** — Acts as the front-end for Kubernetes, exposing the Kubernetes API to users and components. +- **etcd** — A highly available key-value store that holds all cluster data. +- **Scheduler** — Assigns Pods to nodes based on resource availability and constraints. +- **Controller Manager** — Runs background processes (controllers) that maintain cluster health and reconcile actual vs. desired states. + +### Worker Node Components + +Each node runs the components needed to host and manage Pods. + +- **kubelet** — Communicates with the control plane and ensures containers are running as expected. +- **Container Runtime** — Executes containers (e.g., containerd, CRI-O). +- **kube-proxy** — Maintains network rules and manages communication between Pods and services. + +--- + +## Amazon EKS Architecture + +**Amazon Elastic Kubernetes Service (EKS)** is a managed Kubernetes service that simplifies cluster operations. +It takes care of control plane management, upgrades, and high availability, so you can focus on your workloads. + +With EKS, you can: +- **Deploy applications faster** with less operational overhead +- **Scale seamlessly** to handle changing workloads +- **Enhance security** using AWS IAM and managed updates +- **Choose your compute model** — traditional EC2 nodes or serverless with EKS Auto Mode + +### Shared Responsibility Model + +In Amazon EKS: +- **AWS manages the control plane** — including the API Server, etcd, scheduler, and controllers. +- **You manage the worker nodes** — EC2, Fargate, or hybrid options where your applications run. +- **AWS services integrate natively** — including load balancers, IAM roles, VPC networking, and storage. + +![Amazon EKS Architecture](https://docs.aws.amazon.com/images/eks/latest/userguide/images/whatis.png) +*Figure: Amazon EKS architecture and integration with AWS services.* + +## Key Points to Remember + +Understanding Kubernetes architecture is crucial for effective cluster management and troubleshooting: + +### Control Plane vs. 
Worker Nodes +- **Control plane** components (API Server, etcd, Scheduler, Controller Manager) handle cluster-wide decisions and state management +- **Worker nodes** (kubelet, container runtime, kube-proxy) focus on running and networking your applications +- This separation allows for scalable, resilient cluster operations + +### EKS Advantages +- **Reduced operational burden** — AWS manages control plane complexity, patching, and high availability +- **Native AWS integration** — Seamless connectivity with VPC, IAM, Load Balancers, and other AWS services +- **Flexible compute options** — Choose between EC2, Fargate, or Auto Mode based on your workload needs + +### Design Principles +- **Declarative configuration** — Define desired state; Kubernetes controllers work to achieve it +- **API-driven** — All interactions go through the Kubernetes API for consistency and auditability +- **Extensible** — Custom resources and controllers allow you to extend Kubernetes functionality + +These architectural concepts will be essential as you progress through deploying applications, managing configurations with Helm and Kustomize, and implementing advanced cluster features. diff --git a/website/docs/introduction/basics/namespaces/index.md b/website/docs/introduction/basics/namespaces/index.md new file mode 100644 index 0000000000..31435e2f4e --- /dev/null +++ b/website/docs/introduction/basics/namespaces/index.md @@ -0,0 +1,125 @@ +--- +title: Namespaces +sidebar_position: 10 +--- + +# Namespaces + +**Namespaces** provide a way to organize and isolate resources within a single Kubernetes cluster. Think on them as virtual clusters inside your physical cluster - they help you separate different applications, environments, or teams while sharing the same underlying infrastructure. + +You can think of namespaces like folders on your computer — they let you group related files (resources) without mixing them up. + +Namespaces provide: +- **Organization:** Group related resources together (like all components of an application) +- **Isolation:** Prevent resource conflicts between different applications or teams +- **Resource Management:** Apply quotas and limits to specific groups of resources +- **Access control:** Use Kubernetes permissions (called RBAC — Role-Based Access Control) to decide who can access or change resources. + +In this section, you'll explore how namespaces organize resources by working with the different components of our retail store application. + +### Default Namespaces +Every Kubernetes cluster starts with several built-in namespaces. These are created automatically when a cluster is provisioned: + +- **default** - Where resources go if you don't specify a namespace +- **kube-system** - System components like DNS and networking +- **kube-public** - Publicly readable resources +- **kube-node-lease** - Node heartbeat information + +```bash +$ kubectl get namespaces +NAME STATUS AGE +default Active 1h +kube-node-lease Active 1h +kube-public Active 1h +kube-system Active 1h +``` + +### Creating Your First Namespace +Let's create a namespace for our retail store's UI component: + +::yaml{file="manifests/base-application/ui/namespace.yaml" paths="kind,metadata.name,metadata.labels" title="namespace.yaml"} + +1. `kind: Namespace`: Tells Kubernetes what type of resource to create. +2. `metadata.name`: Unique identifier for this namespace within the cluster. +3. `metadata.labels`: Key-value pairs that organize and categorize resources. 
+ +Apply the configuration file using `kubectl` +```bash +$ kubectl apply -f ~/environment/eks-workshop/base-application/ui/namespace.yaml +``` + +You can also create namespaces directly using the `kubectl create` command. Let's create a namespace for our `catalog` service and add labels (labels are optional but helpful for organization): + +```bash +$ kubectl create namespace catalog +$ kubectl label namespace catalog app.kubernetes.io/created-by=eks-workshop +``` + +Let's inspect both namespaces: +```bash +$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop +``` + +The `-l` flag stands for "label selector" and filters resources based on their labels. In this case, we're only showing namespaces that have the label `app.kubernetes.io/created-by=eks-workshop`. This is useful for finding resources created by this workshop among all the namespaces in your cluster. + +Describe namespace +```bash +$ kubectl describe namespace ui +Name: ui +Labels: app.kubernetes.io/created-by=eks-workshop + kubernetes.io/metadata.name=ui +Annotations: +Status: Active + +No resource quota. + +No LimitRange resource. +``` + +### Using Namespaces +When working with resources, you can specify the namespace in two ways: + +**Using the `-n` flag:** +```bash +$ kubectl get all -n ui +``` + +**Using the `--namespace` flag:** +```bash +$ kubectl get all --namespace ui +``` + + +Tip: You can also see resources across all namespaces using the -A flag: + +```bash +$ kubectl get pods -A +``` + +### Namespaces in this workshop +In this workshop, namespaces help us separate the different microservices that make up our sample retail store application. + +- `ui` - Frontend user interface +- `catalog` - Product catalog service +- `carts` - Shopping cart service +- `checkout` - Order processing service +- `orders` - Order management service + +You'll see commands like this throughout the labs: +```bash +$ kubectl get pods -n ui +$ kubectl get secrets -n catalog +``` + +This organization makes it easy to: +* See which components belong to which service +* Apply configurations to specific services +* Troubleshoot issues within a particular service + +## Key Points to Remember +* Namespaces organize and separate resources +* Names must be unique within a namespace +* Most resources are namespaced, some are cluster-wide +* Some resources (like Nodes and PersistentVolumes) are not namespaced and exist at the cluster level. +* Default namespace is used when none specified +* Enable resource quotas and access control diff --git a/website/docs/introduction/basics/pods/index.md b/website/docs/introduction/basics/pods/index.md new file mode 100644 index 0000000000..25d0f7ac56 --- /dev/null +++ b/website/docs/introduction/basics/pods/index.md @@ -0,0 +1,229 @@ +--- +title: Pods +sidebar_position: 20 +--- + +# Pods + +**Pods** are the smallest deployable units in Kubernetes. A Pod represents one or more containers that share storage, network, and configuration settings for how they should run together. + +Pods provide: +- **Container grouping:** Usually, a pod runs a single container, but it can include multiple tightly coupled containers that need to share data or communicate over localhost. 
+- **Shared networking:** All containers in a pod share the same IP address +- **Shared storage:** Containers can share volumes within the pod +- **Lifecycle management:** Containers in a pod live and die together +- **Ephemeral nature:** Pods can be created, destroyed, and recreated + +In this lab, you'll learn about pods by creating a simple example pod and exploring its properties. + +### Creating a Pod + +Let's create a simple pod to understand how they work. The manifest defines a simple pod running the retail store UI container. + +::yaml{file="manifests/modules/introduction/basics/pods/ui-pod.yaml" paths="kind,metadata.name,metadata.namespace,spec.containers,spec.containers.0.name,spec.containers.0.image,spec.containers.0.ports,spec.containers.0.env,spec.containers.0.resources" title="ui-pod.yaml"} + +1. `kind: Pod`: Tells Kubernetes what type of resource to create +2. `metadata.name`: Unique identifier for this pod within the namespace +3. `metadata.namespace`: Which namespace the pod belongs to (ui namespace) +4. `spec.containers`: Array defining what containers run in the pod +5. `spec.containers.0.name`: Name of the first container (ui) +6. `spec.containers.0.image`: Container image from ECR Public registry +7. `spec.containers.0.ports`: Network ports the container exposes +8. `spec.containers.0.env`: Environment variables for the container +9. `spec.containers.0.resources`: CPU and memory allocation settings + +Apply the pod configuration: +```bash +$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/pods/ui-pod.yaml +``` + +Kubernetes will create the pod in the `ui` namespace and start pulling the container image. + +Wait for the pod to become ready: +```bash +$ kubectl wait --for=condition=Ready --timeout=60s -n ui pod/ui-pod +``` + +### Exploring Pod + +Now let's examine the pod we just created: + +```bash +$ kubectl get pods -n ui +NAME READY STATUS RESTARTS AGE +ui-pod 1/1 Running 0 30s +``` + +Get detailed information about the pod: +```bash +$ kubectl describe pod -n ui ui-pod +Name: ui-pod +Namespace: ui +Priority: 0 +Service Account: default +Node: ip-10-42-144-0.us-west-2.compute.internal/10.42.144.0 +Start Time: Sun, 05 Oct 2025 19:28:02 +0000 +Labels: app.kubernetes.io/component=service + app.kubernetes.io/name=ui +Annotations: +Status: Running +IP: 10.42.146.177 +IPs: + IP: 10.42.146.177 +Containers: + ui: + Container ID: containerd://01709a8abac99ce46842dda128752a68e828a485ee47f2094549fc00f9d71953 + Image: public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1 + Image ID: public.ecr.aws/aws-containers/retail-store-sample-ui@sha256:63a531dd3716cf9f6a3c7b54d65c39ce4de43cb23a613ac2933f2cb38aff86d7 + Port: 8080/TCP + Host Port: 0/TCP + State: Running + Started: Sun, 05 Oct 2025 19:28:03 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 1536Mi + Requests: + cpu: 250m + memory: 1536Mi + Environment: + JAVA_OPTS: -XX:MaxRAMPercentage=75.0 -Djava.security.egd=file:/dev/urandom + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-68xdw (ro) +Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True +Volumes: + kube-api-access-68xdw: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true +QoS Class: Burstable +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + 
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 10s default-scheduler Successfully assigned ui/ui-pod to ip-10-42-144-0.us-west-2.compute.internal + Normal Pulled 10s kubelet Container image "public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1" already present on machine + Normal Created 10s kubelet Created container: ui + Normal Started 10s kubelet Started container ui +``` + +This shows: +- **Container specifications** - Image, ports, environment variables +- **Resource usage** - CPU and memory requests/limits +- **Events** - What happened during pod creation +- **Status** - Current state and health + +View the pod's logs: +```bash +$ kubectl logs -n ui ui-pod +Picked up JAVA_TOOL_OPTIONS: + + . ____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + + :: Spring Boot :: (v3.4.4) + +2025-10-05T19:28:06.600Z INFO 1 --- [ main] c.a.s.u.UiApplication : Starting UiApplication v0.0.1-SNAPSHOT using Java 21.0.7 with PID 1 (/app/app.jar started by appuser in /app) +2025-10-05T19:28:06.658Z INFO 1 --- [ main] c.a.s.u.UiApplication : The following 1 profile is active: "prod" +2025-10-05T19:28:10.268Z INFO 1 --- [ main] i.o.i.s.a.OpenTelemetryAutoConfiguration : OpenTelemetry Spring Boot starter has been disabled + +2025-10-05T19:28:11.712Z INFO 1 --- [ main] o.s.b.a.e.w.EndpointLinksResolver : Exposing 4 endpoints beneath base path '/actuator' +2025-10-05T19:28:14.045Z INFO 1 --- [ main] o.s.b.w.e.n.NettyWebServer : Netty started on port 8080 (http) +2025-10-05T19:28:14.075Z INFO 1 --- [ main] c.a.s.u.UiApplication : Started UiApplication in 8.505 seconds (process running for 10.444) +``` + +> You’ll see the UI container starting up. + +Execute a command inside the pod: +```bash hook=ready +$ kubectl exec -n ui ui-pod -- curl -s localhost:8080/actuator/health +{"status":"UP","groups":["liveness","readiness"]} +``` +This should return the status of the application. + +### Accessing Pod + +You can access the pod from your local machine using port forwarding: +```bash test=false +$ kubectl port-forward -n ui ui-pod 8080:8080 +``` + +:::info +Port forwarding temporarily connects your local port to a port inside the pod, allowing you to access the application directly from your laptop. +::: + +In the Workshop IDE, a popup appears to view all forwarded ports. Click to open applicaiton URL in the browser. + +Alternatively, open another terminal and test: +```bash test=false +$ curl localhost:8080 +``` + +In the browser, You'll see the Retail store application landing page. + +Press `CTRL+C` to break `port-forward` session. + +### Deleting Pods + +When you no longer need a pod, you can delete it using the `kubectl delete` command. There are several ways to delete pods: + +**Method 1: Delete by name** +```bash +$ kubectl delete pod -n ui ui-pod +pod "ui-pod" deleted +``` + +**Method 2: Delete using the manifest file** +Let's recreate the `ui-pod` and delete using mainfest file. +```bash +$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/pods/ui-pod.yaml +$ kubectl delete -f ~/environment/eks-workshop/modules/introduction/basics/pods/ui-pod.yaml +pod "ui-pod" deleted +``` + +After deletion, verify the pod is gone: +```bash +$ kubectl get pods -n ui +No resources found in ui namespace. 
+``` + +:::warning +When you delete a pod directly, it's gone forever. The data inside the pod (unless stored in persistent volumes) is lost. In production environments, pods are typically managed by controllers like Deployments that automatically recreate them if needed. +::: + +### Pod Lifecycle + +Pods have well-defined lifecycle phases that reflect their current state in the cluster. +- **Pending** - Pod is being scheduled and containers are starting +- **Running** - At least one container is running +- **Succeeded** - All containers have completed successfully +- **Failed** - At least one container has failed +- **Unknown** - Pod state cannot be determined + +Kubernetes controllers continuously monitor pod states and take action (like restarting failed containers or recreating pods) to maintain desired application health. + +## Key Points to Remember + +* Pods are the smallest deployable units in Kubernetes +* Usually contain one container, but can contain multiple +* Share network and storage within the pod +* Pods are ephemeral - they come and go +* Typically managed by higher-level controllers like Deployments + +:::info +In real-world scenarios, you rarely create pods directly — instead, you use higher-level resources like Deployments, ReplicaSets, or Jobs to manage them. +::: diff --git a/website/docs/introduction/basics/pods/tests/hook-ready.sh b/website/docs/introduction/basics/pods/tests/hook-ready.sh new file mode 100644 index 0000000000..8210b2083f --- /dev/null +++ b/website/docs/introduction/basics/pods/tests/hook-ready.sh @@ -0,0 +1,25 @@ +set -Eeuo pipefail + +before() { + echo "Waiting for pod to be ready..." + kubectl wait --for=condition=Ready --timeout=60s -n ui pod/ui-pod + + echo "Waiting for application to start listening on port 8080..." + for i in {1..30}; do + if kubectl exec -n ui ui-pod -- curl -s --connect-timeout 2 localhost:8080/actuator/health >/dev/null 2>&1; then + echo "Application is ready and responding on port 8080" + return 0 + fi + echo "Attempt $i/30: Application not ready yet, waiting..." + sleep 2 + done + + echo "Application failed to become ready within 60 seconds" + exit 1 +} + +after() { + echo "noop" +} + +"$@" diff --git a/website/docs/introduction/basics/services/index.md b/website/docs/introduction/basics/services/index.md new file mode 100644 index 0000000000..6d240732cd --- /dev/null +++ b/website/docs/introduction/basics/services/index.md @@ -0,0 +1,237 @@ +--- +title: Services +sidebar_position: 40 +--- + +# Services + +**Services** provide stable network endpoints for accessing pods. Since pods are ephemeral and can be created/destroyed frequently, services give you consistent DNS names and IP addresses for reliable communication. + +#### Why Services are important: +Pods can come and go, so clients cannot reliably connect to them directly. Services: +- **Provide stable networking:** IP and DNS names remain same even if pods change. +- **Offer load balancing:** Automatically distribute requests across healthy pods +- **Enable service discovery:** Other components can reach the service by name +- **Provide pod abstraction:** Clients don’t need to know individual pod IPs +- **Handle automatic updates:** Adjust endpoints as pods are created or destroyed + +In this lab, you'll create a service for the catalog component of our retail store and explore how services enable communication between pods. 
+ +### Service Types + +Kubernetes provides different service types for various use cases: + +| Type | Purpose | Access | +|------|---------|--------| +| **ClusterIP** | Internal cluster communication | Cluster-only | +| **NodePort** | External access via node ports | External | +| **LoadBalancer** | External access via cloud load balancer | External | +| **ExternalName** | Map to external DNS name | External | + +:::info +A dedicated lab on **LoadBalancer services** is available later in this workshop. You will learn how to expose services externally using a cloud load balancer there. +::: + +### Creating a Service + +Let's examine the UI service from our retail store: + +::yaml{file="manifests/base-application/ui/service.yaml" paths="kind,metadata.name,spec.type,spec.ports,spec.selector" title="service.yaml"} + +1. `kind: Service`: Creates a Service resource +2. `metadata.name`: Name of the service (ui) +3. `spec.type`: Service type (ClusterIP for internal access) +4. `spec.ports`: Port mapping from service to pods +5. `spec.selector`: Selects which pods receive traffic + +Deploy the service: +```bash hook=ready +$ kubectl apply -k ~/environment/eks-workshop/modules/introduction/basics/services/ +``` + +### How Services Connect to Pods + +Services don't directly know about specific pods. Instead, they use **label selectors** to dynamically find pods that should receive traffic. This creates a flexible, loosely-coupled relationship. + +**Here's how it works:** + +1. **Pods have labels** - Key-value pairs that describe the pod +2. **Services have selectors** - Criteria that match pod labels +3. **Kubernetes automatically connects them** - Any pod matching the selector becomes an endpoint + +Let's see this in action with our UI service: + +```bash +# Check the service selector +$ kubectl get service -n ui ui -o jsonpath='{.spec.selector}' | jq +{ + "app.kubernetes.io/component": "service", + "app.kubernetes.io/instance": "ui", + "app.kubernetes.io/name": "ui" +} +``` + +Now check which pods have matching labels: +```bash +# Look for pods with matching labels +$ kubectl get pod -n ui -l app.kubernetes.io/component=service -o jsonpath='{.items[0].metadata.labels}{"\n"}' | jq +{ + "app.kubernetes.io/component": "service", + "app.kubernetes.io/created-by": "eks-workshop", + "app.kubernetes.io/instance": "ui", + "app.kubernetes.io/name": "ui", + "pod-template-hash": "5989474687" +} +``` + +You'll see the UI pods have labels that match the service selector. This is how the service knows which pods to send traffic to. + +**The relationship is dynamic:** +- When new pods start with matching labels, they automatically become service endpoints +- When pods are deleted, they're automatically removed from the service +- If you change a pod's labels, it can be added or removed from services + +This label-based system means: +- **Services work with any workload controller** (Deployments, StatefulSets, etc.) 
+- **Pods can belong to multiple services** if they match different selectors
+- **Services automatically adapt** as pods scale up or down
+
+### Exploring Your Service
+
+Check service status:
+```bash
+$ kubectl get service -n ui
+NAME   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
+ui     ClusterIP   172.16.88.252   <none>        80/TCP    15m
+```
+
+View service endpoints (the actual pod IPs):
+```bash
+$ kubectl get endpoints -n ui ui
+NAME   ENDPOINTS           AGE
+ui     10.42.129.33:8080   15m
+```
+> This shows which pods receive traffic
+
+Get detailed service information:
+```bash
+$ kubectl describe service -n ui ui
+Name:                     ui
+Namespace:                ui
+Labels:                   app.kubernetes.io/component=service
+                          app.kubernetes.io/created-by=eks-workshop
+                          app.kubernetes.io/instance=ui
+                          app.kubernetes.io/name=ui
+Annotations:              <none>
+Selector:                 app.kubernetes.io/component=service,app.kubernetes.io/instance=ui,app.kubernetes.io/name=ui
+Type:                     ClusterIP
+IP Family Policy:         SingleStack
+IP Families:              IPv4
+IP:                       172.16.88.252
+IPs:                      172.16.88.252
+Port:                     http  80/TCP
+TargetPort:               http/TCP
+Endpoints:                10.42.129.33:8080
+Session Affinity:         None
+Internal Traffic Policy:  Cluster
+Events:                   <none>
+```
+
+### Service Discovery
+
+Services enable automatic service discovery through DNS names:
+
+**Full DNS name format:**
+```
+<service-name>.<namespace>.svc.cluster.local
+```
+
+**Examples from our retail store:**
+- `ui.ui.svc.cluster.local`
+- `catalog.catalog.svc.cluster.local`
+- `carts.carts.svc.cluster.local`
+
+**Short names within the same namespace:**
+```
+# From a pod in the ui namespace
+curl http://ui:80
+
+# From a different namespace, use the full name
+curl http://ui.ui.svc.cluster.local:80
+```
+
+### Testing Service Communication
+
+Let's test service discovery and communication by creating a test pod:
+
+```bash
+# Create a test pod for network testing
+$ kubectl run test-pod --image=curlimages/curl --restart=Never -- sleep 3600
+$ kubectl wait --for=condition=ready pod/test-pod --timeout=60s
+```
+
+```bash
+# Test DNS resolution from within the cluster
+$ kubectl exec test-pod -- nslookup ui.ui.svc.cluster.local
+Server:     172.16.0.10
+Address:    172.16.0.10:53
+
+
+Name:       ui.ui.svc.cluster.local
+Address:    172.16.88.252
+```
+
+```bash
+# Test HTTP communication with the UI service
+$ kubectl exec test-pod -- curl -s http://ui.ui.svc.cluster.local/actuator/info | jq
+{
+  "pod": {
+    "name": "ui-6db5f6bd84-cx4mg"
+  }
+}
+```
+
+### Load Balancing
+
+Services automatically distribute traffic across all healthy pods that match their selector:
+
+**Scale the UI deployment to see load balancing:**
+```bash hook=replicas
+$ kubectl scale deployment -n ui ui --replicas=3
+```
+
+**Watch how the service endpoints update:**
+```bash
+$ kubectl get endpoints -n ui ui
+NAME   ENDPOINTS                                                AGE
+ui     10.42.117.212:8080,10.42.129.33:8080,10.42.174.4:8080   11m
+```
+
+You'll now see multiple pod IPs listed as endpoints - the service automatically discovered the new pods because they have matching labels.
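+
+If you want to dig deeper, recent Kubernetes versions also publish this endpoint information as EndpointSlices, the successor to the Endpoints API. Assuming your cluster exposes them (they are enabled by default on current versions), you can list the slices backing the `ui` Service using their well-known label:
+
+```bash
+# EndpointSlices carry a label pointing at the Service that owns them
+$ kubectl get endpointslices -n ui -l kubernetes.io/service-name=ui
+```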
+ +**Test load balancing:** +```bash +# Make multiple requests to see load balancing in action (single line) +$ for i in $(seq 1 5); do printf "Request %d:" "$i"; kubectl exec test-pod -- curl -s http://ui.ui.svc.cluster.local/actuator/info; echo; sleep 1; done +Request 1:{"pod":{"name":"ui-6db5f6bd84-xgpf4"}} +Request 2:{"pod":{"name":"ui-6db5f6bd84-cx4mg"}} +Request 3:{"pod":{"name":"ui-6db5f6bd84-7bq8w"}} +Request 4:{"pod":{"name":"ui-6db5f6bd84-7bq8w"}} +Request 5:{"pod":{"name":"ui-6db5f6bd84-cx4mg"}} +``` + +You'll see requests distributed across different pod hostnames, demonstrating how the service load balances across all matching pods. + +```bash +# Clean up the test pod +$ kubectl delete pod test-pod +``` + +## Key Points to Remember + +* Services provide stable network endpoints for ephemeral pods +* ClusterIP services enable internal cluster communication +* Services use label selectors to find target pods +* DNS names follow the pattern: service.namespace.svc.cluster.local +* Services automatically load balance traffic across healthy pods +* Use port forwarding to test services locally diff --git a/website/docs/introduction/basics/services/tests/hook-ready.sh b/website/docs/introduction/basics/services/tests/hook-ready.sh new file mode 100644 index 0000000000..127a192dc4 --- /dev/null +++ b/website/docs/introduction/basics/services/tests/hook-ready.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + echo "Waiting for UI deployment to be available..." + + kubectl wait --for=condition=available deployment/ui -n ui --timeout=300s + + echo "Waiting for UI service endpoints..." + kubectl wait --for=jsonpath='{.subsets[0].addresses[0].ip}' endpoints/ui -n ui --timeout=300s +} + +"$@" diff --git a/website/docs/introduction/basics/services/tests/hook-replicas.sh b/website/docs/introduction/basics/services/tests/hook-replicas.sh new file mode 100644 index 0000000000..89df167f8b --- /dev/null +++ b/website/docs/introduction/basics/services/tests/hook-replicas.sh @@ -0,0 +1,23 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + kubectl rollout status -n ui deployment/ui --timeout=60s + + # Wait for all 3 pods to be ready + kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=ui -n ui --timeout=60s + + POD_COUNT=$(kubectl get pod -n ui -l app.kubernetes.io/name=ui -o json | jq -r ".items | length") + + if [[ $POD_COUNT -eq 3 ]]; then + exit 0 + fi + + >&2 echo "There should be 3 pods running" + exit 1 +} + +"$@" diff --git a/website/docs/introduction/basics/workload-management/daemonsets.md b/website/docs/introduction/basics/workload-management/daemonsets.md new file mode 100644 index 0000000000..fe8473dece --- /dev/null +++ b/website/docs/introduction/basics/workload-management/daemonsets.md @@ -0,0 +1,121 @@ +--- +title: DaemonSets +sidebar_position: 33 +--- + +# DaemonSets + +**DaemonSets** ensure that a copy of a pod runs on **every node** (or a subset of nodes) in your cluster. They are ideal for system-level services that must operate on all nodes, such as logging, monitoring, and network agents. 
+ +Key benefits: +- **Cover all nodes** - One Pod per node +- **Scale automatically with nodes** - New nodes get pods, removed nodes lose pods +- **Run system services** - Ideal for logging, monitoring, and networking +- **Target specific nodes** - Using selectors or affinity +- **Access host resources** - Like logs, metrics, and system files + +## When to Use DaemonSets +Daemonsets are perfect for services that need to run on every node or a subset of nodes: +- **Log collectors** - Fluentd, Filebeat, Fluent Bit +- **Monitoring agents** - Node Exporter, Datadog agent, New Relic +- **Network plugins** - CNI plugins, load balancer controllers +- **Security agents** - Antivirus scanners, compliance tools +- **Storage daemons** - Distributed storage agents + +## Deploying a DaemonSet + +Let's create a simple log collector DaemonSet that runs on all nodes and collects logs from the host filesystem: + +::yaml{file="manifests/modules/introduction/basics/daemonsets/log-collector.yaml" paths="kind,metadata.name,spec.selector,spec.template.spec.containers.0.volumeMounts,spec.template.spec.volumes" title="log-collector.yaml"} + +1. `kind: DaemonSet`: Creates a DaemonSet controller +2. `metadata.name`: Name of the DaemonSet (`log-collector`) +3. `spec.selector`: How DaemonSet finds its pods (by labels) +4. `spec.template.spec.containers.0.volumeMounts`: How container accesses node files +5. `spec.template.spec.volumes`: Host paths for accessing node logs + +Key DaemonSet characteristics: +- No `replicas` field - Kubernetes automatically runs one pod per node +- Pods automatically scale as nodes are added or removed. +- `hostPath` volumes allow Pods to access node files, if required. +- Typically deployed in `kube-system` namespace for system services, but can run in other namespaces. + +Deploy the DaemonSet: +```bash +$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/daemonsets/log-collector.yaml +``` + +## Inspecting Your DaemonSet + +Check DaemonSet status: +```bash +$ kubectl get daemonset -n kube-system +NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE AGE +log-collector 3 3 3 3 3 2m +``` +> You'll see output showing desired vs current pods: + +View the pods across all nodes: +```bash +$ kubectl get pods -n kube-system -l app=log-collector -o wide +NAME READY STATUS NODE AGE +log-collector-abc12 1/1 Running ip-10-42-1-1 2m +log-collector-def34 1/1 Running ip-10-42-2-1 2m +log-collector-ghi56 1/1 Running ip-10-42-3-1 2m +``` +> Notice one pod per node + +## Node Selection + +Target specific nodes using nodeSelector: + +```yaml +spec: + template: + spec: + nodeSelector: + node-type: worker + containers: + - name: monitoring-agent + image: monitoring:latest +``` + +Or use nodeAffinity for more complex rules: + +```yaml +spec: + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 +``` +Use nodeSelector for simple label matches and nodeAffinity for more complex scheduling requirements. 
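+
+DaemonSets are also frequently paired with tolerations so their pods can be scheduled onto nodes that carry taints. The snippet below is a sketch only; the `dedicated` taint key is an assumed example and not something this workshop's nodes necessarily use:
+
+```yaml
+spec:
+  template:
+    spec:
+      tolerations:
+        - key: dedicated # example taint key, for illustration only
+          operator: Exists
+          effect: NoSchedule
+```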
+
+## DaemonSets vs Other Controllers
+
+| Controller | Purpose | Replica Count | Node Placement | Use Case |
+|------------|---------|---------------|----------------|----------|
+| DaemonSet | One Pod per node | Automatic | All nodes or subset | System services |
+| Deployment | Multiple interchangeable Pods | Configurable | Any node | Stateless apps |
+| StatefulSet | Pods with stable identity | Configurable | Any node | Stateful apps |
+
+:::info
+DaemonSets are ideal for services that must run on every node or a specific set of nodes.
+:::
+
+## Key Points to Remember
+
+* DaemonSets automatically run one pod per node
+* Perfect for system-level services like logging and monitoring
+* No need to specify replica count - it's automatic
+* Can access node resources through hostPath volumes
+* Use node selectors to target specific nodes
+* Pods are automatically added/removed as nodes join/leave
+* Ideal for consistent system functionality across all nodes
diff --git a/website/docs/introduction/basics/workload-management/deployments.md b/website/docs/introduction/basics/workload-management/deployments.md
new file mode 100644
index 0000000000..cf371cf325
--- /dev/null
+++ b/website/docs/introduction/basics/workload-management/deployments.md
@@ -0,0 +1,119 @@
+---
+title: Deployments
+sidebar_position: 31
+---
+
+# Deployments
+
+**Deployments** are the most common workload controller for running stateless applications. They make sure your application always runs the desired number of Pods - automatically handling creation, scaling, updates, and recovery.
+
+Instead of managing Pods manually, Deployments let Kubernetes:
+- **Run multiple identical Pods** for reliability and load distribution
+- **Scale automatically** by adjusting replica counts
+- **Recover failed Pods** without manual intervention
+- **Perform rolling updates** without downtime
+- **Roll back easily** to previous versions when needed
+
+### Creating a Deployment
+
+Let's deploy the retail store UI using a deployment:
+
+::yaml{file="manifests/base-application/ui/deployment.yaml" paths="kind,metadata.name,spec.replicas,spec.selector,spec.template" title="deployment.yaml"}
+
+1. `kind: Deployment`: Defines a Deployment controller
+2. `metadata.name`: Name of the Deployment (ui)
+3. `spec.replicas`: Desired number of pods (1 in this example)
+4. `spec.selector`: Labels used to find managed Pods
+5. `spec.template`: Pod template defining what each pod should look like
+
+The deployment ensures that the actual Pods always match this template.
+
+Apply the Deployment:
+```bash
+$ kubectl apply -k ~/environment/eks-workshop/base-application/ui
+```
+
+### Inspecting Deployment
+
+Check deployment status:
+```bash
+$ kubectl get deployment -n ui
+NAME   READY   UP-TO-DATE   AVAILABLE   AGE
+ui     1/1     1            1           30s
+```
+
+List the Pods created by the Deployment:
+```bash
+$ kubectl get pods -n ui
+NAME                  READY   STATUS    RESTARTS   AGE
+ui-6d5bb7b9c8-xyz12   1/1     Running   0          30s
+```
+
+Get detailed information:
+```bash
+$ kubectl describe deployment -n ui ui
+```
+
+### Scaling Deployment
+
+Scale up to 5 replicas:
+```bash
+$ kubectl scale deployment -n ui ui --replicas=5
+$ kubectl get pods -n ui
+NAME                  READY   STATUS    RESTARTS   AGE
+ui-6d5bb7b9c8-abc12   1/1     Running   0          2m
+ui-6d5bb7b9c8-def34   1/1     Running   0          12s
+ui-6d5bb7b9c8-ghi56   1/1     Running   0          12s
+ui-6d5bb7b9c8-arx97   1/1     Running   0          10s
+ui-6d5bb7b9c8-uiv85   1/1     Running   0          10s
+```
+
+:::info
+Kubernetes automatically spreads these Pods across available worker nodes for high availability.
+::: + +Scale back down to 3 replicas: +```bash +$ kubectl scale deployment -n ui ui --replicas=3 +$ kubectl get pods -n ui +NAME READY STATUS RESTARTS AGE +ui-6d5bb7b9c8-abc12 1/1 Running 0 2m +ui-6d5bb7b9c8-def34 1/1 Running 0 12s +ui-6d5bb7b9c8-ghi56 1/1 Running 0 12s +``` + +### Rolling Updates and Rollbacks +You can update a Deployment by changing the image version: +```bash +$ kubectl set image deployment/ui ui=public.ecr.aws/aws-containers/retail-store-sample-ui:v2 -n ui +$ kubectl get pods -n ui +NAME READY STATUS RESTARTS AGE +ui-5989474687-5gcbt 1/1 Running 0 13m +ui-5989474687-dhk6q 1/1 Running 0 14s +ui-5989474687-dw8x8 1/1 Running 0 14s +ui-7c65b44b7c-znm9c 0/1 ErrImagePull 0 7s +``` +> You'll see a new pod created but with status `ErrImagePull`. + +Now let's rollback the change +```bash +$ kubectl rollout undo deployment/ui -n ui +$ kubectl get pods -n ui +NAME READY STATUS RESTARTS AGE +ui-5989474687-5gcbt 1/1 Running 0 13m +ui-5989474687-dhk6q 1/1 Running 0 14s +ui-5989474687-dw8x8 1/1 Running 0 14s +``` + +Rolling updates let you update your application gradually without downtime, while Kubernetes ensures new Pods match the desired state. +If something goes wrong — like an invalid image — you can rollback safely to the previous working version, keeping your application available and stable. + +This demonstrates how Deployments simplify application updates, maintain availability, and reduce risk in production environments. + +### Key Points to Remember + +* Deployments manage multiple identical pods automatically +* Use deployments instead of creating pods directly in production +* Scaling is as simple as changing the replica count +* Pod names include the deployment name plus random suffixes +* Deployments are perfect for stateless applications like web apps and APIs diff --git a/website/docs/introduction/basics/workload-management/index.md b/website/docs/introduction/basics/workload-management/index.md new file mode 100644 index 0000000000..75b9e604f2 --- /dev/null +++ b/website/docs/introduction/basics/workload-management/index.md @@ -0,0 +1,117 @@ +--- +title: Workload Management +sidebar_position: 30 +--- + +# Workload Management +While you can create individual pods directly, in production you rarely manage pods manually. Instead, you use **workload controllers** - higher-level Kubernetes resources that create and manage pods according to different application patterns. + +Think of workload controllers as smart managers that: +- **Create pods** based on templates you define +- **Monitor pod health** and replace failed instances +- **Handle scaling** up and down based on demand +- **Manage updates** with strategies like rolling deployments +- **Provide specialized behavior** for different application types + +## Types of Workload Controllers +Kubernetes provides several workload controllers, each designed for specific use cases: + +- **Deployments** manage multiple identical pods for stateless applications. They handle scaling, rolling updates, and automatic replacement of failed pods. Perfect for web applications where any pod can handle any request. +- **ReplicaSets** ensure a specified number of identical pods are running at any time. While you rarely create ReplicaSets directly, they're the building blocks that Deployments use under the hood to manage pods. +- **StatefulSets** provide stable identities and persistent storage for stateful applications. Each pod gets a unique name (like `mysql-0`, `mysql-1`) and its own persistent volume. 
Essential for databases and clustered applications. +- **DaemonSets** ensure exactly one pod runs on each node (or selected nodes). Great for system-level services like log collectors or monitoring agents that need to run everywhere in your cluster. +- **Jobs** run pods until they complete successfully, then stop. Unlike other controllers, they don't restart completed pods. Ideal for one-time tasks like data migrations or batch processing. +- **CronJobs** create Jobs on a schedule using familiar cron syntax. They're perfect for recurring tasks like backups, report generation, or cleanup operations. + +## Understanding the Controller Hierarchy + +It's helpful to understand how these controllers relate to each other: + +**Deployment → ReplicaSet → Pods** + +When you create a Deployment, here's what happens: +1. **Deployment** creates and manages ReplicaSets +2. **ReplicaSet** creates and manages the actual Pods +3. **Pods** run your application containers + +This layered approach enables powerful features: +- **Rolling updates**: Deployments create new ReplicaSets while gradually scaling down old ones +- **Rollbacks**: Deployments can switch back to previous ReplicaSet versions +- **Scaling**: Changes to replica count flow through ReplicaSets to Pods + +You'll often see ReplicaSets when debugging (like `kubectl get rs`), but you typically manage them indirectly through Deployments. + +### Why Use Workload Controllers? + +**Managing pods directly:** +- Manual pod replacement when they fail +- No built-in scaling mechanisms +- Complex update procedures +- No rollback capabilities +- Production management becomes difficult + +**Using workload controllers:** +- Automatic pod replacement and healing +- Easy scaling with a single command +- Rolling updates with zero downtime +- Simple rollback to previous versions +- Production-ready management + +| Controller | Purpose | Best For | +|------------|---------|----------| +| **Deployments** | Stateless applications | Web apps, APIs, microservices | +| **ReplicaSets** | Maintain pod replicas | Usually managed by Deployments | +| **StatefulSets** | Stateful applications | Databases, message queues | +| **DaemonSets** | Node-level services | Logging agents, monitoring | +| **Jobs** | Run-to-completion tasks | Data migration, batch processing | +| **CronJobs** | Scheduled tasks | Backups, reports, cleanup | + +### Choosing the Right Workload Controller + +Ask yourself these questions to pick the right controller: + +**What type of application am I running?** + +- **Web app, API, or microservice?** → Use **Deployment** + - Pods are interchangeable and stateless + - Can run multiple identical copies + - Example: Our retail store UI, catalog service + +- **Database or message queue?** → Use **StatefulSet** + - Needs persistent storage + - Requires stable network identity + - Example: MySQL database, Kafka cluster + +- **System service on every node?** → Use **DaemonSet** + - Monitoring, logging, or networking + - One pod per node automatically + - Example: Log collector, node monitoring + +- **One-time task or batch job?** → Use **Job** + - Runs until completion + - Database migration, data processing + - Example: Import product catalog + +- **Recurring scheduled task?** → Use **CronJob** + - Runs on a schedule (like cron) + - Backups, reports, cleanup + - Example: Daily sales report generation + +## Key Points to Remember + +* Different workload controllers serve different application patterns +* Deployments are for stateless applications that can have 
identical replicas +* StatefulSets are for stateful applications that need persistent identity +* DaemonSets ensure pods run on every node for system-level services +* Jobs run tasks to completion, CronJobs run them on schedule +* Choose the right controller based on your application's requirements + +## Explore Each Workload Type + +Now that you have an overview of workload controllers, dive deeper into each type: + +- **[Deployments](./deployments)** - Learn to deploy and manage stateless applications like our retail store UI +- **[StatefulSets](./statefulsets)** - Understand how to run stateful applications like databases with persistent storage +- **[DaemonSets](./daemonsets)** - Explore system-level services that run on every node +- **[Jobs & CronJobs](./jobs)** - Master batch processing and scheduled tasks + diff --git a/website/docs/introduction/basics/workload-management/jobs.md b/website/docs/introduction/basics/workload-management/jobs.md new file mode 100644 index 0000000000..e8e036f43a --- /dev/null +++ b/website/docs/introduction/basics/workload-management/jobs.md @@ -0,0 +1,404 @@ +--- +title: Jobs & CronJobs +sidebar_position: 34 +--- + +# Jobs & CronJobs + +**Jobs** and **CronJobs** are controllers for running **finite or recurring tasks**. Unlike Deployments or StatefulSets that keep pods running continuously, Jobs run tasks to completion, and CronJobs run Jobs on a schedule. + +Key benefits: +- **Run to completion** - Pods finish the task and stop +- **Retry failed tasks** - Automatically retry based on backoff policy +- **Parallel execution** - Multiple Pods can run simultaneously +- **Scheduled tasks** - CronJobs run tasks at specific times +- **Track history** - Monitor successful and failed completions + +## When to Use Jobs & CronJobs + +**Use Jobs for:** +- Database migrations and schema updates +- Data processing and ETL operations +- One-time setup tasks and initialization +- Backup operations and file processing + +**Use CronJobs for:** +- Regular backups (daily, weekly) +- Cleanup tasks and log rotation +- Report generation and data synchronization +- Periodic health checks and monitoring + +## Deploying a Job + +Let's create a data processing job: + +::yaml{file="manifests/modules/introduction/basics/jobs/data-processing-job.yaml" paths="kind,metadata.name,spec.completions,spec.backoffLimit,spec.template.spec.restartPolicy" title="data-processing-job.yaml"} + +1. `kind: Job`: Creates a Job controller +2. `metadata.name`: Name of the job (data-processor) +3. `spec.completions`: Number of successful completions needed (1) +4. `spec.backoffLimit`: Maximum retry attempts (3) +5. `spec.template.spec.restartPolicy`: Pods never restart on failure; the Job controller handles retries + +Deploy the job: +```bash +$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/jobs/data-processing-job.yaml +``` + +## Inspecting Job + +Check job status: +```bash +$ kubectl get jobs -n catalog +NAME COMPLETIONS DURATION AGE +data-processor 1/1 15s 1m +``` + +View the job's pod: +```bash +$ kubectl get pods -n catalog -l job-name=data-processor +NAME READY STATUS RESTARTS AGE +data-processor-h7mg7 0/1 Completed 0 25s +``` + +Wait for the job to complete: +```bash +$ kubectl wait --for=condition=complete --timeout=60s job/data-processor -n catalog +``` + +Check job logs to see the processing output: +```bash +$ kubectl logs -n catalog job/data-processor +Starting data processing job... +Processing catalog data files... +Processing file 1/5... 
+File 1 processed successfully
+...
+Data processing job completed successfully!
+```
+
+Get detailed job information:
+```bash
+$ kubectl describe job -n catalog data-processor
+Name:             data-processor
+Namespace:        catalog
+Selector:         batch.kubernetes.io/controller-uid=639c46e3-ee04-4914-8c97-516a14087c1d
+Labels:           app.kubernetes.io/created-by=eks-workshop
+                  app.kubernetes.io/name=data-processor
+Annotations:      <none>
+Parallelism:      1
+Completions:      1
+Completion Mode:  NonIndexed
+Suspend:          false
+Backoff Limit:    3
+Start Time:       Sun, 05 Oct 2025 18:51:01 +0000
+Completed At:     Sun, 05 Oct 2025 18:51:14 +0000
+Duration:         13s
+Pods Statuses:    0 Active (0 Ready) / 1 Succeeded / 0 Failed
+Pod Template:
+  Labels:  app=data-processor
+           batch.kubernetes.io/controller-uid=639c46e3-ee04-4914-8c97-516a14087c1d
+           batch.kubernetes.io/job-name=data-processor
+           controller-uid=639c46e3-ee04-4914-8c97-516a14087c1d
+           job-name=data-processor
+  Containers:
+   processor:
+    Image:      busybox:1.36
+    Port:       <none>
+    Host Port:  <none>
+    Command:
+      /bin/sh
+      -c
+      echo "Starting data processing job..."
+      echo "Processing catalog data files..."
+
+      # Simulate processing multiple files
+      for i in $(seq 1 5); do
+        echo "Processing file $i/5..."
+        sleep 2
+        echo "File $i processed successfully"
+      done
+
+      echo "Generating summary report..."
+      cat > /tmp/processing-report.txt << EOF
+      Data Processing Report
+      =====================
+      Job: data-processor
+      Date: $(date)
+      Files processed: 5
+      Status: Completed successfully
+      EOF
+
+      echo "Report generated:"
+      cat /tmp/processing-report.txt
+      echo "Data processing job completed successfully!"
+
+    Limits:
+      cpu:     200m
+      memory:  256Mi
+    Requests:
+      cpu:        100m
+      memory:     128Mi
+    Environment:  <none>
+    Mounts:       <none>
+  Volumes:        <none>
+  Node-Selectors: <none>
+  Tolerations:    <none>
+Events:
+  Type    Reason            Age   From            Message
+  ----    ------            ----  ----            -------
+  Normal  SuccessfulCreate  60s   job-controller  Created pod: data-processor-h7mg7
+  Normal  Completed         47s   job-controller  Job completed
+```
+
+## Deploying a CronJob
+
+Let's create a cleanup CronJob that runs every minute:
+
+::yaml{file="manifests/modules/introduction/basics/jobs/catalog-cleanup.yaml" paths="kind,metadata.name,spec.schedule,spec.jobTemplate" title="catalog-cleanup.yaml"}
+
+1. `kind: CronJob`: Creates a CronJob controller
+2. `metadata.name`: Name of the CronJob (`catalog-cleanup`)
+3. `spec.schedule`: Cron schedule (`*/1 * * * *` = every minute)
+4. `spec.jobTemplate`: Template for jobs that will be created
+
+Deploy the CronJob:
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/jobs/catalog-cleanup.yaml
+```
+
+## Managing CronJobs
+
+View CronJobs:
+```bash
+$ kubectl get cronjobs -n catalog
+NAME              SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE
+catalog-cleanup   */1 * * * *   False     0        <none>          30s
+```
+
+Initially, `LAST SCHEDULE` shows `<none>` because the CronJob hasn't run yet.
Since our CronJob runs every minute, let's manually trigger it to see it in action immediately: + +```bash +# Manually trigger a CronJob to see it work immediately +$ kubectl create job --from=cronjob/catalog-cleanup manual-cleanup -n catalog +``` + +Now view jobs created by the CronJob: +```bash +$ kubectl get jobs -n catalog +NAME STATUS COMPLETIONS DURATION AGE +data-processor Complete 1/1 13s 17m +manual-cleanup Running 0/1 5s 5s +``` + +Wait for the job pod to be running before checking logs: +```bash +$ kubectl wait --for=jsonpath='{.status.phase}'=Running pod -l job-name=manual-cleanup -n catalog --timeout=60s +``` + +Check the logs of the job execution: +```bash +$ kubectl logs job/manual-cleanup -n catalog +Starting cleanup job at Mon Oct 5 17:30:00 UTC 2025 +Checking for temporary files... +Found 3 temporary files to clean up: + - /tmp/cache_file_1.tmp + - /tmp/cache_file_2.tmp + - /tmp/old_log.log +Cleaning up temporary files... +Temporary files removed successfully +Cleanup completed at Mon Oct 5 17:30:05 UTC 2025 +Next cleanup scheduled in 1 minute +``` + +Wait for the CronJob to run automatically (or check back in 1 minute): +```bash +# Check if the CronJob has run automatically +$ kubectl get cronjobs -n catalog +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +catalog-cleanup */1 * * * * False 0 30s 2m +``` + +View all jobs in the namespace (including those created by CronJobs): +```bash +$ kubectl get jobs -n catalog +NAME STATUS COMPLETIONS DURATION AGE +catalog-cleanup-29328191 Complete 1/1 9s 114s +catalog-cleanup-29328192 Complete 1/1 9s 54s +data-processor Complete 1/1 13s 21m +manual-cleanup Complete 1/1 10s 56s +``` + +To see which jobs were created by a specific CronJob, look for jobs with names starting with the CronJob name: +```bash hook=cronjob-first-run +$ kubectl get jobs -n catalog | grep catalog-cleanup +catalog-cleanup-29328192 Complete 1/1 9s 74s +catalog-cleanup-29328193 Complete 1/1 8s 14s +``` + +You can also check the job's owner reference to see which CronJob created it: +```bash +$ kubectl get job manual-cleanup -n catalog -o yaml | grep -A 5 ownerReferences + ownerReferences: + - apiVersion: batch/v1 + controller: true + kind: CronJob + name: catalog-cleanup + uid: 7f2deb86-a5c7-4703-ac5e-c5dd4893ff23 +``` + +Clean up the manual job: +```bash +$ kubectl delete job manual-cleanup -n catalog +``` + +### Monitoring CronJob Execution + +Check CronJob status and history: +```bash +$ kubectl describe cronjob catalog-cleanup -n catalog +Name: catalog-cleanup +Namespace: catalog +Labels: app.kubernetes.io/created-by=eks-workshop + app.kubernetes.io/name=catalog-cleanup +Annotations: +Schedule: */1 * * * * +Concurrency Policy: Allow +Suspend: False +Successful Job History Limit: 3 +Failed Job History Limit: 1 +Starting Deadline Seconds: +Selector: +Parallelism: +Completions: +Pod Template: + Labels: app=catalog-cleanup + Containers: + cleanup: + Image: busybox:1.36 + Port: + Host Port: + Command: + /bin/sh + -c + echo "Starting cleanup job at $(date)" + echo "Checking for temporary files..." + + # Simulate finding and cleaning up files + echo "Found 3 temporary files to clean up:" + echo " - /tmp/cache_file_1.tmp" + echo " - /tmp/cache_file_2.tmp" + echo " - /tmp/old_log.log" + + # Simulate cleanup process + sleep 3 + echo "Cleaning up temporary files..." 
+ sleep 2 + echo "Temporary files removed successfully" + + echo "Cleanup completed at $(date)" + echo "Next cleanup scheduled in 1 minute" + + Limits: + cpu: 100m + memory: 128Mi + Requests: + cpu: 50m + memory: 64Mi + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: +Last Schedule Time: Sun, 05 Oct 2025 19:14:00 +0000 +Active Jobs: catalog-cleanup-29328194 +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 19m cronjob-controller Created job catalog-cleanup-29328175 + Normal SawCompletedJob 18m cronjob-controller Saw completed job: catalog-cleanup-29328175, condition: Complete + ... +``` + +This shows: +- **Schedule**: When the job runs +- **Last Schedule Time**: When it last executed +- **Active**: Currently running jobs +- **Events**: Recent CronJob activity + +View recent events for troubleshooting: +```bash +$ kubectl get events -n catalog --field-selector involvedObject.name=catalog-cleanup +LAST SEEN TYPE REASON OBJECT MESSAGE +20m Normal SuccessfulCreate cronjob/catalog-cleanup Created job catalog-cleanup-29328175 +20m Normal SawCompletedJob cronjob/catalog-cleanup Saw completed job: catalog-cleanup-29328175, condition: Complete +3m28s Warning UnexpectedJob cronjob/catalog-cleanup Saw a job that the controller did not create or forgot: manual-cleanup +18m Normal SuccessfulCreate cronjob/catalog-cleanup Created job catalog-cleanup-29328176 +18m Normal SuccessfulCreate cronjob/catalog-cleanup Created job catalog-cleanup-29328177 +18m Normal SawCompletedJob cronjob/catalog-cleanup Saw completed job: catalog-cleanup-29328176, condition: Complete +18m Normal SuccessfulDelete cronjob/catalog-cleanup Deleted job catalog-cleanup-29328175 +18m Normal SawCompletedJob cronjob/catalog-cleanup Saw completed job: catalog-cleanup-29328177, condition: Complete +17m Normal SuccessfulCreate cronjob/catalog-cleanup Created job catalog-cleanup-29328178 +17m Normal SuccessfulDelete cronjob/catalog-cleanup Deleted job catalog-cleanup-29328176 +17m Normal SawCompletedJob cronjob/catalog-cleanup Saw completed job: catalog-cleanup-29328178, condition: Complete +``` + +### Suspending and Resuming CronJobs + +Temporarily stop a CronJob: +```bash +$ kubectl patch cronjob catalog-cleanup -n catalog -p '{"spec":{"suspend":true}}' +$ kubectl get cronjobs -n catalog +NAME SCHEDULE TIMEZONE SUSPEND ACTIVE LAST SCHEDULE AGE +catalog-cleanup */1 * * * * UTC True 0 42s 24m +``` + +Resume a suspended CronJob: +```bash +$ kubectl patch cronjob catalog-cleanup -n catalog -p '{"spec":{"suspend":false}}' +$ kubectl get cronjobs -n catalog +NAME SCHEDULE TIMEZONE SUSPEND ACTIVE LAST SCHEDULE AGE +catalog-cleanup */1 * * * * UTC False 1 16s 24m +``` + +## Common Cron Schedules + +| Schedule | Description | +|----------|-------------| +| `0 2 * * *` | Daily at 2 AM | +| `0 */6 * * *` | Every 6 hours | +| `0 0 * * 0` | Every Sunday at midnight | +| `*/15 * * * *` | Every 15 minutes | +| `0 9 * * 1-5` | Weekdays at 9 AM | + +## Parallel Jobs + +For processing multiple items simultaneously: + +```yaml +spec: + completions: 10 # Process 10 items total + parallelism: 3 # Run 3 pods at once +``` +- `completions` = total number of successful Pods +- `parallelism` = how many Pods run concurrently + +This creates 10 successful completions using 3 parallel pods. + +## Jobs vs Other Controllers +| Controller | Purpose | Pods run continuously? 
| Use Case | +|------------|---------|---------------|----------------| +| Job | One-off task | No | Batch processing, migrations | +| CronJob | Scheduled jobs | No | Backups, periodic reports | +| Deployment | Long-running stateless app | Yes | Web apps, APIs | +| StatefulSet | Stateful services | Yes | Databases, queues | + +## Key Points to Remember + +* Jobs run pods until tasks complete successfully +* CronJobs create Jobs automatically on schedules +* Use `restartPolicy: Never` for Jobs and `OnFailure` for CronJobs +* Set backoff limits to control retry attempts +* Jobs can run multiple pods in parallel for faster processing +* Clean up completed Jobs to avoid resource accumulation +* Jobs and CronJobs are ideal for finite or recurring batch tasks, not long-running services diff --git a/website/docs/introduction/basics/workload-management/statefulsets.md b/website/docs/introduction/basics/workload-management/statefulsets.md new file mode 100644 index 0000000000..3fa8047071 --- /dev/null +++ b/website/docs/introduction/basics/workload-management/statefulsets.md @@ -0,0 +1,103 @@ +--- +title: StatefulSets +sidebar_position: 32 +--- + +# StatefulSets + +**StatefulSets** manage applications that need **stable identities and persistent storage**. Unlike Deployments, where Pods are interchangeable, each Pod in a StatefulSet **keeps a unique, predictable identity** throughout its lifecycle. + +They provide several important benefits for stateful applications: +- **Provide stable identities** - Pods get predictable names (mysql-0, mysql-1, mysql-2) +- **Enable persistent storage** - Each pod can have its own persistent volume +- **Ensure ordered operations** - Pods are created and deleted sequentially +- **Maintain stable networking** - Each pod keeps the same network identity +- **Support rolling updates in order** - Pods update one at a time + +## Deploying a StatefulSet + +Let's deploy a MySQL database for our catalog service: + +The following YAML creates a StatefulSet running MySQL for the catalog service, with persistent storage and predictable Pod names. + +::yaml{file="manifests/base-application/catalog/statefulset-mysql.yaml" paths="kind,metadata.name,spec.serviceName,spec.replicas" title="statefulset.yaml"} + +1. `kind: StatefulSet`: Creates a StatefulSet controller +2. `metadata.name`: Name of the StatefulSet (catalog-mysql) +3. `spec.serviceName`: Required for stable network identities (creates a headless Service) +4. `spec.replicas`: Number of pods to run (1 for this example) + +Deploy the database: +```bash +$ kubectl apply -k ~/environment/eks-workshop/base-application/catalog/ +``` + +## Inspecting StatefulSet + +Check StatefulSet status: +```bash +$ kubectl get statefulset -n catalog +NAME READY AGE +catalog-mysql 1/1 2m +``` + +View the pods created: +```bash +$ kubectl get pods -n catalog +NAME READY STATUS RESTARTS AGE +catalog-mysql-0 1/1 Running 0 2m +``` +> Notice the predictable pod name with a number suffix + +Get detailed information about the StatefulSet: +```bash +$ kubectl describe statefulset -n catalog catalog-mysql +``` + +The suffix (`-0`, `-1`, etc.) allows you to track each Pod individually for storage and network purposes. 
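+
+That stable identity extends to DNS as well. Through the headless Service referenced by `spec.serviceName`, each pod gets its own DNS record of the form `<pod-name>.<service-name>.<namespace>.svc.cluster.local`. Assuming the headless Service here is named `catalog-mysql` (matching `spec.serviceName` in the manifest above), you could verify this with a short-lived pod; this is an illustrative check rather than a required lab step:
+
+```bash
+# Resolve the per-pod DNS name of the first StatefulSet replica
+$ kubectl run -n catalog dns-check --image=busybox:1.36 --restart=Never --rm -it -- \
+    nslookup catalog-mysql-0.catalog-mysql.catalog.svc.cluster.local
+```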
+ +## Scaling StatefulSet + +Scale up to 3 replicas: +```bash +$ kubectl scale statefulset -n catalog catalog-mysql --replicas=3 +$ kubectl get pods -n catalog +NAME READY STATUS RESTARTS AGE +catalog-mysql-0 1/1 Running 0 5m +catalog-mysql-1 0/1 Pending 0 10s +catalog-mysql-1 1/1 Running 0 30s +catalog-mysql-2 0/1 Pending 0 5s +catalog-mysql-2 1/1 Running 0 25s +``` +You'll see pods created one at a time in order + +Scale back down: +```bash +$ kubectl scale statefulset -n catalog catalog-mysql --replicas=1 +``` + +Pods are deleted in reverse order (2, then 1, keeping 0), ensuring stability. + +Kubernetes also ensures that **each Pod keeps its persistent volume**, even when scaled up or down. + +## StatefulSets vs Deployments +| Feature | StatefulSet | Deployment | +| ----------------- | ----------------------------- | ----------------- | +| Pod Names | Stable (`mysql-0`, `mysql-1`) | Random | +| Storage | Persistent per Pod | Usually ephemeral | +| Creation/Deletion | Ordered | Any order | +| Network Identity | Stable | Dynamic | +| Use Case | Databases, message queues | Stateless apps | + +:::info +StatefulSets are ideal for applications that require persistent identity, stable networking, and ordered operations. +::: + +## Key Points to Remember + +* StatefulSets provide stable, unique identities for each pod +* Perfect for databases, message queues, and clustered applications +* Each pod can have its own persistent storage that survives restarts +* Operations happen in order - creation (0→1→2) and deletion (2→1→0) +* Pod names are predictable and never change +* Use StatefulSets whenever your application needs identity, stability, and persistence. diff --git a/website/docs/introduction/basics/workload-management/tests/hook-cronjob-first-run.sh b/website/docs/introduction/basics/workload-management/tests/hook-cronjob-first-run.sh new file mode 100644 index 0000000000..ee0d1b0c6b --- /dev/null +++ b/website/docs/introduction/basics/workload-management/tests/hook-cronjob-first-run.sh @@ -0,0 +1,41 @@ +set -Eeuo pipefail + +before() { + echo "Ensuring CronJob 'catalog-cleanup' has created at least one job..." + + # Check if any jobs already exist from the CronJob + job_count=$(kubectl get jobs -n catalog --no-headers 2>/dev/null | grep -c "catalog-cleanup-" || echo "0") + + if [[ "$job_count" -ge 1 ]]; then + echo "CronJob has already created $job_count job(s)." + return + fi + + # If no jobs exist, wait a bit for the CronJob to run naturally + echo "No existing jobs found. Waiting up to 90 seconds for CronJob to run..." + for i in {1..9}; do + job_count=$(kubectl get jobs -n catalog --no-headers 2>/dev/null | grep -c "catalog-cleanup-" || echo "0") + if [[ "$job_count" -ge 1 ]]; then + echo "CronJob has created $job_count job(s)." + return + fi + sleep 10 + done + + echo "CronJob hasn't run yet. This is normal for CronJobs with minute-based schedules." + echo "The test will proceed - CronJob jobs may appear in subsequent runs." +} + +after() { + echo "Checking for CronJob-created jobs..." + job_count=$(kubectl get jobs -n catalog --no-headers 2>/dev/null | grep -c "catalog-cleanup-" || echo "0") + + if [[ "$job_count" -ge 1 ]]; then + echo "Found $job_count CronJob-created job(s). Verification successful." + else + echo "No CronJob-created jobs found yet. This is normal - CronJobs run on schedule." + echo "The manual job 'manual-cleanup' demonstrates the same functionality." 
+ fi +} + +"$@" diff --git a/website/docs/introduction/getting-started/index.md b/website/docs/introduction/getting-started/index.md index bd4d533cb9..f5c6790884 100644 --- a/website/docs/introduction/getting-started/index.md +++ b/website/docs/introduction/getting-started/index.md @@ -1,25 +1,149 @@ --- -title: Getting started -sidebar_position: 30 +title: Getting Started +sidebar_position: 50 sidebar_custom_props: { "module": true } description: "Learn the basics of running workloads on Amazon Elastic Kubernetes Service." --- ::required-time -Welcome to the first hands-on lab in the EKS workshop. The goal of this exercise is to familiarize ourselves with the sample application we'll use for many of the coming lab exercises and in doing so touch on some basic concepts related to deploying workloads to EKS. We'll explore the architecture of the application and deploy out the components to our EKS cluster. - -Let's deploy your first workload to the EKS cluster in your lab environment and explore! +Welcome to the first hands-on lab in the EKS workshop. The goal of this exercise is to prepare the IDE with necessary configurations and explore the structure. Before we begin we need to run the following command to prepare our IDE environment and EKS cluster: +:::tip Prepare your environment for this section: + ```bash $ prepare-environment introduction/getting-started ``` +This command will clone the EKS workshop Git repository into the IDE environment. +::: + +
+<details>
+<summary>What does prepare-environment do? (Click to expand)</summary>
+
+The `prepare-environment` command is a crucial tool that sets up your lab environment for each workshop module. Here's what it does behind the scenes:
+
+- **Repository Setup**: Downloads the latest EKS Workshop content from GitHub to `/eks-workshop/repository` and links Kubernetes manifests to `~/environment/eks-workshop`
+- **Cluster Reset & Cleanup**: Resets the sample retail application to its base state, removes any leftover resources from previous labs, and restores EKS managed node groups to their initial size (3 nodes)
+- **Lab-Specific Infrastructure**: Ensures the target module is ready to use by creating any extra AWS resources with Terraform, deploying the required Kubernetes manifests, configuring environment variables, and installing necessary add-ons or components
+
+</details>
+
+## Workshop Structure
+
+After running `prepare-environment`, you'll have access to the workshop materials at `~/environment/eks-workshop/`. The workshop is organized into modular sections that you can complete in any order.
+
+## Exploring Your EKS Cluster
+
+Now that your environment is ready, let's explore the EKS cluster that's been provisioned for you. Run these commands to get familiar with your cluster:
+
+### Cluster Information
+
+First, let's verify your cluster connection and get basic information:
+
+```bash
+$ kubectl cluster-info
+Kubernetes control plane is running at https://XXXXXXXXXXXXXXXXXXXXXXXXXX.gr7.us-west-2.eks.amazonaws.com
+CoreDNS is running at https://XXXXXXXXXXXXXXXXXXXXXXXXXX.gr7.us-west-2.eks.amazonaws.com/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
+
+To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
+```
+
+Check the cluster version:
+```bash
+$ kubectl version
+Client Version: v1.33.5
+Kustomize Version: v5.6.0
+Server Version: v1.33.5-eks-113cf36
+```
+
+Check the worker nodes in the cluster:
+
+```bash
+$ kubectl get nodes -o wide
+NAME                                          STATUS   ROLES    AGE   VERSION               INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                       KERNEL-VERSION                   CONTAINER-RUNTIME
+ip-10-42-121-153.us-west-2.compute.internal   Ready    <none>   26h   v1.33.5-eks-113cf36   10.42.121.153   <none>        Amazon Linux 2023.9.20250929   6.12.46-66.121.amzn2023.x86_64   containerd://1.7.27
+ip-10-42-141-241.us-west-2.compute.internal   Ready    <none>   26h   v1.33.5-eks-113cf36   10.42.141.241   <none>        Amazon Linux 2023.9.20250929   6.12.46-66.121.amzn2023.x86_64   containerd://1.7.27
+ip-10-42-183-73.us-west-2.compute.internal    Ready    <none>   26h   v1.33.5-eks-113cf36   10.42.183.73    <none>        Amazon Linux 2023.9.20250929   6.12.46-66.121.amzn2023.x86_64   containerd://1.7.27
+```
+
+This shows your worker nodes, their Kubernetes version, internal/external IPs, and the container runtime being used.
+
+### Explore Cluster Components
+
+Let's look at the system components running in your cluster:
+
+```bash
+$ kubectl get pods -n kube-system
+NAME                              READY   STATUS    RESTARTS   AGE
+aws-node-8cz4d                    2/2     Running   0          26h
+aws-node-jlg4q                    2/2     Running   0          26h
+aws-node-vdc56                    2/2     Running   0          26h
+coredns-7bf648ff5d-4fqv9          1/1     Running   0          26h
+coredns-7bf648ff5d-bfwwf          1/1     Running   0          26h
+kube-proxy-77ln2                  1/1     Running   0          26h
+kube-proxy-7bwbj                  1/1     Running   0          26h
+kube-proxy-jnhfx                  1/1     Running   0          26h
+metrics-server-7fb96f5556-2k4lh   1/1     Running   0          26h
+metrics-server-7fb96f5556-mpj78   1/1     Running   0          26h
+```
+
+You'll see essential components like:
+- **CoreDNS** - Provides DNS services for the cluster
+- **VPC CNI (`aws-node`)** - Handles pod networking within your VPC
+- **kube-proxy** - Manages network rules on each node
+- **Metrics Server** - Provides resource usage metrics for pods and nodes
+
+## Deploy the Sample Application
+
+Let's deploy the retail store application to see Kubernetes in action.
We'll use Kustomize, which is built into kubectl: + +```bash wait=10 +$ kubectl apply -k ~/environment/eks-workshop/base-application +``` + +After this is complete we can use `kubectl wait` to make sure all the components have started before we proceed: + +```bash timeout=200 +$ kubectl wait --for=condition=Ready --timeout=180s pods \ + -l app.kubernetes.io/created-by=eks-workshop -A +``` + +We'll now have a Namespace for each of our application components: + +```bash +$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop +NAME STATUS AGE +carts Active 62s +catalog Active 7m17s +checkout Active 62s +orders Active 62s +other Active 62s +ui Active 62s +``` + +We can also see all of the Deployments created for the components: + +```bash +$ kubectl get deployment -l app.kubernetes.io/created-by=eks-workshop -A +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +carts carts 1/1 1 1 90s +carts carts-dynamodb 1/1 1 1 90s +catalog catalog 1/1 1 1 7m46s +checkout checkout 1/1 1 1 90s +checkout checkout-redis 1/1 1 1 90s +orders orders 1/1 1 1 90s +orders orders-postgresql 1/1 1 1 90s +ui ui 1/1 1 1 90s +``` + +The sample application is now deployed and ready to provide a foundation for us to use in the rest of the labs in this workshop! -What is this command doing? For this lab it is cloning the EKS Workshop Git repository in to the IDE environment so the Kubernetes manifest files we need are present on the file system. +## What's Next? -You'll notice in subsequent labs we'll also run this command, where it will perform two important additional functions: +Your EKS cluster is ready and the sample application is deployed! You can now jump into any workshop module based on your learning goals. -1. Reset the EKS cluster back to its initial state -2. Install any additional components needed in to the cluster for the upcoming lab exercise +:::tip +Each module is self-contained and includes its own `prepare-environment` command to set up the required resources. You can complete them in any order! +::: diff --git a/website/docs/introduction/helm/index.md b/website/docs/introduction/helm/index.md index 442af23774..55f1550854 100644 --- a/website/docs/introduction/helm/index.md +++ b/website/docs/introduction/helm/index.md @@ -1,7 +1,7 @@ --- title: Helm sidebar_custom_props: { "module": true } -sidebar_position: 50 +sidebar_position: 80 --- ::required-time @@ -161,4 +161,113 @@ $ helm uninstall ui --namespace ui --wait This will delete all the resources created by the chart for that release from our EKS cluster. -Now that you understand how Helm works, proceed to the [Fundamentals module](/docs/fundamentals). +## Deploying Applications with Helm + +Now let's see how Helm can be used to deploy our retail store application. While the workshop primarily uses Kustomize, understanding Helm is valuable as many third-party applications are distributed as Helm charts. + +### Creating a Simple Chart for the Catalog Service + +Let's create a basic Helm chart for our catalog service to understand how applications can be packaged and deployed with Helm: + +```bash +$ helm create retail-catalog +``` + +This creates a basic chart structure. Let's examine what was created: + +```bash +$ ls -la retail-catalog/ +total 8 +drwxr-xr-x 4 user user 128 Nov 15 10:30 . +drwxr-xr-x 3 user user 96 Nov 15 10:30 .. 
+-rw-r--r-- 1 user user 1141 Nov 15 10:30 Chart.yaml +drwxr-xr-x 2 user user 64 Nov 15 10:30 charts +drwxr-xr-x 3 user user 96 Nov 15 10:30 templates +-rw-r--r-- 1 user user 1862 Nov 15 10:30 values.yaml +``` + +### Customizing the Chart + +Let's modify the default values to deploy our catalog service. Update the `values.yaml` file: + +```bash +$ cat > retail-catalog/values.yaml << 'EOF' +replicaCount: 2 + +image: + repository: public.ecr.aws/aws-containers/retail-store-sample-catalog + tag: "0.4.0" + pullPolicy: IfNotPresent + +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +resources: + requests: + cpu: 128m + memory: 512Mi + limits: + cpu: 256m + memory: 512Mi + +nameOverride: "catalog" +fullnameOverride: "catalog" +EOF +``` + +### Installing the Chart + +Now let's install our catalog service using the Helm chart: + +```bash +$ helm install catalog ./retail-catalog --namespace catalog --create-namespace +``` + +Verify the deployment: + +```bash +$ helm list -n catalog +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +catalog catalog 1 2024-11-15 10:35:42.123456789 +0000 UTC deployed retail-catalog-0.1.0 1.16.0 +``` + +Check the running pods: + +```bash +$ kubectl get pods -n catalog +NAME READY STATUS RESTARTS AGE +catalog-7d4b8c9f8d-abc12 1/1 Running 0 2m +catalog-7d4b8c9f8d-def34 1/1 Running 0 2m +``` + +### Upgrading the Release + +One of Helm's strengths is managing application upgrades. Let's scale our application by updating the replica count: + +```bash +$ helm upgrade catalog ./retail-catalog \ + --namespace catalog \ + --set replicaCount=3 +``` + +### Rolling Back + +If something goes wrong, Helm makes it easy to rollback: + +```bash +$ helm rollback catalog 1 -n catalog +``` + +### Cleaning Up + +Remove the Helm release: + +```bash +$ helm uninstall catalog -n catalog +``` + +This example shows how Helm provides a higher-level abstraction for deploying applications, with built-in support for upgrades, rollbacks, and configuration management. + +Now that you understand how Helm works, you can proceed to [Kustomize](../kustomize) to learn about declarative configuration management, or jump ahead to the [Fundamentals module](/docs/fundamentals). diff --git a/website/docs/introduction/index.md b/website/docs/introduction/index.md index 30ce71c000..d4ff2be426 100644 --- a/website/docs/introduction/index.md +++ b/website/docs/introduction/index.md @@ -8,7 +8,7 @@ Welcome to the **AWS Elastic Kubernetes Service (EKS) workshop**! This workshop guides you through a set of hands-on lab exercises to learn and explore the various features provided by EKS and how it integrates with the broader set of services offered by AWS. The labs are grouped across a number of areas: -- **Introduction** - Learn the format and structure of this workshop +- **Introduction** - Get Started with the workshop’s format and structure, provision EKS cluster, Kubernetes basics and familiarize yourself with the sample application. 
- **Fundamentals** - Familiarize yourself with basic EKS concepts such as managed node groups, Fargate, exposing your applications and utilizing storage - **Autoscaling** - Understand how to automatically scale your applications and clusters horizontally and vertically - **Observability** - Monitoring is a critical factor getting a workload to production diff --git a/website/docs/introduction/kustomize/index.md b/website/docs/introduction/kustomize/index.md index a947af906d..28a17b1012 100644 --- a/website/docs/introduction/kustomize/index.md +++ b/website/docs/introduction/kustomize/index.md @@ -1,7 +1,7 @@ --- title: Kustomize sidebar_custom_props: { "module": true } -sidebar_position: 40 +sidebar_position: 70 --- ::required-time @@ -17,13 +17,56 @@ $ prepare-environment [Kustomize](https://kustomize.io/) allows you to manage Kubernetes manifest files using declarative "kustomization" files. It provides the ability to express "base" manifests for your Kubernetes resources and then apply changes using composition, customization and easily making cross-cutting changes across many resources. -For example, take a look at the following manifest file for the `checkout` Deployment: +## Deploying the Retail Store Application + +Let's start by deploying the complete retail store application using Kustomize. The application consists of multiple microservices that work together: + +### Deploy the Base Application + +First, let's deploy the entire retail store application using the base configuration: + +```bash +$ kubectl apply -k ~/environment/eks-workshop/base-application +``` + +This single command deploys all the microservices. Let's see what was created: + +```bash +$ kubectl get pods -A -l app.kubernetes.io/created-by=eks-workshop +NAME READY STATUS RESTARTS AGE +cart-6d4f8c9b8d-xyz12 1/1 Running 0 2m +catalog-7b5c9d8e9f-abc34 1/1 Running 0 2m +checkout-8c6d0e1f2g-def56 1/1 Running 0 2m +orders-9d7e2f3g4h-ghi78 1/1 Running 0 2m +ui-0e8f3g4h5i-jkl90 1/1 Running 0 2m +``` + +### Understanding the Kustomization Structure + +The base application uses a `kustomization.yaml` file that references all the component directories: + +```bash +$ cat ~/environment/eks-workshop/base-application/kustomization.yaml +``` + +Each service has its own directory with Kubernetes manifests: + +```bash +$ ls ~/environment/eks-workshop/base-application/ +cart/ catalog/ checkout/ orders/ ui/ kustomization.yaml +``` + +### Customizing with Overlays + +Now let's see Kustomize's power by creating customizations. For example, let's scale the `checkout` service horizontally by updating the `replicas` field from 1 to 3. + +Take a look at the following manifest file for the `checkout` Deployment: ```file manifests/base-application/checkout/deployment.yaml ``` -This file has already been applied in the previous [Getting Started](../getting-started) lab, but let's say we wanted to scale this component horizontally by updating the `replicas` field using Kustomize. Rather than manually updating this YAML file, we'll use Kustomize to update the `spec/replicas` field from 1 to 3. +Rather than manually updating this YAML file, we'll use Kustomize to update the `spec/replicas` field from 1 to 3. To do so, we'll apply the following kustomization. @@ -94,6 +137,47 @@ $ kubectl kustomize ~/environment/eks-workshop/base-application \ This uses `envsubst` to substitute environment variable placeholders in the Kubernetes manifest files with the actual values based on your particular environment. 
For example in some manifests we need to reference the EKS cluster name with `$EKS_CLUSTER_NAME` or the AWS region with `$AWS_REGION`. -Now that you understand how Kustomize works, you can proceed to the [Helm module](/docs/introduction/helm) or go directly to the [Fundamentals module](/docs/fundamentals). +## Advanced Kustomize Patterns + +### Environment-Specific Configurations + +Kustomize excels at managing different configurations for different environments. You might have: + +- **Base**: Common configuration shared across all environments +- **Development Overlay**: Lower resource limits, debug logging enabled +- **Production Overlay**: Higher resource limits, multiple replicas, monitoring enabled + +### Cross-Cutting Changes + +One of Kustomize's strengths is making changes across multiple resources. For example, you could: + +- Add labels to all resources: `commonLabels` +- Add annotations to all resources: `commonAnnotations` +- Set resource limits across all deployments +- Configure image pull policies consistently + +### Deploying Individual Services + +You can also deploy individual services using their specific kustomization: + +```bash +# Deploy just the catalog service +$ kubectl apply -k ~/environment/eks-workshop/base-application/catalog + +# Deploy just the UI service +$ kubectl apply -k ~/environment/eks-workshop/base-application/ui +``` + +### Viewing Generated Manifests + +Before applying changes, you can preview what Kustomize will generate: + +```bash +$ kubectl kustomize ~/environment/eks-workshop/base-application/catalog +``` + +This shows you exactly what Kubernetes resources will be created without actually applying them to the cluster. + +Now that you understand how Kustomize works, you can proceed to the [Getting Started](/docs/introduction/getting-started) hands-on lab or go directly to the [Fundamentals module](/docs/fundamentals). To learn more about Kustomize, you can refer to the official Kubernetes [documentation](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/). diff --git a/website/docs/introduction/navigating-labs.md b/website/docs/introduction/navigating-labs.md index 0781f1e7e4..176b8cb87a 100644 --- a/website/docs/introduction/navigating-labs.md +++ b/website/docs/introduction/navigating-labs.md @@ -1,12 +1,12 @@ --- -title: Navigating the labs -sidebar_position: 25 +title: Navigating the Labs +sidebar_position: 30 --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -Let’s review how to navigate this web site and the content provided. +Let’s review how to navigate this website and the content provided. ## Structure @@ -15,21 +15,61 @@ The content of this workshop is made up of: 1. Individual lab exercises 2. Supporting content that explains concepts related to the labs -The lab exercises are designed in a way that you can run any modules as a self-contained exercise. Lab exercises will be displayed in the sidebar to the left and are designated by the icon shown here: +The lab exercises are designed in a way that you can run any module as a self-contained exercise. Lab exercises will be displayed in the sidebar to the left and are designated by the `LAB` icon. -![Lab icon example](/docs/introduction/lab-icon.webp) +## Opening the IDE -This module contains a single lab named **Getting started** which will be visible on the left side of your screen. +If you haven't done so yet, you can open the IDE from the *Event outputs* section at the bottom of the start page.
+ + ![Event Outputs copy/paste](/img/fastpaths/ide-open.png) + +## Prepare Environment + +The `prepare-environment` tool helps you set up and configure your lab environment for each section. Simply run: + +```bash +$ prepare-environment $MODULE_NAME +``` + +### Basic Usage Patterns +```bash +$ prepare-environment $MODULE_NAME/$LAB +``` + +**Examples** +```bash +# For the getting started lab +$ prepare-environment introduction/getting-started + +# For Karpenter autoscaling +$ prepare-environment autoscaling/compute/karpenter + +# For storage with EBS +$ prepare-environment fundamentals/storage/ebs + +# For networking security groups +$ prepare-environment networking/securitygroups-for-pods +``` :::caution -You should start each lab from the page indicated by this badge. Starting in the middle of a lab will cause unpredictable behavior. +You should start each lab from the page indicated by the "BEFORE YOU START" badge. Starting in the middle of a lab will cause unpredictable behavior. ::: -Depending on your browser the first time you copy/paste content in to the VSCode terminal you may be presented with a prompt that looks like this: +## Resetting Your Cluster (Modular Section Only) + +In the event that you accidentally configure your cluster or a module in a way that is not functioning, you have been provided with a mechanism to reset your EKS cluster as best we can, which can be run at any time. Simply run the command `prepare-environment` with no arguments and wait until it completes. This may take several minutes depending on the state of your cluster when it is run. A quick way to sanity-check the environment afterwards is sketched at the end of this page. -![VSCode copy/paste](/docs/introduction/vscode-copy-paste.webp) +```bash +$ prepare-environment +``` -## Terminal commands + +## Tips + +### Copy/Paste Permission +Depending on your browser, the first time you copy/paste content into the VSCode terminal you may be presented with a prompt that looks like this: + +VSCode copy/paste +### Terminal commands Most of the interaction you will do in this workshop will be done with terminal commands, which you can either manually type or copy/paste to the IDE terminal. You will see this terminal commands displayed like this: @@ -59,10 +99,6 @@ Fri Aug 30 12:26:58 MDT 2024 In this case you can either copy each command individually or copy all of the commands using the clipboard icon in the top right of the terminal window. Give it a shot! -## Resetting your EKS cluster - -In the event that you accidentally configure your cluster in a way that is not functioning you have been provided with a mechanism to reset your EKS cluster as best we can which can be run at any time. Simply run the command `prepare-environment` and wait until it completes. This may take several minutes depending on the state of your cluster when it is run. - ## Next Steps -Now that you're familiar with the format of this workshop, head to the [Getting started](/docs/introduction/getting-started) lab or skip ahead to any module in the workshop with the top navigation bar. +Now that you're familiar with the format of this workshop, head to the [Application Overview](/docs/introduction/getting-started/about) to learn about the sample application, then proceed to the [Getting Started](/docs/introduction/getting-started) lab, or skip ahead to any module in the workshop using the top navigation bar.
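As an unofficial follow-up to the `prepare-environment` and reset steps above, a quick way to sanity-check the environment once the command finishes is to confirm that kubectl can reach the cluster and that the expected namespaces exist; the exact resources present depend on which module you prepared:

```bash
# Confirm the worker nodes are registered and Ready
$ kubectl get nodes

# List namespaces; the sample application namespaces vary by module
$ kubectl get namespaces
```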
diff --git a/website/docs/observability/container-insights/visualize-application-metrics-cloudwatch.md b/website/docs/observability/container-insights/visualize-application-metrics-cloudwatch.md index 961e3fdf53..9c252afca9 100644 --- a/website/docs/observability/container-insights/visualize-application-metrics-cloudwatch.md +++ b/website/docs/observability/container-insights/visualize-application-metrics-cloudwatch.md @@ -3,6 +3,8 @@ title: "Application Metrics" sidebar_position: 50 --- +import dashboard from '@site/static/docs/observability/container-insights/cw-dashboard.webp'; + In this section we'll look at gaining insight into metrics exposed by our workloads and visualizing those metrics using Amazon CloudWatch Insights Prometheus. Some examples of these metrics could be: - System metrics such as Java heap metrics or database connection pool status diff --git a/website/docusaurus.config.js b/website/docusaurus.config.js index 7f5de93a82..3177ceb3b7 100644 --- a/website/docusaurus.config.js +++ b/website/docusaurus.config.js @@ -26,7 +26,7 @@ const baseUrl = process.env.BASE_URL || ""; /** @type {import('@docusaurus/types').Config} */ const config = { - title: "EKS Workshop", + title: "Amazon EKS Workshop", tagline: "Practical exercises to learn about Amazon Elastic Kubernetes Service", url: "https://www.eksworkshop.com", @@ -37,6 +37,31 @@ const config = { noIndex: process.env.ENABLE_INDEX !== "1", customFields: { showNotification: process.env.SHOW_NOTIFICATION === "1", + secondaryNav: { + eksGroup: { + label: 'Choose your own adventure', + items: [ + { to: '/docs/introduction', label: 'Intro' }, + { to: '/docs/fundamentals', label: 'Fundamentals' }, + { to: '/docs/observability', label: 'Observability' }, + { to: '/docs/security', label: 'Security' }, + { to: '/docs/networking', label: 'Networking' }, + { to: '/docs/automation', label: 'Automation' }, + { to: '/docs/aiml', label: 'AI/ML' }, + { to: '/docs/troubleshooting', label: 'Troubleshooting' }, + ], + }, + autoModeGroup: { + label: 'Amazon EKS Essentials', + items: [ + { to: '/docs/fastpaths/', label: 'Intro' }, + { to: '/docs/fastpaths/setup', label: 'Setup' }, + { to: '/docs/fastpaths/navigating-labs', label: 'Navigating the labs' }, + { to: '/docs/fastpaths/developer', label: 'Developer' }, + { to: '/docs/fastpaths/operator', label: 'Operator' }, + ], + }, + }, }, organizationName: "aws-samples", diff --git a/website/netlify-build.sh b/website/netlify-build.sh index f25a1ab137..2410f534c6 100644 --- a/website/netlify-build.sh +++ b/website/netlify-build.sh @@ -4,11 +4,29 @@ set -e source ./hack/lib/kubectl-version.sh -wget -q https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl -chmod +x ./kubectl - -mkdir -p ~/bin -mv ./kubectl ~/bin +# Check if kubectl already exists in ~/bin +if [ ! -f ~/bin/kubectl ]; then + echo "Downloading kubectl..." 
+ + # Detect OS and architecture + OS=$(uname -s | tr '[:upper:]' '[:lower:]') + ARCH=$(uname -m) + + # Map architecture names + if [ "$ARCH" = "x86_64" ]; then + ARCH="amd64" + elif [ "$ARCH" = "aarch64" ] || [ "$ARCH" = "arm64" ]; then + ARCH="arm64" + fi + + wget -q https://dl.k8s.io/release/$KUBECTL_VERSION/bin/$OS/$ARCH/kubectl + chmod +x ./kubectl + + mkdir -p ~/bin + mv ./kubectl ~/bin +else + echo "kubectl already exists in ~/bin, skipping download" +fi export PATH="$PATH:$HOME/bin" diff --git a/website/sidebars.js b/website/sidebars.js index 4375d03407..f7fc45a567 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -23,6 +23,7 @@ const sidebars = { automation: [{ type: "autogenerated", dirName: "automation" }], aiml: [{ type: "autogenerated", dirName: "aiml" }], troubleshooting: [{ type: "autogenerated", dirName: "troubleshooting" }], + fastpaths: [{ type: "autogenerated", dirName: "fastpaths" }], }; module.exports = sidebars; diff --git a/website/src/components/GlobalNotification/styles.module.css b/website/src/components/GlobalNotification/styles.module.css index deff13e0ae..3a5a5ff002 100644 --- a/website/src/components/GlobalNotification/styles.module.css +++ b/website/src/components/GlobalNotification/styles.module.css @@ -6,7 +6,7 @@ align-items: center; justify-content: space-between; position: fixed; - top: 80px; + top: 130px; right: 20px; z-index: 1000; border-radius: 8px; diff --git a/website/src/components/HomepageFeatures/styles.module.css b/website/src/components/HomepageFeatures/styles.module.css index a667c78079..04e2d2d654 100644 --- a/website/src/components/HomepageFeatures/styles.module.css +++ b/website/src/components/HomepageFeatures/styles.module.css @@ -7,6 +7,11 @@ color: rgb(28, 30, 33); } +[data-theme="dark"] .features { + background-color: var(--ifm-background-color); + color: var(--ifm-font-color-base); +} + .featureSvg { height: 200px; width: 200px; diff --git a/website/src/components/HomepageModuleLink/index.js b/website/src/components/HomepageModuleLink/index.js new file mode 100644 index 0000000000..9aff5c2b09 --- /dev/null +++ b/website/src/components/HomepageModuleLink/index.js @@ -0,0 +1,50 @@ +import React from "react"; +import clsx from "clsx"; +import styles from "./styles.module.css"; +import useBaseUrl from "@docusaurus/useBaseUrl"; +import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; +import Link from "@docusaurus/Link"; + +export default function HomepageModuleLink(props) { + const { siteConfig } = useDocusaurusContext(); + + return ( +
+
+
+
+

Amazon EKS EssentialsNew

+

Streamlined learning paths powered by Amazon EKS Auto Mode

+ + Start here + +
+ {siteConfig.customFields.secondaryNav.autoModeGroup.items.map((item, i) => ( + {item.label} + ))} +
+
+
+

Amazon EKS - Modular

+

Comprehensive modules covering critical Amazon EKS features and integrations

+ + Explore + +
+ {siteConfig.customFields.secondaryNav.eksGroup.items.map((item, i) => ( + {item.label} + ))} +
+
+ +
+
+
+ ); +} diff --git a/website/src/components/HomepageModuleLink/styles.module.css b/website/src/components/HomepageModuleLink/styles.module.css new file mode 100644 index 0000000000..d26d1908bf --- /dev/null +++ b/website/src/components/HomepageModuleLink/styles.module.css @@ -0,0 +1,116 @@ +.features { + display: flex; + align-items: center; + padding: 2rem 0; + width: 100%; + background-color: white; + color: rgb(28, 30, 33); +} + +[data-theme="dark"] .features { + background-color: var(--ifm-background-color); + color: var(--ifm-font-color-base); +} + +.featureSvg { + height: 200px; + width: 200px; +} + +.heroBanner { + padding: 0 0 4rem 0; + text-align: center; + position: relative; + overflow: hidden; + color: white; +} + +@media screen and (max-width: 996px) { + .heroBanner { + padding: 2rem; + } +} + +.pathSelection { + display: flex; + gap: 2rem; + justify-content: center; + margin-top: 2rem; + flex-wrap: wrap; +} + +.pathCard { + background: rgba(255, 255, 255, 0.1); + border: 2px solid rgba(255, 255, 255, 0.2); + border-radius: 12px; + padding: 2rem; + max-width: 400px; + flex: 1; + min-width: 300px; + transition: transform 0.2s, border-color 0.2s, background 0.2s; +} + +[data-theme="dark"] .pathCard { + background: rgba(255, 255, 255, 0.05); + border-color: rgba(255, 255, 255, 0.15); +} + +.pathCard:hover { + transform: translateY(-4px); + border-color: rgba(255, 255, 255, 0.4); +} + +[data-theme="dark"] .pathCard:hover { + background: rgba(255, 255, 255, 0.08); + border-color: rgba(255, 255, 255, 0.3); +} + +.pathCard h3 { + margin-top: 0; + margin-bottom: 1rem; + font-size: 1.5rem; +} + +.pathCard p { + margin-bottom: 1.5rem; + opacity: 0.9; + line-height: 1.5; +} + +.newBadge { + display: inline-block; + background: #ff9900; + color: white; + font-size: 0.7rem; + font-weight: 700; + padding: 0.2rem 0.5rem; + border-radius: 4px; + margin-left: 0.5rem; + vertical-align: middle; +} + +.moduleLinks { + display: flex; + flex-wrap: wrap; + justify-content: center; + gap: 0.5rem; + margin-top: 1.5rem; + padding-top: 1.5rem; + border-top: 1px solid rgba(255, 255, 255, 0.2); +} + +.moduleLink { + color: rgba(255, 255, 255, 0.9); + text-decoration: none; + font-size: 0.9rem; + transition: color 0.2s; +} + +.moduleLink:hover { + color: white; + text-decoration: underline; +} +.disabled { + opacity: 0.5; + pointer-events: none; +} diff --git a/website/src/components/SecondaryNav/index.js b/website/src/components/SecondaryNav/index.js new file mode 100644 index 0000000000..72baf5cf1e --- /dev/null +++ b/website/src/components/SecondaryNav/index.js @@ -0,0 +1,97 @@ +import React, { useEffect } from 'react'; +import { useLocation } from '@docusaurus/router'; +import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; +import Link from '@docusaurus/Link'; +import styles from './styles.module.css'; + +export default function SecondaryNav() { + const location = useLocation(); + const { siteConfig } = useDocusaurusContext(); + + const isAutoMode = location.pathname.includes('/fastpaths/'); + const isTraditional = location.pathname.includes('/docs/') && !isAutoMode; + const isHomePage = location.pathname === '/' || location.pathname === '/docs/'; + + const { eksGroup, autoModeGroup } = siteConfig.customFields.secondaryNav; + + // Remember the last page for each context + useEffect(() => { + if (isTraditional) { + localStorage.setItem('lastTraditionalPage', location.pathname); + } else if (isAutoMode) { + localStorage.setItem('lastAutoModePage', location.pathname); + } + }, 
[location.pathname, isTraditional, isAutoMode]); + + // Get the last visited page or default + const getTargetPage = (context) => { + if (typeof window === 'undefined') { + return context === 'autoMode' ? '/docs/fastpaths/setup' : '/docs/introduction'; + } + if (context === 'autoMode') { + return localStorage.getItem('lastAutoModePage') || '/docs/fastpaths/setup'; + } else { + return localStorage.getItem('lastTraditionalPage') || '/docs/introduction'; + } + }; + + if (isHomePage) { + return null; + } + + if (isTraditional) { + return ( +
+
+ {eksGroup.items.map((item, i) => { + const normalizedTo = item.to.replace(/\/+$/, ''); + const normalizedPath = location.pathname.replace(/\/+$/, ''); + const isActive = normalizedPath === normalizedTo || + (normalizedPath.startsWith(normalizedTo + '/') && + !eksGroup.items.some(other => { + const otherTo = other.to.replace(/\/+$/, ''); + return otherTo !== normalizedTo && + otherTo.startsWith(normalizedTo + '/') && + normalizedPath.startsWith(otherTo); + })); + return ( + {item.label} + ); + })} +
+
+ Switch to Essentials → +
+
+ ); + } + + if (isAutoMode) { + return ( +
+
+ {autoModeGroup.items.map((item, i) => { + const normalizedTo = item.to.replace(/\/+$/, ''); + const normalizedPath = location.pathname.replace(/\/+$/, ''); + const isActive = normalizedPath === normalizedTo || + (normalizedPath.startsWith(normalizedTo + '/') && + !autoModeGroup.items.some(other => { + const otherTo = other.to.replace(/\/+$/, ''); + return otherTo !== normalizedTo && + otherTo.startsWith(normalizedTo + '/') && + normalizedPath.startsWith(otherTo); + })); + return ( + {item.label} + ); + })} +
+
+ Switch to Modular → +
+
+ ); + } + + return null; +} diff --git a/website/src/components/SecondaryNav/styles.module.css b/website/src/components/SecondaryNav/styles.module.css new file mode 100644 index 0000000000..46faf3eb70 --- /dev/null +++ b/website/src/components/SecondaryNav/styles.module.css @@ -0,0 +1,101 @@ +.secondaryNavContainer { + background: var(--ifm-secondary-nav-background-color, #1a2332); + border-top: 1px solid rgba(255, 255, 255, 0.05); + border-bottom: 1px solid rgba(255, 255, 255, 0.1); + padding: 0.4rem clamp(0.75rem, 2.5vw, 2rem); + display: flex; + align-items: center; + gap: clamp(0.3rem, 1vw, 0.75rem); + font-size: clamp(0.75rem, 1.5vw, 0.9rem); + position: sticky; + top: var(--ifm-navbar-height); + z-index: 99; + overflow-x: auto; + overflow-y: hidden; + white-space: nowrap; + scrollbar-width: none; + width: 100%; + box-sizing: border-box; +} + +@media (max-width: 1200px) { + .secondaryNavContainer { + flex-wrap: wrap; + overflow-x: visible; + row-gap: 0.5rem; + } + + .navDivider { + display: none; + } +} + +.secondaryNavContainer::-webkit-scrollbar { + display: none; +} + +.secondaryNavContainer:has(.contextSwitcher) { + justify-content: space-between; +} + +.navSection { + display: flex; + align-items: center; + gap: clamp(0.3rem, 1vw, 0.75rem); + flex-shrink: 0; +} + +@media (max-width: 1200px) { + .navSection { + flex-wrap: wrap; + } +} + +.navLabel { + font-weight: 600; + color: var(--ifm-navbar-link-color); + margin-right: 0.5rem; + opacity: 0.9; +} + +[data-theme="light"] .navLabel { + color: #ffffff; + opacity: 0.85; +} + +.navSection a { + color: var(--ifm-navbar-link-color); + text-decoration: none; + transition: color 0.2s; + white-space: nowrap; +} + +.navSection a:hover { + color: var(--ifm-navbar-link-hover-color); +} + +.navDivider { + display: none; +} + +.contextSwitcher { + margin-left: auto; +} + +.contextSwitcher a { + color: var(--ifm-navbar-link-color); + text-decoration: none; + font-size: 0.85rem; + opacity: 0.8; + transition: opacity 0.2s; +} + +.contextSwitcher a:hover { + opacity: 1; + color: var(--ifm-navbar-link-hover-color); +} + +.activeLink { + color: var(--ifm-navbar-link-hover-color) !important; + font-weight: 600; +} diff --git a/website/src/css/custom.scss b/website/src/css/custom.scss index 36a884138e..a34c98ab38 100644 --- a/website/src/css/custom.scss +++ b/website/src/css/custom.scss @@ -13,8 +13,9 @@ --ifm-color-primary-light: #33925d; --ifm-color-primary-lighter: #359962; --ifm-color-primary-lightest: #3cad6e; - --ifm-hero-background-color: #000716; + --ifm-hero-background-color: #0f1b2a; --ifm-navbar-background-color: #000716; + --ifm-secondary-nav-background-color: #1a2332; --ifm-navbar-link-color: #d1d5db; --ifm-navbar-link-hover-color: #ff9900; --ifm-menu-color: #414d5c; @@ -52,11 +53,34 @@ --ifm-tabs-color-active-border: #ff9900; --dark-search-text-color: #999; --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); + + /* Tone down admonition colors in dark mode */ + --ifm-color-warning-contrast-background: #3d3416; + --ifm-color-warning-contrast-foreground: #f0d090; + --ifm-color-success-contrast-background: #1a3a2e; + --ifm-color-success-contrast-foreground: #90d0a0; } .navbar__title { margin-left: 1rem; margin-right: 1rem; + font-size: clamp(0.85rem, 4vw, 1.5rem); + font-weight: 400; +} + +@media (max-width: 600px) { + .navbar__title { + font-size: 1.1rem !important; + } + + .navbar__brand { + margin-right: 1.5rem !important; + flex-shrink: 0; + } + + .navbar__items { + gap: 1rem !important; + } } .footer { @@ -211,6 +235,15 @@ 
background-color: #0073bb; } +/* A muted sage green works well for optional content - it's noticeable but not distracting */ +.optional { + background-color: #7fb39c; +} + +.hidden { + display: none !important; +} + .category-wrapper { display: flex; width: 100%; @@ -239,18 +272,23 @@ .navbar__items { display: flex; flex-wrap: nowrap; - /* adjust this when adding new nav bar items */ - max-width: 1050px; } -/* Navbar items that dynamically shrink */ +/* Hide navbar items on desktop - they're in secondary nav */ .navbar__item { - flex-grow: 1; - flex-shrink: 1; - padding: 0 0px; - font-size: clamp(0.8em, 1.2vw, 1em); - white-space: nowrap; - text-overflow: ellipsis; + display: none; +} + +/* Keep the locale dropdown and GitHub link visible on desktop */ +@media (min-width: 997px) { + .navbar__items--right .navbar__item { + display: flex; + } +} + +/* Show navbar items only in mobile sidebar */ +.navbar-sidebar .navbar__item { + display: block; } .navbar__items--right .dropdown--right { @@ -259,10 +297,6 @@ /* docusaurus switches to mobile view at 996px */ @media (min-width: 997px) and (max-width: 1250px) { - .navbar__brand .navbar__title { - display: none; /* Hide the navbar title */ - } - .navbar__items { max-width: 80%; } @@ -271,3 +305,28 @@ .navbar__search-input { max-width: 150px; /* Set a maximum width for the search input */ } + +@media (max-width: 550px) { + .navbar__search-input { + margin-left: 1rem; + } +} + +/* Tone down admonitions in dark mode */ +[data-theme="dark"] .theme-admonition-caution, +[data-theme="dark"] .alert--warning { + background-color: rgba(255, 153, 0, 0.15); + border-color: rgba(255, 153, 0, 0.4); +} + +[data-theme="dark"] .theme-admonition-tip, +[data-theme="dark"] .alert--success { + background-color: rgba(0, 200, 83, 0.15); + border-color: rgba(0, 200, 83, 0.4); +} + +[data-theme="dark"] .theme-admonition-warning, +[data-theme="dark"] .alert--danger { + background-color: rgba(250, 82, 82, 0.15); + border-color: rgba(250, 82, 82, 0.4); +} diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 58e6ea30a3..051287b5ae 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -4,6 +4,7 @@ import Link from "@docusaurus/Link"; import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; import Layout from "@theme/Layout"; import HomepageFeatures from "@site/src/components/HomepageFeatures"; +import HomepageModuleLink from "@site/src/components/HomepageModuleLink"; import HomepageVideo from "@site/src/components/HomepageVideo"; import Translate from "@docusaurus/Translate"; @@ -11,6 +12,7 @@ import styles from "./index.module.css"; function HomepageHeader() { const { siteConfig } = useDocusaurusContext(); + return (
@@ -50,6 +52,7 @@ export default function Home() { description="Amazon Web Services workshop for Elastic Kubernetes Service" > +
diff --git a/website/src/pages/index.module.css b/website/src/pages/index.module.css index 711845feb0..1d2b9a07a2 100644 --- a/website/src/pages/index.module.css +++ b/website/src/pages/index.module.css @@ -4,7 +4,7 @@ */ .heroBanner { - padding: 4rem 0; + padding: 0; text-align: center; position: relative; overflow: hidden; @@ -22,3 +22,83 @@ align-items: center; justify-content: center; } + +.pathSelection { + display: flex; + gap: 2rem; + justify-content: center; + margin-top: 2rem; + flex-wrap: wrap; +} + +.pathCard { + background: rgba(255, 255, 255, 0.1); + border: 2px solid rgba(255, 255, 255, 0.2); + border-radius: 12px; + padding: 2rem; + max-width: 400px; + flex: 1; + min-width: 300px; + transition: transform 0.2s, border-color 0.2s, background 0.2s; +} + +[data-theme="dark"] .pathCard { + background: rgba(255, 255, 255, 0.05); + border-color: rgba(255, 255, 255, 0.15); +} + +.pathCard:hover { + transform: translateY(-4px); + border-color: rgba(255, 255, 255, 0.4); +} + +[data-theme="dark"] .pathCard:hover { + background: rgba(255, 255, 255, 0.08); + border-color: rgba(255, 255, 255, 0.3); +} + +.pathCard h3 { + margin-top: 0; + margin-bottom: 1rem; + font-size: 1.5rem; +} + +.pathCard p { + margin-bottom: 1.5rem; + opacity: 0.9; + line-height: 1.5; +} + +.newBadge { + display: inline-block; + background: #ff9900; + color: white; + font-size: 0.7rem; + font-weight: 700; + padding: 0.2rem 0.5rem; + border-radius: 4px; + margin-left: 0.5rem; + vertical-align: middle; +} + +.moduleLinks { + display: flex; + flex-wrap: wrap; + justify-content: center; + gap: 0.5rem; + margin-top: 1.5rem; + padding-top: 1.5rem; + border-top: 1px solid rgba(255, 255, 255, 0.2); +} + +.moduleLink { + color: rgba(255, 255, 255, 0.9); + text-decoration: none; + font-size: 0.9rem; + transition: color 0.2s; +} + +.moduleLink:hover { + color: white; + text-decoration: underline; +} diff --git a/website/src/remark/include-yaml.js b/website/src/remark/include-yaml.js index bec349f7a4..b1820e9e3b 100644 --- a/website/src/remark/include-yaml.js +++ b/website/src/remark/include-yaml.js @@ -190,13 +190,21 @@ function getLinesForPath(inputString, lookup) { const tokens = parser.parse(inputString); const docs = new YAML.Composer().compose(tokens); + const docsArray = Array.from(docs); + + // Support document index prefix (e.g., "1.spec.controller" for second document) + let docIndex = 0; + let pathElements = lookup.split(".").map((e) => e.trim()); + + if (pathElements.length > 0 && isInt(pathElements[0])) { + docIndex = parseInt(pathElements[0]); + pathElements = pathElements.slice(1); + } + - const doc = Array.from(docs)[0]; + const doc = docsArray[docIndex]; - const target = findByPath( - doc.contents, - lookup.split(".").map((e) => e.trim()), - ); + const target = findByPath(doc.contents, pathElements); const startLine = lineCounter.linePos(target.start).line; const endLine = lineCounter.linePos(target.end).line; diff --git a/website/src/remark/time.js b/website/src/remark/time.js index 2893e516c2..f9a722c208 100644 --- a/website/src/remark/time.js +++ b/website/src/remark/time.js @@ -34,15 +34,19 @@ const plugin = (options) => { const attributes = { ...defaultAttributes, ...node.attributes }; const filePath = vfile.history[0]; + const projectDir = process.cwd(); + const docsDir = `${projectDir}/docs`; let relativePath; - if (locale === "en") { - relativePath = path.relative(`${vfile.cwd}/docs`, filePath); + if (locale !== "en") { + const i18nDocsDir = 
`${projectDir}/i18n/${locale}/docusaurus-plugin-content-docs/current`; + if (filePath.startsWith(i18nDocsDir)) { + relativePath = path.relative(i18nDocsDir, filePath); + } else { + relativePath = path.relative(docsDir, filePath); + } } else { - relativePath = path.relative( - `${vfile.cwd}/i18n/${locale}/docusaurus-plugin-content-docs/current`, - filePath, - ); + relativePath = path.relative(docsDir, filePath); } if (attributes.estimatedLabExecutionTimeMinutes === "0") { diff --git a/website/src/theme/DocSidebarItem/Category/index.js b/website/src/theme/DocSidebarItem/Category/index.js index d64f3122cc..f4b4a8753a 100644 --- a/website/src/theme/DocSidebarItem/Category/index.js +++ b/website/src/theme/DocSidebarItem/Category/index.js @@ -187,6 +187,11 @@ export default function DocSidebarItemCategory({ ) : ( )} + {item.customProps?.optional ? ( + OPTIONAL + ) : ( + + )}
diff --git a/website/src/theme/DocSidebarItem/Link/index.js b/website/src/theme/DocSidebarItem/Link/index.js index 5d4c4ac7f8..4a4a5733b1 100644 --- a/website/src/theme/DocSidebarItem/Link/index.js +++ b/website/src/theme/DocSidebarItem/Link/index.js @@ -61,6 +61,11 @@ export default function DocSidebarItemLink({ ) : ( )} + {item.customProps?.optional ? ( + OPTIONAL + ) : ( + + )} {!isInternalLink && } diff --git a/website/src/theme/Navbar/Content/index.js b/website/src/theme/Navbar/Content/index.js new file mode 100644 index 0000000000..f7c8aea189 --- /dev/null +++ b/website/src/theme/Navbar/Content/index.js @@ -0,0 +1,6 @@ +import React from 'react'; +import Content from '@theme-original/Navbar/Content'; + +export default function NavbarContent(props) { + return ; +} diff --git a/website/src/theme/Navbar/index.js b/website/src/theme/Navbar/index.js new file mode 100644 index 0000000000..52101596bc --- /dev/null +++ b/website/src/theme/Navbar/index.js @@ -0,0 +1,12 @@ +import React from 'react'; +import Navbar from '@theme-original/Navbar'; +import SecondaryNav from '@site/src/components/SecondaryNav'; + +export default function NavbarWrapper(props) { + return ( + <> + + + + ); +} diff --git a/website/docs/automation/gitops/flux/assets/ci.drawio b/website/static/img/automation/gitops/flux/ci.drawio similarity index 100% rename from website/docs/automation/gitops/flux/assets/ci.drawio rename to website/static/img/automation/gitops/flux/ci.drawio diff --git a/website/static/img/fastpaths/developer/amazon-eks-pod-identity/error.webp b/website/static/img/fastpaths/developer/amazon-eks-pod-identity/error.webp new file mode 100644 index 0000000000..9ff4c1aef8 Binary files /dev/null and b/website/static/img/fastpaths/developer/amazon-eks-pod-identity/error.webp differ diff --git a/website/static/img/fastpaths/developer/ebs/placeholder.jpg b/website/static/img/fastpaths/developer/ebs/placeholder.jpg new file mode 100644 index 0000000000..3bc3cbfec2 Binary files /dev/null and b/website/static/img/fastpaths/developer/ebs/placeholder.jpg differ diff --git a/website/static/img/fastpaths/developer/ingress/multiple-ingress-lb.webp b/website/static/img/fastpaths/developer/ingress/multiple-ingress-lb.webp new file mode 100644 index 0000000000..c7f21b056b Binary files /dev/null and b/website/static/img/fastpaths/developer/ingress/multiple-ingress-lb.webp differ diff --git a/website/static/img/fastpaths/developer/ingress/multiple-ingress-listener.webp b/website/static/img/fastpaths/developer/ingress/multiple-ingress-listener.webp new file mode 100644 index 0000000000..08a8c0bef9 Binary files /dev/null and b/website/static/img/fastpaths/developer/ingress/multiple-ingress-listener.webp differ diff --git a/website/static/img/fastpaths/developer/ingress/multiple-ingress-rules.webp b/website/static/img/fastpaths/developer/ingress/multiple-ingress-rules.webp new file mode 100644 index 0000000000..f809553601 Binary files /dev/null and b/website/static/img/fastpaths/developer/ingress/multiple-ingress-rules.webp differ diff --git a/website/static/img/fastpaths/developer/ingress/web-ui.webp b/website/static/img/fastpaths/developer/ingress/web-ui.webp new file mode 100644 index 0000000000..2221ddcbde Binary files /dev/null and b/website/static/img/fastpaths/developer/ingress/web-ui.webp differ diff --git a/website/static/img/fastpaths/developer/pod-logging/fluentbit-architecture.png b/website/static/img/fastpaths/developer/pod-logging/fluentbit-architecture.png new file mode 100644 index 0000000000..5a3c58fd0b Binary 
files /dev/null and b/website/static/img/fastpaths/developer/pod-logging/fluentbit-architecture.png differ diff --git a/website/static/img/fastpaths/developer/pod-logging/fluentbit-architecture.webp b/website/static/img/fastpaths/developer/pod-logging/fluentbit-architecture.webp new file mode 100644 index 0000000000..17eb4bfacc Binary files /dev/null and b/website/static/img/fastpaths/developer/pod-logging/fluentbit-architecture.webp differ diff --git a/website/static/img/fastpaths/developer/pod-logging/log-group.webp b/website/static/img/fastpaths/developer/pod-logging/log-group.webp new file mode 100644 index 0000000000..4222e18c78 Binary files /dev/null and b/website/static/img/fastpaths/developer/pod-logging/log-group.webp differ diff --git a/website/static/img/fastpaths/developer/pod-logging/log-streams.webp b/website/static/img/fastpaths/developer/pod-logging/log-streams.webp new file mode 100644 index 0000000000..78071c3876 Binary files /dev/null and b/website/static/img/fastpaths/developer/pod-logging/log-streams.webp differ diff --git a/website/static/img/fastpaths/developer/pod-logging/logs.webp b/website/static/img/fastpaths/developer/pod-logging/logs.webp new file mode 100644 index 0000000000..c36ca643f1 Binary files /dev/null and b/website/static/img/fastpaths/developer/pod-logging/logs.webp differ diff --git a/website/static/img/fastpaths/fast-path-options.png b/website/static/img/fastpaths/fast-path-options.png new file mode 100644 index 0000000000..8cd865cbc5 Binary files /dev/null and b/website/static/img/fastpaths/fast-path-options.png differ diff --git a/website/static/img/fastpaths/getting-started/catalog-microservice.webp b/website/static/img/fastpaths/getting-started/catalog-microservice.webp new file mode 100644 index 0000000000..3e213f9481 Binary files /dev/null and b/website/static/img/fastpaths/getting-started/catalog-microservice.webp differ diff --git a/website/static/img/fastpaths/getting-started/ide-base.webp b/website/static/img/fastpaths/getting-started/ide-base.webp new file mode 100644 index 0000000000..49b50ab379 Binary files /dev/null and b/website/static/img/fastpaths/getting-started/ide-base.webp differ diff --git a/website/static/img/fastpaths/getting-started/ide-initial.webp b/website/static/img/fastpaths/getting-started/ide-initial.webp new file mode 100644 index 0000000000..f1861e6529 Binary files /dev/null and b/website/static/img/fastpaths/getting-started/ide-initial.webp differ diff --git a/website/static/img/fastpaths/getting-started/ide-modules.webp b/website/static/img/fastpaths/getting-started/ide-modules.webp new file mode 100644 index 0000000000..d28b9902c8 Binary files /dev/null and b/website/static/img/fastpaths/getting-started/ide-modules.webp differ diff --git a/website/static/img/fastpaths/getting-started/microservices.webp b/website/static/img/fastpaths/getting-started/microservices.webp new file mode 100644 index 0000000000..e00e007ee1 Binary files /dev/null and b/website/static/img/fastpaths/getting-started/microservices.webp differ diff --git a/website/static/img/fastpaths/ide-open.png b/website/static/img/fastpaths/ide-open.png new file mode 100644 index 0000000000..852b6095e3 Binary files /dev/null and b/website/static/img/fastpaths/ide-open.png differ diff --git a/website/static/img/fastpaths/introduction/paste-in-firefox-safari.png b/website/static/img/fastpaths/introduction/paste-in-firefox-safari.png new file mode 100644 index 0000000000..1b886a38ab Binary files /dev/null and 
b/website/static/img/fastpaths/introduction/paste-in-firefox-safari.png differ diff --git a/website/static/img/fastpaths/introduction/paste-warning-in-firefox-safari.png b/website/static/img/fastpaths/introduction/paste-warning-in-firefox-safari.png new file mode 100644 index 0000000000..fba7aec642 Binary files /dev/null and b/website/static/img/fastpaths/introduction/paste-warning-in-firefox-safari.png differ diff --git a/website/static/img/fastpaths/operator/amazon-eks-pod-identity/error.webp b/website/static/img/fastpaths/operator/amazon-eks-pod-identity/error.webp new file mode 100644 index 0000000000..9ff4c1aef8 Binary files /dev/null and b/website/static/img/fastpaths/operator/amazon-eks-pod-identity/error.webp differ diff --git a/website/static/img/fastpaths/operator/karpenter/karpenter-diagram.webp b/website/static/img/fastpaths/operator/karpenter/karpenter-diagram.webp new file mode 100644 index 0000000000..a5ecca24de Binary files /dev/null and b/website/static/img/fastpaths/operator/karpenter/karpenter-diagram.webp differ diff --git a/website/static/img/fastpaths/operator/secrets-manager/choose-type.webp b/website/static/img/fastpaths/operator/secrets-manager/choose-type.webp new file mode 100644 index 0000000000..f7b168a8b1 Binary files /dev/null and b/website/static/img/fastpaths/operator/secrets-manager/choose-type.webp differ diff --git a/website/static/img/fastpaths/operator/secrets-manager/configure-secret.webp b/website/static/img/fastpaths/operator/secrets-manager/configure-secret.webp new file mode 100644 index 0000000000..f59d0a3122 Binary files /dev/null and b/website/static/img/fastpaths/operator/secrets-manager/configure-secret.webp differ diff --git a/website/static/img/fastpaths/operator/secrets-manager/store-new-secret.webp b/website/static/img/fastpaths/operator/secrets-manager/store-new-secret.webp new file mode 100644 index 0000000000..17da92b42c Binary files /dev/null and b/website/static/img/fastpaths/operator/secrets-manager/store-new-secret.webp differ diff --git a/website/docs/fundamentals/compute/managed-node-groups/graviton/assets/eks-graviton.drawio b/website/static/img/fundamentals/compute/managed-node-groups/graviton/eks-graviton.drawio similarity index 100% rename from website/docs/fundamentals/compute/managed-node-groups/graviton/assets/eks-graviton.drawio rename to website/static/img/fundamentals/compute/managed-node-groups/graviton/eks-graviton.drawio diff --git a/website/static/img/sample-app-screens/home.webp b/website/static/img/sample-app-screens/home.webp index 9efecbb5b5..2f2730d06f 100644 Binary files a/website/static/img/sample-app-screens/home.webp and b/website/static/img/sample-app-screens/home.webp differ diff --git a/website/test-durations.json b/website/test-durations.json index a6e75ad7bd..be7b5030ab 100644 --- a/website/test-durations.json +++ b/website/test-durations.json @@ -31,6 +31,58 @@ "/automation/gitops/flux/gitea.md": 169677, "/automation/gitops/flux/index.md": 188933, "/automation/gitops/index.md": 0, + "/fastpaths/developer/amazon-eks-pod-identity/index.md": 1, + "/fastpaths/developer/amazon-eks-pod-identity/introduction.md": 1, + "/fastpaths/developer/amazon-eks-pod-identity/understanding.md": 1, + "/fastpaths/developer/amazon-eks-pod-identity/use-pod-identity.md": 1, + "/fastpaths/developer/amazon-eks-pod-identity/using-dynamo.md": 1, + "/fastpaths/developer/amazon-eks-pod-identity/verifying-dynamo.md": 1, + "/fastpaths/developer/ebs/deployment-with-ebs.md": 1, + "/fastpaths/developer/ebs/existing-architecture.md": 1, 
+ "/fastpaths/developer/ebs/index.md": 1, + "/fastpaths/developer/index.md": 1, + "/fastpaths/developer/ingress/adding-ingress.md": 1, + "/fastpaths/developer/ingress/index.md": 1, + "/fastpaths/developer/keda/configure-keda.md": 1, + "/fastpaths/developer/keda/index.md": 1, + "/fastpaths/developer/keda/install-keda.md": 1, + "/fastpaths/developer/keda/test-keda.md": 1, + "/fastpaths/developer/pod-logging/fluent-bit-cloudwatch.md": 1, + "/fastpaths/developer/pod-logging/fluentbit-setup.md": 1, + "/fastpaths/developer/pod-logging/index.md": 1, + "/fastpaths/explore/index.md": 1, + "/fastpaths/getting-started/about.md": 1, + "/fastpaths/getting-started/finish.md": 1, + "/fastpaths/getting-started/first.md": 1, + "/fastpaths/getting-started/index.md": 1, + "/fastpaths/index.md": 1, + "/fastpaths/navigating-labs.md": 1, + "/fastpaths/operator/amazon-eks-pod-identity/index.md": 1, + "/fastpaths/operator/amazon-eks-pod-identity/introduction.md": 1, + "/fastpaths/operator/amazon-eks-pod-identity/understanding.md": 1, + "/fastpaths/operator/amazon-eks-pod-identity/use-pod-identity.md": 1, + "/fastpaths/operator/amazon-eks-pod-identity/using-dynamo.md": 1, + "/fastpaths/operator/amazon-eks-pod-identity/verifying-dynamo.md": 1, + "/fastpaths/operator/index.md": 1, + "/fastpaths/operator/karpenter/consolidation.md": 1, + "/fastpaths/operator/karpenter/index.md": 1, + "/fastpaths/operator/karpenter/node-provisioning.md": 1, + "/fastpaths/operator/karpenter/setup-provisioner.md": 1, + "/fastpaths/operator/network-policies/egress.md": 1, + "/fastpaths/operator/network-policies/index.md": 1, + "/fastpaths/operator/network-policies/ingress.md": 1, + "/fastpaths/operator/network-policies/setup.md": 1, + "/fastpaths/operator/secrets-manager/ascp.md": 1, + "/fastpaths/operator/secrets-manager/create-secret.md": 1, + "/fastpaths/operator/secrets-manager/external-secrets.md": 1, + "/fastpaths/operator/secrets-manager/index.md": 1, + "/fastpaths/operator/secrets-manager/mounting-secrets.md": 1, + "/fastpaths/setup/aws-event.md": 1, + "/fastpaths/setup/index.md": 1, + "/fastpaths/setup/your-account/cleanup.md": 1, + "/fastpaths/setup/your-account/index.md": 1, + "/fastpaths/setup/your-account/using-eksctl.md": 1, + "/fastpaths/setup/your-account/using-terraform.md": 1, "/fundamentals/compute/fargate/enabling.md": 191830, "/fundamentals/compute/fargate/index.md": 126307, "/fundamentals/compute/fargate/scaling.md": 83672, @@ -95,6 +147,18 @@ "/fundamentals/workloads/keda/install-keda.md": 37918, "/fundamentals/workloads/keda/test-keda.md": 94797, "/fundamentals/workloads/keda/validate-ingress.md": 103610, + "/introduction/basics/configuration/configmaps/index.md": 25044, + "/introduction/basics/configuration/index.md": 1, + "/introduction/basics/configuration/secrets/index.md": 9419, + "/introduction/basics/index.md": 87253, + "/introduction/basics/namespaces/index.md": 11632, + "/introduction/basics/pods/index.md": 22068, + "/introduction/basics/services/index.md": 61307, + "/introduction/basics/workload-management/daemonsets.md": 2563, + "/introduction/basics/workload-management/deployments.md": 13833, + "/introduction/basics/workload-management/index.md": 1, + "/introduction/basics/workload-management/jobs.md": 33585, + "/introduction/basics/workload-management/statefulsets.md": 8602, "/introduction/getting-started/finish.md": 17926, "/introduction/getting-started/first.md": 12371, "/introduction/getting-started/index.md": 832, diff --git a/yarn.lock b/yarn.lock index 4bc494799b..b491d78aba 100644 --- 
a/yarn.lock +++ b/yarn.lock @@ -4238,6 +4238,24 @@ __metadata: languageName: node linkType: hard +"@puppeteer/browsers@npm:2.6.1": + version: 2.6.1 + resolution: "@puppeteer/browsers@npm:2.6.1" + dependencies: + debug: "npm:^4.4.0" + extract-zip: "npm:^2.0.1" + progress: "npm:^2.0.3" + proxy-agent: "npm:^6.5.0" + semver: "npm:^7.6.3" + tar-fs: "npm:^3.0.6" + unbzip2-stream: "npm:^1.4.3" + yargs: "npm:^17.7.2" + bin: + browsers: lib/cjs/main-cli.js + checksum: 10c0/31d4951eec40515769467be3878d3581fe0e50227f2a9fa865e9f872e4a003262996c412a1d48d9c800665b3aa91bb1c2d971eaa314ef10e536d08e63f2f40d3 + languageName: node + linkType: hard + "@reteps/dockerfmt@npm:^0.3.6": version: 0.3.6 resolution: "@reteps/dockerfmt@npm:0.3.6" @@ -5529,6 +5547,15 @@ __metadata: languageName: node linkType: hard +"@types/yauzl@npm:^2.9.1": + version: 2.10.3 + resolution: "@types/yauzl@npm:2.10.3" + dependencies: + "@types/node": "npm:*" + checksum: 10c0/f1b7c1b99fef9f2fe7f1985ef7426d0cebe48cd031f1780fcdc7451eec7e31ac97028f16f50121a59bcf53086a1fc8c856fd5b7d3e00970e43d92ae27d6b43dc + languageName: node + linkType: hard + "@ungap/structured-clone@npm:^1.0.0": version: 1.3.0 resolution: "@ungap/structured-clone@npm:1.3.0" @@ -6107,6 +6134,18 @@ __metadata: languageName: node linkType: hard +"b4a@npm:^1.6.4": + version: 1.8.0 + resolution: "b4a@npm:1.8.0" + peerDependencies: + react-native-b4a: "*" + peerDependenciesMeta: + react-native-b4a: + optional: true + checksum: 10c0/27eab5c50ea1f1314f36256f160d2e6d6950f55f02ee4942732ecafd8bcc4b3a2ed209fab532b288770d41df2befa97a2745175c06471875b716eb87abf31519 + languageName: node + linkType: hard + "babel-loader@npm:^9.1.3": version: 9.2.1 resolution: "babel-loader@npm:9.2.1" @@ -6186,6 +6225,96 @@ __metadata: languageName: node linkType: hard +"balanced-match@npm:^4.0.2": + version: 4.0.4 + resolution: "balanced-match@npm:4.0.4" + checksum: 10c0/07e86102a3eb2ee2a6a1a89164f29d0dbaebd28f2ca3f5ca786f36b8b23d9e417eb3be45a4acf754f837be5ac0a2317de90d3fcb7f4f4dc95720a1f36b26a17b + languageName: node + linkType: hard + +"bare-events@npm:^2.5.4, bare-events@npm:^2.7.0": + version: 2.8.2 + resolution: "bare-events@npm:2.8.2" + peerDependencies: + bare-abort-controller: "*" + peerDependenciesMeta: + bare-abort-controller: + optional: true + checksum: 10c0/53fef240cf2cdcca62f78b6eead90ddb5a59b0929f414b13a63764c2b4f9de98ea8a578d033b04d64bb7b86dfbc402e937984e69950855cc3754c7b63da7db21 + languageName: node + linkType: hard + +"bare-fs@npm:^4.0.1, bare-fs@npm:^4.5.5": + version: 4.7.1 + resolution: "bare-fs@npm:4.7.1" + dependencies: + bare-events: "npm:^2.5.4" + bare-path: "npm:^3.0.0" + bare-stream: "npm:^2.6.4" + bare-url: "npm:^2.2.2" + fast-fifo: "npm:^1.3.2" + peerDependencies: + bare-buffer: "*" + peerDependenciesMeta: + bare-buffer: + optional: true + checksum: 10c0/4dc67f6dd0264b817941c2b8cbfc42b6abc3980984cdfd129c4d1f22517cb29f6b99a69fc1e3e87f3a9c997e8c94114604bb67fff10574b2adf0966510cf0222 + languageName: node + linkType: hard + +"bare-os@npm:^3.0.1": + version: 3.8.7 + resolution: "bare-os@npm:3.8.7" + checksum: 10c0/6541b223a196a58b52e1103ef1f04d35018c1b56b6c250410fc54680767624273691ece741eb88502c95b058ab90b632972348a9231410df05c5df61a62c9c08 + languageName: node + linkType: hard + +"bare-path@npm:^3.0.0": + version: 3.0.0 + resolution: "bare-path@npm:3.0.0" + dependencies: + bare-os: "npm:^3.0.1" + checksum: 10c0/56a3ca82a9f808f4976cb1188640ac206546ce0ddff582afafc7bd2a6a5b31c3bd16422653aec656eeada2830cfbaa433c6cbf6d6b4d9eba033d5e06d60d9a68 + languageName: node + linkType: hard + 
+"bare-stream@npm:^2.6.4": + version: 2.13.0 + resolution: "bare-stream@npm:2.13.0" + dependencies: + streamx: "npm:^2.25.0" + teex: "npm:^1.0.1" + peerDependencies: + bare-abort-controller: "*" + bare-buffer: "*" + bare-events: "*" + peerDependenciesMeta: + bare-abort-controller: + optional: true + bare-buffer: + optional: true + bare-events: + optional: true + checksum: 10c0/3c81f169d3bda8af430c5cb0a1cf29f3f697f25fb0863941582112e588680fdfe28357083edcee4c099d3df5a7e3f4145ccc9552d9c7d9b5cab195644fff53d5 + languageName: node + linkType: hard + +"bare-url@npm:^2.2.2": + version: 2.4.1 + resolution: "bare-url@npm:2.4.1" + dependencies: + bare-path: "npm:^3.0.0" + checksum: 10c0/cb9301481a1781ec6ee1dbba970d8d880587162787253009a8269a2ff370833b0cf2c1b39ffad23b2b3ccf0c07e80db4c58a4d247cef1556334c0ec7584f213d + languageName: node + linkType: hard + +"base64-js@npm:^1.3.1": + version: 1.5.1 + resolution: "base64-js@npm:1.5.1" + checksum: 10c0/f23823513b63173a001030fae4f2dabe283b99a9d324ade3ad3d148e218134676f1ee8568c877cd79ec1c53158dcf2d2ba527a97c606618928ba99dd930102bf + languageName: node + linkType: hard + "basic-ftp@npm:^5.0.2": version: 5.2.2 resolution: "basic-ftp@npm:5.2.2" @@ -6368,6 +6497,15 @@ __metadata: languageName: node linkType: hard +"brace-expansion@npm:^5.0.5": + version: 5.0.5 + resolution: "brace-expansion@npm:5.0.5" + dependencies: + balanced-match: "npm:^4.0.2" + checksum: 10c0/4d238e14ed4f5cc9c07285550a41cef23121ca08ba99fa9eb5b55b580dcb6bf868b8210aa10526bdc9f8dc97f33ca2a7259039c4cc131a93042beddb424c48e3 + languageName: node + linkType: hard + "braces@npm:^3.0.3, braces@npm:~3.0.2": version: 3.0.3 resolution: "braces@npm:3.0.3" @@ -6391,6 +6529,13 @@ __metadata: languageName: node linkType: hard +"buffer-crc32@npm:~0.2.3": + version: 0.2.13 + resolution: "buffer-crc32@npm:0.2.13" + checksum: 10c0/cb0a8ddf5cf4f766466db63279e47761eb825693eeba6a5a95ee4ec8cb8f81ede70aa7f9d8aeec083e781d47154290eb5d4d26b3f7a465ec57fb9e7d59c47150 + languageName: node + linkType: hard + "buffer-from@npm:^1.0.0": version: 1.1.2 resolution: "buffer-from@npm:1.1.2" @@ -6398,6 +6543,16 @@ __metadata: languageName: node linkType: hard +"buffer@npm:^5.2.1": + version: 5.7.1 + resolution: "buffer@npm:5.7.1" + dependencies: + base64-js: "npm:^1.3.1" + ieee754: "npm:^1.1.13" + checksum: 10c0/27cac81cff434ed2876058d72e7c4789d11ff1120ef32c9de48f59eab58179b66710c488987d295ae89a228f835fc66d088652dffeb8e3ba8659f80eb091d55e + languageName: node + linkType: hard + "bytes@npm:3.0.0": version: 3.0.0 resolution: "bytes@npm:3.0.0" @@ -6580,6 +6735,13 @@ __metadata: languageName: node linkType: hard +"chalk@npm:^5.0.0, chalk@npm:^5.6.2": + version: 5.6.2 + resolution: "chalk@npm:5.6.2" + checksum: 10c0/99a4b0f0e7991796b1e7e3f52dceb9137cae2a9dfc8fc0784a550dc4c558e15ab32ed70b14b21b52beb2679b4892b41a0aa44249bcb996f01e125d58477c6976 + languageName: node + linkType: hard + "chalk@npm:^5.0.1, chalk@npm:^5.2.0, chalk@npm:^5.3.0": version: 5.6.0 resolution: "chalk@npm:5.6.0" @@ -6587,13 +6749,6 @@ __metadata: languageName: node linkType: hard -"chalk@npm:^5.6.2": - version: 5.6.2 - resolution: "chalk@npm:5.6.2" - checksum: 10c0/99a4b0f0e7991796b1e7e3f52dceb9137cae2a9dfc8fc0784a550dc4c558e15ab32ed70b14b21b52beb2679b4892b41a0aa44249bcb996f01e125d58477c6976 - languageName: node - linkType: hard - "char-regex@npm:^1.0.2": version: 1.0.2 resolution: "char-regex@npm:1.0.2" @@ -6704,6 +6859,18 @@ __metadata: languageName: node linkType: hard +"chromium-bidi@npm:0.11.0": + version: 0.11.0 + resolution: "chromium-bidi@npm:0.11.0" + 
dependencies: + mitt: "npm:3.0.1" + zod: "npm:3.23.8" + peerDependencies: + devtools-protocol: "*" + checksum: 10c0/7155b1b78bc07371cc750f5a431fb7120fb96e412d24895e5107efe21056a2406f4d051c26be89d2a7355258d6322d203e6d1c4e82f4b30f9b02923de50ba6c9 + languageName: node + linkType: hard + "ci-info@npm:^3.2.0": version: 3.9.0 resolution: "ci-info@npm:3.9.0" @@ -6790,6 +6957,17 @@ __metadata: languageName: node linkType: hard +"cliui@npm:^8.0.1": + version: 8.0.1 + resolution: "cliui@npm:8.0.1" + dependencies: + string-width: "npm:^4.2.0" + strip-ansi: "npm:^6.0.1" + wrap-ansi: "npm:^7.0.0" + checksum: 10c0/4bda0f09c340cbb6dfdc1ed508b3ca080f12992c18d68c6be4d9cf51756033d5266e61ec57529e610dacbf4da1c634423b0c1b11037709cc6b09045cbd815df5 + languageName: node + linkType: hard + "clone-deep@npm:^4.0.1": version: 4.0.1 resolution: "clone-deep@npm:4.0.1" @@ -7189,6 +7367,23 @@ __metadata: languageName: node linkType: hard +"cosmiconfig@npm:^9.0.0": + version: 9.0.1 + resolution: "cosmiconfig@npm:9.0.1" + dependencies: + env-paths: "npm:^2.2.1" + import-fresh: "npm:^3.3.0" + js-yaml: "npm:^4.1.0" + parse-json: "npm:^5.2.0" + peerDependencies: + typescript: ">=4.9.5" + peerDependenciesMeta: + typescript: + optional: true + checksum: 10c0/a5d4d95599687532ee072bca60170133c24d4e08cd795529e0f22c6ce5fde9409eaf4f26e36e3d671f43270ef858fc68f3c7b0ec28e58fac7ddebda5b7725306 + languageName: node + linkType: hard + "cross-spawn@npm:^7.0.3, cross-spawn@npm:^7.0.5, cross-spawn@npm:^7.0.6": version: 7.0.6 resolution: "cross-spawn@npm:7.0.6" @@ -7843,6 +8038,13 @@ __metadata: languageName: node linkType: hard +"devtools-protocol@npm:0.0.1367902": + version: 0.0.1367902 + resolution: "devtools-protocol@npm:0.0.1367902" + checksum: 10c0/be4017f2bfd04474d718daca0e88e062f4afceb2f311662d717f4eae5bda3473da748a68ff1bf2326a67ce35c37af33932190fe8ef1d36c8ef22576befdc57c4 + languageName: node + linkType: hard + "diff@npm:^7.0.0": version: 7.0.0 resolution: "diff@npm:7.0.0" @@ -7989,7 +8191,7 @@ __metadata: languageName: node linkType: hard -"domutils@npm:^3.0.1, domutils@npm:^3.1.0": +"domutils@npm:^3.0.1, domutils@npm:^3.1.0, domutils@npm:^3.2.2": version: 3.2.2 resolution: "domutils@npm:3.2.2" dependencies: @@ -8057,12 +8259,14 @@ __metadata: dependencies: "@aws/toolkit-md": "npm:^0.1.6" cspell: "npm:^9.0.0" + linkinator: "npm:^7.0.0" lint-staged: "npm:^16.0.0" markdown-link-check: "npm:3.14.2" markdownlint-cli2: "npm:^0.18.0" npm-run-all2: "npm:^8.0.0" prettier: "npm:^3.2.5" prettier-plugin-sh: "npm:^0.18.0" + puppeteer: "npm:^23.0.0" languageName: unknown linkType: soft @@ -8148,6 +8352,15 @@ __metadata: languageName: node linkType: hard +"end-of-stream@npm:^1.1.0": + version: 1.4.5 + resolution: "end-of-stream@npm:1.4.5" + dependencies: + once: "npm:^1.4.0" + checksum: 10c0/b0701c92a10b89afb1cb45bf54a5292c6f008d744eb4382fa559d54775ff31617d1d7bc3ef617575f552e24fad2c7c1a1835948c66b3f3a4be0a6c1f35c883d8 + languageName: node + linkType: hard + "enhanced-resolve@npm:^5.17.1": version: 5.18.1 resolution: "enhanced-resolve@npm:5.18.1" @@ -8172,7 +8385,14 @@ __metadata: languageName: node linkType: hard -"env-paths@npm:^2.2.0": +"entities@npm:^7.0.1": + version: 7.0.1 + resolution: "entities@npm:7.0.1" + checksum: 10c0/b4fb9937bb47ecb00aaaceb9db9cdd1cc0b0fb649c0e843d05cf5dbbd2e9d2df8f98721d8b1b286445689c72af7b54a7242fc2d63ef7c9739037a8c73363e7ca + languageName: node + linkType: hard + +"env-paths@npm:^2.2.0, env-paths@npm:^2.2.1": version: 2.2.1 resolution: "env-paths@npm:2.2.1" checksum: 
10c0/285325677bf00e30845e330eec32894f5105529db97496ee3f598478e50f008c5352a41a30e5e72ec9de8a542b5a570b85699cd63bd2bc646dbcb9f311d83bc4 @@ -8483,6 +8703,15 @@ __metadata: languageName: node linkType: hard +"events-universal@npm:^1.0.0": + version: 1.0.1 + resolution: "events-universal@npm:1.0.1" + dependencies: + bare-events: "npm:^2.7.0" + checksum: 10c0/a1d9a5e9f95843650f8ec240dd1221454c110189a9813f32cdf7185759b43f1f964367ac7dca4ebc69150b59043f2d77c7e122b0d03abf7c25477ea5494785a5 + languageName: node + linkType: hard + "events@npm:^3.2.0": version: 3.3.0 resolution: "events@npm:3.3.0" @@ -8632,6 +8861,23 @@ __metadata: languageName: node linkType: hard +"extract-zip@npm:^2.0.1": + version: 2.0.1 + resolution: "extract-zip@npm:2.0.1" + dependencies: + "@types/yauzl": "npm:^2.9.1" + debug: "npm:^4.1.1" + get-stream: "npm:^5.1.0" + yauzl: "npm:^2.10.0" + dependenciesMeta: + "@types/yauzl": + optional: true + bin: + extract-zip: cli.js + checksum: 10c0/9afbd46854aa15a857ae0341a63a92743a7b89c8779102c3b4ffc207516b2019337353962309f85c66ee3d9092202a83cdc26dbf449a11981272038443974aee + languageName: node + linkType: hard + "fast-content-type-parse@npm:^3.0.0": version: 3.0.0 resolution: "fast-content-type-parse@npm:3.0.0" @@ -8653,6 +8899,13 @@ __metadata: languageName: node linkType: hard +"fast-fifo@npm:^1.2.0, fast-fifo@npm:^1.3.2": + version: 1.3.2 + resolution: "fast-fifo@npm:1.3.2" + checksum: 10c0/d53f6f786875e8b0529f784b59b4b05d4b5c31c651710496440006a398389a579c8dbcd2081311478b5bf77f4b0b21de69109c5a4eabea9d8e8783d1eb864e4c + languageName: node + linkType: hard + "fast-glob@npm:^3.2.11, fast-glob@npm:^3.2.9, fast-glob@npm:^3.3.0, fast-glob@npm:^3.3.3": version: 3.3.3 resolution: "fast-glob@npm:3.3.3" @@ -8718,6 +8971,15 @@ __metadata: languageName: node linkType: hard +"fd-slicer@npm:~1.1.0": + version: 1.1.0 + resolution: "fd-slicer@npm:1.1.0" + dependencies: + pend: "npm:~1.2.0" + checksum: 10c0/304dd70270298e3ffe3bcc05e6f7ade2511acc278bc52d025f8918b48b6aa3b77f10361bddfadfe2a28163f7af7adbdce96f4d22c31b2f648ba2901f0c5fc20e + languageName: node + linkType: hard + "fdir@npm:^6.5.0": version: 6.5.0 resolution: "fdir@npm:6.5.0" @@ -9047,6 +9309,13 @@ __metadata: languageName: node linkType: hard +"get-caller-file@npm:^2.0.5": + version: 2.0.5 + resolution: "get-caller-file@npm:2.0.5" + checksum: 10c0/c6c7b60271931fa752aeb92f2b47e355eac1af3a2673f47c9589e8f8a41adc74d45551c1bc57b5e66a80609f10ffb72b6f575e4370d61cc3f7f3aaff01757cde + languageName: node + linkType: hard + "get-east-asian-width@npm:^1.0.0": version: 1.3.0 resolution: "get-east-asian-width@npm:1.3.0" @@ -9096,6 +9365,15 @@ __metadata: languageName: node linkType: hard +"get-stream@npm:^5.1.0": + version: 5.2.0 + resolution: "get-stream@npm:5.2.0" + dependencies: + pump: "npm:^3.0.0" + checksum: 10c0/43797ffd815fbb26685bf188c8cfebecb8af87b3925091dd7b9a9c915993293d78e3c9e1bce125928ff92f2d0796f3889b92b5ec6d58d1041b574682132e0a80 + languageName: node + linkType: hard + "get-stream@npm:^6.0.0, get-stream@npm:^6.0.1": version: 6.0.1 resolution: "get-stream@npm:6.0.1" @@ -9121,6 +9399,13 @@ __metadata: languageName: node linkType: hard +"github-slugger@npm:^2.0.0": + version: 2.0.0 + resolution: "github-slugger@npm:2.0.0" + checksum: 10c0/21b912b6b1e48f1e5a50b2292b48df0ff6abeeb0691b161b3d93d84f4ae6b1acd6ae23702e914af7ea5d441c096453cf0f621b72d57893946618d21dd1a1c486 + languageName: node + linkType: hard + "glob-parent@npm:^5.1.2, glob-parent@npm:~5.1.2": version: 5.1.2 resolution: "glob-parent@npm:5.1.2" @@ -9178,6 +9463,17 @@ __metadata: 
languageName: node linkType: hard +"glob@npm:^13.0.0": + version: 13.0.6 + resolution: "glob@npm:13.0.6" + dependencies: + minimatch: "npm:^10.2.2" + minipass: "npm:^7.1.3" + path-scurry: "npm:^2.0.2" + checksum: 10c0/269c236f11a9b50357fe7a8c6aadac667e01deb5242b19c84975628f05f4438d8ee1354bb62c5d6c10f37fd59911b54d7799730633a2786660d8c69f1d18120a + languageName: node + linkType: hard + "glob@npm:^7.0.0, glob@npm:^7.0.5, glob@npm:^7.1.3, glob@npm:^7.1.6": version: 7.2.3 resolution: "glob@npm:7.2.3" @@ -9815,6 +10111,18 @@ __metadata: languageName: node linkType: hard +"htmlparser2@npm:^10.0.0": + version: 10.1.0 + resolution: "htmlparser2@npm:10.1.0" + dependencies: + domelementtype: "npm:^2.3.0" + domhandler: "npm:^5.0.3" + domutils: "npm:^3.2.2" + entities: "npm:^7.0.1" + checksum: 10c0/36394e29b80cfcc5e78e0fa4d3aa21fdaac3e6778d23e5c933e625c290987cd9a724a2eb0753ab60ed0c69dfaba0ab115f0ee50fb112fd8f0c4d522e7e0089a2 + languageName: node + linkType: hard + "htmlparser2@npm:^6.1.0": version: 6.1.0 resolution: "htmlparser2@npm:6.1.0" @@ -10000,6 +10308,13 @@ __metadata: languageName: node linkType: hard +"ieee754@npm:^1.1.13": + version: 1.2.1 + resolution: "ieee754@npm:1.2.1" + checksum: 10c0/b0782ef5e0935b9f12883a2e2aa37baa75da6e66ce6515c168697b42160807d9330de9a32ec1ed73149aea02e0d822e572bca6f1e22bdcbd2149e13b050b17bb + languageName: node + linkType: hard + "ignore@npm:^5.2.0, ignore@npm:^5.2.4": version: 5.3.2 resolution: "ignore@npm:5.3.2" @@ -10870,6 +11185,26 @@ __metadata: languageName: node linkType: hard +"linkinator@npm:^7.0.0": + version: 7.6.1 + resolution: "linkinator@npm:7.6.1" + dependencies: + chalk: "npm:^5.0.0" + escape-html: "npm:^1.0.3" + glob: "npm:^13.0.0" + htmlparser2: "npm:^10.0.0" + marked: "npm:^17.0.0" + marked-gfm-heading-id: "npm:^4.1.3" + meow: "npm:^14.0.0" + mime: "npm:^4.0.0" + srcset: "npm:^5.0.0" + undici: "npm:^7.16.0" + bin: + linkinator: build/src/cli.js + checksum: 10c0/cdced6748087b7a1f4f5917e193574e6f6bef9bca1c0940be3c4a7853b29a90212e685a2da4266c0082fbcf4762abec536fb311be1129c7df04f2296fefad8b0 + languageName: node + linkType: hard + "lint-staged@npm:^16.0.0": version: 16.2.7 resolution: "lint-staged@npm:16.2.7" @@ -11219,6 +11554,17 @@ __metadata: languageName: node linkType: hard +"marked-gfm-heading-id@npm:^4.1.3": + version: 4.1.4 + resolution: "marked-gfm-heading-id@npm:4.1.4" + dependencies: + github-slugger: "npm:^2.0.0" + peerDependencies: + marked: ">=13 <19" + checksum: 10c0/677982332f26e64fce426c00219b7b6ee9a45dd87e3e83ee0606b42bdb0548fe04789767aa2119573343db0143d8417afd60654d9fc7f3ba421ca3030f9755f4 + languageName: node + linkType: hard + "marked@npm:^12.0.1": version: 12.0.2 resolution: "marked@npm:12.0.2" @@ -11228,6 +11574,15 @@ __metadata: languageName: node linkType: hard +"marked@npm:^17.0.0": + version: 17.0.6 + resolution: "marked@npm:17.0.6" + bin: + marked: bin/marked.js + checksum: 10c0/77961fbb360511d6638491e097cb15b1ef5e643f27c25b9cd52aa3cd92e216fc3cda7dbfc6a10adb7922d1b38df132510df65de4afe6c53c7280772c9d4221bd + languageName: node + linkType: hard + "math-intrinsics@npm:^1.1.0": version: 1.1.0 resolution: "math-intrinsics@npm:1.1.0" @@ -11584,6 +11939,13 @@ __metadata: languageName: node linkType: hard +"meow@npm:^14.0.0": + version: 14.1.0 + resolution: "meow@npm:14.1.0" + checksum: 10c0/f0ca4bb4fd08e4b9470fcbb7332deb61d72d40d4bda18ffb87c1a98e5014c0b44749ae9f0cab18fa532e26d61cef5d453831f9ae23ac09fa8ea0e0469be73ebc + languageName: node + linkType: hard + "merge-descriptors@npm:1.0.3": version: 1.0.3 resolution: 
"merge-descriptors@npm:1.0.3" @@ -12223,6 +12585,15 @@ __metadata: languageName: node linkType: hard +"mime@npm:^4.0.0": + version: 4.1.0 + resolution: "mime@npm:4.1.0" + bin: + mime: bin/cli.js + checksum: 10c0/3b8602e50dff1049aea8bb2d4c65afc55bf7f3eb5c17fd2bcb315b8c8ae225a7553297d424d3621757c24cdba99e930ecdc4108467009cdc7ed55614cd55031d + languageName: node + linkType: hard + "mimic-fn@npm:^2.1.0": version: 2.1.0 resolution: "mimic-fn@npm:2.1.0" @@ -12288,6 +12659,15 @@ __metadata: languageName: node linkType: hard +"minimatch@npm:^10.2.2": + version: 10.2.5 + resolution: "minimatch@npm:10.2.5" + dependencies: + brace-expansion: "npm:^5.0.5" + checksum: 10c0/6bb058bd6324104b9ec2f763476a35386d05079c1f5fe4fbf1f324a25237cd4534d6813ecd71f48208f4e635c1221899bef94c3c89f7df55698fe373aaae20fd + languageName: node + linkType: hard + "minimatch@npm:^9.0.4": version: 9.0.5 resolution: "minimatch@npm:9.0.5" @@ -12371,6 +12751,13 @@ __metadata: languageName: node linkType: hard +"minipass@npm:^7.1.3": + version: 7.1.3 + resolution: "minipass@npm:7.1.3" + checksum: 10c0/539da88daca16533211ea5a9ee98dc62ff5742f531f54640dd34429e621955e91cc280a91a776026264b7f9f6735947629f920944e9c1558369e8bf22eb33fbb + languageName: node + linkType: hard + "minizlib@npm:^3.0.1": version: 3.0.1 resolution: "minizlib@npm:3.0.1" @@ -12390,6 +12777,13 @@ __metadata: languageName: node linkType: hard +"mitt@npm:3.0.1": + version: 3.0.1 + resolution: "mitt@npm:3.0.1" + checksum: 10c0/3ab4fdecf3be8c5255536faa07064d05caa3dd332bd318ff02e04621f7b3069ca1de9106cfe8e7ced675abfc2bec2ce4c4ef321c4a1bb1fb29df8ae090741913 + languageName: node + linkType: hard + "mkdirp@npm:0.3.0": version: 0.3.0 resolution: "mkdirp@npm:0.3.0" @@ -12750,7 +13144,7 @@ __metadata: languageName: node linkType: hard -"once@npm:^1.3.0, once@npm:^1.4.0": +"once@npm:^1.3.0, once@npm:^1.3.1, once@npm:^1.4.0": version: 1.4.0 resolution: "once@npm:1.4.0" dependencies: @@ -13143,6 +13537,16 @@ __metadata: languageName: node linkType: hard +"path-scurry@npm:^2.0.2": + version: 2.0.2 + resolution: "path-scurry@npm:2.0.2" + dependencies: + lru-cache: "npm:^11.0.0" + minipass: "npm:^7.1.2" + checksum: 10c0/b35ad37cf6557a87fd057121ce2be7695380c9138d93e87ae928609da259ea0a170fac6f3ef1eb3ece8a068e8b7f2f3adf5bb2374cf4d4a57fe484954fcc9482 + languageName: node + linkType: hard + "path-to-regexp@npm:0.1.12": version: 0.1.12 resolution: "path-to-regexp@npm:0.1.12" @@ -13187,6 +13591,13 @@ __metadata: languageName: node linkType: hard +"pend@npm:~1.2.0": + version: 1.2.0 + resolution: "pend@npm:1.2.0" + checksum: 10c0/8a87e63f7a4afcfb0f9f77b39bb92374afc723418b9cb716ee4257689224171002e07768eeade4ecd0e86f1fa3d8f022994219fb45634f2dbd78c6803e452458 + languageName: node + linkType: hard + "picocolors@npm:^1.0.0, picocolors@npm:^1.0.1, picocolors@npm:^1.1.1": version: 1.1.1 resolution: "picocolors@npm:1.1.1" @@ -13877,6 +14288,16 @@ __metadata: languageName: node linkType: hard +"pump@npm:^3.0.0": + version: 3.0.4 + resolution: "pump@npm:3.0.4" + dependencies: + end-of-stream: "npm:^1.1.0" + once: "npm:^1.3.1" + checksum: 10c0/2780e66b5471c19e3e3e1063b84f3f6a3a08367f24c5ed552f98cd5901e6ada27c7ad6495d4244f553fd03b01884a4561933064f053f47c8994d84fd352768ea + languageName: node + linkType: hard + "punycode.js@npm:^2.3.1": version: 2.3.1 resolution: "punycode.js@npm:2.3.1" @@ -13900,6 +14321,36 @@ __metadata: languageName: node linkType: hard +"puppeteer-core@npm:23.11.1": + version: 23.11.1 + resolution: "puppeteer-core@npm:23.11.1" + dependencies: + "@puppeteer/browsers": "npm:2.6.1" + 
chromium-bidi: "npm:0.11.0" + debug: "npm:^4.4.0" + devtools-protocol: "npm:0.0.1367902" + typed-query-selector: "npm:^2.12.0" + ws: "npm:^8.18.0" + checksum: 10c0/6512a3dca8c7bea620219332b84c4442754fead6c5021c26ea395ddc2f84610a54accf185ba1450e02885cb063c2d12f96eb5f18e7e1b6795f3e32a4b8a2102e + languageName: node + linkType: hard + +"puppeteer@npm:^23.0.0": + version: 23.11.1 + resolution: "puppeteer@npm:23.11.1" + dependencies: + "@puppeteer/browsers": "npm:2.6.1" + chromium-bidi: "npm:0.11.0" + cosmiconfig: "npm:^9.0.0" + devtools-protocol: "npm:0.0.1367902" + puppeteer-core: "npm:23.11.1" + typed-query-selector: "npm:^2.12.0" + bin: + puppeteer: lib/cjs/puppeteer/node/cli.js + checksum: 10c0/e967f5ce02ab9e0343eb4403f32ab7de8a6dbeffe6b23be8725e112015ae4a60264a554742cf10302434795a8e9ea27ec9b048126fee23750ce24c3b238d2ebc + languageName: node + linkType: hard + "qs@npm:6.13.0": version: 6.13.0 resolution: "qs@npm:6.13.0" @@ -15030,6 +15481,13 @@ __metadata: languageName: node linkType: hard +"require-directory@npm:^2.1.1": + version: 2.1.1 + resolution: "require-directory@npm:2.1.1" + checksum: 10c0/83aa76a7bc1531f68d92c75a2ca2f54f1b01463cb566cf3fbc787d0de8be30c9dbc211d1d46be3497dac5785fe296f2dd11d531945ac29730643357978966e99 + languageName: node + linkType: hard + "require-from-string@npm:^2.0.2": version: 2.0.2 resolution: "require-from-string@npm:2.0.2" @@ -15392,7 +15850,7 @@ __metadata: languageName: node linkType: hard -"semver@npm:^7.7.3": +"semver@npm:^7.6.3, semver@npm:^7.7.3": version: 7.7.4 resolution: "semver@npm:7.7.4" bin: @@ -15890,6 +16348,13 @@ __metadata: languageName: node linkType: hard +"srcset@npm:^5.0.0": + version: 5.0.3 + resolution: "srcset@npm:5.0.3" + checksum: 10c0/35158007a4f02a52dc57293441f8f668c332d4cca80297367a51eb43ef76c5f2eefbcf2e09614c5399298981c3b02e15adfa6ab0691e85b32582ca73dabe60b5 + languageName: node + linkType: hard + "ssri@npm:^12.0.0": version: 12.0.0 resolution: "ssri@npm:12.0.0" @@ -15934,6 +16399,17 @@ __metadata: languageName: node linkType: hard +"streamx@npm:^2.12.5, streamx@npm:^2.15.0, streamx@npm:^2.25.0": + version: 2.25.0 + resolution: "streamx@npm:2.25.0" + dependencies: + events-universal: "npm:^1.0.0" + fast-fifo: "npm:^1.3.2" + text-decoder: "npm:^1.1.0" + checksum: 10c0/1ecc4b722050e9088b99cde59d035e846ac97cedc3ef14a00b196d9c0b6f47d9fd18df454a19f56f0f586ab4f23fb7229069b9e8eaf22072a21bd9c909d4e0ea + languageName: node + linkType: hard + "string-argv@npm:^0.3.2": version: 0.3.2 resolution: "string-argv@npm:0.3.2" @@ -16167,6 +16643,35 @@ __metadata: languageName: node linkType: hard +"tar-fs@npm:^3.0.6": + version: 3.1.2 + resolution: "tar-fs@npm:3.1.2" + dependencies: + bare-fs: "npm:^4.0.1" + bare-path: "npm:^3.0.0" + pump: "npm:^3.0.0" + tar-stream: "npm:^3.1.5" + dependenciesMeta: + bare-fs: + optional: true + bare-path: + optional: true + checksum: 10c0/9dcbbbef9cdfc27f47651fe679f15952a6a8e6b3c9761c4bf3f416ace41cf462fb6292519bd3e041cadfcc0b89043a6bdecb46ff19f770b6864b77dcde7bad46 + languageName: node + linkType: hard + +"tar-stream@npm:^3.1.5": + version: 3.1.8 + resolution: "tar-stream@npm:3.1.8" + dependencies: + b4a: "npm:^1.6.4" + bare-fs: "npm:^4.5.5" + fast-fifo: "npm:^1.2.0" + streamx: "npm:^2.15.0" + checksum: 10c0/c4bf369de2302fcf30218d091167a5372ee79b69a1b5bb493ddb7714193ca805719558966334bab1f2775c8142826865f24e25459ff1c5f0a096bc3a3d5c5ce2 + languageName: node + linkType: hard + "tar@npm:^7.4.3": version: 7.5.11 resolution: "tar@npm:7.5.11" @@ -16180,6 +16685,15 @@ __metadata: languageName: node linkType: hard 
+"teex@npm:^1.0.1": + version: 1.0.1 + resolution: "teex@npm:1.0.1" + dependencies: + streamx: "npm:^2.12.5" + checksum: 10c0/8df9166c037ba694b49d32a49858e314c60e513d55ac5e084dbf1ddbb827c5fa43cc389a81e87684419c21283308e9d68bb068798189c767ec4c252f890b8a77 + languageName: node + linkType: hard + "terser-webpack-plugin@npm:^5.3.10, terser-webpack-plugin@npm:^5.3.9": version: 5.3.11 resolution: "terser-webpack-plugin@npm:5.3.11" @@ -16216,6 +16730,15 @@ __metadata: languageName: node linkType: hard +"text-decoder@npm:^1.1.0": + version: 1.2.7 + resolution: "text-decoder@npm:1.2.7" + dependencies: + b4a: "npm:^1.6.4" + checksum: 10c0/929938ed154fbadb660a7f3d1aca30b7e53649a731af7583168fcfba0c158046325d35d945926e2a512bb62d1a49a7818151c987ea38b48853f01e1615722fc5 + languageName: node + linkType: hard + "text-table@npm:^0.2.0": version: 0.2.0 resolution: "text-table@npm:0.2.0" @@ -16223,6 +16746,13 @@ __metadata: languageName: node linkType: hard +"through@npm:^2.3.8": + version: 2.3.8 + resolution: "through@npm:2.3.8" + checksum: 10c0/4b09f3774099de0d4df26d95c5821a62faee32c7e96fb1f4ebd54a2d7c11c57fe88b0a0d49cf375de5fee5ae6bf4eb56dbbf29d07366864e2ee805349970d3cc + languageName: node + linkType: hard + "thunky@npm:^1.0.2": version: 1.1.0 resolution: "thunky@npm:1.1.0" @@ -16364,6 +16894,13 @@ __metadata: languageName: node linkType: hard +"typed-query-selector@npm:^2.12.0": + version: 2.12.1 + resolution: "typed-query-selector@npm:2.12.1" + checksum: 10c0/2c81c8560910d87f98a64e1c0b03247a7c94c3703d11f2f048553718c18da8dcab8469be76a39d2d258f0ff5a9b0bf419394d8b1c804fdf72a06181a0631d70d + languageName: node + linkType: hard + "typedarray-to-buffer@npm:^3.1.5": version: 3.1.5 resolution: "typedarray-to-buffer@npm:3.1.5" @@ -16398,6 +16935,16 @@ __metadata: languageName: node linkType: hard +"unbzip2-stream@npm:^1.4.3": + version: 1.4.3 + resolution: "unbzip2-stream@npm:1.4.3" + dependencies: + buffer: "npm:^5.2.1" + through: "npm:^2.3.8" + checksum: 10c0/2ea2048f3c9db3499316ccc1d95ff757017ccb6f46c812d7c42466247e3b863fb178864267482f7f178254214247779daf68e85f50bd7736c3c97ba2d58b910a + languageName: node + linkType: hard + "undici-types@npm:~6.20.0": version: 6.20.0 resolution: "undici-types@npm:6.20.0" @@ -16412,6 +16959,13 @@ __metadata: languageName: node linkType: hard +"undici@npm:^7.16.0": + version: 7.25.0 + resolution: "undici@npm:7.25.0" + checksum: 10c0/02a0b45dc14eb91bc488948750232450fe52f27a6b08086d6ac6736bb47908d600fe3a96d346f12eab24729c782e5c2f693bc8e8eca6696d4e4c09b1ed4cb4ec + languageName: node + linkType: hard + "unicode-canonical-property-names-ecmascript@npm:^2.0.0": version: 2.0.1 resolution: "unicode-canonical-property-names-ecmascript@npm:2.0.1" @@ -17208,7 +17762,7 @@ __metadata: languageName: node linkType: hard -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0, wrap-ansi@npm:^7.0.0": version: 7.0.0 resolution: "wrap-ansi@npm:7.0.0" dependencies: @@ -17290,6 +17844,21 @@ __metadata: languageName: node linkType: hard +"ws@npm:^8.18.0": + version: 8.20.0 + resolution: "ws@npm:8.20.0" + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ">=5.0.2" + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + checksum: 10c0/956ac5f11738c914089b65878b9223692ace77337ba55379ae68e1ecbeae9b47a0c6eb9403688f609999a58c80d83d99865fe0029b229d308b08c1ef93d4ea14 + languageName: node + linkType: hard + "xdg-basedir@npm:^5.0.1, xdg-basedir@npm:^5.1.0": version: 5.1.0 resolution: "xdg-basedir@npm:5.1.0" @@ -17327,6 +17896,13 @@ __metadata: 
languageName: node linkType: hard +"y18n@npm:^5.0.5": + version: 5.0.8 + resolution: "y18n@npm:5.0.8" + checksum: 10c0/4df2842c36e468590c3691c894bc9cdbac41f520566e76e24f59401ba7d8b4811eb1e34524d57e54bc6d864bcb66baab7ffd9ca42bf1eda596618f9162b91249 + languageName: node + linkType: hard + "yallist@npm:^3.0.2": version: 3.1.1 resolution: "yallist@npm:3.1.1" @@ -17386,6 +17962,38 @@ __metadata: languageName: node linkType: hard +"yargs-parser@npm:^21.1.1": + version: 21.1.1 + resolution: "yargs-parser@npm:21.1.1" + checksum: 10c0/f84b5e48169479d2f402239c59f084cfd1c3acc197a05c59b98bab067452e6b3ea46d4dd8ba2985ba7b3d32a343d77df0debd6b343e5dae3da2aab2cdf5886b2 + languageName: node + linkType: hard + +"yargs@npm:^17.7.2": + version: 17.7.2 + resolution: "yargs@npm:17.7.2" + dependencies: + cliui: "npm:^8.0.1" + escalade: "npm:^3.1.1" + get-caller-file: "npm:^2.0.5" + require-directory: "npm:^2.1.1" + string-width: "npm:^4.2.3" + y18n: "npm:^5.0.5" + yargs-parser: "npm:^21.1.1" + checksum: 10c0/ccd7e723e61ad5965fffbb791366db689572b80cca80e0f96aad968dfff4156cd7cd1ad18607afe1046d8241e6fb2d6c08bf7fa7bfb5eaec818735d8feac8f05 + languageName: node + linkType: hard + +"yauzl@npm:^2.10.0": + version: 2.10.0 + resolution: "yauzl@npm:2.10.0" + dependencies: + buffer-crc32: "npm:~0.2.3" + fd-slicer: "npm:~1.1.0" + checksum: 10c0/f265002af7541b9ec3589a27f5fb8f11cf348b53cc15e2751272e3c062cd73f3e715bc72d43257de71bbaecae446c3f1b14af7559e8ab0261625375541816422 + languageName: node + linkType: hard + "yocto-queue@npm:^0.1.0": version: 0.1.0 resolution: "yocto-queue@npm:0.1.0" @@ -17416,6 +18024,13 @@ __metadata: languageName: node linkType: hard +"zod@npm:3.23.8": + version: 3.23.8 + resolution: "zod@npm:3.23.8" + checksum: 10c0/8f14c87d6b1b53c944c25ce7a28616896319d95bc46a9660fe441adc0ed0a81253b02b5abdaeffedbeb23bdd25a0bf1c29d2c12dd919aef6447652dd295e3e69 + languageName: node + linkType: hard + "zod@npm:^3.25 || ^4.0": version: 4.3.6 resolution: "zod@npm:4.3.6"