Skip to content

Commit 58120c6

Browse files
committed
Import all changes from RIV
1 parent f8b8110 commit 58120c6

209 files changed

Lines changed: 9941 additions & 99 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

docs/style_guide.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,14 @@ sidebar_custom_props:
6464
---
6565
```
6666

67+
To mark your module as optional:
68+
```
69+
---
70+
...
71+
sidebar_custom_props: { "optional": "true" }
72+
---
73+
```
74+
6775
### Navigating the AWS console
6876

6977
There are instances where the user needs to navigate to specific screens in the AWS console. It is preferable to provide a link to the exact screen if possible, or as close to it as can be done.

hack/create-infrastructure.sh

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,5 +20,18 @@ if [ $cluster_exists -eq 0 ]; then
2020
echo "Cluster ${EKS_CLUSTER_NAME} already exists"
2121
else
2222
echo "Creating cluster ${EKS_CLUSTER_NAME}"
23-
bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster.yaml | envsubst | eksctl create cluster -f -'
24-
fi
23+
bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster.yaml | envsubst | eksctl create cluster -f -'&
24+
fi
25+
26+
auto_cluster_exists=0
27+
aws eks describe-cluster --name "${EKS_CLUSTER_AUTO_NAME}" &> /dev/null || auto_cluster_exists=$?
28+
29+
if [ $auto_cluster_exists -eq 0 ]; then
30+
echo "Auto mode cluster ${EKS_CLUSTER_AUTO_NAME} already exists"
31+
else
32+
echo "Creating auto mode cluster ${EKS_CLUSTER_AUTO_NAME} with terraform"
33+
bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster-auto.yaml | envsubst'
34+
bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster-auto.yaml | envsubst | eksctl create cluster -f -'&
35+
fi
36+
37+
wait

hack/destroy-infrastructure.sh

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,23 @@ aws eks describe-cluster --name "${EKS_CLUSTER_NAME}" &> /dev/null || cluster_ex
1515
if [ $cluster_exists -eq 0 ]; then
1616
echo "Deleting cluster ${EKS_CLUSTER_NAME}"
1717
bash $SCRIPT_DIR/shell.sh "${environment}" 'delete-environment' || true
18-
19-
bash $SCRIPT_DIR/exec.sh "${environment}" 'eksctl delete cluster --name ${EKS_CLUSTER_NAME} --region ${AWS_REGION} --wait --force --disable-nodegroup-eviction --timeout 45m'
18+
bash $SCRIPT_DIR/exec.sh "${environment}" 'eksctl delete cluster --name ${EKS_CLUSTER_NAME} --region ${AWS_REGION} --wait --force --disable-nodegroup-eviction --timeout 45m'&
2019
else
2120
echo "Cluster ${EKS_CLUSTER_NAME} does not exist"
2221
fi
2322

23+
export EKS_CLUSTER_AUTO_NAME="${EKS_CLUSTER_NAME}-auto"
24+
auto_cluster_exists=0
25+
aws eks describe-cluster --name "${EKS_CLUSTER_AUTO_NAME}" &> /dev/null || auto_cluster_exists=$?
26+
27+
if [ $auto_cluster_exists -eq 0 ]; then
28+
echo "Deleting auto mode cluster ${EKS_CLUSTER_AUTO_NAME}"
29+
bash $SCRIPT_DIR/shell.sh "${environment}" 'delete-environment' || true # Needed ?
30+
bash $SCRIPT_DIR/exec.sh "${environment}" 'eksctl delete cluster --name ${EKS_CLUSTER_AUTO_NAME} --region ${AWS_REGION} --wait --force --disable-nodegroup-eviction --timeout 45m'
31+
else
32+
echo "Auto mode cluster ${EKS_CLUSTER_AUTO_NAME} does not exist"
33+
fi
34+
35+
wait
36+
2437
aws cloudformation delete-stack --stack-name ${EKS_CLUSTER_NAME}-ide-role || true

hack/exec.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,5 +31,5 @@ $CONTAINER_CLI run --rm \
3131
-v $SCRIPT_DIR/../manifests:/manifests \
3232
-v $SCRIPT_DIR/../cluster:/cluster \
3333
--entrypoint /bin/bash \
34-
-e 'EKS_CLUSTER_NAME' -e 'AWS_REGION' -e 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' \
34+
-e 'EKS_CLUSTER_NAME' -e 'EKS_CLUSTER_AUTO_NAME' -e 'AWS_REGION' -e 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' -e RESOURCE_CODEBUILD_ROLE_ARN \
3535
$aws_credential_args $container_image -c "$shell_command"

hack/lib/common-env.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,10 @@ environment=${environment:-""}
22

33
if [ -z "$environment" ]; then
44
export EKS_CLUSTER_NAME="eks-workshop"
5+
export EKS_CLUSTER_AUTO_NAME="eks-workshop-auto"
56
else
67
export EKS_CLUSTER_NAME="eks-workshop-${environment}"
8+
export EKS_CLUSTER_AUTO_NAME="eks-workshop-auto-${environment}"
79
fi
810

911
AWS_REGION=${AWS_REGION:-""}

hack/pre-provision-resources.sh

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@ fi
99

1010
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
1111

12+
echo $SCRIPT_DIR
13+
1214
source $SCRIPT_DIR/lib/common-env.sh
1315

1416
terraform_dir="${SCRIPT_DIR}/../terraform-resources"
@@ -23,7 +25,7 @@ rm -rf $conf_dir
2325
mkdir -p "$conf_dir"
2426

2527
cat << EOF > $conf_dir/backend_override.tf
26-
terraform {
28+
terraform {
2729
backend "local" {
2830
path = "../terraform.tfstate"
2931
}
@@ -34,12 +36,19 @@ cp $manifests_dir/.workshop/terraform/base.tf $conf_dir/base.tf
3436

3537
find $manifests_dir/modules -type d -name "preprovision" -print0 | while read -d $'\0' file
3638
do
37-
target=$(echo $file | md5sum | cut -f1 -d" ")
39+
md5=$(echo ${file#"$manifests_dir/modules/"} | md5sum | cut -f1 -d" " | cut -d'/' -f1 | rev) # In case of non-unique
40+
first_path=$(echo ${file#"$manifests_dir/modules/"} | cut -d'/' -f1,2 | tr '/' '_')
41+
target="${first_path}-$md5"
42+
3843
cp -R $file $conf_dir/$target
3944

4045
cat << EOF > $conf_dir/$target.tf
4146
module "gen-$target" {
4247
source = "./$target"
48+
providers = {
49+
helm.auto_mode = helm.auto_mode
50+
kubernetes.auto_mode = kubernetes.auto_mode
51+
}
4352
4453
eks_cluster_id = local.eks_cluster_id
4554
tags = local.tags

hack/run-tests.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
#!/bin/bash
1+
#!/bin/bash -x
22

33
environment=$1
44
module=$2

hack/shell.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,6 @@ fi
4040
$CONTAINER_CLI run --rm $interactive_args $dns_args \
4141
-v $SCRIPT_DIR/../manifests:/eks-workshop/manifests \
4242
-v $SCRIPT_DIR/../cluster:/cluster \
43-
-e 'EKS_CLUSTER_NAME' -e 'AWS_REGION' \
43+
-e 'EKS_CLUSTER_NAME' -e 'EKS_CLUSTER_AUTO_NAME' -e 'AWS_REGION' \
4444
-p 8889:8889 \
4545
$aws_credential_args $container_image $shell_command

lab/bin/reset-environment

Lines changed: 29 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,11 @@ if [ ! -z "$module" ]; then
9292
if [ $module = "introduction/getting-started" ]; then
9393
exit
9494
fi
95+
96+
if [[ "$module" = "fastpaths/"* ]]; then
97+
kubectl config use-context eks-workshop-auto
98+
exit
99+
fi
95100
fi
96101

97102
logmessage "🎓 ${PURPLE}Tip:${NC} Read the rest of the lab introduction while you wait!\n"
@@ -106,24 +111,29 @@ kubectl delete pod load-generator --ignore-not-found
106111

107112
kubectl delete namespace other --ignore-not-found
108113

109-
kubectl apply -k $base_path --prune --all \
110-
--prune-allowlist=autoscaling/v1/HorizontalPodAutoscaler \
111-
--prune-allowlist=core/v1/Service \
112-
--prune-allowlist=core/v1/ConfigMap \
113-
--prune-allowlist=apps/v1/Deployment \
114-
--prune-allowlist=apps/v1/StatefulSet \
115-
--prune-allowlist=core/v1/ServiceAccount \
116-
--prune-allowlist=core/v1/Secret \
117-
--prune-allowlist=core/v1/PersistentVolumeClaim \
118-
--prune-allowlist=scheduling.k8s.io/v1/PriorityClass \
119-
--prune-allowlist=networking.k8s.io/v1/Ingress
114+
if [[ $module != introduction/basics* ]]; then
115+
kubectl apply -k $base_path --prune --all \
116+
--prune-allowlist=autoscaling/v1/HorizontalPodAutoscaler \
117+
--prune-allowlist=core/v1/Service \
118+
--prune-allowlist=core/v1/ConfigMap \
119+
--prune-allowlist=apps/v1/Deployment \
120+
--prune-allowlist=apps/v1/StatefulSet \
121+
--prune-allowlist=core/v1/ServiceAccount \
122+
--prune-allowlist=core/v1/Secret \
123+
--prune-allowlist=core/v1/PersistentVolumeClaim \
124+
--prune-allowlist=scheduling.k8s.io/v1/PriorityClass \
125+
--prune-allowlist=networking.k8s.io/v1/Ingress
120126

121-
logmessage "\n⏳ Waiting for application to become ready..."
127+
logmessage "\n⏳ Waiting for application to become ready..."
122128

123-
sleep 10
129+
sleep 10
124130

125-
kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A
126-
kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
131+
kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A
132+
kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
133+
134+
else
135+
kubectl delete -k $base_path --ignore-not-found
136+
fi
127137

128138
# Addons
129139
rm -rf /eks-workshop/terraform
@@ -238,10 +248,12 @@ if [ $EXIT_CODE -ne 0 ]; then
238248
fi
239249

240250
# Recycle workload pods in case stateful pods got restarted
241-
kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernetes.io/component=service -A
251+
kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernetes.io/component=service -A --ignore-not-found
242252

243253
# Wait for the workload pods previously recycled
244-
kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
254+
if kubectl get pods -A -l app.kubernetes.io/created-by=eks-workshop 2>/dev/null | grep -q .; then
255+
kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
256+
fi
245257

246258
# Finished
247259
logmessage "\n✅ Environment is ${GREEN}ready${NC}!"

lab/scripts/setup.sh

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ export AWS_PAGER=""
2121
export AWS_REGION="${AWS_REGION}"
2222
export AWS_ACCOUNT_ID="${AWS_ACCOUNT_ID}"
2323
export EKS_CLUSTER_NAME="${EKS_CLUSTER_NAME}"
24+
export EKS_CLUSTER_AUTO_NAME="${EKS_CLUSTER_AUTO_NAME}"
2425
export EKS_DEFAULT_MNG_NAME="default"
2526
export EKS_DEFAULT_MNG_MIN=3
2627
export EKS_DEFAULT_MNG_MAX=6
@@ -30,15 +31,18 @@ EOT
3031
touch ~/.bashrc.d/workshop-env.bash
3132

3233
cat << EOT > /home/ec2-user/.bashrc.d/aliases.bash
33-
function prepare-environment() {
34+
function prepare-environment() {
35+
start_time=\$(date +%s)
3436
bash /usr/local/bin/reset-environment \$1
3537
exit_code=\$?
3638
source ~/.bashrc.d/workshop-env.bash
39+
echo "Execution time: \$((\$(date +%s) - start_time)) seconds"
3740
return \$exit_code
3841
}
3942
4043
function use-cluster() { bash /usr/local/bin/use-cluster \$1; source ~/.bashrc.d/env.bash; }
4144
function create-cluster() { URL=https://raw.githubusercontent.com/${REPOSITORY_OWNER}/${REPOSITORY_NAME}/refs/heads/${REPOSITORY_REF}/cluster/eksctl/cluster.yaml; echo "Creating cluster with eksctl from $URL"; curl -fsSL $URL | envsubst | eksctl create cluster -f -; }
45+
function create-cluster-auto() { URL=https://raw.githubusercontent.com/${REPOSITORY_OWNER}/${REPOSITORY_NAME}/refs/heads/${REPOSITORY_REF}/cluster/eksctl/cluster-auto.yaml; echo "Creating cluster with eksctl from $URL"; curl -fsSL $URL | envsubst | eksctl create cluster -f -; }
4246
EOT
4347

4448
REPOSITORY_OWNER=${REPOSITORY_OWNER:-"aws-samples"}
@@ -60,4 +64,4 @@ echo "export ANALYTICS_ENDPOINT='${ANALYTICS_ENDPOINT}'" > ~/.bashrc.d/analytics
6064

6165
/usr/local/bin/kubectl completion bash > ~/.bashrc.d/kubectl_completion.bash
6266
echo "alias k=kubectl" >> ~/.bashrc.d/kubectl_completion.bash
63-
echo "complete -F __start_kubectl k" >> ~/.bashrc.d/kubectl_completion.bash
67+
echo "complete -F __start_kubectl k" >> ~/.bashrc.d/kubectl_completion.bash

0 commit comments

Comments
 (0)