Skip to content

Commit b0b1f51

Browse files
committed
feat: Quality-of-life improvement for create-infrastructure and destroy: you can pass cluster=auto, cluster=standard, or cluster=all
1 parent e0b4e2e commit b0b1f51

7 files changed

Lines changed: 49 additions & 19 deletions

File tree

Makefile

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ environment=''
44
shell_command=''
55
shell_simple_command=''
66
glob='-'
7+
cluster='all'
78

89
.PHONY: install
910
install:
@@ -39,11 +40,11 @@ delete-environment:
3940

4041
.PHONY: create-infrastructure
4142
create-infrastructure:
42-
bash hack/create-infrastructure.sh $(environment)
43+
bash hack/create-infrastructure.sh $(environment) $(cluster)
4344

4445
.PHONY: destroy-infrastructure
4546
destroy-infrastructure:
46-
bash hack/destroy-infrastructure.sh $(environment)
47+
bash hack/destroy-infrastructure.sh $(environment) $(cluster)
4748

4849
.PHONY: deploy-ide
4950
deploy-ide:

cluster/eksctl/cluster.yaml

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,4 +40,13 @@ remoteNetworkConfig:
4040
remoteNodeNetworks:
4141
- cidrs: ["10.52.0.0/16"]
4242
remotePodNetworks:
43-
- cidrs: ["10.53.0.0/16"]
43+
- cidrs: ["10.53.0.0/16"]
44+
accessConfig:
45+
authenticationMode: API
46+
bootstrapClusterCreatorAdminPermissions: true
47+
accessEntries:
48+
- principalARN: ${RESOURCE_CODEBUILD_ROLE_ARN}
49+
accessPolicies:
50+
- policyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
51+
accessScope:
52+
type: cluster

hack/create-infrastructure.sh

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
#!/bin/bash
22

33
environment=$1
4+
cluster=${2:-all}
5+
echo "Creating infrastructure for environment ${environment} and cluster ${cluster}"
46

57
set -Eeuo pipefail
68
set -u
@@ -12,26 +14,26 @@ source $SCRIPT_DIR/lib/common-env.sh
1214
bash $SCRIPT_DIR/update-iam-role.sh $environment
1315

1416
sleep 5
17+
export USE_CURRENT_USER=1 # We don't want to change the ARN in exec
1518

1619
cluster_exists=0
1720
aws eks describe-cluster --name "${EKS_CLUSTER_NAME}" &> /dev/null || cluster_exists=$?
1821

19-
if [ $cluster_exists -eq 0 ]; then
20-
echo "Cluster ${EKS_CLUSTER_NAME} already exists"
21-
else
22+
if [ $cluster_exists -ne 0 ] && [[ "$cluster" == "standard" || "$cluster" == "all" ]]; then
2223
echo "Creating cluster ${EKS_CLUSTER_NAME}"
2324
bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster.yaml | envsubst | eksctl create cluster -f -'&
25+
else
26+
echo "Cluster ${EKS_CLUSTER_NAME} already exists"
2427
fi
2528

2629
auto_cluster_exists=0
2730
aws eks describe-cluster --name "${EKS_CLUSTER_AUTO_NAME}" &> /dev/null || auto_cluster_exists=$?
2831

29-
if [ $auto_cluster_exists -eq 0 ]; then
30-
echo "Auto mode cluster ${EKS_CLUSTER_AUTO_NAME} already exists"
31-
else
32-
echo "Creating auto mode cluster ${EKS_CLUSTER_AUTO_NAME} with terraform"
33-
bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster-auto.yaml | envsubst'
32+
if [ $auto_cluster_exists -ne 0 ] && [[ "$cluster" == "auto" || "$cluster" == "all" ]]; then
33+
echo "Creating auto mode cluster ${EKS_CLUSTER_AUTO_NAME}"
3434
bash $SCRIPT_DIR/exec.sh "${environment}" 'cat /cluster/eksctl/cluster-auto.yaml | envsubst | eksctl create cluster -f -'&
35+
else
36+
echo "Auto mode cluster ${EKS_CLUSTER_AUTO_NAME} already exists"
3537
fi
3638

3739
wait

hack/destroy-infrastructure.sh

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,37 +1,42 @@
11
#!/bin/bash
22

33
environment=$1
4+
cluster=${2:-all}
5+
echo "Destroying infrastructure for environment ${environment} and cluster ${cluster}"
46

57
set -Eeuo pipefail
68
set -u
79

810
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
9-
11+
export USE_CURRENT_USER=1;
1012
source $SCRIPT_DIR/lib/common-env.sh
1113

1214
cluster_exists=0
1315
aws eks describe-cluster --name "${EKS_CLUSTER_NAME}" &> /dev/null || cluster_exists=$?
1416

15-
if [ $cluster_exists -eq 0 ]; then
17+
if [ $cluster_exists -eq 0 ] && [[ "$cluster" == "standard" || "$cluster" == "all" ]]; then
1618
echo "Deleting cluster ${EKS_CLUSTER_NAME}"
1719
bash $SCRIPT_DIR/shell.sh "${environment}" 'delete-environment' || true
1820
bash $SCRIPT_DIR/exec.sh "${environment}" 'eksctl delete cluster --name ${EKS_CLUSTER_NAME} --region ${AWS_REGION} --wait --force --disable-nodegroup-eviction --timeout 45m'&
1921
else
20-
echo "Cluster ${EKS_CLUSTER_NAME} does not exist"
22+
echo "Cluster ${EKS_CLUSTER_NAME} does not exist or skipped"
2123
fi
2224

2325
export EKS_CLUSTER_AUTO_NAME="${EKS_CLUSTER_NAME}-auto"
2426
auto_cluster_exists=0
2527
aws eks describe-cluster --name "${EKS_CLUSTER_AUTO_NAME}" &> /dev/null || auto_cluster_exists=$?
2628

27-
if [ $auto_cluster_exists -eq 0 ]; then
29+
if [ $auto_cluster_exists -eq 0 ] && [[ "$cluster" == "auto" || "$cluster" == "all" ]]; then
2830
echo "Deleting auto mode cluster ${EKS_CLUSTER_AUTO_NAME}"
29-
bash $SCRIPT_DIR/shell.sh "${environment}" 'delete-environment' || true # Needed ?
31+
#bash $SCRIPT_DIR/shell.sh "${environment}" 'delete-environment' || true # Needed ?
3032
bash $SCRIPT_DIR/exec.sh "${environment}" 'eksctl delete cluster --name ${EKS_CLUSTER_AUTO_NAME} --region ${AWS_REGION} --wait --force --disable-nodegroup-eviction --timeout 45m'
3133
else
32-
echo "Auto mode cluster ${EKS_CLUSTER_AUTO_NAME} does not exist"
34+
echo "Auto mode cluster ${EKS_CLUSTER_AUTO_NAME} does not exist or skipped"
3335
fi
3436

3537
wait
3638

37-
aws cloudformation delete-stack --stack-name ${EKS_CLUSTER_NAME}-ide-role || true
39+
# Only delete ide-role if all clusters are deleted
40+
if [ "$cluster" == "all" ]; then
41+
aws cloudformation delete-stack --stack-name ${EKS_CLUSTER_NAME}-ide-role || true
42+
fi

hack/exec.sh

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,15 @@ container_image='eks-workshop-environment'
1919

2020
(cd $SCRIPT_DIR/../lab && $CONTAINER_CLI build -q -t $container_image .)
2121

22-
if [ -z "$SKIP_CREDENTIALS" ]; then
22+
echo "Checking SKIP: $SKIP_CREDENTIALS"
23+
if [ -z "$SKIP_CREDENTIALS" -a -z "$USE_CURRENT_USER" ]; then
2324
source $SCRIPT_DIR/lib/generate-aws-creds.sh
25+
elif [ -n "${USE_CURRENT_USER:-}" ]; then
26+
if [ -z "$AWS_ACCESS_KEY_ID" ]; then
27+
echo "No role credentials found, please check your AWS credentials"
28+
exit 1
29+
fi
30+
aws_credential_args="-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY -e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN"
2431
else
2532
aws_credential_args=""
2633
fi

hack/lib/common-env.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ if [ -z "$AWS_REGION" ]; then
1717
fi
1818

1919
SKIP_CREDENTIALS=${SKIP_CREDENTIALS:-""}
20+
USE_CURRENT_USER=${USE_CURRENT_USER:-""}
2021

2122
if [ -z "$SKIP_CREDENTIALS" ]; then
2223
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)

lab/scripts/entrypoint.sh

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,11 @@ bash /tmp/setup.sh
66

77
ln -s /eks-workshop/manifests /home/ec2-user/environment/eks-workshop
88

9+
# We do auto mode first so we default to standard
10+
if [ ! -z "$EKS_CLUSTER_AUTO_NAME" ]; then
11+
aws eks update-kubeconfig --name $EKS_CLUSTER_AUTO_NAME
12+
fi
13+
914
if [ ! -z "$EKS_CLUSTER_NAME" ]; then
1015
aws eks update-kubeconfig --name $EKS_CLUSTER_NAME
1116
fi

0 commit comments

Comments
 (0)