Skip to content

Commit 9dd3578

Browse files
committed
fix: optimize prepare-environment - skip StatefulSet deletion unless EBS lab ran, restart deployments for configmap changes
1 parent 6c4ae0c commit 9dd3578

3 files changed

Lines changed: 22 additions & 9 deletions

File tree

lab/bin/reset-environment

Lines changed: 12 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -117,13 +117,15 @@ if [ ! -z "$module" ]; then
117117
rm -f /eks-workshop/hooks/cleanup.sh
118118
fi
119119

120-
# Delete StatefulSets before reapplying base app to avoid spec conflicts
121-
kubectl delete statefulset -l app.kubernetes.io/created-by=eks-workshop -A --ignore-not-found 2>/dev/null || true
122-
# Wait for StatefulSets to be fully deleted
123-
sleep 5
124-
while kubectl get statefulset -l app.kubernetes.io/created-by=eks-workshop -A --no-headers 2>/dev/null | grep -q .; do
125-
sleep 2
126-
done
120+
# Only delete StatefulSets if EBS lab modified them (StorageClass ebs-sc exists)
121+
# Otherwise the base app apply is idempotent and StatefulSets don't need recreation
122+
if kubectl get storageclass ebs-sc &>/dev/null; then
123+
kubectl delete statefulset -l app.kubernetes.io/created-by=eks-workshop -A --ignore-not-found 2>/dev/null || true
124+
sleep 5
125+
while kubectl get statefulset -l app.kubernetes.io/created-by=eks-workshop -A --no-headers 2>/dev/null | grep -q .; do
126+
sleep 2
127+
done
128+
fi
127129

128130
# Deploy base application and run one-time preprovision in parallel
129131
logmessage "\n📦 Deploying base application..."
@@ -165,10 +167,11 @@ if [ ! -z "$module" ]; then
165167
kubectl wait --for=condition=available --timeout=480s deployments -l app.kubernetes.io/created-by=eks-workshop -A
166168
kubectl wait --for=condition=Ready --timeout=480s pods -l app.kubernetes.io/created-by=eks-workshop -A
167169

168-
# Restart service pods to ensure DB migrations run against initialized databases
169-
# (catalog and orders may have started before their MySQL/PostgreSQL were ready)
170+
# Restart deployments to pick up any configmap changes from the base app restore
171+
kubectl rollout restart deployment/carts -n carts
170172
kubectl rollout restart deployment/catalog -n catalog
171173
kubectl rollout restart deployment/orders -n orders
174+
kubectl rollout status deployment/carts -n carts --timeout=120s
172175
kubectl rollout status deployment/catalog -n catalog --timeout=120s
173176
kubectl rollout status deployment/orders -n orders --timeout=120s
174177

manifests/modules/fastpaths/developers/.workshop/cleanup.sh

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,11 @@ for assoc in $(aws eks list-pod-identity-associations --cluster-name ${EKS_CLUST
2525
aws eks delete-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --association-id $assoc 2>/dev/null || true
2626
done
2727

28+
# Delete pod identity associations for keda (created by install-keda.md)
29+
for assoc in $(aws eks list-pod-identity-associations --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --namespace keda --query 'associations[].associationId' --output text 2>/dev/null); do
30+
aws eks delete-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --association-id $assoc 2>/dev/null || true
31+
done
32+
2833
# Delete network policies
2934
kubectl delete networkpolicy --all -A --ignore-not-found 2>/dev/null || true
3035

manifests/modules/fastpaths/operators/.workshop/cleanup.sh

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@ for assoc in $(aws eks list-pod-identity-associations --cluster-name ${EKS_CLUST
2020
aws eks delete-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --association-id $assoc 2>/dev/null || true
2121
done
2222

23+
# Delete pod identity associations for keda (created by install-keda.md)
24+
for assoc in $(aws eks list-pod-identity-associations --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --namespace keda --query 'associations[].associationId' --output text 2>/dev/null); do
25+
aws eks delete-pod-identity-association --cluster-name ${EKS_CLUSTER_AUTO_NAME:-eks-workshop-auto} --association-id $assoc 2>/dev/null || true
26+
done
27+
2328
# Delete modified StatefulSets + PVCs (from any EBS changes)
2429
kubectl delete statefulset -l app.kubernetes.io/created-by=eks-workshop -A --ignore-not-found
2530
kubectl delete pvc --all -A --ignore-not-found

0 commit comments

Comments (0)