diff --git a/.github/workflows/cicd.yaml b/.github/workflows/cicd.yaml
new file mode 100644
index 000000000..69d7258c4
--- /dev/null
+++ b/.github/workflows/cicd.yaml
@@ -0,0 +1,63 @@
+name: Build, Test, and Deploy of 3-Tier-Application
+
+# on:
+#   push:
+#     branches:
+#       - main
+#   pull_request:
+#     branches:
+#       - main
+
+env:
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+  AWS_REGION: ${{ secrets.AWS_REGION }}
+  AWS_ECR_FRONTEND: ${{ secrets.AWS_ECR }}
+  FRONTEND_DOCKER_IMAGE: ${{ secrets.DOCKER_IMAGE }}
+  AWS_ECR_USERNAME: ${{ secrets.AWS_ECR_USERNAME }}
+  AWS_ECR_BACKEND: ${{ secrets.AWS_ECR_BACKEND }}
+  BACKEND_DOCKER_IMAGE: ${{ secrets.BACKEND_DOCKER_IMAGE }}
+  AWS_EKS_CLUSTER_NAME: ${{ secrets.AWS_EKS_CLUSTER_NAME }}
+  AWS_EKS_CLUSTER_NAMESPACE: ${{ secrets.AWS_EKS_CLUSTER_NAMESPACE }}
+  AWS_EKS_CLUSTER_REGION: ${{ secrets.AWS_EKS_CLUSTER_REGION }}
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout Repository
+        uses: actions/checkout@v4
+
+      # Configure AWS credentials for all subsequent aws CLI calls
+      - name: Set up AWS CLI
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ env.AWS_REGION }}
+
+      # ECR Public auth tokens are only issued in us-east-1, regardless of cluster region
+      - name: Login to Amazon ECR
+        run: |
+          aws ecr-public get-login-password --region us-east-1 | docker login --username ${{ env.AWS_ECR_USERNAME }} --password-stdin ${{ env.AWS_ECR_FRONTEND }}
+          aws ecr-public get-login-password --region us-east-1 | docker login --username ${{ env.AWS_ECR_USERNAME }} --password-stdin ${{ env.AWS_ECR_BACKEND }}
+
+      # Building and Tagging Docker Image
+      - name: Build and Tag Docker Image
+        run: |
+          docker build -t ${{ env.FRONTEND_DOCKER_IMAGE }} ./frontend/.
+          docker tag ${{ env.FRONTEND_DOCKER_IMAGE }}:latest ${{ env.AWS_ECR_FRONTEND }}/${{ env.FRONTEND_DOCKER_IMAGE }}:latest
+
+          docker build -t ${{ env.BACKEND_DOCKER_IMAGE }} ./backend/.
+          docker tag ${{ env.BACKEND_DOCKER_IMAGE }}:latest ${{ env.AWS_ECR_BACKEND }}/${{ env.BACKEND_DOCKER_IMAGE }}:latest
+
+      # Pushing Docker Image to ECR
+      - name: Push Docker Image to ECR
+        run: |
+          docker push ${{ env.AWS_ECR_FRONTEND }}/${{ env.FRONTEND_DOCKER_IMAGE }}:latest
+          docker push ${{ env.AWS_ECR_BACKEND }}/${{ env.BACKEND_DOCKER_IMAGE }}:latest
+
+      # Updating kubeconfig so kubectl targets the EKS cluster
+      - name: update kubeconfig
+        run: aws eks update-kubeconfig --region ${{ env.AWS_EKS_CLUSTER_REGION }} --name ${{ env.AWS_EKS_CLUSTER_NAME }}
diff --git a/README.md b/README.md
index 6ad463b02..65e6d99dc 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# #TWSThreeTierAppChallenge
+# TWSThreeTierAppChallenge
## Overview
This repository hosts the `#TWSThreeTierAppChallenge` for the TWS community.
@@ -54,21 +54,75 @@ sudo mv /tmp/eksctl /usr/local/bin
eksctl version
```
-### Step 7: Setup EKS Cluster
+### Step 7: Install Helm Chart
+``` shell
+curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+chmod 700 get_helm.sh
+./get_helm.sh
+helm version
+```
+
+### Step 8: Setup EKS Cluster
``` shell
eksctl create cluster --name three-tier-cluster --region us-west-2 --node-type t2.medium --nodes-min 2 --nodes-max 2
aws eks update-kubeconfig --region us-west-2 --name three-tier-cluster
kubectl get nodes
```
-### Step 8: Run Manifests
+### Step 9: Add the Helm Stable Charts
+``` shell
+helm repo add stable https://charts.helm.sh/stable
+```
+
+### Step 10: Add Prometheus Helm repo and Install Prometheus
+``` shell
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm install stable prometheus-community/kube-prometheus-stack -n workshop
+```
+
+### Step 11: Edit service for Prometheus
+``` shell
+kubectl edit svc stable-kube-prometheus-sta-prometheus -n workshop
+```
+
+
+**Change the service type from ClusterIP to LoadBalancer. After changing it, make sure to save the file.**
+
+### Step 12: Edit service for Grafana
+``` shell
+kubectl edit svc stable-grafana -n workshop
+```
+
+
+**Change the service type from ClusterIP to LoadBalancer. After changing it, make sure to save the file.**
+
+### Step 13: Making Dashboard using Grafana
+``` shell
+kubectl get svc -n workshop
+```
+> - Use the LoadBalancer link and access the Grafana in the browser.
+>
+> - Give username as "admin" and for password run the below command.
+> ``` shell
+> kubectl get secret --namespace workshop stable-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
+> ```
+>
+> - Click on create your first dashboard in Grafana.
+>
+> - Click on Import dashboard.
+>
+> - Enter the dashboard ID "15760", click on the Load button, and then select Prometheus as the data source.
+>
+> - Then click on Import.
+
+### Step 14: Run Manifests
``` shell
kubectl create namespace workshop
kubectl apply -f .
kubectl delete -f .
```
-### Step 9: Install AWS Load Balancer
+### Step 15: Install AWS Load Balancer
``` shell
curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.5.4/docs/install/iam_policy.json
aws iam create-policy --policy-name AWSLoadBalancerControllerIAMPolicy --policy-document file://iam_policy.json
@@ -76,7 +130,7 @@ eksctl utils associate-iam-oidc-provider --region=us-west-2 --cluster=three-tier
eksctl create iamserviceaccount --cluster=three-tier-cluster --namespace=kube-system --name=aws-load-balancer-controller --role-name AmazonEKSLoadBalancerControllerRole --attach-policy-arn=arn:aws:iam::626072240565:policy/AWSLoadBalancerControllerIAMPolicy --approve --region=us-west-2
```
-### Step 10: Deploy AWS Load Balancer Controller
+### Step 16: Deploy AWS Load Balancer Controller
``` shell
sudo snap install helm --classic
helm repo add eks https://aws.github.io/eks-charts
@@ -86,6 +140,16 @@ kubectl get deployment -n kube-system aws-load-balancer-controller
kubectl apply -f full_stack_lb.yaml
```
+### Important for CI/CD
+> - If you want to add Continuous Deployment (CD), you need to make a few changes in the GitHub workflow file `.github/workflows/cicd.yaml`.
+>
+> - Create all the secret variables that are listed under the `env` section in `cicd.yaml`. For creating them follow the below steps:
+> - Go to settings -> Security (Secrets and variables) -> Actions -> New repository secret -> Give Secret name and value -> Add secret.
+>
+> - Pass your own environment variables that you've created, replacing mine, including AWS CLI, AWS ECR, AWS EKS, and more.
+>
+> - Your AWS EKS cluster should be up and running for a smooth Continuous Delivery (CD) implementation.
+
### Cleanup
- To delete the EKS cluster:
``` shell
diff --git a/alb_install.sh b/alb_install.sh
new file mode 100755
index 000000000..ef55fdff2
--- /dev/null
+++ b/alb_install.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+set -euo pipefail
+curl -fO https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.5.4/docs/install/iam_policy.json
+aws iam create-policy --policy-name AWSLoadBalancerControllerIAMPolicy --policy-document file://iam_policy.json
+eksctl utils associate-iam-oidc-provider --region=us-west-2 --cluster=three-tier-cluster --approve
+eksctl create iamserviceaccount --cluster=three-tier-cluster --namespace=kube-system --name=aws-load-balancer-controller --role-name AmazonEKSLoadBalancerControllerRole --attach-policy-arn=arn:aws:iam::042770045671:policy/AWSLoadBalancerControllerIAMPolicy --approve --region=us-west-2
+
+sudo snap install helm --classic
+helm repo add eks https://aws.github.io/eks-charts
+helm repo update eks
+helm install aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=three-tier-cluster --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
+kubectl get deployment -n kube-system aws-load-balancer-controller
+kubectl apply -f ./k8s_manifests/alb_config/full_stack_lb.yaml
diff --git a/k8s_manifests/full_stack_lb.yaml b/k8s_manifests/alb_config/full_stack_lb.yaml
similarity index 77%
rename from k8s_manifests/full_stack_lb.yaml
rename to k8s_manifests/alb_config/full_stack_lb.yaml
index b5ba00816..d50912801 100644
--- a/k8s_manifests/full_stack_lb.yaml
+++ b/k8s_manifests/alb_config/full_stack_lb.yaml
@@ -28,3 +28,10 @@ spec:
name: frontend
port:
number: 3000
+ - path: /metrics
+ pathType: Prefix
+ backend:
+ service:
+ name: stable-kube-prometheus-sta-prometheus
+ port:
+ number: 9090
diff --git a/k8s_manifests/backend-deployment.yaml b/k8s_manifests/backend_config/backend-deployment.yaml
similarity index 87%
rename from k8s_manifests/backend-deployment.yaml
rename to k8s_manifests/backend_config/backend-deployment.yaml
index 51472127a..4b9705c52 100644
--- a/k8s_manifests/backend-deployment.yaml
+++ b/k8s_manifests/backend_config/backend-deployment.yaml
@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: api
- image: public.ecr.aws/w8u5e4v2/workshop-backend:v1
+ image: public.ecr.aws/u6o2f0j6/workshop-backend:latest
imagePullPolicy: Always
env:
- name: MONGO_CONN_STR
@@ -40,6 +40,11 @@ spec:
key: password
ports:
- containerPort: 8080
+ resources:
+ requests:
+ cpu: "100m"
+ limits:
+ cpu: "200m"
livenessProbe:
httpGet:
path: /ok
diff --git a/k8s_manifests/backend-service.yaml b/k8s_manifests/backend_config/backend-service.yaml
similarity index 100%
rename from k8s_manifests/backend-service.yaml
rename to k8s_manifests/backend_config/backend-service.yaml
diff --git a/k8s_manifests/backend_config/hpa.yaml b/k8s_manifests/backend_config/hpa.yaml
new file mode 100644
index 000000000..b542e858b
--- /dev/null
+++ b/k8s_manifests/backend_config/hpa.yaml
@@ -0,0 +1,20 @@
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: backend-hpa
+  namespace: workshop
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: api
+  minReplicas: 2
+  maxReplicas: 3
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        # autoscaling/v2 schema: scale when mean CPU utilization exceeds 50%
+        target:
+          type: Utilization
+          averageUtilization: 50
diff --git a/k8s_manifests/frontend-deployment.yaml b/k8s_manifests/frontend_config/frontend-deployment.yaml
similarity index 72%
rename from k8s_manifests/frontend-deployment.yaml
rename to k8s_manifests/frontend_config/frontend-deployment.yaml
index fd8326d8c..281d421c5 100644
--- a/k8s_manifests/frontend-deployment.yaml
+++ b/k8s_manifests/frontend_config/frontend-deployment.yaml
@@ -23,11 +23,17 @@ spec:
spec:
containers:
- name: frontend
- image: public.ecr.aws/e3i3d3z5/three-tier-frontend-d:latest
+ image: public.ecr.aws/u6o2f0j6/three-tier-frontend:latest
imagePullPolicy: Always
env:
- name: REACT_APP_BACKEND_URL
value: "http://app.trainwithshubham.com/api/tasks"
ports:
- containerPort: 3000
-
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "128Mi"
+ limits:
+ cpu: "200m"
+ memory: "256Mi"
diff --git a/k8s_manifests/frontend-service.yaml b/k8s_manifests/frontend_config/frontend-service.yaml
similarity index 100%
rename from k8s_manifests/frontend-service.yaml
rename to k8s_manifests/frontend_config/frontend-service.yaml
diff --git a/k8s_manifests/frontend_config/hpa.yaml b/k8s_manifests/frontend_config/hpa.yaml
new file mode 100644
index 000000000..b4d00fa15
--- /dev/null
+++ b/k8s_manifests/frontend_config/hpa.yaml
@@ -0,0 +1,20 @@
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: frontend-hpa
+  namespace: workshop
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: frontend
+  minReplicas: 2
+  maxReplicas: 10
+  metrics:
+    # NOTE(review): requests-per-second is a custom metric and requires a
+    # custom-metrics adapter (e.g. prometheus-adapter) to be installed.
+    - type: Pods
+      pods:
+        metric:
+          name: requests-per-second
+        target:
+          type: AverageValue
+          averageValue: "100"
diff --git a/k8s_manifests/mongo/deploy.yaml b/k8s_manifests/mongo_config/deploy.yaml
similarity index 100%
rename from k8s_manifests/mongo/deploy.yaml
rename to k8s_manifests/mongo_config/deploy.yaml
diff --git a/k8s_manifests/mongo/secrets.yaml b/k8s_manifests/mongo_config/secrets.yaml
similarity index 100%
rename from k8s_manifests/mongo/secrets.yaml
rename to k8s_manifests/mongo_config/secrets.yaml
diff --git a/k8s_manifests/mongo/service.yaml b/k8s_manifests/mongo_config/service.yaml
similarity index 100%
rename from k8s_manifests/mongo/service.yaml
rename to k8s_manifests/mongo_config/service.yaml