Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
63 changes: 63 additions & 0 deletions .github/workflows/cicd.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
name: Build, Test, and Deploy of 3-Tier-Application

# A workflow file without an `on:` trigger is invalid and will never run.
# `workflow_dispatch` keeps the pipeline manual-only (matching the original
# intent of the commented-out triggers); uncomment the push/pull_request
# block below to also run automatically against main.
on:
  workflow_dispatch:
  # push:
  #   branches:
  #     - main
  # pull_request:
  #   branches:
  #     - main

# All values come from repository secrets — see the README section
# "Important for CI/CD" for how to create them.
env:
  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
  AWS_REGION: ${{ secrets.AWS_REGION }}
  AWS_ECR_FRONTEND: ${{ secrets.AWS_ECR }}
  FRONTEND_DOCKER_IMAGE: ${{ secrets.DOCKER_IMAGE }}
  AWS_ECR_USERNAME: ${{ secrets.AWS_ECR_USERNAME }}
  AWS_ECR_BACKEND: ${{ secrets.AWS_ECR_BACKEND }}
  BACKEND_DOCKER_IMAGE: ${{ secrets.BACKEND_DOCKER_IMAGE }}
  AWS_EKS_CLUSTER_NAME: ${{ secrets.AWS_EKS_CLUSTER_NAME }}
  AWS_EKS_CLUSTER_NAMESPACE: ${{ secrets.AWS_EKS_CLUSTER_NAMESPACE }}
  AWS_EKS_CLUSTER_REGION: ${{ secrets.AWS_EKS_CLUSTER_REGION }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest

    steps:
      # checkout@v2 runs on the deprecated Node 12 runtime; v4 is current.
      - name: Checkout Repository
        uses: actions/checkout@v4

      # Configure AWS credentials for every following aws/docker command.
      - name: Set up AWS CLI
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      # Log docker in to both public ECR registries (frontend and backend).
      - name: Login to Amazon ECR
        run: |
          aws ecr-public get-login-password --region ${{ env.AWS_REGION }} | docker login --username ${{ env.AWS_ECR_USERNAME }} --password-stdin ${{ env.AWS_ECR_FRONTEND }}
          aws ecr-public get-login-password --region ${{ env.AWS_REGION }} | docker login --username ${{ env.AWS_ECR_USERNAME }} --password-stdin ${{ env.AWS_ECR_BACKEND }}

      # Build both images and tag them for their respective registries.
      - name: Build and Tag Docker Image
        run: |
          docker build -t ${{ env.FRONTEND_DOCKER_IMAGE }} ./frontend/.
          docker tag ${{ env.FRONTEND_DOCKER_IMAGE }}:latest ${{ env.AWS_ECR_FRONTEND }}/${{ env.FRONTEND_DOCKER_IMAGE }}:latest

          docker build -t ${{ env.BACKEND_DOCKER_IMAGE }} ./backend/.
          docker tag ${{ env.BACKEND_DOCKER_IMAGE }}:latest ${{ env.AWS_ECR_BACKEND }}/${{ env.BACKEND_DOCKER_IMAGE }}:latest

      # Push both tagged images to ECR.
      - name: Push Docker Image to ECR
        run: |
          docker push ${{ env.AWS_ECR_FRONTEND }}/${{ env.FRONTEND_DOCKER_IMAGE }}:latest
          docker push ${{ env.AWS_ECR_BACKEND }}/${{ env.BACKEND_DOCKER_IMAGE }}:latest

      # Point kubectl at the target EKS cluster.
      # NOTE(review): no `kubectl apply`/`helm upgrade` step follows — confirm
      # whether the actual deployment is intended to happen elsewhere, or add
      # a deploy step after this one.
      - name: update kubeconfig
        run: aws eks update-kubeconfig --region ${{ env.AWS_EKS_CLUSTER_REGION }} --name ${{ env.AWS_EKS_CLUSTER_NAME }}
74 changes: 69 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# #TWSThreeTierAppChallenge
# TWSThreeTierAppChallenge

## Overview
This repository hosts the `#TWSThreeTierAppChallenge` for the TWS community.
Expand Down Expand Up @@ -54,29 +54,83 @@ sudo mv /tmp/eksctl /usr/local/bin
eksctl version
```

### Step 7: Setup EKS Cluster
### Step 7: Install Helm Chart
``` shell
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
helm version
```

### Step 8: Setup EKS Cluster
``` shell
eksctl create cluster --name three-tier-cluster --region us-west-2 --node-type t2.medium --nodes-min 2 --nodes-max 2
aws eks update-kubeconfig --region us-west-2 --name three-tier-cluster
kubectl get nodes
```

### Step 8: Run Manifests
### Step 9: Add the Helm Stable Charts
``` shell
helm repo add stable https://charts.helm.sh/stable
```

### Step 10: Add Prometheus Helm repo and Install Prometheus
``` shell
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm install stable prometheus-community/kube-prometheus-stack -n workshop
```

### Step 11: Edit service for Prometheus
``` shell
kubectl edit svc stable-kube-prometheus-sta-prometheus -n workshop
```
<img width="509" alt="Screenshot 2024-01-17 154212" src="https://github.com/iamamash/TWSThreeTierAppChallenge/assets/42666741/c0ed6acc-a95f-4823-b978-df31d8a1bf26">

**Change it from ClusterIP to LoadBalancer; after changing, make sure to save the file.**

### Step 12: Edit service for Grafana
``` shell
kubectl edit svc stable-grafana -n workshop
```
<img width="838" alt="Screenshot 2024-01-17 154656" src="https://github.com/iamamash/TWSThreeTierAppChallenge/assets/42666741/55f3302e-2a4e-49bb-9444-0e40829e4a0b">

**Change it from ClusterIP to LoadBalancer; after changing, make sure to save the file.**

### Step 13: Making Dashboard using Grafana
``` shell
kubectl get svc -n workshop
```
> - Use the LoadBalancer link and access the Grafana in the browser.
>
> - Give username as "admin" and for password run the below command.
> ``` shell
> kubectl get secret --namespace workshop stable-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
> ```
>
> - Click on create your first dashboard in Grafana.
>
> - click on import dashboard.
>
> - Enter the dashboard ID "15760", select Prometheus as the data source in the Load field, and click the Load button.
>
> - Then click on Import.

### Step 14: Run Manifests
``` shell
kubectl create namespace workshop
kubectl apply -f .
kubectl delete -f .
```

### Step 9: Install AWS Load Balancer
### Step 15: Install AWS Load Balancer
``` shell
curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.5.4/docs/install/iam_policy.json
aws iam create-policy --policy-name AWSLoadBalancerControllerIAMPolicy --policy-document file://iam_policy.json
eksctl utils associate-iam-oidc-provider --region=us-west-2 --cluster=three-tier-cluster --approve
eksctl create iamserviceaccount --cluster=three-tier-cluster --namespace=kube-system --name=aws-load-balancer-controller --role-name AmazonEKSLoadBalancerControllerRole --attach-policy-arn=arn:aws:iam::626072240565:policy/AWSLoadBalancerControllerIAMPolicy --approve --region=us-west-2
```

### Step 10: Deploy AWS Load Balancer Controller
### Step 16: Deploy AWS Load Balancer Controller
``` shell
sudo snap install helm --classic
helm repo add eks https://aws.github.io/eks-charts
Expand All @@ -86,6 +140,16 @@ kubectl get deployment -n kube-system aws-load-balancer-controller
kubectl apply -f full_stack_lb.yaml
```

### Important for CI/CD
> - If you want to add Continuous Deployment (CD), you need to make a few changes in the GitHub workflow file `.github/workflows/cicd.yaml`.
>
> - Create all the secret variables that are listed under the `env` section in `cicd.yaml`. For creating them follow the below steps:
> - Go to Settings -> Secrets and variables -> Actions -> New repository secret -> enter the secret name and value -> Add secret.
>
> - Replace my environment variables with your own values, including those for the AWS CLI, AWS ECR, AWS EKS, and so on.
>
> - Your AWS EKS Cluster should be up and running for smooth and Continuous Delivery (CD) implementation.

### Cleanup
- To delete the EKS cluster:
``` shell
Expand Down
11 changes: 11 additions & 0 deletions alb_install.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Install the AWS Load Balancer Controller on the "three-tier-cluster" EKS
# cluster in us-west-2, then apply the full-stack load-balancer manifest.
# NOTE(review): the account id 042770045671 in the policy ARN below is
# hard-coded — replace it with your own AWS account id before running.

# Download the controller's required IAM policy and create it in IAM.
curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.5.4/docs/install/iam_policy.json
aws iam create-policy --policy-name AWSLoadBalancerControllerIAMPolicy --policy-document file://iam_policy.json
# Associate the cluster's OIDC provider, then create the kube-system service
# account bound to the policy created above.
eksctl utils associate-iam-oidc-provider --region=us-west-2 --cluster=three-tier-cluster --approve
eksctl create iamserviceaccount --cluster=three-tier-cluster --namespace=kube-system --name=aws-load-balancer-controller --role-name AmazonEKSLoadBalancerControllerRole --attach-policy-arn=arn:aws:iam::042770045671:policy/AWSLoadBalancerControllerIAMPolicy --approve --region=us-west-2

# Install the controller via Helm, reusing the pre-created service account.
sudo snap install helm --classic
helm repo add eks https://aws.github.io/eks-charts
helm repo update eks
helm install aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=three-tier-cluster --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
# Verify the controller rolled out, then apply the ingress/ALB manifest.
kubectl get deployment -n kube-system aws-load-balancer-controller
kubectl apply -f ./k8s_manifests/alb_config/full_stack_lb.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,10 @@ spec:
name: frontend
port:
number: 3000
- path: /metrics
pathType: Prefix
backend:
service:
name: stable-kube-prometheus-sta-prometheus
port:
number: 9090
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: api
image: public.ecr.aws/w8u5e4v2/workshop-backend:v1
image: public.ecr.aws/u6o2f0j6/workshop-backend:latest
imagePullPolicy: Always
env:
- name: MONGO_CONN_STR
Expand All @@ -40,6 +40,11 @@ spec:
key: password
ports:
- containerPort: 8080
resources:
requests:
cpu: "100m"
limits:
cpu: "200m"
livenessProbe:
httpGet:
path: /ok
Expand Down
18 changes: 18 additions & 0 deletions k8s_manifests/backend_config/hpa.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# CPU-based autoscaler for the backend deployment.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: backend-hpa
  namespace: workshop
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: api
  minReplicas: 2
  maxReplicas: 3
  metrics:
    - type: Resource
      resource:
        name: cpu
        # `targetAverageUtilization` is autoscaling/v2beta1 syntax and is
        # rejected by the v2 API; v2 requires a `target` object. Scales up
        # when average CPU across pods exceeds 50% of the requested CPU.
        target:
          type: Utilization
          averageUtilization: 50

Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,17 @@ spec:
spec:
containers:
- name: frontend
image: public.ecr.aws/e3i3d3z5/three-tier-frontend-d:latest
image: public.ecr.aws/u6o2f0j6/three-tier-frontend:latest
imagePullPolicy: Always
env:
- name: REACT_APP_BACKEND_URL
value: "http://app.trainwithshubham.com/api/tasks"
ports:
- containerPort: 3000

resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "200m"
memory: "256Mi"
17 changes: 17 additions & 0 deletions k8s_manifests/frontend_config/hpa.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Pods-metric autoscaler for the frontend deployment.
# autoscaling/v2beta2 was removed in Kubernetes 1.26, and the original
# `metricName`/`targetAverageValue` fields were v2beta1 syntax, invalid even
# under v2beta2 — migrated to the GA autoscaling/v2 schema.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: frontend-hpa
  namespace: workshop
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: frontend
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Pods
      pods:
        metric:
          # NOTE(review): a custom Pods metric requires a custom-metrics
          # adapter (e.g. prometheus-adapter) exposing `requests-per-second`
          # — confirm one is installed, or this HPA will never scale.
          name: requests-per-second
        target:
          type: AverageValue
          # Quantities must be strings in the v2 API.
          averageValue: "100"
File renamed without changes.