Prerequisites #
Install AWS CLI #
# Update packages
sudo apt update
# Install the unzip tool
sudo apt install unzip -y
# Download AWS CLI zip file
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
# Unzip
unzip awscliv2.zip
# Install
sudo ./aws/install
# Verify installation / check version
/usr/local/bin/aws --version
Configure AWS CLI #
# Start AWS CLI configuration
aws configure
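The command prompts interactively for the Access Key ID, Secret Access Key, default region, and output format. Alternatively, the same values can be set non-interactively, as in the following sketch (the credential values are placeholders):
# Non-interactive configuration (placeholder values)
aws configure set aws_access_key_id "<access-key-id>"
aws configure set aws_secret_access_key "<secret-access-key>"
aws configure set region us-east-1
aws configure set output json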
Install Terraform #
# Install the HashiCorp GPG key
wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg > /dev/null
# Verify the GPG key fingerprint
gpg --no-default-keyring --keyring /usr/share/keyrings/hashicorp-archive-keyring.gpg --fingerprint
# Add the official HashiCorp repository
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
# Install Terraform
sudo apt update && sudo apt install terraform -y
# Verify installation / check version
terraform version
Install Kubectl #
# Install Kubectl
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" &&
chmod +x kubectl &&
sudo mv kubectl /usr/local/bin/
# Verify installation / check version
kubectl version --client
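Optionally, the downloaded binary can be verified against its published SHA256 checksum before it is moved to /usr/local/bin (per the upstream Kubernetes install docs; run this from the download directory):
# Download the checksum for the same kubectl release
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
# Validate the binary against the checksum
echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check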
Terraform EKS Cluster Template #
File and Folder Structure #
The file and folder structure of the Terraform template looks like this:
aws-eks-cluster
├── eks-cluster.tf
├── networking.tf
├── outputs.tf
└── terraform.tf
Project Folder #
# Create a project folder
TF_PROJECT_NAME=aws-eks-cluster
mkdir $TF_PROJECT_NAME && cd $TF_PROJECT_NAME
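Optionally, create the empty configuration files for the structure shown above right away:
# Create the Terraform configuration files
touch eks-cluster.tf networking.tf outputs.tf terraform.tf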
Provider #
- terraform.tf
# Terraform Provider
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.47.0"
    }
    tls = {
      source  = "hashicorp/tls"
      version = "~> 4.0.5"
    }
    cloudinit = {
      source  = "hashicorp/cloudinit"
      version = "~> 2.3.4"
    }
  }
  required_version = "~> 1.3"
}
# AWS Provider
provider "aws" {
  region = "us-east-1"
}
VPC & Subnets #
- networking.tf
# Use VPC module
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "5.8.1"
# VPC configuration
name = "vpc-dev" # VPC name
cidr = "10.10.0.0/16" # VPC CIDR
# Define Subnets
# Determine Availability Zone usage
azs = slice(data.aws_availability_zones.available.names, 0, 3)
# Define Public Subnet CIDR
public_subnets = ["10.10.0.0/24", "10.10.1.0/24", "10.10.2.0/24"]
# Define Private Subnet CIDR
private_subnets = ["10.10.3.0/24", "10.10.4.0/24", "10.10.5.0/24"]
# Enable NAT Gateway
enable_nat_gateway = true
single_nat_gateway = true # Use one NAT Gateway for entire VPC, instead of one per availability zone
enable_dns_hostnames = true # Enable DNS resolution for communication between resources in the VPC
# ELB LoadBalancers placement
public_subnet_tags = {
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/role/internal-elb" = 1
}
}
# Retrieve available Availability Zones / Filter out zones with unsupported managed node groups
data "aws_availability_zones" "available" {
filter {
name = "opt-in-status"
values = ["opt-in-not-required"]
}
}
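The slice(...) expression picks the first three of the matched zones. As a sanity check, the zones the data source will resolve to can be previewed with the AWS CLI:
# Preview the Availability Zones matched by the data source
aws ec2 describe-availability-zones \
  --region us-east-1 \
  --filters Name=opt-in-status,Values=opt-in-not-required \
  --query "AvailabilityZones[].ZoneName" \
  --output text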
EKS Cluster #
- eks-cluster.tf
# Use EKS module
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "20.8.5"
# Cluster Name
cluster_name = "eks-example"
cluster_version = "1.29"
cluster_endpoint_public_access = true
enable_cluster_creator_admin_permissions = true
# EBS CSI
cluster_addons = {
aws-ebs-csi-driver = {
service_account_role_arn = module.irsa-ebs-csi.iam_role_arn
}
}
# VPC and Subnets
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
# VM Image
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64" # Amazon Linux 2
}
# Worker Nodes
eks_managed_node_groups = {
default_node_group = {
name = "node-group-1"
instance_types = ["t3.small"]
desired_size = 3
min_size = 3
max_size = 3
      vpc_security_group_ids = [module.vpc.default_security_group_id] # Attach the VPC default security group to the nodes
    }
  }
}
# IAM Role for EKS Worker Nodes
resource "aws_iam_role" "eks_nodes" {
  assume_role_policy = data.aws_iam_policy_document.eks_assume_role_policy.json
  name               = "eks-nodes-role"
}
# Allows EC2 instances to assume the "eks_nodes" role
data "aws_iam_policy_document" "eks_assume_role_policy" {
  statement {
    effect = "Allow"
    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
    actions = ["sts:AssumeRole"]
  }
}
# Enable EBS Volume Management for the EBS CSI Driver
data "aws_iam_policy" "ebs_csi_policy" {
  arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}
module "irsa-ebs-csi" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
  version = "5.39.0"
  create_role                   = true
  role_name                     = "AmazonEKSTFEBSCSIRole-${module.eks.cluster_name}"
  provider_url                  = module.eks.oidc_provider
  role_policy_arns              = [data.aws_iam_policy.ebs_csi_policy.arn]
  oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"]
}
Outputs #
- outputs.tf
# Outputs
output "cluster_endpoint" {
description = "Endpoint for EKS control plane"
value = module.eks.cluster_endpoint
}
output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane"
value = module.eks.cluster_security_group_id
}
output "public_subnets" {
description = "Public subnets"
value = module.vpc.public_subnets
}
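Once the stack is deployed, each output can also be read individually, for example:
# Print a single output value as a plain string
terraform output -raw cluster_endpoint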
Deploy EKS Cluster #
Initialize Terraform Project #
This downloads and installs the Terraform providers defined in the terraform.tf file ("hashicorp/aws", "hashicorp/tls" and "hashicorp/cloudinit"), as well as the modules used in the configuration, and initializes the project directory.
# Initialize the Terraform project
terraform init
Validate Configuration Files #
# Validates the syntax and structure of Terraform configuration files
terraform validate
# Shell output:
Success! The configuration is valid.
Plan the Deployment #
# Dry run / preview changes before applying them
terraform plan
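Optionally, the plan can be written to a file, so that the apply step executes exactly what was previewed ("tfplan" is an arbitrary file name):
# Save the plan to a file and apply it later
terraform plan -out=tfplan
terraform apply tfplan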
Apply the Configuration #
- It takes about 14 minutes to deploy the EKS cluster
# Deploy the EKS cluster and networking stack
terraform apply -auto-approve
# Shell output:
Apply complete! Resources: 60 added, 0 changed, 0 destroyed.
Outputs:
cluster_endpoint = "https://347DA2EDB74EDB755D80EBE591A626F9.gr7.us-east-1.eks.amazonaws.com"
cluster_security_group_id = "sg-0f0a872734c8970b9"
public_subnets = [
  "subnet-00c9ac337d02d5d18",
  "subnet-0d4583d03dfbb70fd",
  "subnet-002823dd4a50a5309",
]
Verify Deployment State #
# Lists all resources tracked in the Terraform state file
terraform state list
# Shell output:
data.aws_availability_zones.available
data.aws_iam_policy.ebs_csi_policy
data.aws_iam_policy_document.eks_assume_role_policy
aws_iam_role.eks_nodes
module.eks.data.aws_caller_identity.current
module.eks.data.aws_eks_addon_version.this["aws-ebs-csi-driver"]
module.eks.data.aws_iam_policy_document.assume_role_policy[0]
module.eks.data.aws_iam_session_context.current
module.eks.data.aws_partition.current
module.eks.data.tls_certificate.this[0]
module.eks.aws_cloudwatch_log_group.this[0]
module.eks.aws_eks_access_entry.this["cluster_creator"]
module.eks.aws_eks_access_policy_association.this["cluster_creator_admin"]
module.eks.aws_eks_addon.this["aws-ebs-csi-driver"]
module.eks.aws_eks_cluster.this[0]
module.eks.aws_iam_openid_connect_provider.oidc_provider[0]
module.eks.aws_iam_policy.cluster_encryption[0]
module.eks.aws_iam_role.this[0]
module.eks.aws_iam_role_policy_attachment.cluster_encryption[0]
module.eks.aws_iam_role_policy_attachment.this["AmazonEKSClusterPolicy"]
module.eks.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"]
module.eks.aws_security_group.cluster[0]
module.eks.aws_security_group.node[0]
module.eks.aws_security_group_rule.cluster["ingress_nodes_443"]
module.eks.aws_security_group_rule.node["egress_all"]
module.eks.aws_security_group_rule.node["ingress_cluster_443"]
module.eks.aws_security_group_rule.node["ingress_cluster_4443_webhook"]
module.eks.aws_security_group_rule.node["ingress_cluster_6443_webhook"]
module.eks.aws_security_group_rule.node["ingress_cluster_8443_webhook"]
module.eks.aws_security_group_rule.node["ingress_cluster_9443_webhook"]
module.eks.aws_security_group_rule.node["ingress_cluster_kubelet"]
module.eks.aws_security_group_rule.node["ingress_nodes_ephemeral"]
module.eks.aws_security_group_rule.node["ingress_self_coredns_tcp"]
module.eks.aws_security_group_rule.node["ingress_self_coredns_udp"]
module.eks.time_sleep.this[0]
module.irsa-ebs-csi.data.aws_caller_identity.current
module.irsa-ebs-csi.data.aws_iam_policy_document.assume_role_with_oidc[0]
module.irsa-ebs-csi.data.aws_partition.current
module.irsa-ebs-csi.aws_iam_role.this[0]
module.irsa-ebs-csi.aws_iam_role_policy_attachment.custom[0]
module.vpc.aws_default_network_acl.this[0]
module.vpc.aws_default_route_table.default[0]
module.vpc.aws_default_security_group.this[0]
module.vpc.aws_eip.nat[0]
module.vpc.aws_internet_gateway.this[0]
module.vpc.aws_nat_gateway.this[0]
module.vpc.aws_route.private_nat_gateway[0]
module.vpc.aws_route.public_internet_gateway[0]
module.vpc.aws_route_table.private[0]
module.vpc.aws_route_table.public[0]
module.vpc.aws_route_table_association.private[0]
module.vpc.aws_route_table_association.private[1]
module.vpc.aws_route_table_association.private[2]
module.vpc.aws_route_table_association.public[0]
module.vpc.aws_route_table_association.public[1]
module.vpc.aws_route_table_association.public[2]
module.vpc.aws_subnet.private[0]
module.vpc.aws_subnet.private[1]
module.vpc.aws_subnet.private[2]
module.vpc.aws_subnet.public[0]
module.vpc.aws_subnet.public[1]
module.vpc.aws_subnet.public[2]
module.vpc.aws_vpc.this[0]
module.eks.module.eks_managed_node_group["default_node_group"].data.aws_caller_identity.current
module.eks.module.eks_managed_node_group["default_node_group"].data.aws_iam_policy_document.assume_role_policy[0]
module.eks.module.eks_managed_node_group["default_node_group"].data.aws_partition.current
module.eks.module.eks_managed_node_group["default_node_group"].aws_eks_node_group.this[0]
module.eks.module.eks_managed_node_group["default_node_group"].aws_iam_role.this[0]
module.eks.module.eks_managed_node_group["default_node_group"].aws_iam_role_policy_attachment.this["AmazonEC2ContainerRegistryReadOnly"]
module.eks.module.eks_managed_node_group["default_node_group"].aws_iam_role_policy_attachment.this["AmazonEKSWorkerNodePolicy"]
module.eks.module.eks_managed_node_group["default_node_group"].aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"]
module.eks.module.eks_managed_node_group["default_node_group"].aws_launch_template.this[0]
module.eks.module.kms.data.aws_caller_identity.current[0]
module.eks.module.kms.data.aws_iam_policy_document.this[0]
module.eks.module.kms.data.aws_partition.current[0]
module.eks.module.kms.aws_kms_alias.this["cluster"]
module.eks.module.kms.aws_kms_key.this[0]
module.eks.module.eks_managed_node_group["default_node_group"].module.user_data.null_resource.validate_cluster_service_cidr
Verify & Access EKS Cluster #
List EKS Cluster ARN #
# List the EKS cluster ARN
aws eks describe-cluster --name eks-example --region us-east-1 --query "cluster.arn" --output text
# Shell output:
arn:aws:eks:us-east-1:012345678912:cluster/eks-example
Configure Kubectl #
# Update your local Kubectl configuration with the cluster details
aws eks --region us-east-1 update-kubeconfig --name eks-example
# Shell output:
Added new context arn:aws:eks:us-east-1:012345678912:cluster/eks-example to /home/ubuntu/.kube/config
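Verify that Kubectl now points at the EKS cluster:
# Show the active Kubectl context
kubectl config current-context
# Shell output:
arn:aws:eks:us-east-1:012345678912:cluster/eks-example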
List Cluster Nodes #
# List cluster nodes
kubectl get nodes -o wide
# Shell output:
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
ip-10-10-3-243.ec2.internal Ready <none> 3m32s v1.29.10-eks-59bf375 10.10.3.243 <none> Amazon Linux 2 5.10.228-219.884.amzn2.x86_64 containerd://1.7.23
ip-10-10-4-131.ec2.internal Ready <none> 3m31s v1.29.10-eks-59bf375 10.10.4.131 <none> Amazon Linux 2 5.10.228-219.884.amzn2.x86_64 containerd://1.7.23
ip-10-10-5-153.ec2.internal Ready <none> 3m25s v1.29.10-eks-59bf375 10.10.5.153 <none> Amazon Linux 2 5.10.228-219.884.amzn2.x86_64 containerd://1.7.23
Verify Container Storage Interface (CSI) #
# List CSI Drivers
kubectl get csidrivers
# Shell output:
NAME ATTACHREQUIRED PODINFOONMOUNT STORAGECAPACITY TOKENREQUESTS REQUIRESREPUBLISH MODES AGE
ebs.csi.aws.com true false false <unset> false Persistent 3m41s
efs.csi.aws.com false false false <unset> false Persistent 11m
# List StorageClasses
kubectl get storageclasses
# Shell output:
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
gp2 (default) kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 11m
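Optionally, check that the controller pods deployed by the aws-ebs-csi-driver addon are running (the exact pod names vary with the addon version):
# List the EBS CSI driver pods
kubectl get pods -n kube-system | grep ebs-csi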
Example Deployment #
List TLS Certificate ARN #
# List the ARN of the AWS Certificate
aws acm list-certificates \
--region us-east-1 \
--query "CertificateSummaryList[*].{CertificateArn:CertificateArn,DomainName:DomainName}" \
--output table
# Shell output:
---------------------------------------------------------------------------------------------------------
| ListCertificates |
+---------------------------------------------------------------------------------------+---------------+
| CertificateArn | DomainName |
+---------------------------------------------------------------------------------------+---------------+
| arn:aws:acm:us-east-1:012345678912:certificate/ef60b545-2a1a-4fcb-988d-dd68cd491354 | *.jklug.work |
+---------------------------------------------------------------------------------------+---------------+
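The following steps assume a validated ACM certificate for the domain already exists in us-east-1. If none exists, one can be requested as in the sketch below ("*.example.com" is a placeholder; the certificate must be validated via DNS before it becomes usable):
# Request a new wildcard certificate (placeholder domain)
aws acm request-certificate \
  --region us-east-1 \
  --domain-name "*.example.com" \
  --validation-method DNS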
Create Deployment #
vi nginx-example.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-pvc
spec:
  accessModes:
    - ReadWriteOnce # Must match underlying storage
  resources:
    requests:
      storage: 5Gi # Define PV storage size
  storageClassName: gp2 # Define StorageClass
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: default
  name: example-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.16.1-alpine
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: persistent-storage
              mountPath: "/mnt/nfs" # Mountpoint inside the container
              readOnly: false
            - name: nginx-configmap
              mountPath: /usr/share/nginx/html
      volumes:
        - name: persistent-storage
          persistentVolumeClaim:
            claimName: example-pvc # Define PVC name
        - name: nginx-configmap
          configMap:
            name: nginx-index
      restartPolicy: Always
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: default
  name: nginx-index
data:
  index.html: |
    EKS Cluster Example
---
# Nginx LoadBalancer Service with TLS Certificate
apiVersion: v1
kind: Service
metadata:
  name: nginx-lb
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:012345678912:certificate/ef60b545-2a1a-4fcb-988d-dd68cd491354
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
spec:
  selector:
    app: nginx
  ports:
    - name: http
      port: 80
      targetPort: 80 # Forwarding to port 80 in the pod
    - name: https
      port: 443
      targetPort: 80 # Forwarding HTTPS to port 80 in the pod
  type: LoadBalancer
kubectl apply -f nginx-example.yaml
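Optionally, wait until the Deployment has finished rolling out:
# Wait for the Deployment rollout to complete
kubectl rollout status deployment/example-deployment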
Verify the Deployment #
Verify PV & PVC #
# List PVC & PV
kubectl get pvc,pv
# Shell output:
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
persistentvolumeclaim/example-pvc Bound pvc-d1314ef7-0cef-418d-9c4b-4c375f901822 5Gi RWO gp2 <unset> 2m4s
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE
persistentvolume/pvc-d1314ef7-0cef-418d-9c4b-4c375f901822 5Gi RWO Delete Bound default/example-pvc gp2 <unset> 2m1s
Verify LoadBalancer #
# List pods
kubectl get pods
# Shell output:
NAME READY STATUS RESTARTS AGE
example-deployment-d95476678-mdsj4 1/1 Running 0 58s
LoadBalancer External DNS Name
# List load balancer service details: List DNS name
kubectl get svc nginx-lb
# Shell output:
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-lb LoadBalancer 172.20.185.104 a95a21491329f4008bbc0fa24360a263-255679239.us-east-1.elb.amazonaws.com 80:32649/TCP,443:32198/TCP 73s
Create DNS Entry with Route53 #
Create a CNAME DNS entry for the LoadBalancer's external DNS name:
# Create Route 53 DNS entry
aws route53 change-resource-record-sets \
  --hosted-zone-id Z05838621L1SISL2CGD4M \
  --change-batch '{
    "Changes": [
      {
        "Action": "CREATE",
        "ResourceRecordSet": {
          "Name": "eks-nginx.jklug.work",
          "Type": "CNAME",
          "TTL": 300,
          "ResourceRecords": [
            {
              "Value": "a95a21491329f4008bbc0fa24360a263-255679239.us-east-1.elb.amazonaws.com"
            }
          ]
        }
      }
    ]
  }'
# Shell output:
{
  "ChangeInfo": {
    "Id": "/change/C09972792LYP8XG8CFZV7",
    "Status": "PENDING",
    "SubmittedAt": "2024-12-18T18:32:02.550000+00:00"
  }
}
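The record becomes active once the change status switches from "PENDING" to "INSYNC", which can be polled with the change ID from the output above:
# Check the Route 53 change status
aws route53 get-change --id /change/C09972792LYP8XG8CFZV7 --query "ChangeInfo.Status" --output text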
Verify Nginx Container #
# Curl the Nginx container via the LoadBalancer hostname (HTTP)
curl a95a21491329f4008bbc0fa24360a263-255679239.us-east-1.elb.amazonaws.com
# Shell output:
EKS Cluster Example
# Curl the Nginx container via the Route 53 DNS entry (HTTPS)
curl https://eks-nginx.jklug.work
# Shell output:
EKS Cluster Example
Cleanup #
Delete Route 53 DNS Entry #
# Delete DNS entry
aws route53 change-resource-record-sets \
  --hosted-zone-id Z05838621L1SISL2CGD4M \
  --change-batch '{
    "Changes": [
      {
        "Action": "DELETE",
        "ResourceRecordSet": {
          "Name": "eks-nginx.jklug.work",
          "Type": "CNAME",
          "TTL": 300,
          "ResourceRecords": [
            {
              "Value": "a95a21491329f4008bbc0fa24360a263-255679239.us-east-1.elb.amazonaws.com"
            }
          ]
        }
      }
    ]
  }'
Delete the Deployment #
# Delete the example deployment
kubectl delete -f nginx-example.yaml
Delete the EKS Cluster #
# Delete Terraform resources
terraform destroy -auto-approve
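Optionally, verify that everything was removed:
# The Terraform state should now be empty
terraform state list
# Verify that the EKS cluster no longer exists
aws eks list-clusters --region us-east-1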