
Managed Kubernetes Services - AWS Elastic Kubernetes Service (EKS): EFS Storage Configuration


EKS Cluster
#

Create Cluster
#

# Create an EKS cluster: Define Kubernetes version & region
eksctl create cluster \
  --name eks-efs-example \
  --version 1.30 \
  --nodegroup-name prod-nodes \
  --node-type t3.medium \
  --nodes 2 \
  --region eu-central-1 \
  --managed

More Options:

  • --node-volume-size 20: Define the root volume size of the worker nodes in GB

Note: Creating the necessary CloudFormation resources can take 10 to 20 minutes.
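
One way to verify that the cluster is up before continuing is to query its status, which should read "ACTIVE":

# Check the cluster status
aws eks describe-cluster \
  --name eks-efs-example \
  --query "cluster.status" \
  --region eu-central-1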


List VPC & Subnets
#

By default (if not otherwise defined) eksctl creates a new dedicated VPC and subnets for the EKS cluster.

List the created VPC and Subnet IDs:

# List VPC & Subnets used by the EKS cluster
aws eks describe-cluster \
  --name eks-efs-example \
  --query "cluster.resourcesVpcConfig" \
  --region eu-central-1

# Shell output:
{
    "subnetIds": [
        "subnet-0497c7c8cb16d694e",
        "subnet-0ae8628cb3d9ec217",
        "subnet-087b97256d7da13de",
        "subnet-008d099b8c95da76d",
        "subnet-085922b2ecabf6732",
        "subnet-05d0e21b173475cbd"
    ],
    "securityGroupIds": [
        "sg-0a42e727854b22436"
    ],
    "clusterSecurityGroupId": "sg-0b59e1faaf3f1f528",
    "vpcId": "vpc-0e46232a4373312f8",
    "endpointPublicAccess": true,
    "endpointPrivateAccess": false,
    "publicAccessCidrs": [
        "0.0.0.0/0"
    ]
}

# List VPC CIDR
aws ec2 describe-vpcs \
  --vpc-ids vpc-0e46232a4373312f8 \
  --query 'Vpcs[*].CidrBlock' --output text \
  --region eu-central-1

# Shell output:
192.168.0.0/16



EFS File System
#

Create EFS Security Group
#

  • Create a Security Group for the EFS file system, using the VPC ID of the EKS cluster
# Create a Security Group for EFS
aws ec2 create-security-group \
  --group-name EFS-SG \
  --description "Security group for EFS in EKS" \
  --vpc-id vpc-0e46232a4373312f8 \
  --region eu-central-1

# Shell output:
{
    "GroupId": "sg-0868cd65ac1bb7583",
    "SecurityGroupArn": "arn:aws:ec2:eu-central-1:073526172187:security-group/sg-0868cd65ac1bb7583"
}

  • Add an ingress rule that allows NFS traffic (TCP port 2049) from the VPC CIDR
# Allow Ingress from EKS Worker Nodes
aws ec2 authorize-security-group-ingress \
  --group-id sg-0868cd65ac1bb7583 \
  --protocol tcp \
  --port 2049 \
  --cidr 192.168.0.0/16 \
  --region eu-central-1

# Shell output:
{
    "Return": true,
    "SecurityGroupRules": [
        {
            "SecurityGroupRuleId": "sgr-0d09dcc3dd0f6c990",
            "GroupId": "sg-0868cd65ac1bb7583",
            "GroupOwnerId": "073526172187",
            "IsEgress": false,
            "IpProtocol": "tcp",
            "FromPort": 2049,
            "ToPort": 2049,
            "CidrIpv4": "192.168.0.0/16",
            "SecurityGroupRuleArn": "arn:aws:ec2:eu-central-1:073526172187:security-group-rule/sgr-0d09dcc3dd0f6c990"
        }
    ]
}
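
The rule can optionally be double-checked via the security group rules API:

# Optional: verify the ingress rule
aws ec2 describe-security-group-rules \
  --filters "Name=group-id,Values=sg-0868cd65ac1bb7583" \
  --region eu-central-1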

Create EFS File System
#

  • Create an EFS file system in the same AWS region as the EKS cluster
# Create EFS file system
aws efs create-file-system \
  --performance-mode generalPurpose \
  --tags Key=Name,Value=eks-efs-storage \
  --region eu-central-1

# Shell output:
{
    "OwnerId": "073526172187",
    "CreationToken": "a42da59a-9424-41e7-bdd7-747c594fad3d",
    "FileSystemId": "fs-09117cf61edb866b1",
    "FileSystemArn": "arn:aws:elasticfilesystem:eu-central-1:073526172187:file-system/fs-09117cf61edb866b1",
    "CreationTime": "2025-02-01T15:12:29+00:00",
    "LifeCycleState": "creating",
    "Name": "eks-efs-storage",
    "NumberOfMountTargets": 0,
    "SizeInBytes": {
        "Value": 0,
        "ValueInIA": 0,
        "ValueInStandard": 0,
        "ValueInArchive": 0
    },
    "PerformanceMode": "generalPurpose",
    "Encrypted": false,
    "ThroughputMode": "bursting",
    "Tags": [
        {
            "Key": "Name",
            "Value": "eks-efs-storage"
        }
    ],
    "FileSystemProtection": {
        "ReplicationOverwriteProtection": "ENABLED"
    }
}
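
Optionally wait until the file system is ready; the "LifeCycleState" should change from "creating" to "available":

# Check the file system state
aws efs describe-file-systems \
  --file-system-id fs-09117cf61edb866b1 \
  --query "FileSystems[*].LifeCycleState" \
  --region eu-central-1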

Find Cluster Subnets
#

Find the subnets in which the EKS worker nodes run, by matching the node IP addresses against the subnet CIDRs:

# List the available Subnets in the VPC
aws ec2 describe-subnets \
    --filters "Name=vpc-id,Values=vpc-0e46232a4373312f8" \
    --query 'Subnets[*].{SubnetId: SubnetId,AvailabilityZone: AvailabilityZone,CidrBlock: CidrBlock}' \
    --output table \
    --region eu-central-1

# Shell output:
----------------------------------------------------------------------
|                           DescribeSubnets                          |
+------------------+--------------------+----------------------------+
| AvailabilityZone |     CidrBlock      |         SubnetId           |
+------------------+--------------------+----------------------------+
|  eu-central-1a   |  192.168.32.0/19   |  subnet-0497c7c8cb16d694e  |
|  eu-central-1b   |  192.168.96.0/19   |  subnet-008d099b8c95da76d  |
|  eu-central-1a   |  192.168.128.0/19  |  subnet-085922b2ecabf6732  |
|  eu-central-1c   |  192.168.64.0/19   |  subnet-0ae8628cb3d9ec217  |
|  eu-central-1b   |  192.168.0.0/19    |  subnet-087b97256d7da13de  |
|  eu-central-1c   |  192.168.160.0/19  |  subnet-05d0e21b173475cbd  |
+------------------+--------------------+----------------------------+

# List the EKS node IP addresses
kubectl get nodes -o wide

# Shell output:
NAME                                              STATUS   ROLES    AGE   VERSION               INTERNAL-IP      EXTERNAL-IP      OS-IMAGE         KERNEL-VERSION                  CONTAINER-RUNTIME
ip-192-168-15-17.eu-central-1.compute.internal    Ready    <none>   12m   v1.30.8-eks-aeac579   192.168.15.17    3.72.113.43      Amazon Linux 2   5.10.230-223.885.amzn2.x86_64   containerd://1.7.23
ip-192-168-60-175.eu-central-1.compute.internal   Ready    <none>   12m   v1.30.8-eks-aeac579   192.168.60.175   18.185.107.192   Amazon Linux 2   5.10.230-223.885.amzn2.x86_64   containerd://1.7.23

# The node IPs 192.168.15.17 and 192.168.60.175 fall into the subnet CIDRs
# 192.168.0.0/19 and 192.168.32.0/19, so the worker nodes use these subnets:
subnet-087b97256d7da13de
subnet-0497c7c8cb16d694e
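
Alternatively, the node subnets can be listed directly from the EC2 instances. A sketch, assuming the instances carry the "eks:cluster-name" tag that EKS managed node groups apply by default:

# Optional: list the subnet IDs of the worker node instances
aws ec2 describe-instances \
  --filters "Name=tag:eks:cluster-name,Values=eks-efs-example" \
  --query "Reservations[*].Instances[*].SubnetId" \
  --output text \
  --region eu-central-1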

Create EFS Mount Targets
#

  • Create an EFS mount target in each of the two subnets used by the EKS worker nodes, defining the EFS ID and the previously created security group
# Create the first EFS mount target: Define the EFS ID, subnet ID and security group
aws efs create-mount-target \
  --file-system-id fs-09117cf61edb866b1 \
  --subnet-id subnet-087b97256d7da13de \
  --security-groups sg-0868cd65ac1bb7583 \
  --region eu-central-1

# Shell output:
{
    "OwnerId": "073526172187",
    "MountTargetId": "fsmt-0b7da34b0987c8fb7",
    "FileSystemId": "fs-09117cf61edb866b1",
    "SubnetId": "subnet-087b97256d7da13de",
    "LifeCycleState": "creating",
    "IpAddress": "192.168.15.38",
    "NetworkInterfaceId": "eni-049b882a5ea4365c3",
    "AvailabilityZoneId": "euc1-az3",
    "AvailabilityZoneName": "eu-central-1b",
    "VpcId": "vpc-0e46232a4373312f8"
}

# Create the second EFS mount target in the remaining subnet
aws efs create-mount-target \
  --file-system-id fs-09117cf61edb866b1 \
  --subnet-id subnet-0497c7c8cb16d694e \
  --security-groups sg-0868cd65ac1bb7583 \
  --region eu-central-1

# Shell output:
{
    "OwnerId": "073526172187",
    "MountTargetId": "fsmt-0cb9c12c109054529",
    "FileSystemId": "fs-09117cf61edb866b1",
    "SubnetId": "subnet-0497c7c8cb16d694e",
    "LifeCycleState": "creating",
    "IpAddress": "192.168.38.165",
    "NetworkInterfaceId": "eni-08b4fdd53013cef44",
    "AvailabilityZoneId": "euc1-az2",
    "AvailabilityZoneName": "eu-central-1a",
    "VpcId": "vpc-0e46232a4373312f8"
}

Wait until the mount targets are ready.
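
One way is to poll the mount targets' "LifeCycleState", which should change from "creating" to "available":

# Check the mount target state
aws efs describe-mount-targets \
  --file-system-id fs-09117cf61edb866b1 \
  --query "MountTargets[*].LifeCycleState" \
  --region eu-central-1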



EFS CSI Setup
#

Install EFS CSI Driver
#

# Install EFS CSI Driver as an EKS Add-on
aws eks create-addon \
  --cluster-name eks-efs-example \
  --addon-name aws-efs-csi-driver \
  --region eu-central-1

# Shell output:
{
    "addon": {
        "addonName": "aws-efs-csi-driver",
        "clusterName": "eks-efs-example",
        "status": "CREATING",
        "addonVersion": "v2.1.4-eksbuild.1",
        "health": {
            "issues": []
        },
        "addonArn": "arn:aws:eks:eu-central-1:073526172187:addon/eks-efs-example/aws-efs-csi-driver/96ca6115-825f-85af-278d-e6a0f39c7d0d",
        "createdAt": "2025-02-01T15:39:46.897000+00:00",
        "modifiedAt": "2025-02-01T15:39:46.910000+00:00",
        "tags": {}
    }
}
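
Alternatively, the CLI waiter can block until the add-on is ready (assuming an AWS CLI version that ships the "addon-active" waiter):

# Optional: wait until the add-on is active
aws eks wait addon-active \
  --cluster-name eks-efs-example \
  --addon-name aws-efs-csi-driver \
  --region eu-central-1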

Verify the EFS CSI Driver
#

Wait until the status changes to “ACTIVE”:

# Verify the status of the installed add-on
aws eks describe-addon \
  --cluster-name eks-efs-example \
  --addon-name aws-efs-csi-driver \
  --query "addon.status" \
  --region eu-central-1

# Shell output:
"CREATING"

# Shell output: 
"ACTIVE"

# List EFS CSI pods
kubectl get pods -n kube-system -l app.kubernetes.io/name=aws-efs-csi-driver

# Shell output:
NAME                                  READY   STATUS    RESTARTS   AGE
efs-csi-controller-5c6f5fc6f4-dhck4   3/3     Running   0          62s
efs-csi-controller-5c6f5fc6f4-rsdd6   3/3     Running   0          62s
efs-csi-node-blz4v                    3/3     Running   0          62s
efs-csi-node-dq94b                    3/3     Running   0          62s

IAM Role & Permissions
#

Find Worker Node Role
#

# List cluster IAM roles
aws iam list-roles --query "Roles[?contains(RoleName, 'eks-efs-example')].RoleName"

# Shell output:
[
    "eksctl-eks-efs-example-cluster-ServiceRole-kdf4IW7IU0Db",
    "eksctl-eks-efs-example-nodegroup-p-NodeInstanceRole-i8vc25DCayTT"
]

  • eksctl-eks-efs-example-nodegroup-p-NodeInstanceRole-i8vc25DCayTT: The node instance role, which is attached to the EC2 instances in the worker node group

Attach IAM Policy to Role
#

# Attach the "AmazonElasticFileSystemFullAccess" IAM policy
aws iam attach-role-policy \
  --role-name eksctl-eks-efs-example-nodegroup-p-NodeInstanceRole-i8vc25DCayTT \
  --policy-arn arn:aws:iam::aws:policy/AmazonElasticFileSystemFullAccess

Verify IAM Policy is Attached
#

List the IAM policies that are attached to the Worker Node Role:

# List attached policies
aws iam list-attached-role-policies --role-name eksctl-eks-efs-example-nodegroup-p-NodeInstanceRole-i8vc25DCayTT

# Shell output:
{
    "AttachedPolicies": [
        {
            "PolicyName": "AmazonSSMManagedInstanceCore",
            "PolicyArn": "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
        },
        {
            "PolicyName": "AmazonElasticFileSystemFullAccess",
            "PolicyArn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemFullAccess"
        },
        {
            "PolicyName": "AmazonEKS_CNI_Policy",
            "PolicyArn": "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
        },
        {
            "PolicyName": "AmazonEC2ContainerRegistryReadOnly",
            "PolicyArn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
        },
        {
            "PolicyName": "AmazonEKSWorkerNodePolicy",
            "PolicyArn": "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
        }
    ]
}

Restart EFS CSI Pods
#

# Delete the EFS CSI pods so they restart and pick up the new IAM permissions
kubectl delete pods -n kube-system -l app.kubernetes.io/name=aws-efs-csi-driver

# List EFS CSI pods
kubectl get pods -n kube-system -l app.kubernetes.io/name=aws-efs-csi-driver

# Shell output:
NAME                                  READY   STATUS    RESTARTS   AGE
efs-csi-controller-5c6f5fc6f4-5wkzh   3/3     Running   0          7s
efs-csi-controller-5c6f5fc6f4-k976r   3/3     Running   0          7s
efs-csi-node-5p8np                    3/3     Running   0          7s
efs-csi-node-wqphd                    3/3     Running   0          7s



Create EFS StorageClass
#

  • efs-storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: efs-sc
provisioner: efs.csi.aws.com
parameters:
  provisioningMode: efs-ap
  fileSystemId: fs-09117cf61edb866b1 # Define the EFS file system ID
  directoryPerms: "700"
  gidRangeStart: "1000"
  gidRangeEnd: "2000"
  basePath: "/dynamic_provisioning"
mountOptions:
  - tls
reclaimPolicy: Delete
volumeBindingMode: Immediate

  • provisioningMode: "efs-ap" provisions volumes dynamically via EFS access points
  • reclaimPolicy: Set to "Delete" to remove dynamically provisioned volumes together with their PVC, or "Retain" to keep them

# Apply the StorageClass
kubectl apply -f efs-storageclass.yaml

Verify EFS StorageClass
#

# List StorageClasses
kubectl get storageclass

# Shell output:
NAME     PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
efs-sc   efs.csi.aws.com         Delete          Immediate              false                  16s
gp2      kubernetes.io/aws-ebs   Delete          WaitForFirstConsumer   false                  34m
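
Optionally, "efs-sc" can be made the default StorageClass, so PVCs without an explicit "storageClassName" use EFS:

# Optional: set "efs-sc" as the default StorageClass
kubectl patch storageclass efs-sc \
  -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'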



Example PVC
#

Create PVC and Deployment
#

  • efs-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: efs-claim
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: efs-sc
  resources:
    requests:
      storage: 5Gi
---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-efs
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-efs
  template:
    metadata:
      labels:
        app: nginx-efs
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
          volumeMounts:
            - mountPath: "/usr/share/nginx/html"
              name: efs-storage
      volumes:
        - name: efs-storage
          persistentVolumeClaim:
            claimName: efs-claim
# Apply the PVC and Deployment
kubectl apply -f efs-pvc.yaml

Verify PVC, PV & Pods
#

# List PVCs
kubectl get pvc

# Shell output:
NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
efs-claim   Bound    pvc-d32e6c2a-01ae-40dd-99f5-db3f3ae10907   5Gi        RWX            efs-sc         <unset>                 4s

# List PVs
kubectl get pv

# Shell output:
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
pvc-d32e6c2a-01ae-40dd-99f5-db3f3ae10907   5Gi        RWX            Delete           Bound    default/efs-claim   efs-sc         <unset>                          16s

# List pods
kubectl get pod

# Shell output:
NAME                        READY   STATUS    RESTARTS   AGE
nginx-efs-df9bc5d48-2vwqg   1/1     Running   0          26s
nginx-efs-df9bc5d48-mw5fc   1/1     Running   0          26s
nginx-efs-df9bc5d48-zf55p   1/1     Running   0          26s

Test the EFS Volume
#

# Add an index.html file with some text via the "nginx-efs-df9bc5d48-2vwqg" pod
kubectl exec nginx-efs-df9bc5d48-2vwqg -- sh -c 'echo "Hello from Nginx via EFS" > /usr/share/nginx/html/index.html'

# Curl the other pods
kubectl exec -it nginx-efs-df9bc5d48-mw5fc -- sh -c "curl http://localhost:80"
kubectl exec -it nginx-efs-df9bc5d48-zf55p -- sh -c "curl http://localhost:80"

# Shell output:
Hello from Nginx via EFS
Hello from Nginx via EFS
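
Because the EFS volume is shared and independent of any single pod, the file should also survive pod restarts. A quick sketch (the replacement pod name will differ):

# Optional: delete one pod and curl a replacement to verify persistence
kubectl delete pod nginx-efs-df9bc5d48-2vwqg
kubectl exec deploy/nginx-efs -- sh -c "curl http://localhost:80"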

Delete PVC & Deployment
#

# Delete the PVC and Nginx Deployment
kubectl delete -f efs-pvc.yaml

# Verify the PV was deleted
kubectl get pv

# Shell output:
No resources found
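
Since the StorageClass uses reclaimPolicy "Delete", the EFS CSI driver should also have removed the access point it created for the volume. This can optionally be confirmed:

# Optional: list the remaining EFS access points
aws efs describe-access-points \
  --file-system-id fs-09117cf61edb866b1 \
  --region eu-central-1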



Cleanup
#

Delete EFS File System
#

List EFS Mount Targets
#

# List EFS mount targets
aws efs describe-mount-targets \
  --file-system-id fs-09117cf61edb866b1 \
  --query "MountTargets[*].[MountTargetId,SubnetId]" \
  --output table \
  --region eu-central-1

# Shell output:
--------------------------------------------------------
|                 DescribeMountTargets                 |
+-------------------------+----------------------------+
|  fsmt-0b7da34b0987c8fb7 |  subnet-087b97256d7da13de  |
|  fsmt-0cb9c12c109054529 |  subnet-0497c7c8cb16d694e  |
+-------------------------+----------------------------+

Delete Mount Targets
#

# Delete EFS mount targets
aws efs delete-mount-target --mount-target-id fsmt-0b7da34b0987c8fb7 --region eu-central-1
aws efs delete-mount-target --mount-target-id fsmt-0cb9c12c109054529 --region eu-central-1

Delete EFS File System
#

# Delete EFS file system (wait until the mount targets are deleted)
aws efs delete-file-system \
  --file-system-id fs-09117cf61edb866b1 \
  --region eu-central-1

Delete EFS Security Group
#

# Delete the EFS security group
aws ec2 delete-security-group \
  --group-id sg-0868cd65ac1bb7583 \
  --region eu-central-1

Delete EKS Cluster
#

# Delete EKS cluster
eksctl delete cluster \
  --name eks-efs-example \
  --region eu-central-1
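
Optionally verify that the cluster was removed:

# List the remaining EKS clusters in the region
eksctl get cluster --region eu-central-1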