Kubeconfig Overview #
- A Kubeconfig is a YAML manifest that contains the Kubernetes cluster details, certificates, and secret tokens used for cluster authentication.
- Kubectl uses the Kubeconfig file to connect to the Kubernetes cluster API.
- The default Kubeconfig file location is ~/.kube/config
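A different Kubeconfig file can also be selected per command or per shell session. A quick sketch, using a placeholder path:
# Use a specific Kubeconfig file for a single command
kubectl get nodes --kubeconfig=/path/to/custom-config

# Use a specific Kubeconfig file for the current shell session
export KUBECONFIG=/path/to/custom-config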
Kubeconfig File Example:
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: <ca-data-here>
    server: https://IP-or-DNS
  name: <cluster-name>
contexts:
- context:
    cluster: <cluster-name>
    user: <cluster-name-user>
  name: <cluster-name>
current-context: <cluster-name>
kind: Config
preferences: {}
users:
- name: <cluster-name-user>
  user:
    token: <secret-token-here>
- certificate-authority-data: The Kubernetes cluster CA certificate
- server: IP address or DNS name of the Kubernetes cluster
- name: The Kubernetes cluster name
- user: The user / service account name
- token: The token of the user / service account
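The Kubeconfig currently in use can be inspected with Kubectl; certificate data and tokens are redacted by default:
# Show the current Kubeconfig (certificates and tokens redacted)
kubectl config view

# Show the current Kubeconfig including certificate data and tokens
kubectl config view --raw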
Create New Kubeconfig File #
Create a Service Account #
# Create service account "example-cluster-admin"
kubectl -n kube-system create serviceaccount example-cluster-admin
- The service account name will be used as the user name in the Kubeconfig file
Verify New Service Account #
# List service accounts
kubectl -n kube-system get serviceaccount
# Shell output:
NAME                                 SECRETS   AGE
attachdetach-controller              0         58d
bootstrap-signer                     0         58d
certificate-controller               0         58d
cilium                               0         58d
cilium-operator                      0         58d
clusterrole-aggregation-controller   0         58d
coredns                              0         58d
cronjob-controller                   0         58d
daemon-set-controller                0         58d
default                              0         58d
deployment-controller                0         58d
disruption-controller                0         58d
endpoint-controller                  0         58d
endpointslice-controller             0         58d
endpointslicemirroring-controller    0         58d
ephemeral-volume-controller          0         58d
example-cluster-admin                0         3m18s   # Check
expand-controller                    0         58d
generic-garbage-collector            0         58d
horizontal-pod-autoscaler            0         58d
job-controller                       0         58d
kube-proxy                           0         58d
namespace-controller                 0         58d
node-controller                      0         58d
persistent-volume-binder             0         58d
pod-garbage-collector                0         58d
pv-protection-controller             0         58d
pvc-protection-controller            0         58d
replicaset-controller                0         58d
replication-controller               0         58d
resourcequota-controller             0         58d
root-ca-cert-publisher               0         58d
service-account-controller           0         58d
service-controller                   0         58d
statefulset-controller               0         58d
token-cleaner                        0         58d
ttl-after-finished-controller        0         58d
ttl-controller                       0         58d
# List service account details
kubectl -n kube-system get serviceaccount example-cluster-admin -o yaml
# Shell output:
apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: "2024-09-02T11:12:33Z"
  name: example-cluster-admin
  namespace: kube-system
  resourceVersion: "55760"
  uid: 8602fc11-e2c5-43af-9c35-a92109c1ca2d
Create Service Account Secret #
Since Kubernetes 1.24, long-lived token secrets are no longer created automatically for service accounts, so the token secret must be created manually:
vi example-cluster-admin-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: example-cluster-admin-secret
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: example-cluster-admin
type: kubernetes.io/service-account-token
kubectl apply -f example-cluster-admin-secret.yaml
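Optionally confirm that the token controller has populated the secret with a token:
# Inspect the secret; the "token" entry should be populated
kubectl -n kube-system describe secret example-cluster-admin-secret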
Create ClusterRole #
The following ClusterRole provides full admin access to the Kubernetes cluster:
vi example-cluster-admin-cr.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: example-cluster-admin
rules:
- apiGroups: ["*"] # All API groups
  resources: ["*"] # All resources
  verbs: ["*"] # All verbs (get, list, create, delete, etc.)
- nonResourceURLs: ["*"] # Allow access to all non-resource URLs
  verbs: ["*"]
kubectl apply -f example-cluster-admin-cr.yaml
Create ClusterRole Binding #
vi example-cluster-admin-crb.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: example-cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: example-cluster-admin
subjects:
- kind: ServiceAccount
  name: example-cluster-admin
  namespace: kube-system
kubectl apply -f example-cluster-admin-crb.yaml
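As an alternative to the custom ClusterRole and ClusterRoleBinding above, the built-in "cluster-admin" ClusterRole can be bound to the service account with a single command:
# Alternative: bind the built-in "cluster-admin" ClusterRole
kubectl create clusterrolebinding example-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:example-cluster-admin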
Verify ClusterRole & ClusterRole Binding #
# List ClusterRoles
kubectl get clusterroles | grep example-cluster-admin
# Shell output:
example-cluster-admin 2024-09-02T11:17:03
# List ClusterRole Bindings
kubectl get clusterrolebindings | grep example-cluster-admin
# Shell output:
example-cluster-admin ClusterRole/example-cluster-admin 2m30s
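The permissions can also be tested by impersonating the service account; with the ClusterRole above, the command should print "yes":
# Check the service account permissions via impersonation
kubectl auth can-i '*' '*' --as=system:serviceaccount:kube-system:example-cluster-admin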
Retrieve Details #
Retrieve the required Kubeconfig details and save them in variables:
# Export variables to current shell:
export SA_SECRET_TOKEN=$(kubectl -n kube-system get secret/example-cluster-admin-secret -o=go-template='{{.data.token}}' | base64 --decode)
export CLUSTER_NAME=$(kubectl config current-context)
export CURRENT_CLUSTER=$(kubectl config view --raw -o=go-template='{{range .contexts}}{{if eq .name "'''${CLUSTER_NAME}'''"}}{{ index .context "cluster" }}{{end}}{{end}}')
export CLUSTER_CA_CERT=$(kubectl config view --raw -o=go-template='{{range .clusters}}{{if eq .name "'''${CURRENT_CLUSTER}'''"}}"{{with index .cluster "certificate-authority-data" }}{{.}}{{end}}"{{ end }}{{ end }}')
export CLUSTER_ENDPOINT=$(kubectl config view --raw -o=go-template='{{range .clusters}}{{if eq .name "'''${CURRENT_CLUSTER}'''"}}{{ .cluster.server }}{{end}}{{ end }}')
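Optionally verify the exported variables before generating the Kubeconfig file (note that the token and CA certificate are printed in plain text):
# Verify the exported variables
echo $CLUSTER_NAME
echo $CURRENT_CLUSTER
echo $CLUSTER_ENDPOINT
echo $CLUSTER_CA_CERT
echo $SA_SECRET_TOKEN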
Generate New Kubeconfig File #
Generate a new Kubeconfig file with the values of the exported variables:
# Create Kubeconfig file
cat << EOF > example-cluster-admin-config
apiVersion: v1
kind: Config
current-context: ${CLUSTER_NAME}
contexts:
- name: ${CLUSTER_NAME}
  context:
    cluster: ${CLUSTER_NAME}
    user: example-cluster-admin
clusters:
- name: ${CLUSTER_NAME}
  cluster:
    certificate-authority-data: ${CLUSTER_CA_CERT}
    server: ${CLUSTER_ENDPOINT}
users:
- name: example-cluster-admin
  user:
    token: ${SA_SECRET_TOKEN}
EOF
# Verify the Kubeconfig file
cat example-cluster-admin-config
# Validate the generated Kubeconfig file
kubectl get nodes --kubeconfig=example-cluster-admin-config
Use Kubeconfig File on New Client #
Install Kubectl (Debian-based Distributions) #
# Install prerequisites
sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl gnupg
# Download the public signing key for the Kubernetes package repositories
sudo mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
sudo chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# Add the Kubernetes repository
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo chmod 644 /etc/apt/sources.list.d/kubernetes.list
# Install Kubectl
sudo apt update && sudo apt install -y kubectl
# Verify Kubectl installation / check version
kubectl version --client
Copy Kubeconfig File #
Create the .kube directory for the Kubeconfig file on the new client:
# Create the .kube directory
mkdir $HOME/.kube/
# Set permissions (Restrict for group and others)
chmod 700 $HOME/.kube/
Copy the Kubeconfig file to the new client:
# Copy the Kubeconfig file
scp ./example-cluster-admin-config ubuntu@192.168.30.14:/home/ubuntu/.kube/config
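Restrict the file permissions on the new client, since the Kubeconfig file contains the secret token:
# Restrict permissions of the Kubeconfig file (owner read/write only)
chmod 600 ~/.kube/config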
Verify the Kubeconfig file on the new client:
# Verify the Kubeconfig file:
ls ~/.kube/
# Shell output:
config
Validate Kubeconfig File #
# List Kubernetes nodes
kubectl get nodes
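Since the Kubeconfig file sits at the default path ~/.kube/config, Kubectl picks it up automatically. The admin permissions can be verified as well, this time authenticated as the service account itself rather than via impersonation:
# Verify admin permissions (should print "yes")
kubectl auth can-i '*' '*'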