CoreDNS #
Backup and Restore ConfigMap #
Before changing the CoreDNS ConfigMap settings, create a backup so that you can restore the original configuration if something goes wrong.
Here is how to back up and restore the CoreDNS ConfigMap:
# Export the current ConfigMap
kubectl get cm coredns -n kube-system -o yaml > coredns-configmap-backup.yaml
# Scale down the deployment to "0"
kubectl scale deployment coredns --replicas=0 -n kube-system
# Delete the original ConfigMap
kubectl delete cm coredns -n kube-system
# Restore the ConfigMap from the backup
kubectl apply -f coredns-configmap-backup.yaml
# Restart / Redeploy CoreDNS
kubectl rollout restart deployment coredns -n kube-system
# Scale down the deployment to "1"
kubectl scale deployment coredns --replicas=0 -n kube-system
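To confirm that CoreDNS is back up after the restore, check the deployment status; the rollout command blocks until the pods are ready:
# Verify the restored deployment becomes ready
kubectl rollout status deployment coredns -n kube-system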
Optional: Backup and Restore Deployment #
If you’re planning to also adapt the deployment, for example to add ConfigMap volumes to it, it’s a good idea to also back up the CoreDNS deployment:
# Create a backup of a Deployment
kubectl get deployment coredns -n kube-system -o yaml > coredns-deployment.yaml
# Delete the Deployment
kubectl delete deployment coredns -n kube-system
# Restore the Deployment
kubectl apply -f coredns-deployment.yaml
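Note: A manifest exported with kubectl get -o yaml contains server-generated fields. If the apply is rejected, strip them from the backup first:
# Remove server-generated fields before re-applying, if necessary
# (status, metadata.resourceVersion, metadata.uid, metadata.creationTimestamp)
vi coredns-deployment.yaml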
Add Hosts Entry to CoreDNS #
Find the CoreDNS ConfigMap #
# List ConfigMaps in "kube-system" namespace
kubectl get cm -n kube-system
List the ConfigMap Details #
# List CoreDNS ConfigMap details
kubectl describe cm coredns -n kube-system
Edit the ConfigMap #
# Edit the CoreDNS ConfigMap / configuration
kubectl edit cm coredns -n kube-system
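If you prefer not to edit the live object interactively, you can export the ConfigMap, change it locally and re-apply it; this keeps the change in a file (the file name is arbitrary):
# Alternatively: export, edit locally, then re-apply
kubectl get cm coredns -n kube-system -o yaml > coredns.yaml
vi coredns.yaml
kubectl apply -f coredns.yaml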
Create Custom DNS Entry: K3s #
Create a custom DNS / hosts entry for the following hosts:
192.168.30.61 debian-node-1.local
192.168.30.62 debian-node-2.local
The whole ConfigMap looks like this:
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        hosts /etc/coredns/NodeHosts {
            ttl 60
            reload 15s
            fallthrough
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
        import /etc/coredns/custom/*.override
    }
    import /etc/coredns/custom/*.server
  NodeHosts: |
    192.168.30.20 debian-01
    192.168.30.21 debian-02
    192.168.30.22 debian-03
    192.168.30.61 debian-node-1.local # Add custom hosts entry
    192.168.30.62 debian-node-2.local # Add custom hosts entry
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
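Note that the import /etc/coredns/custom/*.override and *.server lines are K3s-specific: K3s mounts an optional ConfigMap named coredns-custom into the CoreDNS pod, so extra configuration can be added without touching the coredns ConfigMap itself (which K3s may overwrite on upgrades). A minimal sketch, assuming a hypothetical example.org zone and a documentation-range IP:
# Add a custom server block via the "coredns-custom" ConfigMap (K3s)
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns-custom
  namespace: kube-system
data:
  example.server: |
    example.org:53 {
        hosts {
            203.0.113.10 app.example.org
        }
    }
EOF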
Create Custom DNS Entry: K8s #
Create a custom DNS / hosts entry for the following hosts:
192.168.30.61 debian-node-1.local
192.168.30.62 debian-node-2.local
The whole ConfigMap looks like this:
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors {
        }
        health {
            lameduck 5s
        }
        ready
        kubernetes jkw-k8s.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        hosts {
            192.168.30.61 debian-node-1.local # Add custom hosts entry
            192.168.30.62 debian-node-2.local # Add custom hosts entry
            fallthrough
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            prefer_udp
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
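Since the Corefile includes the reload plugin, CoreDNS eventually picks up ConfigMap changes on its own once the mounted file is updated (this can take a minute or two); you can watch the logs for the reload message:
# Follow the CoreDNS logs to see the configuration reload
kubectl logs -l k8s-app=kube-dns -n kube-system -f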
Restart CoreDNS #
# Restart CoreDNS
kubectl rollout restart deployment coredns -n kube-system
# Alternatively, delete the CoreDNS pods
kubectl delete pod -l k8s-app=kube-dns -n kube-system
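To block until the restart has finished:
# Wait until the rollout is complete
kubectl rollout status deployment coredns -n kube-system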
Verify CoreDNS Pods, Details and Logs #
Verify the CoreDNS pods are up and running:
# List pods in the "kube-system" namespace
kubectl get pod -n kube-system
# Shell output:
NAME READY STATUS RESTARTS AGE
...
coredns-6f84c7cff6-fdjd7 1/1 Running 0 40s
If necessary, list the pod details and logs for troubleshooting:
# List CoreDNS pod details
kubectl describe pod coredns-6f84c7cff6-fdjd7 -n kube-system
# List CoreDNS pod logs
kubectl logs coredns-6f84c7cff6-fdjd7 -n kube-system
Test the DNS Resolution #
# Run a busybox pod and test the DNS resolution
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- nslookup debian-node-1.local
# Shell output:
Name: debian-node-1.local
Address: 192.168.30.61
# Alternatively, run a busybox pod and open a shell:
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- sh
# Manually start nslookup
nslookup debian-node-1.local
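If the custom entry does not resolve, first make sure cluster DNS works at all; the API server's service name should always resolve:
# Sanity check: resolve the cluster's own API service name
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- nslookup kubernetes.default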
Deployment with Hosts Entry #
Deployment Manifest #
# Create a manifest for the deployment
vi hosts-deployment-example.yaml
Define one or more hostnames for an IP:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      hostAliases:
      - ip: "192.168.30.61"
        hostnames:
        - "debian-node-1.local" # Hostname 1
        - "deb1.local" # Hostname 2
      - ip: "192.168.30.62"
        hostnames:
        - "debian-node-3.local"
      containers:
      - name: nginx-container
        image: nginx
        ports:
        - containerPort: 80
# Deploy the manifest
kubectl apply -f hosts-deployment-example.yaml
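Verify the example deployment is up:
# Verify the pod is running
kubectl get pod -l app=nginx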
Test the DNS Resolution #
Open the container terminal:
# Exec the container terminal
kubectl exec -it $(kubectl get pod -l app=nginx -o jsonpath="{.items[0].metadata.name}") -- /bin/sh
Verify the hosts entries:
# Verify the hosts entry:
cat /etc/hosts
# Shell output:
# Entries added by HostAliases.
192.168.30.61 debian-node-1.local deb1.local
192.168.30.62 debian-node-3.local
Alternatively, ping the host:
# Install the packages for ping
apt update && apt install -y iputils-ping
# Run ping
ping deb1.local
# Shell output:
PING debian-node-1.local (192.168.30.61)
Note: Nslookup queries DNS directly and might bypass the /etc/hosts file.
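To test name resolution the way most applications perform it (through the system resolver, which does consult /etc/hosts), use getent inside the container instead:
# Resolve via the system resolver, including /etc/hosts
getent hosts deb1.local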
DNS Troubleshooting #
Find the CoreDNS Cluster IP #
# List the CoreDNS cluster IP
kubectl get svc -n kube-system
# Shell output:
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
coredns ClusterIP 10.233.0.3 <none> 53/UDP,53/TCP,9153/TCP 5d18h
Note: In older Kubernetes versions, the CoreDNS ClusterIP service can also be named kube-dns.
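With the ClusterIP at hand, you can query CoreDNS directly and bypass the pod's resolver configuration (replace 10.233.0.3 with the ClusterIP from your cluster):
# Query CoreDNS directly via its ClusterIP
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- nslookup kubernetes.default 10.233.0.3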
Verify Pod DNS Resolver #
Verify the DNS resolver of a pod:
# Run a busybox pod to test the DNS resolution
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- cat /etc/resolv.conf
# Alternatively, run a busybox pod and open a shell:
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- sh
# List DNS resolver
cat /etc/resolv.conf
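The nameserver entry should point to the CoreDNS (or node-local DNS cache) ClusterIP. A typical pod resolv.conf looks roughly like this; the exact values depend on the cluster:
# Shell output (example):
nameserver 10.233.0.3
search default.svc.cluster.local svc.cluster.local cluster.local
options ndots:5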
Logs #
# List CoreDNS logs
kubectl logs -l k8s-app=kube-dns -n kube-system
# List local DNS logs (DNS cache)
kubectl logs -n kube-system -l k8s-app=node-local-dns
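If the logs show startup messages but no queries, you can temporarily enable per-query logging by adding the log plugin to the Corefile (remove it again after debugging, as it is verbose):
# Edit the Corefile and add a line containing only "log" inside the ".:53 { }" block
kubectl edit cm coredns -n kube-system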
Node DNS #
Verify the DNS resolver:
# Verify the DNS resolver of the Kubernetes Nodes
cat /etc/resolv.conf
Kubelet DNS settings:
# Verify the Kubelet configuration (Debian 12)
cat /var/lib/kubelet/config.yaml
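The relevant fields are clusterDNS (the DNS server IP written into each pod's /etc/resolv.conf) and clusterDomain (the cluster's DNS suffix):
# Show the DNS-related Kubelet settings
grep -E -A 2 'clusterDNS|clusterDomain' /var/lib/kubelet/config.yaml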