Prerequisites #
For the cluster nodes I use two Ubuntu 23.04 servers with the following IP addresses:
192.168.30.51 drbd1
192.168.30.52 drbd2
I use the following storage configuration on both nodes:
# List blockdevices
lsblk
# Shell output:
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 1M 0 part
├─sda2 8:2 0 1.8G 0 part /boot
└─sda3 8:3 0 18.2G 0 part
└─ubuntu--vg-ubuntu--lv 253:0 0 10G 0 lvm /
sdb 8:16 0 5G 0 disk
DRBD Setup #
# Install DRBD packages
sudo apt install drbd-utils -y
# Configure DNS in hosts file
sudo vi /etc/hosts
# Add cluster hostnames
192.168.30.51 drbd1
192.168.30.52 drbd2
DRBD Configuration #
DRBD can be used on a hard drive, a partition, a RAID or an LVM logical volume. The size should be identical on both nodes.
# Configure DRBD: On both nodes
sudo vi /etc/drbd.conf
# /etc/drbd.conf
global { usage-count no; }
common { syncer { rate 100M; } }
resource drbd-storage { # Define name of DRBD storage resource
protocol C; # Define protocol type
startup {
wfc-timeout 15;
degr-wfc-timeout 60;
}
net {
cram-hmac-alg sha1;
shared-secret "secret";
}
on drbd1 { # Use hostname
device /dev/drbd0; # Define DRBD block device
disk /dev/sdb; # Define storage device
address 192.168.30.51:7788; # Define IP of node
meta-disk internal;
}
on drbd2 { # Use hostname
device /dev/drbd0; # Define DRBD block device
disk /dev/sdb; # Define storage device
address 192.168.30.52:7788; # Define IP of node
meta-disk internal;
}
}
Protocol types
A - Asynchronous replication (suitable for long distance replication)
B - Memory synchronous (semi-synchronous) replication
C - Synchronous replication (suitable for short distance networks)
Meta-Disk
internal
Metadata is stored on same device as the data
Start DRBD #
Initialize DRBD #
# Initialize the meta data storage: On both nodes
sudo drbdadm create-md drbd-storage
# or
sudo drbdadm create-md all
# Shell output:
initializing activity log
initializing bitmap (160 KB) to all zero
Writing meta data...
New drbd meta data block successfully created.
Start Daemon #
# Start the DRBD daemon: On both nodes
sudo systemctl start drbd.service
# Check blockdevices
lsblk
# Shell Output: Node 1
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 1M 0 part
├─sda2 8:2 0 1.8G 0 part /boot
└─sda3 8:3 0 18.2G 0 part
└─ubuntu--vg-ubuntu--lv 253:0 0 10G 0 lvm /
sdb 8:16 0 5G 0 disk
└─drbd0 147:0 0 5G 1 disk
# Shell Output: Node 2
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 1M 0 part
├─sda2 8:2 0 1.8G 0 part /boot
└─sda3 8:3 0 18.2G 0 part
└─ubuntu--vg-ubuntu--lv 253:0 0 10G 0 lvm /
sdb 8:16 0 5G 0 disk
└─drbd0 147:0 0 5G 1 disk
Start Synchronisation #
# Start the data synchronisation: Run on node 1 (define node 1 as primary node)
sudo drbdadm -- --overwrite-data-of-peer primary drbd-storage
# Watch synchronisation: Run on node 2
watch -n1 cat /proc/drbd
# Shell output: Wait till "cs:Connected"
Every 1.0s: cat /proc/drbd drbd2: Thu Aug 31 18:42:30 2023
version: 8.4.11 (api:1/proto:86-101)
srcversion: 05D52F25C803AEC1A945DEF
0: cs:SyncTarget ro:Secondary/Primary ds:Inconsistent/UpToDate C r-----
ns:0 nr:1097728 dw:1097728 dr:0 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:4144956
[===>................] sync'ed: 21.1% (4044/5116)M
finish: 0:01:48 speed: 38,040 (37,852) want: 50,280 K/sec
# Check status
drbdadm status drbd-storage
# Shell output: Node 1
drbd-storage role:Primary
disk:UpToDate
peer role:Secondary
replication:Established peer-disk:UpToDate
# Shell output: Node 2
drbd-storage role:Secondary
disk:UpToDate
peer role:Primary
replication:Established peer-disk:UpToDate
Create Filesystem #
# Create ext4 filesystem: Run on node 1
sudo mkfs.ext4 /dev/drbd0
# Create mountpoint: Run on node 1
sudo mkdir -p /mnt/drbd-storage
# Mount DRBD storage: Run on node 1
sudo mount /dev/drbd0 /mnt/drbd-storage
Switch DRBD Roles #
# Unmount DRBD resource
sudo umount /mnt/drbd-storage
# Switch node 1 to secondary role
sudo drbdadm secondary drbd-storage
# Switch node 2 to primary role
sudo drbdadm primary drbd-storage
# Create mountpoint: Run on node 2
sudo mkdir -p /mnt/drbd-storage
# Mount DRBD storage: Run on node 2
sudo mount /dev/drbd0 /mnt/drbd-storage