ENABLE REPOS
# check first whether the group install alone is enough:
yum groupinstall 'High Availability'
subscription-manager repos --enable=rhel-8-for-x86_64-highavailability-rpms
subscription-manager repos --enable=rhel-8-for-x86_64-resilientstorage-rpms
# the EUS variant does not work:
subscription-manager repos --enable=rhel-8-for-x86_64-highavailability-eus-rpms
vim /etc/selinux/config   -> SELINUX=disabled
dnf repolist
dnf update
yum install pcs pacemaker fence-agents-all
firewall-cmd --permanent --add-service=high-availability
firewall-cmd --add-service=high-availability
passwd hacluster
systemctl start pcsd.service
systemctl status pcsd.service
systemctl enable pcsd.service
yum install pcp-zeroconf
pcs host auth inoss-n1 inoss-n2 inoss-n3 inoss-n4
    user: hacluster
    pass:
pcs cluster setup INOSS --start inoss-n1 inoss-n2 inoss-n3 inoss-n4 --force
pcs cluster enable --all
pcs status                # the first run takes a few seconds
pcs config backup INOSS.cluster.config
pcs property set stonith-enabled=false
pcs stonith sbd disable   -> reboot all nodes
# pcs stonith list
_________________________________
LVM CONFIG
find the node name: uname -n
node1:
vim /etc/lvm/lvm.conf     -> system_id_source = "uname"
scp /etc/lvm/lvm.conf root@192.168.1.62:/etc/lvm/lvm.conf
scp /etc/lvm/lvm.conf root@192.168.1.63:/etc/lvm/lvm.conf
scp /etc/lvm/lvm.conf root@192.168.1.64:/etc/lvm/lvm.conf
lvm systemid              # must return the name of the cluster node
dd bs=1M oflag=sync status=progress count=40 if=/dev/zero of=/dev/sdb   # wipe the first 40 MB of the disk
cfdisk /dev/sdb           # create an LVM partition (type 8e, not Linux 83)
pvcreate /dev/sdb1
vgcreate --setautoactivation n my_vg /dev/sdb1
vgchange --setautoactivation n my_vg   # toggle autoactivation off/on later if needed
vgchange --setautoactivation y my_vg
vgcreate my_vg /dev/sdb   # alternative: use the whole disk instead of a partition
vgs -o+systemid
lvcreate -L5G -n my_lv my_vg
lvcreate -l 100%FREE -n lv_symbol vg_symbol
lvs
XFS : mkfs.xfs /dev/my_vg/my_lv
GFS2: https://tldp.org/HOWTO/LVM-HOWTO/sharinglvm1.html
vgscan
vgchange -ay
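Sanity check (a sketch, assuming passwordless root SSH between the four nodes named above): pcsd should be active everywhere, and "lvm systemid" must match each node's uname before system_id-based VG failover will work.

for n in inoss-n1 inoss-n2 inoss-n3 inoss-n4; do
    echo "== $n =="
    ssh root@$n 'systemctl is-active pcsd; lvm systemid'
done
pcs status corosync   # all four nodes should show as members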
# If you have enabled the use of a devices file by setting use_devicesfile = 1
# in lvm.conf, add the shared device to the devices file on the second node in
# the cluster. By default, the use of a devices file is not enabled.
# lvmdevices --adddev /dev/sdb1
all nodes: vim /etc/lvm/lvm.conf   -> auto_activation_volume_list = [ ]   # empty list
all nodes: dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)   # rebuild initramfs so boot does not auto-activate the VG
lvchange -ay /dev/vg_symbol/lv_symbol

CONFIGURE NFS
all nodes: mkdir /nfsshare
node1:
lvchange -ay my_vg/my_lv
mount /dev/my_vg/my_lv /nfsshare

LVM CHEAT SHEET
disks, partitions, physical volumes:
lvmdiskscan
pvdisplay
pvscan
pvchange -x n /dev/sda1                          # disallow allocation from this PV
pvresize /dev/sda1
pvresize --setphysicalvolumesize 140G /dev/sda1
pvmove /dev/sda1
pvremove /dev/sda1
volume groups:
vgcreate vg1 /dev/sda1 /dev/sdb1
vgextend vg1 /dev/sdb1
vgdisplay vg1
vgscan
vgreduce vg1 /dev/sda1
vgchange
vgremove vg1
vgsplit / vgmerge
vgrename
logical volumes and filesystems:
lvcreate -L 10G vg1
lvchange and lvreduce
lvrename
lvremove
lvscan
lvdisplay
lvextend -l +100%FREE /dev/vg1/lv1
resize2fs /dev/vg1/lv1

[root@z1 ~]# mkdir -p /nfsshare/exports
[root@z1 ~]# mkdir -p /nfsshare/exports/export1
[root@z1 ~]# mkdir -p /nfsshare/exports/export2
[root@z1 ~]# touch /nfsshare/exports/export1/clientdatafile1
[root@z1 ~]# touch /nfsshare/exports/export2/clientdatafile2
[root@z1 ~]# umount /dev/my_vg/my_lv
[root@z1 ~]# vgchange -an my_vg

CONFIGURE RESOURCE
LVM-activate:
pcs resource create my_lvm ocf:heartbeat:LVM-activate vgname=my_vg vg_access_mode=system_id --group nfsgroup
Filesystem:
pcs resource create nfsshare ocf:heartbeat:Filesystem device=/dev/my_vg/my_lv directory=/nfsshare fstype=xfs --group nfsgroup
NFS daemon:
pcs resource create nfs-daemon nfsserver nfs_shared_infodir=/nfsshare/nfsinfo nfs_no_notify=true --group nfsgroup
exports:
pcs resource create nfs-root ocf:heartbeat:exportfs clientspec=192.168.1.0/255.255.255.0 options=rw,sync,no_root_squash directory=/nfsshare/exports fsid=0 --group nfsgroup
pcs resource create nfs-export1 ocf:heartbeat:exportfs clientspec=192.168.1.0/255.255.255.0 options=rw,sync,no_root_squash directory=/nfsshare/exports/export1 fsid=1 --group nfsgroup
pcs resource create nfs-export2 ocf:heartbeat:exportfs clientspec=192.168.1.0/255.255.255.0 options=rw,sync,no_root_squash directory=/nfsshare/exports/export2 fsid=2 --group nfsgroup

IP RESOURCE
# generic example:
# pcs resource create vip ocf:heartbeat:IPaddr2 ip=192.168.1.66 cidr_netmask=24 --group groupname
pcs resource create nfs_ip IPaddr2 ip=192.168.1.66 cidr_netmask=24 --group nfsgroup
pcs resource create nfs-notify nfsnotify source_host=192.168.1.66 --group nfsgroup
rpcinfo -p

open firewall ports for NFS:
firewall-cmd --permanent --zone=public --add-port=2049/tcp
firewall-cmd --permanent --zone=public --add-port=2049/udp
firewall-cmd --permanent --zone=public --add-port=20048/tcp
firewall-cmd --permanent --zone=public --add-port=20048/udp
firewall-cmd --permanent --zone=public --add-port=111/tcp
firewall-cmd --permanent --zone=public --add-port=111/udp
firewall-cmd --reload
firewall-cmd --list-ports
https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/storage_administration_guide/s2-nfs-nfs-firewall-config

showmount -e 192.168.1.66
Export list for 192.168.1.66:
/nfsshare/exports/export1 192.168.1.0/255.255.255.0
/nfsshare/exports         192.168.1.0/255.255.255.0
/nfsshare/exports/export2 192.168.1.0/255.255.255.0

# with fsid=0 the NFSv4 root is /nfsshare/exports, so export1 is mounted relative to it:
mount -o "vers=4" 192.168.1.66:export1 nfsshare
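Failover smoke test (a sketch; which node hosts the group and where the client mounted the share are assumptions, adjust to your setup): push the nfsgroup off its current node and confirm the exports reappear behind the floating IP.

pcs status | grep -A1 nfsgroup     # find the node currently running the group
pcs node standby inoss-n1          # assume inoss-n1 is hosting it; force it away
pcs status                         # group should restart on another node
showmount -e 192.168.1.66          # export list should come back within seconds
pcs node unstandby inoss-n1        # return the node to service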
CONFIGURING A GFS2 FILE SYSTEM ON AN LVM VOLUME IN A PACEMAKER CLUSTER
dnf install dlm
systemctl start dlm
dd bs=1M oflag=sync status=progress count=40 if=/dev/zero of=/dev/sdb   # wipe old metadata
partprobe
pvcreate /dev/sdb
vgcreate my_volume_group /dev/sdb
lvcreate -l 100%FREE -n my_logical_volume my_volume_group   # or a fixed size, e.g. lvcreate -L 4G ...
lsblk
mkfs.gfs2 -t INOSS:shareddisk -p lock_dlm -j 4 /dev/my_volume_group/my_logical_volume   # -t <clustername>:<fsname>; -j 4 = one journal per node
pcs resource create shared1 Filesystem device="/dev/disk/by-uuid/09bd7a32-255b-456e-8b15-f7c842bd885d" directory="/mnt/shared" fstype="gfs2" --group critical
mount -t gfs2 /dev/my_volume_group/my_logical_volume /mnt/shared
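Optional hardening (a minimal sketch, not part of the steps above; the "locking" group name is illustrative): instead of starting dlm by hand with systemctl, let Pacemaker manage it as a clone and order the GFS2 filesystem group after it. Note that GFS2 requires working fencing, so the stonith-enabled=false setting from the setup section is not suitable for a production GFS2 cluster.

pcs resource create dlm ocf:pacemaker:controld op monitor interval=30s on-fail=fence --group locking
pcs resource clone locking interleave=true
pcs constraint order start locking-clone then critical      # "critical" is the group holding shared1
pcs constraint colocation add critical with locking-clone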