### 环境
| | oracledb1 | oracledb2 |
| --- | --- | --- |
| public ip | 192.168.2.160 | 192.168.2.161 |
| heartbeat ip | 10.10.10.1 | 10.10.10.2 |
| vip | 192.168.2.162 | |
| OS | rhel 7.8 | rhel 7.8 |
| DB | oracle 11g | oracle 11g |
### 免密认证
# Passwordless SSH between the two nodes (run the pair of commands on each node).
# On oracledb1: generate a key pair, then push the PUBLIC key to oracledb2.
# Note: ssh-copy-id -i expects the public key file, and the path is absolute (/root/...).
ssh-keygen
ssh-copy-id -i /root/.ssh/id_rsa.pub root@oracledb2
# On oracledb2: same procedure toward oracledb1.
ssh-keygen
ssh-copy-id -i /root/.ssh/id_rsa.pub root@oracledb1
### 添加 hosts 解析
# Add name resolution for both nodes and the VIP on BOTH hosts.
# vi /etc/hosts
127.0.0.1 localhost
### public ip/vip for orcl
192.168.2.160 oracledb1
192.168.2.161 oracledb2
192.168.2.162 clust-orcl-vip
### cluster heartbeat ip
10.10.10.1 oracledb1-hb
10.10.10.2 oracledb2-hb
### 配置 yum 源
# Local yum repositories from the mounted RHEL 7.8 ISO (/mnt).
# HighAvailability / ResilientStorage add-on channels provide pcs/pacemaker/corosync.
[redhat7.8]
name=redhat 7.8
baseurl=file:///mnt
gpgcheck=0
enabled=1

[HighAvailability]
name=HighAvailability
baseurl=file:///mnt/addons/HighAvailability
gpgcheck=0
enabled=1

[ResilientStorage]
name=ResilientStorage
baseurl=file:///mnt/addons/ResilientStorage
gpgcheck=0
enabled=1
### 安装依赖包
# Install the cluster stack (pcs CLI, pacemaker resource manager, corosync messaging).
yum -y install pcs pacemaker corosync
### 关闭防火墙及selinux
# Disable firewalld and SELinux on both nodes (lab setup; in production open
# the required cluster ports instead of disabling the firewall entirely).
systemctl stop firewalld.service
systemctl disable firewalld.service
# Persist SELinux disabled across reboots (path is absolute: /etc/selinux/config),
# then switch to permissive immediately for the current boot.
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
### 设置时间同步
# Time synchronization with chrony (both nodes must agree on time).
yum install -y chrony
systemctl enable chronyd
systemctl start chronyd
systemctl status chronyd
# Add the NTP server:
vi /etc/chrony.conf
# server x.x.x.x iburst
# Restart chronyd to pick up the new server:
systemctl restart chronyd.service
# Verify synchronization:
chronyc sources -v
### 配置共享存储
# Shared storage: set the partition type to LVM in fdisk first, then build
# PV -> VG -> LV on the shared disk and create the Oracle data mount point.
# All paths are absolute (/dev/sdb1, /oradata1).
pvcreate /dev/sdb1
vgcreate oradata1_vg /dev/sdb1
lvcreate -l 100%VG -n oradata1_lv oradata1_vg
mkfs.xfs /dev/oradata1_vg/oradata1_lv
mkdir /oradata1
chown -R oracle:oinstall /oradata1
chmod -R 775 /oradata1
mount /dev/oradata1_vg/oradata1_lv /oradata1
### 两个节点启动pcsd服务
# On BOTH nodes: set the hacluster account password and start pcsd.
# NOTE(review): a hard-coded password in a doc is fine for a lab; use a strong
# secret in production.
echo "redhat" | passwd --stdin hacluster
systemctl start pcsd.service
systemctl enable pcsd.service
systemctl status pcsd.service
### 集群节点认证
# Authenticate the cluster nodes. The node names here must match the names
# used in "pcs cluster setup" below, which uses the heartbeat hostnames
# (oracledb1-hb / oracledb2-hb), not the public hostnames.
pcs cluster auth oracledb1-hb oracledb2-hb -u hacluster -p redhat
### 创建cluster
# Create the cluster over the heartbeat network, start it on all nodes,
# enable it at boot, and verify.
pcs cluster setup --name dbcluster oracledb1-hb oracledb2-hb
pcs cluster start --all
pcs cluster enable --all
pcs cluster status
pcs status
### 禁用stonith
# Disable STONITH fencing (lab/demo shortcut).
# NOTE(review): without fencing, a split-brain can mount the shared LV on both
# nodes and corrupt the Oracle datafiles — configure a fence device in production.
pcs property set stonith-enabled=false
### 开启lvmha
# Enable HA-LVM so the cluster (not the OS) activates the shared volume group.
lvmconf --enable-halvm --services --startstopservices
# Back up lvm.conf before editing it.
cp /etc/lvm/lvm.conf /etc/lvm/lvm.conf.bak
# Restrict auto-activation to the local root VG only ("rhel"); the cluster's
# LVM resource will activate oradata1_vg exclusively.
vi /etc/lvm/lvm.conf
# volume_list = ["rhel"]
# Rebuild the initramfs so the new volume_list applies at boot, then reboot.
dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
reboot
### 创建资源
# Create the cluster resources: VIP, exclusive VG activation, filesystem,
# Oracle listener and Oracle instance, then group them so they move together.
pcs resource create ClusterIP1 IPaddr2 ip=192.168.2.162 cidr_netmask=24 nic=ens33 op monitor interval=30s
pcs resource create orcl_lvm LVM volgrpname=oradata1_vg exclusive=yes op monitor interval=10s
pcs resource create orcl_fs Filesystem device='/dev/oradata1_vg/oradata1_lv' directory=/oradata1 fstype=xfs op monitor interval=10s
pcs resource create orcllsnr oralsnr sid=orcl home=/u01/app/oracle/product/11.2.0/db_1 user=oracle listener=LISTENER_ORCL op monitor interval=10s
pcs resource create orcl oracle sid=orcl home=/u01/app/oracle/product/11.2.0/db_1 user=oracle clear_backupmode=1 shutdown_method=immediate op monitor interval=10s
# Group order also defines start order within the group: VIP -> LVM -> FS -> listener -> DB.
pcs resource group add orclgroup ClusterIP1 orcl_lvm orcl_fs orcllsnr orcl
### 防止资源回切
# Resource stickiness: once running, resources prefer to stay where they are
# (prevents automatic fail-back after the original node recovers).
pcs resource defaults resource-stickiness=500
### 设置资源超时间
# Default operation timeout for all resource actions.
# NOTE(review): 10s is likely far too short for Oracle start/stop — a slow
# instance startup would be treated as a failure; confirm and consider a much
# larger value (Red Hat examples commonly use 240s).
pcs resource op defaults timeout=10s
### 设置投票属性
# Keep resources running when quorum is lost (required behavior for a
# two-node cluster, where losing one node always loses quorum).
# NOTE(review): on corosync 2.x the "two_node" quorum option is the preferred
# mechanism — confirm which applies to this stack.
pcs property set no-quorum-policy=ignore
### 集群故障时服务迁移
# Fail over to the other node after a single resource failure.
pcs resource defaults migration-threshold=1
### 资源约束(用以指定在哪些群集节点上运行资源,以何种顺序装载资源,以及特定资源依赖于哪些其它资源)
# Colocation: each resource must run on the same node as the one before it.
pcs constraint colocation add orcl_lvm with ClusterIP1
pcs constraint colocation add orcl_fs with orcl_lvm
pcs constraint colocation add orcllsnr with orcl_fs
pcs constraint colocation add orcl with orcllsnr
# Ordering: VIP -> LVM -> filesystem -> listener -> database.
pcs constraint order start ClusterIP1 then orcl_lvm
pcs constraint order start orcl_lvm then start orcl_fs
pcs constraint order start orcl_fs then start orcllsnr
pcs constraint order start orcllsnr then start orcl
# Location preferences: oracledb1-hb (score 200) preferred over oracledb2-hb (50).
pcs constraint location ClusterIP1 prefers oracledb1-hb=200
pcs constraint location ClusterIP1 prefers oracledb2-hb=50
pcs constraint location orcl_lvm prefers oracledb1-hb=200
pcs constraint location orcl_lvm prefers oracledb2-hb=50
pcs constraint location orcl_fs prefers oracledb1-hb=200
pcs constraint location orcl_fs prefers oracledb2-hb=50
pcs constraint location orcllsnr prefers oracledb1-hb=200
pcs constraint location orcllsnr prefers oracledb2-hb=50
pcs constraint location orcl prefers oracledb1-hb=200
pcs constraint location orcl prefers oracledb2-hb=50
# Review everything that was configured.
pcs constraint show --full
### 监控
# Live cluster status monitor (refreshes continuously; Ctrl-C to exit).
crm_mon
### 排查资源启动失败信息
# Troubleshoot a resource that fails to start; replace "resource" with the
# actual resource name (e.g. orcl, orcl_fs).
pcs resource debug-start resource
文章转载自6号见的MindPalace,如果涉嫌侵权,请发送邮件至:contact@modb.pro进行举报,并提供相关证据,一经查实,墨天轮将立刻删除相关内容。




