
Lab: Adding a New Node to a RAC Cluster and Removing a RAC Node

Original by Leo, 2023-12-09

Topic: add a new node to an existing RAC cluster, then remove a RAC node.

1. Architecture overview


2. Preparing the new node

-- Build node 3 (rac3) with the same configuration as the existing RAC nodes.

 

2.1 OS version

# cat /etc/centos-release

CentOS Linux release 7.9.2009 (Core)

 

2.2 Remove the libvirt virtualization libraries

# yum remove libvirt-libs

 

3. Network configuration

3.1 Add a second NIC

Add one extra NIC (ens37, used as the private interconnect) to the new host.

Note: ens37 has no configuration file yet, so clone the ens33 profile.
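
For example (a minimal sketch, using the /etc/sysconfig/network-scripts location shown later in this section):

# cd /etc/sysconfig/network-scripts

# cp ifcfg-ens33 ifcfg-ens37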

Then edit it as follows:

# vi ifcfg-ens37

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens37

UUID=03f506b2-a9fb-4b64-94e2-1208c732a649

DEVICE=ens37

ONBOOT=yes

IPADDR=192.168.78.193

HWADDR=00:0C:29:43:D4:EF

PREFIX=24

#GATEWAY=192.168.133.2

#DNS1=192.168.133.2

IPV6_PRIVACY=no

 

# service network restart

 

[root@rac3 network-scripts]# nmcli con show

NAME   UUID                                  TYPE      DEVICE

ens33  a35b793b-c550-40ba-a2b8-5750939e844b  ethernet  ens33 

ens37  03f506b2-a9fb-4b64-94e2-1208c732a649  ethernet  ens37 

 

3.2 Update /etc/hosts

Note: add node 3's IP entries on nodes 1 and 2 as well.

[root@rac3 ~]# vi /etc/hosts

Add the following:

#Public IP (ens33)

192.168.133.191 rac1

192.168.133.192 rac2

192.168.133.196 rac3

 

#Private IP (ens37)

192.168.78.191 rac1-priv

192.168.78.192 rac2-priv

192.168.78.193 rac3-priv

 

#Virtual IP

192.168.133.193 rac1-vip

192.168.133.194 rac2-vip

192.168.133.197 rac3-vip

 

#Scan IP

192.168.133.195 rac-scan

 

# At this stage the public and private addresses should respond to ping; it is normal that the remaining three address types do not respond yet.
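
A quick loop (a sketch, run from node 1 or node 2) to confirm this for the new node's addresses:

# for h in rac3 rac3-priv rac3-vip; do ping -c 1 -W 1 $h >/dev/null 2>&1 && echo "$h: reachable" || echo "$h: no reply"; done

Expect rac3 and rac3-priv to be reachable and rac3-vip to give no reply; the VIP only comes online after Clusterware is added in section 9.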

 

3.3 Configure a local Yum repository

# mount /dev/sr0 /mnt

# cat <<EOF>>/etc/yum.repos.d/local.repo

[local]

name=local

baseurl=file:///mnt

gpgcheck=0

enabled=1

EOF

 

# yum makecache

 

3.4 Install dependency packages

# yum groupinstall -y "Server with GUI"

yum install -y bc \
binutils \
compat-libcap1 \
compat-libstdc++-33 \
gcc \
gcc-c++ \
elfutils-libelf \
elfutils-libelf-devel \
glibc \
glibc-devel \
ksh \
libaio \
libaio-devel \
libgcc \
libstdc++ \
libstdc++-devel \
libxcb \
libX11 \
libXau \
libXi \
libXtst \
libXrender \
libXrender-devel \
make \
net-tools \
nfs-utils \
smartmontools \
sysstat \
e2fsprogs \
e2fsprogs-libs \
fontconfig-devel \
expect \
unzip \
openssh-clients \
readline* \
tigervnc* \
psmisc --skip-broken

 

# Manually upload and install two additional packages: pdksh-5.2.14-37.el5.x86_64.rpm and compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm

sftp> lcd F:\package

sftp> cd /tmp

sftp> put pdksh-5.2.14-37.el5.x86_64.rpm

sftp> put compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm

 

# cd /tmp

# rpm -ivh compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm

# rpm -e ksh-20120801-142.el7.x86_64

# rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm

 

Check that the dependency packages are installed:

# rpm -q bc binutils compat-libcap1 compat-libstdc++-33 gcc gcc-c++ elfutils-libelf elfutils-libelf-devel glibc glibc-devel ksh libaio libaio-devel libgcc libstdc++ libstdc++-devel libxcb libX11 libXau libXi libXtst libXrender libXrender-devel make net-tools nfs-utils smartmontools sysstat e2fsprogs e2fsprogs-libs fontconfig-devel expect unzip openssh-clients readline | grep "not installed"

Note: "package ksh is not installed" is expected here, because ksh was removed and replaced by pdksh above.

The GI/DB installation will not raise a warning on account of this missing package.

 

Install cvuqdisk as root.

Copy the cvuqdisk package to node 3 and install it:

[grid@rac1 rpm]$ scp cvuqdisk-1.0.9-1.rpm root@192.168.133.196:/tmp

[root@rac3 tmp]# rpm -ivh cvuqdisk-1.0.9-1.rpm

 

3.5 Disable the firewall

# systemctl status firewalld.service

# systemctl stop firewalld.service

# systemctl disable firewalld.service

 

3.6 Disable SELinux

Set SELINUX to disabled:

# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

 

Note: the host must be rebooted for this to take effect.
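
After the reboot, a quick check (sketch) that the change took effect:

# getenforce

Disabled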

 

3.7 Disable the ntp and chrony services

Starting with Oracle 11gR2 RAC, the Cluster Time Synchronization Service (CTSS) can synchronize time across the nodes.

Disable the NTP and chrony services here; Oracle will then start the ctssd process automatically.

systemctl stop ntpd

systemctl disable ntpd.service

mv /etc/ntp.conf /etc/ntp.conf.bak

systemctl disable chronyd

systemctl stop chronyd

mv /etc/chrony.conf /etc/chrony.conf_bak
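
Once Grid Infrastructure is running on this node (section 9), CTSS should run in active mode because no NTP/chrony configuration file remains; a quick check, as a sketch:

[grid@rac3 ~]$ crsctl check ctss

Note: expect it to report active mode; CTSS stays in observer mode if an NTP or chrony configuration file is still present.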

 

3.8 Disable transparent huge pages and NUMA

Note: disabling transparent huge pages and NUMA takes effect after a reboot.

# cat /etc/default/grub

GRUB_TIMEOUT=5

GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"

GRUB_DEFAULT=saved

GRUB_DISABLE_SUBMENU=true

GRUB_TERMINAL_OUTPUT="console"

GRUB_CMDLINE_LINUX="crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet"

GRUB_DISABLE_RECOVERY="true"

# sed -i 's/quiet/quiet transparent_hugepage=never numa=off/' /etc/default/grub

# cat /etc/default/grub

GRUB_TIMEOUT=5

GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"

GRUB_DEFAULT=saved

GRUB_DISABLE_SUBMENU=true

GRUB_TERMINAL_OUTPUT="console"

GRUB_CMDLINE_LINUX="crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet transparent_hugepage=never numa=off"

GRUB_DISABLE_RECOVERY="true"

# grub2-mkconfig -o /boot/grub2/grub.cfg

Generating grub configuration file ...

Found linux image: /boot/vmlinuz-3.10.0-1160.el7.x86_64

Found initrd image: /boot/initramfs-3.10.0-1160.el7.x86_64.img

Found linux image: /boot/vmlinuz-0-rescue-954f0e9a62d9487e8bcb8e8598b71fab

Found initrd image: /boot/initramfs-0-rescue-954f0e9a62d9487e8bcb8e8598b71fab.img

done

Values before the reboot:

# cat /sys/kernel/mm/transparent_hugepage/enabled

[always] madvise never

# cat /proc/cmdline

BOOT_IMAGE=/vmlinuz-3.10.0-1160.el7.x86_64 root=/dev/mapper/centos-root ro crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet LANG=en_US.UTF-8

Values after the reboot:

# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

# cat /proc/cmdline

BOOT_IMAGE=/vmlinuz-3.10.0-1160.el7.x86_64 root=/dev/mapper/centos-root ro crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet transparent_hugepage=never numa=off

 

3.9 avahi-daemon configuration

[root@hisdb1 ~]# yum install -y avahi*

[root@hisdb1 ~]# systemctl stop avahi-daemon.socket

[root@hisdb1 ~]# systemctl stop avahi-daemon.service

[root@hisdb1 ~]# pgrep -f avahi-daemon | awk '{print "kill -9 "$1}' | sh

 

Configure NOZEROCONF=yes:

# cat <<EOF>>/etc/sysconfig/network                

NOZEROCONF=yes

EOF

 

Check the status:

# systemctl status avahi-daemon.socket

# systemctl status avahi-daemon.service

 

3.10 Modify /etc/sysctl.conf

# cat <<EOF>>/etc/sysctl.conf

fs.aio-max-nr = 1048576

fs.file-max = 6815744

kernel.shmall = 2097152

kernel.shmmax = 8181829631

kernel.shmmni = 4096

kernel.sem = 250 32000 100 128

net.ipv4.ip_local_port_range = 9000 65500

net.core.rmem_default = 262144

net.core.rmem_max = 4194304

net.core.wmem_default = 262144

net.core.wmem_max = 1048576

net.ipv4.conf.ens33.rp_filter = 1

net.ipv4.conf.ens37.rp_filter = 2

EOF

# /sbin/sysctl -p

 

3.11 Modify user limits

# cat <<EOF>>/etc/security/limits.conf

oracle soft nofile 1024

oracle hard nofile 65536

oracle soft stack 10240

oracle hard stack 32768

oracle soft nproc 2047

oracle hard nproc 16384

oracle hard memlock 134217728

oracle soft memlock 134217728

 

grid soft nofile 1024

grid hard nofile 65536

grid soft stack 10240

grid hard stack 32768

grid soft nproc 2047

grid hard nproc 16384

EOF

 

3.12 Modify /etc/pam.d/login

# cat <<EOF>>/etc/pam.d/login

session required pam_limits.so

session required /lib64/security/pam_limits.so

EOF

 

3.13 Create groups, users, and directories

/usr/sbin/groupadd -g 54321 oinstall

/usr/sbin/groupadd -g 54322 dba

/usr/sbin/groupadd -g 54323 oper

/usr/sbin/groupadd -g 54324 backupdba

/usr/sbin/groupadd -g 54325 dgdba

/usr/sbin/groupadd -g 54326 kmdba

/usr/sbin/groupadd -g 54327 asmdba

/usr/sbin/groupadd -g 54328 asmoper

/usr/sbin/groupadd -g 54329 asmadmin

/usr/sbin/groupadd -g 54330 racdba

/usr/sbin/useradd -u 11012 -g oinstall -G asmadmin,asmdba,asmoper,dba,racdba,oper grid

/usr/sbin/useradd -u 54321 -g oinstall -G asmdba,dba,backupdba,dgdba,kmdba,racdba,oper oracle

echo "oracle_4U" |passwd oracle --stdin

echo "grid_4U" |passwd grid --stdin

mkdir -p /u01/app/11.2.0/grid

mkdir -p /u01/app/grid

mkdir -p /u01/app/oracle/product/11.2.0/db

mkdir -p /u01/app/oraInventory

mkdir -p /backup

mkdir -p /home/oracle/scripts

chown -R oracle:oinstall /backup

chown -R oracle:oinstall /home/oracle/scripts

chown -R grid:oinstall /u01

chown -R grid:oinstall /u01/app/grid

chown -R grid:oinstall /u01/app/11.2.0/grid

chown -R grid:oinstall /u01/app/oraInventory

chown -R oracle:oinstall /u01/app/oracle

chmod -R 775 /u01

 

3.14 User environment variables

grid user:

$ cat <<EOF>>/home/grid/.bash_profile

umask 022

export TMP=/tmp

export TMPDIR=\$TMP

export NLS_LANG=AMERICAN_AMERICA.AL32UTF8

export ORACLE_BASE=/u01/app/grid

export ORACLE_HOME=/u01/app/11.2.0/grid

export ORACLE_TERM=xterm

export TNS_ADMIN=\$ORACLE_HOME/network/admin

export LD_LIBRARY_PATH=\$ORACLE_HOME/lib:/lib:/usr/lib

export ORACLE_SID=+ASM3

export PATH=/usr/sbin:\$PATH

export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$PATH

alias sas='sqlplus / as sysasm'

#export PS1="[\`whoami\`@\`hostname\`:"'\$PWD]\$ '

EOF

 

$ source .bash_profile

 

Note: on node 3 the ASM instance is +ASM3.

 

oracle user:

$ cat <<EOF>>/home/oracle/.bash_profile

umask 022

export TMP=/tmp

export TMPDIR=\$TMP

export NLS_LANG=AMERICAN_AMERICA.AL32UTF8

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=\$ORACLE_BASE/product/11.2.0/db

export ORACLE_TERM=xterm

export TNS_ADMIN=\$ORACLE_HOME/network/admin

export LD_LIBRARY_PATH=\$ORACLE_HOME/lib:/lib:/usr/lib

export ORACLE_SID=orcl3

export PATH=/usr/sbin:\$PATH

export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$PATH

alias sas='sqlplus / as sysdba'

#export PS1="[\`whoami\`@\`hostname\`:"'\$PWD]\$ '

EOF

 

$ source .bash_profile

 

Note: on node 3 the database instance is orcl3.

3.15 Modify /etc/profile

# vim /etc/profile

Append at the end:

if [ $USER = "oracle" ] || [ $USER = "grid" ]; then

        if [ $SHELL = "/bin/ksh" ]; then

              ulimit -p 16384

              ulimit -n 65536

        else

              ulimit -u 16384 -n 65536

        fi

        umask 022

fi

# source /etc/profile

Note: this sets the shell resource limits for the oracle and grid users.
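
A quick check (sketch) that a fresh oracle login picks up these limits:

# su - oracle -c 'ulimit -u; ulimit -n'

The two values should match the settings above (16384 and 65536).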

 

4. Shared storage

After attaching the shared disks to node 3, edit the host's .vmx file before powering the host back on.

Add the following to the .vmx file:

disk.locking = "FALSE"

disk.EnableUUID = "TRUE"

 

-- Re-read the partition tables

[root@rac3 ~]# partprobe
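
The shared disks should now be visible as additional block devices; a quick check (sketch, device names may differ):

[root@rac3 ~]# lsblk -d -o NAME,SIZE,TYPE /dev/sd?

The sizes should match the shared disks presented to nodes 1 and 2 (10G and 20G here).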

 

5. Multipath

5.1 Install multipath

# yum -y install device-mapper*

# mpathconf --enable --with_multipathd y

 

5.2 Check the scsi_id of the shared disks

[root@hisdb1 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdb

36000c2967b415216947d577e41c3ae0e

[root@hisdb1 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdc

36000c29cc1beff5fa34e51edc68b8d1e

 

5.3 Configure multipath

The wwid values are the scsi_ids obtained above; the aliases can be chosen freely.

# cat <<EOF>/etc/multipath.conf

defaults {

    user_friendly_names yes

}

 

blacklist {

  devnode "^sda"

}

multipaths {

  multipath {

  wwid "36000c29ab97a3c68a42010d3ff22c349"

  alias ocr

  }

  multipath {

  wwid "36000c292c1ebe544c644ebecafc5e794"

  alias data

  }

}

EOF

 

5.4 Activate the multipath devices

# multipath -F

# multipath -v2

# multipath -ll

data (36000c292c1ebe544c644ebecafc5e794) dm-4 VMware, ,VMware Virtual S

size=20G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 0:0:2:0 sdc 8:32 active ready running

ocr (36000c29ab97a3c68a42010d3ff22c349) dm-2 VMware, ,VMware Virtual S

size=10G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 0:0:1:0 sdb 8:16 active ready running

 

# lsblk -p

NAME                           MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINT

/dev/sda                         8:0    0  100G  0 disk 

├─/dev/sda1                      8:1    0    1G  0 part  /boot

└─/dev/sda2                      8:2    0   99G  0 part 

  ├─/dev/mapper/rhel_rac3-root 253:0    0   95G  0 lvm   /

  └─/dev/mapper/rhel_rac3-swap 253:1    0    4G  0 lvm   [SWAP]

/dev/sdb                         8:16   0   10G  0 disk 

├─/dev/sdb1                      8:17   0   10G  0 part 

└─/dev/mapper/ocr              253:2    0   10G  0 mpath

  └─/dev/mapper/ocr1           253:3    0   10G  0 part 

/dev/sdc                         8:32   0   20G  0 disk 

├─/dev/sdc1                      8:33   0   20G  0 part 

└─/dev/mapper/data             253:4    0   20G  0 mpath

  └─/dev/mapper/data1          253:5    0   20G  0 part 

/dev/sr0                        11:0    1  4.2G  0 rom    

 

6. ASMLib

6.1 Upload the packages

sftp> cd /root

sftp> lcd F:\package\asmlib\el7

sftp> lls

kmod-oracleasm-2.0.8-21.0.1.el7.x86_64.rpm  oracleasm-support-2.1.11-2.el7.x86_64.rpm

oracleasmlib-2.0.12-1.el7.x86_64.rpm

sftp> put *.rpm

 

6.2 Install ASMLib

# rpm -ivh oracleasm-support-2.1.11-2.el7.x86_64.rpm

# rpm -ivh kmod-oracleasm-2.0.8-21.0.1.el7.x86_64.rpm

# rpm -ivh oracleasmlib-2.0.12-1.el7.x86_64.rpm

 

6.3 Configure ASMLib

[root@rac3 ~]# /usr/sbin/oracleasm configure -i

Configuring the Oracle ASM library driver.

 

This will configure the on-boot properties of the Oracle ASM library

driver.  The following questions will determine whether the driver is

loaded on boot and what permissions it will have.  The current values

will be shown in brackets ('[]').  Hitting <ENTER> without typing an

answer will keep that current value.  Ctrl-C will abort.

 

Default user to own the driver interface []: grid

Default group to own the driver interface []: asmadmin

Start Oracle ASM library driver on boot (y/n) [n]: y

Scan for Oracle ASM disks on boot (y/n) [y]: y

Writing Oracle ASM library driver configuration: done

 

[root@rac3 ~]# /usr/sbin/oracleasm init

Creating /dev/oracleasm mount point: /dev/oracleasm

Loading module "oracleasm": oracleasm

Configuring "oracleasm" to use device physical block size

Mounting ASMlib driver filesystem: /dev/oracleasm

 

6.4 Create ASM disks

Note: this step is executed only on node 1.

# oracleasm createdisk data01 /dev/mapper/ocr1

# oracleasm createdisk data02 /dev/mapper/data1

Note: since the ASM disks already exist, step 6.4 does not need to be executed when adding the new node.

 

6.5 Verify the disks

[root@rac3 ~]# oracleasm scandisks

Reloading disk partitions: done

Cleaning any stale ASM disks...

Scanning system for ASM disks...

Instantiating disk "DATA01"

Instantiating disk "DATA02"

[root@rac3 ~]# oracleasm listdisks

DATA01

DATA02

[root@rac3 ~]# oracleasm querydisk DATA01

Disk "DATA01" is a valid ASM disk

[root@rac3 ~]# oracleasm querydisk DATA02

Disk "DATA02" is a valid ASM disk

[root@rac3 ~]# ls -ltr /dev/oracleasm/disks

total 0

brw-rw---- 1 grid asmadmin 8, 17 Dec  9 11:08 DATA01

brw-rw---- 1 grid asmadmin 8, 33 Dec  9 11:08 DATA02

 

7. SSH user equivalence

7.1 Configure SSH user equivalence for grid

[root@rac3 tmp]# su - grid

[grid@rac3 ~]$ mkdir ~/.ssh

[grid@rac3 ~]$ chmod 700 ~/.ssh

 

-- Generate the user's authentication keys with the following commands

[grid@rac3 ~]$ ssh-keygen -t rsa

-- press Enter at every prompt

 

[grid@rac3 ~]$ ssh-keygen -t dsa

-- press Enter at every prompt

 

Node 1:

[grid@rac1 ~]$ ssh rac3 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys

[grid@rac1 ~]$ scp ~/.ssh/authorized_keys rac3:~/.ssh/

 

Node 2:

[grid@rac2 .ssh]$ ssh rac3 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys

 

Test

Node 3:

[grid@rac3 .ssh]$ ssh rac1 date

[grid@rac3 ~]$ ssh rac1-priv date

[grid@rac3 .ssh]$ ssh rac2 date

[grid@rac3 ~]$ ssh rac2-priv date

 

Node 1:

[grid@rac1 ~]$ ssh rac3 date

[grid@rac1 ~]$ ssh rac3-priv date

 

Node 2:

[grid@rac2 ~]$ ssh rac3 date

[grid@rac2 ~]$ ssh rac3-priv date

 

# Each of these must connect without prompting for a 'yes' confirmation or a password.
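
A loop such as the following (a sketch; run it on every node, as grid here and again as oracle after section 7.2) makes it easy to confirm that all combinations connect without any prompt:

$ for h in rac1 rac1-priv rac2 rac2-priv rac3 rac3-priv; do ssh -o BatchMode=yes $h date; done

With BatchMode=yes, ssh fails instead of prompting, so any remaining password or host-key prompt shows up as an error.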

 

7.2 Configure SSH user equivalence for oracle

[root@rac3 tmp]# su - oracle

[oracle@rac3 ~]$ mkdir ~/.ssh

[oracle@rac3 ~]$ chmod 700 ~/.ssh

 

-- Generate the user's authentication keys with the following commands

[oracle@rac3 ~]$ ssh-keygen -t rsa

-- press Enter at every prompt

 

[oracle@rac3 ~]$ ssh-keygen -t dsa

-- press Enter at every prompt

 

Node 1:

[oracle@rac1 ~]$ ssh rac3 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys

[oracle@rac1 ~]$ scp ~/.ssh/authorized_keys rac3:~/.ssh/

 

Node 2:

[oracle@rac2 ~]$ ssh rac3 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys

Test

Node 3:

[oracle@rac3 ~]$  ssh rac1 date

[oracle@rac3 .ssh]$ ssh rac1-priv date

[oracle@rac3 ~]$  ssh rac2 date

[oracle@rac3 .ssh]$ ssh rac2-priv date

 

Node 1:

[oracle@rac1 ~]$ ssh rac3 date

[oracle@rac1 ~]$ ssh rac3-priv date

 

Node 2:

[oracle@rac2 ~]$ ssh rac3 date

[oracle@rac2 ~]$ ssh rac3-priv date

 

# Again, every connection must succeed without a 'yes' prompt or a password.

 

8. CVU pre-add checks

[grid@rac1 ~]$ cluvfy stage -pre nodeadd -n rac3 -verbose

Note: the message "Pre-check for node addition was successful" indicates success.

 

[oracle@rac2 ~]$ cluvfy stage -pre nodeadd -n rac3 -verbose

Note: the same "Pre-check for node addition was successful" message is expected here.

 

9. Add Clusterware to the new node

9.1 Add Clusterware

Step 1: On any existing node, run the following as the grid user to extend the Clusterware to the new node:

[grid@rac1 ~]$ cd /u01/app/11.2.0/grid/oui/bin

[grid@rac1 bin]$ ./addNode.sh -silent "CLUSTER_NEW_NODES={rac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac3-vip}"

 

Step 2: On the new node, run the following as root (the end of the previous step's output prompts for these scripts):

[root@rac3 ~]# /u01/app/oraInventory/orainstRoot.sh

[root@rac3 ~]# /u01/app/11.2.0/grid/root.sh
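
After root.sh completes, the new node should appear as an active cluster member; a quick check from any node (sketch):

[grid@rac1 ~]$ olsnodes -s -t

rac1    Active  Unpinned

rac2    Active  Unpinned

rac3    Active  Unpinned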

 

9.2 Verify the node addition

[grid@rac1 bin]$ crs_stat -t

Name           Type           Target    State     Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE    rac1       

ora....ER.lsnr ora....er.type ONLINE    ONLINE    rac1       

ora....N1.lsnr ora....er.type ONLINE    ONLINE    rac2       

ora.OCR.dg     ora....up.type ONLINE    ONLINE    rac1        

ora.asm        ora.asm.type   ONLINE    ONLINE    rac1       

ora.cvu        ora.cvu.type   ONLINE    ONLINE    rac1       

ora.gsd        ora.gsd.type   OFFLINE   OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE    rac1        

ora.oc4j       ora.oc4j.type  ONLINE    ONLINE    rac1       

ora.ons        ora.ons.type   ONLINE    ONLINE    rac1       

ora.orcl.db    ora....se.type ONLINE    ONLINE    rac1       

ora....SM1.asm application    ONLINE    ONLINE    rac1       

ora....C1.lsnr application    ONLINE    ONLINE    rac1       

ora.rac1.gsd   application    OFFLINE   OFFLINE              

ora.rac1.ons   application    ONLINE    ONLINE    rac1       

ora.rac1.vip   ora....t1.type ONLINE    ONLINE    rac1       

ora....SM2.asm application    ONLINE    ONLINE    rac2       

ora....C2.lsnr application    ONLINE    ONLINE    rac2       

ora.rac2.gsd   application    OFFLINE   OFFLINE              

ora.rac2.ons   application    ONLINE    ONLINE    rac2       

ora.rac2.vip   ora....t1.type ONLINE    ONLINE    rac2       

ora....SM3.asm application    ONLINE    ONLINE    rac3       

ora....C3.lsnr application    ONLINE    ONLINE    rac3       

ora.rac3.gsd   application    OFFLINE   OFFLINE              

ora.rac3.ons   application    ONLINE    ONLINE    rac3       

ora.rac3.vip   ora....t1.type ONLINE    ONLINE    rac3       

ora.scan1.vip  ora....ip.type ONLINE    ONLINE    rac2

 

10. Add the RAC Database Software to the new node

10.1 Add the database software

Step 1: On any existing node, run the following as the oracle user to extend the RAC Database Software:

[root@rac1 ~]# su - oracle

[oracle@rac1 ~]$ cd $ORACLE_HOME/oui/bin

[oracle@rac1 bin]$ ./addNode.sh -silent "CLUSTER_NEW_NODES={rac3}"

 

Step 2: On the new node, run the following as root (the end of the previous step's output prompts for it):

[root@rac3 ~]# /u01/app/oracle/product/11.2.0/db/root.sh

 

Note: before running root.sh, fix the missing /u01/app/oracle/product/11.2.0/db/bin/nmhs file; otherwise it reports the following errors.

Now product-specific root actions will be performed.

/bin/chown: cannot access ‘/u01/app/oracle/product/11.2.0/db/bin/nmhs’: No such file or directory

/bin/chmod: cannot access ‘/u01/app/oracle/product/11.2.0/db/bin/nmhs’: No such file or directory

Finished product-specific root actions.

 

Workaround:

[root@rac1 ~]# ls -ltr /u01/app/oracle/product/11.2.0/db/bin/nmhs

-rws--x--- 1 root oinstall 0 Aug 24  2013 /u01/app/oracle/product/11.2.0/db/bin/nmhs

[root@rac1 ~]# scp /u01/app/oracle/product/11.2.0/db/bin/nmhs rac3:/u01/app/oracle/product/11.2.0/db/bin/

[root@rac3 tmp]# chown root:oinstall /u01/app/oracle/product/11.2.0/db/bin/nmhs

[root@rac3 tmp]# chmod 4710 /u01/app/oracle/product/11.2.0/db/bin/nmhs

 

[root@rac3 bin]# /u01/app/oracle/product/11.2.0/db/root.sh

Performing root user operation for Oracle 11g

 

The following environment variables are set as:

    ORACLE_OWNER= oracle

    ORACLE_HOME=  /u01/app/oracle/product/11.2.0/db

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

The contents of "dbhome" have not changed. No need to overwrite.

The contents of "oraenv" have not changed. No need to overwrite.

The contents of "coraenv" have not changed. No need to overwrite.

 

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Finished product-specific root actions.

 

10.2 CVU post-add checks

grid user:

[grid@rac1 ~]$ cluvfy stage -post nodeadd -n rac3 -verbose

 

oracle user:

[oracle@rac1 ~]$ cluvfy stage -post nodeadd -n rac3 -verbose

 

Note: the commands above verify that the GI and Database Software were added correctly on the new node.

They report warnings that rac-scan cannot be resolved (PRVG-1101, PRVF-4657, PRVF-4664); these can be ignored.
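
To confirm that the SCAN warnings are only about /etc/hosts resolution (sketch):

[grid@rac1 ~]$ getent hosts rac-scan

192.168.133.195 rac-scan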

 

11. Add a database instance on the new node

11.1 Add the instance

-- Check the existing instances.

[grid@rac2 ~]$ srvctl status database -d orcl

Instance orcl1 is running on node rac1

Instance orcl2 is running on node rac2

 

-- On any existing node, run the following as the oracle user to add the database instance on the new node:

[oracle@rac1 ~]$ dbca -silent -addInstance -nodeList "rac3" -gdbName "orcl" -instanceName "orcl3" -sysDBAUserName "sys" -sysDBAPassword oracle_4U

 

Parameter reference:

dbca -silent -addInstance -nodeList [node_name] -gdbName [global_dbname] -instanceName [newnode_instance] -sysDBAUserName [sysdba] -sysDBAPassword [password]

-silent: run in silent (non-interactive) mode

[node_name]: name of the node on which the instance is added

[global_dbname]: global database name

[newnode_instance]: name of the instance to add

[sysdba]: name of a database user with SYSDBA privilege

[password]: password of that SYSDBA user

 

11.2 Cluster checks

[grid@rac1 ~]$ crs_stat -t -v

Name           Type           R/RA   F/FT   Target    State     Host       

----------------------------------------------------------------------

ora.DATA.dg    ora....up.type 0/5    0/     ONLINE    ONLINE    rac1       

ora....ER.lsnr ora....er.type 0/5    0/     ONLINE    ONLINE    rac1       

ora....N1.lsnr ora....er.type 0/5    0/0    ONLINE    ONLINE    rac2       

ora.OCR.dg     ora....up.type 0/5    0/     ONLINE    ONLINE    rac1       

ora.asm        ora.asm.type   0/5    0/     ONLINE    ONLINE    rac1       

ora.cvu        ora.cvu.type   0/5    0/0    ONLINE    ONLINE    rac1       

ora.gsd        ora.gsd.type   0/5    0/     OFFLINE   OFFLINE              

ora....network ora....rk.type 0/5    0/     ONLINE    ONLINE    rac1       

ora.oc4j       ora.oc4j.type  0/1    0/2    ONLINE    ONLINE    rac1       

ora.ons        ora.ons.type   0/3    0/     ONLINE    ONLINE    rac1       

ora.orcl.db    ora....se.type 0/2    0/1    ONLINE    ONLINE    rac1       

ora....SM1.asm application    0/5    0/0    ONLINE    ONLINE    rac1       

ora....C1.lsnr application    0/5    0/0    ONLINE    ONLINE    rac1       

ora.rac1.gsd   application    0/5    0/0    OFFLINE   OFFLINE              

ora.rac1.ons   application    0/3    0/0    ONLINE    ONLINE    rac1       

ora.rac1.vip   ora....t1.type 0/0    0/0    ONLINE    ONLINE    rac1       

ora....SM2.asm application    0/5    0/0    ONLINE    ONLINE    rac2       

ora....C2.lsnr application    0/5    0/0    ONLINE    ONLINE    rac2       

ora.rac2.gsd   application    0/5    0/0    OFFLINE   OFFLINE              

ora.rac2.ons   application    0/3    0/0    ONLINE    ONLINE    rac2       

ora.rac2.vip   ora....t1.type 0/0    0/0    ONLINE    ONLINE    rac2       

ora....SM3.asm application    0/5    0/0    ONLINE    ONLINE    rac3       

ora....C3.lsnr application    0/5    0/0    ONLINE    ONLINE    rac3       

ora.rac3.gsd   application    0/5    0/0    OFFLINE   OFFLINE              

ora.rac3.ons   application    0/3    0/0    ONLINE    ONLINE    rac3       

ora.rac3.vip   ora....t1.type 0/0    0/0    ONLINE    ONLINE    rac3       

ora.scan1.vip  ora....ip.type 0/0    0/0    ONLINE    ONLINE    rac2

 

[grid@rac3 ~]$ crsctl stat res -t

-------------------------------------------------------------------------------

NAME           TARGET  STATE        SERVER                   STATE_DETAILS      

-------------------------------------------------------------------------------

Local Resources

-------------------------------------------------------------------------------

ora.DATA.dg

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                         

ora.LISTENER.lsnr

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                        

ora.OCR.dg

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                        

               ONLINE  ONLINE       rac3                                        

ora.asm

               ONLINE  ONLINE       rac1                     Started            

               ONLINE  ONLINE       rac2                     Started            

               ONLINE  ONLINE       rac3                     Started            

ora.gsd

               OFFLINE OFFLINE      rac1                                        

               OFFLINE OFFLINE      rac2                                        

               OFFLINE OFFLINE      rac3                                        

ora.net1.network

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                         

               ONLINE  ONLINE       rac3                                        

ora.ons

               ONLINE  ONLINE       rac1                                        

               ONLINE  ONLINE       rac2                                         

               ONLINE  ONLINE       rac3                                        

-------------------------------------------------------------------------------

Cluster Resources

-------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       rac2                                        

ora.cvu

      1        ONLINE  ONLINE       rac1                                        

ora.oc4j

      1        ONLINE  ONLINE       rac1                                        

ora.orcl.db

      1        ONLINE  ONLINE       rac1                     Open               

      2        ONLINE  ONLINE       rac2                     Open                

      3        ONLINE  ONLINE       rac3                     Open               

ora.rac1.vip

      1        ONLINE  ONLINE       rac1                                        

ora.rac2.vip

      1        ONLINE  ONLINE       rac2                                         

ora.rac3.vip

      1        ONLINE  ONLINE       rac3                                        

ora.scan1.vip

      1        ONLINE  ONLINE       rac2    

 

[grid@rac1 ~]$ srvctl status database -d orcl

Instance orcl1 is running on node rac1

Instance orcl2 is running on node rac2

Instance orcl3 is running on node rac3

 

[grid@rac1 ~]$ srvctl status listener

Listener LISTENER is enabled

Listener LISTENER is running on node(s): rac2,rac1,rac3

 

[grid@rac1 ~]$ srvctl config vip -n rac3

VIP exists: /rac3-vip/192.168.133.197/192.168.133.0/255.255.255.0/ens33, hosting node rac3

 

[oracle@rac1 ~]$ sqlplus / as sysdba

SQL> select instance_name,status from gv$instance;

 

INSTANCE_NAME    STATUS

---------------- ------------

orcl1            OPEN

orcl3            OPEN

orcl2            OPEN

 

12. Remove node rac3

12.1 Delete the node's database instance

Step 1. On any of the remaining nodes, run the following as the oracle user to delete the instance:

[oracle@rac1 ~]$ dbca -silent -deleteInstance -nodeList "rac3" -gdbName "orcl" -instanceName "orcl3" -sysDBAUserName "sys" -sysDBAPassword oracle_4U

Deleting instance

1% complete

2% complete

6% complete

13% complete

20% complete

26% complete

33% complete

40% complete

46% complete

53% complete

60% complete

66% complete

Completing instance management.

100% complete

Look at the log file "/u01/app/oracle/cfgtoollogs/dbca/orcl.log" for further details.

[oracle@rac1 ~]$ srvctl status database -d orcl

Instance orcl1 is running on node rac1

Instance orcl2 is running on node rac2

 

12.2 Remove the Database Software

Step 1. On any node, run the following to stop and disable the listener on the node being removed:

[grid@rac1 ~]$ srvctl status listener

Listener LISTENER is enabled

Listener LISTENER is running on node(s): rac2,rac1,rac3

[grid@rac1 ~]$ srvctl stop listener -n rac3

[grid@rac1 ~]$ srvctl status listener     

Listener LISTENER is enabled

Listener LISTENER is running on node(s): rac2,rac1

[grid@rac1 ~]$ srvctl disable listener -n rac3

 

Step 2. On the node being removed, run the following as oracle to update its local Inventory:

[oracle@rac3 ~]$ cd $ORACLE_HOME/oui/bin

[oracle@rac3 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac3}" -local

Starting Oracle Universal Installer...

 

Checking swap space: must be greater than 500 MB.   Actual 3696 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

 

Step 3. Remove the RAC Database Home. There are two cases:

1) Shared Oracle RAC Database Home

oracle@rac3$ cd /u01/app/oracle/product/11.2.0/db/oui/bin

oracle@rac3 bin$ ./runInstaller -detachHome ORACLE_HOME=$ORACLE_HOME

 

2) Non-shared Oracle RAC Database Home

[oracle@rac3 bin]$ cd $ORACLE_HOME/deinstall/

[oracle@rac3 deinstall]$ ./deinstall -local

The first deinstall attempt reports an error; run the following to fix it and then rerun deinstall:

[oracle@rac3 deinstall]$ /tmp/deinstall2023-12-09_03-56-39PM/sshUserSetup.sh -hosts "rac3 " -user oracle

[oracle@rac3 deinstall]$ ./deinstall -local

......

####################### CLEAN OPERATION SUMMARY #######################

Cleaning the config for CCR

As CCR is not configured, so skipping the cleaning of CCR configuration

CCR clean is finished

Successfully detached Oracle home '/u01/app/oracle/product/11.2.0/db' from the central inventory on the local node.

Successfully deleted directory '/u01/app/oracle/product/11.2.0/db' on the local node.

Failed to delete directory '/u01/app/oracle' on the local node.  (Note: this warning can be ignored.)

Oracle Universal Installer cleanup completed with errors.

 

Oracle deinstall tool successfully cleaned up temporary directories.

#######################################################################

 

 

############# ORACLE DEINSTALL & DECONFIG TOOL END #############

 

Step 4. On each remaining node, run the following as oracle to update the Inventory:

[oracle@rac1 ~]$ cd /u01/app/oracle/product/11.2.0/db/oui/bin/

[oracle@rac1 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac1}"              

Starting Oracle Universal Installer...

 

Checking swap space: must be greater than 500 MB.   Actual 3088 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

 

[oracle@rac2 ~]$ cd /u01/app/oracle/product/11.2.0/db/oui/bin/

[oracle@rac2 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac2}" 

Starting Oracle Universal Installer...

 

Checking swap space: must be greater than 500 MB.   Actual 3378 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

 

12.3 Remove the node's Clusterware

Step 1. Confirm that the grid user's ORACLE_HOME is set correctly on all nodes:

[grid@rac1 ~]$ grep ORACLE_HOME .bash_profile | head -1

export ORACLE_HOME=/u01/app/11.2.0/grid

[grid@rac2 ~]$ grep ORACLE_HOME .bash_profile | head -1

export ORACLE_HOME=/u01/app/11.2.0/grid

[grid@rac3 ~]$ grep ORACLE_HOME .bash_profile | head -1

export ORACLE_HOME=/u01/app/11.2.0/grid

 

Step 2. Check whether any node is pinned:

[grid@rac1 ~]$ olsnodes -s -t

rac1    Active  Unpinned

rac2    Active  Unpinned

rac3    Active  Unpinned

 

Every node should show Unpinned; if a node is reported as pinned, run:

$ crsctl unpin css -n node_name

 

Step 3. On the node being removed, run the following as root:

[root@rac3 install]# /u01/app/11.2.0/grid/perl/bin/perl /u01/app/11.2.0/grid/crs/install/rootcrs.pl -deconfig -force

 

Note: normally the following commands would be used, but 11g is affected by bug 18650453, hence the explicit perl invocation above.

[root@rac3 ~]# cd /u01/app/11.2.0/grid/crs/install

[root@rac3 install]# ./rootcrs.pl -deconfig -force

 

Check the cluster status:

[grid@rac1 ~]$ crs_stat -t

Name           Type           Target    State     Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE    rac1       

ora....ER.lsnr ora....er.type ONLINE    ONLINE    rac1       

ora....N1.lsnr ora....er.type ONLINE    ONLINE    rac2       

ora.OCR.dg     ora....up.type ONLINE    ONLINE    rac1       

ora.asm        ora.asm.type   ONLINE    ONLINE    rac1       

ora.cvu        ora.cvu.type   ONLINE    ONLINE    rac1       

ora.gsd        ora.gsd.type   OFFLINE   OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE    rac1       

ora.oc4j       ora.oc4j.type  ONLINE    ONLINE    rac1       

ora.ons        ora.ons.type   ONLINE    ONLINE    rac1       

ora.orcl.db    ora....se.type ONLINE    ONLINE    rac1       

ora....SM1.asm application    ONLINE    ONLINE    rac1       

ora....C1.lsnr application    ONLINE    ONLINE    rac1       

ora.rac1.gsd   application    OFFLINE   OFFLINE              

ora.rac1.ons   application    ONLINE    ONLINE    rac1       

ora.rac1.vip   ora....t1.type ONLINE    ONLINE    rac1       

ora....SM2.asm application    ONLINE    ONLINE    rac2       

ora....C2.lsnr application    ONLINE    ONLINE    rac2       

ora.rac2.gsd   application    OFFLINE   OFFLINE              

ora.rac2.ons   application    ONLINE    ONLINE    rac2       

ora.rac2.vip   ora....t1.type ONLINE    ONLINE    rac2       

ora.scan1.vip  ora....ip.type ONLINE    ONLINE    rac2   

 

Step 4. On any remaining node, run the following as root to delete the node from the cluster:

[root@rac1 ~]# cd /u01/app/11.2.0/grid/bin

[root@rac1 bin]# ./crsctl delete node -n rac3

CRS-4661: Node rac3 successfully deleted.

 

Step 5. On both remaining nodes, run the following as the grid user to update their Inventory:

[grid@rac1 ~]$ cd /u01/app/11.2.0/grid/oui/bin

[grid@rac1 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac1}" CRS=TRUE -silent -local

Starting Oracle Universal Installer...

 

Checking swap space: must be greater than 500 MB.   Actual 3137 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

 

[grid@rac2 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac2}" CRS=TRUE -silent -local 

Starting Oracle Universal Installer...

 

Checking swap space: must be greater than 500 MB.   Actual 3372 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.


Step 6. On the node being removed, run the following as the grid user to uninstall the Clusterware software:

1) Shared Oracle RAC Grid Home

grid@rac3$ cd /u01/app/11.2.0/grid/oui/bin/

grid@rac3 bin$ ./runInstaller -detachHome ORACLE_HOME=$ORACLE_HOME -silent -local

 

2) Non-shared Oracle RAC Grid Home

[grid@rac3 ~]$ cd /u01/app/11.2.0/grid/deinstall

[grid@rac3 deinstall]$ ./deinstall -local

-- The first deinstall attempt reports an error; run the following to fix it and then rerun deinstall.

[grid@rac3 deinstall]$ /tmp/deinstall2023-12-09_05-10-29PM/sshUserSetup.sh -hosts "rac3 " -user grid

[grid@rac3 deinstall]$ ./deinstall -local

-- Accept the defaults (press Enter) through the prompts of the command above; part way through, it asks you to run the following command as root in another session:

[root@rac3 tmp]# /tmp/deinstall2023-12-09_05-12-46PM/perl/bin/perl -I/tmp/deinstall2023-12-09_05-12-46PM/perl/lib -I/tmp/deinstall2023-12-09_05-12-46PM/crs/install /tmp/deinstall2023-12-09_05-12-46PM/crs/install/rootcrs.pl -force  -deconfig -paramfile "/tmp/deinstall2023-12-09_05-12-46PM/response/deinstall_Ora11g_gridinfrahome1.rsp"

 

After the deinstall completes, remove the leftover files:

[root@rac3 ~]# rm -rf /etc/oraInst.loc

[root@rac3 ~]# rm -rf /opt/ORCLfmap

[root@rac3 ~]# rm -rf /etc/oratab

 

Step 7. On the remaining nodes, run the following again as the grid user to update their Inventory:

[grid@rac1 ~]$ cd /u01/app/11.2.0/grid/oui/bin

[grid@rac1 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac1}" CRS=TRUE -silent -local

Starting Oracle Universal Installer...

 

Checking swap space: must be greater than 500 MB.   Actual 3137 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

 

[grid@rac2 bin]$ ./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac2}" CRS=TRUE -silent -local 

Starting Oracle Universal Installer...

 

Checking swap space: must be greater than 500 MB.   Actual 3372 MB    Passed

The inventory pointer is located at /etc/oraInst.loc

The inventory is located at /u01/app/oraInventory

'UpdateNodeList' was successful.

 

Step 8. On any remaining node, run the following as the grid user to verify the node was removed:

grid@rac1 bin$ cluvfy stage -post nodedel -n rac3 -verbose

 

12.4 Follow-up

Restart CRS on any of the remaining nodes (rac1 shown here).

[root@rac1 ~]# cd /u01/app/11.2.0/grid/bin

[root@rac1 bin]# ./crsctl stop crs

[root@rac1 bin]# ./crsctl start crs

 

[grid@rac1 ~]$ srvctl status database -d orcl

Instance orcl1 is running on node rac1

Instance orcl2 is running on node rac2

[grid@rac1 ~]$ srvctl status listener

Listener LISTENER is enabled

Listener LISTENER is running on node(s): rac2,rac1
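
Finally, confirm that rac3 is no longer a member of the cluster (sketch):

[grid@rac1 ~]$ olsnodes -s -t

rac1    Active  Unpinned

rac2    Active  Unpinned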
