Installed an Oracle 11.2.0.4 two-node RAC on RHEL 7. The Grid Infrastructure and database software installed normally and the database instances were created without problems, but after rebooting the servers the cluster state is abnormal: each node no longer recognizes the other node's VIP and DB resources, and each believes the other's VIP and DB are down while its own are fine. In reality both instances are open. Right now both VIPs are plumbed on both nodes, and the SCAN listener is also running on both nodes.
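Since both VIPs and the SCAN VIP being up on both nodes at the same time suggests the two CRS stacks never joined into one cluster after the reboot, a quick check that could be run on both nodes is sketched below (exact output will of course differ):

# do CRS/CSS/EVM see the whole cluster or only the local node?
crsctl check cluster -all

# which nodes does CSS currently consider active cluster members?
olsnodes -s -t

# lower-level stack resources (ora.cssd, ora.crsd, ora.gipcd, ...) on this node
crsctl stat res -t -init

If crsctl check cluster -all reports only the local node on each side, the two servers are effectively running as independent one-node clusters, which would explain why each one shows the other's VIP as FAILED OVER onto itself.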
Node 1:
[root@rhel75h1 ~]# crsctl status res -t
NAME TARGET STATE SERVER STATE_DETAILS
Local Resources
ora.CRSDG.dg
ONLINE ONLINE rhel75h1
ora.DATA.dg
ONLINE ONLINE rhel75h1
ora.LISTENER.lsnr
ONLINE ONLINE rhel75h1
ora.asm
ONLINE ONLINE rhel75h1 Started
ora.gsd
OFFLINE OFFLINE rhel75h1
ora.net1.network
ONLINE ONLINE rhel75h1
ora.ons
ONLINE ONLINE rhel75h1
Cluster Resources
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE rhel75h1
ora.cvu
1 ONLINE ONLINE rhel75h1
ora.oc4j
1 ONLINE ONLINE rhel75h1
ora.rhel75h1.vip
1 ONLINE ONLINE rhel75h1
ora.rhel75h2.vip
1 ONLINE INTERMEDIATE rhel75h1 FAILED OVER
ora.scan1.vip
1 ONLINE ONLINE rhel75h1
ora.test.db
1 OFFLINE OFFLINE Instance Shutdown
2 ONLINE ONLINE rhel75h1 Open
[root@rhel75h1 ~]# ps -ef |grep pmon
grid 2333 1 0 11:41 ? 00:00:00 asm_pmon_+ASM1
oracle 2913 1 0 11:41 ? 00:00:00 ora_pmon_test1
root 3933 3187 0 11:50 pts/2 00:00:00 grep --color=auto pmon
[root@rhel75h1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:bd:99:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.56.10/24 brd 192.168.56.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.56.14/24 brd 192.168.56.255 scope global secondary ens33:1
valid_lft forever preferred_lft forever
inet 192.168.56.13/24 brd 192.168.56.255 scope global secondary ens33:2
valid_lft forever preferred_lft forever
inet 192.168.56.12/24 brd 192.168.56.255 scope global secondary ens33:3
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:febd:998d/64 scope link
valid_lft forever preferred_lft forever
3: ens38: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:bd:99:97 brd ff:ff:ff:ff:ff:ff
inet 172.16.204.10/24 brd 172.16.204.255 scope global noprefixroute ens38
valid_lft forever preferred_lft forever
inet 169.254.167.28/16 brd 169.254.255.255 scope global ens38:1
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:febd:9997/64 scope link
valid_lft forever preferred_lft forever
[root@rhel75h1 ~]#
Node 2:
[root@rhel75h2 ~]# crsctl status res -t
NAME TARGET STATE SERVER STATE_DETAILS
Local Resources
ora.CRSDG.dg
ONLINE ONLINE rhel75h2
ora.DATA.dg
ONLINE ONLINE rhel75h2
ora.LISTENER.lsnr
ONLINE ONLINE rhel75h2
ora.asm
ONLINE ONLINE rhel75h2 Started
ora.gsd
OFFLINE OFFLINE rhel75h2
ora.net1.network
ONLINE ONLINE rhel75h2
ora.ons
ONLINE ONLINE rhel75h2
Cluster Resources
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE rhel75h2
ora.cvu
1 ONLINE ONLINE rhel75h2
ora.oc4j
1 ONLINE ONLINE rhel75h2
ora.rhel75h1.vip
1 ONLINE INTERMEDIATE rhel75h2 FAILED OVER
ora.rhel75h2.vip
1 ONLINE ONLINE rhel75h2
ora.scan1.vip
1 ONLINE ONLINE rhel75h2
ora.test.db
1 ONLINE ONLINE rhel75h2 Open
2 OFFLINE OFFLINE Instance Shutdown
[root@rhel75h2 ~]# ps -ef |grep pmon
grid 2271 1 0 11:41 ? 00:00:00 asm_pmon_+ASM2
oracle 2845 1 0 11:41 ? 00:00:00 ora_pmon_test2
root 3674 2016 0 11:50 pts/0 00:00:00 grep --color=auto pmon
[root@rhel75h2 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:d4:d9:3c brd ff:ff:ff:ff:ff:ff
inet 192.168.56.11/24 brd 192.168.56.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.56.14/24 brd 192.168.56.255 scope global secondary ens33:1
valid_lft forever preferred_lft forever
inet 192.168.56.13/24 brd 192.168.56.255 scope global secondary ens33:2
valid_lft forever preferred_lft forever
inet 192.168.56.12/24 brd 192.168.56.255 scope global secondary ens33:3
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fed4:d93c/64 scope link
valid_lft forever preferred_lft forever
3: ens38: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:d4:d9:46 brd ff:ff:ff:ff:ff:ff
inet 172.16.204.11/24 brd 172.16.204.255 scope global noprefixroute ens38
valid_lft forever preferred_lft forever
inet 169.254.223.58/16 brd 169.254.255.255 scope global ens38:1
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fed4:d946/64 scope link
valid_lft forever preferred_lft forever
[root@rhel75h2 ~]#
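For reference, the VIP and SCAN configuration registered in the OCR can be compared against what is actually plumbed above; a sketch of the commands (run on either node):

# configured node VIPs and their network
srvctl config nodeapps -a

# configured SCAN addresses and SCAN listener
srvctl config scan
srvctl config scan_listener

With 192.168.56.12/.13/.14 currently up on ens33 of both servers at once, the same VIP and SCAN addresses are effectively duplicated on the public network.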
Node 1:
[root@rhel75h1 ~]# oifcfg iflist -p -n
ens33 192.168.56.0 PRIVATE 255.255.255.0
ens38 172.16.204.0 PRIVATE 255.255.255.0
ens38 169.254.0.0 UNKNOWN 255.255.0.0
[root@rhel75h1 ~]# oifcfg getif
ens33 192.168.56.0 global public
ens38 172.16.204.0 global cluster_interconnect
[root@rhel75h1 ~]#
Node 2:
[root@rhel75h2 +asm]# olsnodes -n
rhel75h1 1
rhel75h2 2
[root@rhel75h2 +asm]# oifcfg iflist -p -n
ens33 192.168.56.0 PRIVATE 255.255.255.0
ens38 172.16.204.0 PRIVATE 255.255.255.0
ens38 169.254.0.0 UNKNOWN 255.255.0.0
[root@rhel75h2 +asm]# oifcfg getif
ens33 192.168.56.0 global public
ens38 172.16.204.0 global cluster_interconnect
[root@rhel75h2 +asm]#
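Since oifcfg shows ens38 / 172.16.204.0 as the cluster_interconnect on both nodes, one thing worth verifying after the reboot is whether the private network is actually reachable between the nodes and whether anything (for example firewalld coming back up on RHEL 7) is blocking CSS traffic. A rough checklist, using the private IPs shown above:

# from rhel75h1: is the peer's private address reachable?
ping -c 3 172.16.204.11

# is the OS firewall active again after the reboot? (check on both nodes)
systemctl status firewalld
iptables -L -n

# the CSS log should show whether this node ever saw its peer after startup
# (the grid home path below is an assumption; adjust to the actual $GRID_HOME)
tail -200 /u01/app/11.2.0/grid/log/rhel75h1/cssd/ocssd.log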