
Inspur K1-910 Minicomputer: Adding ASM Disk Groups in Oracle 10g

Original by liketoochao · 2024-04-28

1. Requirements

Description

  • One Oracle 10g database, a two-node RAC running on K1-910 servers
  • One Inspur AS5600 storage array with three volume groups of 10 TB each: volume group 1 is already allocated, volume group 2 can be used for archive log space, and volume group 3 can provide disks for additional ASM disk groups
  • The database is to be switched to archivelog mode

2. Check Database and System Information

Check OS information

[root@jczhdb2 mapper]# uname -a
Linux jczhdb2 2.6.28.10-vs2.1.4 #1 SMP Fri Jul 17 15:51:11 EDT 2015 ia64 ia64 ia64 GNU/Linux

Check disk information

[root@jczhdb2 mapper]# fdisk -l | grep GB

Disk /dev/sda: 2997.9 GB, 2997920727040 bytes
Disk /dev/sdg: 4398.0 GB, 4398046511104 bytes
Disk /dev/sdh: 4398.0 GB, 4398046511104 bytes
Disk /dev/sdi: 1099.5 GB, 1099511627776 bytes
Disk /dev/sdo: 4398.0 GB, 4398046511104 bytes
Disk /dev/sdp: 4398.0 GB, 4398046511104 bytes
Disk /dev/sdq: 1099.5 GB, 1099511627776 bytes
Disk /dev/sdw: 4398.0 GB, 4398046511104 bytes
Disk /dev/sdx: 4398.0 GB, 4398046511104 bytes
Disk /dev/sdy: 1099.5 GB, 1099511627776 bytes
Disk /dev/sdae: 4398.0 GB, 4398046511104 bytes
Disk /dev/sdaf: 4398.0 GB, 4398046511104 bytes
Disk /dev/sdag: 1099.5 GB, 1099511627776 bytes
Disk /dev/dm-4: 4398.0 GB, 4398046511104 bytes
Disk /dev/dm-5: 4398.0 GB, 4398046511104 bytes
Disk /dev/dm-6: 1099.5 GB, 1099511627776 bytes
Disk /dev/dm-8: 1099.5 GB, 1099506046464 bytes

Check the multipath configuration

[root@jczhdb2 mapper]# cat /etc/multipath.conf
# This is a basic configuration file with some examples, for device mapper
# multipath.
# For a complete list of the default configuration values, see
# /usr/share/doc/device-mapper-multipath-0.4.7/multipath.conf.defaults
# For a list of configuration options with descriptions, see
# /usr/share/doc/device-mapper-multipath-0.4.7/multipath.conf.annotated

defaults {
        user_friendly_names yes
}
blacklist {
        devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
        devnode "^(hd|xvd|vd)[a-z]*"
        wwid "3600605b00bf749501ff4184888ff8fb8"
}
# Make sure our multipath devices are enabled.

#blacklist_exceptions {
#        wwid "3600000e00d100000001032ce000c0000"
#}
#}

multipaths
{
multipath
{
wwid            3600000e00d2a0000002a084d00010000
alias           ocr1
}
multipath
{
wwid            3600000e00d2a0000002a084d00020000
alias           ocr2
}
multipath
{
wwid            3600000e00d2a0000002a084d00040000
alias           vote1
}
multipath
{
wwid            3600000e00d2a0000002a084d00030000
alias           vote2
}
multipath
{
wwid            3600000e00d2a0000002a084d00000000
alias           vote3
}
multipath
{
wwid            3600000e00d2a0000002a084d00050000
alias           data1
}
multipath
{
wwid            3600000e00d2a0000002a084d00060000
alias           data2
}
multipath
{
wwid            3600000e00d2a0000002a084d00080000
alias           arch2
}
}
[root@jczhdb2 mapper]#

Database version

[oracle@jczhdb2 ~]$ sqlplus  / as  sysdba

SQL*Plus: Release 10.2.0.5.0 - Production on Fri Nov 6 13:21:08 2020
Copyright (c) 1982, 2010, Oracle.  All Rights Reserved.

Connected to:
Oracle Database 10g Enterprise Edition Release 10.2.0.5.0 - 64bit Production
With the Partitioning, Real Application Clusters, OLAP, Data Mining
and Real Application Testing options

SQL>

ASM disks

SQL> select t.name,t.PATH from v$asm_disk  t;

	NAME			PATH
----------------------------------------------------------------------------------------
DATA_0000			/dev/raw/raw10
DATA_0001			/dev/raw/raw11
DATA_0002			/dev/raw/raw12
DATA_0003			/dev/raw/raw13
DATA_0004			/dev/raw/raw6
DATA_0005			/dev/raw/raw7
DATA_0006			/dev/raw/raw8
DATA_0007			/dev/raw/raw9

8 rows selected.
SQL>
SQL> select t.GROUP_NUMBER,t.name from v$asm_diskgroup  t;

GROUP_NUMBER NAME
------------ ------------------------------
           1 DATA
SQL>

Check the multipath device nodes

[root@jczhdb2 rules.d]# cd /dev/mapper/
[root@jczhdb2 mapper]# ls -la
total 0
drwxr-xr-x  2 root root     400 May 23 12:57 .
drwxr-xr-x 15 root root    6540 May 23 12:57 ..
brw-rw----  1 root disk 253,  6 May 23 12:57 arch2
brw-rw----  1 root disk 253,  8 May 23 12:57 arch2p1
crw-------  1 root root  10, 59 May 23 12:57 control
brw-rw----  1 root disk 253,  4 May 23 12:57 data1
brw-rw----  1 root disk 253,  5 May 23 12:57 data2
brw-rw----  1 root disk 253,  0 May 23 12:57 ocr1
brw-rw----  1 root disk 253,  1 May 23 12:57 ocr2
brw-rw----  1 root disk 253, 13 May 23 12:57 vgdate1-lvdate1
brw-rw----  1 root disk 253, 14 May 23 12:57 vgdate1-lvdate2
brw-rw----  1 root disk 253, 15 May 23 12:57 vgdate1-lvdate3
brw-rw----  1 root disk 253, 16 May 23 12:57 vgdate1-lvdate4
brw-rw----  1 root disk 253,  9 May 23 12:57 vgdate2-lvdate5
brw-rw----  1 root disk 253, 10 May 23 12:57 vgdate2-lvdate6
brw-rw----  1 root disk 253, 11 May 23 12:57 vgdate2-lvdate7
brw-rw----  1 root disk 253, 12 May 23 12:57 vgdate2-lvdate8
brw-rw----  1 root disk 253,  3 May 23 12:57 vote1
brw-rw----  1 root disk 253,  2 May 23 12:57 vote2
brw-rw----  1 root disk 253,  7 May 23 12:57 vote3
[root@jczhdb2 mapper]#

Check the raw device configuration

[root@jczhdb2 rules.d]# cat /etc/rc.local
#!/bin/sh
#
# This script will be executed *after* all the other init scripts.
# You can put your own initialization stuff in here if you don't
# want to do the full Sys V style init stuff.

touch /var/lock/subsys/local
/sbin/modprobe hangcheck-timer
/bin/raw /dev/raw/raw1 /dev/mapper/ocr1
/bin/raw /dev/raw/raw2 /dev/mapper/ocr2
/bin/raw /dev/raw/raw3 /dev/mapper/vote1
/bin/raw /dev/raw/raw4 /dev/mapper/vote2
/bin/raw /dev/raw/raw5 /dev/mapper/vote3
/bin/raw /dev/raw/raw6 /dev/mapper/vgdate1-lvdate1
/bin/raw /dev/raw/raw7 /dev/mapper/vgdate1-lvdate2
/bin/raw /dev/raw/raw8 /dev/mapper/vgdate1-lvdate3
/bin/raw /dev/raw/raw9 /dev/mapper/vgdate1-lvdate4
/bin/raw /dev/raw/raw10 /dev/mapper/vgdate2-lvdate5
/bin/raw /dev/raw/raw11 /dev/mapper/vgdate2-lvdate6
/bin/raw /dev/raw/raw12 /dev/mapper/vgdate2-lvdate7
/bin/raw /dev/raw/raw13 /dev/mapper/vgdate2-lvdate8
sleep 2
chmod 660 /dev/raw/raw*
chown root:oinstall /dev/raw/raw{1,2}
chown oracle:oinstall /dev/raw/raw{3,4,5,6,7,8,9,10,11,12,13}
[root@jczhdb2 rules.d]#

Memory parameters

SQL> show parameter sga

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
lock_sga                             boolean     FALSE
pre_page_sga                         boolean     FALSE
sga_max_size                         big integer 1536M
sga_target                           big integer 1536M
SQL>
SQL> show parameter pga

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
pga_aggregate_target                 big integer 32626M
SQL>

3. Plan

1. The existing storage layout is untidy; each new volume should be 1 TB.
2. Configure disk multipathing.
3. Create raw devices.
4. Add disks to the existing ASM disk group and create a new disk group.
5. Enable archivelog mode on the database.
6. Set the archive log destination parameter.

4. Raw Devices

  • Raw device:
    Also called a raw partition, a raw device is a special character device that has not been formatted and is not accessed by Unix/Linux through a file system. A raw device can be bound to a partition or to an entire disk.
  • Character device:
    Reads and writes to a character device bypass the OS buffer cache; a character device cannot be mounted as a file system.
  • Block device:
    Reads and writes to a block device go through the OS buffer cache; a block device can be mounted into a file system. (A quick check to tell the two apart is shown right after this list.)
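
A quick way to tell the two apart (the device names below already exist on this system) is the first character of ls -l output: b marks a block device, c marks a character device such as a raw binding.

# multipath device under /dev/mapper: the mode string starts with 'b' (block device)
ls -l /dev/mapper/data1
# raw binding: the mode string starts with 'c' (character device)
ls -l /dev/raw/raw1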

In older kernels at most 256 raw devices could be bound; under Linux 4 (RHEL 4) up to 8192 raw devices can be bound.
Under Linux there can be at most 255 partitions, so if raw devices are bound to partitions, at most 255 raw devices can be bound. With LVM there is no such limit.

On Linux a single disk can have at most 15 partitions: 3 primary partitions + 1 extended partition + 11 logical partitions.
The recommended approach is to create 3 primary partitions, make the fourth an extended partition, and then carve up to 11 logical partitions inside it (a sketch of this layout follows the note below).

Note:
Do not bind raw devices to an extended partition.
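
For illustration only, a sketch of that 3-primary + 1-extended + logical layout with parted, on a hypothetical disk /dev/sdz with made-up sizes (the change in this article uses LVM volumes rather than partitions):

# hypothetical disk and sizes; the first logical partition becomes /dev/sdz5
parted -s /dev/sdz mklabel msdos
parted -s /dev/sdz mkpart primary 1MiB 1GiB
parted -s /dev/sdz mkpart primary 1GiB 2GiB
parted -s /dev/sdz mkpart primary 2GiB 3GiB
parted -s /dev/sdz mkpart extended 3GiB 100%
parted -s /dev/sdz mkpart logical 3073MiB 4GiB
parted -s /dev/sdz print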

4.1. Raw device binding

  • On Linux, a raw device must be bound manually before use.
    The Linux rawio interface provides a set of unbound raw devices, /dev/rawN (or /dev/raw/rawN), plus a control device (/dev/rawctl) used to bind them to block devices. Before a raw device can be used it must be associated with a real block device; this binding step does by hand what Unix does automatically, pairing each block device with an uncached character device.
  • On Unix, raw devices do not need to be bound manually.
    In Unix every block device has a corresponding character device used for unbuffered I/O, and that character device is its raw device.

4.2. Major and minor device numbers

In Unix/Linux everything is a file. Hard disks, floppy drives, keyboards and other devices are all represented by files under /dev, and applications can open, close, read and write these device files like ordinary files. But names such as /dev/sda or /dev/raw/raw1 exist only in user space; the OS kernel does not identify devices by these names. In kernel space, devices are distinguished by their major and minor device numbers.

The major device number identifies the device driver: all devices managed by the same driver share the same major number, which is effectively an index into the kernel's device driver table. The minor device number identifies the specific device being accessed. In other words, the kernel uses the major number to find the driver and the minor number to locate the individual device. Major device numbers are pre-assigned.
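
For example (using devices that already exist on this host), the major/minor pair of a device file appears in ls -l where the file size normally is, and stat can print it directly:

# ls -l shows "major, minor" in place of the file size for device files
ls -l /dev/sda /dev/mapper/data1
# stat prints the major (%t) and minor (%T) numbers of a device file in hex
stat -c '%n major=%t minor=%T (hex)' /dev/sda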


4.3. Configuring raw devices

On OEL 4.8 the file to edit is /etc/sysconfig/rawdevices, and the configuration is applied with the service rawdevices restart command.
1. Contents of the edited /etc/sysconfig/rawdevices file

[root@RAC1 ~]# cat /etc/sysconfig/rawdevices
# This file and interface are deprecated.
# Applications needing raw device access should open regular
# block devices with O_DIRECT.
# raw device bindings
# format:  <rawdev> <major> <minor>
#          <rawdev> <blockdev>
# example: /dev/raw/raw1 /dev/sda1
#          /dev/raw/raw2 8 5

/dev/raw/raw1 /dev/sdb1
/dev/raw/raw2 /dev/sdc1
/dev/raw/raw3 /dev/sdd1
/dev/raw/raw4 /dev/sde1

2. Start the rawdevices service

[root@RAC1 ~]# service rawdevices restart
Assigning devices:
           /dev/raw/raw1  -->   /dev/sdb1
/dev/raw/raw1:  bound to major 8, minor 17
           /dev/raw/raw2  -->   /dev/sdc1
/dev/raw/raw2:  bound to major 8, minor 33
           /dev/raw/raw3  -->   /dev/sdd1
/dev/raw/raw3:  bound to major 8, minor 49
           /dev/raw/raw4  -->   /dev/sde1
/dev/raw/raw4:  bound to major 8, minor 65
done

3. Final check

[root@RAC1 ~]# ls -l /dev/raw
total 0
crw-rw----  1 root disk 162, 1 Jul  2 17:26 raw1
crw-rw----  1 root disk 162, 2 Jul  2 17:26 raw2
crw-rw----  1 root disk 162, 3 Jul  2 17:26 raw3
crw-rw----  1 root disk 162, 4 Jul  2 17:26 raw4

At this point the raw devices can be seen in the /dev/raw directory.

From Red Hat 5 onward the old rawdevices interface was removed; in Red Hat 5 raw devices are configured through udev rules instead.
Edit /etc/udev/rules.d/60-raw.rules:

[oracle@itms-base ~]$ cat /etc/udev/rules.d/60-raw.rules 
# Enter raw device bindings here.
#
# An example would be:
#   ACTION=="add", KERNEL=="sda", RUN+="/bin/raw /dev/raw/raw1 %N"
# to bind /dev/raw/raw1 to /dev/sda, or
#   ACTION=="add", ENV{MAJOR}=="8", ENV{MINOR}=="1", RUN+="/bin/raw /dev/raw/raw2 %M %m"
# to bind /dev/raw/raw2 to the device with major 8, minor 1.
[oracle@itms-base ~]$

Rule syntax:

ACTION=="add", KERNEL=="<device name>", RUN+="raw /dev/raw/rawX %N"
  • device name: the name of the device to bind (for example, sdb1)
  • X: the raw device number
Or bind by major/minor numbers:
ACTION=="add", ENV{MAJOR}=="A", ENV{MINOR}=="B", RUN+="raw /dev/raw/rawX %M %m"
  • "A" and "B" are the device's major and minor numbers
  • X is the raw device number used by the system

In Red Hat 5, raw devices are managed through udev, and udev identifies devices by their MAJOR and MINOR numbers.

1. Check the disk partition layout

# fdisk  -l /dev/sdb

Disk /dev/sdb: 4880 MB, 4880072704 bytes

255 heads, 63 sectors/track, 593 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1               1          25      200781   83  Linux
/dev/sdb2              26          50      200812+  83  Linux

2. Configure the /etc/udev/rules.d/60-raw.rules file

# grep -v ^# /etc/udev/rules.d/60-raw.rules
ACTION=="add", KERNEL=="sdb1", RUN+="/bin/raw /dev/raw/raw1 %N"
ACTION=="add", KERNEL=="sdb2", RUN+="/bin/raw /dev/raw/raw2 %N"
ACTION=="add", ENV{MAJOR}=="3", ENV{MINOR}=="2", RUN+="/bin/raw /dev/raw/raw1 %M %m"
ACTION=="add", ENV{MAJOR}=="7", ENV{MINOR}=="2", RUN+="/bin/raw /dev/raw/raw2 %M %m"

3. Start the raw devices (reload udev)

# start_udev
Starting udev:                                             [  OK  ]

4. Check the bindings

# raw -qa
/dev/raw/raw1:  bound to major 8, minor 17
/dev/raw/raw2:  bound to major 8, minor 18

5. You can also bind by specifying the major and minor device numbers directly:

# raw /dev/raw/raw1 1 1
/dev/raw/raw1:  bound to major 1, minor 1

raw /dev/raw/raw[n] /dev/xxx
Here n ranges from 0 to 8191. If the /dev/raw directory does not exist it is created automatically, and running the command creates the corresponding raw[n] file under /dev/raw. Bindings made with the raw command on the command line are lost after a reboot.

6. Remove a raw device binding

# raw /dev/raw/raw2 0 0
/dev/raw/raw2:  bound to major 0, minor 0

# raw -qa
/dev/raw/raw1:  bound to major 1, minor 1

Any change made this way must also be reflected in /etc/udev/rules.d/60-raw.rules to survive a reboot; otherwise the system re-reads /etc/udev/rules.d/60-raw.rules at boot and restores the old bindings.

7. Determine the size of a raw device
Use the blockdev command to calculate it, for example:

# blockdev --getsize /dev/raw/raw1
11718750

11718750 is the number of OS blocks.
An OS block is normally 512 bytes, so 11718750 * 512 / 1024 / 1024 ≈ 5722 MB is the size of the raw device.
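
The same arithmetic can be scripted; a small sketch using the example device above:

# blockdev --getsize reports the size in 512-byte sectors
SECTORS=$(blockdev --getsize /dev/raw/raw1)
echo "raw device size: $(( SECTORS * 512 / 1024 / 1024 )) MB"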

To set the owner and permissions of the raw devices, add entries like the following to /etc/udev/rules.d/60-raw.rules:

ACTION=="add", KERNEL=="raw1", OWNER="dave", GROUP="tianlesoftware", MODE="660"

If there are several raw devices, this can be written as:

ACTION=="add", KERNEL=="raw[1-4]", OWNER="dave", GROUP="tianlesoftware", MODE="660"
#chown oracle:oinstall /dev/raw/raw[1-4]
#chmod 775 /dev/raw/raw[1-4]

Note: Before kernel 2.6.9-89.5AXS2, raw devices were configured and their permissions managed through /etc/sysconfig/rawdevices and /etc/udev/permissions.d/50-udev.permissions. From kernel 2.6.18-128.7AXS3 onward, raw devices are managed through /etc/udev/rules.d/60-raw.rules as described here.

Notes on using raw devices for Oracle data files

  1. A raw device can hold only one data file.
  2. A data file cannot be larger than its raw device.
    For redo log files, the maximum usable size = size of the underlying partition - 1 * 512 bytes (one redo block is reserved).
    For data files, the maximum usable size = size of the underlying partition - 2 * db_block_size (two blocks are reserved).
    To keep things simple, just make every file about 1 MB smaller than its raw device (see the worked example after this list).
  3. It is best not to make data files autoextensible; if autoextend is enabled, MAXSIZE must be set smaller than the raw device.
  4. On Linux, Oracle cannot use a logical volume as a raw device directly; it must be bound as well. On Unix this is not necessary.
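
A worked example of the sizing rule in item 2, with assumed values (a 1 GiB raw partition and an 8 KiB db_block_size; neither figure comes from this system):

# assumed values, for illustration only
PART_BYTES=$((1024 * 1024 * 1024))   # 1 GiB raw partition
DB_BLOCK_SIZE=8192                   # 8 KiB database block size
echo "max data file size: $(( PART_BYTES - 2 * DB_BLOCK_SIZE )) bytes"
echo "rule of thumb:      $(( PART_BYTES / 1024 / 1024 - 1 )) MB (1 MB less than the device)"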

5. Implementation Steps

step 1. Configure disk multipathing (all nodes)

[root@jczhdb1 mapper]# cat /etc/multipath.conf 
# This is a basic configuration file with some examples, for device mapper
# multipath.
# For a complete list of the default configuration values, see
# /usr/share/doc/device-mapper-multipath-0.4.7/multipath.conf.defaults
# For a list of configuration options with descriptions, see
# /usr/share/doc/device-mapper-multipath-0.4.7/multipath.conf.annotated

defaults {
        user_friendly_names yes
}
blacklist {
        devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
        devnode "^(hd|xvd|vd)[a-z]*"
        wwid "3600605b00bf61c801ff42c1b222b02ab"
}

# Make sure our multipath devices are enabled.

#blacklist_exceptions {
#        wwid "3600000e00d100000001032ce000c0000"
#}
#}

multipaths
{
multipath
{
wwid            3600000e00d2a0000002a084d00010000
alias           ocr1
}

multipath
{
wwid            3600000e00d2a0000002a084d00020000
alias           ocr2
}
multipath
{
wwid            3600000e00d2a0000002a084d00040000
alias           vote1
}
multipath
{
wwid            3600000e00d2a0000002a084d00030000
alias           vote2
}
multipath
{
wwid            3600000e00d2a0000002a084d00000000
alias           vote3
}
multipath
{
wwid            3600000e00d2a0000002a084d00050000
alias           data1
}
multipath
{
wwid            3600000e00d2a0000002a084d00060000
alias           data2
}
multipath
{
wwid            3600000e00d2a0000002a084d00070000
alias           arch1
}
multipath
{
wwid            3600000e00d2a0000002a084d00090000
alias           FRA01
}
multipath
{
wwid            3600000e00d2a0000002a084d000a0000
alias           FRA02
}
multipath
{
wwid            3600000e00d2a0000002a084d000b0000
alias           FRA03
}
multipath
{
wwid            3600000e00d2a0000002a084d000c0000
alias           FRA04
}
multipath
{
wwid            3600000e00d2a0000002a084d000d0000
alias           FRA05
}
multipath
{
wwid            3600000e00d2a0000002a084d000e0000
alias           FRA06
}
multipath
{
wwid            3600000e00d2a0000002a084d000f0000
alias           FRA07
}  
multipath
{
wwid            3600000e00d2a0000002a084d00100000
alias           FRA08
}
multipath
{
wwid            3600000e00d2a0000002a084d00110000
alias           FRA09
}
multipath
{
wwid            3600000e00d2a0000002a084d00120000
alias           FRA10
}
multipath
{
wwid            3600000e00d2a0000002a084d00130000
alias           data3
}
multipath
{
wwid            3600000e00d2a0000002a084d00140000
alias           data4
}
}
[root@jczhdb1 mapper]#
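
After /etc/multipath.conf has been updated on a node, the multipath layer has to pick up the new aliases. A sketch of how that might be done and verified (the multipathd service script of this RHEL-era system is assumed):

# re-read /etc/multipath.conf and rebuild the device maps
service multipathd restart
multipath -r
# the new aliases (data3, data4, FRA01..FRA10) should now appear
multipath -ll | grep -iE 'data3|data4|fra'
ls -l /dev/mapper/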

step 2. Create the LVM volumes

Create the physical volumes
[root@jczhdb1 mapper]# pvcreate /dev/mapper/data3
  Physical volume "/dev/mapper/data3" successfully created
[root@jczhdb1 mapper]# pvcreate /dev/mapper/data4
  Physical volume "/dev/mapper/data4" successfully created
[root@jczhdb1 mapper]# pvdisplay
  --- Physical volume ---
  PV Name               /dev/dm-1
  VG Name               vgdate2
  PV Size               4.00 TB / not usable 4.00 MB
  Allocatable           yes 
  PE Size (KByte)       4096
  Total PE              1048575
  Free PE               196607
  Allocated PE          851968
  PV UUID               eZcWjA-MHlC-9trU-cXZa-6Akv-OyKr-kLeIou

  --- Physical volume ---
  PV Name               /dev/dm-0
  VG Name               vgdate1
  PV Size               4.00 TB / not usable 4.00 MB
  Allocatable           yes 
  PE Size (KByte)       4096
  Total PE              1048575
  Free PE               196607
  Allocated PE          851968
  PV UUID               1hDRBE-ve0S-92di-h81V-g0qe-6eaI-SCWqKc

  "/dev/dm-14" is a new physical volume of "4.00 TB"
  --- NEW Physical volume ---
  PV Name               /dev/dm-14
  VG Name
  PV Size               4.00 TB
  Allocatable           NO
  PE Size (KByte)       0
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               ptLQ1i-e8Ev-E9vs-T9uA-AMq0-Lwg8-UTc2Kp

  "/dev/sda2" is a new physical volume of "2.73 TB"
  --- NEW Physical volume ---
  PV Name               /dev/sda2
  VG Name
  PV Size               2.73 TB
  Allocatable           NO
  PE Size (KByte)       0
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               bOWV6x-Bh3T-C3jN-OX0C-DBTf-7132-C1quVl

  "/dev/dm-13" is a new physical volume of "4.00 TB"
  --- NEW Physical volume ---
  PV Name               /dev/dm-13
  VG Name
  PV Size               4.00 TB
  Allocatable           NO
  PE Size (KByte)       0
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               dfWkgY-TT2y-wROs-NxYn-XJ3k-jhZx-eMLXel

[root@jczhdb1 mapper]#

Create the LVM volume groups
[root@jczhdb1 mapper]# vgcreate vgdata3 /dev/mapper/data3
  Volume group "vgdata3" successfully created
[root@jczhdb1 mapper]# vgcreate vgdata4 /dev/mapper/data4
  Volume group "vgdata4" successfully created
[root@jczhdb1 mapper]# vgdisplay
  --- Volume group ---
  VG Name               vgdata4
  System ID
  Format                lvm2
  Metadata Areas        1
  Metadata Sequence No  1
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                1
  Act PV                1
  VG Size               4.00 TB
  PE Size               4.00 MB
  Total PE              1048575
  Alloc PE / Size       0 / 0
  Free  PE / Size       1048575 / 4.00 TB
  VG UUID               9QaoZs-7eVM-cteQ-Z3HE-pCJ7-1Uvm-eOwMcG

  --- Volume group ---
  VG Name               vgdata3
  System ID
  Format                lvm2
  Metadata Areas        1
  Metadata Sequence No  1
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                1
  Act PV                1
  VG Size               4.00 TB
  PE Size               4.00 MB
  Total PE              1048575
  Alloc PE / Size       0 / 0
  Free  PE / Size       1048575 / 4.00 TB
  VG UUID               FP40ex-h8aA-IAWc-rYH4-lAzh-8HGY-KxDk7d

  --- Volume group ---
  VG Name               vgdate2
  System ID
  Format                lvm2
  Metadata Areas        1
  Metadata Sequence No  5
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                4
  Open LV               4
  Max PV                0
  Cur PV                1
  Act PV                1
  VG Size               4.00 TB
  PE Size               4.00 MB
  Total PE              1048575
  Alloc PE / Size       851968 / 3.25 TB
  Free  PE / Size       196607 / 768.00 GB
  VG UUID               F3Tiy0-zMkn-i2mp-A2l2-jz30-5rI9-NM2fqD

  --- Volume group ---
  VG Name               vgdate1
  System ID
  Format                lvm2
  Metadata Areas        1
  Metadata Sequence No  5
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                4
  Open LV               4
  Max PV                0
  Cur PV                1
  Act PV                1
  VG Size               4.00 TB
  PE Size               4.00 MB
  Total PE              1048575
  Alloc PE / Size       851968 / 3.25 TB
  Free  PE / Size       196607 / 768.00 GB
  VG UUID               RCHGjL-1TMA-zbr1-0Wzi-zomY-W2x8-aahM9s

[root@jczhdb1 mapper]#

Create the LVM logical volumes
[root@jczhdb1 mapper]# lvcreate -n data9 -L 1024G vgdata3
  Logical volume "data9" created
[root@jczhdb1 mapper]# lvcreate -n data10 -L 1024G vgdata3
  Logical volume "data10" created
[root@jczhdb1 mapper]# lvcreate -n data11 -L 1024G vgdata3
  Logical volume "data11" created
[root@jczhdb1 mapper]# lvcreate -n data12 -L 1024G vgdata3
  Insufficient free extents (262143) in volume group vgdata3: 262144 required
[root@jczhdb1 mapper]# 
[root@jczhdb1 mapper]# lvcreate -n data13 -L 1024G vgdata4
  Logical volume "data13" created
[root@jczhdb1 mapper]# lvcreate -n data14 -L 1024G vgdata4
  Logical volume "data14" created
[root@jczhdb1 mapper]# lvcreate -n data15 -L 1024G vgdata4
  Logical volume "data15" created
[root@jczhdb1 mapper]# lvcreate -n data16 -L 1024G vgdata4
  Insufficient free extents (262143) in volume group vgdata4: 262144 required
[root@jczhdb1 mapper]# 
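
The fourth 1024 GB logical volume in each group fails because only 262143 free extents of 4 MB remain, one extent short of a full 1024 GB; that leftover space is simply left unused here. Had it been wanted, it could have been allocated by extent count instead, for example (hypothetical, not run in this change):

# give whatever extents remain in the volume group to one more LV
lvcreate -n data12 -l 100%FREE vgdata3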

step 3. Create the raw devices (all nodes)

[root@jczhdb1 mapper]# cat /etc/rc.local

#!/bin/sh
#
# This script will be executed *after* all the other init scripts.
# You can put your own initialization stuff in here if you don't
# want to do the full Sys V style init stuff.

touch /var/lock/subsys/local
/sbin/modprobe hangcheck-timer

/bin/raw /dev/raw/raw1 /dev/mapper/ocr1
/bin/raw /dev/raw/raw2 /dev/mapper/ocr2
/bin/raw /dev/raw/raw3 /dev/mapper/vote1
/bin/raw /dev/raw/raw4 /dev/mapper/vote2
/bin/raw /dev/raw/raw5 /dev/mapper/vote3
/bin/raw /dev/raw/raw6 /dev/mapper/vgdate1-lvdate1
/bin/raw /dev/raw/raw7 /dev/mapper/vgdate1-lvdate2
/bin/raw /dev/raw/raw8 /dev/mapper/vgdate1-lvdate3
/bin/raw /dev/raw/raw9 /dev/mapper/vgdate1-lvdate4
/bin/raw /dev/raw/raw10 /dev/mapper/vgdate2-lvdate5
/bin/raw /dev/raw/raw11 /dev/mapper/vgdate2-lvdate6
/bin/raw /dev/raw/raw12 /dev/mapper/vgdate2-lvdate7
/bin/raw /dev/raw/raw13 /dev/mapper/vgdate2-lvdate8
/bin/raw /dev/raw/raw14 /dev/mapper/vgdata3-data9
/bin/raw /dev/raw/raw15 /dev/mapper/vgdata3-data10
/bin/raw /dev/raw/raw16 /dev/mapper/vgdata3-data11
/bin/raw /dev/raw/raw17 /dev/mapper/vgdata4-data13
/bin/raw /dev/raw/raw18 /dev/mapper/vgdata4-data14
/bin/raw /dev/raw/raw19 /dev/mapper/vgdata4-data15
/bin/raw /dev/raw/raw20 /dev/mapper/FRA01
/bin/raw /dev/raw/raw21 /dev/mapper/FRA02
/bin/raw /dev/raw/raw22 /dev/mapper/FRA03
/bin/raw /dev/raw/raw23 /dev/mapper/FRA04
/bin/raw /dev/raw/raw24 /dev/mapper/FRA05
/bin/raw /dev/raw/raw25 /dev/mapper/FRA06
/bin/raw /dev/raw/raw26 /dev/mapper/FRA07
/bin/raw /dev/raw/raw27 /dev/mapper/FRA08
/bin/raw /dev/raw/raw28 /dev/mapper/FRA09
/bin/raw /dev/raw/raw29 /dev/mapper/FRA10

sleep 2
chmod 660 /dev/raw/raw*
chown root:oinstall /dev/raw/raw{1,2}
chown oracle:oinstall /dev/raw/raw{3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29}
[root@jczhdb1 mapper]#
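
Once rc.local has been extended and executed on a node, the new bindings and their ownership can be double-checked:

# list every current raw binding with the major/minor it points at
raw -qa
# the new raw devices should be owned by oracle:oinstall with mode 660
ls -l /dev/raw/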

step 4. Scan the LVM volumes on the other node

[root@jczhdb2 mapper]# lvscan
  inactive          '/dev/vgdata4/data13' [1.00 TB] inherit
  inactive          '/dev/vgdata4/data14' [1.00 TB] inherit
  inactive          '/dev/vgdata4/data15' [1.00 TB] inherit
  inactive          '/dev/vgdata3/data9' [1.00 TB] inherit
  inactive          '/dev/vgdata3/data10' [1.00 TB] inherit
  inactive          '/dev/vgdata3/data11' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate2/lvdate5' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate2/lvdate6' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate2/lvdate7' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate2/lvdate8' [256.00 GB] inherit
  ACTIVE            '/dev/vgdate1/lvdate1' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate1/lvdate2' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate1/lvdate3' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate1/lvdate4' [256.00 GB] inherit
[root@jczhdb2 mapper]#
The newly created logical volumes are visible, but their state is "inactive".
The vgchange command modifies volume group attributes and is most often used to switch a volume group between the active and inactive states. An active volume group cannot be removed; it has to be set inactive with vgchange before it can be deleted.

Activate the LVM volumes
[root@jczhdb2 mapper]# vgchange -ay vgdata3
  3 logical volume(s) in volume group "vgdata3" now active
[root@jczhdb2 mapper]# vgchange -ay vgdata4
  3 logical volume(s) in volume group "vgdata4" now active
[root@jczhdb2 mapper]# lvscan 
  ACTIVE            '/dev/vgdata4/data13' [1.00 TB] inherit
  ACTIVE            '/dev/vgdata4/data14' [1.00 TB] inherit
  ACTIVE            '/dev/vgdata4/data15' [1.00 TB] inherit
  ACTIVE            '/dev/vgdata3/data9' [1.00 TB] inherit
  ACTIVE            '/dev/vgdata3/data10' [1.00 TB] inherit
  ACTIVE            '/dev/vgdata3/data11' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate2/lvdate5' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate2/lvdate6' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate2/lvdate7' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate2/lvdate8' [256.00 GB] inherit
  ACTIVE            '/dev/vgdate1/lvdate1' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate1/lvdate2' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate1/lvdate3' [1.00 TB] inherit
  ACTIVE            '/dev/vgdate1/lvdate4' [256.00 GB] inherit
[root@jczhdb2 mapper]#

step 5. Check the device nodes

[root@jczhdb2 mapper]# ls -la
total 0
drwxr-xr-x  2 root root     760 Nov 10 17:49 .
drwxr-xr-x 17 root root    8740 Nov 10 17:49 ..
brw-rw----  1 root disk 253,  2 Nov 10 16:22 arch2
brw-rw----  1 root disk 253, 20 Nov 10 16:22 arch2p1
crw-------  1 root root  10, 59 Nov 10 16:22 control
brw-rw----  1 root disk 253,  0 Nov 10 16:22 data1
brw-rw----  1 root disk 253,  1 Nov 10 16:22 data2
brw-rw----  1 root disk 253, 13 Nov 10 16:22 data3
brw-rw----  1 root disk 253, 14 Nov 10 16:22 data4
brw-rw----  1 root disk 253,  3 Nov 10 16:22 FRA01
brw-rw----  1 root disk 253,  4 Nov 10 16:22 FRA02
brw-rw----  1 root disk 253,  5 Nov 10 16:22 FRA03
brw-rw----  1 root disk 253,  6 Nov 10 16:22 FRA04
brw-rw----  1 root disk 253,  7 Nov 10 16:22 FRA05
brw-rw----  1 root disk 253,  8 Nov 10 16:22 FRA06
brw-rw----  1 root disk 253,  9 Nov 10 16:22 FRA07
brw-rw----  1 root disk 253, 10 Nov 10 16:22 FRA08
brw-rw----  1 root disk 253, 11 Nov 10 16:22 FRA09
brw-rw----  1 root disk 253, 12 Nov 10 16:22 FRA10
brw-rw----  1 root disk 253, 16 Nov 10 16:22 ocr1
brw-rw----  1 root disk 253, 17 Nov 10 16:22 ocr2
brw-rw----  1 root disk 253, 30 Nov 10 17:49 vgdata3-data10
brw-rw----  1 root disk 253, 31 Nov 10 17:49 vgdata3-data11
brw-rw----  1 root disk 253, 29 Nov 10 17:49 vgdata3-data9
brw-rw----  1 root disk 253, 32 Nov 10 17:49 vgdata4-data13
brw-rw----  1 root disk 253, 33 Nov 10 17:49 vgdata4-data14
brw-rw----  1 root disk 253, 34 Nov 10 17:49 vgdata4-data15
brw-rw----  1 root disk 253, 25 Nov 10 16:22 vgdate1-lvdate1
brw-rw----  1 root disk 253, 26 Nov 10 16:22 vgdate1-lvdate2
brw-rw----  1 root disk 253, 27 Nov 10 16:22 vgdate1-lvdate3
brw-rw----  1 root disk 253, 28 Nov 10 16:22 vgdate1-lvdate4
brw-rw----  1 root disk 253, 21 Nov 10 16:22 vgdate2-lvdate5
brw-rw----  1 root disk 253, 22 Nov 10 16:22 vgdate2-lvdate6
brw-rw----  1 root disk 253, 23 Nov 10 16:22 vgdate2-lvdate7
brw-rw----  1 root disk 253, 24 Nov 10 16:22 vgdate2-lvdate8
brw-rw----  1 root disk 253, 19 Nov 10 16:22 vote1
brw-rw----  1 root disk 253, 18 Nov 10 16:22 vote2
brw-rw----  1 root disk 253, 15 Nov 10 16:22 vote3
[root@jczhdb2 mapper]#

step 6. Bind the new raw devices on the other node

The "Device or resource busy" errors below are expected: raw1 through raw13 are already bound, so only the new raw devices get bound.

[root@jczhdb2 mapper]# source /etc/rc.local 
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
Error setting raw device (Device or resource busy)
/dev/raw/raw14:	bound to major 253, minor 29
/dev/raw/raw15:	bound to major 253, minor 30
/dev/raw/raw16:	bound to major 253, minor 31
/dev/raw/raw17:	bound to major 253, minor 32
/dev/raw/raw18:	bound to major 253, minor 33
/dev/raw/raw19:	bound to major 253, minor 34
[root@jczhdb2 mapper]#

step 7. Add disks to the DATA ASM disk group

alter diskgroup DATA add disk '/dev/raw/raw14';
alter diskgroup DATA add disk '/dev/raw/raw15';
alter diskgroup DATA add disk '/dev/raw/raw16';
alter diskgroup DATA add disk '/dev/raw/raw17';
alter diskgroup DATA add disk '/dev/raw/raw18';
alter diskgroup DATA add disk '/dev/raw/raw19';
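
Adding disks triggers an ASM rebalance, so before moving on it is worth confirming that the new disks joined DATA and watching the rebalance finish. A sketch run as the oracle user against the local ASM instance (the instance name +ASM2 is an assumption):

export ORACLE_SID=+ASM2
sqlplus -s / as sysdba <<'EOF'
-- any rebalance still running shows up here
select group_number, operation, state, est_minutes from v$asm_operation;
-- the new /dev/raw/raw14 - raw19 disks should now be listed under group DATA
select group_number, name, path, total_mb, free_mb from v$asm_disk order by group_number, name;
EOF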

step 8. Create the FRA ASM disk group

create diskgroup FRA external redundancy disk '/dev/raw/raw20';

alter diskgroup FRA add disk '/dev/raw/raw21';
alter diskgroup FRA add disk '/dev/raw/raw22';
alter diskgroup FRA add disk '/dev/raw/raw23';
alter diskgroup FRA add disk '/dev/raw/raw24';
alter diskgroup FRA add disk '/dev/raw/raw25';
alter diskgroup FRA add disk '/dev/raw/raw26';
alter diskgroup FRA add disk '/dev/raw/raw27';
alter diskgroup FRA add disk '/dev/raw/raw28';
alter diskgroup FRA add disk '/dev/raw/raw29';
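
Afterwards the space in both disk groups can be checked, and the new group mounted on the second ASM instance in case it does not mount there automatically. A sketch (ASM instance name assumed; the mount errors harmlessly if the group is already mounted):

export ORACLE_SID=+ASM2
sqlplus -s / as sysdba <<'EOF'
-- mount the new group on this instance if it is not mounted yet
alter diskgroup FRA mount;
-- both groups with their total and free space
select group_number, name, state, total_mb, free_mb from v$asm_diskgroup;
EOF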

step 9. Enable archivelog mode on the database

Shut down all instances, then on one instance run:
shut immediate;
startup mount;
alter database archivelog;
alter database open;

Configure the archive log destination (set with scope=spfile here, so it takes effect when the instances are next restarted)
alter system set log_archive_dest_1='LOCATION=+FRA' scope=spfile sid='jczhdb1';
alter system set log_archive_dest_1='LOCATION=+FRA' scope=spfile sid='jczhdb2';

Switch the log file so a log is archived
alter system switch logfile;
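
To confirm that archiving works and where the archived logs are going, a quick check from sqlplus (sketch):

sqlplus -s / as sysdba <<'EOF'
-- current log mode and archive destination
archive log list
-- destination parameter as seen by this instance
select dest_name, destination, status from v$archive_dest where dest_name = 'LOG_ARCHIVE_DEST_1';
EOF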
