1. 准备工作
1.1 RAC规划
1.2 磁盘划分
我们这里规划如下:
注意:19c中MGMT 是可选的,如果不想安装,可以在GI安装时选择NO即可。
1.3 配置host
注意,以下步骤如没有特别说明,都在所有RAC节点上执行。
[root@rac1 ~]# cat /etc/hosts
127.0.0.1 localhost
192.168.20.180 rac1
192.168.222.180 rac1-priv
192.168.20.182 rac1-vip
192.168.20.181 rac2
192.168.222.181 rac2-priv
192.168.20.183 rac2-vip
192.168.20.184 rac-scan
[root@rac1 ~]#
1.4 添加用户和组
# Create the OS groups Oracle 19c expects (group ids follow the Oracle docs).
# oinstall = inventory group; dba/oper = database admin/operator roles.
/usr/sbin/groupadd -g 54321 oinstall
/usr/sbin/groupadd -g 54322 dba
/usr/sbin/groupadd -g 54323 oper
# Separation-of-duty groups introduced in 12c (backup, Data Guard, TDE key mgmt).
/usr/sbin/groupadd -g 54324 backupdba
/usr/sbin/groupadd -g 54325 dgdba
/usr/sbin/groupadd -g 54326 kmdba
# ASM access groups: asmdba = clients of ASM, asmoper = ASM operator,
# asmadmin = full ASM administration (owned by the grid user).
/usr/sbin/groupadd -g 54327 asmdba
/usr/sbin/groupadd -g 54328 asmoper
/usr/sbin/groupadd -g 54329 asmadmin
/usr/sbin/groupadd -g 54330 racdba
# oracle owns the database software; grid owns Grid Infrastructure/ASM.
/usr/sbin/useradd -u 54321 -g oinstall -G dba,asmdba,oper oracle
/usr/sbin/useradd -u 54322 -g oinstall -G dba,oper,backupdba,dgdba,kmdba,asmdba,asmoper,asmadmin,racdba grid
# Set throw-away passwords non-interactively (--stdin is RHEL-specific).
echo "oracle" | passwd --stdin oracle
echo "grid" | passwd --stdin grid
1.5 关闭防火墙和selinux
防火墙:
[root@rac1 ~]# systemctl stop firewalld.service
[root@rac1 ~]# systemctl disable firewalld.service
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@rac1 ~]#
关闭SELINUX:
[root@rac1 ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/selinux/config
[root@rac1 ~]# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
[root@rac1 ~]#
1.6 关闭禁用透明大页和配置HugePage
这里仅在生产环境需要配置,测试环境可以忽略。 详细操作参考我的博客:
Linux 7.4 中关闭透明大页(Transparent Huge Pages)
https://www.cndba.cn/dave/article/2673
Linux HugePages 配置与 Oracle 性能关系说明
http://www.cndba.cn/dave/article/310
Linux 7.x 中 Oracle hugepage 大页配置脚本
https://www.cndba.cn/dave/article/2672
1.7 禁用chronyd
关于Linux 7 中chronyd的配置,可以参考我的博客:
Linux 7 chrony 时间同步服务配置
https://www.cndba.cn/dave/article/1820
这里我们直接关闭chronyd:
[root@rac1 ~]# systemctl stop chronyd
[root@rac1 ~]# systemctl disable chronyd
Removed symlink /etc/systemd/system/multi-user.target.wants/chronyd.service.
[root@rac1 ~]#
移除chrony配置文件:(必须移除,否则校验NTP失败)
[root@rac1 software]# mv /etc/chrony.conf /etc/chrony.conf.bak
1.8 关闭avahi-daemon
Avahi-daemon 可能会带来网络问题和bug,尤其是对于RAC系统,Oracle建议直接关闭。
[root@rac1 ~]# systemctl stop avahi-daemon
Warning: Stopping avahi-daemon.service, but it can still be activated by:
avahi-daemon.socket
[root@rac1 ~]# systemctl disable avahi-daemon
Removed symlink /etc/systemd/system/multi-user.target.wants/avahi-daemon.service.
Removed symlink /etc/systemd/system/sockets.target.wants/avahi-daemon.socket.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.Avahi.service.
[root@rac1 ~]#
1.9 创建目录
# OFA directory layout: GRID_HOME, grid ORACLE_BASE, and the DB ORACLE_HOME.
mkdir -p /u01/app/19.3.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle/product/19.3.0/db_1
# Order matters: give the whole tree to grid first, then hand the
# oracle subtree back to the oracle user.
chown -R grid:oinstall /u01
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/
1.10 配置用户环境变量
1.10.1 ORACLE用户
cat /home/oracle/.bash_profile
# Environment for the oracle (RDBMS) software owner.
# ORACLE_SID is cndba1 on node 1; use cndba2 on node 2.
ORACLE_SID=cndba1;export ORACLE_SID
ORACLE_UNQNAME=cndba;export ORACLE_UNQNAME
JAVA_HOME=/usr/local/java; export JAVA_HOME
ORACLE_BASE=/u01/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/19.3.0/db_1; export ORACLE_HOME
ORACLE_TERM=xterm; export ORACLE_TERM
# NOTE(review): "DDHH24" with no separator is a legal Oracle mask, but
# "YYYY-MM-DD HH24:MI:SS" is the conventional choice — confirm intent.
NLS_DATE_FORMAT="YYYY:MM:DDHH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=american_america.ZHS16GBK; export NLS_LANG
TNS_ADMIN=$ORACLE_HOME/network/admin; export TNS_ADMIN
ORA_NLS11=$ORACLE_HOME/nls/data; export ORA_NLS11
# Fix: the original appended $ORA_CRS_HOME/bin, but ORA_CRS_HOME is never
# defined in this profile (an 11g-era leftover); the empty expansion silently
# added a stray "/bin" entry. Dropped — /bin is added explicitly below anyway.
PATH=.:${JAVA_HOME}/bin:${PATH}:$HOME/bin:$ORACLE_HOME/bin
PATH=${PATH}:/usr/bin:/bin:/usr/bin/X11:/usr/local/bin
export PATH
LD_LIBRARY_PATH=$ORACLE_HOME/lib
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
export LD_LIBRARY_PATH
CLASSPATH=$ORACLE_HOME/JRE
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/jlib
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/rdbms/jlib
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/network/jlib
export CLASSPATH
THREADS_FLAG=native; export THREADS_FLAG
export TEMP=/tmp
export TMPDIR=/tmp
umask 022
1.10.2 GRID用户
cat /home/grid/.bash_profile
# Environment for the grid (Grid Infrastructure) software owner.
# ORACLE_SID is +ASM1 on node 1; use +ASM2 on node 2.
export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/19.3.0/grid
# $ORACLE_HOME/bin goes first so GI tools shadow anything else on PATH;
# $HOME/bin, /usr/local/bin and "." keep their original positions.
PATH=$ORACLE_HOME/bin:$PATH:$HOME/bin:/usr/local/bin/:.
export PATH
export TEMP=/tmp
export TMP=/tmp
export TMPDIR=/tmp
umask 022
1.11 修改资源限制
修改/etc/security/limits.conf
[root@rac1 ~]# cat >> /etc/security/limits.conf <<EOF
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
oracle soft memlock 3145728
oracle hard memlock 3145728
EOF
1.12 设置PAM
确保加载了最新版本的 PAM,然后在 /etc/pam.d/login 文件中添加或编辑以下行:
[root@rac1 ~]# cat >> /etc/pam.d/login <<EOF
session required pam_limits.so
EOF
1.13 配置NOZEROCONF
vi /etc/sysconfig/network增加以下内容
NOZEROCONF=yes
也可以直接执行如下语法完成修改:
echo "NOZEROCONF=yes" >>/etc/sysconfig/network
1.14 修改内核参数
在Linux 7之前,内核参数文件是修改/etc/sysctl.conf文件,但在Linux 7.x 之后发生了变化:
Linux 7 中 /etc/sysctl.conf 的位置变化
https://www.cndba.cn/dave/article/2966
但仍然可以修改这个文件,Oracle做了个link指向了该文件。我们这里创建了一个新文件:
[root@rac1 ~]# cat >> /etc/sysctl.d/sysctl.conf <<EOF
fs.file-max = 6815744
kernel.sem = 250 32000 100 128
kernel.shmmni = 4096
kernel.shmall = 1073741824
kernel.shmmax = 4398046511104
kernel.panic_on_oops = 1
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
net.ipv4.conf.all.rp_filter = 2
net.ipv4.conf.default.rp_filter = 2
fs.aio-max-nr = 1048576
net.ipv4.ip_local_port_range = 9000 65500
EOF
[root@rac1 ~]# sysctl --system
1.15 安装必要的包
配置Yum:
[root@rac1 yum.repos.d]# cat >> /etc/yum.repos.d/cndba.repo <<EOF
[www.cndba.cn]
name = www.cndba.cn yum server
baseurl=file:///mnt
gpgcheck=0
enabled=1
EOF
[root@rac1 yum.repos.d]# yum clean all
Loaded plugins: langpacks, product-id, search-disabled-repos, subscription-manager
This system is not registered with an entitlement server. You can use subscription-manager to register.
Cleaning repos: www.cndba.cn
[root@rac1 yum.repos.d]#
[root@rac1 yum.repos.d]# mount /dev/cdrom /mnt
[root@rac1 yum.repos.d]# yum install binutils compat-libstdc++-33 gcc gcc-c++ glibc glibc.i686 glibc-devel ksh libgcc.i686 libstdc++-devel libaio libaio.i686 libaio-devel libaio-devel.i686 libXext libXext.i686 libXtst libXtst.i686 libX11 libX11.i686 libXau libXau.i686 libxcb libxcb.i686 libXi libXi.i686 make sysstat unixODBC unixODBC-devel zlib-devel zlib-devel.i686 compat-libcap1 -y
安装:compat-libstdc++-33-3.2.3-61.x86_64.rpm 包,该包yum源里没有,需要从网上下载来安装。
[root@rac1 software]# rpm -ivh compat-libstdc++-33-3.2.3-61.x86_64.rpm
warning: compat-libstdc++-33-3.2.3-61.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID e8562897: NOKEY
Preparing... ################################# [100%]
Updating / installing...
1:compat-libstdc++-33-3.2.3-61 ################################# [100%]
[root@rac1 software]#
1.16 配置共享磁盘
用脚本生成udev 配置:
# Generate one udev rule per shared disk so each ASM device gets a stable
# /dev/asmdisk* name with grid:asmadmin ownership and 0660 permissions.
# Fix: the original used "/" where "\" is required to escape the inner quotes
# and the udev substitutions ($devnode, $major, $minor), so it could not
# produce the rules shown below.  \" and \$ keep those tokens literal in the
# emitted rule, while $(scsi_id ...) and $i are expanded now, per disk.
# Loop over sdb..sdg — six shared disks (matches the /dev/asmdisk* listing).
for i in b c d e f g
do
  echo "KERNEL==\"sd*\",ENV{DEVTYPE}==\"disk\",SUBSYSTEM==\"block\",PROGRAM==\"/usr/lib/udev/scsi_id -g -u -d \$devnode\",RESULT==\"$(/usr/lib/udev/scsi_id -g -u /dev/sd$i)\", RUN+=\"/bin/sh -c 'mknod /dev/asmdisk$i b \$major \$minor; chown grid:asmadmin /dev/asmdisk$i; chmod 0660 /dev/asmdisk$i'\""
done
将脚本内容写入/etc/udev/rules.d/99-oracle-asmdevices.rules 文件。
[root@rac1 software]# vim /etc/udev/rules.d/99-oracle-asmdevices.rules
[root@rac1 software]# cat /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id -g -u -d $devnode",RESULT=="36000c29cf5ef7bd3907344106bcca59b", RUN+="/bin/sh -c 'mknod /dev/asmdiskb b $major $minor; chown grid:asmadmin /dev/asmdiskb; chmod 0660 /dev/asmdiskb'"
KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id -g -u -d $devnode",RESULT=="36000c293ef1e02395716263ee17e8926", RUN+="/bin/sh -c 'mknod /dev/asmdiskc b $major $minor; chown grid:asmadmin /dev/asmdiskc; chmod 0660 /dev/asmdiskc'"
KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id -g -u -d $devnode",RESULT=="36000c29a790b610473d4800954053180", RUN+="/bin/sh -c 'mknod /dev/asmdiskd b $major $minor; chown grid:asmadmin /dev/asmdiskd; chmod 0660 /dev/asmdiskd'"
KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id -g -u -d $devnode",RESULT=="36000c292fe31e2c5ec2cf689791c09d7", RUN+="/bin/sh -c 'mknod /dev/asmdiske b $major $minor; chown grid:asmadmin /dev/asmdiske; chmod 0660 /dev/asmdiske'"
KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id -g -u -d $devnode",RESULT=="36000c29c87abcc922d24e7bc0e2978a7", RUN+="/bin/sh -c 'mknod /dev/asmdiskf b $major $minor; chown grid:asmadmin /dev/asmdiskf; chmod 0660 /dev/asmdiskf'"
KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id -g -u -d $devnode",RESULT=="36000c297ede8a5b0a407451699668920", RUN+="/bin/sh -c 'mknod /dev/asmdiskg b $major $minor; chown grid:asmadmin /dev/asmdiskg; chmod 0660 /dev/asmdiskg'"
[root@rac1 software]#
让UDEV生效:
[root@rac2 software]# /sbin/udevadm trigger --type=devices --action=change
如果权限没有变,尝试重启。
[root@rac2 software]# ll /dev/asm*
brw-rw----. 1 grid asmadmin 8, 16 Mar 11 21:36 /dev/asmdiskb
brw-rw----. 1 grid asmadmin 8, 32 Mar 11 21:36 /dev/asmdiskc
brw-rw----. 1 grid asmadmin 8, 48 Mar 11 21:36 /dev/asmdiskd
brw-rw----. 1 grid asmadmin 8, 64 Mar 11 21:36 /dev/asmdiske
brw-rw----. 1 grid asmadmin 8, 80 Mar 11 21:36 /dev/asmdiskf
brw-rw----. 1 grid asmadmin 8, 96 Mar 11 21:36 /dev/asmdiskg
[root@rac2 software]#
2 安装GRID
2.1 上传解压介质
[root@rac1 software]# ll
total 8890692
-rw-r--r--. 1 root root 232774 Mar 12 08:36 compat-libstdc++-33-3.2.3-61.x86_64.rpm
-rw-r--r--. 1 root root 2160976478 Mar 12 08:43 p30501910_190000_Linux-x86-64-GI.zip
-rw-r--r--. 1 root root 993959385 Mar 12 08:38 p30557433_190000_Linux-x86-64--DB.zip
-rw-r--r--. 1 root root 3059705302 Mar 11 21:49 V982063-01-db4linux.zip
-rw-r--r--. 1 root root 2889184573 Mar 11 22:19 V982068-01-gi4linux.zip
[root@rac1 software]#
从Oracle 12cR2开始,软件直接解压缩到GRID_HOME 或者ORACLE_HOME,而不是在安装时复制过去。 该操作只需要在节点1上完成解压缩即可。
我们这里是安装GRID,用grid用户来解压缩GI:
[grid@rac1 software]$ unzip -d /u01/app/19.3.0/grid V982068-01-gi4linux.zip
2.2 安装cvuqdisk
cvuqdisk存于 GI的cv/rpm目录下:
[root@rac1 rpm]# pwd
/u01/app/19.3.0/grid/cv/rpm
[root@rac1 rpm]# ls
cvuqdisk-1.0.10-1.rpm
[root@rac1 rpm]# rpm -ivh cvuqdisk-1.0.10-1.rpm
Preparing... ################################# [100%]
Using default group oinstall to install package
Updating / installing...
1:cvuqdisk-1.0.10-1 ################################# [100%]
[root@rac1 rpm]#
在两个节点都安装。
2.3 运行安装脚本gridSetup.sh
在节点1,进入grid集群软件目录执行GI安装。
[grid@rac1 ~]$ cd /u01/app/19.3.0/grid/
[grid@rac1 grid]$ ls
addnode css diagnostics has jdbc network ords precomp rhp sdk tomcat wwg
assistants cv dmu hs jdk nls oss QOpatch root.sh slax ucp xag
bin dbjava env.ora install jlib OPatch oui qos root.sh.old sqlpatch usm xdk
cha dbs evm instantclient ldap opmn owm racg root.sh.old.1 sqlplus utl
clone deinstall gpnp inventory lib oracore perl rdbms rootupgrade.sh srvm welcome.html
crs demo gridSetup.sh javavm md ord plsql relnotes runcluvfy.sh suptools wlm
[grid@rac1 grid]$ ./gridSetup.sh
[root@rac1 software]# /u01/app/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[root@rac1 software]# /u01/app/19.3.0/grid/root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/19.3.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/19.3.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
/u01/app/grid/crsdata/rac1/crsconfig/rootcrs_rac1_2020-03-12_12-30-17AM.log
2020/03/12 12:30:39 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2020/03/12 12:30:39 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2020/03/12 12:30:39 CLSRSC-363: User ignored prerequisites during installation
2020/03/12 12:30:40 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2020/03/12 12:30:44 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2020/03/12 12:30:46 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.
2020/03/12 12:30:46 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.
2020/03/12 12:30:47 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.
2020/03/12 12:31:18 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.
2020/03/12 12:31:22 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2020/03/12 12:31:29 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2020/03/12 12:31:54 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2020/03/12 12:31:55 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2020/03/12 12:32:07 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2020/03/12 12:32:08 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2020/03/12 12:33:51 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2020/03/12 12:35:14 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2020/03/12 12:36:52 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2020/03/12 12:37:04 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
[INFO] [DBT-30161] Disk label(s) created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-200312PM123748.log for details.
2020/03/12 12:39:11 CLSRSC-482: Running command: '/u01/app/19.3.0/grid/bin/ocrconfig -upgrade grid oinstall'
CRS-4256: Updating the profile
Successful addition of voting disk 467a1e181c974f82bf00ec778ad412af.
Successful addition of voting disk 28bb6785cb9d4fc6bf987da0e9b37d62.
Successful addition of voting disk 91c620923fad4fefbf440fc313f4f7cb.
Successfully replaced voting disk group with +OCR.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 467a1e181c974f82bf00ec778ad412af (AFD:OCR1) [OCR]
2. ONLINE 28bb6785cb9d4fc6bf987da0e9b37d62 (AFD:OCR2) [OCR]
3. ONLINE 91c620923fad4fefbf440fc313f4f7cb (AFD:OCR3) [OCR]
Located 3 voting disk(s).
2020/03/12 12:41:42 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2020/03/12 12:43:13 CLSRSC-343: Successfully started Oracle Clusterware stack
2020/03/12 12:43:13 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2020/03/12 12:46:12 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
[INFO] [DBT-30161] Disk label(s) created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-200312PM124619.log for details.
[INFO] [DBT-30001] Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-200312PM124619.log for details.
2020/03/12 12:49:24 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@rac1 software]#
节点2报如下错误:
2020/03/12 12:57:06 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2020/03/12 12:57:32 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2020/03/12 12:58:06 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
Error 4 opening dom ASM/Self in 0x63f6230
Domain name to open is ASM/Self
Error 4 opening dom ASM/Self in 0x63f6230
[root@rac2 software]#
忽略继续。
2.4 验证集群是否正常
[grid@rac1 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
ONLINE ONLINE rac1 STABLE
ONLINE ONLINE rac2 STABLE
ora.MGMT.GHCHKPT.advm
OFFLINE OFFLINE rac1 STABLE
OFFLINE OFFLINE rac2 STABLE
ora.chad
ONLINE ONLINE rac1 STABLE
ONLINE ONLINE rac2 STABLE
ora.helper
OFFLINE OFFLINE rac1 STABLE
OFFLINE OFFLINE rac2 IDLE,STABLE
ora.mgmt.ghchkpt.acfs
OFFLINE OFFLINE rac1 volume /opt/oracle/r
hp_images/chkbase is
unmounted,STABLE
OFFLINE OFFLINE rac2 STABLE
ora.net1.network
ONLINE ONLINE rac1 STABLE
ONLINE ONLINE rac2 STABLE
ora.ons
ONLINE ONLINE rac1 STABLE
ONLINE ONLINE rac2 STABLE
ora.proxy_advm
OFFLINE OFFLINE rac1 STABLE
OFFLINE OFFLINE rac2 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 OFFLINE OFFLINE STABLE
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE rac1 STABLE
ora.MGMT.dg(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 OFFLINE OFFLINE STABLE
ora.MGMTLSNR
1 ONLINE ONLINE rac1 169.254.1.162 192.16
8.222.180,STABLE
ora.OCR.dg(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 OFFLINE OFFLINE STABLE
ora.asm(ora.asmgroup)
1 ONLINE ONLINE rac1 Started,STABLE
2 ONLINE ONLINE rac2 Started,STABLE
3 OFFLINE OFFLINE STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 OFFLINE OFFLINE STABLE
ora.cvu
1 ONLINE ONLINE rac1 STABLE
ora.mgmtdb
1 ONLINE ONLINE rac1 Open,STABLE
ora.qosmserver
1 ONLINE ONLINE rac1 STABLE
ora.rac1.vip
1 ONLINE ONLINE rac1 STABLE
ora.rac2.vip
1 ONLINE ONLINE rac2 STABLE
ora.rhpserver
1 OFFLINE OFFLINE STABLE
ora.scan1.vip
1 ONLINE ONLINE rac1 STABLE
--------------------------------------------------------------------------------
[grid@rac1 ~]$
3 ASMCA创建磁盘组
用grid用户执行asmca命令,创建DATA磁盘组用于存储数据文件。
4 安装DB
与grid操作相同,用oracle 用户解压缩到ORACLE_HOME。 该操作只需要在节点1上完成解压缩即可。
[oracle@rac1 software]$ ll
total 8890692
-rw-r--r--. 1 root root 232774 Mar 12 08:36 compat-libstdc++-33-3.2.3-61.x86_64.rpm
-rw-r--r--. 1 root root 2160976478 Mar 12 08:43 p30501910_190000_Linux-x86-64-GI.zip
-rw-r--r--. 1 root root 993959385 Mar 12 08:38 p30557433_190000_Linux-x86-64--DB.zip
-rw-r--r--. 1 oracle oinstall 3059705302 Mar 11 21:49 V982063-01-db4linux.zip
-rw-r--r--. 1 root root 2889184573 Mar 11 22:19 V982068-01-gi4linux.zip
[oracle@rac1 software]$ unzip -d /u01/app/oracle/product/19.3.0/db_1 V982063-01-db4linux.zip
安装Oracle DB软件
[oracle@rac1 db_1]$ pwd
/u01/app/oracle/product/19.3.0/db_1
[oracle@rac1 db_1]$ export DISPLAY=192.168.30.108:0.0
[oracle@rac1 db_1]$ ./runInstaller
Oracle 18c 中 SE2 和 EE 版本功能的区别
https://www.cndba.cn/cndba/dave/article/2980
[root@rac1 rpm]# /u01/app/oracle/product/19.3.0/db_1/root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/app/oracle/product/19.3.0/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
[root@rac1 rpm]#
5 DBCA创建数据库
用oracle 用户执行dbca命令,创建实例。
6 验证
6.1 查看RAC集群状态
[grid@rac1 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
ONLINE ONLINE rac1 STABLE
ONLINE ONLINE rac2 STABLE
ora.MGMT.GHCHKPT.advm
OFFLINE OFFLINE rac1 STABLE
OFFLINE OFFLINE rac2 STABLE
ora.chad
ONLINE ONLINE rac1 STABLE
ONLINE ONLINE rac2 STABLE
ora.helper
OFFLINE OFFLINE rac1 STABLE
OFFLINE OFFLINE rac2 IDLE,STABLE
ora.mgmt.ghchkpt.acfs
OFFLINE OFFLINE rac1 volume /opt/oracle/r
hp_images/chkbase is
unmounted,STABLE
OFFLINE OFFLINE rac2 STABLE
ora.net1.network
ONLINE ONLINE rac1 STABLE
ONLINE ONLINE rac2 STABLE
ora.ons
ONLINE ONLINE rac1 STABLE
ONLINE ONLINE rac2 STABLE
ora.proxy_advm
OFFLINE OFFLINE rac1 STABLE
OFFLINE OFFLINE rac2 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 OFFLINE OFFLINE STABLE
ora.DATA.dg(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 ONLINE OFFLINE STABLE
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE rac1 STABLE
ora.MGMT.dg(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 OFFLINE OFFLINE STABLE
ora.MGMTLSNR
1 ONLINE ONLINE rac1 169.254.1.162 192.16
8.222.180,STABLE
ora.OCR.dg(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 OFFLINE OFFLINE STABLE
ora.asm(ora.asmgroup)
1 ONLINE ONLINE rac1 Started,STABLE
2 ONLINE ONLINE rac2 Started,STABLE
3 OFFLINE OFFLINE STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
1 ONLINE ONLINE rac1 STABLE
2 ONLINE ONLINE rac2 STABLE
3 OFFLINE OFFLINE STABLE
ora.cndba.db
1 ONLINE ONLINE rac1 Open,HOME=/u01/app/o
racle/product/19.3.0
/db_1,STABLE
2 ONLINE ONLINE rac2 Open,HOME=/u01/app/o
racle/product/19.3.0
/db_1,STABLE
ora.cvu
1 ONLINE ONLINE rac1 STABLE
ora.mgmtdb
1 ONLINE ONLINE rac1 Open,STABLE
ora.qosmserver
1 ONLINE ONLINE rac1 STABLE
ora.rac1.vip
1 ONLINE ONLINE rac1 STABLE
ora.rac2.vip
1 ONLINE ONLINE rac2 STABLE
ora.rhpserver
1 OFFLINE OFFLINE STABLE
ora.scan1.vip
1 ONLINE ONLINE rac1 STABLE
--------------------------------------------------------------------------------
[grid@rac1 ~]$
6.2 查看数据库状态及版本
[oracle@rac1 db_1]$ sqlplus / as sysdba
SQL*Plus: Release 19.0.0.0.0 - Production on Thu Mar 12 16:54:28 2020
Version 19.3.0.0.0
Copyright (c) 1982, 2019, Oracle. All rights reserved.
Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.3.0.0.0
SQL> show pdbs
CON_ID CON_NAME OPEN MODE RESTRICTED
---------- ------------------------------ ---------- ----------
2 PDB$SEED READ ONLY NO
3 DAVE READ WRITE NO
SQL>
版权声明:本文为博主原创文章,未经博主允许不得转载。