1. Create the required users and groups on each node
groupadd -g 54321 oinstall
groupadd -g 54322 dba
groupadd -g 54323 oper
groupadd -g 54324 backupdba
groupadd -g 54325 dgdba
groupadd -g 54326 kmdba
groupadd -g 54327 asmdba
groupadd -g 54328 asmoper
groupadd -g 54329 asmadmin
groupadd -g 54330 racdba
useradd -u 54321 -g oinstall -G dba,asmdba,backupdba,dgdba,kmdba,racdba,oper oracle
useradd -u 54322 -g oinstall -G asmadmin,asmdba,asmoper,dba grid
echo oracle | passwd --stdin oracle
echo oracle | passwd --stdin grid
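A quick sanity check (not part of the original transcript) is to confirm the UIDs, GIDs and group memberships with id:
id oracle   # expect uid=54321, primary group oinstall, plus dba,asmdba,backupdba,dgdba,kmdba,racdba,oper
id grid     # expect uid=54322, primary group oinstall, plus asmadmin,asmdba,asmoper,dba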
2. Shared storage planning and configuration
2.1 Verify the multipath bindings
Node 1
[root@oracl-clinet-mn-1 ~]# multipath -F
[root@oracl-clinet-mn-1 ~]# multipath -v2
create: lun01 (36005853015761a7a390dc91d00000000) undef XSKY ,R_XEBS
size=1000G features='1 queue_if_no_path' hwhandler='0' wp=undef
`-+- policy='round-robin 0' prio=1 status=undef
|- 10:0:0:0 sda 8:0 undef ready running
`- 11:0:0:0 sdc 8:32 undef ready running
create: lun02 (3600585301c979275ba55ae5700000000) undef XSKY ,R_XEBS
size=10G features='1 queue_if_no_path' hwhandler='0' wp=undef
`-+- policy='round-robin 0' prio=1 status=undef
|- 10:0:0:7 sdb 8:16 undef ready running
`- 11:0:0:7 sdd 8:48 undef ready running
[root@oracl-clinet-mn-1 ~]#
Node 2
[root@oracl-clinet-mn-2 ~]# multipath -v2
create: lun01 (36005853015761a7a390dc91d00000000) undef XSKY ,R_XEBS
size=1000G features='1 queue_if_no_path' hwhandler='0' wp=undef
`-+- policy='round-robin 0' prio=1 status=undef
|- 3:0:0:0 sda 8:0 undef ready running
`- 4:0:0:0 sdc 8:32 undef ready running
create: lun02 (3600585301c979275ba55ae5700000000) undef XSKY ,R_XEBS
size=10G features='1 queue_if_no_path' hwhandler='0' wp=undef
`-+- policy='round-robin 0' prio=1 status=undef
|- 3:0:0:7 sdb 8:16 undef ready running
`- 4:0:0:7 sdd 8:48 undef ready running
[root@oracl-clinet-mn-2 ~]#
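The alias binding itself is not shown above. A minimal multipaths section in /etc/multipath.conf on both nodes that would produce the lun01/lun02 names, assuming the WWIDs from the multipath -v2 output (array-specific defaults and device settings omitted), looks roughly like this:
multipaths {
    multipath {
        wwid  36005853015761a7a390dc91d00000000   # 1000G data LUN
        alias lun01
    }
    multipath {
        wwid  3600585301c979275ba55ae5700000000   # 10G CRS/voting LUN
        alias lun02
    }
}
After editing the file, re-reading the configuration with multipath -F followed by multipath -v2 (as above), or with multipath -r, applies the aliases.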
2.2 Minimal udev + multipath configuration
Node 1
[root@oracl-clinet-mn-1 ~]# vi /etc/udev/rules.d/12-dm-permissions.rules
ENV{DM_UUID}=="mpath-?*", OWNER:="grid", GROUP:="asmadmin", MODE:="660"
[root@oracl-clinet-mn-1 ~]# udevadm control --reload
[root@oracl-clinet-mn-1 ~]# udevadm trigger
Node 2
[root@oracl-clinet-mn-2 ~]# vi /etc/udev/rules.d/12-dm-permissions.rules
ENV{DM_UUID}=="mpath-?*", OWNER:="grid", GROUP:="asmadmin", MODE:="660"
[root@oracl-clinet-mn-2 ~]# udevadm control --reload
[root@oracl-clinet-mn-2 ~]# udevadm trigger
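To confirm the rule took effect (a suggested check, not in the original output; repeat on node 2), list the mapped devices and verify the grid:asmadmin ownership and 660 mode:
[root@oracl-clinet-mn-1 ~]# ls -lL /dev/mapper/lun01 /dev/mapper/lun02
# expect: brw-rw---- 1 grid asmadmin ... on both devices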
3 Synchronize the system time on each node
# Stop the chrony service and remove its configuration files (Oracle CTSS will be used instead)
[root@oracl-clinet-mn-1 ~]# systemctl list-unit-files|grep chronyd
chronyd.service enabled
[root@oracl-clinet-mn-1 ~]# systemctl status chronyd
● chronyd.service - NTP client/server
Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; vendor preset: enabled)
Active: active (running) since Tue 2019-10-29 03:09:39 EDT; 2 weeks 0 days ago
Docs: man:chronyd(8)
man:chrony.conf(5)
Process: 688 ExecStartPost=/usr/libexec/chrony-helper update-daemon (code=exited, status=0/SUCCESS)
Process: 664 ExecStart=/usr/sbin/chronyd $OPTIONS (code=exited, status=0/SUCCESS)
Main PID: 673 (chronyd)
CGroup: /system.slice/chronyd.service
└─673 /usr/sbin/chronyd
Oct 29 03:09:39 oracl-clinet-mn-1 systemd[1]: Starting NTP client/server...
Oct 29 03:09:39 oracl-clinet-mn-1 chronyd[673]: chronyd version 3.1 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SECHASH +SIGND +ASYNCDNS +IPV6 +DEBUG)
Oct 29 03:09:39 oracl-clinet-mn-1 chronyd[673]: Frequency -10.425 +/- 0.040 ppm read from /var/lib/chrony/drift
Oct 29 03:09:39 oracl-clinet-mn-1 systemd[1]: Started NTP client/server.
[root@oracl-clinet-mn-1 ~]# systemctl disable chronyd
Removed symlink /etc/systemd/system/multi-user.target.wants/chronyd.service.
[root@oracl-clinet-mn-1 ~]# systemctl stop chronyd
[root@oracl-clinet-mn-1 ~]# rm -rf /etc/ntp.conf
[root@oracl-clinet-mn-1 ~]# rm -rf /etc/chrony.conf
[root@oracl-clinet-mn-1 ~]# rm -rf /var/run/chronyd.pid
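The heading covers every node, so the same removal should be repeated on node 2 (only node 1 is shown above), for example:
[root@oracl-clinet-mn-2 ~]# systemctl disable chronyd
[root@oracl-clinet-mn-2 ~]# systemctl stop chronyd
[root@oracl-clinet-mn-2 ~]# rm -rf /etc/ntp.conf /etc/chrony.conf /var/run/chronyd.pid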
4 Disable the firewall and SELinux on each node
4.1 Disable the firewall on each node
[root@oracl-clinet-mn-1 ~]# systemctl list-unit-files|grep firewalld
firewalld.service disabled
[root@oracl-clinet-mn-1 ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)
[root@oracl-clinet-mn-1 ~]# systemctl disable firewalld
[root@oracl-clinet-mn-1 ~]# systemctl stop firewalld
[root@oracl-clinet-mn-1 ~]#
4.2 Disable SELinux on each node
[root@oracl-clinet-mn-1 ~]# getenforce
Enforcing
[root@oracl-clinet-mn-1 ~]# sed -i '/^SELINUX=.*/ s//SELINUX=disabled/' /etc/selinux/config
[root@oracl-clinet-mn-1 ~]# setenforce 0
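getenforce should now report Permissive; the change to /etc/selinux/config disables SELinux permanently after the next reboot (a suggested verification, not in the original transcript):
[root@oracl-clinet-mn-1 ~]# getenforce
Permissive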
5 Check required dependency packages on each node
5.1 Add the Oracle Yum repository on each node
[root@oracl-clinet-mn-1 ~]# cd /etc/yum.repos.d/
[root@oracl-clinet-mn-1 yum.repos.d]# wget http://public-yum.oracle.com/public-yum-ol7.repo
--2019-11-12 20:31:56-- http://public-yum.oracle.com/public-yum-ol7.repo
Connecting to 172.16.100.8:3128... connected.
Proxy request sent, awaiting response... 200 OK
Length: 16402 (16K) [text/plain]
Saving to: ‘public-yum-ol7.repo’
100%[==========================================================================================================================================================================================================>] 16,402 --.-K/s in 0s
2019-11-12 20:32:11 (270 MB/s) - ‘public-yum-ol7.repo’ saved [16402/16402]
[root@oracl-clinet-mn-2 ~]# wget https://yum.oracle.com/RPM-GPG-KEY-oracle-ol7 -O /etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
--2019-11-12 20:50:35-- https://yum.oracle.com/RPM-GPG-KEY-oracle-ol7
Resolving yum.oracle.com (yum.oracle.com)... 23.34.140.198
Connecting to yum.oracle.com (yum.oracle.com)|23.34.140.198|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1011 [text/plain]
Saving to: ‘/etc/pki/rpm-gpg/RPM-GPG-KEY-oracle’
100%[==========================================================================================================================================================================================================>] 1,011 --.-K/s in 0s
2019-11-12 20:50:37 (254 MB/s) - ‘/etc/pki/rpm-gpg/RPM-GPG-KEY-oracle’ saved [1011/1011]
5.2 Install the preinstall dependency package on each node
[root@oracl-clinet-mn-2 ~]# yum install -y oracle-database-preinstall-19c-1.0-1.el7.x86_64
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.tuna.tsinghua.edu.cn
* extras: mirrors.tuna.tsinghua.edu.cn
* updates: mirrors.tuna.tsinghua.edu.cn
Resolving Dependencies
--> Running transaction check
---> Package oracle-database-preinstall-19c.x86_64 0:1.0-1.el7 will be installed
--> Processing Dependency: compat-libcap1 for package: oracle-database-preinstall-19c-1.0-1.el7.x86_64
--> Processing Dependency: ksh for package: oracle-database-preinstall-19c-1.0-1.el7.x86_64
--> Processing Dependency: libaio-devel for package: oracle-database-preinstall-19c-1.0-1.el7.x86_64
--> Processing Dependency: compat-libstdc++-33 for package: oracle-database-preinstall-19c-1.0-1.el7.x86_64
--> Running transaction check
---> Package compat-libcap1.x86_64 0:1.10-7.el7 will be installed
---> Package compat-libstdc++-33.x86_64 0:3.2.3-72.el7 will be installed
---> Package ksh.x86_64 0:20120801-139.0.1.el7 will be installed
---> Package libaio-devel.x86_64 0:0.3.109-13.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
====================================================================================================================================================================================================================================================
Package Arch Version Repository Size
====================================================================================================================================================================================================================================================
Installing:
oracle-database-preinstall-19c x86_64 1.0-1.el7 ol7_latest 18 k
Installing for dependencies:
compat-libcap1 x86_64 1.10-7.el7 base 19 k
compat-libstdc++-33 x86_64 3.2.3-72.el7 base 191 k
ksh x86_64 20120801-139.0.1.el7 ol7_latest 883 k
libaio-devel x86_64 0.3.109-13.el7 base 13 k
Transaction Summary
====================================================================================================================================================================================================================================================
Install 1 Package (+4 Dependent packages)
Total size: 1.1 M
Installed size: 4.0 M
Downloading packages:
warning: /var/cache/yum/x86_64/7/ol7_latest/packages/ksh-20120801-139.0.1.el7.x86_64.rpm: Header V3 RSA/SHA256 Signature, key ID ec551f03: NOKEY
Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
Importing GPG key 0xEC551F03:
Userid : "Oracle OSS group (Open Source Software group) <build@oss.oracle.com>"
Fingerprint: 4214 4123 fecf c55b 9086 313d 72f9 7b74 ec55 1f03
From : /etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : compat-libstdc++-33-3.2.3-72.el7.x86_64 1/5
Installing : compat-libcap1-1.10-7.el7.x86_64 2/5
Installing : ksh-20120801-139.0.1.el7.x86_64 3/5
Installing : libaio-devel-0.3.109-13.el7.x86_64 4/5
Installing : oracle-database-preinstall-19c-1.0-1.el7.x86_64 5/5
Verifying : libaio-devel-0.3.109-13.el7.x86_64 1/5
Verifying : ksh-20120801-139.0.1.el7.x86_64 2/5
Verifying : compat-libcap1-1.10-7.el7.x86_64 3/5
Verifying : oracle-database-preinstall-19c-1.0-1.el7.x86_64 4/5
Verifying : compat-libstdc++-33-3.2.3-72.el7.x86_64 5/5
Installed:
oracle-database-preinstall-19c.x86_64 0:1.0-1.el7
Dependency Installed:
compat-libcap1.x86_64 0:1.10-7.el7 compat-libstdc++-33.x86_64 0:3.2.3-72.el7 ksh.x86_64 0:20120801-139.0.1.el7 libaio-devel.x86_64 0:0.3.109-13.el7
Complete!
[root@oracl-clinet-mn-2 ~]#
6 Check and change the hostnames
6.1 Check the hostnames
Node 1
[root@oracl-clinet-mn-1 yum.repos.d]# hostnamectl status
Static hostname: oracl-clinet-mn-1
Icon name: computer-vm
Chassis: vm
Machine ID: a01713ca0a5440b7bf2b8122ef5de9f5
Boot ID: bc3ae1c6ba404338a04be2217ef1b009
Virtualization: kvm
Operating System: CentOS Linux 7 (Core)
CPE OS Name: cpe:/o:centos:centos:7
Kernel: Linux 3.10.0-693.el7.x86_64
Architecture: x86-64
Node 2
[root@oracl-clinet-mn-2 ~]# hostnamectl status
Static hostname: oracl-clinet-mn-2
Icon name: computer-vm
Chassis: vm
Machine ID: a01713ca0a5440b7bf2b8122ef5de9f5
Boot ID: ede66f99137c4c8fbfae13d7e0740d72
Virtualization: kvm
Operating System: CentOS Linux 7 (Core)
CPE OS Name: cpe:/o:centos:centos:7
Kernel: Linux 3.10.0-693.el7.x86_64
Architecture: x86-64
6.2 Change the hostnames
Node 1
[root@oracl-clinet-mn-1 ~]# hostnamectl set-hostname dbnode1
[root@oracl-clinet-mn-1 ~]# hostnamectl status
Static hostname: dbnode1
Icon name: computer-vm
Chassis: vm
Machine ID: a01713ca0a5440b7bf2b8122ef5de9f5
Boot ID: bc3ae1c6ba404338a04be2217ef1b009
Virtualization: kvm
Operating System: CentOS Linux 7 (Core)
CPE OS Name: cpe:/o:centos:centos:7
Kernel: Linux 3.10.0-693.el7.x86_64
Architecture: x86-64
Node 2
[root@oracl-clinet-mn-2 ~]# hostnamectl set-hostname dbnode2
[root@oracl-clinet-mn-2 ~]# hostnamectl status
Static hostname: dbnode2
Icon name: computer-vm
Chassis: vm
Machine ID: a01713ca0a5440b7bf2b8122ef5de9f5
Boot ID: ede66f99137c4c8fbfae13d7e0740d72
Virtualization: kvm
Operating System: CentOS Linux 7 (Core)
CPE OS Name: cpe:/o:centos:centos:7
Kernel: Linux 3.10.0-693.el7.x86_64
Architecture: x86-64
[root@oracl-clinet-mn-2 ~]#
7 Configure /etc/hosts on each node
#public ip
12.1.1.113 dbnode1
12.1.1.115 dbnode2
#virtual ip
12.1.1.116 dbnode1-vip
12.1.1.117 dbnode2-vip
#scan ip
12.1.1.114 db19c-scan
#private ip
172.16.100.16 dbnode1-priv
172.16.100.8 dbnode2-priv
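A basic reachability check from each node (a suggestion, not in the original): the public and private names should answer, while the VIP and SCAN addresses must not respond yet, since Clusterware has not been installed.
ping -c 2 dbnode2        # public name should answer
ping -c 2 dbnode2-priv   # private name should answer
ping -c 2 dbnode2-vip    # should NOT answer yet; VIP/SCAN addresses must be unused before the GI install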
8 Create the installation directories on each node
mkdir -p /u01/app/19.3.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle
chown -R grid:oinstall /u01
chown oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/
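Ownership can be spot-checked afterwards (a suggested verification, not in the original):
ls -ld /u01 /u01/app /u01/app/grid /u01/app/19.3.0/grid /u01/app/oracle
# expect grid:oinstall on everything except /u01/app/oracle, which should be oracle:oinstall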
9 Adjust system configuration parameters on each node
9.1 Adjust the sysctl parameters
System parameters can be adjusted module by module by adding text files under /etc/sysctl.d, each containing the relevant parameters and their values.
Installing the oracle-database-preinstall-19c-1.0-1.el7.x86_64 package has already created the files needed to set the Oracle-required parameters.
[root@oracl-clinet-mn-2 sysctl.d]# pwd
/etc/sysctl.d
[root@oracl-clinet-mn-2 sysctl.d]# ls
99-initial-sysctl.conf 99-oracle-database-preinstall-19c-sysctl.conf 99-sysctl.conf
Configure the vm-related parameters on each node
[root@oracl-clinet-mn-2 sysctl.d]# vi 99-vm.conf
vm.swappiness = 1
vm.dirty_background_ratio = 3
vm.dirty_ratio = 80
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
[root@oracl-clinet-mn-2 sysctl.d]# sysctl -p 99-vm.conf
Confirm the remaining Oracle-related parameters on each node
[root@oracl-clinet-mn-1 etc]# grep -v ^# sysctl.conf | grep -v ^$
fs.file-max = 6815744
kernel.sem = 250 32000 100 128
kernel.shmmni = 4096
kernel.shmall = 1073741824
kernel.shmmax = 4398046511104
kernel.panic_on_oops = 1
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
net.ipv4.conf.all.rp_filter = 2
net.ipv4.conf.default.rp_filter = 2
fs.aio-max-nr = 1048576
net.ipv4.ip_local_port_range = 9000 65500
[root@oracl-clinet-mn-1 etc]#
Set rp_filter to 2 for the private-interconnect interface (eth1) on each node
[root@dbnode2 ~]# vi /etc/sysctl.conf
net.ipv4.conf.eth1.rp_filter = 2
[root@dbnode2 ~]# sysctl -p
fs.file-max = 6815744
kernel.sem = 250 32000 100 128
kernel.shmmni = 4096
kernel.shmall = 1073741824
kernel.shmmax = 4398046511104
kernel.panic_on_oops = 1
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
net.ipv4.conf.all.rp_filter = 2
net.ipv4.conf.default.rp_filter = 2
fs.aio-max-nr = 1048576
net.ipv4.ip_local_port_range = 9000 65500
net.ipv4.conf.eth1.rp_filter = 2
9.2 Adjust the limits parameters
Confirm the limits configuration created on each node by the oracle-database-preinstall-19c-1.0-1.el7.x86_64 package
[root@oracl-clinet-mn-1 limits.d]# grep -v ^# oracle-database-preinstall-19c.conf | grep -v ^$
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft nproc 16384
oracle hard nproc 16384
oracle soft stack 10240
oracle hard stack 32768
oracle hard memlock 134217728
oracle soft memlock 134217728
[root@oracl-clinet-mn-1 limits.d]#
Append limits for the grid user on each node
[root@oracl-clinet-mn-1 limits.d]# vi oracle-database-preinstall-19c.conf
# added by mike for limit grid
grid soft nproc 16384
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768
Append a ulimit-setting profile script on each node
vi /etc/profile.d/oracle-grid.sh
#Setting the appropriate ulimits for oracle and grid user
if [ $USER = "oracle" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -u 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
fi
if [ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -u 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
fi
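Because the script lives in /etc/profile.d, any login shell for these users should pick up the limits; a quick check (suggested, not in the original):
su - grid -c 'ulimit -u -n -s'     # expect 16384 / 65536 / 10240
su - oracle -c 'ulimit -u -n -s'   # same expected values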
10 Set user environment variables on each node
grid user on dbnode1
[grid@dbnode1 ~]$ vi .bash_profile
# export PATH
export ORACLE_SID=+ASM1;
export ORACLE_BASE=/u01/app/grid;
export ORACLE_HOME=/u01/app/19.3.0/grid;
export PATH=$ORACLE_HOME/bin:$PATH;
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;
grid user on dbnode2
[grid@dbnode2 ~]$ vi .bash_profile
#export PATH
export ORACLE_SID=+ASM2;
export ORACLE_BASE=/u01/app/grid;
export ORACLE_HOME=/u01/app/19.3.0/grid;
export PATH=$ORACLE_HOME/bin:$PATH;
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;
oracle user on dbnode1
[oracle@dbnode1 ~]$ vi .bash_profile
#export PATH
export ORACLE_SID=mndb1;
export ORACLE_BASE=/u01/app/oracle;
export ORACLE_HOME=/u01/app/oracle/product/19.3.0/db_1;
export PATH=$ORACLE_HOME/bin:$PATH;
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;
oracle user on dbnode2
[oracle@dbnode2 ~]$ vi .bash_profile
#export PATH
export ORACLE_SID=mndb2;
export ORACLE_BASE=/u01/app/oracle;
export ORACLE_HOME=/u01/app/oracle/product/19.3.0/db_1;
export PATH=$ORACLE_HOME/bin:$PATH;
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;
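After editing each .bash_profile, re-source it and confirm the values (expected output shown for the grid user on dbnode1; the other three users follow the same pattern):
[grid@dbnode1 ~]$ source ~/.bash_profile
[grid@dbnode1 ~]$ echo $ORACLE_SID $ORACLE_BASE $ORACLE_HOME
+ASM1 /u01/app/grid /u01/app/19.3.0/grid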
11 Install the xorg utilities on each node
[root@dbnode2 Oracle19c]# yum whatprovides "*/xhost"
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.tuna.tsinghua.edu.cn
* extras: mirrors.tuna.tsinghua.edu.cn
* updates: mirrors.tuna.tsinghua.edu.cn
ol7_UEKR5/x86_64/filelists_db | 1.8 MB 00:02:26
ol7_latest/x86_64/filelists_db | 16 MB 00:37:40
xorg-x11-server-utils-7.7-20.el7.x86_64 : X.Org X11 X server utilities
Repo : base
Matched from:
Filename : /usr/bin/xhost
xorg-x11-server-utils-7.7-20.el7.x86_64 : X.Org X11 X server utilities
Repo : ol7_latest
Matched from:
Filename : /usr/bin/xhost
xorg-x11-server-utils-7.7-20.el7.x86_64 : X.Org X11 X server utilities
Repo : @base
Matched from:
Filename : /usr/bin/xhost
[root@dbnode1 rules.d]# yum install -y xorg-x11-server-utils-7.7-20.el7.x86_64
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* base: ftp.sjtu.edu.cn
* extras: ap.stykers.moe
* updates: ap.stykers.moe
Resolving Dependencies
--> Running transaction check
---> Package xorg-x11-server-utils.x86_64 0:7.7-20.el7 will be installed
--> Processing Dependency: libXcursor.so.1()(64bit) for package: xorg-x11-server-utils-7.7-20.el7.x86_64
--> Running transaction check
---> Package libXcursor.x86_64 0:1.1.15-1.el7 will be installed
--> Processing Dependency: libXfixes.so.3()(64bit) for package: libXcursor-1.1.15-1.el7.x86_64
--> Running transaction check
---> Package libXfixes.x86_64 0:5.0.3-1.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
=====================================================================================================================================================
Package Arch Version Repository Size
=====================================================================================================================================================
Installing:
xorg-x11-server-utils x86_64 7.7-20.el7 base 178 k
Installing for dependencies:
libXcursor x86_64 1.1.15-1.el7 base 30 k
libXfixes x86_64 5.0.3-1.el7 base 18 k
Transaction Summary
=====================================================================================================================================================
Install 1 Package (+2 Dependent packages)
Total download size: 227 k
Installed size: 494 k
Downloading packages:
(1/3): libXfixes-5.0.3-1.el7.x86_64.rpm | 18 kB 00:00:00
(2/3): libXcursor-1.1.15-1.el7.x86_64.rpm | 30 kB 00:00:00
(3/3): xorg-x11-server-utils-7.7-20.el7.x86_64.rpm | 178 kB 00:00:00
-----------------------------------------------------------------------------------------------------------------------------------------------------
Total 861 kB/s | 227 kB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : libXfixes-5.0.3-1.el7.x86_64 1/3
Installing : libXcursor-1.1.15-1.el7.x86_64 2/3
Installing : xorg-x11-server-utils-7.7-20.el7.x86_64 3/3
Verifying : libXcursor-1.1.15-1.el7.x86_64 1/3
Verifying : xorg-x11-server-utils-7.7-20.el7.x86_64 2/3
Verifying : libXfixes-5.0.3-1.el7.x86_64 3/3
Installed:
xorg-x11-server-utils.x86_64 0:7.7-20.el7
Dependency Installed:
libXcursor.x86_64 0:1.1.15-1.el7 libXfixes.x86_64 0:5.0.3-1.el7
Complete!
12 Install Grid Infrastructure
12.1 Unpack the Grid software
[grid@dbnode1 grid]$ pwd
/u01/app/19.3.0/grid
[grid@dbnode1 grid]$ unzip /tmp/LINUX.X64_193000_grid_home.zip
12.2 Log in via VNC and set DISPLAY
dbnode2
[root@dbnode2 ~]# xhost +
access control disabled, clients can connect from any host
dbnode1
[root@dbnode1 ~]# su - grid
Last login: Tue Nov 19 09:03:48 CST 2019 on pts/1
[grid@dbnode1 ~]$ export DISPLAY=12.1.1.115:1
12.3 Run gridSetup to configure GI
dbnode1
[grid@dbnode1 ~]$ cd /u01/app/19.3.0/grid/
[grid@dbnode1 grid]$ ./gridSetup.sh
12.4 Graphical installation
12.4.1 Graphical installation screenshots
12.4.2 Run the root scripts
Run the scripts as root on dbnode1
HAIP cannot be used in this cloud environment, so set the HAIP_UNSUPPORTED variable before running the root script in order to disable the feature:
export HAIP_UNSUPPORTED=YES
[root@dbnode1 ~]# /u01/app/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[root@dbnode1 ~]# /u01/app/19.3.0/grid/root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/19.3.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/19.3.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
/u01/app/grid/crsdata/dbnode1/crsconfig/rootcrs_dbnode1_2019-11-19_10-39-45AM.log
2019/11/19 10:40:01 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2019/11/19 10:40:01 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2019/11/19 10:40:02 CLSRSC-363: User ignored prerequisites during installation
2019/11/19 10:40:02 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2019/11/19 10:40:04 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2019/11/19 10:40:05 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.
2019/11/19 10:40:05 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.
2019/11/19 10:40:06 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.
2019/11/19 10:40:26 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.
2019/11/19 10:40:29 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2019/11/19 10:40:32 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2019/11/19 10:40:47 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2019/11/19 10:40:47 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2019/11/19 10:40:55 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2019/11/19 10:40:56 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2019/11/19 10:41:23 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2019/11/19 10:41:31 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2019/11/19 10:41:40 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2019/11/19 10:41:48 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
ASM has been created and started successfully.
[DBT-30001] Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-191119AM104220.log for details.
2019/11/19 10:43:13 CLSRSC-482: Running command: '/u01/app/19.3.0/grid/bin/ocrconfig -upgrade grid oinstall'
CRS-4256: Updating the profile
Successful addition of voting disk 910bcd96a7394ff2bf138ec2823a3539.
Successfully replaced voting disk group with +CRS.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 910bcd96a7394ff2bf138ec2823a3539 (/dev/mapper/lun02) [CRS]
Located 1 voting disk(s).
2019/11/19 10:44:50 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2019/11/19 10:45:50 CLSRSC-343: Successfully started Oracle Clusterware stack
2019/11/19 10:45:50 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2019/11/19 10:47:15 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2019/11/19 10:47:43 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@dbnode1 ~]#
Run the scripts as root on dbnode2
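If HAIP must stay disabled here as well (an assumption based on the dbnode1 step; the original transcript does not show it on this node), set the same variable in this shell before running root.sh:
export HAIP_UNSUPPORTED=YES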
[root@dbnode2 ~]# /u01/app/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[root@dbnode2 ~]# /u01/app/19.3.0/grid/root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/19.3.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/19.3.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
/u01/app/grid/crsdata/dbnode2/crsconfig/rootcrs_dbnode2_2019-11-19_10-50-25AM.log
2019/11/19 10:50:35 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2019/11/19 10:50:35 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2019/11/19 10:50:35 CLSRSC-363: User ignored prerequisites during installation
2019/11/19 10:50:35 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2019/11/19 10:50:37 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2019/11/19 10:50:37 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.
2019/11/19 10:50:37 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.
2019/11/19 10:50:39 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.
2019/11/19 10:50:40 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.
2019/11/19 10:50:40 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2019/11/19 10:50:46 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2019/11/19 10:50:46 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2019/11/19 10:50:47 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2019/11/19 10:50:48 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2019/11/19 10:51:02 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2019/11/19 10:51:08 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2019/11/19 10:51:10 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2019/11/19 10:51:12 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2019/11/19 10:51:14 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
2019/11/19 10:51:24 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2019/11/19 10:52:09 CLSRSC-343: Successfully started Oracle Clusterware stack
2019/11/19 10:52:09 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2019/11/19 10:52:32 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2019/11/19 10:52:39 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@dbnode2 ~]#
12.4.3 Graphical installation screenshots (continued)
Note: the error reported at the end of the graphical install turns out, from the log, to be only a message about the single SCAN IP in use and can be ignored.
GI installation and configuration is now complete.
12.4.4 Verify the CRS status
[grid@dbnode2 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
ONLINE ONLINE dbnode1 STABLE
ONLINE ONLINE dbnode2 STABLE
ora.chad
ONLINE ONLINE dbnode1 STABLE
ONLINE ONLINE dbnode2 STABLE
ora.net1.network
ONLINE ONLINE dbnode1 STABLE
ONLINE ONLINE dbnode2 STABLE
ora.ons
ONLINE ONLINE dbnode1 STABLE
ONLINE ONLINE dbnode2 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
1 ONLINE ONLINE dbnode1 STABLE
2 ONLINE ONLINE dbnode2 STABLE
3 ONLINE OFFLINE STABLE
ora.CRS.dg(ora.asmgroup)
1 ONLINE ONLINE dbnode1 STABLE
2 ONLINE ONLINE dbnode2 STABLE
3 OFFLINE OFFLINE STABLE
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE dbnode2 STABLE
ora.asm(ora.asmgroup)
1 ONLINE ONLINE dbnode1 Started,STABLE
2 ONLINE ONLINE dbnode2 Started,STABLE
3 ONLINE OFFLINE STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
1 ONLINE ONLINE dbnode1 STABLE
2 ONLINE ONLINE dbnode2 STABLE
3 OFFLINE OFFLINE STABLE
ora.cvu
1 ONLINE ONLINE dbnode2 STABLE
ora.dbnode1.vip
1 ONLINE ONLINE dbnode1 STABLE
ora.dbnode2.vip
1 ONLINE ONLINE dbnode2 STABLE
ora.qosmserver
1 ONLINE ONLINE dbnode2 STABLE
ora.scan1.vip
1 ONLINE ONLINE dbnode2 STABLE
--------------------------------------------------------------------------------
[grid@dbnode2 ~]$ crsctl stat res -t -init
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.asm
1 ONLINE ONLINE dbnode2 Started,STABLE
ora.cluster_interconnect.haip
1 ONLINE OFFLINE STABLE
ora.crf
1 ONLINE ONLINE dbnode2 STABLE
ora.crsd
1 ONLINE ONLINE dbnode2 STABLE
ora.cssd
1 ONLINE ONLINE dbnode2 STABLE
ora.cssdmonitor
1 ONLINE ONLINE dbnode2 STABLE
ora.ctssd
1 ONLINE ONLINE dbnode2 ACTIVE:0,STABLE
ora.diskmon
1 OFFLINE OFFLINE STABLE
ora.evmd
1 ONLINE ONLINE dbnode2 STABLE
ora.gipcd
1 ONLINE ONLINE dbnode2 STABLE
ora.gpnpd
1 ONLINE ONLINE dbnode2 STABLE
ora.mdnsd
1 ONLINE ONLINE dbnode2 STABLE
ora.storage
1 ONLINE ONLINE dbnode2 STABLE
--------------------------------------------------------------------------------
[grid@dbnode2 ~]$
13 Install the database software
13.1 Unpack the DB software
[root@dbnode1 ~]# su - oracle
[oracle@dbnode1 19.3.0]$ mkdir -p /u01/app/oracle/product/19.3.0/db_1
[oracle@dbnode1 19.3.0]$ cd $ORACLE_HOME
[oracle@dbnode1 db_1]$ pwd
/u01/app/oracle/product/19.3.0/db_1
[oracle@dbnode1 db_1]$ unzip /tmp/LINUX.X64_193000_db_home.zip
13.2 Start the installer
dbnode1
[oracle@dbnode1 db_1]$ export DISPLAY=12.1.1.115:1
[oracle@dbnode1 db_1]$ ./runInstaller
13.3 Graphical installation
14 Configure the ASM disk groups
14.1 Run asmca
[grid@dbnode1 ~]$ export DISPLAY=12.1.1.115:1
[grid@dbnode1 ~]$ asmca
14.2 Create the DATA disk group
The newly created DATA disk group is shown here as created and successfully mounted.
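This can also be confirmed from the command line (a suggested check, not in the original transcript):
[grid@dbnode1 ~]$ asmcmd lsdg
# expect both the CRS and DATA disk groups listed with State MOUNTED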
15 Create the database
15.1 Set the RemoveIPC parameter on each node
[root@dbnode2 ~]# vi /etc/systemd/logind.conf
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Entries in this file show the compile time defaults.
# You can change settings by editing this file.
# Defaults can be restored by simply deleting this file.
#
# See logind.conf(5) for details.
[Login]
#NAutoVTs=6
#ReserveVT=6
#KillUserProcesses=no
#KillOnlyUsers=
#KillExcludeUsers=root
#InhibitDelayMaxSec=5
#HandlePowerKey=poweroff
#HandleSuspendKey=suspend
#HandleHibernateKey=hibernate
#HandleLidSwitch=suspend
#HandleLidSwitchDocked=ignore
#PowerKeyIgnoreInhibited=no
#SuspendKeyIgnoreInhibited=no
#HibernateKeyIgnoreInhibited=no
#LidSwitchIgnoreInhibited=yes
#IdleAction=ignore
#IdleActionSec=30min
#RuntimeDirectorySize=10%
RemoveIPC=no
#UserTasksMax=
[root@dbnode2 ~]# systemctl daemon-reload
[root@dbnode2 ~]# systemctl restart systemd-logind
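A quick confirmation that the setting is active in the file (suggested, not in the original):
[root@dbnode2 ~]# grep -i '^RemoveIPC' /etc/systemd/logind.conf
RemoveIPC=no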
15.2 Run dbca
[oracle@dbnode1 db_1]$ export DISPLAY=12.1.1.115:1
[oracle@dbnode1 db_1]$ dbca
15.3 Graphical database creation steps
16 Verify the overall cluster status
16.1 Check the cluster status
[grid@dbnode2 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
ONLINE ONLINE dbnode1 STABLE
ONLINE ONLINE dbnode2 STABLE
ora.chad
ONLINE ONLINE dbnode1 STABLE
ONLINE ONLINE dbnode2 STABLE
ora.net1.network
ONLINE ONLINE dbnode1 STABLE
ONLINE ONLINE dbnode2 STABLE
ora.ons
ONLINE ONLINE dbnode1 STABLE
ONLINE ONLINE dbnode2 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
1 ONLINE ONLINE dbnode1 STABLE
2 ONLINE ONLINE dbnode2 STABLE
3 ONLINE OFFLINE STABLE
ora.CRS.dg(ora.asmgroup)
1 ONLINE ONLINE dbnode1 STABLE
2 ONLINE ONLINE dbnode2 STABLE
3 OFFLINE OFFLINE STABLE
ora.DATA.dg(ora.asmgroup)
1 ONLINE ONLINE dbnode1 STABLE
2 ONLINE ONLINE dbnode2 STABLE
3 OFFLINE OFFLINE STABLE
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE dbnode1 STABLE
ora.asm(ora.asmgroup)
1 ONLINE ONLINE dbnode1 Started,STABLE
2 ONLINE ONLINE dbnode2 Started,STABLE
3 ONLINE OFFLINE STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
1 ONLINE ONLINE dbnode1 STABLE
2 ONLINE ONLINE dbnode2 STABLE
3 OFFLINE OFFLINE STABLE
ora.cvu
1 ONLINE ONLINE dbnode1 STABLE
ora.dbnode1.vip
1 ONLINE ONLINE dbnode1 STABLE
ora.dbnode2.vip
1 ONLINE ONLINE dbnode2 STABLE
ora.mndb.db
1 ONLINE ONLINE dbnode2 Open,HOME=/u01/app/o
racle/product/19.3.0
/db_1,STABLE
2 ONLINE ONLINE dbnode1 Open,HOME=/u01/app/o
racle/product/19.3.0
/db_1,STABLE
ora.qosmserver
1 ONLINE ONLINE dbnode1 STABLE
ora.scan1.vip
1 ONLINE ONLINE dbnode1 STABLE
--------------------------------------------------------------------------------
[grid@dbnode2 ~]$
16.2 Check the database status
[root@dbnode1 ~]# su - oracle
Last login: Thu Dec 12 10:06:13 CST 2019
[oracle@dbnode1 ~]$ sqlplus / as sysdba
SQL*Plus: Release 19.0.0.0.0 - Production on Thu Dec 12 10:52:37 2019
Version 19.3.0.0.0
Copyright (c) 1982, 2019, Oracle. All rights reserved.
Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.3.0.0.0
SQL> select inst_id, name, open_mode from gv$database;
INST_ID NAME OPEN_MODE
---------- --------- --------------------
1 MNDB READ WRITE
2 MNDB READ WRITE
SQL> select name from v$datafile;
NAME
--------------------------------------------------------------------------------
+DATA/MNDB/DATAFILE/system.277.1026755347
+DATA/MNDB/DATAFILE/sysaux.261.1026755391
+DATA/MNDB/DATAFILE/undotbs1.262.1026755417
+DATA/MNDB/86B637B62FE07A65E053F706E80A27CA/DATAFILE/system.264.1026755725
+DATA/MNDB/86B637B62FE07A65E053F706E80A27CA/DATAFILE/sysaux.265.1026755725
+DATA/MNDB/DATAFILE/users.281.1026755417
+DATA/MNDB/86B637B62FE07A65E053F706E80A27CA/DATAFILE/undotbs1.263.1026755725
+DATA/MNDB/DATAFILE/undotbs2.282.1026755993
+DATA/MNDB/996BD26F37C804D6E0537101010CF425/DATAFILE/system.269.1026756561
+DATA/MNDB/996BD26F37C804D6E0537101010CF425/DATAFILE/sysaux.268.1026756561
+DATA/MNDB/996BD26F37C804D6E0537101010CF425/DATAFILE/undotbs1.260.1026756561
NAME
--------------------------------------------------------------------------------
+DATA/MNDB/996BD26F37C804D6E0537101010CF425/DATAFILE/undo_2.258.1026756575
+DATA/MNDB/996BD26F37C804D6E0537101010CF425/DATAFILE/users.257.1026756577
13 rows selected.
SQL>
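As a final check (a suggestion beyond the original transcript), srvctl can confirm that both instances are registered with Clusterware and running:
[oracle@dbnode1 ~]$ srvctl status database -d mndb
# expect "Instance mndb1 is running on node ..." and "Instance mndb2 is running on node ..." for the two nodes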