Create the user groups:
/usr/sbin/groupadd oinstall
/usr/sbin/groupadd dba
/usr/sbin/groupadd oper
/usr/sbin/groupadd asmadmin
/usr/sbin/groupadd asmdba
/usr/sbin/groupadd asmoper
/usr/sbin/groupadd backupdba
/usr/sbin/groupadd dgdba
/usr/sbin/groupadd kmdba
/usr/sbin/groupadd racdba
Create the users:
/usr/sbin/useradd -g oinstall -G dba,asmadmin,asmdba,asmoper,oper -d /home/grid -m grid
/usr/sbin/useradd -g oinstall -G dba,oper,asmdba,asmoper,asmadmin,backupdba,dgdba,kmdba,racdba -d /home/oracle -m oracle
For example (sample output from the Oracle documentation; your IDs and group memberships may differ):
$ id oracle
uid=54321(oracle) gid=54321(oinstall) groups=54321(oinstall),54322(dba),54323(oper),54324(backupdba),54325(dgdba),54326(kmdba),54327(asmdba),54330(racdba)
$ id grid
uid=54331(grid) gid=54321(oinstall) groups=54321(oinstall),54322(dba),54327(asmdba),54328(asmoper),54329(asmadmin),54330(racdba)
passwd oracle
passwd grid
Mount the OS installation disc:
mount /dev/cdrom /mnt
Set up the local yum repository configuration file:
cat >>/etc/yum.repos.d/local.repo <<EOF
[local]
name=local
baseurl=file:///mnt
gpgcheck=0
enabled=1
EOF
cat >>/etc/sysctl.conf <<EOF
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 6815744
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
kernel.shmall = 67108864
kernel.shmmax = 274877906944
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
EOF
Apply the kernel parameters:
sysctl -p
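The loaded values can be spot-checked. Note that the installer's prerequisite checks also look at fs.aio-max-nr, which is not in the list above, so it may need to be appended in the same way (1048576 is Oracle's documented minimum):
sysctl kernel.shmmax kernel.shmall kernel.sem fs.file-max
# if the prerequisite check flags asynchronous I/O limits:
# echo "fs.aio-max-nr = 1048576" >> /etc/sysctl.conf && sysctl -p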
cat >>/etc/security/limits.conf <<EOF
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
oracle hard memlock 134217728
oracle soft memlock 134217728
grid soft stack 10240
grid hard stack 32768
grid hard memlock 134217728
grid soft memlock 134217728
EOF
cat >>/etc/pam.d/login <<EOF
session required pam_limits.so
EOF
Append the following to /etc/profile (the delimiter is quoted so that $USER and $SHELL are written literally instead of being expanded when the block is appended):
cat >>/etc/profile <<'EOF'
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
umask 022
fi
EOF
# Preventing Installation Errors Caused by Terminal Output Commands
# During an Oracle Grid Infrastructure installation, OUI uses SSH to run commands and copy files to the other nodes.
# During the installation, hidden files on the system (for example, .bashrc or .cshrc) can cause makefile
# and other installation errors if they contain terminal output commands.
cat >> ~grid/.bashrc <<EOF
if [ -t 0 ]; then
stty intr ^C
fi
EOF
cat >> ~oracle/.bashrc <<EOF
if [ -t 0 ]; then
stty intr ^C
fi
EOF
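Once SSH user equivalence between the nodes is in place (the 12.2 installer can set it up for you), a quick way to confirm that non-interactive shells stay silent, which is what the guard above protects, is to run a remote no-op and check that nothing at all is printed (hostnames taken from the /etc/hosts section below):
su - grid -c "ssh uat-racdb02 /bin/true"    # any output here would break OUI's remote copies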
To disable Transparent Huge Pages manually, run the following two commands in a terminal:
echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/defrag
echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
sudo tee /etc/systemd/system/disable-thp.service <<-'EOF'
[Unit]
Description=Disable Transparent Huge Pages (THP)
[Service]
Type=simple
ExecStart=/bin/sh -c "echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled && echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag"
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl start disable-thp
sudo systemctl enable disable-thp
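You can then confirm that THP is disabled; the active value is shown in square brackets:
cat /sys/kernel/mm/transparent_hugepage/enabled    # expect: always madvise [never]
cat /sys/kernel/mm/transparent_hugepage/defrag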
mkdir -p /u01/app/oraInventory
chown -R grid:oinstall /u01/app/oraInventory
chmod -R 775 /u01/app/oraInventory
mkdir -p /u01/app/grid
chmod -R 775 /u01/app/grid
chown -R grid:oinstall /u01/app/grid
mkdir -p /u01/app/12.2.0/grid
chown -R grid:oinstall /u01/app/12.2.0/grid
chmod -R 775 /u01/app/12.2.0/grid
mkdir -p /u01/app/oracle
mkdir /u01/app/oracle/cfgtoollogs
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/app/oracle
mkdir -p /u01/app/oracle/product/12.2.0/db_1
chown -R oracle:oinstall /u01/app/oracle/product/12.2.0/db_1
chmod -R 775 /u01/app/oracle/product/12.2.0/db_1
cat >>/etc/hosts <<EOF
# public
10.25.79.101 uat-racdb01
10.25.79.102 uat-racdb02
# vip
10.25.79.201 uat-racdb01-vip
10.25.79.202 uat-racdb02-vip
# scan-vip: resolved by dnsmasq (configured in /etc/dnsmasq.d/hosts), not in this file
#10.25.79.203 uat-racdb-scan
# private
192.168.78.101 uat-racdb01-priv
192.168.78.102 uat-racdb02-priv
EOF
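A quick sanity check that the entries resolve as expected (run on both nodes):
getent hosts uat-racdb01 uat-racdb01-vip uat-racdb01-priv
getent hosts uat-racdb02 uat-racdb02-vip uat-racdb02-priv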
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config;
setenforce 0 ;
cat /etc/selinux/config | grep -i SELINUX= | grep -v "^#"
systemctl stop firewalld
systemctl disable firewalld
hostnamectl set-hostname uat-racdb02    # set the corresponding hostname on each node (uat-racdb01 / uat-racdb02)
hostname
Download location for the ASMLib packages for Red Hat Enterprise Linux 7:
Be sure to download the packages that match your OS release; otherwise, when root.sh runs during the grid installation, reads and writes against the ASM disks will fail and the CSS process will not start!
oracleasmlib-2.0.12-1.el7.x86_64.rpm
oracleasm-support-2.1.11-2.el7.x86_64.rpm
https://www.oracle.com/linux/downloads/linux-asmlib-rhel7-downloads.html
If the server has Internet access, a yum repository can be configured instead (the example below is the published Oracle ASMLib repo definition for RHEL 5; adjust the release in the path to match your system):
[oracleasm]
name=Oracle ASMLib Kernel Drivers for Red Hat Enterprise Linux Server 5
baseurl=https://yum.oracle.com/repo/OracleASM/RHEL5/$basearch/
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
gpgcheck=1
enabled=1
After the yum repository is configured, install the ASMLib packages with:
yum install oracleasm-`uname -r`
cd /etc/yum.repos.d/
mv public-yum-ol7.repo public-yum-ol7.repo.bak
yum install -y oracleasm*
oracleasm --help
Note: the owner of the ASM disks should be grid and the group should be asmadmin.
[root@ca-test1 /]# oracleasm configure -i
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Fix permissions of Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration [ OK ]
Creating /dev/oracleasm mount point [ OK ]
Loading module "oracleasm" [ OK ]
Mounting ASMlib driver filesystem [ OK ]
Scanning system for ASM disks [ OK ]
Oracle ASM disks must be created on partitions; a whole raw device such as /dev/sdb cannot be used directly!
fdisk /dev/sdb
/usr/sbin/oracleasm scandisks
/usr/sbin/oracleasm listdisks
/usr/sbin/oracleasm createdisk data01 /dev/sdb1
/usr/sbin/oracleasm createdisk data02 /dev/sdc1
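After creating the disks on the first node, rescan on the second node and confirm they are visible there as well (assuming the same shared LUNs are presented to both nodes):
/usr/sbin/oracleasm scandisks
/usr/sbin/oracleasm listdisks    # expect data01 and data02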
Setting Disk I/O Scheduler on Linux
echo deadline > /sys/block/sdb/queue/scheduler
echo deadline > /sys/block/sdc/queue/scheduler
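The echo settings above do not survive a reboot. One way to make the deadline scheduler persistent is a udev rule; a sketch, with the sd[bc] match assuming the same device names used above:
cat >/etc/udev/rules.d/60-oracle-schedulers.rules <<EOF
ACTION=="add|change", KERNEL=="sd[bc]", ATTR{queue/scheduler}="deadline"
EOF
udevadm control --reload-rules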
yum install -y tigervnc-server    # RHEL 7 packages the VNC server as tigervnc-server
yum install -y \
bc \
binutils \
compat-libcap1 \
compat-libstdc++-33 \
glibc \
glibc-devel \
ksh \
libaio \
libaio-devel \
libX11 \
libXau \
libXi \
libXtst \
libgcc \
libstdc++ \
libstdc++-devel \
libxcb \
make \
nfs-utils \
net-tools \
smartmontools \
sysstat \
tree
yum install -y compat-libcap1
yum install -y glibc-devel
yum install -y ksh libstdc++ libaio-devel cvuqdisk    # cvuqdisk is usually not in the OS repos; it is installed from the grid media below
yum install -y libstdc++-devel
For reference, Oracle's documentation lists the following minimum package versions for this release:
bc
binutils-2.23.52.0.1-12.el7 (x86_64)
compat-libcap1-1.10-3.el7 (x86_64)
compat-libstdc++-33-3.2.3-71.el7 (i686)
compat-libstdc++-33-3.2.3-71.el7 (x86_64)
glibc-2.17-36.el7 (i686)
glibc-2.17-36.el7 (x86_64)
glibc-devel-2.17-36.el7 (i686)
glibc-devel-2.17-36.el7 (x86_64)
ksh
libaio-0.3.109-9.el7 (i686)
libaio-0.3.109-9.el7 (x86_64)
libaio-devel-0.3.109-9.el7 (i686)
libaio-devel-0.3.109-9.el7 (x86_64)
libX11-1.6.0-2.1.el7 (i686)
libX11-1.6.0-2.1.el7 (x86_64)
libXau-1.0.8-2.1.el7 (i686)
libXau-1.0.8-2.1.el7 (x86_64)
libXi-1.7.2-1.el7 (i686)
libXi-1.7.2-1.el7 (x86_64)
libXtst-1.2.2-1.el7 (i686)
libXtst-1.2.2-1.el7 (x86_64)
libXrender (i686)
libXrender (x86_64)
libXrender-devel (i686)
libXrender-devel (x86_64)
libgcc-4.8.2-3.el7 (i686)
libgcc-4.8.2-3.el7 (x86_64)
libstdc++-4.8.2-3.el7 (i686)
libstdc++-4.8.2-3.el7 (x86_64)
libstdc++-devel-4.8.2-3.el7 (i686)
libstdc++-devel-4.8.2-3.el7 (x86_64)
libxcb-1.9-5.el7 (i686)
libxcb-1.9-5.el7 (x86_64)
make-3.82-19.el7 (x86_64)
nfs-utils-1.3.0-0.21.el7.x86_64 (for Oracle ACFS)
net-tools-2.0-0.17.20131004git.el7 (x86_64) (for Oracle RAC and Oracle Clusterware)
smartmontools-6.2-4.el7 (x86_64)
sysstat-10.1.5-1.el7 (x86_64)
yum install -y binutils compat-libstdc++-33 gcc glibc libaio libgcc libstdc++ make sysstat unixODBC
rpm -ivh /u01/app/12.2.0/grid/cv/rpm/cvuqdisk-1.0.10-1.rpm
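With cvuqdisk installed on both nodes, the Cluster Verification Utility shipped with the grid software can pre-check the environment before gridSetup.sh is run; a sketch, executed as grid, with the node list adjusted to your hostnames:
su - grid
cd /u01/app/12.2.0/grid
./runcluvfy.sh stage -pre crsinst -n uat-racdb01,uat-racdb02 -verbose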
cat >> ~oracle/.bash_profile <<EOF
# User specific environment and startup programs
export ORACLE_SID=rac01
export ORACLE_UNQNAME=rac01
export JAVA_HOME=/usr/local/java
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=\$ORACLE_BASE/product/12.2.0/dbhome_1
export ORACLE_TERM=xterm
export NLS_DATE_FORMAT="YYYY:MM:DDHH24:MI:SS"
#export NLS_LANG=american_america.AL32UTF8
export TNS_ADMIN=\$ORACLE_HOME/network/admin
export ORA_NLS11=\$ORACLE_HOME/nls/data
export PATH=.:\${JAVA_HOME}/bin:\${PATH}:\$HOME/bin:\$ORACLE_HOME/bin:\$ORA_CRS_HOME/bin
export LD_LIBRARY_PATH=\$ORACLE_HOME/lib
export LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:\$ORACLE_HOME
export LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
export CLASSPATH=\$ORACLE_HOME/JRE:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib:\$ORACLE_HOME/network/jlib
export THREADS_FLAG=native
export TEMP=/tmp
export TMPDIR=/tmp
umask 022
EOF
Set up the grid user's profile (the delimiter is quoted so that PATH and ORACLE_HOME are written literally rather than expanded now):
cat >> ~grid/.bash_profile <<'EOF'
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export ORACLE_SID=+ASM1
#export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/12.2.0/grid
export PATH=$ORACLE_HOME/bin:$PATH:/usr/local/bin/:.
export TEMP=/tmp
export TMP=/tmp
export TMPDIR=/tmp
umask 022
export PATH
EOF
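After the profiles are in place, start a fresh login shell and confirm the environment, for example:
su - grid -c 'echo $ORACLE_HOME'      # expect /u01/app/12.2.0/grid
su - oracle -c 'echo $ORACLE_HOME'    # expect /u01/app/oracle/product/12.2.0/dbhome_1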
Note: starting with 12.2, the SCAN IP must be resolved through DNS; it can no longer be resolved via /etc/hosts.
yum install -y dnsmasq
dnsmasq meets this requirement without any additional configuration.
systemctl stop dnsmasq
systemctl start dnsmasq
systemctl status dnsmasq
Point /etc/resolv.conf at the dnsmasq instance (substitute the actual address of the node running dnsmasq):
cat >>/etc/resolv.conf <<EOF
nameserver <IP address of the node running dnsmasq>
EOF
Then check the hosts line in /etc/nsswitch.conf and confirm that dns is listed so SCAN lookups are forwarded to dnsmasq, for example:
hosts: files dns myhostname
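With dnsmasq running and resolv.conf pointing at it, the SCAN name should now resolve (assuming the uat-racdb-scan record from the commented hosts entry above has been added to the dnsmasq hosts file); it is also worth enabling dnsmasq at boot:
systemctl enable dnsmasq
nslookup uat-racdb-scan    # should return 10.25.79.203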
If swap space is insufficient for the prerequisite checks, add a temporary swap file (roughly 16 GB here):
dd if=/dev/zero of=/tmp/tempswap bs=512k count=32000
chmod 600 /tmp/tempswap
mkswap /tmp/tempswap
swapon /tmp/tempswap
free -g
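Once the installation has finished, the temporary swap file can be removed again:
swapoff /tmp/tempswap && rm -f /tmp/tempswap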
Starting with 12.2, the grid software no longer ships a runInstaller script. The download is an image zip that you extract directly into the grid home ($ORACLE_HOME); then start the installation with:
./gridSetup.sh
Note the root.sh output from the first node below; it differs from the output produced on the second node.
[root@uat-racdb02 grid]# /u01/app/12.2.0/grid/root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/12.2.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/12.2.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
/u01/app/grid/crsdata/uat-racdb02/crsconfig/rootcrs_uat-racdb02_2020-08-11_05-03-52PM.log
2020/08/11 17:03:54 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2020/08/11 17:03:54 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.
2020/08/11 17:04:22 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2020/08/11 17:04:22 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2020/08/11 17:04:24 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2020/08/11 17:04:25 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2020/08/11 17:04:26 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.
2020/08/11 17:04:30 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.
2020/08/11 17:04:31 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.
2020/08/11 17:04:31 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.
2020/08/11 17:04:49 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2020/08/11 17:04:54 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2020/08/11 17:04:54 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2020/08/11 17:04:57 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2020/08/11 17:05:13 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2020/08/11 17:05:40 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2020/08/11 17:05:44 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'uat-racdb02'
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'uat-racdb02' has completed
CRS-4133: Oracle High Availability Services has been stopped.
CRS-4123: Oracle High Availability Services has been started.
2020/08/11 17:06:03 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2020/08/11 17:06:06 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'uat-racdb02'
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'uat-racdb02' has completed
CRS-4133: Oracle High Availability Services has been stopped.
CRS-4123: Oracle High Availability Services has been started.
CRS-2672: Attempting to start 'ora.evmd' on 'uat-racdb02'
CRS-2672: Attempting to start 'ora.mdnsd' on 'uat-racdb02'
CRS-2676: Start of 'ora.mdnsd' on 'uat-racdb02' succeeded
CRS-2676: Start of 'ora.evmd' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'uat-racdb02'
CRS-2676: Start of 'ora.gpnpd' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'uat-racdb02'
CRS-2672: Attempting to start 'ora.gipcd' on 'uat-racdb02'
CRS-2676: Start of 'ora.cssdmonitor' on 'uat-racdb02' succeeded
CRS-2676: Start of 'ora.gipcd' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'uat-racdb02'
CRS-2672: Attempting to start 'ora.diskmon' on 'uat-racdb02'
CRS-2676: Start of 'ora.diskmon' on 'uat-racdb02' succeeded
CRS-2676: Start of 'ora.cssd' on 'uat-racdb02' succeeded
Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-200811PM050642.log for details.
2020/08/11 17:07:15 CLSRSC-482: Running command: '/u01/app/12.2.0/grid/bin/ocrconfig -upgrade grid oinstall'
CRS-2672: Attempting to start 'ora.crf' on 'uat-racdb02'
CRS-2672: Attempting to start 'ora.storage' on 'uat-racdb02'
CRS-2676: Start of 'ora.storage' on 'uat-racdb02' succeeded
CRS-2676: Start of 'ora.crf' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'uat-racdb02'
CRS-2676: Start of 'ora.crsd' on 'uat-racdb02' succeeded
CRS-4256: Updating the profile
Successful addition of voting disk 268ecda0bb404f0fbf82856be9075c6b.
Successfully replaced voting disk group with +DATA.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 268ecda0bb404f0fbf82856be9075c6b (/dev/sdc) [DATA]
Located 1 voting disk(s).
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'uat-racdb02'
CRS-2673: Attempting to stop 'ora.crsd' on 'uat-racdb02'
CRS-2677: Stop of 'ora.crsd' on 'uat-racdb02' succeeded
CRS-2673: Attempting to stop 'ora.storage' on 'uat-racdb02'
CRS-2673: Attempting to stop 'ora.crf' on 'uat-racdb02'
CRS-2673: Attempting to stop 'ora.gpnpd' on 'uat-racdb02'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'uat-racdb02'
CRS-2677: Stop of 'ora.crf' on 'uat-racdb02' succeeded
CRS-2677: Stop of 'ora.gpnpd' on 'uat-racdb02' succeeded
CRS-2677: Stop of 'ora.storage' on 'uat-racdb02' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'uat-racdb02'
CRS-2677: Stop of 'ora.mdnsd' on 'uat-racdb02' succeeded
CRS-2677: Stop of 'ora.asm' on 'uat-racdb02' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'uat-racdb02'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'uat-racdb02' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'uat-racdb02'
CRS-2673: Attempting to stop 'ora.evmd' on 'uat-racdb02'
CRS-2677: Stop of 'ora.ctssd' on 'uat-racdb02' succeeded
CRS-2677: Stop of 'ora.evmd' on 'uat-racdb02' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'uat-racdb02'
CRS-2677: Stop of 'ora.cssd' on 'uat-racdb02' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'uat-racdb02'
CRS-2677: Stop of 'ora.gipcd' on 'uat-racdb02' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'uat-racdb02' has completed
CRS-4133: Oracle High Availability Services has been stopped.
2020/08/11 17:07:53 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
CRS-4123: Starting Oracle High Availability Services-managed resources
CRS-2672: Attempting to start 'ora.evmd' on 'uat-racdb02'
CRS-2672: Attempting to start 'ora.mdnsd' on 'uat-racdb02'
CRS-2676: Start of 'ora.mdnsd' on 'uat-racdb02' succeeded
CRS-2676: Start of 'ora.evmd' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'uat-racdb02'
CRS-2676: Start of 'ora.gpnpd' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.gipcd' on 'uat-racdb02'
CRS-2676: Start of 'ora.gipcd' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'uat-racdb02'
CRS-2676: Start of 'ora.cssdmonitor' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'uat-racdb02'
CRS-2672: Attempting to start 'ora.diskmon' on 'uat-racdb02'
CRS-2676: Start of 'ora.diskmon' on 'uat-racdb02' succeeded
CRS-2676: Start of 'ora.cssd' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'uat-racdb02'
CRS-2672: Attempting to start 'ora.ctssd' on 'uat-racdb02'
CRS-2676: Start of 'ora.ctssd' on 'uat-racdb02' succeeded
CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'uat-racdb02'
CRS-2676: Start of 'ora.asm' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.storage' on 'uat-racdb02'
CRS-2676: Start of 'ora.storage' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.crf' on 'uat-racdb02'
CRS-2676: Start of 'ora.crf' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'uat-racdb02'
CRS-2676: Start of 'ora.crsd' on 'uat-racdb02' succeeded
CRS-6023: Starting Oracle Cluster Ready Services-managed resources
CRS-6017: Processing resource auto-start for servers: uat-racdb02
CRS-6016: Resource auto-start has completed for server uat-racdb02
CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources
CRS-4123: Oracle High Availability Services has been started.
2020/08/11 17:09:22 CLSRSC-343: Successfully started Oracle Clusterware stack
2020/08/11 17:09:22 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'uat-racdb02'
CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'uat-racdb02'
CRS-2676: Start of 'ora.asm' on 'uat-racdb02' succeeded
CRS-2672: Attempting to start 'ora.DATA.dg' on 'uat-racdb02'
CRS-2676: Start of 'ora.DATA.dg' on 'uat-racdb02' succeeded
2020/08/11 17:10:36 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2020/08/11 17:10:55 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
Output from running root.sh on the second node:
[root@uat-racdb01 ~]# /u01/app/12.2.0/grid/root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/12.2.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/12.2.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
/u01/app/grid/crsdata/uat-racdb01/crsconfig/rootcrs_uat-racdb01_2020-08-12_09-14-43AM.log
2020/08/12 09:14:45 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2020/08/12 09:14:45 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.
2020/08/12 09:16:26 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2020/08/12 09:16:26 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2020/08/12 09:16:27 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2020/08/12 09:16:28 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2020/08/12 09:16:28 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.
2020/08/12 09:16:31 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.
2020/08/12 09:16:31 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.
2020/08/12 09:16:31 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.
2020/08/12 09:16:33 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2020/08/12 09:16:35 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2020/08/12 09:16:35 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2020/08/12 09:16:36 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2020/08/12 09:16:51 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2020/08/12 09:17:17 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2020/08/12 09:17:18 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'uat-racdb01'
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'uat-racdb01' has completed
CRS-4133: Oracle High Availability Services has been stopped.
CRS-4123: Oracle High Availability Services has been started.
2020/08/12 09:17:34 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2020/08/12 09:17:36 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'uat-racdb01'
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'uat-racdb01' has completed
CRS-4133: Oracle High Availability Services has been stopped.
CRS-4123: Oracle High Availability Services has been started.
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'uat-racdb01'
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'uat-racdb01' has completed
CRS-4133: Oracle High Availability Services has been stopped.
2020/08/12 09:17:54 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
CRS-4123: Starting Oracle High Availability Services-managed resources
CRS-2672: Attempting to start 'ora.evmd' on 'uat-racdb01'
CRS-2672: Attempting to start 'ora.mdnsd' on 'uat-racdb01'
CRS-2676: Start of 'ora.mdnsd' on 'uat-racdb01' succeeded
CRS-2676: Start of 'ora.evmd' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'uat-racdb01'
CRS-2676: Start of 'ora.gpnpd' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.gipcd' on 'uat-racdb01'
CRS-2676: Start of 'ora.gipcd' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'uat-racdb01'
CRS-2676: Start of 'ora.cssdmonitor' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'uat-racdb01'
CRS-2672: Attempting to start 'ora.diskmon' on 'uat-racdb01'
CRS-2676: Start of 'ora.diskmon' on 'uat-racdb01' succeeded
CRS-2676: Start of 'ora.cssd' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'uat-racdb01'
CRS-2672: Attempting to start 'ora.ctssd' on 'uat-racdb01'
CRS-2676: Start of 'ora.ctssd' on 'uat-racdb01' succeeded
CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'uat-racdb01'
CRS-2676: Start of 'ora.asm' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.storage' on 'uat-racdb01'
CRS-2676: Start of 'ora.storage' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.crf' on 'uat-racdb01'
CRS-2676: Start of 'ora.crf' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'uat-racdb01'
CRS-2676: Start of 'ora.crsd' on 'uat-racdb01' succeeded
CRS-6017: Processing resource auto-start for servers: uat-racdb01
CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'uat-racdb01'
CRS-2672: Attempting to start 'ora.net1.network' on 'uat-racdb01'
CRS-2676: Start of 'ora.net1.network' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.ons' on 'uat-racdb01'
CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'uat-racdb01' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'uat-racdb01'
CRS-2676: Start of 'ora.ons' on 'uat-racdb01' succeeded
CRS-2676: Start of 'ora.asm' on 'uat-racdb01' succeeded
CRS-6016: Resource auto-start has completed for server uat-racdb01
CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources
CRS-4123: Oracle High Availability Services has been started.
2020/08/12 09:19:22 CLSRSC-343: Successfully started Oracle Clusterware stack
2020/08/12 09:19:22 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2020/08/12 09:19:33 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2020/08/12 09:19:42 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
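With root.sh completed on both nodes, the clusterware stack can be verified from either node (run from the grid home as grid or root):
/u01/app/12.2.0/grid/bin/crsctl check cluster -all
/u01/app/12.2.0/grid/bin/crsctl stat res -t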
For comparison, the following is an example of a failed root.sh run in which ora.cssd could not be started:
[root@mdbuat2-ractestdbs-lv01 ~]# /u01/app/12.2.0/grid/root.sh -verbose
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/12.2.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/12.2.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
/u01/app/grid/crsdata/mdbuat2-ractestdbs-lv01/crsconfig/rootcrs_mdbuat2-ractestdbs-lv01_2020-08-03_02-32-44PM.log
2020/08/03 14:32:46 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2020/08/03 14:32:46 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.
2020/08/03 14:33:10 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2020/08/03 14:33:10 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2020/08/03 14:33:15 CLSRSC-363: User ignored prerequisites during installation
2020/08/03 14:33:15 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2020/08/03 14:33:17 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2020/08/03 14:33:18 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.
2020/08/03 14:33:24 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.
2020/08/03 14:33:25 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.
2020/08/03 14:33:25 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.
2020/08/03 14:33:44 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2020/08/03 14:33:52 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2020/08/03 14:33:52 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2020/08/03 14:33:57 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2020/08/03 14:34:13 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2020/08/03 14:34:42 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2020/08/03 14:34:48 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'mdbuat2-ractestdbs-lv01'
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'mdbuat2-ractestdbs-lv01' has completed
CRS-4133: Oracle High Availability Services has been stopped.
CRS-4123: Oracle High Availability Services has been started.
2020/08/03 14:35:09 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2020/08/03 14:35:14 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'mdbuat2-ractestdbs-lv01'
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'mdbuat2-ractestdbs-lv01' has completed
CRS-4133: Oracle High Availability Services has been stopped.
CRS-4123: Oracle High Availability Services has been started.
CRS-2672: Attempting to start 'ora.evmd' on 'mdbuat2-ractestdbs-lv01'
CRS-2672: Attempting to start 'ora.mdnsd' on 'mdbuat2-ractestdbs-lv01'
CRS-2676: Start of 'ora.mdnsd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2676: Start of 'ora.evmd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'mdbuat2-ractestdbs-lv01'
CRS-2676: Start of 'ora.gpnpd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'mdbuat2-ractestdbs-lv01'
CRS-2672: Attempting to start 'ora.gipcd' on 'mdbuat2-ractestdbs-lv01'
CRS-2676: Start of 'ora.cssdmonitor' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2676: Start of 'ora.gipcd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'mdbuat2-ractestdbs-lv01'
CRS-2672: Attempting to start 'ora.diskmon' on 'mdbuat2-ractestdbs-lv01'
CRS-2676: Start of 'ora.diskmon' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2674: Start of 'ora.cssd' on 'mdbuat2-ractestdbs-lv01' failed
CRS-2679: Attempting to clean 'ora.cssd' on 'mdbuat2-ractestdbs-lv01'
CRS-2681: Clean of 'ora.cssd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'mdbuat2-ractestdbs-lv01'
CRS-2677: Stop of 'ora.gipcd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2673: Attempting to stop 'ora.cssdmonitor' on 'mdbuat2-ractestdbs-lv01'
CRS-2677: Stop of 'ora.cssdmonitor' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2673: Attempting to stop 'ora.gpnpd' on 'mdbuat2-ractestdbs-lv01'
CRS-2677: Stop of 'ora.gpnpd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2673: Attempting to stop 'ora.mdnsd' on 'mdbuat2-ractestdbs-lv01'
CRS-2677: Stop of 'ora.mdnsd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-2673: Attempting to stop 'ora.evmd' on 'mdbuat2-ractestdbs-lv01'
CRS-2677: Stop of 'ora.evmd' on 'mdbuat2-ractestdbs-lv01' succeeded
CRS-4000: Command Start failed, or completed with errors.
2020/08/03 14:47:49 CLSRSC-119: Start of the exclusive mode cluster failed
Examining the log files shows that, while reading and writing disk /dev/sdb, the following error is raised: "CLSB:1270335232: Oracle Clusterware infrastructure error in OCSSD (OS PID 15074): Fatal signal 6 has occurred in program ocssd thread 1270335232; nested signal count is 1"
2020-08-10 17:30:02.580 : SKGFD:1270335232: Lib :UFS:: closing handle 0x7f9a303417e0 for disk :/dev/sdc:
2020-08-10 17:30:02.580 : SKGFD:1270335232: Handle 0x7f9a303432d0 from lib :ASM:ASM Library - Generic Linux, version 2.0.12 (KABI_V2): for disk :ORCL:ASMDISK02:
2020-08-10 17:30:02.580 : CLSF:1270335232: Read header of dev:/dev/sdb:none:
2020-08-10 17:30:02.580 : SKGFD:1270335232: Lib :UFS:: closing handle 0x7f9a303408a0 for disk :/dev/sdb:
2020-08-10 17:30:02.580 : CLSF:1270335232: Read header of dev:/dev/sdc:none:
2020-08-10 17:30:02.580 : SKGFD:1270335232: Lib :UFS:: closing handle 0x7f9a303417e0 for disk :/dev/sdc:
CLSB:1270335232: Oracle Clusterware infrastructure error in OCSSD (OS PID 15074): Fatal signal 6 has occurred in program ocssd thread 1270335232; nested signal c
ount is 1
Trace file /u01/app/grid/diag/crs/uat-racdb01/crs/trace/ocssd.trc
Oracle Database 12c Clusterware Release 12.2.0.1.0 - Production Copyright 1996, 2016 Oracle. All rights reserved.
DDE: Flood control is not active
2020-08-10T17:30:02.610811+08:00
Incident 1 created, dump file: /u01/app/grid/diag/crs/uat-racdb01/crs/incident/incdir_1/ocssd_i1.trc
CRS-8503 [] [] [] [] [] [] [] [] [] [] [] []
2020-08-10 17:30:03.428 : CSSD:2380109568: clssscagSelect: endpoint(0x64) authenticated with user(root)
2020-08-10 17:30:03.428 : CSSD:2380109568: clssscagProcessInitialMsg: Handshake successful with agent 1
2020-08-10 17:30:03.428 : CSSD:2380109568: clssscagProcAgReq: got a successful connection
2020-08-10 17:30:03.428 : CSSD:2380109568: clssscagProcAgReq: Sending initdata
2020-08-10 17:30:03.428 : CSSD:2380109568: clssscagProcessInitialMsg: notify agent 1 that it is active
2020-08-10 17:30:03.428 : CSSD:2380109568: clssscagProcessInitialMsg: connection from agent 1, endp 0x64 - agents joined 1
2020-08-10 17:30:03.454 : CSSD:2380109568: clssscagSelect: endpoint(0x53) authenticated with user(root)
2020-08-10 17:30:03.455 : CSSD:2380109568: clssscagProcessInitialMsg: Handshake successful with agent 0
2020-08-10 17:30:03.455 : CSSD:2380109568: clssscagProcAgReq: got a successful connection
2020-08-10 17:30:03.455 : CSSD:2380109568: clssscagProcAgReq: Sending initdata
2020-08-10 17:30:03.455 : CSSD:2380109568: clssscagProcessInitialMsg: connection from agent 0, endp 0x53 - agents joined 1
2020-08-10 17:30:03.523 : CSSD:2376079104: clsssc_CLSFAInit_CB: System not ready for CLSFA initialization
If root.sh fails, you can try deconfiguring the grid installation first and then rerunning root.sh:
./root.sh -verbose -deconfig
/u01/app/12.2.0/grid/root.sh -deconfig -force
cd /u01/app/12.2.0/grid
deinstall/deinstall