CRS maintenance mode (exclusive mode)

How To Validate ASM Instances And Diskgroups On A RAC Cluster (When CRS Does Not Start) (Doc ID 1609127.1)

[root@rac1 ~]# ps -ef|grep grid
root      2477     1  1 20:47 ?        00:00:51 /opt/oracle.ahf/jre/bin/java -server -Xms32m -Xmx64m -Djava.awt.headless=true -Ddisable.checkForUpdate=true -XX:HeapDumpPath=/u01/app/grid/oracle.ahf/data/rac1/diag/tfa oracle.rat.tfa.TFAMain /opt/oracle.ahf/tfa
grid      3252     1  0 20:48 ?        00:00:00 /bin/sh ./OSWatcher.sh 30 48 /bin/gzip /u01/app/grid/oracle.ahf/data/repository/suptools/rac1/oswbb/grid/archive
grid      4136  3252  0 20:48 ?        00:00:00 /bin/sh ./OSWatcherFM.sh 48 /u01/app/grid/oracle.ahf/data/repository/suptools/rac1/oswbb/grid/archive
grid     21272  4136  0 21:37 ?        00:00:00 sleep 60
grid     21336  3252  0 21:37 ?        00:00:00 sleep 30
root     21559  6928  0 21:38 pts/0    00:00:00 grep --color=auto grid
[root@rac1 ~]# crsctl start crs -excl -nocrs
CRS-4123: Oracle High Availability Services has been started.
CRS-2672: Attempting to start 'ora.evmd' on 'rac1'
CRS-2672: Attempting to start 'ora.mdnsd' on 'rac1'
CRS-2676: Start of 'ora.evmd' on 'rac1' succeeded
CRS-2676: Start of 'ora.mdnsd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'rac1'
CRS-2676: Start of 'ora.gpnpd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'rac1'
CRS-2672: Attempting to start 'ora.gipcd' on 'rac1'
CRS-2676: Start of 'ora.cssdmonitor' on 'rac1' succeeded
CRS-2676: Start of 'ora.gipcd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'rac1'
CRS-2672: Attempting to start 'ora.diskmon' on 'rac1'
CRS-2676: Start of 'ora.diskmon' on 'rac1' succeeded
CRS-2676: Start of 'ora.cssd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'rac1'
CRS-2672: Attempting to start 'ora.ctssd' on 'rac1'
CRS-2676: Start of 'ora.ctssd' on 'rac1' succeeded
CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'rac1'
CRS-2676: Start of 'ora.asm' on 'rac1' succeeded
[root@rac1 ~]# ps -ef|grep grid
root      2477     1  1 20:47 ?        00:00:51 /opt/oracle.ahf/jre/bin/java -server -Xms32m -Xmx64m -Djava.awt.headless=true -Ddisable.checkForUpdate=true -XX:HeapDumpPath=/u01/app/grid/oracle.ahf/data/rac1/diag/tfa oracle.rat.tfa.TFAMain /opt/oracle.ahf/tfa
grid      3252     1  0 20:48 ?        00:00:00 /bin/sh ./OSWatcher.sh 30 48 /bin/gzip /u01/app/grid/oracle.ahf/data/repository/suptools/rac1/oswbb/grid/archive
grid      4136  3252  0 20:48 ?        00:00:00 /bin/sh ./OSWatcherFM.sh 48 /u01/app/grid/oracle.ahf/data/repository/suptools/rac1/oswbb/grid/archive
root     21723     1  2 21:38 ?        00:00:01 /u01/app/19.0.0/grid/bin/ohasd.bin exclusive
root     21812     1  1 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/orarootagent.bin
grid     21930     1  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/oraagent.bin
grid     21952     1  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/mdnsd.bin
grid     21954     1  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/evmd.bin
grid     21998     1  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/gpnpd.bin
grid     22054 21954  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/evmlogger.bin -o /u01/app/19.0.0/grid/log/[HOSTNAME]/evmd/evmlogger.info -l /u01/app/19.0.0/grid/log/[HOSTNAME]/evmd/evmlogger.log
root     22069     1  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/cssdmonitor
grid     22074     1  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/gipcd.bin
root     22157     1  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/cssdagent
grid     22182     1  2 21:38 ?        00:00:01 /u01/app/19.0.0/grid/bin/ocssd.bin -X
grid     22357  4136  0 21:38 ?        00:00:00 sleep 60
root     22496     1  0 21:38 ?        00:00:00 /u01/app/19.0.0/grid/bin/octssd.bin reboot
grid     22576     1  0 21:38 ?        00:00:00 asm_pmon_+ASM1
grid     22580     1  0 21:38 ?        00:00:00 asm_clmn_+ASM1
grid     22584     1  0 21:38 ?        00:00:00 asm_psp0_+ASM1
grid     22589     1  0 21:38 ?        00:00:00 asm_vktm_+ASM1
grid     22595     1  0 21:38 ?        00:00:00 asm_gen0_+ASM1
grid     22599     1  0 21:38 ?        00:00:00 asm_mman_+ASM1
grid     22605     1  0 21:38 ?        00:00:00 asm_gen1_+ASM1
grid     22608     1  0 21:38 ?        00:00:00 asm_diag_+ASM1
grid     22610     1  0 21:38 ?        00:00:00 asm_ping_+ASM1
grid     22612     1  0 21:38 ?        00:00:00 asm_pman_+ASM1
grid     22616     1  0 21:38 ?        00:00:00 asm_dia0_+ASM1
grid     22618     1  1 21:38 ?        00:00:00 asm_lmon_+ASM1
grid     22622     1  0 21:38 ?        00:00:00 asm_lmd0_+ASM1
grid     22624     1  0 21:38 ?        00:00:00 asm_lms0_+ASM1
grid     22629     1  0 21:38 ?        00:00:00 asm_lmhb_+ASM1
grid     22633     1  0 21:38 ?        00:00:00 asm_lck1_+ASM1
grid     22637     1  0 21:38 ?        00:00:00 asm_dbw0_+ASM1
grid     22641     1  0 21:38 ?        00:00:00 asm_lgwr_+ASM1
grid     22645     1  0 21:38 ?        00:00:00 asm_ckpt_+ASM1
grid     22649     1  0 21:38 ?        00:00:00 asm_smon_+ASM1
grid     22653     1  0 21:38 ?        00:00:00 asm_lreg_+ASM1
grid     22657     1  0 21:38 ?        00:00:00 asm_pxmn_+ASM1
grid     22659     1  0 21:38 ?        00:00:00 asm_rbal_+ASM1
grid     22663     1  0 21:38 ?        00:00:00 asm_gmon_+ASM1
grid     22667     1  0 21:38 ?        00:00:00 asm_mmon_+ASM1
grid     22669     1  0 21:38 ?        00:00:00 asm_mmnl_+ASM1
grid     22671     1  0 21:38 ?        00:00:00 asm_imr0_+ASM1
grid     22674     1  0 21:38 ?        00:00:00 asm_scm0_+ASM1
grid     22676     1  0 21:38 ?        00:00:00 asm_lck0_+ASM1
grid     22680     1  0 21:38 ?        00:00:00 asm_m000_+ASM1
grid     22685     1  0 21:38 ?        00:00:00 asm_gcr0_+ASM1
grid     22838  3252  0 21:39 ?        00:00:00 sleep 30
root     22929  6928  0 21:39 pts/0    00:00:00 grep --color=auto grid
[root@rac1 ~]# crsctl stat res -t
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4000: Command Status failed, or completed with errors.

The CRS-4535 error is expected here: the -nocrs option deliberately starts the stack without the CRSD resource layer, so crsctl stat res -t has no Cluster Ready Services daemon to talk to.

[root@rac1 ~]# su - grid
Last login: Sat Oct 28 21:23:14 CST 2023
[grid@rac1 ~]$ sqlplus '/as sysasm'

SQL*Plus: Release 19.0.0.0.0 - Production on Sat Oct 28 21:39:53 2023
Version 19.20.0.0.0

Copyright (c) 1982, 2022, Oracle.  All rights reserved.


Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.20.0.0.0

SQL> select name, state from v$asm_diskgroup;

NAME                           STATE
------------------------------ -----------
DATA1                          MOUNTED
DATA                           MOUNTED

SQL> 
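
Even though crsctl stat res -t fails at this point, the daemons that did start can be checked individually. A quick sanity check from the grid user; the CRS-4529 message shown is the typical response when CSS is healthy:

[grid@rac1 ~]$ crsctl check css
CRS-4529: Cluster Synchronization Services is online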

This document (Doc ID 1609127.1) provides the steps to validate the ASM instances and diskgroups on a RAC cluster when CRS does not start.

SOLUTION

To confirm and validate that the ASM instances and diskgroups are in good shape (the ASM instances start and the diskgroups mount), perform the following steps, which bypass CRS:

1) Shut down CRS on all the nodes:

# crsctl stop crs
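
To confirm that the stack is fully down on a node before proceeding, a check along these lines can be used; CRS-4639 is the typical response when Oracle High Availability Services is not running:

# crsctl check crs
CRS-4639: Could not contact Oracle High Availability Services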


2) Then start the clusterware in exclusive mode on node #1:

# crsctl start crs -excl -nocrs

Note: On release 11.2.0.1, use the following command instead:

# crsctl start crs -excl
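
To verify that the stack really came up in exclusive mode, look for the -X flag on ocssd.bin, as in the ps output captured earlier:

# ps -ef | grep ocssd
grid     22182     1  2 21:38 ?        00:00:01 /u01/app/19.0.0/grid/bin/ocssd.bin -X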


3) Connect to the +ASM1 instance and make sure all the diskgroups are mounted, including the OCRVOTE diskgroup:
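
A minimal way to make that connection, mirroring the session captured earlier (this assumes the grid OS user owns the Grid Infrastructure home; if the login environment does not already set it, point ORACLE_SID at the local ASM instance first):

# su - grid
$ export ORACLE_SID=+ASM1
$ sqlplus / as sysasm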

SQL> select name, state from v$asm_diskgroup;



4) If any diskgroup is not mounted, mount it (example):

SQL> alter diskgroup OCRVOTE mount;
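
If several diskgroups are dismounted, ASM also accepts mounting them all in one statement:

SQL> alter diskgroup all mount;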

SQL> select name, state from v$asm_diskgroup;



5) Then shut down the clusterware on node #1:

# crsctl stop crs -f



6) Now start the clusterware in exclusive mode on node #2:

# crsctl start crs -excl -nocrs

Note: On release 11.2.0.1, use the following command instead:

# crsctl start crs -excl


7) Connect to the +ASM2 instance and make sure all the diskgroups are mounted, including the OCRVOTE diskgroup:

SQL> select name, state from v$asm_diskgroup;



8) If any diskgroup is not mounted, mount it:

SQL> alter diskgroup OCRVOTE mount;

SQL> select name, state from v$asm_diskgroup;



9) Then shut down the clusterware on node #2:

# crsctl stop crs -f



10) Repeat the same steps on any additional nodes.
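
Once every node has been validated and its exclusive-mode stack stopped, the clusterware can be restarted normally on all nodes and the resources checked:

# crsctl start crs
# crsctl stat res -t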

