
Oracle 11g: changing the private IP, public IP, VIP, and SCAN IP


Check the current cluster status first:

[root@node1 ~]# crs_stat -t

Name           Type           Target    State    Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE   node1      

ora.FLASH.dg   ora....up.type ONLINE    ONLINE   node1      

ora.GRIDDG.dg  ora....up.type ONLINE    ONLINE   node1      

ora....ER.lsnr ora....er.type ONLINE    ONLINE   node2      

ora....N1.lsnr ora....er.type ONLINE    ONLINE   node1      

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.cvu        ora.cvu.type   OFFLINE  OFFLINE              

ora.devdb.db   ora....se.type ONLINE    ONLINE   node2      

ora....ce1.svc ora....ce.type ONLINE    ONLINE   node2      

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE   node1      

ora....SM1.asm application    ONLINE   ONLINE    node1      

ora....E1.lsnr application    ONLINE   OFFLINE              

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   ONLINE    ONLINE    node1      

ora....SM2.asm application    ONLINE   ONLINE    node2      

ora....E2.lsnr application    ONLINE   ONLINE    node2      

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    ONLINE    node2      

ora.node2.vip  ora....t1.type ONLINE    ONLINE   node2      

ora.oc4j       ora.oc4j.type  ONLINE   ONLINE    node1      

ora.ons        ora.ons.type   ONLINE   ONLINE    node1      

ora.scan1.vip  ora....ip.type ONLINE    ONLINE   node1

[root@node1 ~]# srvctl status database -d devdb

Instance devdb1 is running on node node1

Instance devdb2 is running on node node2

[root@node1 ~]#


Node 1 information:

[root@node1 ~]# ifconfig -a
eth0      Link encap:Ethernet  HWaddr 00:0C:29:E7:7F:E8
          inet addr:192.168.40.191  Bcast:192.168.40.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fee7:7fe8/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:1455 errors:0 dropped:0 overruns:0 frame:0
          TX packets:1069 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:173021 (168.9 KiB)  TX bytes:173124 (169.0 KiB)
          Interrupt:16 Base address:0x2400

eth0:1    Link encap:Ethernet  HWaddr 00:0C:29:E7:7F:E8
          inet addr:192.168.40.203  Bcast:192.168.40.255  Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          Interrupt:16 Base address:0x2400

eth1      Link encap:Ethernet  HWaddr 00:0C:29:E7:7F:DE
          inet addr:192.168.94.11  Bcast:192.168.94.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fee7:7fde/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:25939 errors:0 dropped:0 overruns:0 frame:0
          TX packets:36179 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:13458788 (12.8 MiB)  TX bytes:27208084 (25.9 MiB)

eth1:1    Link encap:Ethernet  HWaddr 00:0C:29:E7:7F:DE
          inet addr:169.254.180.168  Bcast:169.254.255.255  Mask:255.255.0.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:14222 errors:0 dropped:0 overruns:0 frame:0
          TX packets:14222 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:8493677 (8.1 MiB)  TX bytes:8493677 (8.1 MiB)

[root@node1 ~]#

Node 2 information:

[root@node2 ~]# ifconfig -a
eth0      Link encap:Ethernet  HWaddr 00:0C:29:B6:6C:0D
          inet addr:192.168.40.192  Bcast:192.168.40.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:feb6:6c0d/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:679 errors:0 dropped:0 overruns:0 frame:0
          TX packets:412 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:104501 (102.0 KiB)  TX bytes:100249 (97.8 KiB)

eth0:1    Link encap:Ethernet  HWaddr 00:0C:29:B6:6C:0D
          inet addr:192.168.40.194  Bcast:192.168.40.255  Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1

eth1      Link encap:Ethernet  HWaddr 00:0C:29:B6:6C:17
          inet addr:192.168.94.12  Bcast:192.168.94.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:feb6:6c17/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:37358 errors:0 dropped:0 overruns:0 frame:0
          TX packets:26614 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:27634538 (26.3 MiB)  TX bytes:14210502 (13.5 MiB)

eth1:1    Link encap:Ethernet  HWaddr 00:0C:29:B6:6C:17
          inet addr:169.254.150.48  Bcast:169.254.255.255  Mask:255.255.0.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:9867 errors:0 dropped:0 overruns:0 frame:0
          TX packets:9867 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:5692086 (5.4 MiB)  TX bytes:5692086 (5.4 MiB)

[root@node2 ~]#

 

Node 1 hosts file:

[root@node1 ~]# cat /etc/hosts

127.0.0.1  localhost localhost.localdomain localhost4 localhost4.localdomain4

::1        localhost localhost.localdomain localhost6 localhost6.localdomain6

 

# node1

192.168.40.191            node1.localdomain               node1

192.168.40.193            node1-vip.localdomain           node1-vip

192.168.94.11             node1-priv.localdomain          node1-priv

# node2

192.168.40.192            node2.localdomain               node2

192.168.40.194            node2-vip.localdomain           node2-vip

192.168.94.12             node2-priv.localdomain          node2-priv

# scan-ip

192.168.40.203            scan-cluster.localdomain        scan-cluster

[root@node1 ~]#

Node 2 hosts file:

[root@node2 ~]# cat /etc/hosts

127.0.0.1  localhost localhost.localdomain localhost4 localhost4.localdomain4

::1        localhost localhost.localdomain localhost6 localhost6.localdomain6

 

# node1

192.168.40.191            node1.localdomain               node1

192.168.40.193            node1-vip.localdomain           node1-vip

192.168.94.11             node1-priv.localdomain          node1-priv

# node2

192.168.40.192            node2.localdomain               node2

192.168.40.194            node2-vip.localdomain           node2-vip

192.168.94.12             node2-priv.localdomain          node2-priv

# scan-ip

192.168.40.203            scan-cluster.localdomain        scan-cluster

[root@node2 ~]#

Changing the public IP, private IP, and SCAN IP

1. The current IP addresses versus the new ones

IP addresses before the change:

# node1

192.168.40.191            node1.localdomain               node1

192.168.40.193            node1-vip.localdomain           node1-vip

192.168.94.11             node1-priv.localdomain          node1-priv

# node2

192.168.40.192            node2.localdomain               node2

192.168.40.194            node2-vip.localdomain           node2-vip

192.168.94.12             node2-priv.localdomain          node2-priv

# scan-ip

192.168.40.203            scan-cluster.localdomain        scan-cluster

IP addresses after the change (only the subnets change: 192.168.40.x -> 192.168.41.x on the public network, 192.168.94.x -> 192.168.95.x on the private network):

# node1

192.168.41.191            node1.localdomain               node1

192.168.41.193            node1-vip.localdomain           node1-vip

192.168.95.11             node1-priv.localdomain          node1-priv

# node2

192.168.41.192            node2.localdomain               node2

192.168.41.194            node2-vip.localdomain           node2-vip

192.168.95.12             node2-priv.localdomain          node2-priv

# scan-ip

192.168.41.203            scan-cluster.localdomain        scan-cluster

2. Stop the cluster services first

-- Stop the database

[root@node1 script]# srvctl stop listener -l listener

[root@node1 script]# srvctl stop database -d devdb -o immediate

[root@node1 script]# srvctl stop asm -n node1

[root@node1 script]# srvctl stop asm -n node2

Note: in 11g these two 'srvctl stop asm' commands cannot be executed on their own; ASM has to be stopped together with CRS, otherwise the following error is raised:

[root@node1 script]# srvctl stop asm -n node1

PRCR-1014 : Failed to stop resource ora.asm

PRCR-1065 : Failed to stop resource ora.asm

CRS-2529: Unable to act on 'ora.asm' because that would require stopping or relocating 'ora.DATA.dg', but the force option was not specified

 

[root@node1 script]# srvctl stop vip -n node1

[root@node1 script]# srvctl stop vip -n node2

[root@node1 script]# srvctl stop scan_listener

[root@node1 script]# srvctl stop scan

[root@node1 script]# srvctl stop nodeapps -n node1

[root@node1 script]# srvctl stop nodeapps -n node2

 

 

[root@node1 script]# ./crs_stat.sh

Name                           Target     State     Host     

------------------------------ -------------------  -------  

ora.DATA.dg                    ONLINE     ONLINE    node1    

ora.FLASH.dg                   ONLINE     ONLINE    node1    

ora.GRIDDG.dg                  ONLINE     ONLINE    node1    

ora.LISTENER.lsnr              ONLINE     ONLINE    node2    

ora.LISTENER_SCAN1.lsnr        ONLINE     ONLINE    node1    

ora.asm                        ONLINE     ONLINE    node1    

ora.cvu                        OFFLINE    OFFLINE             

ora.devdb.db                  ONLINE     ONLINE     node2    

ora.devdb.service1.svc         ONLINE     ONLINE    node2    

ora.gsd                        OFFLINE    OFFLINE             

ora.net1.network               ONLINE     ONLINE    node1    

ora.node1.ASM1.asm             ONLINE     ONLINE    node1    

ora.node1.LISTENER_NODE1.lsnr  ONLINE    OFFLINE             

ora.node1.gsd                  OFFLINE    OFFLINE             

ora.node1.ons                  ONLINE     ONLINE    node1    

ora.node2.ASM2.asm             ONLINE     ONLINE    node2    

ora.node2.LISTENER_NODE2.lsnr  ONLINE    ONLINE     node2    

ora.node2.gsd                  OFFLINE    OFFLINE             

ora.node2.ons                  ONLINE     ONLINE    node2    

ora.node2.vip                  ONLINE     ONLINE    node2    

ora.oc4j                       ONLINE     ONLINE    node1    

ora.ons                        ONLINE     ONLINE    node1    

ora.scan1.vip                  ONLINE     ONLINE    node1

[root@node1 script]# srvctl stop database -d devdb -o immediate

[root@node1 script]# ./crs_stat.sh

Name                           Target     State     Host     

------------------------------ -------------------  -------  

ora.DATA.dg                    ONLINE     ONLINE    node1    

ora.FLASH.dg                   ONLINE     ONLINE    node1    

ora.GRIDDG.dg                  ONLINE     ONLINE    node1    

ora.LISTENER.lsnr              ONLINE     ONLINE    node2    

ora.LISTENER_SCAN1.lsnr        ONLINE     ONLINE    node1    

ora.asm                        ONLINE     ONLINE    node1    

ora.cvu                        OFFLINE    OFFLINE             

ora.devdb.db                   OFFLINE    OFFLINE             

ora.devdb.service1.svc        OFFLINE    OFFLINE             

ora.gsd                        OFFLINE    OFFLINE             

ora.net1.network               ONLINE     ONLINE    node1    

ora.node1.ASM1.asm             ONLINE     ONLINE    node1    

ora.node1.LISTENER_NODE1.lsnr  ONLINE    OFFLINE             

ora.node1.gsd                  OFFLINE    OFFLINE             

ora.node1.ons                  ONLINE     ONLINE    node1    

ora.node2.ASM2.asm             ONLINE     ONLINE    node2    

ora.node2.LISTENER_NODE2.lsnr  ONLINE    ONLINE     node2    

ora.node2.gsd                  OFFLINE    OFFLINE             

ora.node2.ons                  ONLINE     ONLINE    node2    

ora.node2.vip                  ONLINE     ONLINE    node2    

ora.oc4j                       ONLINE     ONLINE    node1    

ora.ons                        ONLINE     ONLINE    node1    

ora.scan1.vip                  ONLINE     ONLINE    node1    

[root@node1 script]#

Switch to the grid user:

[grid@node1 ~]$ srvctl stop listener -l listener

[grid@node1 ~]$ srvctl stop scan_listener

[grid@node1 ~]$ crs_stat -t

Name           Type           Target    State    Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE   node1      

ora.FLASH.dg   ora....up.type ONLINE    ONLINE   node1      

ora.GRIDDG.dg  ora....up.type ONLINE    ONLINE   node1      

ora....ER.lsnr ora....er.type OFFLINE  OFFLINE              

ora....N1.lsnr ora....er.type OFFLINE  OFFLINE              

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.cvu        ora.cvu.type   OFFLINE  OFFLINE              

ora.devdb.db   ora....se.type OFFLINE   OFFLINE              

ora....ce1.svc ora....ce.type OFFLINE   OFFLINE              

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE   node1      

ora....SM1.asm application    ONLINE   ONLINE    node1      

ora....E1.lsnr application    OFFLINE  OFFLINE              

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   OFFLINE   OFFLINE              

ora....SM2.asm application    ONLINE   ONLINE    node2      

ora....E2.lsnr application    OFFLINE  OFFLINE              

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    ONLINE    node2      

ora.node2.vip  ora....t1.type ONLINE    ONLINE   node2      

ora.oc4j       ora.oc4j.type  ONLINE   ONLINE    node1      

ora.ons        ora.ons.type   ONLINE   ONLINE    node2      

ora.scan1.vip  ora....ip.type ONLINE    ONLINE   node1      

[grid@node1 ~]$ srvctl stop scan

[grid@node1 ~]$

[grid@node1 ~]$ crs_stat -t

Name          Type           Target    State    Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE   node1      

ora.FLASH.dg   ora....up.type ONLINE    ONLINE   node1      

ora.GRIDDG.dg  ora....up.type ONLINE    ONLINE   node1      

ora....ER.lsnr ora....er.type OFFLINE   OFFLINE              

ora....N1.lsnr ora....er.type OFFLINE   OFFLINE              

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.cvu        ora.cvu.type   OFFLINE  OFFLINE              

ora.devdb.db   ora....se.type OFFLINE   OFFLINE              

ora....ce1.svc ora....ce.type OFFLINE   OFFLINE              

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE   node1      

ora....SM1.asm application    ONLINE   ONLINE    node1      

ora....E1.lsnr application    OFFLINE  OFFLINE              

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   OFFLINE   OFFLINE              

ora....SM2.asm application    ONLINE   ONLINE    node2      

ora....E2.lsnr application    OFFLINE  OFFLINE              

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    ONLINE    node2      

ora.node2.vip  ora....t1.type ONLINE    ONLINE   node2      

ora.oc4j       ora.oc4j.type  ONLINE   ONLINE    node1      

ora.ons        ora.ons.type   ONLINE   ONLINE    node2      

ora.scan1.vip  ora....ip.type OFFLINE   OFFLINE   

[grid@node1 ~]$ srvctl stop nodeapps -n node1

PRKO-2439 : VIP does not exist.

PRCR-1014 : Failed to stop resource ora.net1.network

PRCR-1065 : Failed to stop resource ora.net1.network

CRS-2529: Unable to act on 'ora.net1.network' because that would require stopping or relocating 'ora.scan1.vip', but the force option was not specified
[grid@node1 ~]$ srvctl stop nodeapps -n node2

[grid@node1 ~]$ crs_stat -t

Name           Type           Target    State    Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE   node1      

ora.FLASH.dg   ora....up.type ONLINE    ONLINE   node1      

ora.GRIDDG.dg  ora....up.type ONLINE    ONLINE   node1      

ora....ER.lsnr ora....er.type OFFLINE   OFFLINE              

ora....N1.lsnr ora....er.type OFFLINE   OFFLINE              

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.cvu        ora.cvu.type   OFFLINE  OFFLINE              

ora.devdb.db   ora....se.type OFFLINE   OFFLINE              

ora....ce1.svc ora....ce.type OFFLINE   OFFLINE              

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE   node1      

ora....SM1.asm application    ONLINE   ONLINE    node1      

ora....E1.lsnr application    OFFLINE  OFFLINE              

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   OFFLINE   OFFLINE              

ora....SM2.asm application    ONLINE   ONLINE    node2      

ora....E2.lsnr application    OFFLINE  OFFLINE              

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   OFFLINE   OFFLINE              

ora.node2.vip  ora....t1.type OFFLINE   OFFLINE              

ora.oc4j       ora.oc4j.type  ONLINE   ONLINE    node1      

ora.ons        ora.ons.type   OFFLINE  OFFLINE              

ora.scan1.vip  ora....ip.type OFFLINE   OFFLINE              

[grid@node1 ~]$

-- Stop the CRS stack

[root@node1 ~]# crsctl stop crs

[root@node2 ~]# crsctl stop crs

Stop CRS on both nodes.
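
Once the stop completes on a node you can verify the stack is really down (a quick sanity check; with OHAS stopped, crsctl typically reports something like the message below):

[root@node1 ~]# crsctl check crs
CRS-4639: Could not contact Oracle High Availability Services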

Node 1:

[root@node1 ~]# crsctl stop crs

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'node1'
CRS-2673: Attempting to stop 'ora.crsd' on 'node1'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'node1'
CRS-2673: Attempting to stop 'ora.oc4j' on 'node1'
CRS-2673: Attempting to stop 'ora.GRIDDG.dg' on 'node1'
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'node1'
CRS-2673: Attempting to stop 'ora.FLASH.dg' on 'node1'
CRS-2677: Stop of 'ora.DATA.dg' on 'node1' succeeded
CRS-2677: Stop of 'ora.FLASH.dg' on 'node1' succeeded
CRS-2677: Stop of 'ora.oc4j' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.oc4j' on 'node2'
CRS-2676: Start of 'ora.oc4j' on 'node2' succeeded
CRS-2677: Stop of 'ora.GRIDDG.dg' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'node1'
CRS-2677: Stop of 'ora.asm' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.net1.network' on 'node1'
CRS-2677: Stop of 'ora.net1.network' on 'node1' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'node1' has completed
CRS-2677: Stop of 'ora.crsd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'node1'
CRS-2673: Attempting to stop 'ora.evmd' on 'node1'
CRS-2673: Attempting to stop 'ora.asm' on 'node1'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'node1'
CRS-2677: Stop of 'ora.evmd' on 'node1' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'node1' succeeded
CRS-2677: Stop of 'ora.asm' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'node1'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2677: Stop of 'ora.ctssd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'node1'
CRS-2677: Stop of 'ora.cssd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'node1'
CRS-2677: Stop of 'ora.gipcd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.gpnpd' on 'node1'
CRS-2677: Stop of 'ora.gpnpd' on 'node1' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'node1' has completed
CRS-4133: Oracle High Availability Services has been stopped.
[root@node1 ~]#

Node 2:

[root@node2 ~]# crs_stat -t

Name           Type           Target    State    Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE   node2      

ora.FLASH.dg   ora....up.type ONLINE    ONLINE   node2      

ora.GRIDDG.dg  ora....up.type ONLINE    ONLINE   node2      

ora....ER.lsnr ora....er.type OFFLINE   OFFLINE              

ora....N1.lsnr ora....er.type OFFLINE   OFFLINE              

ora.asm        ora.asm.type   ONLINE   ONLINE    node2      

ora.cvu        ora.cvu.type   OFFLINE  OFFLINE              

ora.devdb.db   ora....se.type OFFLINE   OFFLINE              

ora....ce1.svc ora....ce.type OFFLINE   OFFLINE              

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....network ora....rk.type ONLINE    OFFLINE              

ora....SM2.asm application    ONLINE   ONLINE    node2      

ora....E2.lsnr application    OFFLINE  OFFLINE              

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   OFFLINE   OFFLINE              

ora.node2.vip  ora....t1.type OFFLINE   OFFLINE              

ora.oc4j       ora.oc4j.type  ONLINE   ONLINE    node2      

ora.ons        ora.ons.type   OFFLINE  OFFLINE              

ora.scan1.vip  ora....ip.type OFFLINE   OFFLINE

[root@node2 ~]# crsctl stop crs

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'node2'
CRS-2673: Attempting to stop 'ora.crsd' on 'node2'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'node2'
CRS-2673: Attempting to stop 'ora.GRIDDG.dg' on 'node2'
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'node2'
CRS-2673: Attempting to stop 'ora.FLASH.dg' on 'node2'
CRS-2673: Attempting to stop 'ora.oc4j' on 'node2'
CRS-2677: Stop of 'ora.DATA.dg' on 'node2' succeeded
CRS-2677: Stop of 'ora.FLASH.dg' on 'node2' succeeded
CRS-2677: Stop of 'ora.oc4j' on 'node2' succeeded
CRS-2677: Stop of 'ora.GRIDDG.dg' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'node2'
CRS-2677: Stop of 'ora.asm' on 'node2' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'node2' has completed
CRS-2677: Stop of 'ora.crsd' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'node2'
CRS-2673: Attempting to stop 'ora.evmd' on 'node2'
CRS-2673: Attempting to stop 'ora.asm' on 'node2'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'node2'
CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'node2'
CRS-2677: Stop of 'ora.ctssd' on 'node2' succeeded
CRS-2677: Stop of 'ora.evmd' on 'node2' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'node2' succeeded
CRS-2677: Stop of 'ora.asm' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'node2'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'node2'
CRS-2677: Stop of 'ora.drivers.acfs' on 'node2' succeeded
CRS-2677: Stop of 'ora.cssd' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'node2'
CRS-2677: Stop of 'ora.gipcd' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.gpnpd' on 'node2'
CRS-2677: Stop of 'ora.gpnpd' on 'node2' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'node2' has completed
CRS-4133: Oracle High Availability Services has been stopped.
[root@node2 ~]#

 

3. Change the host IP addresses

This must be done on both nodes.

Node 1

[root@node1 ~]# vi /etc/hosts

127.0.0.1  localhost localhost.localdomain localhost4 localhost4.localdomain4

::1        localhost localhost.localdomain localhost6 localhost6.localdomain6

 

# node1

192.168.41.191            node1.localdomain               node1

192.168.41.193            node1-vip.localdomain           node1-vip

192.168.95.11             node1-priv.localdomain          node1-priv

# node2

192.168.41.192            node2.localdomain               node2

192.168.41.194            node2-vip.localdomain           node2-vip

192.168.95.12             node2-priv.localdomain          node2-priv

# scan-ip

192.168.41.203            scan-cluster.localdomain        scan-cluster

-- Edit the NIC configuration files

[root@node1 ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth0

[root@node1 ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth1

eth0 is the public NIC and eth1 is the private NIC.

Fill the files in according to the IP plan above!

Be careful here: this step must be done on both nodes.
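
For reference, after the edit node 1's ifcfg-eth0 might look like the sketch below (same layout as the node 2 file shown in the next subsection; the HWADDR comes from the ifconfig output above, the GATEWAY matches the one in node 2's file, and the machine-specific UUID line is omitted):

DEVICE=eth0
HWADDR=00:0C:29:E7:7F:E8
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=static
IPADDR=192.168.41.191
NETMASK=255.255.255.0
GATEWAY=192.168.41.1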

 

-- Restart the network service (do NOT do this yet, or you will hit the problem I describe below)

(After editing the hosts and ifcfg files, do not restart the network service right away. At this point CRS can still be started; once CRS is up, use oifcfg to change the private IP and public IP to the desired values. In fact, CRS can start as long as the private IP it knows about matches what is configured on the host. Once the private and public IPs are changed, restart the network service; CRS remains up at that point. Then change the VIPs and the SCAN IP, and finally start all the remaining cluster resources.)


[root@node1 ~]# service network restart

Shutting down interface eth0: 

Connection closed by foreign host.

 

Disconnected from remote host(192.168.40.191) at 11:22:07.

 

Type `help' to learn how to use Xshell prompt.

-- After the restart, third-party SSH clients lose their connection; reconnect using the new address

If you are running on VMware virtual machines, the virtual network may need to be reconfigured as well.

-- After reconnecting

[root@node1 ~]# ifconfig -a
eth0      Link encap:Ethernet  HWaddr 00:0C:29:E7:7F:E8
          inet addr:192.168.41.191  Bcast:192.168.41.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fee7:7fe8/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:5379 errors:0 dropped:0 overruns:0 frame:0
          TX packets:3529 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:529386 (516.9 KiB)  TX bytes:589411 (575.5 KiB)
          Interrupt:16 Base address:0x2400

eth1      Link encap:Ethernet  HWaddr 00:0C:29:E7:7F:DE
          inet addr:192.168.95.11  Bcast:192.168.95.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fee7:7fde/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:81275 errors:0 dropped:0 overruns:0 frame:0
          TX packets:85345 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:48422935 (46.1 MiB)  TX bytes:54772644 (52.2 MiB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:30567 errors:0 dropped:0 overruns:0 frame:0
          TX packets:30567 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:14887939 (14.1 MiB)  TX bytes:14887939 (14.1 MiB)

Node 2

[root@node2 ~]# vi /etc/hosts

127.0.0.1  localhost localhost.localdomain localhost4 localhost4.localdomain4

::1        localhost localhost.localdomain localhost6 localhost6.localdomain6

 

# node1

192.168.41.191            node1.localdomain               node1

192.168.41.193            node1-vip.localdomain           node1-vip

192.168.95.11             node1-priv.localdomain          node1-priv

# node2

192.168.41.192            node2.localdomain               node2

192.168.41.194            node2-vip.localdomain           node2-vip

192.168.95.12             node2-priv.localdomain          node2-priv

# scan-ip

192.168.41.203            scan-cluster.localdomain        scan-cluster

 

-- Edit the NIC configuration files

[root@node2 ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth0

[root@node2 ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth1

[root@node2 ~]#

[root@node2 ~]#

[root@node2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0

GATEWAY=192.168.41.1

NETMASK=255.255.255.0

IPADDR=192.168.41.192

DEVICE=eth0

HWADDR=00:0C:29:B6:6C:0D

TYPE=Ethernet

UUID=c14d4003-083d-4cb4-be03-9f076d949608

ONBOOT=yes

NM_CONTROLLED=yes

BOOTPROTO=static
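
The private NIC file follows the same pattern. A sketch of what node 2's ifcfg-eth1 could contain (the HWADDR is taken from the earlier ifconfig output; leaving out GATEWAY on the private network is an assumption):

DEVICE=eth1
HWADDR=00:0C:29:B6:6C:17
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=static
IPADDR=192.168.95.12
NETMASK=255.255.255.0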

Again, eth0 is the public NIC and eth1 is the private NIC.

Fill the files in according to your own IP plan!

Remember: this step must be done on both nodes.

 

-- Restart the network service

(Same caveat as on node 1: ideally do not restart the network service until after the oifcfg changes described below have been made.)

[root@node2 ~]# service network restart

Shutting down interface eth0: 

Connection closed by foreign host.

 

Disconnected from remote host(192.168.40.192) at 11:24:07.

 

Type `help' to learn how to use Xshell prompt.

-- After the restart the client loses its connection; reconnect using the new address

Again, on VMware virtual machines the virtual network may need to be reconfigured.

-- After reconnecting

[root@node2 ~]# ifconfig -a
eth0      Link encap:Ethernet  HWaddr 00:0C:29:B6:6C:0D
          inet addr:192.168.41.192  Bcast:192.168.41.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:feb6:6c0d/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:2150 errors:0 dropped:0 overruns:0 frame:0
          TX packets:1509 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:255529 (249.5 KiB)  TX bytes:220147 (214.9 KiB)

eth1      Link encap:Ethernet  HWaddr 00:0C:29:B6:6C:17
          inet addr:192.168.95.12  Bcast:192.168.95.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:feb6:6c17/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:86513 errors:0 dropped:0 overruns:0 frame:0
          TX packets:80078 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:54878309 (52.3 MiB)  TX bytes:48314875 (46.0 MiB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:25201 errors:0 dropped:0 overruns:0 frame:0
          TX packets:25201 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:11156820 (10.6 MiB)  TX bytes:11156820 (10.6 MiB)

4. Change the RAC public IP

[root@node1 ~]# crsctl start crs

[root@node1 ~]# oifcfg delif -global eth0

[root@node1 ~]# oifcfg setif -global eth0/192.168.41.0:public

 

-- Start CRS

[root@node1 ~]# crsctl start crs

CRS-4123: Oracle High Availability Services has been started.

[root@node1 ~]#

 

-- A problem encountered here

(This problem can be avoided if you do not restart the network service right after editing the hosts and ifcfg files: change the private address with oifcfg first, then restart the network service, and the CRS service will be able to start. The errors below occur because, once the IP addresses are changed at the OS level and the network is restarted, the IP and subnet recorded in the OCR no longer match the actual configuration, so CRS cannot start.)
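
Putting that remark into concrete commands, the safer order would be roughly the sketch below (the same commands used elsewhere in this article, just reordered; run the oifcfg changes before restarting the network):

[root@node1 ~]# crsctl start crs        # hosts/ifcfg already edited, network service not yet restarted
[root@node1 ~]# oifcfg setif -global eth1/192.168.95.0:cluster_interconnect
[root@node1 ~]# oifcfg delif -global eth0
[root@node1 ~]# oifcfg setif -global eth0/192.168.41.0:public
[root@node1 ~]# service network restart        # CRS stays up; afterwards change the VIPs and SCAN IP with srvctl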

[root@node1 ~]# oifcfg iflist

eth1 192.168.95.0

eth0 192.168.41.0

[root@node1 ~]#

[root@node1 ~]# oifcfg getif

PRIF-10: failed to initialize the cluster registry

[root@node1 ~]#

[root@node1 ~]# oifcfg getif

PRIF-10: failed to initialize the cluster registry

[root@node1 ~]# oifcfg delif -global eth0

PRIF-10: failed to initialize the cluster registry

[root@node1 ~]# oifcfg setif -global eth0/192.168.41.0:public

PRIF-10: failed to initialize the cluster registry

 

-- The clusterware alert log shows

[ohasd(7176)]CRS-2765:Resource 'ora.crsd' has failed on server 'node1'.
2016-07-08 19:35:32.085
[crsd(7952)]CRS-0804:Cluster Ready Service aborted due to Oracle Cluster Registry error [PROC-44: Error in network address and interface operations Network address and interface operations error [7]]. Details at (:CRSD00111:) in /u01/app/11.2.0/grid/log/node1/crsd/crsd.log.
2016-07-08 19:35:32.865
[ohasd(7176)]CRS-2765:Resource 'ora.crsd' has failed on server 'node1'.
[crsd(8023)]CRS-0804:Cluster Ready Service aborted due to Oracle Cluster Registry error [PROC-44: Error in network address and interface operations Network address and interface operations error [7]]. Details at (:CRSD00111:) in /u01/app/11.2.0/grid/log/node1/crsd/crsd.log.
2016-07-08 19:35:41.039
[ohasd(7176)]CRS-2765:Resource 'ora.crsd' has failed on server 'node1'.
2016-07-08 19:35:41.039
[ohasd(7176)]CRS-2771:Maximum restart attempts reached for resource 'ora.crsd'; will not restart.

 

-- Analysis

Because the IP addresses were changed at the OS level and the network was restarted, the IP and subnet recorded in the OCR no longer matched the actual configuration, so the CRS service could not start.

-- Fix (a method found online)

The problem above can be handled with the gpnptool utility. The main job of the GPnPD (Grid Plug and Play daemon) process is to keep the GPnP profile synchronized across all cluster nodes; in Clusterware, services such as CSS and GPnP depend on the GPnP profile to start. The GPnP profile is an XML file stored at $GRID_HOME/gpnp/<hostname>/profiles/peer/profile.xml (the node-local copy) and $GRID_HOME/gpnp/profiles/peer/profile.xml (the global backup holding the original configuration). The trace file written by the GPnPD process goes to $GRID_HOME/log/<hostname>/gpnpd/gpnpd.log.

Several important pieces of information are stored in the GPnP profile:

the network interfaces and IP subnets (public and private)

the ASM discovery string and spfile information

The GPnP profile holds the RAC configuration: the cluster name, the network classification (public/private), ASM and CSS storage information, a security digital signature, and the location of the ASM instance's SPFILE. Whenever the cluster configuration changes, the file is automatically updated on every node. During installation, system boot, or updates made with the standard cluster tools (oifcfg changing network information, crsctl changing CSS devices, adding ASM storage, and so on), the gpnpd process replicates the GPnP profile to all other nodes.

When the cluster starts, Cluster Synchronization Services (CSS) scans all ASM disks using the ASM discovery string from the GPnP profile, as in this (truncated) profile fragment:

...asmparameterfile/registry.253.790450611"/>

Here you can see that the spfile is stored inside an ASM disk group (+DATA in this excerpt). One thing worth noting: starting ASM requires the spfile, yet the spfile itself lives in ASM. Oracle learns the spfile path from the GPnP profile and then reads the spfile contents directly from the underlying disks to start the ASM instance.

If GPnP runs into problems, the cluvfy component check can verify the integrity of Grid Plug and Play on all cluster nodes: cluvfy comp gpnp [-n node_list] [-verbose]

Finally, note that Oracle does not support manually editing profile.xml; modifying it directly can cause unrecoverable problems and ultimately force a rebuild of Clusterware on all nodes.
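
For example, on this two-node cluster the check could be invoked as follows (a usage sketch of the syntax above):

[grid@node1 ~]$ cluvfy comp gpnp -n node1,node2 -verbose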

2.1 Start CRS in exclusive mode, without starting the crsd process
# crsctl start crs -excl -nocrs

 

[root@node1 ~]# crsctl start crs -excl -nocrs
CRS-4123: Oracle High Availability Services has been started.
CRS-2672: Attempting to start 'ora.mdnsd' on 'node1'
CRS-2676: Start of 'ora.mdnsd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'node1'
CRS-2676: Start of 'ora.gpnpd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'node1'
CRS-2672: Attempting to start 'ora.gipcd' on 'node1'
CRS-2676: Start of 'ora.cssdmonitor' on 'node1' succeeded
CRS-2676: Start of 'ora.gipcd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'node1'
CRS-2672: Attempting to start 'ora.diskmon' on 'node1'
CRS-2676: Start of 'ora.diskmon' on 'node1' succeeded
CRS-2676: Start of 'ora.cssd' on 'node1' succeeded
CRS-2679: Attempting to clean 'ora.cluster_interconnect.haip' on 'node1'
CRS-2672: Attempting to start 'ora.ctssd' on 'node1'
CRS-2681: Clean of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'node1'
CRS-2676: Start of 'ora.ctssd' on 'node1' succeeded
CRS-5017: The resource action "ora.cluster_interconnect.haip start" encountered the following error:
Start action for HAIP aborted. For details refer to "(:CLSN00107:)" in "/u01/app/11.2.0/grid/log/node1/agent/ohasd/orarootagent_root/orarootagent_root.log".
CRS-2674: Start of 'ora.cluster_interconnect.haip' on 'node1' failed
CRS-2679: Attempting to clean 'ora.cluster_interconnect.haip' on 'node1'
CRS-2681: Clean of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'node1'
CRS-2676: Start of 'ora.asm' on 'node1' succeeded
[root@node1 ~]#



2.2 Back up the CRS configuration
[root@node1 ~]# mkdir /u01/gpnp

mkdir: cannot create directory `/u01/gpnp‘:File exists

[root@node1 ~]# gpnptool get -o=/u01/gpnp/profile.xml

Resulting profile written to"/u01/gpnp/profile.xml".

Success.

[root@node1 ~]#



[root@node1 ~]# more /u01/gpnp/profile.xml

<?xml version="1.0" encoding="UTF-8"?>
<gpnp:GPnP-Profile Version="1.0" xmlns="http://www.grid-pnp.org/2005/11/gpnp-profile"
  xmlns:gpnp="http://www.grid-pnp.org/2005/11/gpnp-profile"
  xmlns:orcl="http://www.oracle.com/gpnp/2005/11/gpnp-profile"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.grid-pnp.org/2005/11/gpnp-profile gpnp-profile.xsd"
  ProfileSequence="6" ClusterUId="3236a9ede6925f3bbf93a5b1f366ed11"
  ClusterName="scan-cluster" PALocation="">
 <gpnp:Network-Profile>
  <gpnp:HostNetwork id="gen" HostName="*">
   <gpnp:Network id="net1" IP="192.168.94.0" Adapter="eth1" Use="cluster_interconnect"/>
   <gpnp:Network id="net2" IP="192.168.40.0" Adapter="eth0" Use="public"/>
  </gpnp:HostNetwork>
 </gpnp:Network-Profile>
 <orcl:CSS-Profile id="css" DiscoveryString="+asm" LeaseDuration="400"/>
 <orcl:ASM-Profile id="asm" DiscoveryString="" SPFile="+GRIDDG/scan-cluster/asmparameterfile/registry.253.913494821"/>
 <ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
  <ds:SignedInfo>
   <ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
   <ds:SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
   <ds:Reference URI="">
    <ds:Transforms>
     <ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
     <ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#">
      <InclusiveNamespaces xmlns="http://www.w3.org/2001/10/xml-exc-c14n#" PrefixList="gpnp orcl xsi"/>
     </ds:Transform>
    </ds:Transforms>
    <ds:DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
    <ds:DigestValue>wp3L2kQv2+FWjS55HTv0nFoOykw=</ds:DigestValue>
   </ds:Reference>
  </ds:SignedInfo>
  <ds:SignatureValue>ZOb+OsVU3bug/UO1dT/stca66voRtEArFwI4SZC5flUM93p0gZraQGF294seZ+kBYqbaXIp9NdqmxFq6Enf5pbY8F+KmAK/668RsBuMqdBZonzix+PhvxWssDWmejINXs1PlzPjLj0O3ya/8Ld3oisEUtbTFJXGkYcRDrTtmliU=</ds:SignatureValue>
 </ds:Signature>
</gpnp:GPnP-Profile>

 


 

From the output above you can see that both eth0 and eth1 in the profile still carry the old subnets.


View the CRS configuration:
[root@node1 ~]# gpnptool get

Warning: some command line parameters were defaulted. Resulting command line:

         /u01/app/11.2.0/grid/bin/gpnptool.bin get -o-

 

<?xml version="1.0" encoding="UTF-8"?> ... (the XML content is identical to the backed-up profile.xml shown above: ProfileSequence="6", net1 192.168.94.0 on eth1, net2 192.168.40.0 on eth0) ...

Success.

[root@node1 ~]#


 
Note: the backed-up profile.xml has exactly the same configuration as the 'gpnptool get' output, and also the same as the profile.xml under $ORACLE_HOME/gpnp/profiles/peer.


2.3 Modify the backed-up CRS configuration
2.3.1 Back up the configuration file:


[root@node1 ~]# pwd

/root

[root@node1 ~]# cd /u01/app/11.2.0/grid/gpnp/profiles/peer/

[root@node1 peer]# pwd

/u01/app/11.2.0/grid/gpnp/profiles/peer

[root@node1 peer]# ls -al profile.xml

-rw-r--r--. 1 grid oinstall 1885 Jun  2 20:33 profile.xml

[root@node1 peer]#

[root@node1 peer]#

[root@node1 peer]# cp profile.xml p.xml

[root@node1 peer]# ls -al

total 20

drwxr-x---. 2 grid oinstall 4096 Jul  8 20:13 .

drwxr-x---. 3 grid oinstall 4096 Jun  2 20:07 ..

-rw-r--r--. 1 grid oinstall 1879 Jun  2 20:31 profile_orig.xml

-rw-r--r--. 1 grid oinstall 1885 Jun  2 20:33 profile.xml

-rw-r--r--. 1 root root     1885 Jul 8 20:13 p.xml

[root@node1 peer]#

[root@node1 peer]# mv p.xml /u01/gpnp/

[root@node1 peer]#
 
2.3.2 Get the current sequence number (each modification written back into CRS is identified by a sequence number):
# gpnptool getpval -p=/u01/gpnp/p.xml -prf_sq -o-

 

[root@node1 peer]# gpnptool getpval -p=/u01/gpnp/p.xml -prf_sq -o-

6

[root@node1 peer]#

 
2.3.3 Get the public and private network identifiers (these ids do not match the actual NIC names; they can be found in the profile):
# gpnptool getpval -p=/u01/gpnp/p.xml -net -o-

 

[root@node1 peer]# gpnptool getpval -p=/u01/gpnp/p.xml -net -o-

net1 net2

[root@node1 peer]#



2.3.4 Update the sequence number (the old value plus 1) and the actual private network subnet in the profile:


[root@node1 peer]# gpnptool edit -p=/u01/gpnp/p.xml -o=/u01/gpnp/p.xml -ovr -prf_sq=7 -net1:net_ip=192.168.95.0

Resulting profile written to"/u01/gpnp/p.xml".

Success.

[root@node1 peer]# 

 



2.3.5 Verify that the profile file was updated:
[root@node1 peer]# more /u01/gpnp/p.xml

<?xml version="1.0" encoding="UTF-8"?>
<gpnp:GPnP-Profile Version="1.0" xmlns="http://www.grid-pnp.org/2005/11/gpnp-profile"
  xmlns:gpnp="http://www.grid-pnp.org/2005/11/gpnp-profile"
  xmlns:orcl="http://www.oracle.com/gpnp/2005/11/gpnp-profile"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.grid-pnp.org/2005/11/gpnp-profile gpnp-profile.xsd"
  ProfileSequence="7" ClusterUId="3236a9ede6925f3bbf93a5b1f366ed11"
  ClusterName="scan-cluster" PALocation="">
 <gpnp:Network-Profile>
  <gpnp:HostNetwork id="gen" HostName="*">
   <gpnp:Network id="net1" IP="192.168.95.0" Adapter="eth1" Use="cluster_interconnect"/>
   <gpnp:Network id="net2" IP="192.168.40.0" Adapter="eth0" Use="public"/>
  </gpnp:HostNetwork>
 </gpnp:Network-Profile>
 <orcl:CSS-Profile id="css" DiscoveryString="+asm" LeaseDuration="400"/>
 <orcl:ASM-Profile id="asm" DiscoveryString="" SPFile="+GRIDDG/scan-cluster/asmparameterfile/registry.253.913494821"/>
 <ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
  <ds:SignedInfo>
   <ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
   <ds:SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
   <ds:Reference URI="">
    <ds:Transforms>
     <ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
     <ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#">
      <InclusiveNamespaces xmlns="http://www.w3.org/2001/10/xml-exc-c14n#" PrefixList="gpnp orcl xsi"/>
     </ds:Transform>
    </ds:Transforms>
    <ds:DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
    <ds:DigestValue>wp3L2kQv2+FWjS55HTv0nFoOykw=</ds:DigestValue>
   </ds:Reference>
  </ds:SignedInfo>
  <ds:SignatureValue>ZOb+OsVU3bug/UO1dT/stca66voRtEArFwI4SZC5flUM93p0gZraQGF294seZ+kBYqbaXIp9NdqmxFq6Enf5pbY8F+KmAK/668RsBuMqdBZonzix+PhvxWssDWmejINXs1PlzPjLj0O3ya/8Ld3oisEUtbTFJXGkYcRDrTtmliU=</ds:SignatureValue>
 </ds:Signature>
</gpnp:GPnP-Profile>

Note: the changed values are ProfileSequence="7" and the net1 subnet, now 192.168.95.0 (highlighted in red in the original post).
 
2.3.6 Verify that the sequence number was updated:
[root@node1 peer]# gpnptool getpval -p=/u01/gpnp/p.xml -prf_sq -o-

7

[root@node1 peer]# 

2.3.7 Re-sign the configuration file with the peer wallet:
[root@node1 peer]# gpnptool sign -p=/u01/gpnp/p.xml -o=/u01/gpnp/p.xml -ovr -w=cw-fs:peer

Resulting profile written to"/u01/gpnp/p.xml".

Success.

[root@node1 peer]#


 
2.3.8 Review the changes in the configuration file:
[root@node1 peer]# more /u01/gpnp/p.xml

<?xml version="1.0" encoding="UTF-8"?>
<gpnp:GPnP-Profile Version="1.0" xmlns="http://www.grid-pnp.org/2005/11/gpnp-profile"
  xmlns:gpnp="http://www.grid-pnp.org/2005/11/gpnp-profile"
  xmlns:orcl="http://www.oracle.com/gpnp/2005/11/gpnp-profile"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.grid-pnp.org/2005/11/gpnp-profile gpnp-profile.xsd"
  ProfileSequence="7" ClusterUId="3236a9ede6925f3bbf93a5b1f366ed11"
  ClusterName="scan-cluster" PALocation="">
 <gpnp:Network-Profile>
  <gpnp:HostNetwork id="gen" HostName="*">
   <gpnp:Network id="net1" IP="192.168.95.0" Adapter="eth1" Use="cluster_interconnect"/>
   <gpnp:Network id="net2" IP="192.168.40.0" Adapter="eth0" Use="public"/>
  </gpnp:HostNetwork>
 </gpnp:Network-Profile>
 <orcl:CSS-Profile id="css" DiscoveryString="+asm" LeaseDuration="400"/>
 <orcl:ASM-Profile id="asm" DiscoveryString="" SPFile="+GRIDDG/scan-cluster/asmparameterfile/registry.253.913494821"/>
 <ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
  <ds:SignedInfo>
   <ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
   <ds:SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
   <ds:Reference URI="">
    <ds:Transforms>
     <ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
     <ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#">
      <InclusiveNamespaces xmlns="http://www.w3.org/2001/10/xml-exc-c14n#" PrefixList="gpnp orcl xsi"/>
     </ds:Transform>
    </ds:Transforms>
    <ds:DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
    <ds:DigestValue>DyE81BDPVRrHAUk3T7/4Nsnb7B0=</ds:DigestValue>
   </ds:Reference>
  </ds:SignedInfo>
  <ds:SignatureValue>YRpXsBlv9H3LuDDkF2jWVOUA6ocEz/6SL2aC7yJDRkei6ipR5mIXAZc+MOOjeXp/QAT5Hp2Nkowj7TU8KWCujxxmsA8h6kpZFXHevrab1+zTsmCvuzdLBAaXaZRb3XVQPXboybF+geRVVhf+Kd6C3Bq6z9T6tG1NrN382mss0Hg=</ds:SignatureValue>
 </ds:Signature>
</gpnp:GPnP-Profile>


Note: re-signing changed the DigestValue and SignatureValue elements (highlighted in red in the original post).
 
2.3.9 Write the profile back into CRS:
[root@node1 peer]# gpnptool put -p=/u01/gpnp/p.xml

 

Success.

[root@node1 peer]#


 
2.3.10 Check the configuration in CRS:

 

[root@node1 peer]# gpnptool find -c=host-scan

 

Found 0 instances of service 'gpnp'.

[root@node1 peer]# gpnptool get


 

Note: my own find reported 'Found 0 instances'; I was not sure whether that is normal. (A likely explanation: -c expects this cluster's name, which here is scan-cluster, while host-scan is the cluster name from the example below.)

 

Below is the result another user posted:
# gpnptool find -c=host-scan    // host-scan is the RAC SCAN name
Found 1 instances of service 'gpnp'.
mdns:service:gpnp._tcp.local.://host1:30391/agent=gpnpd,cname=host-scan,host=host1,pid=12648872/gpnpd h:host1 c:host-scan

# gpnptool rget -h=host1    // host1 is the hostname
Warning: some command line parameters were defaulted. Resulting command line:
         /u01/app/grid/product/11.2.0/grid/bin/gpnptool.bin rget -h=host1 -o-
Found 1 gpnp service instance(s) to rget profile from.
RGET from tcp://host1:30391
(mdns:service:gpnp._tcp.local.://host1:30391/agent=gpnpd,cname=host-scan,host=host1,pid=12648872/gpnpd h:host1 c:host-scan):

<gpnp:GPnP-Profile Version="1.0" xmlns="http://www.grid-pnp.org/2005/11/gpnp-profile"
  xmlns:gpnp="http://www.grid-pnp.org/2005/11/gpnp-profile"
  xmlns:orcl="http://www.oracle.com/gpnp/2005/11/gpnp-profile"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.grid-pnp.org/2005/11/gpnp-profile gpnp-profile.xsd"
  ProfileSequence="23" ClusterUId="8c32638bd2727ff8bfba9eec919fb4e4"
  ClusterName="host-scan" PALocation="">
 <gpnp:Network-Profile>
  <gpnp:HostNetwork id="gen" HostName="*">
   <gpnp:Network id="net1" Adapter="en0" IP="192.168.101.0" Use="public"/>
   <gpnp:Network id="net3" Adapter="en1" IP="10.10.11.0" Use="cluster_interconnect"/>
  </gpnp:HostNetwork>
 </gpnp:Network-Profile>
 <orcl:CSS-Profile id="css" DiscoveryString="+asm" LeaseDuration="400"/>
 <orcl:ASM-Profile id="asm" DiscoveryString="" SPFile="+DATA/host-scan/asmparameterfile/registry.253.832710839"/>
 <ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
  <ds:SignedInfo>
   ...
   <ds:DigestValue>6p7GJFTV5ZoPtIqkUEudEvYnHB4=</ds:DigestValue>
  </ds:SignedInfo>
  <ds:SignatureValue>QNcNKDq1nbXDh1Htp8DYklSba6jzoYnSruNeJdLi6f9TALAQrLsrSAd6dYWaf4V7f2hGCb9qBGvr1pibl4JUeAnN7BBhgWOKyfwYKEBx2FdP2RX5tPkLZC+k2X/PO3SpFfIH7NKbvyqmz5xiso2i5C134ZG0RF9s752ZU0e2jVk=</ds:SignatureValue>
 </ds:Signature>
</gpnp:GPnP-Profile>
Success.
#
 
2.3.11 Start the crsd process:


[root@node1 peer]# crsctl start res ora.crsd -init

CRS-2672: Attempting to start 'ora.crsd' on 'node1'

CRS-2676: Start of 'ora.crsd' on 'node1' succeeded

[root@node1 peer]#

[root@node1 peer]# crs_stat -t

CRS-0184: Cannot communicate with the CRS daemon.
 
2.3.12 Check and correct the private network configuration:
[root@node1 peer]# oifcfg getif

eth1  192.168.95.0  global  cluster_interconnect

eth0  192.168.40.0  global  public

Only in OCR: eth1  192.168.94.0  global  cluster_interconnect

PRIF-30: Network information in OCR and GPnP profile differs

[root@node1 peer]# oifcfg setif -global en1/192.168.95.0:cluster_interconnect

PRIF-33: Failed to set or delete interface because hosts could not be discovered

  CRS-02307: No GPnP services on requested remote hosts.

PRIF-32: Error in checking for profile availability for host node2

  CRS-02306: GPnP service on host "node2" not found.

[root@node1 peer]#

 

Start CRS on node 2:

[root@node2 ~]# crsctl start crs

CRS-4123: Oracle High Availability Services has been started.

[root@node2 ~]# oifcfg getif

eth1  192.168.95.0  global  cluster_interconnect

eth0  192.168.40.0  global  public

Only in OCR: eth1  192.168.94.0  global  cluster_interconnect

PRIF-30: Network information in OCR and GPnP profile differs

[root@node2 ~]# crs_stat -t

Name           Type          Target    State     Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE   node1      

ora.FLASH.dg   ora....up.type ONLINE    ONLINE   node1      

ora.GRIDDG.dg  ora....up.type ONLINE    ONLINE   node1      

ora....ER.lsnr ora....er.type OFFLINE   OFFLINE              

ora....N1.lsnr ora....er.type OFFLINE   OFFLINE              

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.cvu        ora.cvu.type   OFFLINE  OFFLINE              

ora.devdb.db   ora....se.type OFFLINE   OFFLINE              

ora....ce1.svc ora....ce.type OFFLINE   OFFLINE              

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....network ora....rk.type ONLINE    OFFLINE              

ora....SM1.asm application    ONLINE   ONLINE    node1      

ora....E1.lsnr application    OFFLINE  OFFLINE              

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   ONLINE    OFFLINE              

ora....SM2.asm application    ONLINE   ONLINE    node2      

ora....E2.lsnr application    OFFLINE  OFFLINE              

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    OFFLINE              

ora.node2.vip  ora....t1.type OFFLINE   OFFLINE              

ora.oc4j       ora.oc4j.type  ONLINE   ONLINE    node1      

ora.ons        ora.ons.type   ONLINE   OFFLINE              

ora.scan1.vip  ora....ip.type OFFLINE   OFFLINE              

[root@node2 ~]#

 

After node 2 is up, run the interface change again:

[root@node1 peer]# oifcfg setif -global en1/192.168.95.0:cluster_interconnect

[root@node1 peer]# oifcfg getif

eth1  192.168.95.0  global  cluster_interconnect

eth0  192.168.40.0  global  public

en1  192.168.95.0  global  cluster_interconnect

[root@node1 peer]#
If the result is not correct, redo it with the following commands (from that user's example):
# oifcfg setif -global en1/192.168.95.0:cluster_interconnect
# oifcfg getif
en0    192.168.101.0    global   public
en1    10.10.11.0    global   cluster_interconnect
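
Incidentally, the getif output on my cluster now also lists a stray en1 entry: en1 was the adapter name on that other user's system, not a NIC that exists here (the private NIC is eth1). A sketch of how the bogus entry could be removed:

[root@node1 peer]# oifcfg delif -global en1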

 
2.3.13 Restart the CRS service:
# crsctl stop crs -f
# crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
# crs_stat -t
Name                     Type                     Target        State         Host               
------------------------------------------------------------
ora.DATA.dg        ora....up.type ONLINE       ONLINE        host1              
ora....ER.lsnr ora....er.type ONLINE       ONLINE        host1             
ora....N1.lsnr ora....er.type ONLINE       ONLINE        host1             
ora.asm               ora.asm.type      ONLINE       ONLINE        host1             
ora.cvu               ora.cvu.type      ONLINE       ONLINE        host1          
ora.gsd               ora.gsd.type      OFFLINE     OFFLINE                             
ora....SM1.asm application        ONLINE       ONLINE        host1             
ora....T1.lsnr application        ONLINE       ONLINE        host1             
ora.host1.gsd    application       OFFLINE      OFFLINE                             
ora.host1.ons    application       ONLINE        ONLINE       host1             
ora.host1.vip    ora....t1.type ONLINE       ONLINE        host1             
ora.host2.vip    ora....t1.type ONLINE       ONLINE        host1             
ora....network ora....rk.type ONLINE       ONLINE        host1             
ora.oc4j             ora.oc4j.type    ONLINE       OFFLINE                             
ora.ons               ora.ons.type      ONLINE       ONLINE        host1             
ora....ry.acfs ora....fs.type ONLINE       ONLINE        host1             
ora.scan1.vip    ora....ip.type ONLINE       ONLINE        host1             
ora.yxdb.db        ora....se.type ONLINE       ONLINE        host1              

# oifcfg getif
en0    192.168.101.0    global   public
en1    10.10.11.0    global   cluster_interconnect

With the problem resolved, the public IP can now be changed:

[root@node1 peer]# oifcfg delif -global eth0

[root@node1 peer]# oifcfg setif -global eth0/192.168.41.0:public

[root@node1 peer]# oifcfg getif

eth1 192.168.95.0  global  cluster_interconnect

en1 192.168.95.0  global  cluster_interconnect

eth0 192.168.41.0  global  public

[root@node1 peer]#

The IP address has now been changed over.

5. Change the private IP

[root@node1 peer]# oifcfg delif -global eth1

[root@node1 peer]# oifcfg setif -global eth1/192.168.95.0:cluster_interconnect
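
Afterwards oifcfg getif should show only the new subnets, roughly like this (expected output, a sketch; assuming the stray en1 entry was removed as suggested earlier):

[root@node1 peer]# oifcfg getif
eth0  192.168.41.0  global  public
eth1  192.168.95.0  global  cluster_interconnect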


6. Change the VIPs

[root@node1 peer]# srvctl config nodeapps

Network exists: 1/192.168.40.0/255.255.255.0/eth0, type static

VIP exists: /node2-vip/192.168.41.194/192.168.40.0/255.255.255.0/eth0, hosting node node2

GSD exists

ONS exists: Local port 6100, remote port 6200, EM port 2016

[root@node1 peer]# srvctl modify nodeapps -n node1 -A 192.168.41.193/255.255.255.0/eth0

PRKO-2310 : VIP does not exist on node node1.

[root@node1 peer]# srvctl modify nodeapps -n node2 -A 192.168.41.194/255.255.255.0/eth0

[root@node1 peer]#

[root@node1 peer]# srvctl config nodeapps

Network exists: 1/192.168.41.0/255.255.255.0/eth0, type static

VIP exists: /node2-vip/192.168.41.194/192.168.41.0/255.255.255.0/eth0, hosting node node2

GSD exists

ONS exists: Local port 6100, remote port 6200, EM port 2016

[root@node1 peer]#

It complains that node 1 has no VIP, so add one:

[root@node1 peer]# srvctl add vip -n node1 -k 1 -A 192.168.41.193/255.255.255.0/eth0

[root@node1 peer]#

[root@node1 peer]# srvctl config nodeapps

Network exists: 1/192.168.41.0/255.255.255.0/eth0, type static

VIP exists: /192.168.41.193/192.168.41.193/192.168.41.0/255.255.255.0/eth0, hosting node node1

VIP exists: /node2-vip/192.168.41.194/192.168.41.0/255.255.255.0/eth0, hosting node node2

GSD exists

ONS exists: Local port 6100, remote port 6200, EM port 2016

[root@node1 peer]#


 

7. Change the SCAN IP

srvctl modify scan -n 192.168.41.203

[root@node1 peer]# srvctl config scan

SCAN name: scan-cluster, Network: 1/192.168.41.0/255.255.255.0/eth0

SCAN VIP name: scan1, IP: /scan-cluster/192.168.40.203

[root@node1 peer]#

[root@node1 peer]# srvctl modify scan -n 192.168.41.203

[root@node1 peer]# srvctl config scan

SCAN name: 192.168.41.203, Network: 1/192.168.41.0/255.255.255.0/eth0

SCAN VIP name: scan1, IP: /192.168.41.203/192.168.41.203

[root@node1 peer]#
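
Note that passing a literal IP address to -n makes that address the SCAN name. To keep resolving the SCAN through the scan-cluster entry in /etc/hosts, the command would instead be (a sketch using the same srvctl syntax):

[root@node1 peer]# srvctl modify scan -n scan-cluster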

 


8. Restart CRS

Run this on each node.

Node 1

[root@node1 peer]# crsctl stop crs -f

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'node1'
CRS-2673: Attempting to stop 'ora.crsd' on 'node1'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'node1'
CRS-2673: Attempting to stop 'ora.oc4j' on 'node1'
CRS-2673: Attempting to stop 'ora.GRIDDG.dg' on 'node1'
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'node1'
CRS-2673: Attempting to stop 'ora.FLASH.dg' on 'node1'
CRS-2677: Stop of 'ora.DATA.dg' on 'node1' succeeded
CRS-2677: Stop of 'ora.FLASH.dg' on 'node1' succeeded
CRS-2677: Stop of 'ora.oc4j' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.oc4j' on 'node2'
CRS-2676: Start of 'ora.oc4j' on 'node2' succeeded
CRS-2677: Stop of 'ora.GRIDDG.dg' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'node1'
CRS-2677: Stop of 'ora.asm' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.ons' on 'node1'
CRS-2677: Stop of 'ora.ons' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.net1.network' on 'node1'
CRS-2677: Stop of 'ora.net1.network' on 'node1' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'node1' has completed
CRS-2677: Stop of 'ora.crsd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'node1'
CRS-2673: Attempting to stop 'ora.evmd' on 'node1'
CRS-2673: Attempting to stop 'ora.asm' on 'node1'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'node1'
CRS-2677: Stop of 'ora.evmd' on 'node1' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'node1' succeeded
CRS-2677: Stop of 'ora.ctssd' on 'node1' succeeded
CRS-2677: Stop of 'ora.asm' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'node1'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'node1'
CRS-2677: Stop of 'ora.cssd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.crf' on 'node1'
CRS-2677: Stop of 'ora.crf' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'node1'
CRS-2677: Stop of 'ora.gipcd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.gpnpd' on 'node1'
CRS-2677: Stop of 'ora.gpnpd' on 'node1' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'node1' has completed
CRS-4133: Oracle High Availability Services has been stopped.
[root@node1 peer]#
[root@node1 peer]# crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
[root@node1 peer]#

 

Node 2

[root@node2 ~]# crsctl stop crs

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'node2'
CRS-2673: Attempting to stop 'ora.crsd' on 'node2'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'node2'
CRS-2673: Attempting to stop 'ora.oc4j' on 'node2'
CRS-2673: Attempting to stop 'ora.GRIDDG.dg' on 'node2'
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'node2'
CRS-2673: Attempting to stop 'ora.FLASH.dg' on 'node2'
CRS-2677: Stop of 'ora.FLASH.dg' on 'node2' succeeded
CRS-2677: Stop of 'ora.DATA.dg' on 'node2' succeeded
CRS-2677: Stop of 'ora.oc4j' on 'node2' succeeded
CRS-2677: Stop of 'ora.GRIDDG.dg' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'node2'
CRS-2677: Stop of 'ora.asm' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.ons' on 'node2'
CRS-2677: Stop of 'ora.ons' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.net1.network' on 'node2'
CRS-2677: Stop of 'ora.net1.network' on 'node2' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'node2' has completed
CRS-2677: Stop of 'ora.crsd' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'node2'
CRS-2673: Attempting to stop 'ora.evmd' on 'node2'
CRS-2673: Attempting to stop 'ora.asm' on 'node2'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'node2'
CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'node2'
CRS-2677: Stop of 'ora.ctssd' on 'node2' succeeded
CRS-2677: Stop of 'ora.evmd' on 'node2' succeeded
CRS-2677: Stop of 'ora.mdnsd' on 'node2' succeeded
CRS-2677: Stop of 'ora.asm' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'node2'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'node2'
CRS-2677: Stop of 'ora.drivers.acfs' on 'node2' succeeded
CRS-2677: Stop of 'ora.cssd' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.cssdmonitor' on 'node2'
CRS-2673: Attempting to stop 'ora.crf' on 'node2'
CRS-2677: Stop of 'ora.cssdmonitor' on 'node2' succeeded
CRS-2677: Stop of 'ora.crf' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'node2'
CRS-2677: Stop of 'ora.gipcd' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.gpnpd' on 'node2'
CRS-2677: Stop of 'ora.gpnpd' on 'node2' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'node2' has completed
CRS-4133: Oracle High Availability Services has been stopped.
[root@node2 ~]# crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
[root@node2 ~]#

 

 

9. Start the database

[root@node1 peer]# srvctl start database -d devdb

[root@node2 ~]# crs_stat -t

Name           Type           Target    State    Host       

------------------------------------------------------------

ora.DATA.dg    ora....up.type ONLINE    ONLINE   node1      

ora.FLASH.dg   ora....up.type ONLINE    ONLINE   node1      

ora.GRIDDG.dg  ora....up.type ONLINE    ONLINE   node1      

ora....ER.lsnr ora....er.type ONLINE    ONLINE   node1      

ora....N1.lsnr ora....er.type ONLINE    ONLINE   node1      

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.cvu        ora.cvu.type   OFFLINE  OFFLINE              

ora.devdb.db   ora....se.type ONLINE    ONLINE   node2      

ora....ce1.svc ora....ce.type ONLINE    ONLINE   node2      

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE   node1      

ora....SM1.asm application    ONLINE   ONLINE    node1      

ora....E1.lsnr application    ONLINE   ONLINE    node1      

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   ONLINE    ONLINE    node1      

ora.node1.vip  ora....t1.type ONLINE    ONLINE   node1      

ora....SM2.asm application    ONLINE   ONLINE    node2      

ora....E2.lsnr application    ONLINE   ONLINE    node2      

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    ONLINE    node2      

ora.node2.vip  ora....t1.type ONLINE    ONLINE   node2      

ora.oc4j       ora.oc4j.type  ONLINE   ONLINE    node1      

ora.ons        ora.ons.type   ONLINE   ONLINE    node1      

ora.scan1.vip  ora....ip.type ONLINE    ONLINE   node1      

[root@node2 ~]#
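
As a final check, the database and SCAN listener can also be verified with srvctl (a sketch; the exact output wording may differ):

[root@node1 ~]# srvctl status database -d devdb
Instance devdb1 is running on node node1
Instance devdb2 is running on node node2
[root@node1 ~]# srvctl status scan_listener
SCAN Listener LISTENER_SCAN1 is enabled
SCAN listener LISTENER_SCAN1 is running on node node1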



 

At this point the whole reconfiguration has completed successfully.


Original article: http://blog.csdn.net/m15217321304/article/details/51860512
