++++++++++++++++++++++++++++++++++++++++++++++++++++++
설치 환경 구성
++++++++++++++++++++++++++++++++++++++++++++++++++++++
1. /etc/hosts 수정
>>> Public IP (BootIP) : Oracle 서비스를 제공하는 IP
HACMP 구성시 Boot IP와 동일
Private IP (interconnect, heart-beat) : RAC간 통신을 위한 IP
Virtual IP : fail-over를 위한 IP, fail-over 발생시 vip가 넘어감
일반적으로 얘기하는 persistence ip와 동일
Scan IP : Oracle의 서비스를 위한 대표 IP(load balancing)
-------
node10:/# cat /etc/hosts
## Public node name
130.130.10.109 node9
130.130.10.110 node10
## Private node name(Interconnect)
192.168.10.109 node9rac
192.168.10.110 node10rac
## RAC virtualIP
130.130.10.209 node9vip
130.130.10.210 node10vip
## Oracle scan IP
130.130.10.159 node9scan
-------
2. network ip 설정
node10:/# netstat -in
Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
en0 1500 link#2 0.11.25.a5.c6.a4 1719852 0 59114 3 0
en0 1500 130.130.10 130.130.10.110 1719852 0 59114 3 0
en1 1500 link#3 0.11.25.a5.c6.a5 77481 0 44 4 0
en1 1500 192.168.10 192.168.10.110 77481 0 44 4 0
lo0 16896 link#1 169806 0 169806 0 0
lo0 16896 127 127.0.0.1 169806 0 169806 0 0
lo0 16896 ::1%1 169806 0 169806 0 0
node10:/#
3. hostname 확인(uname -S 명령어로 hostname 불일치를 fix)
node10:/# uname -S node10
node10:/#
node10:/# uname -a
AIX node10 1 6 00C8CEFD4C00
node10:/# hostname
node10
4. user 및 group 생성
>>> smitty mkuser/mkgroup/chuser
User: oracle(uid:300), oragrid(uid:301)
Group: dba(gid:302), hagsuser(gid:303)
> 계정생성후 한번씩 로그인 password를 oracle로 생성
5. /oracle 파일 시스템 생성(20~30GB) : node9/node10 모두에서 수행
[oracle@node9:/home/oragrid] lsfs /oracle
Name Nodename Mount Pt VFS Size Options Auto Accounting
/dev/fslv00 -- /oracle jfs2 62914560 rw yes no
[oracle@node9:/home/oragrid] df -g /oracle
Filesystem GB blocks Free %Used Iused %Iused Mounted on
/dev/fslv00 30.00 30.00 1% 6 1% /oracle
[oracle@node9:/home/oragrid] chmod 775 /oracle
[oracle@node9:/home/oragrid] mkdir -p /oracle/db
[oracle@node9:/home/oragrid] mkdir -p /oracle/grid
[oracle@node9:/home/oragrid] chmod 775 /oracle/db
[oracle@node9:/home/oragrid] chmod 775 /oracle/grid
[oracle@node9:/home/oragrid] chown -R oracle:dba /oracle/db
[oracle@node9:/home/oragrid] chown -R oragrid:dba /oracle/grid
6. OS fileset 확인
bos.adt.base
bos.adt.lib
bos.adt.libm
bos.adt.syscalls
bos.perf.libperfstat
bos.perf.perfstat
bos.perf.proctools
rsct.basic.rte
rsct.compat.basic
rsct.compat.clients.rte
bos.clvm.enh
bos.data
7. User Capability 변경(양쪽 노드 모두 수행!!!)
# chuser capabilities=CAP_NUMA_ATTACH,CAP_BYPASS_RAC_VMM,CAP_PROPAGATE oracle
# chuser capabilities=CAP_NUMA_ATTACH,CAP_BYPASS_RAC_VMM,CAP_PROPAGATE oragrid
# lsuser oracle
# lsuser oragrid
8. System Limit
# node9:/home/scripts# cat /etc/security/limits
---------
default:
fsize = -1
core = 2097151
cpu = -1
data = -1
rss = -1
stack = -1
nofiles = -1
---------
9. Parameter 변경
chdev -l sys0 -a maxuproc=16384
chdev -l sys0 -a ncargs=256
ioo -p -o aio_maxservers=100
vmo -p -o minperm%=3
vmo -p -o maxperm%=90
vmo -p -o maxclient%=90
vmo -p -o lru_file_repage=0
vmo -p -o strict_maxperm=0
vmo -p -o strict_maxclient=1
no -r -o ipqmaxlen=512
no -p -o rfc1323=1
no -p -o sb_max=1310720
no -p -o tcp_recvspace=65536
no -p -o tcp_sendspace=65536
no -p -o udp_recvspace=655360
no -p -o udp_sendspace=65536
10. Disk attribute 변경
lsdev -Cc disk | grep -v grep | grep MPIO | awk '{print "chdev -l " $1 " -a reserve_policy=no_reserve "}' |sh -x
----
for i in 2 3 4 5 6 7 8 9
do
chdev -l hdisk${i} -a reserve_policy=no_reserve
done
----
11. SSH 설정 : 마지막 부분 참조
12. User .profile 설정
------- root ---------
node9:/mnt/11g_rac# cat ~/.profile
export PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:$HOME/bin:/usr/bin/X11:/sbin:/usr/es/sbin/cluster/utilities/:.
if [ -s "$MAIL" ] # This is at Shell startup. In normal
then echo "$MAILMSG" # operation, the Shell checks
fi # periodically.
export PS1=`hostname -s`':$PWD# '
export TERM=vt100
set -o vi
------- oracle ---------
export PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:$HOME/bin:/usr/bin/X11:/sbin:.
if [ -s "$MAIL" ] # This is at Shell startup. In normal
then echo "$MAILMSG" # operation, the Shell checks
fi # periodically.
umask 022
export EDITOR=vi
HOST=`hostname`
USER=`logname`
export PS1='['`whoami`'@'`hostname`':$PWD] '
set -o vi
stty -istrip erase ^H
alias ls='ls -aCF'
alias ll='ls -alF'
alias rm='rm -i'
alias vi='/usr/bin/vi'
export LANG=C
export ORACLE_BASE=/oracle/db
export ORACLE_HOME=/oracle/db/11g
# <<< GI_HOME과 ORACLE_HOME의 순서가 중요 !!!!
export GI_HOME=/oracle/grid
export ORACLE_SID=RAW1
# <<< node2는 RAW2
export NLS_LANG=AMERICAN_AMERICA.UTF8
export PATH=$PATH:$ORACLE_HOME/bin:/oracle/dba:$GI_HOME/bin:$ORACLE_HOME/OPatch
------- oragrid ---------
export PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:$HOME/bin:/usr/bin/X11:/sbin:.
if [ -s "$MAIL" ] # This is at Shell startup. In normal
then echo "$MAILMSG" # operation, the Shell checks
fi # periodically.
umask 022
export EDITOR=vi
HOST=`hostname`
USER=`logname`
export PS1='['`whoami`'@'`hostname`':$PWD] '
set -o vi
stty -istrip erase ^H
alias ls='ls -aCF'
alias ll='ls -alF'
alias rm='rm -i'
alias vi='/usr/bin/vi'
export LANG=C
export ORACLE_BASE=/oracle/db
export GI_HOME=/oracle/grid
export ORACLE_HOME=/oracle/db/11g
# <<< GI_HOME과 ORACLE_HOME의 순서가 중요 !!!!
export ORACLE_SID=+ASM1
# <<< node2는 +ASM2
export NLS_LANG=AMERICAN_AMERICA.UTF8
export PATH=$PATH:$ORACLE_HOME/bin:/oracle/dba:$GI_HOME/bin:$ORACLE_HOME/OPatch
13. ASM 구성을 위한 디스크구성 (1G이상)
# chown oragrid:dba /dev/rhdisk5
# chmod 660 /dev/rhdisk5
>> node9 && node10 모두에서 수행
14. Raw Device 구성
# bootinfo -s hdisk8 >> hdisk8의 size조회
# mkvg -S -f -y racvg -s 64 -V 50 -C hdisk8 hdisk9
> -S : Scalable VG
> -f : force
> -y racvg : vg명을 racvg로
> -s 64 : pp size
> -V 50 : major number
> -C : Enhanced Concurrent VG
# varyonvg racvg
----- mklv script -----
mklv -y crs_ocr01 -t jfs2 racvg 5
mklv -y crs_ocr02 -t jfs2 racvg 5
mklv -y crs_vote01 -t jfs2 racvg 5
mklv -y crs_vote02 -t jfs2 racvg 5
mklv -y crs_vote03 -t jfs2 racvg 5
mklv -y rac_redo01 -t jfs2 racvg 4
mklv -y rac_redo02 -t jfs2 racvg 4
mklv -y rac_redo03 -t jfs2 racvg 4
mklv -y rac_redo04 -t jfs2 racvg 4
mklv -y rac_control01 -t jfs2 racvg 2
mklv -y rac_control02 -t jfs2 racvg 2
mklv -y rac_system01 -t jfs2 racvg 8
mklv -y rac_sysaux01 -t jfs2 racvg 8
mklv -y rac_undo01 -t jfs2 racvg 8
mklv -y rac_undo02 -t jfs2 racvg 8
mklv -y rac_temp01 -t jfs2 racvg 5
mklv -y rac_user01 -t jfs2 racvg 2
mklv -y rac_spfile01 -t jfs2 racvg 1
-------------------------
14. Raw Device 권한 설정
>>> node9
# chown oracle:dba /dev/rrac*
# chown oragrid:dba /dev/rcrs*
>>> node10
# importvg -V 50 -y racvg hdisk8 (hdisk는 하나만 지정해야함)
# chown oracle:dba /dev/rrac*
# chown oragrid:dba /dev/rcrs*
15. vnc && unzip 설치
16. NTP 구성
>>> node9
node9:/oracle/_src# cat /etc/ntp.conf
broadcastclient
driftfile /etc/ntp.drift
tracefile /etc/ntp.trace
server 127.0.0.1
node9:/# stopsrc -s xntpd
node9:/# startsrc -s xntpd -a"-x"
node9:/# cat /etc/rc.tcpip | grep ntp
#start /usr/sbin/xntpd -x "$src_running"
>>> node10
node10:/# cat /etc/ntp.conf
broadcastclient
driftfile /etc/ntp.drift
tracefile /etc/ntp.trace
server 130.130.10.109
node9:/# stopsrc -s xntpd
node9:/# startsrc -s xntpd -a"-x"
node9:/# cat /etc/rc.tcpip | grep ntp
#start /usr/sbin/xntpd -x "$src_running"
17. HACMP 구성 > fileset 설치 > OS 리부팅
18. HACMP 구성 > Cluster 구성
# smitty hacmp > extended configuration > extended topology configuration > configure hacmp cluster > add/change/show an hacmp cluster
Cluster Name : cluster5
# smitty hacmp > extended configuration > extended topology configuration > configure hacmp nodes > Add a Node to the HACMP cluster
Node Name : node9
Communication Path to Node : node9rac
# smitty hacmp > extended configuration > extended topology configuration > configure hacmp nodes > Add a Node to the HACMP cluster
Node Name : node10
Communication Path to Node : node10rac
>> 2개의 node를 cluster에 추가
# smitty hacmp > extended configuration > Discover HACMP-related Information from configured Nodes
>> Discover 수행
# smitty hacmp > extended configuration > extended topology configuration > configure Networks > Add a Network to the HACMP Cluster
Network Name : net_ether_01
>> HACMP Network 생성
# smitty hacmp > extended configuration > extended topology configuration > configure HACMP Communication Interface/Devices > Add Communication Interfaces/Devices
>> Add Discovered Communication Interface and Devices 선택
>> Add Communication Interfaces/Devices
>> Communication Interfaces
>> net_ether_01
>> node9rac && node10rac 선택
19. HACMP 구성 > Resource
# smitty hacmp > extended configuration > extended resource configration > HACMP Extended Resource Group Configuration > Add a Resource Group
Resource Group Name : con_rg
Participating Node : node9 && node10
*Startup Policy : Online On All Available Nodes
*Fallover Policy : Bring Offline (On Error Node Only)
*Fallback Policy : Never Fallback
# smitty hacmp > extended configuration > extended resource configration > HACMP Extended Resource Group Configuration > Change/Show Resources and Attributes for a Resource Group
Concurrent Volume Groups : con_vg
20. HACMP 구성 > Verification
# smitty hacmp > extended configuration > extended Verification and Synchronization
Automatically correct error found during verification? : Yes
21. HACMP 기동(smitty clstart/clstop)
# smitty clstart (node9 && node10)
> Start Cluster Services on these nodes : node9
> Startup Cluster Information Daemon? : Yes
> Automatically correct errors found during cluster start? : Yes
# lssrc -ls clstrmgrES
# /usr/es/sbin/cluster/utilities/cltopinfo
# /usr/es/sbin/cluster/utilities/clRGinfo
>> oracle grid infrastructure 설치전에 양쪽 노드에서 lspv로 봤을때... 양쪽모두 'concurrent' 이어야함
----------------------------------------------------
Oracle Grid Infrastructure 설치
----------------------------------------------------
22. 사전 체크 scripts
# chfs -a size=+5G /tmp
>> /tmp 파일시스템은 5GB 이상 여유를 줄것
# smitty pgsp
>> paging space를 권장 메모리 크기인 3.5GB이상으로
# mkdir -p /oracle/_install/
# cp ${소스경로} /oracle/_install/
# chmod -R 775 /oracle/_install
# chown -R oragrid:dba /oracle/_install
# su - oragrid
oragrid@node9 ] unzip /oracle/_install/aix.ppc64_11gR2_grid.zip
oragrid@node9 ] cd /oracle/_install/grid
./runcluvfy.sh stage -pre crsinst -n node9,node10 -verbose > pre_chk.txt
>>> oinstall 관련 에러 && OS Path 관련 에러는 무시가능
>>> 설치 Memory는 2.5GB이상이면 가능하나 설치속도문제로 3.5GB이상 권장
>>> Paging Space는 Memory 사이즈 이상(3.5GB) 권장
>>> 'User Capability', node10의 /oracle, /oracle/db, /oracle/grid 퍼미션등은
runcluvfy.sh가 체크하지 않으므로 직접 확인할 것!!!
23. Rootpre.sh 수행
>>> node9
node9:/oracle/_src/11g_rac/grid# lspv
hdisk8 00c8cf6def2b30f0 racvg concurrent
hdisk9 00c8cf6de50fa7bc racvg concurrent
>>> node10
node10:/oracle/image# lspv
hdisk8 00c8cf6def2b30f0 racvg concurrent
hdisk9 00c8cf6de50fa7bc racvg concurrent
node10:/oracle/image# clRGinfo
-----------------------------------------------------------------------------
Group Name Group State Node
-----------------------------------------------------------------------------
con_rg ONLINE node9
ONLINE node10
node10:/oracle/image#
>>>>> HACMP 가 반드시 기동된 상태이어야 함
>>> node9
node9:/oracle/_src/11g_rac/grid# id oragrid
uid=301(oragrid) gid=302(dba) groups=303(hagsuser)
>>>>> hagsuser가 있는지 확인
node9:/oracle/_src/11g_rac/grid# id
uid=0(root) gid=0(system) groups=2(bin),3(sys),7(security),8(cron),10(audit),11(lp)
node9:/oracle/_src/11g_rac/grid# sh rootpre.sh
>>> node10
node10:/oracle/image# id oragrid
uid=301(oragrid) gid=302(dba) groups=303(hagsuser)
>>>>> hagsuser가 있는지 확인
node10:/oracle/image# id
uid=0(root) gid=0(system) groups=2(bin),3(sys),7(security),8(cron),10(audit),11(lp)
node10:/oracle/image# sh rootpre.sh
24. vnc server 실행 (oragrid 유저로 실행!!!)
25. runInstaller
> Installation Options : Install and Configure Grid Infrastructure for a Cluster
> Installation Type : Advanced Installation
> Select Languages : English
> Grid Plug and Play Information
>> Cluster Name : 아무거나
>> Scan Name : node9scan >> /etc/hosts에 scanIP로 설정한 이름
>> Scan Port : 1526 >> default는 1521이나 추후 scanIP삭제를 위해 1526으로 수정
> Cluster Node Information
>> Hostname 과 Virtual IP Name을 /etc/hosts와 맞도록 수정('Edit')
>> 만일 노드가 하나만 보일경우, 'Add'를 통해서 추가
> Network Interface Usage : 체크후 Next
> Storage Option Information : Automatic Storage Management(ASM)
> Create ASM Disk Group
>> Disk Group Name : DATA
>> Redundancy : External >> High는 copy를 2개/Normal은 copy를 1개/External은 미러를 external storage가 수행
>> Add Disk : ASM으로 사용할 Disk를 선택 && disk의 ownership 이 oragrid:dba인 disk만을 installer가 보여줌
> Specify ASM Password
>> Use same password for these accounts
> Privileged Operating System Groups : 모두 'dba'로 !!!
> Specify Installation Location
>> Oracle Base : /oracle/db
>> Software Location : /oracle/grid
> Create Inventory : /oracle/oraInventory
> Perform Prerequisite Checks : Swap && OS Patch는 무시가능
> Summary : /tmp 가 부족한 경고가 뜨지 않았는지 확인후 Finish
26. runInstaller > Execute Configuration Scripts
> 'Execute Configuration Scripts' 창이 뜬 상태를 그대로 두고(창을 닫으면 않됨!!!!)
>> 반드시 root 로 실행해야하며, 실행순서도 중요
>>> node9
# id
uid=0(root) gid=0(system) groups=2(bin),3(sys),7(security),8(cron),10(audit),11(lp)
# /oracle/oraInventory/orainstRoot.sh
>>> node10
# id
uid=0(root) gid=0(system) groups=2(bin),3(sys),7(security),8(cron),10(audit),11(lp)
# /oracle/oraInventory/orainstRoot.sh
>>> node9
# id
uid=0(root) gid=0(system) groups=2(bin),3(sys),7(security),8(cron),10(audit),11(lp)
# /oracle/grid/root.sh
>>> node10
# id
uid=0(root) gid=0(system) groups=2(bin),3(sys),7(security),8(cron),10(audit),11(lp)
# /oracle/grid/root.sh
27. runInstaller > Execute Configuration Scripts > 창종료
> Process
>> 'Oracle Grid Infrastructure' : [INS-20802] Oracle Cluster Verification Utility failed 메세지는 무시가능
> Finish : Close
28. 설치 확인
# ps -ef | grep d.bin
# netstat -in >> vip와 scanip 가 기동중인지 확인
# /oracle/grid/bin/crs_stat -t
----------------------------------------------------
Oracle 설치(node9에서 oracle 유저로 설치)
----------------------------------------------------
29. oracle 유저로 vncserver 실행 && vnc client 접속
30. oracle유저로 ./runInstaller 실행
> Has `rootpre.sh` been run by root? [y/n] : crs과정에서 실행했으므로 y를 선택
31. runInstaller
> Installation Options : Install database software only
> Grid Installation Options : RAC && node1/node2 선택
> Language : English
> Select Database Edition : Enterprise Edition
&& Select Options : Oracle Partitioning && Oracle Real Application Testing 만 선택
> Specify Installation Location
Oracle Base : /oracle/db
Software Location : /oracle/db/11g
> Privileged Operating System Groups : 모두 dba
> Prerequisite Checks : OS Patch는 무시가능
> Finish
32. runInstaller > Execute Configuration scripts (root계정으로 node9 -> node10의 순으로 실행해야함)
> Execute Configuration scripts 창을 그대로 유지
> node9 에서 root 유저로 로그인 : /oracle/db/11g/root.sh 실행 (crs와 경로가 다름)
> node10 에서 root 유저로 로그인 : /oracle/db/11g/root.sh 실행 (crs와 경로가 다름)
> Execute Configuration scripts 창 : 'OK' 선택
> Close
33. Public/Inter-Connect Configuration 확인
# su - oracle
[oracle@node10:/home/oracle] oifcfg getif
en0 130.130.10.0 global public
en1 192.168.10.0 global cluster_interconnect
[oracle@node10:/home/oracle]
34. OCR/VOTE 디스크를 ASM에서 Raw Device로 Migration (root로 수행)
node9:/# lsvg racvg
node9:/# ls -al /dev | grep rcrs*
node9:/# cd /oracle/grid/bin
node9:/oracle/grid/bin/# ./ocrconfig -add /dev/rcrs_ocr01
node9:/oracle/grid/bin/# ./ocrconfig -add /dev/rcrs_ocr02
node9:/oracle/grid/bin/# ./ocrconfig -delete +DATA
>>> CRS 설치시 OCR로 설정한 ASM영역(+DATA)를 제거
node9:/oracle/grid/bin/# ./crsctl replace votedisk /dev/rcrs_vote01
node9:/oracle/grid/bin/# ./crsctl add css votedisk /dev/rcrs_vote02
node9:/oracle/grid/bin/# ./crsctl add css votedisk /dev/rcrs_vote03
node9:/oracle/grid/bin/# ./crsctl query css votedisk
----------------------------------------------------
DBCA - Database Creation Script 생성 (node9에서 oracle 유저로 설치)
----------------------------------------------------
35. dbca 실행
# oracle로 vnc 접속
# /oracle/db/11g/bin/dbca
> Oracle Real Application Cluster database : Create a Database
> Database Templates : Custom Database
> Database Identification
Configuration Type : Admin-Managed
Global Database Name : RAW (.profile에 설정해준 SID값 && RAW로 설정하면 RAW1,RAW2와 같이 자동으로 인스턴스가 생성됨)
SID Prefix : RAW (.profile에 설정해준 SID값 && RAW로 설정하면 RAW1,RAW2와 같이 자동으로 인스턴스가 생성됨)
Node : Select All !!!
> Management Options
Enterprise Manager 탭 : Configure Enterprise Manager 선택
Automatic Maintenance Tasks 탭 : Enable automatic maintenance tasks 선택해제
> Database Credentials : Use the Same Administrative Password for All Accounts > 'oracle' 입력
> Database File Locations !!!
Storage Type : Automatic Storage Management(ASM)
Use Oracle-Managed Files 선택
>> +DATA 입력 > CRS설치시 OCR과 VOTE로 사용했던 영역을 우선 활용
ASM Credentials : 'oracle' (CRS설치시 암호) 입력
> Recovery Configuration : Specify Flash Recovery Area && Enable Archiving 모두 체크 해제
> Database Content ~ Database Storage : Database의 기본 권장치를 모두 사용
> Creation Options !!!
Create Database && Save as Database Template: 체크 해제
Generate Database Creation Scripts : 체크
>> Destination Directory : /oracle/db/admin/RAW/scripts
36. DBCA에서 생성된 Script 수정
# su - oracle
# cd /oracle/db/admin/RAW/scripts
> init.ora
---------------------------------
# db_create_file_dest=+DATA
>> 주석처리
control_files=('/dev/rrac_control01','/dev/rrac_control02')
>> 라인추가
---------------------------------
> RAW1.sql
---------------------------------
// host /oracle/db/11g/bin/srvctl add database -d RAW -o /oracle/db/11g -p +DATA/RAW/spfileRAW.ora -n RAW -a DATA
host /oracle/db/11g/bin/srvctl add database -d RAW -o /oracle/db/11g -p /dev/rrac_spfile01 -n RAW
>> 수정
---------------------------------
> CreateDB.sql
---------------------------------
// DATAFILE SIZE 700M AUTOEXTEND ON NEXT 10240K MAXSIZE UNLIMITED
DATAFILE '/dev/rrac_system01' SIZE 500M
// SYSAUX DATAFILE SIZE 600M AUTOEXTEND ON NEXT 10240K MAXSIZE UNLIMITED
SYSAUX DATAFILE '/dev/rrac_sysaux01' SIZE 500M
// SMALLFILE DEFAULT TEMPORARY TABLESPACE TEMP TEMPFILE SIZE 20M AUTOEXTEND ON NEXT 640K MAXSIZE UNLIMITED
SMALLFILE DEFAULT TEMPORARY TABLESPACE TEMP TEMPFILE '/dev/rrac_temp01' SIZE 300M
// SMALLFILE UNDO TABLESPACE "UNDOTBS1" DATAFILE SIZE 200M AUTOEXTEND ON NEXT 5120K MAXSIZE UNLIMITED
SMALLFILE UNDO TABLESPACE "UNDOTBS1" DATAFILE '/dev/rrac_undo01' SIZE 500M
// LOGFILE GROUP 1 SIZE 51200K,
LOGFILE GROUP 1 ('/dev/rrac_redo01') SIZE 250M,
// GROUP 2 SIZE 51200K
GROUP 2 ('/dev/rrac_redo02') SIZE 250M
---------------------------------
> CreateDBFiles.sql
---------------------------------
// CREATE SMALLFILE UNDO TABLESPACE "UNDOTBS2" DATAFILE SIZE 200M AUTOEXTEND ON NEXT 5120K MAXSIZE UNLIMITED;
CREATE SMALLFILE UNDO TABLESPACE "UNDOTBS2" DATAFILE '/dev/rrac_undo02' SIZE 500M ;
// CREATE SMALLFILE TABLESPACE "USERS" LOGGING DATAFILE SIZE 5M AUTOEXTEND ON NEXT 1280K MAXSIZE UNLIMITED EXTENT MANAGEMENT LOCAL SEGMENT SPACE MANAGEMENT AUTO;
CREATE SMALLFILE TABLESPACE "USERS" LOGGING DATAFILE '/dev/rrac_user01' SIZE 120M EXTENT MANAGEMENT LOCAL SEGMENT SPACE MANAGEMENT AUTO;
---------------------------------
> postDBCreation.sql
---------------------------------
// ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 3 SIZE 51200K,
// GROUP 4 SIZE 51200K;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 3 ('/dev/rrac_redo03') SIZE 250M,
GROUP 4 ('/dev/rrac_redo04') SIZE 250M;
// create spfile='+DATA/RAW/spfileRAW.ora' FROM pfile='/oracle/db/admin/RAW/scripts/init.ora';
create spfile='/dev/rrac_spfile01' FROM pfile='/oracle/db/admin/RAW/scripts/init.ora';
---------------------------------
37. DB Creation (oracle 계정으로 node1에서 수행)
node9:/# su - oracle
[oracle@node9:/home/oracle] cd /oracle/db/admin/RAW/scripts
[oracle@node9:/home/oracle] ./RAW1.sh 수행
38. ASM && SCAN IP Disable
-> GPFS 실습상 필요 > 이후 참조
++++++++++++++++++++++++++++++++++++++++++++++++++++++
RAC on GPFS
++++++++++++++++++++++++++++++++++++++++++++++++++++++
1. 기본구성
> (Oracle Grid Infrastructure > Raw Device에 위치) + (Oracle DB creation > GPFS를 이용)
> sg247541_10g_RAC_AIX_with_GPFS.pdf > Chapter 2.1.6 GPFS Configuration 참조
2. GPFS 기본 구조
> node & tie-break disk
>> GPFS 2.2 까지는 노드가 최소 3개 이상이어야 gpfs구성이 가능 (노드만을 가지고 quorum을 따짐)
>> GPFS 2.3 부터는 노드가 2개이어도 tie-break disk를 통해 quorum을 구성할 수 있음
>> cluster의 기본 권장치로 tie-break disk는 1G짜리 3개 이상을 권장하나 1G짜리 하나도 구성에는 무관
>> 노드수가 3개 이상이면 tie-break가 없어도 되며, 노드수가 많아지면 tie-break를 쓰지 않고 node quorum을 쓰는 것이 권장사항!!!
> gpfs node 파일
>> ex. node1rac:quorum-manager
node1rac : /etc/hosts에 등록된 host명으로 일반적으로 private 망을 사용하나 public도 무관, 일반적으로 oracle의 interconnect를 이용
quorum-manager : RAC구성시에는 일반적으로 모두 quorum-manager로 구성
> gpfs disk 파일
>> ex. hdisk10:::dataAndMetadata:1:nsd01
hdisk10 : lspv로 나오는 disk이름으로 nsd를 구성할 disk
::: : PrimaryServer와 BackupServer를 지정하는 항목으로 일반적으로 지정하지 않음
단, Oracle 10g까지는 RAC한쪽노드에서 direct로 연결된 san cable 장애시 해당 노드는 서비스를 중단하였으나,
11g부터는 한쪽 san cable에 장애가 나더라도 network을 통해 다른쪽으로 서비스를 할 수 있게 되어 Primary와
Backup을 지정하여서 Primary장애시 Secondary를 통해 서비스를 할 수 있게 됨
dataAndMetadata : 일반적으로 Data와 Metadata를 같은 disk에 구성
만일 성능차가 많이 나는 disk가 있다면 metadata를 성능이 우수한 disk에 할당하는 것이 유리
1 : failure Group, gpfs의 미러링인 gpfs replication을 위한 설정으로 같은 nsd내에서는 replication이 불가능하기 때문에 비슷한 사이즈의 group이 쌍으로 존재해야함
nsd01 : lspv 에서 보여질 desired name, 값을 주지 않으면 gpfs0Xnsd로 자동으로 생성됨
3. GPFS 파일셋 설치
node10:/oracle/image/gpfs321/gpfs3.2.1-15# lslpp -L | grep gpfs
gpfs.base 3.2.1.15 C F GPFS File Manager
gpfs.docs.data 3.2.1.1 C F GPFS Server Manpages and
4. GPFS 구성파일 설정
> gpfs_node
node9:/home/scripts/gpfs# cat gpfs_nodes
node9rac:quorum-manager
node10rac:quorum-manager
> gpfs tie-break disk
node9:/home/scripts/gpfs# cat gpfs_disks_tb
hdisk4
> gpfs nsd disk
node9:/home/scripts/gpfs# cat gpfs_disks
hdisk7
*. 기존에 gpfs로 한번 잡힌 disk는 lspv에서 'gpfs'표시되며, 이 경우 다시 gpfs용으로 잡을 수 없음 > 정보를 깨고 다시 구성해야함
node9 >>
dd if=/dev/zero of=/dev/rhdiskXX bs=1024 count=100
rmdev -dl hdiskXX
cfgmgr -v
chdev -l hdiskXX -a reserve_policy=no_reserve
chdev -l hdiskXX -a pv=yes
node10 >>
dd if=/dev/zero of=/dev/rhdiskXX bs=1024 count=100
rmdev -dl hdiskXX
cfgmgr -v
chdev -l hdiskXX -a reserve_policy=no_reserve
chdev -l hdiskXX -a pv=yes
---------------------
for i in 2 4 7
do
dd if=/dev/zero of=/dev/rhdisk${i} bs=1024 count=100
rmdev -dl hdisk${i}
done
cfgmgr -v
for i in 2 4 7
do
chdev -l hdisk${i} -a reserve_policy=no_reserve
chdev -l hdisk${i} -a pv=yes
done
---------------------
5. GPFS 구성 > cluster
node9:/usr/lpp/mmfs/bin# ssh node10rac >>> gpfs를 사용할 interconnect IP로 SSH를 접속해서 암호를 설정해주어야함
node10:/usr/lpp/mmfs/bin# ssh node9rac >>> gpfs를 사용할 interconnect IP로 SSH를 접속해서 암호를 설정해주어야함
node10:/usr/lpp/mmfs/bin# ./mmcrcluster -N /home/scripts/gpfs/gpfs_nodes -p node9rac -s node10rac -r /usr/bin/ssh -R /usr/bin/scp -C cluster9
>> ssh를 사용할 경우
node10:/usr/lpp/mmfs/bin# ./mmcrcluster -N /home/scripts/gpfs/gpfs_nodes -p node9rac -s node10rac -r /usr/bin/rsh -R /usr/bin/rcp -C cluster9
>> rsh를 사용할 경우
node9:/usr/lpp/mmfs/bin# mmlscluster
6. GPFS 구성 > Oracle Data용 NSD 생성
node9:/usr/lpp/mmfs/bin# ./mmcrnsd -F /home/scripts/gpfs/gpfs_disks
node9:/usr/lpp/mmfs/bin# ./mmlsnsd
7. GPFS 구성 > tie-break용 NSD 생성
node9:/usr/lpp/mmfs/bin# ./mmcrnsd -F /home/scripts/gpfs/gpfs_disks_tb
node9:/usr/lpp/mmfs/bin# ./mmlsnsd
8. GPFS 구성 > tiebreaker disk 설정
node9:/usr/lpp/mmfs/bin# lspv
hdisk0 00c677cd1781bc00 rootvg active
hdisk1 005162ba6bc84c46 oravg active
hdisk2 00c8cf6d65543f66 None
hdisk3 00c8cf6d5bc43002 None
hdisk4 00c8cf6d65544497 gpfs2nsd
hdisk5 00c8cf6da1eda0c9 None
hdisk6 00c8cf6da72636c7 None
hdisk7 00c8cf6d65544d9a gpfs1nsd
hdisk8 00c8cf6def2b30f0 racvg concurrent
hdisk9 00c8cf6de50fa7bc racvg concurrent
node9:/usr/lpp/mmfs/bin# bootinfo -s hdisk4
1024
node9:/usr/lpp/mmfs/bin# ./mmchconfig tiebreakerDisks=gpfs2nsd
Verifying GPFS is stopped on all nodes ...
mmchconfig: Command successfully completed
mmchconfig: Propagating the cluster configuration data to all
affected nodes. This is an asynchronous process.
node9:/usr/lpp/mmfs/bin#
>> nsd 삭제를 위해서는 tiebreaker disk를 삭제해야 함 > ./mmchconfig tiebreakerDisks=no
>> tiebreaker disk를 여러개 설정할 경우 > ./mmchconfig tiebreakerDisks='gpfs2nsd;gpfs3nsd;gpfs4nsd'
9. gpfs 파일 시스템 생성
node9:/usr/lpp/mmfs/bin# ./mmstartup -a : 모든 노드에서 gpfs기동
node9:/usr/lpp/mmfs/bin# ./mmcrfs /oradata /dev/oradata -F /home/scripts/gpfs/gpfs_disks -A yes -B 512k -n 2
> /oradata : mount point
> /dev/oradata : device 명
> -F /home/scripts/gpfs/gpfs_disks : nsd 파일명(mmcrnsd를 실행하면 기존 disk파일의 내용이 수정되어 있음)
> -A yes : mmstartup시 automount 될것인지 여부
> -B 512k : Block size 16k~1MB까지 설정가능, Oracle은 일반적으로 256k(512k)를 권장하나, file size가 작은 그룹웨어나 이메일은 Block size를 작게 설정해야함
> -n 2 : 파일 시스템을 사용할 노드의 개수, 한번 설정하면 수정이 불가능하므로 여유를 둬서 크게 설정하기도 함
cf. mmshutdown -a : 모든 노드에서 gpfs종료
10. gpfs 파일시스템 마운트
node9:/usr/lpp/mmfs/bin# ./mmmount all -a
>> all : 모든 파일 시스템 ex. mmmount /oradata -a : 모든 노드에서 /oradata 를 마운트
>> -a : 모든 노드
11. oracle용 permission 변경
node9:/usr/lpp/mmfs/bin# chown -R oracle:dba /oradata ; chmod -R 755 /oradata
12. GPFS 파일시스템에 Database 생성
# srvctl stop database -d RAW >> 기존 DB 인스턴스 종료
# su - oracle
[oracle@node9:/oracle/db/11g/bin] /oracle/db/11g/bin/dbca
> Oracle Real Application Cluster database : Create a Database
...
> Database File Locations !!!
Storage Type : Cluster File System
Use Common Location for All Database Files : 선택
Database Files Location : /oradata
...
> Creation Options !!!
Create Database && Save as Database Template: 체크 해제
Generate Database Creation Scripts : 체크
>> Destination Directory : /oracle/db/admin/GPFS/scripts
[oracle@node9:/oracle/db/admin/GPFS/scripts] ./GPFS1.sh
[oracle@node9:/home/oracle] srvctl start database -d GPFS
[oracle@node9:/home/oracle] export ORACLE_SID=GPFS1
[oracle@node9:/home/oracle] sqlplus "/as sysdba"
SQL*Plus: Release 11.2.0.1.0 Production on Wed Nov 2 15:03:37 2011
Copyright (c) 1982, 2009, Oracle. All rights reserved.
Connected to:
Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production
With the Partitioning, Real Application Clusters and Real Application Testing options
SQL> select name from v$database;
NAME
---------------------------
GPFS
SQL> quit
Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production
With the Partitioning, Real Application Clusters and Real Application Testing options
[oracle@node9:/home/oracle]
> oracle 계정의 .profile에서 'export ORACLE_SID=GPFS1' 처럼 SID를 변경해주어야 새로 생성한 Database로 접속가능(node9 & node10)
13. GPFS 관련 고려사항
sg247541_10g_RAC_AIX_with_GPFS.pdf > Chapter 2.1.7 Special consideration for GPFS with ORACLE 참조
ex. CIO나 DIO 옵션을 적용하면 않됨
sg247541_10g_RAC_AIX_with_GPFS.pdf > Chapter 3.5
> mklv 생성시 Oracle은 '-TO' 옵션을 주도록 가이드
>> mklv -y rac_control01 -t jfs2 racvg 2
>>> IBM에서 사용하는 방법
>>> LVCB(lv의 정보를 가지는 영역)를 LV의 앞 512byte에 생성
>> mklv -y LVNAME -T O -w n -s n -r n VGNAME NumPPs
>>> Oracle에서 가이드 하는 방법
>>> dd 로 disk 데이터 복사시 'skip=1'을 주지 않으면 LVCB 때문에 문제가 발생할 수 있음,
이 경우 T 옵션을 주면 LVCB 정보를 disk의 512byte 블럭에 두지 않고 VGDA에 둠
>>> lslv [LVname]
> 'DEVICESUBTYPE : DS_LVZ' 로 확인 가능
> 단, zero offset의 경우는 단순히 -T옵션뿐만 아니라 VolumeGroup의 Type에 따라 타입이 달라짐
sg247541_10g_RAC_AIX_with_GPFS.pdf > p148 참조
>>> 오라클 입장에서 dd로 disk control 방법이 달라지므로 이에 대해서 주의가 필요...
>>> Volume Group 선택시 Scalable VG이면 모두 T옵션이 적용되게 됨
++++++++++++++++++++++++++++++++++++++++++++++++++++++
기동 및 종료
++++++++++++++++++++++++++++++++++++++++++++++++++++++
+. 구성환경
OS > PowerHA > CRS(Grid Infrastructure) > ASM > Oracle
(or)
OS > GPFS > CRS > Oracle
+. 기동순서
OS -> Hacmp start : smitty clstart
-> CRS start : /oracle/grid/bin/crsctl start crs >> VIP & SCAN IP 기동(root로 node9 && node10 모두에서 실행)
>> ps -ef | grep d.bin 으로 프로세스가 많이 떠 있지 않으면... or netstat -in으로 VIP가 제대로 올라와 있지 않으면...
>> /oracle/grid/bin/crsctl stop crs -f : 강제로 crs를 내린 후 다시 crs start (root로 실행)
-> DB start : srvctl start database -d GPFS
(or)
OS -> GPFS start(mmstartup -a) / GPFS mount (mmmount all -a)
-> CRS start('/oracle/grid/bin/crsctl start crs' - VIP & SCAN IP 기동)
-> DB start : srvctl start database -d GPFS
-----------------------------
정상기동시 netstat -in 정보
[oracle@node9:/oracle/image/11g_rac] netstat -in
Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
en0 1500 130.130.10 130.130.10.109 11571606 0 2977459 3 0
en0 1500 130.130.10 130.130.10.209 11571606 0 2977459 3 0 >>> VIP
en0 1500 130.130.10 130.130.10.159 11571606 0 2977459 3 0 >>> ScanIP
en1 1500 192.168.10 192.168.10.109 2952062 0 1466628 3 0
[oracle@node9:/oracle/image/11g_rac]
[oracle@node10:/oracle/db/admin/GPFS] netstat -in
Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
en0 1500 130.130.10 130.130.10.110 12690945 0 5488816 3 0
en0 1500 130.130.10 130.130.10.210 12690945 0 5488816 3 0
en1 1500 192.168.10 192.168.10.110 2172186 0 1585354 3 0
[oracle@node10:/oracle/db/admin/GPFS]
-----------------------------
++++++++++++++++++++++++++++++++++++++++++++++++++++++
ASM && SCAN IP Disable
++++++++++++++++++++++++++++++++++++++++++++++++++++++
1. ASM Disable
[oracle@node9:/home/oracle] srvctl stop database -d RAW
[oracle@node9:/home/oracle] srvctl stop diskgroup -g DATA
>> DATA는 ASM 생성시 '+DATA'로 정의한 이름, +는 ASM을 의미
[oracle@node9:/home/oracle] srvctl stop asm -n node9
[oracle@node9:/home/oracle] srvctl stop asm -n node10
[oracle@node9:/home/oracle] srvctl disable diskgroup -g DATA
[oracle@node9:/home/oracle] srvctl disable asm
2. SCAN IP Disable
[oracle@node9:/home/oracle] srvctl stop scan_listener
[oracle@node9:/home/oracle] srvctl stop scan
[oracle@node9:/home/oracle] su -
node9:/# srvctl disable scan_listener
node9:/# /oracle/grid/bin/srvctl disable scan
node9:/# netstat -in
++++++++++++++++++++++++++++++++++++++++++++++++++++++
SSH Configuration
++++++++++++++++++++++++++++++++++++++++++++++++++++++
1. SSH 파일셋 설치
>> AIX 6.1 TL5 이상은 OS CD 에 파일셋이 존재 && 그외의 경우 toolbox CD가 필요
# lslpp -L | grep open
openssh.base.client 5.4.0.6100 C F Open Secure Shell Commands
openssh.base.server 5.4.0.6100 C F Open Secure Shell Server
openssl.base 0.9.8.1300 C F Open Secure Socket Layer
openssl.license 0.9.8.1300 C F Open Secure Socket License
2. Server Key 생성
>>> node 1
# ssh-keygen -t rsa1 -q -f /etc/ssh/ssh_host_key -N ''
# ssh-keygen -t dsa -q -f /etc/ssh/ssh_host_dsa_key -N ''
# ssh-keygen -t rsa -q -f /etc/ssh/ssh_host_rsa_key -N ''
# stopsrc -s sshd
# startsrc -s sshd
3. Client Key 생성
>>> node9
# ssh-keygen -t rsa -q -f ~/.ssh/id_rsa -N ''
# cd ~/.ssh/
# ssh-keyscan -t rsa node9 > ~/.ssh/known_hosts
# ssh-keyscan -t rsa node10 >> ~/.ssh/known_hosts
# cat id_rsa.pub >> ~/.ssh/authorized_keys
4. SSH key 분배(root, oracle, oragrid 계정 모두 동일한 key사용)
>>> node9
# scp -pr /.ssh node10:/
# cp -pr /.ssh /home/oracle/.ssh
# chown -R oracle:dba /home/oracle/.ssh
# su - oracle
# scp -pr .ssh node10:/home/oracle/
# cp -pr /.ssh /home/oragrid/
# chown -R oragrid:dba /home/oragrid/.ssh
# scp -pr .ssh node10:/home/oragrid
5. 계정별로 SSH로그인 수행(초기 메세지 때문에 수행이 꼭 필요)
>>> node9
# su -
# ssh node10
# ssh node10rac (GPFS용 inter-connect)
# su - oracle
# ssh node10
# su - oragrid
# ssh node10
>>> node10
# su -
# ssh node9
# ssh node9rac (GPFS용 inter-connect)
# su - oracle
# ssh node9
# su - oragrid
# ssh node9
++++++++++++++++++++++++++++++++++++++++++++++++++++++
Oracle RAC 기본 운영
++++++++++++++++++++++++++++++++++++++++++++++++++++++
+. Cluster Management - crsctl (root user)
node9:/oracle/grid/bin# crsctl -h
node9:/oracle/grid/bin# crsctl start crs
node9:/oracle/grid/bin# crsctl check crs
node9:/oracle/grid/bin# crsctl status res -t
node9:/oracle/grid/bin# crsctl check cluster
node9:/oracle/grid/bin# ocrcheck/crsctl query css votedisk
node9:/oracle/grid/bin# crsctl check ctss (Cluster Time Synchronization service)
node9:/oracle/grid/bin# crsctl config crs
node9:/oracle/grid/bin# crsctl enable/disable crs
node9:/oracle/grid/bin# crsctl get css misscount
node9:/oracle/grid/bin# crsctl relocate resource ora.node1.vip -s node1 -n node2 -f
>> (VIP 리소스를 1번 노드에서 2번 노드로 relocate하기)
node9:/oracle/grid/bin# crsctl query crs activeversion
node9:/oracle/grid/bin# oifcfg getif
node9:/oracle/grid/bin#
+. Oracle Server Management - srvctl (oracle user)
node9:/oracle/grid/bin# cat ~oracle/.profile | grep SID
export ORACLE_SID=GPFS1
>> 환경변수의 SID가 사용하고자하는 instance와 일치하여야 sqlplus등에서 문제가 없음
node9:/oracle/grid/bin# su - oracle
[oracle@node9:/home/oracle] srvctl -h
[oracle@node9:/home/oracle] srvctl start/stop instance -d ${database_name} -I ${instance_name} -o option
[oracle@node9:/home/oracle] srvctl start/stop database -d ${database_name} -o option
[oracle@node9:/home/oracle] srvctl start/stop listener -l ${listener_name}
>> RAC 환경에서는 'lsnrctl start/stop' 명령어로 리스너 기동시 문제가 발생할수있음
[oracle@node9:/home/oracle] ps -ef | grep smon ; ps -ef | grep tns
>> smon은 database별로 하나씩만 기동되며, 오라클 instance의 필수 프로세스로 db기동여부를 확인할 수 있음
>> smon을 kill -9로 죽이면 database instance가 바로 crash 됨
+. Oracle 정보 조회 (sqlplus)
[oracle@node9:/home/oracle] sqlplus "/as sysdba"
...
SQL> col ${instance_name} for a200
SQL> col status for a20
SQL> set lines 200
SQL> select instance_name, startup_time, status, version from v$instance;
INSTANCE_NAME STARTUP_TIME STATUS VERSION
---------------------------- --------------- -------------------- -----------
GPFS1 04-NOV-11 OPEN 11.2.0.1.0
SQL>
SQL> select comp_name, status, version from dba_registry;
SQL> select * from v$logfile;
SQL> >> Redo Log 정보 조회 -> Redo Log는 가장 빠른 disk를 사용해야 함
SQL> col member for a50
SQL> select member, status from v$logfile;
>> Re-do Log는 그룹당 4개로 구성되며, 그룹내에서 rolling 형태로 기록되며, 그룹간 log를 넘어갈때마다 Archive 로그를 남김
SQL> select name, status from v$controlfile;
SQL> alter database backup controlfile to '$PWD/control.bak'; >> control file 백업
SQL> alter database backup controlfile to trace as '$PWD/con.txt'; >> control file의 정보를 trace로 백업
+. Oracle Basic Admin 예제 #1 - Table Space / 백업(imp/exp)
1. tablespace 생성
SQL> create tablespace test datafile '/oradata/test01.dbf' size 100m autoextend on;
2. 데이터파일 추가
SQL> alter tablespace test add datafile '/oradata/test02.dbf' size 100m autoextend on;
3. 데이터파일 리사이즈 및 속성 변경
SQL> select tablespace_name, bytes/1024/1024 , autoextensible from dba_data_files;
SQL> alter database datafile '/oradata/test02.dbf' resize 200m;
SQL> alter database datafile '/oradata/test01.dbf' autoextend off;
SQL> select tablespace_name, file_name, bytes/1024/1024 , autoextensible from dba_data_files;
4. 유저생성, 권한 부여
SQL> create user test identified by test default tablespace test;
SQL> grant connect, resource, dba to test;
5. test데이터 만들기
SQL> conn test/test
SQL> create table t1 as select * from dba_objects;
SQL> insert into t1 select * from t1;
SQL> commit;
SQL> alter system checkpoint;
6. 오라클 백업 복구 ( exp )
SQL> mkdir /oradata/backup
SQL> exp system/oracle file=/oradata/backup/full.dmp log=/oradata/backup/full.log full=y feedback=10000 buffer=10240000
7. 유저삭제 및 확인
SQL> select count(*) from test.t1
SQL> drop user test cascade;
SQL> select username from dba_users;
8. 유저생성, 권한 부여
: full로 export된 파일에는 user정보가 존재하나, import 시 동일한 유저가 존재하면 그 정보를 가지고 데이터를 복구
SQL> create user test identified by test default tablespace test;
SQL> grant connect, resource, dba to test;
9. 데이터 복구 및 확인
SQL> imp system/oracle file=/oradata/backup/full.dmp fromuser=test touser=test feedback=100
SQL> select count(*) from test.t1
+. AWR로 성능 분석
@?/rdbms/admin/awrrpt.sql
>> 지정 시간대의 성능데이터
@?/rdbms/admin/awrddrpt.sql
>> 두시간대를 지정하여 성능관련 값들의 비교치를 레포트로 산출
@?/rdbms/admin/addmrpt.sql
>> 성능에 영향을 미치는 요인들에 대한 문제점(SQL...)에 대한 분석 결과 및 권장안 레포트
+. 백업/복구 방안
1. Hot backup : Archive와 Control file 역시 백업
Cold backup
2. exp/imp (export/import)
expdp /impdp
3. RMAN
4. Flashback : oracle의 undo-table space를 통한 복구방법
ex. 특정 table만 한시간전으로 데이터 복원
5. logminer : Archive 파일을 조회해서 transaction을 복구하는 방안
+. PowerHA IP 구성도
sg247739 - PowerHA for AIX Cookbook.pdf
Tips for implementing PowerHA in a virtual I/O environment
> http://www.ibm.com/developerworks/aix/library/au-powerha/?S_TACT=105AGY20&S_CMP=HP
> netmon.cf 설정
'IBM Power' 카테고리의 다른 글
AIX의 root 패스워드 분실시 reset (0) | 2012.02.02 |
---|---|
aix 6.1 performance 관련 bug fix (0) | 2012.01.31 |
oracle에서 cio(concurrent i/o) 사용을 위한 옵션 (0) | 2012.01.09 |
init.ora 기본 파라미터 변경 for 11gr2 on aix (0) | 2012.01.09 |
Oracle 10g control file 관리 (0) | 2011.12.21 |