jeudi 19 février 2015

Using RMAN


  • Some useful commands
-- Level 0 incremental backup: the baseline all later incrementals apply to
backup incremental level 0 database;

-- Restore a single datafile from a specific tagged backup
-- (RESTORE takes DATAFILE directly; "restore database datafile" is invalid syntax)
restore datafile 9 from tag='TAG20101201T170101';

-- Verify that archived logs recorded in the repository still exist on disk
crosscheck archivelog all;
-- Back up only archivelogs that do not yet have at least 1 backup
backup archivelog all not backed up 1 times;
-- Remove archivelogs older than 1 day that already have 1 tape (sbt) backup
delete noprompt archivelog until time 'sysdate - 1' backed up 1 times to device type sbt;
-- Back up unbacked-up archivelogs, then delete them from disk
backup archivelog all not backed up delete input;

-- Restore a range of archived logs by log sequence number (thread 1)
restore archivelog sequence between 932 and 935 thread=1;
-- Restore the archived logs covering a given SCN range
restore archivelog scn between 6055449295951 and 6055451858904;

---- controlfile ---------------------------------------------------------------

-- Automatically back up the controlfile (and spfile) after each backup
CONFIGURE CONTROLFILE AUTOBACKUP ON;
-- Location of the snapshot controlfile used for a consistent read during backup
CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/u01/app/oracle/product/10.2.0.4/dbs/snapcf_cis.f';
-- %F expands to c-<dbid>-<yyyymmdd>-<seq>; required so autobackups can be found
CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '/u02mars/ORADATA/cis/autobackup/%F';

-- Restore the controlfile to an alternate location from the latest autobackup
restore controlfile to '/oradata/ctvfile.bak' from autobackup;
-- ...or from a specific autobackup piece (name follows the %F format above)
restore controlfile to '/oradata/ctvfile.bak' from 'c-4119285822-20100302-0a';
-- Restore the server parameter file from autobackup
restore spfile from autobackup;
-- Recreate the spfile from the current in-memory settings (run in SQL*Plus)
create spfile from memory;

-- Compressed backupsets on disk, 4 parallel channels
CONFIGURE DEVICE TYPE DISK BACKUP TYPE TO COMPRESSED BACKUPSET PARALLELISM 4;


--- duplicate ------------------------------------------------------------------
-- Duplicate the target database through NetBackup (SBT) channels.
-- Target channels read the source backups; auxiliary channels write the clone.
run {
allocate channel nbu_sbt0 type 'SBT_TAPE' PARMS 'SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so, ENV=(NB_ORA_SERV=mynetback,NB_ORA_CLIENT=myserv)';
allocate channel nbu_sbt1 type 'SBT_TAPE' PARMS 'SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so, ENV=(NB_ORA_SERV=mynetback,NB_ORA_CLIENT=myserv)';
allocate auxiliary channel nbu_sbt2 type 'SBT_TAPE' PARMS 'SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so, ENV=(NB_ORA_SERV=mynetback,NB_ORA_CLIENT=myserv)';
allocate auxiliary channel nbu_sbt3 type 'SBT_TAPE' PARMS 'SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so, ENV=(NB_ORA_SERV=mynetback,NB_ORA_CLIENT=myserv)';
-- Stop recovery at a given archived log sequence
DUPLICATE TARGET DATABASE TO MYDBDEV until sequence=27757 thread=1 NOFILENAMECHECK;
}
-- Point-in-time duplicate using an explicit TO_DATE expression
DUPLICATE TARGET DATABASE TO MYDBDEV until time="to_date('30/06/2011 20:00:00', 'DD/MM/YYYY HH24:MI:SS')" NOFILENAMECHECK;

-- Alternative: set NLS_DATE_FORMAT in the shell so UNTIL TIME takes a bare literal
export NLS_DATE_FORMAT='dd/mm/yyyy hh24:mi:ss'
DUPLICATE TARGET DATABASE TO MYDBDEV until time '04/03/2010 04:00:00' NOFILENAMECHECK;

-- delete ----------------------------------------------------------------------
-- Delete backups no longer needed under the configured retention policy
delete noprompt obsolete;
-- Delete archivelogs older than 8 hours that already have 1 tape (sbt) backup
delete noprompt archivelog until time 'sysdate - 8/24' backed up 1 times to device type sbt;
-- Delete obsolete backups relative to an explicit 7-day recovery window
DELETE NOPROMPT OBSOLETE RECOVERY WINDOW OF 7 DAYS;

-- Mark repository entries whose files are gone as EXPIRED, then purge them
crosscheck archivelog all;
DELETE NOPROMPT EXPIRED archivelog all completed before 'sysdate -40' ;

crosscheck backup;
DELETE NOPROMPT EXPIRED backup completed before 'sysdate -40';

-- list ------------------------------------------------------------------------
-- Disk copies of archivelogs created in the last 2 days
list copy of archivelog from time='sysdate - 2';
-- One summary line per backupset
list backupset by backup summary;
-- Capture the listings to a log file
spool log to list.txt
-- Backupset contents broken down per datafile / archivelog
list backupset by file;
list backup of database;
list backup of tablespace tbs_tab_users;
-- Database incarnations (needed after an OPEN RESETLOGS)
list incarnation of database;
-- report ----------------------------------------------------------------------
-- All datafiles in the database, including temporary datafiles
report schema;
-- Datafiles not backed up within the last day
report need backup days 1;
report obsolete;
report need backup;
-- Datafiles touched by unrecoverable (NOLOGGING) operations
report unrecoverable;
REPORT OBSOLETE REDUNDANCY = 2 DEVICE TYPE sbt;

 validate database;
 select * from V$DATABASE_BLOCK_CORRUPTION

-- catalog ---------------------------------------------------------------------
-- Create the recovery-catalog owner (run as a DBA in the catalog database)
create user catrman identified by catrman
default tablespace tbs_tab_repos temporary tablespace temp;
grant CONNECT, RESOURCE, RECOVERY_CATALOG_OWNER to catrman;
-- Build the catalog schema, then register the target database in it
RMAN> CONNECT CATALOG catrman/catrman;
RMAN> CREATE CATALOG TABLESPACE tbs_tab_repos;
rman target / catalog catrman/catrman@catrman;
register database;
unregister database;
  • Dealing controlfile enqueue issue
-- Sessions currently holding or waiting on the controlfile (CF) enqueue
SELECT
    s.sid,
    s.username   AS "User",
    s.program,
    s.module,
    s.action,
    s.logon_time AS "Logon",
    l.*
FROM v$session s
INNER JOIN v$enqueue_lock l
    ON l.sid = s.sid
WHERE l.type = 'CF'
  AND l.id1 = 0
  AND l.id2 = 2;

  • Restore database skipping tablespaces or renaming datafiles

-- Generate the RMAN "switch datafile ... to datafilecopy" commands for every
-- datafile except those in the skipped tablespaces (TBS1, TBS2).
-- Output is spooled to switch.txt for use after RESTORE ... SKIP TABLESPACE.
col redondance format 999,999
col fichier format a50
set pages 800
set lines 100
set feedback off
set heading off
spool switch.txt
-- Target directory where the restored copies live (note the trailing /)
define REP_CIBLE=/u01/ORADATA/myndb/
-- Copy name = <REP_CIBLE><file_id><original base filename>
select 'switch datafile '||file_id||' to datafilecopy ''&&REP_CIBLE'||file_id||fichier||''';'
from
(
-- strip the directory part: keep everything after the last '/'
select substr(fichier,instr(fichier,'/',-1)+1) fichier,file_id
from
( select file_name fichier,file_id from dba_data_files where tablespace_name not in
('TBS1','TBS2')
) FIC) T
/
Generates the "switch datafile" script
for restoring the database with skipped tablespaces

-- Generate the RMAN "set newname for datafile" commands for every datafile
-- except those in the skipped tablespaces (TBS1, TBS2); spooled to newname.txt
-- for use before RESTORE ... SKIP TABLESPACE.
col redondance format 999,999
col fichier format a50
set pages 800
set lines 100
set feedback off
set heading off
spool newname.txt
-- Target directory for the restored datafiles (note the trailing /)
define REP_CIBLE=/u01/ORADATA/myndb/
-- New name = <REP_CIBLE><file_id><original base filename>
select 'set newname for datafile '||file_id||' to ''&&REP_CIBLE'||file_id||fichier||
''';'
from
(
-- strip the directory part: keep everything after the last '/'
select substr(fichier,instr(fichier,'/',-1)+1) fichier,file_id
from
( select file_name fichier,file_id from dba_data_files where tablespace_name not in
('TBS1','TBS2')
) FIC) T
/

Generates the "set newname" script for
restoring the database with skipped tablespaces

-- Restore the database from NetBackup, skipping tablespaces TBS1 and TBS2 and
-- relocating every remaining datafile under /u01/ORADATA/myndb/.
-- The SET NEWNAME / SWITCH lines are the ones generated by the scripts above.
run {
allocate channel nbu_sbt0 type 'SBT_TAPE' PARMS
'SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so, ENV=(NB_ORA_SERV=frnbumaster,NB_ORA_CLIENT=mynclient)';
allocate channel nbu_sbt1 type 'SBT_TAPE' PARMS
'SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so, ENV=(NB_ORA_SERV=frnbumaster,NB_ORA_CLIENT=mynclient)';
allocate channel nbu_sbt2 type 'SBT_TAPE' PARMS
'SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so, ENV=(NB_ORA_SERV=frnbumaster,NB_ORA_CLIENT=mynclient)';
allocate channel nbu_sbt3 type 'SBT_TAPE' PARMS
'SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so, ENV=(NB_ORA_SERV=frnbumaster,NB_ORA_CLIENT=mynclient)';
-- Redirect each restored datafile to its new location
set newname for datafile 1 to '/u01/ORADATA/myndb/1system01.dbf';
set newname for datafile 2 to '/u01/ORADATA/myndb/2undotbs01.dbf';
set newname for datafile 3 to '/u01/ORADATA/myndb/3sysaux01.dbf';
set newname for datafile 10 to '/u01/ORADATA/myndb/10tbs_tab_iris_01.dbf';
set newname for datafile 11 to '/u01/ORADATA/myndb/11tbs_ind_iris_01.dbf';
-- SKIP FOREVER permanently drops the skipped tablespaces from the controlfile
restore database skip forever tablespace TBS1,TBS2;
-- Point the controlfile at the restored copies
switch datafile 1 to datafilecopy '/u01/ORADATA/myndb/1system01.dbf';
switch datafile 2 to datafilecopy '/u01/ORADATA/myndb/2undotbs01.dbf';
switch datafile 3 to datafilecopy '/u01/ORADATA/myndb/3sysaux01.dbf';
switch datafile 10 to datafilecopy '/u01/ORADATA/myndb/10tbs_tab_iris_01.dbf';
switch datafile 11 to datafilecopy '/u01/ORADATA/myndb/11tbs_ind_iris_01.dbf';
}


ASM


  • ASM instance

  • ASM Disk group



  • Convert a tablespace to ASM
        -- 1. Take the tablespace offline so its datafiles can be switched
        SQL>alter tablespace tbs1 offline;
-- 2. Make an image copy of its datafiles inside the ASM diskgroup
RMAN>backup as copy tablespace tbs1 format '+DATA';
        -- 3. Point the controlfile at the ASM copies
        RMAN>switch tablespace tbs1 to copy; 
        -- 4. Bring the tablespace back online on its new ASM files
        SQL>alter tablespace tbs1 online;

dimanche 15 février 2015

Dataguard : SETUP


  • Creating A DataGuard Broker Configuration Using DGMGRL


  1. Configure the primary's tnsnames.ora with an entry matching the standby's db_unique_name
  2. ALTER SYSTEM SET dg_broker_start=TRUE; # on both sites; starts the DMON process
  3. dgmgrl> connect sys/xxxxx
-- Create the broker configuration, anchored on the primary database
CREATE CONFIGURATION 'myndb_config' AS
PRIMARY DATABASE IS 'myndb'
CONNECT IDENTIFIER IS myndb;

show configuration;

-- Register the physical standby with the broker
ADD DATABASE 'myndb_pra' AS
CONNECT IDENTIFIER IS myndb_pra
MAINTAINED AS PHYSICAL;
show configuration; 

-- Inspect all broker properties of each database
SHOW DATABASE VERBOSE gdddev1;
SHOW DATABASE VERBOSE gdddev1_terre; 

-- Start broker management of the whole configuration
ENABLE CONFIGURATION; 

-- Asynchronous redo transport (maximum-performance style) for the standby
EDIT DATABASE 'gdddev1_terre' SET PROPERTY LogXptMode='ASYNC';



  • Activating Standby and going back to Standby
-- Stop log shipping to the standby, then stop managed recovery.
-- NOTE: statement order in this whole sequence is significant.
alter system set log_archive_dest_state_3='defer'; # primary site
alter database recover managed standby database cancel;

-- Disable the broker so it does not restart recovery behind our back
alter system set DG_BROKER_START=FALSE;
-- Guaranteed restore point: lets us flash the database back to this exact
-- point afterwards, so the standby can be re-converted instead of rebuilt
CREATE RESTORE POINT before_pra_myn GUARANTEE FLASHBACK DATABASE;
select * from v$restore_point;
-- Open the standby read-write for the DR test
alter database activate standby database;
startup mount force;
alter database open;

-- Sample test activity on the activated database
create table test ( id number);

-- Undo the activation: flash back to the restore point, convert back to standby
shutdown immediate
STARTUP MOUNT FORCE;
flashback database to restore point before_pra_myn;
ALTER DATABASE CONVERT TO PHYSICAL STANDBY;
ALTER SYSTEM SET DG_BROKER_START=TRUE;
STARTUP MOUNT FORCE;

-- Restart as a mounted standby and resume managed recovery
shutdown immediate;
startup nomount;
alter database mount standby database;
alter database recover managed standby database disconnect from session;
drop restore point before_pra_myn ;
-- Re-enable log shipping from the primary
alter system set log_archive_dest_state_3='enable';  # primary site


  • Using RMAN Incremental Backups to Refresh a Standby Database
-- On the primary: incremental backup of all blocks changed since the standby's SCN
BACKUP DEVICE TYPE DISK INCREMENTAL FROM SCN 750983 DATABASE FORMAT '/tmp/incr_standby_%U'; # primary site
-- On the standby: register the copied pieces, then apply them without redo
CATALOG START WITH '/standbydisk1/incrback/';
RECOVER DATABASE NOREDO;

  • Renaming a bad file name

-- Fix an UNNAMEDnnnnn placeholder datafile on the standby: temporarily switch
-- off automatic standby file management, create the datafile with its real
-- name, then switch automatic management back on.
ALTER SYSTEM SET standby_file_management='MANUAL';
alter database create datafile '/u01/app/oracle/product/10.2.0.4/dbs/UNNAMED00422' as '/u01/ORADATA/mydb/myntbs_06.dbf';
ALTER SYSTEM SET standby_file_management=AUTO;


  • Finding duplicate archivelog names on standby

-- Find archived-log entries whose NAME appears more than once in the
-- controlfile on the standby: keep the highest RECID per name and list the
-- older duplicate entries.
set heading off
set feedback off
set linesize 200
set pagesize 0
-- NOTE(review): "calalog.txt" looks like a typo for catalog.txt — confirm
spool calalog.txt
select al.recid, al.name from v$archived_log al
where al.status = 'A' and al.standby_dest= 'NO'
minus
-- the record to keep for each name: the one with the highest recid
select al.recid, al.name from v$archived_log al,
(select max(recid) recid
from v$archived_log
where status = 'A'
group by name) keepal
where al.status = 'A'
and al.standby_dest = 'NO'
and keepal.recid = al.recid;

-- Re-register a given archivelog file with RMAN
RMAN> catalog archivelog '/u01/ORADATA/myndb/arch/arch_35_1_716749251';


  • Creating Standby with RMAN 11G

-- 11g active-database duplication: build the standby straight over the
-- network from the running primary, then recover it (no staged backups).
run {
DUPLICATE TARGET DATABASE FOR STANDBY FROM ACTIVE DATABASE DORECOVER NOFILENAMECHECK ;
}
-- Invocation: AUXILIARY connects to the standby instance being created
rman target / AUXILIARY "sys/xxxxx@myndb" cmdfile=$1 log=log/$1.log 

Dataguard : Architecture


  • Overview


  • Broker

lundi 9 février 2015

Real Application Cluster : Performance

Module details
ORAUSER Logout
Module overviewEdit
   
The following information is about the module RAC PERFORMANCE : GENERIC APPROACH
Module information detailsEdit
   
NAMEDEFINITION
RAC PERFORMANCE : GENERIC APPROCHMost global cache wait events that show a high total time as reported in the AWR and Statspack reports or
in the dynamic performance views are normal and may present themselves as the top database time consumers without actually indicating a problem; If user response times increase and a high proportion of time waited is for global cache, then you should determine the cause.
Most reports include a breakdown of events sorted by percentage of the total time.
It is useful to start with an ADDM report, which analyzes the routinely collected performance statistics with respect to their impact,
and points to the objects and SQL contributing most to the time waited, and then moves on to the more detailed reports produced by AWR and Statspack.
1 - 1
Add To FuncEdit
More informationsEdit
   
DESCRIPTION
Most global cache wait events that show a high total time as reported in the AWR and Statspack reports or
in the dynamic performance views are normal and may present themselves as the top database time consumers without actually indicating a problem; If user response times increase and a high proportion of time waited is for global cache, then you should determine the cause.
Most reports include a breakdown of events sorted by percentage of the total time.
It is useful to start with an ADDM report, which analyzes the routinely collected performance statistics with respect to their impact,
and points to the objects and SQL contributing most to the time waited, and then moves on to the more detailed reports produced by AWR and Statspack.
1 - 1
DetailEdit
   
no data found
AddEdit
Related optionsEdit
   
NAMEDESCRIPTION
Block-Related Wait Events The main wait events for block-related waits are:
  • gc current block 2-way
  • gc current block 3-way
  • gc cr block 2-way
  • gc cr block 3-way The block-related wait event statistics indicate that a block was received as either the result of a 2-way or a 3-way message,
  • 2-way : the block was sent from the resource master requiring 1 message and 1 transfer.
  • 3-way : the block was forwarded to a third node from which it was sent, requiring 2 messages and 1 block transfer.
  • Contention-Related Wait EventsThe main wait events for contention-related waits are:
  • gc current block busy
  • gc cr block busy
  • gc buffer busy acquire/release The contention-related wait event statistics indicate that a block was received which was pinned by a session on another node,
    was deferred because a change had not yet been flushed to disk or because of high concurrency, and therefore could not be shipped immediately
    The gc current block busy and gc cr block busy wait events indicate that the local instance that is making the request did not immediately receive a current or consistent read block.
    The term busy in these events' names indicates that the sending of the block was delayed on a remote instance.
    For example, a block cannot be shipped immediately if Oracle Database has not yet written the redo for the block's changes to a log file. In comparison to block busy wait events, a gc buffer busy event indicates that Oracle Database cannot immediately grant access to data that is stored in the local buffer cache.
    This is because a global operation on the buffer is pending and the operation has not yet completed.
    In other words, the buffer is busy and all other processes that are attempting to access the local buffer must wait to complete
  • Load-Related Wait Events The main wait events for load-related waits are:
  • gc current block congested
  • gc cr block congested The load-related wait events indicate that a delay in processing has occurred in the GCS,
    which is usually caused by high load, CPU saturation and would have to be solved by additional CPUs,
    load-balancing, off loading processing to different times or a new cluster node.
    For the events mentioned, the wait time encompasses the entire round trip from the time a session starts to wait after initiating a block request until the block arrives.
  • Message-Related Wait Events The main wait events for message-related waits are:
  • gc current grant 2-way
  • gc cr grant 2-way The message-related wait event statistics indicate that no block was received because it was not cached in any instance.
    Instead a global grant was given, enabling the requesting instance to read the block from disk(cr grant) or modify it(current grant).
    If the time consumed by these events is high, then it may be assumed that the frequently used SQL causes a lot of disk I/O (in the event of the cr grant) or
    that the workload inserts a lot of data and needs to find and format new blocks frequently (in the event of the current grant).
  • 1 - 4
    AddEdit
    Related sub optionsEdit
       
    no data found
    AddEdit
    Module componentsEdit
       
    Edit
    NAMEDEFINITION
    [ RAC : Hot objects ]hot tables
    hot sequences
    [ RAC : Library Cache ]
  • In RAC, Library Cache is global
  • So, parsing cost is worse than non-RAC
  • Solutions:
    1. Reduce parsing
    2. Minimize table alters, drops, creates, truncates
    3. Use PL/SQL stored programs, not unnamed blocks
  • [ RAC : Wait Events ]Various wait events
    1 - 3
    AddEdit
    Related parametersEdit
       
    no data found
    AddEdit
    All module typeEdit
       
    no data found
    AddEdit
    SQL commands informationsEdit
       
    STEPSTATEMENTDESCRIPTION
    1SELECT
    A.VALUE "GC BLOCKS LOST 1",
    B.VALUE "GC BLOCKS CORRUPT 1",
    C.VALUE "GC BLOCKS LOST 2",
    D.VALUE "GC BLOCKS CORRUPT 2"
    FROM GV$SYSSTAT A, GV$SYSSTAT B, GV$SYSSTAT C, GV$SYSSTAT D
    WHERE A.INST_ID=1 AND A.NAME='gc blocks lost'
    AND B.INST_ID=1 AND B.NAME='gc blocks corrupt'
    AND C.INST_ID=2 AND C.NAME='gc blocks lost'
    AND D.INST_ID=2 AND D.NAME='gc blocks corrupt';
    global cache blocks lost and global cache blocks corrupt should always be as near to zero as possible
    1 - 1
    AddEdit
    Related tasks Edit
       
    NAMEDESCRIPTION
    Measuring block transfer timeContention in blocks can be measured by using the block transfer time.
    To determine block transfer time, examine the statistics global cache cr block receive time and global cache cr blocks received.
    1 - 1
    AddEdit
    Related scriptsEdit
       
    DetailName
    Detailracdiag
    1 - 1
    HomeEditEdit AllEditAddEdit

     
     
    HomeApplication 101Edit Page 17CreateSessionActivityDebugShow Edit Links

    Partitioned indexes

    Just like partitioned tables, partitioned indexes improve manageability, availability, performance, and scalability.
    They can either be partitioned independently (global indexes) or automatically linked to a table's partitioning method (local indexes).
    In general, you should use global indexes for OLTP applications and local indexes for data warehousing or DSS applications.
    Also, whenever possible, you should try to use local indexes because they are easier to manage.

    When deciding what kind of partitioned index to use,
    you should consider the following guidelines in order:
    # 1)If the table partitioning column is a subset of the index keys, use a local index. If this is the case, you are finished. If this is not the case, continue to guideline 2.
    # 2)If the index is unique, use a global index. If this is the case, you are finished. If this is not the case, continue to guideline 3.
    # 3)If your priority is manageability, use a local index. If this is the case, you are finished. If this is not the case, continue to guideline 4.
    # 4)If the application is an OLTP one and users need quick response times, use a global index. If the application is a DSS one and users are more interested in throughput, use a local index


    jeudi 5 février 2015

    REAL APPLICATION CLUSTER : Installation


    • Configuring Network Components


     Grid Infrastructure requires a number of network addresses to work correctly: 
    #A public network address for each host
    # A private network for each host
    # A virtual (not yet assigned) IP address per host
    # One to three unassigned IP addresses for the Single Client Access Name feature.
    # If Grid Plug and Play is used, another non-used virtual address for the Grid Naming Service

    • Configuring Hostnames and Name Resolution
    Check the file /etc/sysconfig/network -
    check the file /etc/resolv.conf -
    vi /etc/nsswitch.conf
              #hosts: db files nisplus nis dns
              hosts: files dns

    vi /etc/hosts 
    127.0.0.1 localhost.localdomain localhost
    ::1 localhost6.localdomain6 localhost6
    # CONFIGURATION RESEAU RAC
    # bond0 : Adresses Public
    10.178.140.17 frprod01 frprocar01.dc1.fr.myn.dns
    10.178.140.18 frprod02 frprocar02.dc1.fr.myn.dns
    # bond0 : Adresses Virtuelles
    10.178.140.20 frprod01-virt frprod01-virt.myn.dns frprod01-virt.dc1.fr.coface.dns
    10.178.140.21 frprod02-virt frprod02-virt.myn.dns frprod02-virt.dc1.fr.coface.dns
    # bond1 : Adresses Privees - reseau interconnect
    10.178.13.17 frprod01-interco
    10.178.13.18 frprod02-interco
    # Addresse SCAN : vip 11g
    10.178.140.19 frprod-vippub frprod-vippub.myn.dns frprod-vippub.dc1.fr.myn.dns
    # FIN CONF RAC

    /sbin/ifconfig From the output, identify the interface name (such as eth0) and
    IP address for each network adapter you specify as a public network interface

    • SCAN
    The Single Client Access Name (SCAN) is a new feature in Oracle 11.2. The SCAN name effectively provides a network alias for the cluster.
    It replaces individual node names in the connection string and allows clients to connect to the cluster regardless of which nodes it is running on.
    The SCAN address is accessed via a SCAN VIP that typically runs on three nodes in the cluster.
    If less than three nodes are available, then multiple SCAN VIPs may be running on the same node.
    A dedicated SCAN listener process runs on each node that has a SCAN VIP.
    The SCAN listener is responsible for load balancing across the cluster, and the connection is forwarded to a local listener process running on one of the nodes currently in the cluster

    The benefit is that the client's connect information does not need to change if you add or remove nodes in the cluster.

    There are 2 options for defining the SCAN:
    # Define the SCAN in your corporate DNS (Domain Name Service)
    # Use the Grid Naming Service (GNS)




    During cluster configuration, several resources are created in the cluster for SCAN.
    For each of the 3 IP addresses that the SCAN resolves to, a SCAN VIP resource is created and a SCAN Listener is created
    Each pair of resources (SCAN VIP + Listener) will be started on a different server in the cluster, assuming the cluster consists of three or more nodes.

    In case, a 2-node-cluster is used (for which 3 IPs are still recommended for simplification reasons),
    one server in the cluster will host two sets of SCAN resources under normal operations


    • ASM DISKGROUP for ocr/voting
      • Type normal ==> 3 disks
      • Type high ==> 5 disks