Thursday, July 31, 2014

Duplicate Rows Selections

Duplicate Rows Selections


SQL> create table taj ( col1 char(2),col2 char(2), col3 char(2));

Table created.


SQL> select * from taj;

CO CO CO
-- -- --
a a a
a a b
a b c
b b b
b b c
c c c
c c d
c d e

8 rows selected.

SQL> select * from taj t1
2 where t1.rowid >
3 ( select min(t2.rowid) from taj t2
4 where t1.col1 = t2.col2);

Note: as written, the subquery compares t1.col1 with t2.col2 (and the output above reflects that). To select true duplicate rows based on col1, the condition should be t1.col1 = t2.col1.

CO CO CO
-- -- --
a a b
a b c
b b b
b b c
c c d
c d e

6 rows selected.

Delete Duplicate Rows

Delete Duplicate Rows


SQL> select * from taj;

CO CO CO
-- -- --
a a a
a a b
a b c
b b b
b b c
c c c
c c d
c d e
b c d
b c d

10 rows selected.

SQL> ed
Wrote file afiedt.buf

1 delete from taj t1
2 where t1.rowid >
3 ( select min(t2.rowid) from taj t2
4* where t1.col1 = t2.col1 and t1.col2 = t2.col2 and t1.col3
= t2.col3)
SQL> /

1 row deleted.

SQL> select * from taj;

CO CO CO
-- -- --
a a a
a a b
a b c
b b b
b b c
c c c
c c d
c d e
b c d

9 rows selected.

Retrieve only the Nth row from a table

Retrieve only the Nth row from a table


SQL> select * from taj order by sal desc;

NAME SAL
-------------------- ----------
CEO 10000
MANAGER 8000
IT MANAGER 7000
PROGRAMMER 5000
SALESMAN 3000
SALESMAN 3000
OFFICEBOY 1000
OFFICEBOY 1000

8 rows selected.

Note: Select the 6th highest salary

SQL> select * from (
2 select name,sal,rownum rn from taj where rownum < 7)
3 where rn = 6;

NAME SAL RN
-------------------- ---------- ----------
OFFICEBOY 1000 6

BFILE datatype

BFILE datatype


BFILE Datatype
The BFILE datatype stores unstructured binary data in operating-system files outside the database. A BFILE column or attribute stores a file locator that points to an external file containing the data. BFILEs can store up to 8 terabytes of data.

BFILEs are read only; you cannot modify them. They support only random (not sequential) reads, and they do not participate in transactions. The underlying operating system must maintain the file integrity, security, and durability for BFILEs. The database administrator must ensure that the file exists and that Oracle processes have operating-system read permissions on the file.




SQL> --Create logical directory


SQL> create directory DATA as 'c:\lob';

Directory created.


SQL> --Grant READ,WRITE privs to PUBLIC

SQL> grant read,write on directory data to public;

Grant succeeded.


SQL> --Create Table to store pointer for large binary objects


SQL> create table bfileobj ( id number, type varchar2(20), image bfile);

Table created.


SQL> --Insert locator/populate table


SQL> insert into bfileobj values (&n,'&a',bfilename('DATA','&A'));
Enter value for n: 1
Enter value for a: winRAR
Enter value for a: 1.rar
old 1: insert into bfileobj values (&n,'&a',bfilename('DATA','&A'))
new 1: insert into bfileobj values (1,'winRAR',bfilename('DATA','1.rar'))

1 row created.

SQL> commit;

Commit complete.


SQL> --Select Table ( First Create function)


SQL> create or replace function get_filename( p_bfile in bfile ) return
2 varchar2
3 as
4 l_dir varchar2(4000);
5 l_fname varchar2(4000);
6 begin
7 dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
8 return l_dir || ':' || l_fname;
9 end;
10 /

Function created.

SQL> column fname format a30
SQL> select id,type, get_filename( image ) fname from bfileobj;

ID TYPE FNAME
---------- -------------------- ------------------------------
1 winRAR DATA:1.rar
2 Windows Media DATA:2.wmv
3 PowerPoint DATA:3.pps
4 Adobe Acrobat DATA:4.pdf
5 jpeg image DATA:5.jpg




SQL> -- Check BFILE is exists or not on physical location



SQL> set serveroutput on
SQL> ed
Wrote file afiedt.buf

1 create or replace procedure fileexists ( nn number) is
2 file_loc bfile;
3 begin
4 dbms_output.put_line ( '----------bfile fileexists example --------');
5 select image into file_loc from bfileobj
6 where id = nn;
7 if (dbms_lob.fileexists(file_loc) != 0)
8 then
9 dbms_output.put_line('Processing given that the bfile exists');
10 else
11 dbms_output.put_line('Processing given that the bfile does not exists');
12 end if;
13 exception
14 when others then
15 dbms_output.put_line('operation failed');
16* end;
SQL> /

Procedure created.

SQL> exec fileexists(1);
----------bfile fileexists example --------
Processing given that the bfile exists

PL/SQL procedure successfully completed.

SQL> exec fileexists(5);
----------bfile fileexists example --------
Processing given that the bfile does not exists

PL/SQL procedure successfully completed.




SQL> --Display data from bfile

SQL> insert into bfileobj values (7,'WORD DOC',bfilename('DATA','7.DOC'));

1 row created.

SQL> commit;

Commit complete.


SQL> ed
Wrote file afiedt.buf

1 CREATE OR REPLACE PROCEDURE displayBFILE ( nn varchar2)IS
2 file_loc BFILE := BFILENAME('DATA', nn);
3 Buffer RAW(1024);
4 Amount BINARY_INTEGER := 200;
5 Position INTEGER := 1;
6 BEGIN
7 DBMS_OUTPUT.PUT_LINE('------------ BFILE DISPLAY EXAMPLE ------------');

8 /* Opening the BFILE: */
9 DBMS_LOB.OPEN (file_loc, DBMS_LOB.LOB_READONLY);
10 LOOP
11 DBMS_LOB.READ (file_loc, Amount, Position, Buffer);
12 /* Display the buffer contents: */
13 DBMS_OUTPUT.PUT_LINE(substr(utl_raw.cast_to_varchar2(Buffer), 1, 250)
);
14 Position := Position + Amount;
15 END LOOP;
16 /* Closing the BFILE: */
17 DBMS_LOB.CLOSE (file_loc);
18 EXCEPTION
19 WHEN NO_DATA_FOUND THEN
20 DBMS_OUTPUT.PUT_LINE('End of data');
21* END;
SQL> /

Procedure created.

SQL> exec displaybfile('6.log');

PL/SQL procedure successfully completed.

SQL> set serveroutput on size 1000000
SQL> exec displaybfile('6.log');

------------ BFILE DISPLAY EXAMPLE ------------

Starting up ORACLE RDBMS Version: 10.1.0.5.0.
System parameters with
non-default values:
processes = 150
sga_target
= 285212672
control_files = C:\O
RACLE\PRODUCT\10.1.0\ORADATA\ORCL\CONTROL01.CTL,
C:\ORACLE\PRODUCT\10.1.0\ORADATA\ORCL\CONTROL02.CTL,
C:\ORACLE\PRODUCT\10.1.0\ORADATA\ORCL\CONTROL03.CTL
db_block_size
= 8192
compatib
le = 10.1.0.5.0
db_file_multiblock_read_count= 16

db_recovery_file_dest = C:\oracle\product\10.1.0\flash_recovery_area

db_recovery_file_dest_size= 2147483648
undo_manageme
nt = AUTO
undo_tablespace = UNDOTBS1

remote_login_passwordfile= EXCLUSIVE
db_domain =

dispatchers = (PROTOCOL=TCP) (SERVICE=orclXDB)
job_q
ueue_processes = 10
background_dump_dest =
C:\ORACLE\PRODUCT\10.1.0\ADMIN\ORCL\BDUMP
user_dump_dest =
C:\ORACLE\PRODUCT\10.1.0\ADMIN\ORCL\UDUMP
core_dump_dest = C
:\ORACLE\PRODUCT\10.1.0\ADMIN\ORCL\CDUMP
db_name = orcl

open_cursors = 300
pga_aggregate_target = 94371840

End of data


PL/SQL procedure successfully completed.


SQL> --Getting the length of a bfile


SQL> ed
Wrote file afiedt.buf

1 CREATE OR REPLACE PROCEDURE getLengthBFILE ( nn varchar2) IS
2 file_loc BFILE := BFILENAME('DATA',nn);
3 Length INTEGER;
4 BEGIN
5 DBMS_OUTPUT.PUT_LINE('------------ BFILE LENGTH EXAMPLE ------------');
6 /* Open the BFILE: */
7 DBMS_LOB.OPEN(file_loc, DBMS_LOB.LOB_READONLY);
8 /* Get the length of the LOB: */
9 Length := DBMS_LOB.GETLENGTH(file_loc);
10 IF Length IS NULL THEN
11 DBMS_OUTPUT.PUT_LINE('BFILE is null.');
12 ELSE
13 DBMS_OUTPUT.PUT_LINE('The length is ' || length);
14 END IF;
15 /* Close the BFILE: */
16 DBMS_LOB.CLOSE(file_loc);
17* END;
SQL> /

Procedure created.

SQL> exec getlengthbfile('6.log');
------------ BFILE LENGTH EXAMPLE ------------
The length is 1150

PL/SQL procedure successfully completed.

SQL> exec getlengthbfile('1.rar');
------------ BFILE LENGTH EXAMPLE ------------
The length is 6953658

PL/SQL procedure successfully completed.


Note: you can display only text files this way; binary files will not render as readable text.

-------------------------------------------------------------------------
BLOB datatype
In above we see how to store pdf,txt file through BFILE datatype now in this section we use BLOB datatype to store those files.
1. Create directory for store pdf,txt files which we want to store in database
create or replace directory MY_FILES as 'e:\';
2. Create table in database which store target files
create table demo (id number primary key, theBlob BLOB);
3. Write Small plsql for upload pdf,txt etc files from OS to Database
declare
l_blob blob;
l_bfile bfile;
begin
insert into demo values(1,empty_blob() )
returning theBlob into l_blob;
l_bfile := bfilename ('MY_FILES','test1.pdf');
dbms_lob.fileopen(l_bfile);
dbms_lob.loadfromfile(l_blob,l_bfile,
dbms_lob.getlength(l_bfile));
dbms_lob.fileclose(l_bfile);
end;
4. Check file is upload successfully?
SQL> column theblob format a30
SQL> select * from demo;
ID THEBLOB

---------- ------------------------------
1 255044462D312E330D332030206F62 6A0D3C3C2F54797065202F50616765 0D2F506172656E742031203020520D 2F5265736F75726365732032203020 520D2F436F6E74656E747320342030
declare
l_blob blob;
l_bfile bfile;
begin
insert into demo values(2,empty_blob() )
returning theBlob into l_blob;
l_bfile := bfilename ('MY_FILES','11gDBA.pdf');
dbms_lob.fileopen(l_bfile);
dbms_lob.loadfromfile(l_blob,l_bfile,
dbms_lob.getlength(l_bfile));
dbms_lob.fileclose(l_bfile);
end;

Table Fragmentation

Table Fragmentation


What is Table fragmentation


When rows are not stored contiguously, or if rows are split onto more than one page, performance decreases because these rows require additional page accesses. Table fragmentation is distinct from file fragmentation.

When lots of DML operations are applied to a table, the table becomes fragmented,
because DML does not release the free space in the table below the HWM.

Hint: HWM is indicator for USED BLOCKS in database. Blocks below the high water mark (used blocks) have at least once contained data. This data might have been deleted.
Since Oracle knows that blocks beyond the high water mark don't have data, it only reads blocks up to the high water mark in a full table scan.

DDL statement always reset HWM.


How to find table fragmentation



SQL> select count(*) from big1;

1000000 rows selected.

SQL> delete from big1 where rownum <= 300000;

300000 rows deleted.

SQL> commit;

Commit complete.

SQL> update big1 set object_id = 0 where rownum <=350000;

342226 rows updated.

SQL> commit;

Commit complete.

SQL> exec dbms_stats.gather_table_stats('SCOTT','BIG1');

PL/SQL procedure successfully completed.


Table Size ( with fragmented)




SQL> select table_name,round((blocks*8),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 72952kb


Actual data in table



SQL> select table_name,round((num_rows*avg_row_len/1024),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 30604.2kb

Note: 72952 - 30604 = 42348 KB is wasted space in the table.

The difference between two values is 60% and Pctfree 10% (default) so table is 50% extra space which is wasted because there is no data.



How to reset HWM / remove fragmentation


For that we need to reorganize fragmented table

We have four options to reorganize fragmented tables

1. alter table … move + rebuild indexes
2. export / truncate / import
3. create table as select ( CTAS)
4. dbms_redefinition



Option: 1 “alter table … move + rebuild indexes”



SQL> alter table BIG1 move;

Table altered.

SQL> select status,index_name from user_indexes
2 where table_name = 'BIG1';

STATUS INDEX_NAME
-------- ------------------------------
UNUSABLE BIGIDX

SQL> alter index bigidx rebuild;

Index altered.

SQL> select status,index_name from user_indexes
2 where table_name = 'BIG1';

STATUS INDEX_NAME
-------- ------------------------------
VALID BIGIDX


SQL> exec dbms_stats.gather_table_stats('SCOTT','BIG1');

PL/SQL procedure successfully completed.

SQL> select table_name,round((blocks*8),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 38224kb

SQL> select table_name,round((num_rows*avg_row_len/1024),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 30727.37kb



Option: 2 “Create table as select”




SQL> create table big2 as select * from big1;

Table created.

SQL> drop table big1 purge;

Table dropped.

SQL> rename big2 to big1;

Table renamed.

SQL> exec dbms_stats.gather_table_stats('SCOTT','BIG1');

PL/SQL procedure successfully completed.

SQL> select table_name,round((blocks*8),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 85536kb

SQL> select table_name,round((num_rows*avg_row_len/1024),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 68986.97kb

SQL> select status from user_indexes
2 where table_name = 'BIG1';

no rows selected

SQL> --Note we need to create all indexes.



Option: 3 “export / truncate / import”



SQL> select table_name, round((blocks*8),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 85536kb

SQL> select table_name, round((num_rows*avg_row_len/1024),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 42535.54kb

SQL> select status from user_indexes where table_name = 'BIG1';

STATUS
--------
VALID

SQL> exit
Disconnected from Oracle Database 10g Enterprise Edition Release 10.1.0.5.0 - Pr
oduction
With the Partitioning, OLAP and Data Mining options

C:\>exp scott/tiger@Orcl file=c:\big1.dmp tables=big1

Export: Release 10.1.0.5.0 - Production on Sat Jul 28 16:30:44 2007

Copyright (c) 1982, 2005, Oracle. All rights reserved.

Connected to: Oracle Database 10g Enterprise Edition Release 10.1.0.5.0 - Produc
tion
With the Partitioning, OLAP and Data Mining options
Export done in WE8MSWIN1252 character set and AL16UTF16 NCHAR character set

About to export specified tables via Conventional Path ...
. . exporting table BIG1 468904 rows exported
Export terminated successfully without warnings.

C:\>sqlplus scott/tiger@orcl

SQL*Plus: Release 10.1.0.5.0 - Production on Sat Jul 28 16:31:12 2007

Copyright (c) 1982, 2005, Oracle. All rights reserved.

Connected to:
Oracle Database 10g Enterprise Edition Release 10.1.0.5.0 - Production
With the Partitioning, OLAP and Data Mining options

SQL> truncate table big1;

Table truncated.

SQL> exit
Disconnected from Oracle Database 10g Enterprise Edition Release 10.1.0.5.0 - Pr
oduction
With the Partitioning, OLAP and Data Mining options

C:\>imp scott/tiger@Orcl file=c:\big1.dmp ignore=y

Import: Release 10.1.0.5.0 - Production on Sat Jul 28 16:31:54 2007

Copyright (c) 1982, 2005, Oracle. All rights reserved.

Connected to: Oracle Database 10g Enterprise Edition Release 10.1.0.5.0 - Produc
tion
With the Partitioning, OLAP and Data Mining options

Export file created by EXPORT:V10.01.00 via conventional path
import done in WE8MSWIN1252 character set and AL16UTF16 NCHAR character set
. importing SCOTT's objects into SCOTT
. . importing table "BIG1" 468904 rows imported
Import terminated successfully without warnings.

C:\>sqlplus scott/tiger@orcl

SQL*Plus: Release 10.1.0.5.0 - Production on Sat Jul 28 16:32:21 2007

Copyright (c) 1982, 2005, Oracle. All rights reserved.

Connected to:
Oracle Database 10g Enterprise Edition Release 10.1.0.5.0 - Production
With the Partitioning, OLAP and Data Mining options

SQL> select table_name, round((blocks*8),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 85536kb

SQL> select table_name, round((num_rows*avg_row_len/1024),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 42535.54kb

SQL> exec dbms_stats.gather_table_stats('SCOTT','BIG1');

PL/SQL procedure successfully completed.

SQL> select table_name, round((blocks*8),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 51840kb

SQL> select table_name, round((num_rows*avg_row_len/1024),2)||'kb' "size"
2 from user_tables
3 where table_name = 'BIG1';

TABLE_NAME size
------------------------------ ------------------------------------------
BIG1 42542.27kb

SQL> select status from user_indexes where table_name = 'BIG1';

STATUS
--------
VALID

SQL> exec dbms_redefinition.can_redef_table('SCOTT','BIG1',-
> dbms_redefinition.cons_use_pk);

PL/SQL procedure successfully completed.



Option: 4 “dbms_redefinition”





SQL> create table TABLE1 (
2 no number,
3 name varchar2(20) default 'NONE',
4 ddate date default SYSDATE);

Table created.

SQL> alter table table1 add constraint pk_no primary key(no);

Table altered.

SQL> begin
2 for x in 1..100000 loop
3 insert into table1 ( no , name, ddate)
4 values ( x , default, default);
5 end loop;
6 end;
7 /

PL/SQL procedure successfully completed.

SQL> create or replace trigger tri_table1
2 after insert on table1
3 begin
4 null;
5 end;
6 /

Trigger created.

SQL> select count(*) from table1;

COUNT(*)
----------
100000

SQL> delete table1 where rownum <= 50000;

50000 rows deleted.

SQL> commit;

Commit complete.

SQL> exec dbms_stats.gather_table_stats('SCOTT','TABLE1');

PL/SQL procedure successfully completed.

SQL> select table_name, round((blocks*8),2)||'kb' "size"
2 from user_tables
3 where table_name = 'TABLE1';

TABLE_NAME size
------------------------------ ------------------------------------------
TABLE1 2960kb

SQL> select table_name, round((num_rows*avg_row_len/1024),2)||'kb' "size"
2 from user_tables
3 where table_name = 'TABLE1';

TABLE_NAME size
------------------------------ ------------------------------------------
TABLE1 822.69kb


SQL> --Minimum Privs required "DBA" role or "SELECT" on dbms_redefinition pkg
SQL> --First check table is candidate for redefinition.
SQL>
SQL> exec sys.dbms_redefinition.can_redef_table('SCOTT',-
> 'TABLE1',-
> sys.dbms_redefinition.cons_use_pk);

PL/SQL procedure successfully completed.

SQL> --After verifying that the table can be redefined online, you manually crea
te an empty interim table (in the same schema as the table to be redefined)
SQL>
SQL> create table TABLE2 as select * from table1 WHERE 1 = 2;

Table created.

SQL> exec sys.dbms_redefinition.start_redef_table ( 'SCOTT',-
> 'TABLE1',-
> 'TABLE2');

PL/SQL procedure successfully completed.

SQL> --This procedure keeps the interim table synchronized with the original tab
le.
SQL>
SQL> exec sys.dbms_redefinition.sync_interim_table ('SCOTT',-
> 'TABLE1',-
> 'TABLE2');

PL/SQL procedure successfully completed.

SQL> --Create PRIMARY KEY on interim table(TABLE2)
SQL> alter table TABLE2
2 add constraint pk_no1 primary key (no);

Table altered.

SQL> create trigger tri_table2
2 after insert on table2
3 begin
4 null;
5 end;
6 /

Trigger created.

SQL> --Disable foreign key on original table if exists before finish this proces
s.
SQL>
SQL> exec sys.dbms_redefinition.finish_redef_table ( 'SCOTT',-
> 'TABLE1',-
> 'TABLE2');

PL/SQL procedure successfully completed.

SQL> exec dbms_stats.gather_table_stats('SCOTT','TABLE1');

PL/SQL procedure successfully completed.

SQL> select table_name, round((blocks*8),2)||'kb' "size"
2 from user_tables
3 where table_name = 'TABLE1';

TABLE_NAME size
------------------------------ ------------------------------------------
TABLE1 1376kb

SQL> select table_name, round((num_rows*avg_row_len/1024),2)||'kb' "size"
2 from user_tables
3 where table_name = 'TABLE1';

TABLE_NAME size
------------------------------ ------------------------------------------
TABLE1 841.4kb

SQL> select status,constraint_name
2 from user_constraints
3 where table_name = 'TABLE1';

STATUS CONSTRAINT_NAME
-------- ------------------------------
ENABLED PK_NO1

SQL> select status ,trigger_name
2 from user_triggers
3 where table_name = 'TABLE1';

STATUS TRIGGER_NAME
-------- ------------------------------
ENABLED TRI_TABLE2

SQL> drop table TABLE2 PURGE;

Table dropped.

How to Enable ARCHIVELOG mode

How to Enable ARCHIVELOG mode



Benefits of ARCHIVELOG mode.



1. We can able to take database backup ONLINE.
2. No Need to Down Oracle DB Server
3. We can better Manage our Backup Policy through RMAN
4. Limited DBA task
5. We can able to RECOVER our database point-in-time


How can enable ARCHIVELOG MODE in 9iR1.


When we create a new database, it is created in NOARCHIVELOG mode by default.

shutdown database


SQL>shutdown immediate;


Edit below parameters in pfile(init.ora) file.


1. LOG_ARCHIVE_START=TRUE


Create New SPFILE from Modified pfile(init.ora)


SQL> create SPFILE from PFILE;


Startup Database in Mount Mode


SQL> startup mount;


Change Database log mode from NOARCHIVELOG to ARCHIVELOG


SQL> alter database archivelog;


Open DB for normal use


SQL> alter database open;


check archive log status


Database log mode Archive Mode
Automatic archival Enabled
Archive destination C:\Oracle\product\RDBMS
Oldest online log sequence 0
Next log sequence to archive 1
Current log sequence 1



Default Archivelog destination is :$ORACLE_HOME/rdbms.



How can enable ARCHIVELOG MODE in 10gr1



In 10g, enabling ARCHIVELOG mode is easy because we only need to change the DB log mode; archiving then starts automatically.


shutdown database


SQL> shutdown immediate;


startup database in mount mode


SQL> startup mount;


Change DB log mode


SQL> alter database archivelog;


Open DB for normal use


SQL> alter database open;


check archivelog status


SQL> archive log list


Default Archive log destination.


1. 10g introduce new feature called FRA (flashback recovery area) so all archivelog files created in FRA.

2. If you're not using FRA, then all archivelog files are created in $ORACLE_HOME/database


We can check database archivelog mode through below query


SQL> select log_mode from v$database;
or
SQL> archive log list

Oracle Certification

Oracle Certification


There are couple of questions come in mind when anyone think about oracle certification.

1. Is Oracle Certification beneficial for Job Seekers?
You know what is certification…?
It is just a couple of questions, based 80% on concepts and theory.
And any Job opening wants Practical DBA means Practical Experience with Theory.

So as per my advice any oracle newbie first get some practical experience then go for certification.


2. How to prepare for Oracle Certification?
Oracle Certification is examination, so some rules apply which we follow in our college examination.
1. First define which Oracle Paper you want to write.
Go through below link and choose your paper
http://education.oracle.com/pls/web_prod-plq-dad/db_pages.getpage?page_id=42&p_org_id=1080544&lang=US
2. Second check Exam Topic, Objective.
Suppose we choose the “1Z0-007” Introduction to SQL paper. So now check which exam topics come in the “Introduction to SQL” paper, because SQL is a very big topic and the exam questions are based on a few of the most important chapters.
Check Exam topic for “introduction to SQL” paper
http://education.oracle.com/pls/web_prod-plq-dad/db_pages.getpage?page_id=41&p_org_id=1080544&lang=US&p_exam_id=1Z0_001#3

3. What is Oracle Exam format?
Whenever anybody going to write first time oracle paper then always worry about exam format. Now don’t worry Oracle provide demo for exam format.
1. Oracle Exam is objective types
2. For the demo version please go through the below link (paper: 1Z0-007)
http://www.selftestsoftware.com/demo/default.aspx

3. What material we used for preparation?
1. Best material is “student guide” simple, easy and short.
Student Guide books comes with Oracle Course Kits.
2. Oracle Documentation
http://tahiti.oracle.com
3. Practice Questions
On internet there are so many site which provide us practice question for oracle certification.

http://www.google.ae/search?hl=en&q=practice+question+oracle&meta=

4. Looking for Dumps?
You know what is Dumps?
Dumps are real question which comes in Oracle Certification.
Some centre sold exam question like dumps for money and it is illegal please check it below link
http://www.oracle.com/global/us/education/certification/canagreemt.html
And suppose you will get dumps from anywhere …so please don’t depend on it.
It is not sure all question comes from dumps.
Because some prometric centre keeps real question in their centre and refresh periodically. But some prometric centre like oracle prometric centre use main server through internet.

Recovery Catalog

Recovery Catalog

Recovery Catalog

What is recovery catalog?
Recovery catalog is external database which use to store RMAN repository

What is RMAN repository?
The RMAN repository is the collection of metadata about the target databases, storing information about RMAN backup, recovery, and maintenance.

When recovery catalog is not in used then where RMAN repository stored?
When recovery catalog is not in used then RMAN repository store in CONTROLFILE.

Where we create recovery catalog?
We have two option for it.
1. We can create recovery catalog on target database through create separate tablespace for recovery catalog.
Or
2. We can create separate database for recovery catalog.

NOTE: if we have more than one database then separate database for recovery catalog is recommended instead of separate tablespace.

How recovery catalog store information about RMAN repository?
After creation of recovery catalog we have to manually register each target database to recovery catalog.

Should we need to take backup of recovery catalog?
Yes, We must take backup of recovery catalog because recovery catalog store most important information about RMAN backup, recovery and RMAN configuration if we lost recovery catalog then we can’t get those information so it is always recommended to take recovery catalog backup.

How take recovery catalog backup?
1. If recovery catalog created as separate tablespace on target database
Then just export recovery catalog tablespace and store in backup drive.

2. If recovery catalog created as separate database.
Then we can use any backup method which we use for target database for instance we can use user managed backup or we can also use RMAN method.

Is there any view for query information about recovery catalog?
Yes, actually for recovery catalog we need to create separate user. And after user and recovery catalog creation there is some view created in recovery catalog users with RC_ prefix. For instance: RC_DATABASE

How to configure recovery catalog for target database?
There are three steps for recovery catalog configuration.
1. Configure the database that will contain the recovery catalog,
2. Create owner for recovery catalog
3. Create recovery catalog

SQL> conn sys@dev as sysdba
Enter password:
Connected.
SQL> create tablespace CATDB
2 datafile 'c:\oracle\catdb01.dbf' size 10m;

Tablespace created.

NOTE: Here I am create separate tablespace for recovery catalog on target database.

SQL>conn sys@dev as sysdba
Connected.

SQL> create user CATDB
2 identified by CATDB
3 default tablespace CATDB;

NOTE: Owner for recovery catalog

SQL> grant connect,resource to catdb;

Grant succeeded.

SQL> grant recovery_catalog_owner to CATDB;

Grant succeeded.

NOTE: Grant the RECOVERY_CATALOG_OWNER role to the schema owner. This role provides the user with all privileges required to maintain and query the recovery catalog.


C:\>rman

Recovery Manager: Release 10.1.0.5.0 - Production

Copyright (c) 1995, 2004, Oracle. All rights reserved.

RMAN> connect catalog catdb/catdb@dev

connected to recovery catalog database
recovery catalog is not installed

RMAN> create catalog tablespace CATDB;

recovery catalog created

NOTE: now recovery catalog is created.

SQL> conn catdb/catdb@dev
Connected.
SQL> select table_name from user_tables where rownum =1;

TABLE_NAME
------------------------------
RCVER

NOTE: after recovery catalog creation we can check all recovery catalog views is created in recovery catalog owner.


What we do after create recovery catalog?
Now just register target database to recovery catalog and store RMAN repository in recovery catalog.

C:\>rman target sys@dev catalog catdb/catdb@dev

Recovery Manager: Release 10.1.0.5.0 - Production

Copyright (c) 1995, 2004, Oracle. All rights reserved.

target database Password:
connected to target database: DEV (DBID=3718891706)
connected to recovery catalog database

RMAN> register database;

database registered in recovery catalog
starting full resync of recovery catalog
full resync complete

NOTE: “target sys@dev” is use for target database which we want to register to recovery catalog through recovery catalog owner “catdb/catdb@dev”.

redolog file status

redolog file status

Redo log file is very crucial for database recovery...
and most of time we confuse about "status" of v$log file for redo log files.


1. UNUSED
Whenever you create any redo log file ...that time v$log view showing status "UNUSED"

2. CURRENT
After Creation when redo log "lgwr" process use that file ....that time v$log view showing status "CURRENT"

3. ACTIVE
If database running in archivelog mode then after log switch "arch" process use log file and create archivelog file that time v$Log file view showing status "ACTIVE"

4. CLEARING
Whenever any log file is corrupted or missing ( which is not "active" or not "current") then we use "alter database clear logfile" statement to recreate corrupted log file ... that time v$log view showing status "CLEARING" after clearing it is showing "UNUSED"

5. INACTIVE
During archivelog file creation v$log view showing status "ACTIVE" and when archivelog file is created then it is showing "INACTIVE" for particular one redo log file at a time.


6. CLEARING_CURRENT
If any error occured during "alter database clear logfile" then v$log file view showing "CLEARING_CURRENT".

During import table created in wrong tablespace.

During import table created in wrong tablespace.

During import, the table is sometimes created in a tablespace other than the user's default tablespace — what is the reason?
Suppose we have two databases.
1. Orcl
2. Hgc

Now i created one table in ORCL database in SYSTEM tablespace.

SQL> create table test_orcl ( no number) tablespace SYSTEM;
Table created.

Now in HGC database i import above created table in user default tablespace.

SQL> conn system/oracle@hgc
Connected.
SQL> alter user scott default tablespace USERS;
User altered.

NOTE:
1. I assign default tablespace "USERS" to scott user which import table.
2. Scott user having "connect" & "resource" role privileges

Question: NOW TELL me during IMPORT in which tablespace "USERS" or "SYSTEM" tbs table "test_orcl" created ?

NOW guess your answer..we will check later ...

SQL> conn scott/tiger@Orcl
Connected.
SQL> host exp scott/tiger@Orcl file=c:\test_orcl.dmp tables=test_orcl
Export: Release 10.1.0.5.0 - Production on Wed Jan 2 11:59:18 2008
Copyright (c) 1982, 2005, Oracle. All rights reserved.
. . exporting table TEST_ORCL 0 rows exported
Export terminated successfully without warnings.

NOW import table in HGC database.

SQL> conn scott/tiger@HGC
Connected.
SQL> host imp scott/tiger@HGC file=c:\test_orcl.dmp fromuser=scott touser=scott
Import: Release 10.1.0.5.0 - Production on Wed Jan 2 12:02:26 2008
Copyright (c) 1982, 2005, Oracle. All rights reserved.
. . importing table "TEST_ORCL" 0 rows imported
Import terminated successfully without warnings.

now we check in which tablespace table is created ?

SQL> select table_name,tablespace_name
2 from user_tables
3 where table_name='TEST_ORCL';
TABLE_NAME TABLESPACE_NAME
------------------------------ ------------------------------
TEST_ORCL SYSTEM

Oh! The table is created in the SYSTEM tablespace instead of the user's default tablespace "USERS" — why is it so?

Again import table but now make one changes ...
1. Revoke "resource" role and grant explicit quota on tablespace to user.

SQL> conn system/oracle@hgc
Connected.
SQL> revoke resource from scott;
Revoke succeeded.
SQL> alter user scott quota 100m on users;
User altered.

NOW import again...

SQL> conn scott/tiger@hgc
Connected.
SQL> drop table test_orcl purge;
Table dropped.
SQL> host imp scott/tiger@HGC file=c:\test_orcl.dmp fromuser=scott touser=scott
Import: Release 10.1.0.5.0 - Production on Wed Jan 2 12:06:51 2008
Copyright (c) 1982, 2005, Oracle. All rights reserved.
. . importing table "TEST_ORCL" 0 rows imported
Import terminated successfully without warnings.

SQL> select table_name,tablespace_name
2 from user_tables
3 where table_name='TEST_ORCL';
TABLE_NAME TABLESPACE_NAME
------------------------------ ------------------------------
TEST_ORCL USERS

But what is actual reason...?
1. Resource role having "unlimited tablespace" privileges means database user have quota on all tablespace in database if user having "unlimited tablespace" privileges.

2. The table is created in the "SYSTEM" tablespace because the table was originally created in the SYSTEM tablespace on the "ORCL" database, as recorded in the export file.

Export file created by EXPORT:V10.01.00 via conventional path
import done in WE8MSWIN1252 character set and AL16UTF16 NCHAR character set
"CREATE TABLE "TEST_ORCL" ("NO" NUMBER) PCTFREE 10 PCTUSED 40 INITRANS 1 MA"
"XTRANS 255 STORAGE(INITIAL 65536 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL "
"DEFAULT) TABLESPACE "SYSTEM" LOGGING NOCOMPRESS"
. . skipping table "TEST_ORCL"

Database & Patch set Upgrade

Database & Patch set Upgrade

Database & patch upgrade is one of the most important work for Database Administrator.

Before going further first understand what are “upgrade” & “migration” & “patch set” & “CPU patch” word.

Upgrade:
Usually use for oracle database upgrade.
Eg: upgrade from oracle 9i to oracle 10g.

Migration:
Usually use for migrate non oracle database to oracle database.
Eg: From SQL server database to oracle database

Patch set:
There are two types of patch upgrade
1. Patch set update
From one oracle version to another oracle version
Eg: oracle 9i to oracle 10g

NOTE: Patch set based on oracle base release
Eg: oracle 10gr1 (10.1.0.2.0) and for 10gr2 (10.2.0.1.0)

2. CPU patch (Critical Patch update)
Patches are applied to fix database bugs. If any bug occurs after applying the latest patch set for the current release, Oracle releases CPU patches at regular intervals to fix those bugs.
Eg: oracle 10gr1 base rel: 10.1.0.2.0, Latest Patch set: 10.1.0.5.0, Latest CPU patch: Jan-08 CPU patch for 10.1.0.5.0

NOTE: CPU patch based on latest patch set.
For more info: http://www.oracle.com/technology/deploy/security/alerts.htm
How to upgrade database to oracle 10gr2?

Determine the Upgrade Path to the New Oracle Database 10g Release
http://download.oracle.com/docs/cd/B19306_01/server.102/b14238/preup.htm#i1007814

Before upgrading process we need to define upgrade method
There are four methods to upgrade our database
1. DBUA (Database upgrade assistant)
2. Manually upgrade
3. Export/Import
4. Data copying
http://download.oracle.com/docs/cd/B19306_01/server.102/b14238/preup.htm#i694345
Patch set upgrade example:
1. From oracle 9ir2 (9.2.0.2 to 9.2.0.8) on Windows http://babudba.blogspot.com/2007/11/patch-installation.html
2. From oracle 9ir2 (9.2.0.1 to 9.2.0.7) on Solaris
http://sabdarsyed.blogspot.com/2007/02/upgrade-oracle-database-9201-to-9207-on.html
3. From oracle 10gr1 (10.1.0.2 to 10.1.0.5) on windows
http://dbataj.blogspot.com/2007/06/upgrading-database-from-101020-to.html

4. From oracle 10gr2 (10.2.0.2 to 10.2.0.3) on windows
http://babudba.blogspot.com/2007/11/upgrade-oracle-10202-to-10203.html

5. From oracle 10gr2 (10.2.0.1 to 10.2.0.3) on Linux
http://sabdarsyed.blogspot.com/2007/03/upgrading-oracle-database-server-10g.html

Critical Patch update example:
On Oracle 10gr1 (10.1.0.5.0) patch set
http://dbataj.blogspot.com/2007/06/critical-patch-update.html

Connecting with sys user with Or without pwd

Connecting with sys user with Or without pwd

As we know "SYS" & "SYSDBA" is superuser privilege in oracle database.

There are two methods to connect as a SYSDBA user.

1. WITH PASSWORD

2. WITHOUT PASSWORD



For example:

SQL> CONN / AS SYSDBA ---without password



To connect without a password, the following things need to be done:

1. Oracle Database User must have SYSDBA privileges.

2. Operating System User must add in DBA group.

On windows: ORA_DBA group

On linux: DBA group

3. The sqlnet.ora file must contain "NTS" in the line below:

SQLNET.AUTHENTICATION_SERVICES=(NTS)



For example:
SQL> CONN SYS/PWD AS SYSDBA --- with password. To prevent connecting without a password:


1. Remove OS user from DBA group

2. Edit the sqlnet.ora file and change "NTS" to "NONE" in the line below:

SQLNET.AUTHENTICATION_SERVICES=(NONE)

Default Value is NTS when sqlnet.ora file created.

CURSOR_SHARING parameter

CURSOR_SHARING parameter


CURSOR_SHARING



What is cursor_sharing parameters ?

CURSOR_SHARING determines what kind of SQL statements can share the same cursors.

What is possible values for this parameter ?

1. EXACT (default)
Only allows statements with identical text to share the same cursor.

2. FORCE
Forces statements that may differ in some literals, but are otherwise identical, to share a cursor, unless the literals affect the meaning of the statement.

3. SIMILAR
Causes statements that may differ in some literals, but are otherwise identical, to share a cursor, unless the literals affect either the meaning of the statement or the degree to which the plan is optimized.

When we have to / should use this parameter ?

Whenever you have lots of SQL statements to execute that differ only in literals, and your application is not designed to use BIND VARIABLES, you can set CURSOR_SHARING=FORCE so a cursor is shared by every SQL statement that differs only in literals.

Consider setting CURSOR_SHARING to SIMILAR or FORCE if both of the following questions are true:

Are there statements in the shared pool that differ only in the values of literals?
Is the response time low due to a very high number of library cache misses?


Tom Kyte said "cursor_sharing=force" is not a permanent solution for performance; it should be used only on a temporary basis while developers fix the bug in their application. Instead of this parameter, use BIND VARIABLES.


Performance improvement when we set cursor_sharing=force ?

When your application use lots of similar sql statement but differ in literal then yes performance will improve when you set cursor_sharing=force.

Side Effects on database when set cursor_sharing=FORCE/SIMILAR

Forcing cursor sharing among similar (but not identical) statements can have unexpected results in some DSS applications, or applications that use stored outlines.

Oracle does not recommend setting CURSOR_SHARING to FORCE in a DSS environment or if you are using complex queries. Also, star transformation is not supported with CURSOR_SHARING set to either SIMILAR or FORCE. For more information, see the "OPTIMIZER_FEATURES_ENABLE Parameter".

BUG with cursor_sharing=FORCE/SIMILAR

In Oracle Version 8i there is bug when set cursor_sharing=force/similar.

Do we need to bring down the database to set this parameter?

No, we can set this parameter while the database is open.

alter system set CURSOR_SHARING=force SCOPE=both;

Autotrace in SQLPLUS

Autotrace in SQLPLUS


What is AUTOTRACE


In SQL*Plus you can automatically get a report on the execution path used by the SQL optimizer and the statement execution statistics. The report is generated after a successful SQL DML statement, such as SELECT, DELETE, UPDATE or INSERT. It is useful for monitoring and tuning the performance of these DML statements.


How to configure AUTOTRACE in SQLPLUS for database user


1. Change directory path and connect with SYSTEM user to database
C:\>cd c:\oracle\product\10.1.0\db_1\rdbms\admin

C:\Oracle\product\10.1.0\Db_1\RDBMS\ADMIN>sqlplus /nolog

SQL*Plus: Release 9.0.1.0.1 - Production on Tue Oct 16 17:08:20 2007

(c) Copyright 2001 Oracle Corporation. All rights reserved.

SQL> conn system/manager
Connected.

2. run UTLXPLAN.SQL script for plan table.
SQL> @utlxplan.sql

3. Create public synonym for plan table or grant all privilege.
SQL> create public synonym plan_table for plan_table;

SQL> grant all on plan_table to public;

Grant succeeded.

4. Exit and again change directory path and connect with SYS user to database
SQL> exit
Disconnected from Oracle9i Enterprise Edition Release 9.0.1.1.1 - Production
With the Partitioning option
JServer Release 9.0.1.1.1 - Production

C:\Oracle\product\10.1.0\Db_1\RDBMS\ADMIN>cd\

C:\>cd c:\oracle\product\10.1.0\db_1\sqlplus\admin

C:\Oracle\product\10.1.0\Db_1\sqlplus\admin>sqlplus /nolog

SQL*Plus: Release 9.0.1.0.1 - Production on Tue Oct 16 17:12:07 2007

(c) Copyright 2001 Oracle Corporation. All rights reserved.

SQL> conn sys as sysdba
Enter password:
Connected.

5. run plustrce script and script must be run with SYS user.
SQL> @plustrce
SQL>
SQL> drop role plustrace;

Role dropped.

SQL> create role plustrace;

Role created.

SQL>
SQL> grant select on v_$sesstat to plustrace;

Grant succeeded.

SQL> grant select on v_$statname to plustrace;

Grant succeeded.

SQL> grant select on v_$mystat to plustrace;

Grant succeeded.

SQL> grant plustrace to dba with admin option;

Grant succeeded.

SQL> set echo off

6. Grant plustrace role to public.
SQL> grant plustrace to public;

Grant succeeded.


Configuring the Autotrace Report


SET AUTOTRACE OFF - No AUTOTRACE report is generated. This is the
default.
SET AUTOTRACE ON EXPLAIN - The AUTOTRACE report shows only the optimizer
execution path.
SET AUTOTRACE ON STATISTICS - The AUTOTRACE report shows only the SQL
statement execution statistics.
SET AUTOTRACE ON - The AUTOTRACE report includes both the
optimizer execution path and the SQL
statement execution statistics.
SET AUTOTRACE TRACEONLY - Like SET AUTOTRACE ON, but suppresses the
printing of the user's query output, if any.


Database Statistics for SQL Statements


recursive calls
Number of recursive calls generated at both the user and system level. Oracle maintains tables used for internal processing. When Oracle needs to make a change to these tables, it internally generates an internal SQL statement, which in turn generates a recursive call.

db block gets
Number of times a CURRENT block was requested.

consistent gets
Number of times a consistent read was requested for a block.

physical reads
Total number of data blocks read from disk. This number equals the value of "physical reads direct" plus all reads into buffer cache.

redo size
Total amount of redo generated in bytes.

bytes sent via SQL*Net to client
Total number of bytes sent to the client from the foreground processes.

bytes received via SQL*Net from client
Total number of bytes received from the client over Oracle Net.

SQL*Net roundtrips to/from client
Total number of Oracle Net messages sent to and received from the client.

sorts (memory)
Number of sort operations that were performed completely in memory and did not require any disk writes.

sorts (disk)
Number of sort operations that required at least one disk write.

rows processed
Number of rows processed during the operation.


EXAMPLE


SQL> set autotrace on
SQL> select * from dept;

DEPTNO DNAME LOC
---------- -------------- -------------
10 ACCOUNTING NEW YORK
20 RESEARCH DALLAS
30 SALES CHICAGO
40 OPERATIONS BOSTON


Execution Plan
----------------------------------------------------------
0 SELECT STATEMENT Optimizer=CHOOSE
1 0 TABLE ACCESS (FULL) OF 'DEPT'




Statistics
----------------------------------------------------------
0 recursive calls
2 db block gets
2 consistent gets
0 physical reads
0 redo size
702 bytes sent via SQL*Net to client
503 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
0 sorts (memory)
0 sorts (disk)
4 rows processed

SQL> set autotrace traceonly
SQL> select * from dept;


Execution Plan
----------------------------------------------------------
0 SELECT STATEMENT Optimizer=CHOOSE
1 0 TABLE ACCESS (FULL) OF 'DEPT'




Statistics
----------------------------------------------------------
0 recursive calls
2 db block gets
2 consistent gets
0 physical reads
0 redo size
702 bytes sent via SQL*Net to client
503 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
0 sorts (memory)
0 sorts (disk)
4 rows processed

SQL> set autotrace trace explain
SQL> select * from dept;

Execution Plan
----------------------------------------------------------
0 SELECT STATEMENT Optimizer=CHOOSE
1 0 TABLE ACCESS (FULL) OF 'DEPT'



SQL> set autotrace trace statistics
SQL> select * from dept;


Statistics
----------------------------------------------------------
0 recursive calls
2 db block gets
2 consistent gets
0 physical reads
0 redo size
702 bytes sent via SQL*Net to client
503 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
0 sorts (memory)
0 sorts (disk)
4 rows processed

10046 trace event

10046 trace event


What is 10046 trace events


whenever we want to tune any sql statement and want to know about waits and bind variable ...then we can use 10046 trace events.


How to use 10046 trace events


First define trace identifier for generated trace files so we can easily identify our trace files in UDUMP folder.

SQL> alter session set tracefile_identifier='MYSESSION';

Enable 10046 tracing.

SQL> alter session set events '10046 trace name context forever, level 8';

Now execute SQL Statements...

select e.empno, e.ename, d.dname, d.deptno
from emp e , dept d
where e.deptno = d.deptno;

Disable 10046 tracing.

SQL> alter session set events '10046 trace name context off';

Check UDUMP directory for generated trace file.
file name like "XXXX_MYSESSION.TRC"

C:\Oracle\admin\ora9i\udump>dir *MYSESSION.trc

Now generated trace file is RAW trace file and very hard to read and understand ...so through TKPROF utility create readable output file for generated trace file.

C:\Oracle\admin\ora9i\udump>tkprof ORA01904_MYSESSION.TRC c:\output.log

TKPROF: Release 10.1.0.5.0 - Production on Wed Oct 17 19:01:44 2007

Copyright (c) 1982, 2005, Oracle. All rights reserved.

Open c:\output.log file and analyze it. below is content of output.log file

select e.empno, e.ename, d.dname, d.deptno
from emp e , dept d
where e.deptno = d.deptno


call count cpu elapsed disk query current rows
------- ------ -------- ---------- ---------- ---------- ---------- ----------
Parse 1 0.29 0.31 2 0 2 0
Execute 1 0.00 0.00 0 0 0 0
Fetch 2 0.00 0.00 3 30 2 14
------- ------ -------- ---------- ---------- ---------- ---------- ----------
total 4 0.29 0.31 5 30 4 14


Misses in library cache during parse: 1
Optimizer mode: CHOOSE
Parsing user id: 66


---Below is execution plan

Rows Row Source Operation
------- ---------------------------------------------------
14 NESTED LOOPS
14 TABLE ACCESS FULL EMP
14 TABLE ACCESS BY INDEX ROWID DEPT
14 INDEX UNIQUE SCAN (object id 32119)


---Waits time information.


Elapsed times include waiting on following events:
Event waited on Times Max. Wait Total Waited
---------------------------------------- Waited ---------- ------------
SQL*Net message to client 2 0.00 0.00
db file sequential read 3 0.00 0.00
SQL*Net message from client 2 56.49 56.51



10046 Trace Level


Level 1
Basic trace level. Like the standard SQL_TRACE trace file. Provides statistics for parse, execute, fetch, commit and rollback database calls.

Level 4
Displays bind variables

Level 8
Displays wait statistics

Level 12
Displays wait statistics and bind variables

EXECUTION PLAN

EXECUTION PLAN


What is execution plan ?


To run a DML statement, Oracle might need to perform many steps. Each of these steps either retrieves rows of data physically from the database or prepares them in some way for the user issuing the statement. The combination of the steps Oracle uses to run a statement is called an execution plan. An execution plan includes an access method for each table that the statement accesses and an ordering of the tables (the join order). The steps of the execution plan are not performed in the order in which they are numbered.


How to generate execution plan ?


There are three ways to view execution plan

1. Through AUTOTRACE

SQL> set autotrace trace exp
SQL> select * from emp;

Execution Plan
----------------------------------------------------------
0 SELECT STATEMENT Optimizer=ALL_ROWS (Cost=3 Card=14 Bytes=51
8)

1 0 TABLE ACCESS (FULL) OF 'EMP' (TABLE) (Cost=3 Card=14 Bytes
=518)


2. Through DBMS_XPLAN package

SQL> explain plan for select * from emp;

Explained.

SQL> select * from table(dbms_xplan.display);

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------

Plan hash value: 3956160932

--------------------------------------------------------------------------
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
--------------------------------------------------------------------------
| 0 | SELECT STATEMENT | | 14 | 518 | 3 (0)| 00:00:01 |
| 1 | TABLE ACCESS FULL| EMP | 14 | 518 | 3 (0)| 00:00:01 |
--------------------------------------------------------------------------

8 rows selected.

3. Through trace file (trace event 10046,sql_trace=true) and format trace file through TKPROF utility.

AWR Report/Snapshot

AWR Report/Snapshot






Prior Oracle 10g we use STATSPACK report to analyze instance health for performance. but now in 10g we use AWR(automatic workload repository) report to analyze Oracle Database health.


How to generate AWR report ?


It is very easy to generate AWR report in Oracle 10g.

We have to run just one sql script to generate AWR report.


There is TWO sql script to create AWR report.
1. awrrpt.sql
If we have only One Oracle Database then run awrrpt.sql sql script.

2. awrrpti.sql
If we have more than one Oracle instance (like RAC), then run the awrrpti.sql script so that we can choose a particular instance for AWR report creation.



Location of AWR report sql script


$ORACLE_HOME/rdbms/admin


Example Of AWR report


AWR report Example


What is default interval period between two awr report ?


AWR report generate automatically for database every 1 hr interval period.
But the recommended interval is 15 minutes between two snapshots for better performance-bottleneck analysis.


How to Manage & change interval period for awr report ?


There are two ways to modify or change the AWR report settings:
1. Manually ---> through DBMS_WORKLOAD_REPOSITORY plsql package

2. GUI ----> OEM (Oracle Enterprise Manager)
Login to OEM ---> Administration Tag ----> Workload Section --->AUTOMATIC WORKLOAD REPOSITORY


How to Interpreting with AWR report ?


Below is Main Section in AWR report for concern.

Load Profile

In this section we know about Logical Read, Physical Read, Soft Parse, Hard Parse

Instance Efficiency Percentages

Many Oracle gurus do not recommend trusting the instance percentage ratios, because even if your BUFFER CACHE HIT RATIO shows 100%, that is not a sign that your database is very fast and running smoothly.

Top 5 Timed Events

It is very important section in AWR report.
Through this we can know the top five wait events affecting database performance.

Oracle Wait Event


SQL Statistics

In this section we will know about execution time, CPU used, and logical & physical reads for the top SQL statements.


Advisory Statistics

In this section we will get advice for PGA, BUFFER CACHE, SHARED POOL, JAVA POOL size for better performance.

Active Session History (ASH)

Active Session History (ASH)


Oracle 10gr2 Introduce new option for capture performance problem.
ASH >>> Active session History <<<<
What is ASH ?

Whenever you want to know information about blocker and waiter identifiers and their associated transaction IDs and SQL.

About V$ACTIVE_SESSION_HISTORY


1. The V$ACTIVE_SESSION_HISTORY view provides sampled session activity in the instance.

2. Active sessions are sampled every second and are stored in a circular buffer in SGA.

3. Any session that is connected to the database and is waiting for an event that does not belong to the Idle wait class is considered as an active session.

4. This includes any session that was on the CPU at the time of sampling.

5. Each session sample is a set of rows and the V$ACTIVE_SESSION_HISTORY view returns one row for each active session per sample, returning the latest session sample rows first. Because the active session samples are stored in a circular buffer in SGA, the greater the system activity, the smaller the number of seconds of session activity that can be stored in the circular buffer. This means that the duration for which a session sample appears in the V$ view, or the number of seconds of session activity that is displayed in the V$ view, is completely dependent on the database activity.

6. Using the Active Session History enables you to examine and perform detailed analysis on both current data in the V$ACTIVE_SESSION_HISTORY view

7. Historical data in the DBA_HIST_ACTIVE_SESS_HISTORY view,
SQL identifier of SQL statement


What information provide ASH view ?


1. Object number, file number, and block number

2. Wait event identifier and parameters

3. Session identifier and session serial number

4. Module and action name

5. Client identifier of the session

6. Service hash identifier


How to generate ASH report ?

It is New feature of 10GR2(10.2.0.1.0)
For report creation we have to use ASHRPT.SQL script.
Located: In $ORACLE_HOME/rdbms/admin folder


How to run ASHRPT.SQL script


To generate a text report of ASH information, run the ashrpt.sql script at the SQL prompt:

@$ORACLE_HOME/rdbms/admin/ashrpt.sql

First, you need to specify whether you want an HTML or a text report.

Enter value for report_type: text

Specify the time frame to collect ASH information by first specifying the begin time in minutes prior to the system date.

Enter value for begin_time: -10
Note: here you have to just put number in minutes eg: 10 for 10 minutes

Next, enter the duration in minutes that the report for which you want to capture ASH information from the begin time. The default duration of system date minus begin time is accepted in the following example:

Enter value for duration:
Note: left blank for default value. Default value is SYSDATE

The report in this example will gather ASH information beginning from 10 minutes before the current time and ending at the current time. Next, accept the default report name or enter a report name. The default name is accepted in the following example:

Enter value for report_name:
Using the report name ashrpt_1_0310_0131.txt
Note: Left it blank for default value.

The session history report is generated.


For further detail please check Metalink note: 243132.1

System Statistics

System Statistics


What is system statistics ?


System statistics describe the system's hardware characteristics, such as I/O and CPU performance and utilization, to the query optimizer.

Why gather SYSTEM STATISTICS ?

When choosing an execution plan, the optimizer estimates the I/O and CPU resources required for each query.
System statistics enable the query optimizer to more accurately estimate I/O and CPU costs, enabling the query optimizer to choose a better execution plan.

Is it important to gather system statistics?

System statistics give accurate cost for sql query so optimizer take good decision.
If you have good statistics then query take good decision and database performance is increase.

Many Oracle Guru's and Oracle Corp. Also recommended to gather system statistics.

How Gather System Statistics ?

Through DBMS_STATS package we can gather system statistics

Step1
SQL> exec dbms_stats.gather_system_stats('Start');

Step2
SQL>--Wait for some time ... a minimum of 1 hour, or 2 hours, or a whole day, according to the database load.

Step3
SQL> exec dbms_stats.gather_system_stats('Stop');


Keep in mind.
1. The user must be granted the DBA role to gather system statistics.
2. After gather system statistics... Unlike table, index, or column statistics, Oracle does not invalidate already parsed SQL statements when system statistics get updated. All new SQL statements are parsed using new statistics.
3. Always Gather System statistics During Heavy Peak Load.


Where check gather system statistics data information ?

After gathering system statistics, query the sys.aux_stats$ view.

SQL> select pname, pval1 from sys.aux_stats$;

PNAME PVAL1
------------------------------ ----------
STATUS
DSTART
DSTOP
FLAGS 1
CPUSPEEDNW 904.86697
IOSEEKTIM 10
IOTFRSPEED 4096
SREADTIM .995
MREADTIM 1.701
CPUSPEED 1268
MBRC 16
MAXTHR
SLAVETHR

13 rows selected.

Note:
CPUSPEEDNW,IOSEEKTIM,IOTFRSPEED is New column comes with Oracle 10g. and these columns already populated before gather system statistics.


SREADTIM

Single block read time is the average time to read a single block randomly.

MREADTIM

Multiblock read is the average time to read a multiblock sequentially.

MBRC

Multiblock count is the average multiblock read count sequentially.

Oracle Latch

Oracle Latch


What is Latch ?

A mechanism to protect shared data structures in the System Global Area.
For Example: latches protect the list of users currently accessing the database and protect the data structures describing the blocks in the buffer cache.

A server or background process acquires a latch for a very short time while manipulating or looking at one of these structures.


During DB performance we will see LATCH event ...so what is latch event and how many types of latch events ?

A latch is a low-level internal lock used by Oracle to protect memory structures.

The latch free event is updated when a server process attempts to get a latch, and the latch is unavailable on the first attempt.

Most Popular latch wait event are ...



1. Latch: library cache or Latch: shared pool

Below is Possible causes for above both latch events.

1. Lack of statement reuse
2. Statements not using bind variables
3. Insufficient size of application cursor cache
4. Cursors closed explicitly after each execution
5. Frequent logon/logoffs
6. Underlying object structure being modified (for example truncate)
7. Shared pool too small

Below are possible suggestions to avoid both latch events above:

1. Increase SHARED_POOL_SIZE parameter value.
2. Modify Frontend application to use BIND VARIABLE
3. Use CURSOR_SHARING='force' (for temporary basis)


2. Latch: cache buffers lru chain

Possible Causes

1. Inefficient SQL that accesses incorrect indexes iteratively (large index range scans) or many full table scans.
2. DBWR not keeping up with the dirty workload; hence, foreground process spends longer holding the latch looking for a free buffer
3. Cache may be too small

Possible Suggestion

1. Look for: Statements with very high logical I/O or physical I/O, using unselective indexes
2. Increase DB_CACHE_SIZE parameter value.
3. The cache buffers lru chain latches protect the lists of buffers in the cache. When adding, moving, or removing a buffer from a list, a latch must be obtained.

For symmetric multiprocessor (SMP) systems, Oracle automatically sets the number of LRU latches to a value equal to one half the number of CPUs on the system. For non-SMP systems, one LRU latch is sufficient.

Contention for the LRU latch can impede performance on SMP machines with a large number of CPUs. LRU latch contention is detected by querying V$LATCH, V$SESSION_EVENT, and V$SYSTEM_EVENT. To avoid contention, consider tuning the application, bypassing the buffer cache for DSS jobs, or redesigning the application.


3. Latch: cache buffers chains

Possible Causes

1. Repeated access to a block (or small number of blocks), known as a hot block
2. From AskTom:

Contention for these latches can be caused by:

- Very long buffer chains.
- very very heavy access to the same blocks.

Possible Suggestion

1. From AskTom:
When I see this, I try to see what SQL the waiters are trying to execute. Many times,
what I find, is they are all running the same query for the same data (hot blocks). If
you find such a query -- typically it indicates a query that might need to be tuned (to
access less blocks hence avoiding the collisions).

If it is long buffer chains, you can use multiple buffer pools to spread things out. You
can use DB_BLOCK_LRU_LATCHES to increase the number of latches. You can use both
together.

The cache buffers chains latches are used to protect a buffer list in the buffer cache. These latches are used when searching for, adding, or removing a buffer from the buffer cache. Contention on this latch usually means that there is a block that is greatly contended for (known as a hot block).

To identify the heavily accessed buffer chain, and hence the contended for block, look at latch statistics for the cache buffers chains latches using the view V$LATCH_CHILDREN. If there is a specific cache buffers chains child latch that has many more GETS, MISSES, and SLEEPS when compared with the other child latches, then this is the contended for child latch.

This latch has a memory address, identified by the ADDR column. Use the value in the ADDR column joined with the X$BH table to identify the blocks protected by this latch. For example, given the address (V$LATCH_CHILDREN.ADDR) of a heavily contended latch, this queries the file and block numbers:

SELECT OBJ data_object_id, FILE#, DBABLK,CLASS, STATE, TCH
FROM X$BH
WHERE HLADDR = 'address of latch'
ORDER BY TCH;

X$BH.TCH is a touch count for the buffer. A high value for X$BH.TCH indicates a hot block.

Many blocks are protected by each latch. One of these buffers will probably be the hot block. Any block with a high TCH value is a potential hot block. Perform this query a number of times, and identify the block that consistently appears in the output. After you have identified the hot block, query DBA_EXTENTS using the file number and block number, to identify the segment.

After you have identified the hot block, you can identify the segment it belongs to with the following query:

SELECT OBJECT_NAME, SUBOBJECT_NAME
FROM DBA_OBJECTS
WHERE DATA_OBJECT_ID = &obj;

In the query, &obj is the value of the OBJ column in the previous query on X$BH.


4. Latch: row cache objects

The row cache objects latches protect the data dictionary.
Suggestion: Increase SHARED_POOL_SIZE parameter to avoid this latch.

Materialized view for Data Warehouse

Materialized view for Data Warehouse

Did you hear about Materialized view?
Ans: Yes, No or May be.

Now I will explain you in details

What is materialized view?

1. Materialized view is normal database object like “table,index”
2. It is basically use for Data warehouse or Replication purpose.
3. Snapshot is synonym for materialized view.
4. A materialized view can be stored in the same database as its base tables or in a different database
5. A materialized view provides access to table data by storing the results of a query in a separate schema object. Unlike an ordinary view, which does not take up any storage space or contain any data, a materialized view contains the rows resulting from a query against one or more base tables or views.
6. A materialized view improve response time through query rewrite or reduce execution time.

Note: Query rewrite is a mechanism where Oracle or applications from the end user or database transparently improve query response time, by automatically rewriting the SQL query to use the materialized view instead of accessing the original tables. Query rewrites are particularly useful in a data warehouse environment.

What is syntax for materialized view?

Create materialized view “materialized_view_name”
Build [immediate / deferred]
Refresh [fast [on commit / demand]]
Enable query rewrite
As
Sql statement
/

Now Details explanation about syntax
1. Create materialized view “name’
I hope it is understood because self explanatory

2. Build [immediate / deferred]
1. Build immediate -- > materialized view populate with data on creation time from tables.
2. Build deferred --- > materialized view not populate with data on creation time, we have to populate later.

Eg:
With build immediate



create materialized view mymview1
build immediate
as
select ename,sum(sal) from emp group by ename;

SQL> select count(*) from emp;

COUNT(*)
----------
14

Note: materialized view populate with data on creation time.

With build deferred
create materialized view mymview2
build deferred
as
select ename,sum(sal) from emp group by ename;

SQL> select count(*) from mymview2;

COUNT(*)
----------
0

Note: the materialized view is not populated with data at creation time; we have to populate it manually through the DBMS_MVIEW.REFRESH package.


3. Refresh [fast [on commit / demand]]
Refresh is method for populate materialized view with data whenever made changes in their base tables.

There are three refresh methods for a materialized view:
1. refresh fast (materialized view log needed)
2. refresh on commit
3. refresh on demand

What is materialized view log?
A materialized view log is a schema object that records changes to a master table's data so that a materialized view defined on the master table can be refreshed incrementally.
Each materialized view log is associated with a single master table. The materialized view log resides in the same database and schema as its master table.

Eg:
With refresh on commit


SQL> select count(*) from emp;
COUNT(*)
----------
14
SQL> create materialized view MYVIEW1
2 build immediate
3 refresh on commit
4 enable query rewrite
5 as
6 select ename, sum(sal) from emp group by ename;
Materialized view created.

SQL> select count(*) from myview1;
COUNT(*)
----------
14
SQL> insert into emp (empno,ename,sal) values(1754,'TAJ',2500);
1 row created.
SQL> commit;
Commit complete.
SQL> select count(*) from myview1;
COUNT(*)
----------
15
With refresh on demand
SQL> select count(*) from emp;

COUNT(*)
----------
15

SQL> create materialized view MYVIEW5
2 build immediate
3 refresh on demand
4 enable query rewrite
5 as
6 select ename,sum(sal) from emp group by ename;

Materialized view created.

SQL> select count(*) from myview5;

COUNT(*)
----------
15

SQL> insert into emp (empno,ename,sal)values(1100,'xyz',1000);

1 row created.

SQL> commit;

Commit complete.

SQL> select count(*) from emp;

COUNT(*)
----------
16

SQL> select count(*) from myview5;

COUNT(*)
----------
15

SQL> exec dbms_mview.refresh('MYVIEW5');

PL/SQL procedure successfully completed.

SQL> select count(*) from myview5;

COUNT(*)
----------
16

Note: with REFRESH ON COMMIT, the materialized view is refreshed automatically whenever changes to the base tables are committed; with REFRESH ON DEMAND (as shown above), committed changes are not reflected until DBMS_MVIEW.REFRESH is executed manually.

4. Enable query rewrite
A materialized view is like a query with a result that is materialized and stored in a table. When a user query is found compatible with the query associated with a materialized view, the user query can be rewritten in terms of the materialized view. This technique improves the execution of the user query, because most of the query result has been precomputed. The query transformer looks for any materialized views that are compatible with the user query and selects one or more materialized views to rewrite the user query. The use of materialized views to rewrite a query is cost-based. That is, the query is not rewritten if the plan generated without the materialized views has a lower cost than the plan generated with the materialized views.
Note: query_rewrite_enabled=true AND query_rewrite_integrity=enforced must be set to use query rewrite feature

5. How a materialized view improves query performance.

Let’s check with demo

1. Create Big Table :)
SQL> create table big
2 as select * from all_objects
3 union all
4 select * from all_objects
5 union all
6 select * from all_objects
7 union all
8 select * from all_objects
9 union all
10 select * from all_objects;

Table created.

SQL> select count(*) from big;

COUNT(*)
----------
188995

2. Now execute below query and check query statistics
SQL> alter system flush shared_pool;

System altered.

SQL> set autotrace traceonly
SQL> set timing on
SQL> select owner,count(*) from big group by owner;

12 rows selected.

Elapsed: 00:00:00.01

Execution Plan
----------------------------------------------------------
0 SELECT STATEMENT Optimizer=ALL_ROWS (Cost=624 Card=188024 By
tes=3196408)

1 0 SORT (GROUP BY) (Cost=624 Card=188024 Bytes=3196408)
2 1 TABLE ACCESS (FULL) OF 'BIG' (TABLE) (Cost=598 Card=1880
24 Bytes=3196408)





Statistics
----------------------------------------------------------
957 recursive calls
0 db block gets
2844 consistent gets---- > too big :)
0 physical reads
0 redo size
1028 bytes sent via SQL*Net to client
504 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
10 sorts (memory)
0 sorts (disk)
12 rows processed

3. Now Create materialized view and try
SQL> create materialized view MYVIEWBIG
2 build immediate
3 refresh on commit
4 enable query rewrite
5 as
6 select owner,count(*) from big group by owner;

Materialized view created.

SQL> alter system flush shared_pool;

System altered.

SQL> set autotrace traceonly
SQL> set timing on
SQL> select owner,count(*) from big group by owner;

12 rows selected.

Elapsed: 00:00:00.01

Execution Plan
----------------------------------------------------------
0 SELECT STATEMENT Optimizer=ALL_ROWS (Cost=3 Card=12 Bytes=36
0)

1 0 MAT_VIEW REWRITE ACCESS (FULL) OF 'MYVIEWBIG' (MAT_VIEW RE
WRITE) (Cost=3 Card=12 Bytes=360)





Statistics
----------------------------------------------------------
7356 recursive calls
0 db block gets
1313 consistent gets --- > just less than half :)
0 physical reads
0 redo size
1028 bytes sent via SQL*Net to client
504 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
49 sorts (memory)
0 sorts (disk)
12 rows processed


6. What privileges are needed for a materialized view?

To create a materialized view in your own schema, you must have the CREATE MATERIALIZED VIEW privilege and the SELECT privilege to any tables referenced that are in another schema. To create a materialized view in another schema, you must have the CREATE ANY MATERIALIZED VIEW privilege and the owner of the materialized view needs SELECT privileges to the tables referenced if they are from another schema. Moreover, if you enable query rewrite on a materialized view that references tables outside your schema, you must have the GLOBAL QUERY REWRITE privilege or the QUERY REWRITE object privilege on each table outside your schema.

7. How to check the status of a materialized view?

SQL> select mview_name,query,rewrite_enabled,refresh_mode,build_mode,staleness,c
ompile_state
2 from user_mviews;

MVIEW_NAME QUERY R REFRES BUILD_MOD STALENESS COMPILE_ST
---------- -------------------- - ------ --------- ---------- ----------

MYBIG select owner, count( Y DEMAND IMMEDIATE NEEDS_COMP NEEDS_COMP
*) from big group by ILE ILE
owner

MYMVIEW2 select ename,sum(sal N DEMAND DEFERRED UNUSABLE NEEDS_COMP
) from emp group by ILE
ename

MYVIEW5 select ename,sum(sal Y DEMAND IMMEDIATE FRESH VALID
) from emp group by
ename

MYMVIEW3 select ename,sum(sal Y COMMIT IMMEDIATE UNUSABLE VALID
) from emp group by
ename

MYVIEW1 select ename, sum(sa Y COMMIT IMMEDIATE FRESH VALID
l) from emp group by

Explanation:
MVIEW_NAME -------- > Materialized View name
QUERY ------------- > Query that defines the materialized view
REWRITE_ENABLED --- > "Y" if query rewrite is enabled for the materialized view, otherwise "N"
REFRESH_MODE ------ > DEMAND, COMMIT, NEVER
BUILD_MODE -------- > IMMEDIATE, DEFERRED
STALENESS --------- > Relationship between the contents of the materialized view and the contents of the materialized view's masters.
COMPILE_STATE ----- > Validity of the materialized view with respect to the objects upon which it depends
VALID -> No object the materialized view depends on has changed since the last validation
NEEDS_COMPILE -> Some object the materialized view depends on has changed; an "alter materialized view … compile;" statement is required to validate this materialized view

Trace Event

Trace Event

How to Determine What Events/Parameters are Set in the Current Session?
Check Metalink Note: 28446.1
How To List All The Named Events Set For A Database?
Check Metalink Note: 436036.1

Checkpoint Not Complete In Alert.log

Checkpoint Not Complete In Alert.log


In our production database we are getting a lots of message like below
/db/app/oracle/Db_1/oradata/orcl/redo04.log
Thu Jul 16 22:02:07 2009
Thread 1 cannot allocate new log, sequence 66271
Checkpoint not complete
Current log# 4 seq# 66270 mem# 0:

When I searched on web/metalink found the below workaround ...

"archive_lag_target" should be disabled to fix the above message in the alertlog file.

"alter system set archive_lag_target=0 scope=both;"

check metalink note: 435780.1