Thursday, April 26, 2012

profile_hints.sql

----------------------------------------------------------------------------------------
--
-- File name:   profile_hints.sql
--
-- Purpose:     Show hints associated with a SQL Profile.
--
-- Author:      Kerry Osborne
--
-- Usage:       This script prompts for one value.
--
--              profile_name: the name of the profile to be examined
--
-- Description: This script pulls the hints associated with a SQL Profile.
--
-- Mods:        Modified to check for 10g or 11g as the hint structure changed.
--              Modified to join on category as well as signature.
--
--              See kerryosborne.oracle-guy.com for additional information.
---------------------------------------------------------------------------------------
--
set sqlblanklines on
set feedback off
accept profile_name -
       prompt 'Enter value for profile_name: ' -
       default 'X0X0X0X0'

declare
   ar_profile_hints sys.sqlprof_attr;
   cl_sql_text clob;
   version varchar2(3);
   l_category varchar2(30);
   l_force_matching varchar2(3);
   b_force_matching boolean;
begin

   select regexp_replace(version,'\..*')
   into version
   from v$instance;

   if version = '10' then

      -- dbms_output.put_line('version: '||version);
      execute immediate -- to avoid 942 error
      'select attr_val as outline_hints '||
      'from dba_sql_profiles p, sqlprof$attr h '||
      'where p.signature = h.signature '||
      'and p.category = h.category '||
      'and name like (''&&profile_name'') '||
      'order by attr#'
      bulk collect
      into ar_profile_hints;

   elsif version = '11' then

      -- dbms_output.put_line('version: '||version);
      execute immediate -- to avoid 942 error
      'select hint as outline_hints '||
      'from (select p.name, p.signature, p.category, row_number() '||
      '      over (partition by sd.signature, sd.category order by sd.signature) row_num, '||
      '      extractValue(value(t), ''/hint'') hint '||
      'from sqlobj$data sd, dba_sql_profiles p, '||
      '     table(xmlsequence(extract(xmltype(sd.comp_data), '||
      '                               ''/outline_data/hint''))) t '||
      'where sd.obj_type = 1 '||
      'and p.signature = sd.signature '||
      'and p.category = sd.category '||
      'and p.name like (''&&profile_name'')) '||
      'order by row_num'
      bulk collect
      into ar_profile_hints;

   end if;

   dbms_output.put_line(' ');
   dbms_output.put_line('HINT');
   dbms_output.put_line('------------------------------------------------------------------------------------------------------------------------------------------------------');

   for i in 1..ar_profile_hints.count loop
      dbms_output.put_line(ar_profile_hints(i));
   end loop;

   dbms_output.put_line(' ');
   dbms_output.put_line(ar_profile_hints.count||' rows selected.');
   dbms_output.put_line(' ');

end;
/
undef profile_name
set feedback on

create_sql_profile_awr.sql
----------------------------------------------------------------------------------------
--
-- File name:   create_sql_profile_awr.sql
--
-- Purpose:     Create SQL Profile based on Outline hints in V$SQL.OTHER_XML.
--
-- Author:      Kerry Osborne
--
-- Usage:       This script prompts for five values.
--
--              sql_id: the sql_id of the statement to attach the profile to
--                      (must be in the shared pool and in AWR history)
--
--              plan_hash_value: the plan_hash_value of the statement in AWR history
--
--              profile_name: the name of the profile to be generated
--
--              category: the name of the category for the profile
--
--              force_matching: a toggle to turn on or off the force_matching feature
--
-- Description: Based on a script by Randolf Geist.
--
-- Mods:        This is the 2nd version of this script which removes the dependency on rg_sqlprof2.sql.
--
--              See kerryosborne.oracle-guy.com for additional information.
---------------------------------------------------------------------------------------
--
-- @rg_sqlprof1 '&&sql_id' &&child_no '&&category' '&force_matching'
set feedback off
set sqlblanklines on

accept sql_id -
       prompt 'Enter value for sql_id: ' -
       default 'X0X0X0X0'
accept plan_hash_value -
       prompt 'Enter value for plan_hash_value: '
accept profile_name -
       prompt 'Enter value for profile_name (PROF_sqlid_planhash): ' -
       default 'X0X0X0X0'
accept category -
       prompt 'Enter value for category (DEFAULT): ' -
       default 'DEFAULT'
accept force_matching -
       prompt 'Enter value for force_matching (FALSE): ' -
       default 'false'

declare
   ar_profile_hints sys.sqlprof_attr;
   cl_sql_text clob;
   l_profile_name varchar2(30);
begin

   select extractvalue(value(d), '/hint') as outline_hints
   bulk collect
   into ar_profile_hints
   from xmltable('/*/outline_data/hint'
                 passing (select xmltype(other_xml) as xmlval
                          from dba_hist_sql_plan
                          where sql_id = '&&sql_id'
                          and plan_hash_value = &&plan_hash_value
                          and other_xml is not null)) d;

   select sql_text,
          decode('&&profile_name','X0X0X0X0','PROF_&&sql_id'||'_'||'&&plan_hash_value','&&profile_name')
   into cl_sql_text, l_profile_name
   from dba_hist_sqltext
   where sql_id = '&&sql_id';

   dbms_sqltune.import_sql_profile(
      sql_text    => cl_sql_text,
      profile     => ar_profile_hints,
      category    => '&&category',
      name        => l_profile_name,
      force_match => &&force_matching
      -- replace  => true
   );

   dbms_output.put_line(' ');
   dbms_output.put_line('SQL Profile '||l_profile_name||' created.');
   dbms_output.put_line(' ');

exception
   when NO_DATA_FOUND then
      dbms_output.put_line(' ');
      dbms_output.put_line('ERROR: sql_id: '||'&&sql_id'||' Plan: '||'&&plan_hash_value'||' not found in AWR.');
      dbms_output.put_line(' ');
end;
/
undef sql_id
undef plan_hash_value
undef profile_name
undef category
undef force_matching
set sqlblanklines off
set feedback on
create_sql_profile.sql
-- This version uses the cursor from the shared pool.
----------------------------------------------------------------------------------------
--
-- File name:   create_sql_profile.sql
--
-- Purpose:     Create SQL Profile based on Outline hints in V$SQL.OTHER_XML.
--
-- Author:      Kerry Osborne
--
-- Usage:       This script prompts for four values.
--
--              sql_id: the sql_id of the statement to attach the profile to (must be in the shared pool)
--
--              child_no: the child_no of the statement from v$sql
--
--              profile_name: the name of the profile to be generated
--
--              category: the name of the category for the profile
--
--              force_matching: a toggle to turn on or off the force_matching feature
--
-- Description: Based on a script by Randolf Geist.
--
-- Mods:        This is the 2nd version of this script which removes the dependency on rg_sqlprof1.sql.
--
--              See kerryosborne.oracle-guy.com for additional information.
---------------------------------------------------------------------------------------
--
-- @rg_sqlprof1 '&&sql_id' &&child_no '&&category' '&force_matching'
set feedback off
set sqlblanklines on

accept sql_id -
       prompt 'Enter value for sql_id: ' -
       default 'X0X0X0X0'
accept child_no -
       prompt 'Enter value for child_no (0): ' -
       default '0'
accept profile_name -
       prompt 'Enter value for profile_name (PROF_sqlid_planhash): ' -
       default 'X0X0X0X0'
accept category -
       prompt 'Enter value for category (DEFAULT): ' -
       default 'DEFAULT'
accept force_matching -
       prompt 'Enter value for force_matching (FALSE): ' -
       default 'false'

declare
   ar_profile_hints sys.sqlprof_attr;
   cl_sql_text clob;
   l_profile_name varchar2(30);
begin

   select extractvalue(value(d), '/hint') as outline_hints
   bulk collect
   into ar_profile_hints
   from xmltable('/*/outline_data/hint'
                 passing (select xmltype(other_xml) as xmlval
                          from v$sql_plan
                          where sql_id = '&&sql_id'
                          and child_number = &&child_no
                          and other_xml is not null)) d;

   select sql_fulltext,
          decode('&&profile_name','X0X0X0X0','PROF_&&sql_id'||'_'||plan_hash_value,'&&profile_name')
   into cl_sql_text, l_profile_name
   from v$sql
   where sql_id = '&&sql_id'
   and child_number = &&child_no;

   dbms_sqltune.import_sql_profile(
      sql_text    => cl_sql_text,
      profile     => ar_profile_hints,
      category    => '&&category',
      name        => l_profile_name,
      force_match => &&force_matching
      -- replace  => true
   );

   dbms_output.put_line(' ');
   dbms_output.put_line('SQL Profile '||l_profile_name||' created.');
   dbms_output.put_line(' ');

exception
   when NO_DATA_FOUND then
      dbms_output.put_line(' ');
      dbms_output.put_line('ERROR: sql_id: '||'&&sql_id'||' Child: '||'&&child_no'||' not found in v$sql.');
      dbms_output.put_line(' ');
end;
/
undef sql_id
undef child_no
undef profile_name
undef category
undef force_matching
set sqlblanklines off
set feedback on
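After running either script, a quick sanity check that the profile exists (this query is not part of the original scripts):

select name, category, status, force_matching, created
from dba_sql_profiles
order by created desc;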
select level from dual connect by level<=10
SQL> select level from dual connect by level<=10;
LEVEL
----------
1
2
3
4
5
6
7
8
9
10
10 rows selected.
SQL>
Monday, April 23, 2012
column group statistics aka extended statistics in 11g
Using extended statistics in Oracle 11g
When queries have a list of predicates on the same table, it may be beneficial to collect statistics on those columns as a group. Luckily, this can be done by Oracle automatically once it is set up:
- Observe the workload that uses those tables for 300 seconds, e.g. by simply running EXPLAIN PLAN for the long-running query:
exec dbms_stats.seed_col_usage(null,null,300)
- Review the column usage information recorded, via DBMS_STATS.REPORT_COL_USAGE:
select dbms_stats.report_col_usage(schema_owner,'table_name') from dual;
- Create the column groups based on the recorded usage (CREATE_EXTENDED_STATS is a function, so invoke it from a query):
select dbms_stats.create_extended_stats(schema_owner,'table_name') from dual;
- Review the extended statistics (column groups) that will now be maintained by Oracle:
select extension_name, extension from dba_stat_extensions where table_name='TABLE_NAME';
- Re-gather statistics so that extended statistics for the column groups will be collected:
exec dbms_stats.gather_table_stats(schema_owner,'table_name')
- Confirm what new statistics have been collected:
select column_name, num_distinct, histogram
from user_tab_col_statistics
where table_name='table_name'
order by 1;
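Putting it all together, a hypothetical end-to-end run (SCOTT and EMP are placeholder owner/table names; calling CREATE_EXTENDED_STATS without an explicit extension requires 11.2.0.2 or later):

exec dbms_stats.seed_col_usage(null,null,300)
-- run (or explain) the problem queries while column usage is being recorded
select dbms_stats.report_col_usage('SCOTT','EMP') from dual;
select dbms_stats.create_extended_stats('SCOTT','EMP') from dual;
exec dbms_stats.gather_table_stats('SCOTT','EMP')
select column_name, num_distinct, histogram
from user_tab_col_statistics
where table_name = 'EMP'
order by 1;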
Sunday, April 22, 2012
endian format
To determine the endian format for a platform:
select a.endian_format, a.platform_id
from v$transportable_platform a, v$database b
where a.platform_name=b.platform_name;
Sunday, April 8, 2012
DBMS_SQLTUNE
Step 1: Creating Tuning Task
DECLARE
my_task_name VARCHAR2 (30);
my_sqltext CLOB;
BEGIN
my_sqltext := 'SELECT e.last_name, d.department_name, d.department_id
FROM employees e, departments d
WHERE e.department_id = d.department_id
AND d.department_id = :bnd';
my_task_name := dbms_sqltune.create_tuning_task (sql_text=> my_sqltext,
bind_list => sql_binds (anydata.convertnumber (9)),
user_name => 'HR',
scope => 'COMPREHENSIVE',
time_limit => 60,
task_name => 'vega_tuning_task',
description => 'Tuning Task'
);
END;
/
The CREATE_TUNING_TASK function returns the name of the task created.
Step 2: Executing SQL Tuning Task
BEGIN
dbms_sqltune.execute_tuning_task (task_name => 'vega_tuning_task');
END;
/
Step 3: Checking Status of SQL Tuning Task
SELECT status FROM USER_ADVISOR_TASKS WHERE task_name = 'vega_tuning_task';
Step 4: Retrieving results of SQL tuning task
After the task is executed, results can be obtained by calling the REPORT_TUNING_TASK function.
SET LONG 1000
SET LONGCHUNKSIZE 1000
SET LINESIZE 100
SELECT DBMS_SQLTUNE.REPORT_TUNING_TASK( 'vega_tuning_task')
FROM DUAL;
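If the report recommends a SQL profile, it can be accepted with ACCEPT_SQL_PROFILE, and the task can be dropped when no longer needed. A minimal sketch (the profile name vega_profile is an assumption, not something the report produces):

BEGIN
  -- accept the profile recommended by the tuning task (hypothetical profile name)
  dbms_sqltune.accept_sql_profile (task_name => 'vega_tuning_task', name => 'vega_profile');
  -- remove the tuning task once finished with it
  dbms_sqltune.drop_tuning_task (task_name => 'vega_tuning_task');
END;
/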
Saturday, April 7, 2012
Block-change tracking: How effective it is
Oracle block-change tracking lets us take incremental backups that read only the blocks that have changed since the last backup. To check its effectiveness, use the following query:
SELECT file#, avg(datafile_blocks), avg(blocks_read), avg(blocks_read/datafile_blocks)*100 AS PCT_READ_FOR_BACKUP, avg(blocks)
FROM V$BACKUP_DATAFILE
WHERE used_change_tracking='YES' and incremental_level>0
GROUP BY file#
Ideally we want to see a small value for PCT_READ_FOR_BACKUP. If this value is very large, we can increase the frequency of incremental backups so that fewer changed blocks accumulate between them.
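For reference, block-change tracking is enabled with a statement like the following (the file name is a placeholder), and its status is visible in V$BLOCK_CHANGE_TRACKING:

ALTER DATABASE ENABLE BLOCK CHANGE TRACKING USING FILE '/u01/app/oracle/bct.chg';
SELECT status, filename FROM v$block_change_tracking;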
Oracle Data Pump (impdp and expdp)
Use Ctrl+C to suspend an expdp job started from the command line.
To stop, kill, resume, attach to, cancel, or restart a Data Pump job, see:
http://blog.oracle48.nl/killing-and-resuming-datapump-expdp-and-impdp-jobs/
To enhance performance for large data imports with impdp, consider two things:
One: Using parallelism:
Degree = 2 x number of CPUs
Degree <= number of dump files
The degree of parallelism can be adjusted on the fly.
Oracle uses worker processes and parallel execution (PX) processes. Worker processes handle inter-segment parallelism using the DIRECT_PATH method, while PX processes handle intra-segment parallelism using the EXTERNAL_TABLE method. Oracle automatically chooses which method to use.
There are times when you can use a hidden parameter to force the DIRECT_PATH method only:
access_method=direct_path
Note: if direct_path cannot be used for a segment and the above parameter is set, data for that segment is not going to be loaded.
Two equally sized segments will load faster with access_method=direct_path.
Indexes are created one at a time by a single worker process, though each index build can use multiple parallel PX processes; index creation can still take a long time.
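For illustration, a hypothetical parallel import (directory, dump file, and schema names are placeholders; the %U wildcard lets each worker use its own dump file):

impdp system/pass directory=dump_dir dumpfile=exp_%U.dmp parallel=8 schemas=app_user logfile=imp_app_user.log

The degree can then be adjusted on the fly by attaching to the running job (the job name below assumes Data Pump's default naming):

impdp system/pass attach=SYS_IMPORT_SCHEMA_01
Import> PARALLEL=4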
Two: Setting the following parameters before the load (see the sketch after this list):
1. disk_asynch_io=true
2. run in NOARCHIVELOG mode (makes a big difference)
3. db_block_checking=false
4. db_block_checksum=false
5. disable block change tracking
6. size data files properly before loading; do not rely on autoextend
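A minimal sketch of applying these settings before a large load (NOARCHIVELOG requires a clean restart to mount mode, and disk_asynch_io is a static parameter, hence scope=spfile):

alter system set db_block_checking=false;
alter system set db_block_checksum=false;
alter system set disk_asynch_io=true scope=spfile;  -- static parameter: takes effect after restart
shutdown immediate
startup mount
alter database noarchivelog;
alter database open;
alter database disable block change tracking;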
Setting the EXCLUDE/INCLUDE parameters will also exclude/include dependent objects.
To check dependencies:
database_export_objects
schema_export_objects
table_export_objects
To import data into a database running in archivelog mode, either:
1. put the database in NOARCHIVELOG mode, or
2. a) create the tables with content=metadata_only exclude=index,constraint,ref_constraint
b) put the tables in NOLOGGING mode
c) import with table_exists_action=append
d) create the indexes with NOLOGGING
A hypothetical command sequence for option 2 follows below.
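All names below (directory, dump file, table, and index) are placeholders:

impdp app_user/pass directory=dump_dir dumpfile=exp.dmp content=metadata_only exclude=index,constraint,ref_constraint
alter table big_table nologging;
impdp app_user/pass directory=dump_dir dumpfile=exp.dmp content=data_only table_exists_action=append
create index big_table_ix on big_table(col1) nologging;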
TRACING:
TRACE=480300 for impdp and expdp gives extended log information without producing an excessive amount of trace output.
(MOS Note 286496.1)
DATAPUMP NETWORK MODE (without creating a dump file)
STEP 1: [IN REMOTE DB] grant the EXP_FULL_DATABASE role to the source schema
conn system/pass;
GRANT EXP_FULL_DATABASE to user_info;
STEP 2: [IN DESTINATION DB] Create destination user and grant necessary roles
conn system/pass;
CREATE USER user_info
IDENTIFIED BY pass
DEFAULT TABLESPACE USERS
TEMPORARY TABLESPACE TEMP;
GRANT CONNECT , RESOURCE TO user_info;
STEP 3: [IN DESTINATION DB] grant read/write on dump directory
conn system/pass;
GRANT read,write on DIRECTORY dump_directory to user_info;
A network import does not require any dump file; the directory is only required for writing the import log file.
STEP 4: [IN DESTINATION DB] create public DB Link
conn system/pass;
CREATE PUBLIC DATABASE LINK SATURN
connect to user_info identified by pass
using '(DESCRIPTION=(
ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=remote_db)
(PORT=1521)))
(CONNECT_DATA=(SERVICE_NAME=orcl.oracle.com)
(server=DEDICATED)))';
STEP 5: [IN DESTINATION DB MACHINE] execute impdp
impdp user_info/pass directory=dump_dir network_link=SATURN logfile=net_import_proddev.log EXCLUDE=GRANT,STATISTICS,SYNONYM,DB_LINK REMAP_SCHEMA=USER_INFO:USER_INFO
Thursday, April 5, 2012
Optimizer Statistics Restore
dba_optstat_operations: start and end time for all DBMS_STATS operations
dba_tab_stats_history: history of table statistics modifications for the past 31 days (the default retention). This means you can use the DBMS_STATS restore procedures to bring back optimizer statistics from up to 31 days ago.
dba_scheduler_jobs
If you use the ANALYZE command to collect statistics, this history is not stored in the data dictionary.
dbms_stats.restore_table_stats(ownname, tabname, as_of_timestamp)
dbms_stats.restore_schema_stats
dbms_stats.restore_database_stats
dbms_stats.restore_dictionary_stats
dbms_stats.restore_fixed_objects_stats
dbms_stats.restore_system_stats
dbms_stats.purge_stats(before_timestamp)
dbms_stats.alter_stats_history_retention(retention)
dbms_stats.get_stats_history_availability
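A minimal sketch of a restore, assuming SCOTT.EMP as a placeholder table and a three-day-old copy of the statistics:

-- check how far back statistics can be restored
select dbms_stats.get_stats_history_availability from dual;
-- restore the table's statistics as of three days ago
exec dbms_stats.restore_table_stats('SCOTT','EMP', systimestamp - interval '3' day)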