
A Reference Collection of Common Everyday Oracle Scripts

Original article by 郭鑫, 2022-08-23

TOP SQL from AWR:
Adjust instance_number and begin_interval_time to suit your environment.

col sql_text format a25
select a.sql_id,
a.plan_hash_value,
round(elapsed_time_delta /decode(executions_delta, 0, 1, executions_delta) / 1000000,2) per_time_s,
executions_delta executions,
round(buffer_gets_delta * 8 / 1024 /1024/decode(executions_delta, 0, 1, executions_delta)) per_gets_gb,
to_char(b.begin_interval_time, 'yyyy-mm-dd hh24:mi:ss') begin_time,
to_char(b.end_interval_time, 'yyyy-mm-dd hh24:mi:ss') end_time,
substr(c.sql_text,1,25) sql_text
from dba_hist_sqlstat a, dba_hist_snapshot b, dba_hist_sqltext c
where a.snap_id = b.snap_id
and a.instance_number=b.instance_number
and a.dbid = c.dbid
and a.sql_id = c.sql_id
and a.parsing_schema_name<>'SYS'
and a.instance_number=1
and b.begin_interval_time>to_date('2021-06-24 13:00:00','yyyy-mm-dd hh24:mi:ss')
and round(elapsed_time_delta /decode(executions_delta, 0, 1, executions_delta) / 1000000,2)>3
and a.module='JDBC Thin Client'
order by 3 desc;

TOP SQL from V$SQL + ASH:
Adjust sample_time to suit your environment. Also, do not change this script to query the GV$ views, or some SQL statements will be missed.

select sq.sql_id,
sq.child_number,
sq.executions,
round(elapsed_time / decode(sq.executions, 0, 1, sq.executions) /1000000,2) per_time_s,
round(buffer_gets / decode(sq.executions, 0, 1, sq.executions) * 8 / 1024/1024) per_gets_gb,
(select object_name from dba_objects where object_id = program_id) procedure,
(select object_name from dba_objects where object_id = obj_id) package,
round(ROWS_PROCESSED / decode(sq.executions, 0, 1, sq.executions)) rows_per_exe,
to_char(last_active_time,'yyyy-mm-dd hh24:mi:ss') last_active_time
from v$sql sq,
(select *
from (select sql_id,
sql_child_number,
coalesce(plsql_object_id, plsql_entry_object_id) obj_id,
count(*) cnt
from v$active_session_history ash
where sample_time >= to_date('2021-01-01 00:00:00','yyyy-mm-dd hh24:mi:ss')
and sql_id is not null
group by sql_id,
sql_child_number,
coalesce(plsql_object_id, plsql_entry_object_id)
order by 4 desc)
where rownum <=50) ex
where ex.sql_id = sq.sql_id
and ex.sql_child_number = sq.child_number
and sq.last_active_time >= to_date('2021-01-01 00:00:00','yyyy-mm-dd hh24:mi:ss')
--and sq.module = 'JDBC Thin Client'
and round(elapsed_time / decode(sq.executions, 0, 1, sq.executions) / 1000000,2)>3
order by 4 desc;


Custom execution-plan display:
select SQL_ID,CHILD_NUMBER from v$sql_shared_cursor where sql_id='3rqxn4wvgcqtm';

Adjust the SQL_ID and child_number to suit your environment.
set lines 500 pages 200
col Column for a40
col Filter for a50
col Operation for a30
with a as
(select /*+ materialize */ * from v$sql_plan
where sql_id = '7g79dctzhrb6y' and child_number=0)
select case
when access_predicates is not null or filter_predicates is not null then
'*' || id
else
' ' || id
end as "Id",
lpad(' ', level) || operation || ' ' || options "Operation",
object_name "Name",
(select round(sum(bytes / 1024 / 1024))
from dba_segments
where a.object_name = segment_name
and a.object_owner = owner) MB,
case
when object_type like '%TABLE%' then
regexp_count(a.projection, ']') || '/' ||
(select count(*)
from dba_tab_columns
where owner = a.object_owner
and table_name = a.object_name)
end as "Column",
replace(coalesce(access_predicates,filter_predicates),'"','') "Filter"
from a
start with id = 0
connect by prior id = parent_id;

TOP SQL belonging to a stored procedure:
select sq.sql_id,
sq.child_number,
sq.executions,
round(elapsed_time / decode(sq.executions, 0, 1, sq.executions) / 1000000,2) per_time_s,
round(buffer_gets / decode(sq.executions, 0, 1, sq.executions) *
(select value from v$parameter where name = 'db_block_size') / 1024 / 1024) per_gets_mb,
round(rows_processed / decode(sq.executions, 0, 1, sq.executions)) rows_per_exe,
to_char(last_active_time, 'yyyy-mm-dd hh24:mi:ss') last_active_time
from v$sql sq
where round(elapsed_time / decode(sq.executions, 0, 1, sq.executions) / 1000000,2) > 3
and (sql_id, child_number) in
(select sql_id, sql_child_number
from v$active_session_history ash, dba_objects b
where sample_time >=
to_date('2020-12-30 07:00:00', 'yyyy-mm-dd hh24:mi:ss')
and sql_id is not null
and coalesce(plsql_object_id, plsql_entry_object_id) =
b.object_id
and b.object_name = 'P_ASM_ACTION_MOENY')
order by 4 desc;



Find the slow SQL inside a stored procedure:
Adjust the time window, and change the object_name value to your stored procedure's name.
with a as (select /*+ materialize */ sql_id, sql_child_number
from v$active_session_history ash, dba_objects b
where sample_time >=
to_date('2020-12-30 00:00:00', 'yyyy-mm-dd hh24:mi:ss')
and sql_id is not null
and coalesce(plsql_object_id, plsql_entry_object_id) =
b.object_id
and b.object_name = 'P_ASM_ADDUSERSCORE')
select sq.sql_id,
sq.child_number,
sq.executions,
round(elapsed_time / decode(sq.executions, 0, 1, sq.executions) / 1000000,2) per_time_s,
round(buffer_gets / decode(sq.executions, 0, 1, sq.executions) *
(select value from v$parameter where name = 'db_block_size') / 1024 / 1024) per_gets_mb,
round(rows_processed / decode(sq.executions, 0, 1, sq.executions)) rows_per_exe,
to_char(last_active_time, 'yyyy-mm-dd hh24:mi:ss') last_active_time
from v$sql sq
where round(elapsed_time / decode(sq.executions, 0, 1, sq.executions) / 1000000,2) > 3
and (sql_id, child_number) in (select sql_id, sql_child_number from a)
order by 4 desc;

Check whether an execution plan has changed
select sql_id,PLAN_HASH_VALUE,CHILD_NUMBER,timestamp from v$sql_plan;
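A hedged narrower variant (the &sql_id substitution is a placeholder): limiting the output to one SQL and one row per child cursor makes plan changes easier to spot.

select sql_id, plan_hash_value, child_number, timestamp
from v$sql_plan
where sql_id = '&sql_id'
and id = 0
order by timestamp;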

Find columns that are good index candidates but are not yet indexed

exec dbms_stats.FLUSH_DATABASE_MONITORING_INFO();

select r.name owner,
o.name table_name,
c.name column_name,
equality_preds, --equality filter predicates
equijoin_preds, --equijoin predicates
nonequijoin_preds,
range_preds, --range filter predicates
like_preds,
null_preds,
timestamp
from sys.col_usage$ u,sys.obj$ o,sys.col$ c,sys.user$ r
where o.obj#=u.obj# and c.obj#=u.obj# and c.col#=u.intcol# and r.name='SCOTT' and o.name='TEST';

set lines 500 pages 200
select owner,column_name,num_rows,Cardinality,selectivity,'need index' as notice
from (select b.owner,a.column_name,
b.num_rows, a.num_distinct Cardinality,
round(a.num_distinct / decode(b.num_rows, 0, 1, b.num_rows) * 100, 2) selectivity
from dba_tab_col_statistics a, dba_tables b
where a.owner = b.owner
and a.table_name = b.table_name
and a.owner = upper('SCOTT')
and a.table_name = upper('TEST')) where selectivity>=20
and column_name not in (select column_name from dba_ind_columns where table_owner='SCOTT' and table_name='TEST')
and column_name in (select c.name from sys.col_usage$ u,sys.obj$ o,sys.col$ c,sys.user$ r
where o.obj#=u.obj# and c.obj#=u.obj# and c.col#=u.intcol# and r.name='SCOTT' and o.name='TEST');


Note: when a column has no histogram statistics, the CBO assumes the data is evenly distributed. This can lead to a wrong execution plan, so the Rows estimates shown in the plan may be far from reality.
select a.owner, a.table_name, a.column_name,
b.num_rows, a.num_distinct Cardinality,
round(a.num_distinct / decode(b.num_rows, 0, 1, b.num_rows) * 100, 2) selectivity,a.histogram
from dba_tab_col_statistics a, dba_tables b
where a.owner = b.owner
and a.table_name = b.table_name
and a.owner = upper('DEV_PAS')
and a.table_name = upper('t_contract_master');

exec dbms_stats.gather_table_stats('SCOTT','TEST',estimate_percent =>DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt=>'for all columns size auto',no_invalidate=>FALSE,degree=>12,granularity=>'ALL',cascade=>true);

exec dbms_stats.gather_table_stats('SCOTT','TEST',estimate_percent =>DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt=>'for columns owner size skewonly',no_invalidate=>FALSE,degree=>12,granularity=>'ALL',cascade=>true);

For SQL that uses bind variables, run a query like the following to check whether the data distribution is skewed:
select xxx,count(*) from test group by xxx order by 2 desc; --if the distribution is even, no action needed; if it is skewed, gather histograms on the query columns (WHERE-clause predicate columns with selectivity below 1%)

Find the columns that must have histograms gathered
set lines 500 pages 200
select a.owner,a.column_name,b.num_rows,a.num_distinct,trunc(num_distinct/num_rows*100,2) selectivity,'need gather histogram' as notice
from dba_tab_col_statistics a, dba_tables b
where a.owner = b.owner
and a.table_name = b.table_name
and a.owner = upper('SCOTT')
and a.table_name='TEST'
and num_distinct/num_rows<0.01
and (a.owner,a.table_name,a.column_name) in (select r.name owner,o.name table_name,c.name column_name from sys.col_usage$ u,sys.obj$ o,sys.col$ c,sys.user$ r
where o.obj#=u.obj# and c.obj#=u.obj# and c.col#=u.intcol# and r.name='SCOTT' and o.name='TEST') and a.histogram='NONE';



Accessing table rows through the rowids stored in an index is called a table lookup (TABLE ACCESS BY INDEX ROWID). Each lookup is a single-block read (one rowid maps to one data block), so too many lookups severely hurt SQL performance and an index scan can end up slower than a full table scan. Always watch the number of lookups, and especially the number of physical I/Os they cause.

A query of the form select * from test where ... always has to go back to the table; only when every column the query needs is contained in the index can the table lookup be skipped.

The clustering factor is used to judge how many physical I/Os the index's table lookups will cost.
select blocks from dba_tables where table_name='TEST' and owner='SCOTT';
select count(1) from scott.test;
select owner,index_name,clustering_factor from dba_indexes where owner='SCOTT' and index_name='IDX_ID';
If the clustering factor is close to the number of table blocks, the table data is stored in roughly the same order as the index, and index full scans / index range scans only need to read a small number of data blocks when they go back to the table.
If the clustering factor is close to the number of table rows, the table order differs greatly from the index order, and index full scans / index range scans read many more data blocks when they go back to the table.
An index unique scan returns a single row; an index fast full scan does not go back to the table at all.
A very large clustering factor drives up the physical I/O spent on table lookups; reduce the lookups by building a sensible composite index, or cache the table in the buffer cache.
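A minimal sketch of the composite-index idea, assuming a table scott.test with columns object_id and object_name (both names are illustrative): once the selected column is part of the index, the query is answered from the index alone and never goes back to the table.

create index scott.idx_test_id_name on scott.test(object_id, object_name);

--the plan for this query should now show only an INDEX RANGE SCAN,
--with no TABLE ACCESS BY INDEX ROWID step
select object_name from scott.test where object_id = 100;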



Statistics:

Table statistics (row count, block count, average row length):
select owner,table_name,num_rows,blocks,avg_row_len from dba_tables where owner='SCOTT' and table_name='TEST';
Column statistics (column cardinality, number of NULLs, histogram):
select column_name,num_distinct,num_nulls,num_buckets,histogram from dba_tab_col_statistics where owner='SCOTT' and table_name='TEST';

set lines 500 pages 200
col owner for a20
col table_name for a20
col COLUMN_NAME for a20
select a.owner, a.table_name, a.column_name,
b.num_rows, a.num_distinct Cardinality,
round(a.num_distinct / decode(b.num_rows, 0, 1, b.num_rows) * 100, 2) selectivity,
a.histogram,
a.num_buckets
from dba_tab_col_statistics a, dba_tables b
where a.owner = b.owner
and a.table_name = b.table_name
and a.owner = upper('SCOTT')
and a.table_name = upper('TEST');


select blevel,leaf_blocks,clustering_factor,status from dba_indexes where owner='SCOTT' and index_name='A';

begin
dbms_stats.gather_table_stats(ownname => 'SCOTT',
tabname =>'TEST',
estimate_percent =>dbms_stats.auto_sample_size,
method_opt =>'for all columns size auto', --'for all columns size repeat' re-gathers histograms only on columns that already have them
no_invalidate =>false,
degree =>8,
cascade =>true);
end;
/

'for columns xxx size skewonly'

Check a table's statistics sampling percentage
select owner,table_name,num_rows,sample_size,round(sample_size/num_rows*100) estimate_percent
from dba_tab_statistics
where owner='SCOTT' and table_name='TEST';

Check whether statistics are stale (statistics become stale once roughly 10% of a table's rows have changed)
select owner,table_name,num_rows,last_analyzed,stale_stats from dba_tab_statistics where (last_analyzed is null or stale_stats='YES') and owner=upper('&owner') and table_name=upper('&tabname');

select table_owner,table_name,inserts,UPDATES,deletes,timestamp from sys.dba_tab_modifications where table_name='TEST';


select owner,table_name,object_type,stale_stats,last_analyzed from dba_tab_statistics where (owner,table_name) in
(select object_owner,object_name from plan_table where object_type like '%TABLE%'
union
select table_owner,table_name from dba_indexes where
(owner,index_name) in (select object_owner,object_name from plan_table where object_type like '%INDEX%'));

select * from dba_tab_modifications where (table_owner,table_name) in (select object_owner,object_name from plan_table where object_type like '%TABLE%'
union
select table_owner,table_name from dba_indexes where
(owner,index_name) in (select object_owner,object_name from plan_table where object_type like '%INDEX%'));

Enable dynamic sampling
alter session set optimizer_dynamic_sampling=4; --levels range from 0 to 11; useful when the WHERE clause applies functions to columns, or uses LIKE or non-equality filters
/*+ dynamic_sampling(3) */ --for a table that has statistics but whose returned row count still cannot be estimated accurately; level 3 samples 64 randomly chosen data blocks
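An illustrative statement-level use of the hint (scott.test and the predicate are placeholders):

select /*+ dynamic_sampling(3) */ count(*)
from scott.test
where substr(object_name, 1, 3) = 'EMP';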

Extended statistics (only useful for equality predicates)
select dbms_stats.create_extended_stats('SCOTT','T','(A,B)') from dual;
method_opt=>'for columns SYS_STUNA$6DVXJXTP05EH56DTIR0X size skewonly'

Automatically gather statistics for one schema's tables (targets tables whose statistics are stale or that see heavy DML, re-gathers existing histograms with size repeat, and cascades to indexes)
set serveroutput on
declare
cursor stale_table is
select owner,segment_name,
case
when segment_size <1 then
100
when segment_size>=1 and segment_size <=5 then
50
when segment_size>5 then
30
end as percent,
6 as degree
from (select owner,segment_name,sum(bytes/1024/1024/1024) segment_size from dba_segments
where owner='SCOTT' and segment_name in (select table_name from dba_tab_statistics where (last_analyzed is null or stale_stats='YES')
and owner ='SCOTT')
group by owner,segment_name);
begin
dbms_stats.FLUSH_DATABASE_MONITORING_INFO();
for stale in stale_table loop
DBMS_STATS.GATHER_TABLE_STATS(ownname=>stale.owner,
tabname=>stale.segment_name,
estimate_percent=>stale.percent,
method_opt=>'for all columns size repeat',
degree=>stale.degree,
cascade=>true);
end loop;
end;
/


Find global temporary tables referenced by cached SQL --this query can be slow
select b.object_owner,b.object_name,a.temporary,sql_text from dba_tables a,v$sql_plan b,v$sql c where a.owner=b.object_owner and a.temporary='Y' and a.table_name=b.object_name and b.sql_id=c.sql_id;


Ways to obtain an execution plan
set autot trace   (executes the statement but does not display the result set)
set autot on      (executes the statement and displays the result set)
If consistent gets is far larger than the combined size of all table segments in the SQL, there is usually a lot of room for tuning.
rows processed is how many rows the query returns: a large value generally favors a hash join, a small value generally favors nested loops.

The estimated plan (written to the temporary table plan_table)

explain plan for

set linesize 500
set pagesize 200
col PLAN_TABLE_OUTPUT for a145
select * from table(dbms_xplan.display(NULL,NULL,'advanced -projection'));

The actual execution plan (from v$sql_plan)
alter session set statistics_level=all;

SELECT /*+ gather_plan_statistics */

set linesize 500
set pagesize 200
select * from table(dbms_xplan.display_cursor('','','ALLSTATS LAST'));

In the output, Buffers means logical reads and Reads means physical reads.

set linesize 200
set pagesize 200
col PLAN_TABLE_OUTPUT for a145
select * from table(dbms_xplan.display_cursor('22wnq6ybkjrmt',NULL,'PEEKED_BINDS'));
SELECT * FROM table(dbms_xplan.display_awr('gx7jf2zugtfk6'));


* table access by index rowid (rows are filtered again after the table lookup)


Access paths
table access full (multiblock reads -- db file scattered read; parallel full table scan -- direct path read)

Disable serial direct path reads
alter system set "_serial_direct_read"=false;

If an index range scan returns a lot of rows, consider eliminating the table lookups (for example with a covering index) or switching to a full table scan.

An index range scan can replace an index skip scan.
An index fast full scan can replace a full table scan.

Reading one block at a time from disk into the buffer cache is a single-block read; reading several blocks at once is a multiblock read. A single physical I/O transfers at most 1 MB (128 blocks of 8 KB). The fewer the I/Os, the better the access path.

Join methods

Nested loops: suited to joins that return a small amount of data. Each row from the driving table passes its join-column value to the driven table, so the driven table is scanned once per driving-table row; the driven table must therefore be accessed through an index (an index unique scan or index range scan).
/*+use_nl(a,b) leading(b) */ --join a and b with nested loops, with b as the driving table. In an outer join the preserved table is fixed as the driving table, so leading cannot change it. In the plan, the table closest to the NESTED LOOPS operator (its first child) is the driving table.
If the join returns few rows, nested loops fit best; if it returns many rows, a hash join fits best. For example:
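A hedged sketch of the hints above (emp/dept and the predicate are illustrative; the driven table emp should have an index on deptno):

select /*+ use_nl(a,b) leading(b) */ a.ename, a.sal, b.dname
from emp a, dept b
where a.deptno = b.deptno
and b.deptno = 10;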

Hash join: for equijoins that return a large amount of data. The smaller table is chosen as the driving (build) table; its selected columns and join column are read into the PGA and hashed on the join column to build a hash table. The driven (probe) table is then read (it does not need to fit in the PGA); its join column is hashed too and used to probe the hash table in the PGA, and matching rows are joined. Only equijoins are supported.
If the build table is too large for the PGA it spills to the temporary tablespace and hash-join performance drops. Neither table's join column needs an index.
/*+swap_join_inputs(a) */ changes which table is the hash-join build table.
With hash joins avoid select * so the build table takes less PGA; if the build table is large, consider running the query in parallel.
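A hedged sketch (emp/dept are illustrative): dept is the smaller table, and swap_join_inputs(d) forces it to be the build table hashed into the PGA.

select /*+ use_hash(e,d) swap_join_inputs(d) */ e.ename, e.sal, d.dname
from emp e, dept d
where e.deptno = d.deptno;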

Sort-merge join: handles non-equijoins. Both tables are first sorted on the join column, with the smaller table acting as the driving table; each join-column value taken from the driving table is matched against the sorted driven table, one match pass per driving-table row. (Both tables must be placed in the PGA for sorting.)

Cartesian join: two tables with no join condition return the product of their row counts. In multi-table joins a Cartesian join can also appear between two tables with no direct join condition, when the optimizer wrongly estimates that one of them returns a single row.
/*+ordered */ --join the tables in the order they appear in the FROM clause
/*+opt_param('_optimizer_mjc_enabled','false') */ --disable merge join Cartesian

Scalar subquery: a subquery that sits between SELECT and FROM. It behaves like a built-in nested loop, so the join column of the table inside the subquery must be indexed; it can be rewritten as an equivalent outer join.
select a.dname,a.loc,(select max(b.sal) from emp b where b.deptno=a.deptno) max_sal from dept a;
is equivalent to
select a.dname,a.loc,b.max_sal from dept a left join (select max(sal) max_sal,deptno from emp group by deptno) b on a.deptno=b.deptno;
If the outer table's join column is a foreign key and the subquery's join column is the primary key, the rewrite can be an inner join, because the foreign key contains no NULL values.

Semi-join: a join that returns data from only one of the two tables, usually written with IN or EXISTS. When the outer table is on the "1" side and the subquery table on the "n" side, rewriting it as an inner join requires a GROUP BY to deduplicate and performs worse than the semi-join.

Anti-join: returns only rows from the outer table, and only those that do NOT match the subquery table; typically NOT IN or NOT EXISTS.
If the NOT IN list or subquery contains a NULL, the whole predicate evaluates to NULL and the query returns no rows; NULLs inside an IN list do not affect the result.
An anti-join can be rewritten as an equivalent outer join, for example:
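A hedged sketch of the rewrite (dept/emp are illustrative): departments with no employees.

--anti-join form (if emp.deptno can be NULL, this returns no rows)
select d.* from dept d where d.deptno not in (select e.deptno from emp e);

--outer-join form, not affected by NULLs in emp.deptno
select d.*
from dept d left join emp e on d.deptno = e.deptno
where e.deptno is null;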

FILTER: similar to a nested loop and algorithmically close to a scalar subquery; its driving row source is fixed and cannot be changed.
Why eliminate FILTER? Because the driving row source is fixed, the execution plan is effectively fixed as well; if that fixed plan happens to be a bad one, a performance problem follows.
/*+no_unnest */ --do not unnest the subquery
A subquery written with IN can be unnested; one written with EXISTS cannot.
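An illustrative pair (emp/dept are placeholders): no_unnest keeps the subquery as a FILTER, unnest asks the optimizer to convert it into a semi-join.

--keep the subquery as a FILTER
select * from emp e
where e.deptno in (select /*+ no_unnest */ d.deptno from dept d where d.loc = 'DALLAS');

--unnest it into a semi-join
select * from emp e
where e.deptno in (select /*+ unnest */ d.deptno from dept d where d.loc = 'DALLAS');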


When a subquery or view joins several tables, view merging generally scrambles the join order inside that subquery or view.
/*+no_merge */ --prevent view merging
View merging cannot happen when the view contains certain "freezing" constructs (UNION, UNION ALL, START WITH ... CONNECT BY, ROWNUM, ROLLUP).
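A hedged sketch of no_merge (the inline view v over emp is illustrative): the hint keeps the aggregation inside the view instead of letting the optimizer merge it into the outer query block.

select /*+ no_merge(v) */ e.ename, e.sal, v.max_sal
from emp e,
(select deptno, max(sal) max_sal from emp group by deptno) v
where e.deptno = v.deptno;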

Predicate pushing: when a SQL statement contains a non-mergeable view and there are filter predicates (WHERE conditions) relevant to it, the CBO pushes those predicates into the view so that Oracle discards useless rows as early as possible, improving query performance.
A '*' in front of the VIEW step in the execution plan means the predicate was not pushed into the view.
Join-predicate pushdown appears as VIEW PUSHED PREDICATE; if the SQL runs slowly you can try turning it off first with alter session set "_push_join_predicate"=false
It is generally not advisable to use ROWNUM inside a view, because it blocks predicate pushing.
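A hedged sketch of join-predicate pushdown (push_pred is the documented hint; the UNION ALL view v is illustrative and non-mergeable): the join predicate e.deptno = v.deptno is pushed into each branch of the view, which should appear in the plan as VIEW PUSHED PREDICATE.

select /*+ push_pred(v) */ e.ename, v.deptno
from emp e,
(select deptno from dept
union all
select deptno from dept) v
where e.deptno = v.deptno
and e.ename = 'SCOTT';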

Tuning tips
Use UNION instead of OR, for example:
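An illustrative rewrite (emp and the two predicates are placeholders): splitting the OR over two different columns into two branches lets each branch use its own index.

--original: one OR predicate over two different columns
select * from emp where deptno = 10 or job = 'CLERK';

--rewritten so each branch can use its own index
select * from emp where deptno = 10
union
select * from emp where job = 'CLERK';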

Pagination queries
select *
from (select *
from (select a.*,rownum rn from (分页sql) a) where
rownum <=10)
where rn>=1;
Make the table being sorted the driving table.
If the pagination statement's plan shows SORT ORDER BY, the query is not exploiting the fact that an index is already sorted; create the right index. When creating it, put the equality-filter columns and the sort columns first and the non-equality columns last; if there is no equality filter, put the sort columns first (a sketch follows these notes).
If the sorted table is partitioned, check whether the query scans across partitions: create a global index if it does, a local index if it stays within one partition.
If the sort columns come from more than one table, the sort can only happen after the join, so SORT ORDER BY cannot be eliminated and this approach cannot help; the join has to be a hash join. Pagination over an outer join cannot be optimized this way either (assuming the driving/preserved table is a and the sorted table is b).
A pagination statement should not contain DISTINCT, GROUP BY, MAX, MIN, AVG, UNION, or UNION ALL; these require the join to finish before pagination and perform very badly.
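A hedged sketch of the index rule above, assuming a pagination query of the form "where status = :1 order by create_time" over a hypothetical orders table: the equality column comes first, the sort column second, so the plan can stop at the first 10 index entries instead of doing a SORT ORDER BY.

create index idx_orders_status_ctime on orders(status, create_time);

select *
from (select a.*, rownum rn
from (select * from orders where status = :1 order by create_time) a
where rownum <= 10)
where rn >= 1;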

Using analytic functions to optimize a self-join
select ename,deptno,sal from emp a where sal=(select max(sal) from emp b where a.deptno=b.deptno);
is equivalent to
select ename,deptno,sal from (select a.*,max(sal) over(partition by deptno) max_sal from emp a) where sal=max_sal;
The bigger the table, the more noticeable the benefit.

Optimizing a join between a very large table and a very small table
Table a is 30 MB, table b is 30 GB
select * from a,b where a.object_id=b.object_id;
The hash-join build table is kept in the PGA; broadcast the small table to all parallel query slaves.
/*+parallel(6) use_hash(a,b) pq_distribute(a none,broadcast) */ --the plan should show PX SEND BROADCAST --do not use this for two large tables
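Putting the hint and the query above together (a is the 30 MB table that gets broadcast):

select /*+ parallel(6) use_hash(a,b) pq_distribute(a none,broadcast) */ *
from a, b
where a.object_id = b.object_id;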

Optimizing a join between two large tables
Table a is 5 GB, table b is 10 GB
/*+parallel(6) use_hash(a,b) pq_distribute(b hash,hash) */ --the plan should show PX SEND HASH. For extremely large tables the PGA may still be too small and even a parallel hash join can struggle to finish; consider converting the tables to partitioned tables.

dblink optimization
/*+ driving_site(a) */ sends the local table to the remote site and runs the join there
If both a and b are large, so that data can be shipped neither from the remote site to the local site nor the other way around, create a local materialized view over the dblink.
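A hedged sketch of the driving_site usage above (the table names and the dblink remote_db are placeholders): a is the remote table, so the hint makes the remote database execute the join and ship back only the result.

select /*+ driving_site(a) */ a.owner, a.object_name, b.flag
from big_table@remote_db a, small_local_table b
where a.object_id = b.object_id;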

Slicing a table by rowid ranges (useful for deleting from very large tables)
select ' and rowid between ' || '''' ||
dbms_rowid.rowid_create(1,
b.data_object_id,
a.relative_fno,
a.block_id,
0) || '''' || ' and ' || '''' ||
dbms_rowid.rowid_create(1,
b.data_object_id,
a.relative_fno,
a.block_id + blocks -1,
999) || ''';'
from dba_extents a,dba_objects b
where a.segment_name=b.object_name
and a.owner=b.owner
and b.object_name='TEST'
and b.owner='SCOTT'
order by a.relative_fno,a.block_id;

and rowid between 'AAASJ8AAHAAAAFYAAA' and 'AAASJ8AAHAAAAFfAPn';
and rowid between 'AAASJ8AAHAAAAFgAAA' and 'AAASJ8AAHAAAAFnAPn';
.....

delete test where object_id>5000000 and rowid between 'AAASJ8AAHAAAAFYAAA' and 'AAASJ8AAHAAAAFfAPn';
delete test where object_id>5000000 and rowid between 'AAASJ8AAHAAAAFgAAA' and 'AAASJ8AAHAAAAFnAPn';
.....

Stored-procedure approach:
create or replace procedure p_rowid(range number,id number) is
cursor cur_rowid is
select dbms_rowid.rowid_create(1,
b.data_object_id,
a.relative_fno,
a.block_id,
0) rowid1,
dbms_rowid.rowid_create(1,
b.data_object_id,
a.relative_fno,
a.block_id + blocks -1,
999) rowid2
from dba_extents a,dba_objects b
where a.segment_name=b.object_name
and a.owner=b.owner
and b.object_name='TEST'
and b.owner='SCOTT'
and mod(a.extent_id, range) = id;
v_sql varchar2(4000);
begin
for cur in cur_rowid loop
v_sql := 'delete test where object_id > 100 and rowid between :1 and :2';
execute immediate v_sql
using cur.rowid1,cur.rowid2;
commit;
end loop;
end;
/

To split the table into 3 slices, open 3 sessions and run:
begin
p_rowid(3,0);
end;
/

begin
p_rowid(3,1);
end;
/

begin
p_rowid(3,2);
end;
/


Generating a temporary result set with WITH ... AS
with t1 as (select /*+ materialize */ ....) select xxx,xxx from t1;


Rewriting an outer join whose join condition contains OR with CASE WHEN: the child table must be on the "1" side of the relationship, and one CASE WHEN is needed for every column you want to show from the child table.
select e.*,d.deptno d2,d.loc
from emp e left join dept d on d.deptno=e.deptno and (d.deptno>=e.deptno and e.sal<1000 or e.ename like '%O%');
Rewritten as:
select e.*,
case
when (d.deptno>=e.deptno and e.sal<1000 or e.ename like '%O%') then
d.deptno
end d2,
case
when (d.deptno>=e.deptno and e.sal<1000 or e.ename like '%O%') then
d.loc
end loc
from emp e left join dept d on d.deptno=e.deptno;


If the parent/child tables are in a 1-to-n relationship:
select d.*,e.deptno d2,e.ename,e.sal
from dept d left join emp e on d.deptno=e.deptno and (d.deptno>=e.deptno and e.sal<1000 or e.ename like '%O%');
Rewritten as:
select d.*,e.deptno d2,e.ename,e.sal
from dept d left join (select a.deptno,b.ename,b.sal
from dept a,emp b where a.deptno=b.deptno and (a.deptno>=b.deptno and b.sal<1000 or b.ename like '%O%')) e on d.deptno=e.deptno;



Rewriting wmsys.wm_concat with LISTAGG
select a.id,wmsys.wm_concat(distinct(b.name)) groupname
from detail a left join gp b on b.id=a.id group by a.id;
Rewritten as:
select id,listagg(name,',') within
group (order by null)
from (select a.id,b.name from detail a left join gp b on b.id=a.id
group by a.id,b.name)
group by id;

Hints that tell the optimizer the real row count
/*+ CARDINALITY (a 100000) */
/*+ CARDINALITY (@a 100000) */
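An illustrative use (scott.test, scott.emp and the row count are placeholders): tell the CBO that alias a really returns about 100000 rows, so it sizes the join and join method accordingly.

select /*+ cardinality(a 100000) */ count(*)
from scott.test a, scott.emp b
where a.object_id = b.empno;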

sql monitor

set trimspool on
set arraysize 512
set trim on
set pagesize 0
set linesize 1000
set long 1000000
set longchunksize 1000000
spool sqlmon.html
select /*+ noparallel */ dbms_sqltune.report_sql_monitor (sql_id=>'0dubxndyxzmxf', report_level=>'ALL', type=>'ACTIVE') from dual;
spool off


