Archivelog generation per day (count and size in MB):
select trunc(completion_time) as "Date"
      ,count(*) as "Count"
      ,(sum(blocks * block_size)) / 1024 / 1024 as "MB"
from v$archived_log
group by trunc(completion_time);
SELECT TRUNC(COMPLETION_TIME, 'HH') ARCHIVED_DATE_HOUR,
       ROUND(SUM(BLOCKS * BLOCK_SIZE) / 1024 / 1024, 0) SIZE_IN_MB
FROM V$ARCHIVED_LOG
GROUP BY TRUNC(COMPLETION_TIME, 'HH')
ORDER BY 1;
col program for a10
col username for a10
select to_char(sysdate,'hh24:mi'), username, program, a.sid, a.serial#, b.name, c.value
from v$session a, v$statname b, v$sesstat c
where b.statistic# = c.statistic#
and c.sid = a.sid and b.name like 'redo%'
order by value;
select * from dba_tab_modifications
where table_owner <> 'SYS'
order by updates desc;
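Note that dba_tab_modifications is refreshed only periodically; to make it reflect recent DML before querying, you can flush the in-memory monitoring information first (a minimal sketch, assuming you have execute privilege on DBMS_STATS):
-- Flush pending DML monitoring info so dba_tab_modifications shows up-to-date counts
exec dbms_stats.flush_database_monitoring_info;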
I googled and found a couple of good queries and Metalink (My Oracle Support) notes to drill into this issue.
select s.username, s.osuser, s.status, s.sql_id, sr.* from
(select sid, round(value/1024/1024) as "RedoSize(MB)"
from v$statname sn, v$sesstat ss
where sn.name = 'redo size'
and ss.statistic# = sn.statistic#
order by value desc) sr,
v$session s
where sr.sid = s.sid
and rownum <= 10;
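Once you have a suspect sql_id from the query above, you can pull the statement text from v$sql (a minimal sketch; &sql_id is a SQL*Plus substitution variable you supply):
-- Look up the SQL text for a session's current sql_id
select sql_id, sql_text
from v$sql
where sql_id = '&sql_id';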
------------------------------------
select s.sid, n.name, s.value, sn.username, sn.program, sn.type, sn.module
from v$sesstat s
join v$statname n on n.statistic# = s.statistic#
join v$session sn on sn.sid = s.sid
where name like '%redo entries%'
order by value desc;
SQL: How to Find Sessions Generating Lots of Redo or Archive logs (Doc ID 167492.1)
Master Note: Troubleshooting Redo Logs and Archiving (Doc ID 1507157.1)
Troubleshooting High Redo Generation Issues (Doc ID 782935.1)
Archivelog generation on a daily basis:
set pages 1000
select trunc(COMPLETION_TIME,'DD') Day, thread#,
       round(sum(BLOCKS*BLOCK_SIZE)/1048576) MB,
       count(*) Archives_Generated
from v$archived_log
group by trunc(COMPLETION_TIME,'DD'), thread#
order by 1;
Archive log generation on an hourly basis:
set pages 1000
select trunc(COMPLETION_TIME,'HH') Hour, thread#,
       round(sum(BLOCKS*BLOCK_SIZE)/1048576) MB,
       count(*) Archives
from v$archived_log
group by trunc(COMPLETION_TIME,'HH'), thread#
order by 1;
The following script is useful to find the hourly archivelog switches that happened over the past week:
SELECT trunc(first_time) DAY,
to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') "00",
to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') "01",
to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') "02",
to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') "03",
to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') "04",
to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') "05",
to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') "06",
to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') "07",
to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') "08",
to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') "09",
to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') "10",
to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') "11",
to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') "12",
to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') "13",
to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') "14",
to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') "15",
to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') "16",
to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') "17",
to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') "18",
to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') "19",
to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') "20",
to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') "21",
to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') "22",
to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') "23"
from v$log_history
where first_time > sysdate - 8
GROUP by trunc(first_time)
order by trunc(first_time)
/
##################################################################################
> Check which sessions are generating the most redo.
rem -----------------------------------------------------------------------
rem Purpose: Find the sessions whose transactions are generating the most redo.
rem The v$sess_io view contains the column BLOCK_CHANGES, which indicates how many
rem blocks have been changed by the session. High values indicate a session
rem generating lots of redo.
rem Run the query multiple times and examine the delta between each occurrence
rem of BLOCK_CHANGES. Large deltas indicate high redo generation by the session.
rem -----------------------------------------------------------------------
set pages 1000
set lines 140
SELECT s.sid, s.serial#, s.username, s.program,
i.block_changes
FROM v$session s, v$sess_io i
WHERE s.sid = i.sid
ORDER BY 5 desc, 1, 2, 3, 4;
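Since BLOCK_CHANGES is cumulative, the delta between two samples is what matters. One way to compare two samples (a minimal sketch, assuming you can create a scratch table; redo_snap is a hypothetical name):
-- Take a first sample of block changes per session
create table redo_snap as
select s.sid, s.serial#, i.block_changes
from v$session s, v$sess_io i
where s.sid = i.sid;
-- ...wait a few minutes, then compute the delta per session
select cur.sid, cur.serial#,
       cur.block_changes - prev.block_changes as delta_block_changes
from (select s.sid, s.serial#, i.block_changes
      from v$session s, v$sess_io i
      where s.sid = i.sid) cur,
     redo_snap prev
where cur.sid = prev.sid
  and cur.serial# = prev.serial#
order by delta_block_changes desc;
-- Clean up the scratch table
drop table redo_snap;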
############################# Size of the archives generated per day #############################
select decode(grouping(trunc(COMPLETION_TIME)), 1, 'TOTAL', trunc(COMPLETION_TIME)) TIME,
       sum(BLOCKS * BLOCK_SIZE)/1024/1024 SIZE_MB
from v$archived_log
group by cube (trunc(COMPLETION_TIME))
order by 1
/
In my understanding, 60 log switches per hour is not good practice, and I would recommend increasing the redo log size as you said, from 1G to 5G.
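Redo log files cannot be resized in place; the usual approach is to add new, larger groups and then drop the old ones once they are no longer CURRENT or ACTIVE. A minimal sketch, assuming group numbers 4-6 are free and the file paths are placeholders you would adapt:
-- Add new, larger redo log groups (paths and group numbers are examples only)
alter database add logfile group 4 ('/u01/oradata/ORCL/redo04.log') size 5g;
alter database add logfile group 5 ('/u01/oradata/ORCL/redo05.log') size 5g;
alter database add logfile group 6 ('/u01/oradata/ORCL/redo06.log') size 5g;
-- Switch and checkpoint until none of the old groups is CURRENT or ACTIVE
alter system switch logfile;
alter system checkpoint;
-- Verify group status and sizes before dropping anything
select group#, bytes/1024/1024 mb, status from v$log;
-- Drop the old (smaller) groups once they show INACTIVE
alter database drop logfile group 1;
alter database drop logfile group 2;
alter database drop logfile group 3;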
Original article: https://www.cnblogs.com/yaoyangding/p/14609126.html