EMB11203> cat setup.sql
drop table row_mig_chain_demo;
CREATE TABLE row_mig_chain_demo (
x int PRIMARY KEY,
a CHAR(2000),
b CHAR(2000),
c CHAR(2000),
d CHAR(2000),
e CHAR(2000)
);
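Once a, b and c are filled, each row is roughly 6000 bytes (three CHAR(2000) columns plus overhead), so whether it merely migrates or truly chains depends on the block size. A quick sanity check, just a sketch (assuming the relevant tablespace is visible in USER_TABLESPACES):

-- Sketch, not part of the original scripts: check the block size; a ~6000-byte
-- row migrates rather than chains only if it still fits in a single block.
select tablespace_name, block_size
from   user_tablespaces;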
EMB11203> cat data.sql
var pk number;
begin
select nvl(max(x),0) into :pk from row_mig_chain_demo;
end;
/
Prompt In case I want to add more rows later, I just re-execute this script.
INSERT INTO row_mig_chain_demo (x)
select rownum+:pk from dual connect by rownum <= 100;
COMMIT;
Prompt migrate the rows.
UPDATE row_mig_chain_demo SET a = rownum, b = rownum, c = rownum;
commit;
analyze table ROW_MIG_CHAIN_DEMO compute statistics;
SQL> select table_name,num_rows,blocks,CHAIN_CNT
2 from user_tables where table_name='ROW_MIG_CHAIN_DEMO';
TABLE_NAME NUM_ROWS BLOCKS CHAIN_CNT
------------------------------ ---------- ---------- ----------
ROW_MIG_CHAIN_DEMO 100 103 99
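ANALYZE is used here on purpose: DBMS_STATS does not populate CHAIN_CNT. If you also want to see which rows are affected rather than just how many, ANALYZE can list them into the standard CHAINED_ROWS table, something like this (a sketch, assuming the utlchain.sql table has been created):

-- Sketch: list the affected head rowids instead of just counting them.
@?/rdbms/admin/utlchain.sql
analyze table row_mig_chain_demo list chained rows into chained_rows;
select count(*) from chained_rows where table_name = 'ROW_MIG_CHAIN_DEMO';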
set pagesize 200 lines 180
select x,DBMS_ROWID.ROWID_RELATIVE_FNO (rowid) , dbms_rowid.ROWID_BLOCK_NUMBER (rowid)
from row_mig_chain_demo
order by 2,3;
X DBMS_ROWID.ROWID_RELATIVE_FNO(ROWID) DBMS_ROWID.ROWID_BLOCK_NUMBER(ROWID)
---------- ------------------------------------ ------------------------------------
1 4 248213
2 4 248213
3 4 248213
...
100 4 248213
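The block dump that follows was presumably produced with something along these lines (the exact commands are not in the transcript, so treat this as a sketch):

-- Sketch: dump the blocks the rows live in, then locate the trace file.
alter session set tracefile_identifier = 'row_mig_chain_demo';
alter system dump datafile 4 block min 248213 block max 248215;
select value from v$diag_info where name = 'Default Trace File';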
Start dump data blocks tsn: 4 file#:4 minblk 248213 maxblk 248215
Block dump from cache:
Dump of buffer cache at level 4 for tsn=4 rdba=17025429
BH (0x6a3884b8) file#: 4 rdba: 0x0103c995 (4/248213) class: 1 ba: 0x6bcc4000
...
===============
tsiz: 0x1f98
hsiz: 0xda
pbl: 0x2b017976ba64
76543210
flag=--------
ntab=1
nrow=100 <-- 100 rows.
...
block_row_dump:
tab 0, row 0, @0x5c1
tl: 6015 fb: --H-FL-- lb: 0x1 cc: 4 <-- Head, First and Last Piece
...
tab 0, row 1, @0x5b8
tl: 9 fb: --H----- lb: 0x1 cc: 0 <-- Only the head
nrid: 0x0103c996.0 <-- Where is the next piece.
tab 0, row 2, @0x5af
tl: 9 fb: --H----- lb: 0x1 cc: 0
nrid: 0x0103c997.0
....
tab 0, row 99, @0x246
tl: 9 fb: --H----- lb: 0x1 cc: 0
nrid: 0x0103ed84.0
end_of_block_dump
Dump of buffer cache at level 4 for tsn=4 rdba=17025430
BH (0x6a30e928) file#: 4 rdba: 0x0103c996 (4/248214) class: 1 ba: 0x6b048000
...
===============
tsiz: 0x1f80
hsiz: 0x14
pbl: 0x2b017976ba7c
76543210
flag=--------
ntab=1
nrow=1 <-- 1 row
...
block_row_dump:
tab 0, row 0, @0x7fb
tl: 6021 fb: ----FL-- lb: 0x2 cc: 4 <-- First and Last Piece
hrid: 0x0103c995.1 <-- Where is the head
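The nrid (next rowid) and hrid (head rowid) values are data block addresses and can be decoded back into file and block numbers, for example (a sketch using DBMS_UTILITY, not part of the original dump):

-- Sketch: decode nrid 0x0103c996 into a relative file number and block number.
select dbms_utility.data_block_address_file(to_number('0103c996','xxxxxxxx'))  file#,
       dbms_utility.data_block_address_block(to_number('0103c996','xxxxxxxx')) block#
from   dual;
-- This should come back as file 4, block 248214, i.e. the next block of the table.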
EMB11203> cat sdml.sql
update row_mig_chain_demo
set a=rowid||'-'||to_char(sysdate,'yyyymmddhh24miss')
/
EMB11203> cat pdml.sql
alter session enable parallel dml;
alter session force parallel query;
update /*+ parallel */ row_mig_chain_demo
set a=rowid||'-'||to_char(sysdate,'yyyymmddhh24miss')
/
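Before comparing the statistics it is worth confirming that the update really ran as parallel DML; one way, as a sketch, is to check v$pq_sesstat from the same session right after the update:

-- Sketch: 'DML Parallelized' should show 1 for the last statement if PDML kicked in.
select statistic, last_query, session_total
from   v$pq_sesstat
where  statistic in ('Queries Parallelized', 'DML Parallelized');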
EMB11203> cat d.sql
set pages 2000 lines 180
SELECT a.name, b.value value_QC
FROM v$statname a, v$mystat b
WHERE a.statistic# = b.statistic#
AND lower(a.name) = 'table fetch continued row';
SELECT a.name, b.value value_system
FROM v$statname a, v$sysstat b
WHERE a.statistic# = b.statistic#
AND lower(a.name) = 'table fetch continued row';
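d.sql looks at both v$mystat and v$sysstat because with PDML the work is done by the PX slave sessions, whose counters never show up in the coordinator's v$mystat (v$mystat only covers the current session). While a parallel statement is still running, the slaves' counters can also be read directly; a sketch (it returns nothing once the slaves have been released):

-- Sketch, not part of the original post: 'table fetch continued row' as seen
-- by the PX sessions attached to this query coordinator.
select s.sid, s.value
from   v$px_session p, v$sesstat s, v$statname n
where  p.qcsid       = sys_context('userenv', 'sid')
and    s.sid         = p.sid
and    s.statistic#  = n.statistic#
and    lower(n.name) = 'table fetch continued row';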
SQL> @d
NAME VALUE_QC
---------------------------------------------------------------- ----------
table fetch continued row 0
NAME VALUE_SYSTEM
---------------------------------------------------------------- ------------
table fetch continued row 482
SQL> @sdml
100 rows updated.
SQL> @d
NAME VALUE_QC
---------------------------------------------------------------- ----------
table fetch continued row 0
NAME VALUE_SYSTEM
---------------------------------------------------------------- ------------
table fetch continued row 482
SQL> commit;
Commit complete.
SQL> @d
NAME VALUE_QC
---------------------------------------------------------------- ----------
table fetch continued row 0
NAME VALUE_SYSTEM
---------------------------------------------------------------- ------------
table fetch continued row 482
NAME VALUE_QC
---------------------------------------------------------------- ----------
table fetch continued row 0
NAME VALUE_SYSTEM
---------------------------------------------------------------- ------------
table fetch continued row 602
SQL> @pdml
Session altered.
Session altered.
100 rows updated.
SQL> @d
NAME VALUE_QC
---------------------------------------------------------------- ----------
table fetch continued row 0
NAME VALUE_SYSTEM
---------------------------------------------------------------- ------------
table fetch continued row 700
SQL> commit;
Commit complete.
SQL> @d
NAME VALUE_QC
---------------------------------------------------------------- ----------
table fetch continued row 98
NAME VALUE_SYSTEM
---------------------------------------------------------------- ------------
table fetch continued row 700
The execution plan shows a full table scan in both cases, so why did the parallel UPDATE do 'table fetch continued row' when the serial one did not?
The explanation given by development is that for some operations, PDML in particular, the slaves need to do a head piece scan in order to avoid certain deadlock scenarios, while a serial operation does a row piece scan.
Why do I care whether a head piece scan or a row piece scan is done?
This is particularly bad on Exadata, as customers like to have very wide tables (more than 255 columns, which are automatically chained) and like to do PDML.
On an Exadata system, the wait event will be 'cell single block physical read'.
There is hope though: Smart Scan can cope with some cases of chained rows as long as the head and the row pieces are within the 1MB boundary of the I/O. But do not tempt your luck; do your best not to produce chained or migrated rows, it is in everyone's best interest.
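If you want to know how exposed you are, a quick place to start is to look for tables wider than 255 columns and for tables with a non-zero CHAIN_CNT, for example (a sketch; remember CHAIN_CNT is only populated by ANALYZE):

-- Sketch: tables with more than 255 columns are intra-block chained by design.
select table_name, count(*) column_count
from   user_tab_columns
group  by table_name
having count(*) > 255
order  by column_count desc;

-- Chained/migrated row counts, where ANALYZE has been run.
select table_name, num_rows, chain_cnt
from   user_tables
where  chain_cnt > 0;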