Importing Selected
Tables from a Schema-Mode Dump, Remapping Schema and Tablespace
Source database/tablespace used, e.g.: ORADB1/TBS1
Target database/tablespace used, e.g.: ORADB2/TBS2
Create the test
setup in the source environment:
-- Source-environment setup: two locally managed tablespaces plus the
-- exporting user (TEST1) and a second test user (TEST2).
CREATE TABLESPACE tbs1
    DATAFILE '/u02/app/oracle/oradata/oradb1/TBS1.dbf' SIZE 50M REUSE
    AUTOEXTEND ON NEXT 10M MAXSIZE UNLIMITED
    EXTENT MANAGEMENT LOCAL
    SEGMENT SPACE MANAGEMENT AUTO;

CREATE TABLESPACE tbs2
    DATAFILE '/u02/app/oracle/oradata/oradb1/TBS2.dbf' SIZE 50M REUSE
    AUTOEXTEND ON NEXT 10M MAXSIZE UNLIMITED
    EXTENT MANAGEMENT LOCAL
    SEGMENT SPACE MANAGEMENT AUTO;

-- Clean up leftovers from a previous run (ORA-01918 on a first run is
-- harmless and can be ignored).
DROP USER test1 CASCADE;
DROP USER test2 CASCADE;

-- Implicit user creation via GRANT ... IDENTIFIED BY is desupported in
-- Oracle 12c and later; create the users explicitly, then grant.
-- QUOTA replaces the pre-12c side effect of the RESOURCE role granting
-- UNLIMITED TABLESPACE.
CREATE USER test1 IDENTIFIED BY test
    DEFAULT TABLESPACE tbs1
    QUOTA UNLIMITED ON tbs1;
GRANT CONNECT, RESOURCE, DBA TO test1;

CREATE USER test2 IDENTIFIED BY test;
GRANT CONNECT, RESOURCE, DBA TO test2;
Create the test
setup in the target environment:
-- Target-environment setup: mirror tablespaces plus the two users; the
-- import will land TEST1's tables in TEST2/TBS2 via the remaps.
CREATE TABLESPACE tbs1
    DATAFILE '/u02/app/oracle/oradata/oradb2/TBS1.dbf' SIZE 50M REUSE
    AUTOEXTEND ON NEXT 10M MAXSIZE UNLIMITED
    EXTENT MANAGEMENT LOCAL
    SEGMENT SPACE MANAGEMENT AUTO;

CREATE TABLESPACE tbs2
    DATAFILE '/u02/app/oracle/oradata/oradb2/TBS2.dbf' SIZE 50M REUSE
    AUTOEXTEND ON NEXT 10M MAXSIZE UNLIMITED
    EXTENT MANAGEMENT LOCAL
    SEGMENT SPACE MANAGEMENT AUTO;

-- Clean up leftovers from a previous run (ORA-01918 on a first run is
-- harmless and can be ignored).
DROP USER test1 CASCADE;
DROP USER test2 CASCADE;

-- Implicit user creation via GRANT ... IDENTIFIED BY is desupported in
-- Oracle 12c and later; create the users explicitly, then grant.
CREATE USER test1 IDENTIFIED BY test;
GRANT CONNECT, RESOURCE, DBA TO test1;

-- TEST2 receives the remapped segments, so give it quota on TBS2
-- (the pre-12c RESOURCE role no longer implies UNLIMITED TABLESPACE).
CREATE USER test2 IDENTIFIED BY test
    QUOTA UNLIMITED ON tbs2;
GRANT CONNECT, RESOURCE, DBA TO test2;
Create a directory
object on both the source and target databases:
-- Directory object the Data Pump jobs read/write dump and log files in.
CREATE OR REPLACE DIRECTORY dtpump AS '/u02/testcases/';

-- Grant only the two test users, not PUBLIC: a PUBLIC read/write grant
-- on a server directory lets every database user read and overwrite
-- files in it.
GRANT READ, WRITE ON DIRECTORY dtpump TO test1, test2;
Insert test data
into the TEST1 schema in the source database:
CONN test1/test

-- Recreate the ten test tables (ORA-00942 from the drops is harmless on
-- a first run).
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
DROP TABLE t4;
DROP TABLE t5;
DROP TABLE t6;
DROP TABLE t7;
DROP TABLE t8;
DROP TABLE t9;
DROP TABLE t10;

CREATE TABLE t1  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t2  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t3  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t4  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t5  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t6  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t7  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t8  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t9  (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));
CREATE TABLE t10 (c1 NUMBER, c2 NUMBER, c3 VARCHAR2(20));

-- Load 10,000 identical rows per table. Set-based INSERT ... SELECT
-- with a CONNECT BY row generator replaces the original row-by-row
-- PL/SQL loop (100,000 single-row INSERT executions).
INSERT INTO t1  SELECT 1, 1, 'test1'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t2  SELECT 1, 1, 'test2'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t3  SELECT 1, 1, 'test3'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t4  SELECT 1, 1, 'test4'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t5  SELECT 1, 1, 'test5'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t6  SELECT 1, 1, 'test6'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t7  SELECT 1, 1, 'test7'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t8  SELECT 1, 1, 'test8'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t9  SELECT 1, 1, 'test9'  FROM dual CONNECT BY LEVEL <= 10000;
INSERT INTO t10 SELECT 1, 1, 'test10' FROM dual CONNECT BY LEVEL <= 10000;

COMMIT;
Datapump
export/import using EXPDP/IMPDP commands:
# Schema-mode export of TEST1; dump file and log go to the DTPUMP
# directory object.
expdp test1/test schemas=TEST1 directory=DTPUMP dumpfile=TEST.dmp \
  logfile=expdpTEST1.log

# Import only tables T1 and T2, remapping both the schema
# (TEST1 -> TEST2) and the tablespace (TBS1 -> TBS2). The original
# command omitted REMAP_TABLESPACE, so the segments would have been
# created in TBS1 on the target despite the article's stated goal.
impdp test2/test tables=TEST1.T1,TEST1.T2 directory=DTPUMP \
  remap_schema=TEST1:TEST2 remap_tablespace=TBS1:TBS2 \
  dumpfile=TEST.dmp logfile=impdpTEST2.log
Datapump
export/import using DATAPUMP API - DATAPUMP EXPORT
-- Data Pump API export: schema-mode export of TEST1 through
-- DBMS_DATAPUMP instead of the expdp client. Run as a user with the
-- required Data Pump privileges.
set
serveroutput ON
DECLARE
h1 NUMBER; -- Data Pump job handle returned by OPEN
BEGIN
dbms_output.Put_line('datapump open');
-- Open a schema-mode export job on the local database (no db link).
h1 := dbms_datapump.OPEN(operation =>
'EXPORT', job_mode => 'SCHEMA',
remote_link => NULL,
job_name => 'TEST1_EXPORT',
version => 'LATEST'
);
dbms_output.Put_line('add dump file');
-- %U expands to a per-file sequence number so each parallel worker
-- gets its own dump file.
dbms_datapump.Add_file(handle => h1,
filename => 'test_%U.dmp',
directory => 'DTPUMP');
dbms_output.Put_line('add parallelism');
dbms_datapump.Set_parallel(handle => h1,
degree => 2);
dbms_output.Put_line('add log file');
dbms_datapump.Add_file(handle => h1,
filename => 'test_exp.log',
directory => 'DTPUMP', filetype =>
dbms_datapump.ku$_file_type_log_file);
dbms_output.Put_line('before filter');
-- Export only the TEST1 schema. The filter value must itself be a
-- quoted SQL string, hence the doubled quotes.
dbms_datapump.Metadata_filter(handle =>
h1, name => 'SCHEMA_LIST', value =>
'''TEST1''');
dbms_output.Put_line('start job');
dbms_datapump.Start_job(h1);
dbms_output.Put_line('detach job');
-- Detach immediately; the job continues server-side and writes its
-- progress to test_exp.log.
dbms_datapump.Detach(h1);
END;
/
Datapump
export/import using DATAPUMP API - DATAPUMP IMPORT
SET serveroutput ON

-- Data Pump API import: table-mode import of TEST1.T1 and TEST1.T2
-- from the export dump, remapping schema TEST1 -> TEST2 and tablespace
-- TBS1 -> TBS2. (The original listing's line-wrapped "-- the target
-- schema" comment left the bare token "schema" outside the comment,
-- which made the block fail to compile; fixed here.)
DECLARE
    h1                         NUMBER;                          -- Data Pump job handle
    operation                  VARCHAR2(4000) := 'IMPORT';
    table_exists_action        VARCHAR2(4000) := 'SKIP';        -- leave pre-existing tables alone
    job_parallel               NUMBER         := 2;
    remap_schema_old_value     VARCHAR2(4000) := 'TEST1';       -- schema that has to be remapped
    remap_schema_new_value     VARCHAR2(4000) := 'TEST2';       -- target schema
    remap_tablespace_old_value VARCHAR2(4000) := 'TBS1';        -- tablespace that has to be remapped
    remap_tablespace_new_value VARCHAR2(4000) := 'TBS2';        -- target tablespace
BEGIN
    h1 := dbms_datapump.OPEN(operation => 'IMPORT',
                             job_mode  => 'TABLE',
                             job_name  => 'TABLE_IMPORT3');

    -- Dump file(s) written by the export job, plus a fresh import log.
    dbms_datapump.Add_file(handle    => h1,
                           filename  => 'test_%U.dmp',
                           directory => 'DTPUMP',
                           filetype  => dbms_datapump.ku$_file_type_dump_file);
    dbms_datapump.Add_file(handle    => h1,
                           filename  => 'test_imp.log',
                           directory => 'DTPUMP',
                           filetype  => dbms_datapump.ku$_file_type_log_file);

    dbms_output.Put_line('1. remap USR...');
    IF ( operation = 'IMPORT' )
       AND ( remap_schema_old_value IS NOT NULL
             AND remap_schema_new_value IS NOT NULL ) THEN
        dbms_output.Put_line('usr');
        dbms_datapump.Metadata_remap(handle    => h1,
                                     name      => 'REMAP_SCHEMA',
                                     old_value => remap_schema_old_value,
                                     value     => remap_schema_new_value);
    END IF;

    -- Restrict the import to tables T1 and T2 of schema TEST1.
    dbms_datapump.Metadata_filter(handle => h1,
                                  name   => 'SCHEMA_EXPR',
                                  value  => 'IN (''TEST1'')');
    dbms_datapump.Metadata_filter(handle      => h1,
                                  name        => 'NAME_EXPR',
                                  value       => 'IN (''T1'', ''T2'')',
                                  object_type => 'TABLE');

    dbms_output.Put_line('2. remap TBS...');
    IF ( operation = 'IMPORT' )
       AND ( remap_tablespace_old_value IS NOT NULL
             AND remap_tablespace_new_value IS NOT NULL ) THEN
        dbms_output.Put_line('tbs');
        dbms_datapump.Metadata_remap(handle    => h1,
                                     name      => 'REMAP_TABLESPACE',
                                     old_value => remap_tablespace_old_value,
                                     value     => remap_tablespace_new_value);
    END IF;

    IF operation = 'IMPORT' THEN
        dbms_datapump.Set_parameter(handle => h1,
                                    name   => 'TABLE_EXISTS_ACTION',
                                    value  => table_exists_action);
    END IF;

    IF job_parallel IS NOT NULL THEN
        dbms_datapump.Set_parallel(h1, job_parallel);
    END IF;

    -- Start the DATAPUMP job and detach; it continues server-side and
    -- logs to test_imp.log.
    dbms_datapump.Start_job(h1);
    dbms_datapump.Detach(h1);
END;
/
Verify the
movement of the tables with the following query:
-- Confirm which owner and tablespace each test segment landed in after
-- the import.
COL owner FOR a12
COL segment_name FOR a20
COL tablespace_name FOR a20
SET lines 200 pages 100

SELECT
    owner,
    segment_name,
    tablespace_name
FROM dba_segments
WHERE owner IN ('TEST1', 'TEST2')
ORDER BY segment_name;
No comments:
Post a Comment
Leave a Reply...