purpose: export production data to UAT
######for one owner table
1. step 1 collect dmp file
-- Prompt for the schema (owner) whose table will be exported; stored in
-- substitution variable v_owner for use later in the script.
ACCEPT v_owner PROMPT 'please input v_owner:'
define v_owner;
pause
-- Prompt for the table name within that schema.
ACCEPT v_table PROMPT 'please input v_table:'
define v_table;
REM srdc_rman_backup_output.sql - collect RMAN information with backup.
REM Tag used to build the spool file name below.
define SRDCNAME='EXPDP_BACKUP'
REM Emit the diagnostic report as preformatted HTML; suppress terminal noise
REM so only the query results reach the spool file.
SET MARKUP HTML ON PREFORMAT ON
set TERMOUT off FEEDBACK off VERIFY off TRIMSPOOL on HEADING off
REM NEW_VALUE captures the computed name from the next query into the
REM SRDCSPOOLNAME substitution variable (NOPRINT hides the row itself).
COLUMN SRDCSPOOLNAME NOPRINT NEW_VALUE SRDCSPOOLNAME
select 'SRDC_'||upper('&&SRDCNAME')||'_'||upper(instance_name)||'_'||
to_char(sysdate,'YYYYMMDD_HH24MISS') SRDCSPOOLNAME from v$instance;
set TERMOUT on MARKUP html preformat on
REM
REM Double dot: first terminates the substitution variable, second is the
REM literal dot before the htm extension.
spool &&SRDCSPOOLNAME..htm
REM Report banner: one UNION ALL buffer executed by the trailing slash.
REM Fix: the original was missing UNION ALL between the individual SELECTs,
REM so the accumulated buffer failed with ORA-00933 when '/' ran it.
select '+----------------------------------------------------+' from dual
union all
select '| Diagnostic-Name: '||'&&SRDCNAME' from dual
union all
select '| Timestamp: '||to_char(systimestamp,'YYYY-MM-DD HH24:MI:SS TZH:TZM') from dual
union all
select '| Machine: '||host_name from v$instance
union all
select '| Version: '||version from v$instance
union all
select '| DBName: '||name from v$database
union all
select '| Instance: '||instance_name from v$instance
/
-- Diagnostics section: echo each statement into the spool with its results.
set pagesize 50000;
set echo on;
set feedback on;
-- Directory objects available for expdp/impdp DIRECTORY= parameters.
select directory_name,directory_path from dba_directories;
-- Patch/upgrade history of this database.
select * from dba_registry_history;
-- Object count owned by open, non-system accounts (candidate export schemas).
select count(*) from dba_objects where owner IN
(select username from dba_users where account_status='OPEN' and username not in('SYS','SYSTEM','OPER','BACKUPDB','C##OPER','PDBADMIN'));
-- Invalid objects in those schemas (baseline before export/import).
select owner,object_name from dba_objects where owner IN
(select username from dba_users where account_status='OPEN' and username not in ('SYS','SYSTEM','OPER','BACKUPDB','C##OPER','PDBADMIN'))
and statuS='INVALID';
-- Object counts broken down by type and owner for the same schemas.
select count(*),object_type,owner from dba_objects where owner IN
(select username from dba_users where account_status='OPEN' and username not in
('SYS','SYSTEM','OPER','BACKUPDB','C##OPER','PDBADMIN'))
group by object_type,owner;
-- Total segment size (MB) for owners matching %DATA% (site naming convention).
select SUM(bytes)/1024/1024 "M" from dba_segments where owner like '%DATA%';
-- Database character set: must match NLS_LANG on the export/import client.
SELECT * FROM v$nls_parameters WHERE PARAMETER='NLS_CHARACTERSET';
-- Echo the values entered at the ACCEPT prompts into the report.
SELECT '&v_owner' from dual;
SELECT '&v_table' from dual;
-- Size (MB) and row count of the specific table being exported.
select SUM(bytes)/1024/1024 "M" from dba_segments where owner=upper('&v_owner') and segment_name=upper('&v_table');
select count(*) from &v_owner..&v_table;
spool off
set markup html off preformat off
###### for all owner
define SRDCNAME='DB_BACKUP'
select '+-------------dump info--------------------------+' from dual;
select '+-------------tablespace info--------------------------+' from dual;
-- Tablespace usage report: total MB, used MB, and used percentage.
-- Fix: the "total" inline view was missing the Sum(bytes)/1024/1024 AS MB
-- aggregate that total.MB references, so the query failed to parse.
-- NOTE(review): the inner join drops tablespaces with no row in
-- dba_free_space (i.e. 100% full ones) — acceptable for this report.
SELECT total.tablespace_name,
Round(total.MB, 2) AS Total_MB,
Round(total.MB - free.MB, 2) AS Used_MB,
Round(( 1 - free.MB / total.MB ) * 100, 2)
|| '%' AS Used_Pct
FROM (SELECT tablespace_name,
Sum(bytes) / 1024 / 1024 AS MB
FROM dba_free_space
GROUP BY tablespace_name) free,
(SELECT tablespace_name,
Sum(bytes) / 1024 / 1024 AS MB
FROM dba_data_files
GROUP BY tablespace_name) total
WHERE free.tablespace_name = total.tablespace_name;
-- Datafile layout and autoextend settings for capacity planning.
select file_id,file_name,tablespace_name,autoextensible from dba_data_files;
########
1.exp and imp will not create user (all)
2.expdp and impdp will create user (default)
(impdp 做的時候,如果做之前沒有顯性删除使用者(drop user),則impdp 不會自動建立user,那麼也不需要重置密碼 )
3.expdp會導出DDL,并且導出表相關的LOBSEGMENT、LOBINDEX和正常的索引。
impdp會使用DDL建立表,并且建立表上相關的索引。
for 12c use @ to access pdb
1.from table to table
-> IN PROD TS
exp TS/TEST_Data130@TSprod file=D:\projects\TS\TS_TS_20160908_update.dmp log=D:\projects\TS\eais_all_exp_2016_09_02.log TABLES=(owner.TS_record_his,owner.TS_record)
->IN UAT (表導入前存在的 )
imp TS/gatepassuat_data2@TSuat file=D:\projects\TS\TS_TS_20160908_update.dmp log=D:\projects\TS\TS_upd_imp_2016_09_05.log fromuser=TS touser=TS tables=(TS_record_his,TS_record)
->IN UAT (表導入前是不存在的 )
imp TS/gatepassuat_data2@TSuat file=D:\projects\TS\TS_TS_20160908_update.dmp log=D:\projects\TS\TS_upd_imp_2016_09_05.log fromuser=TS touser=TS
expdp/impdp
-> IN PROD user:TS
expdp system/oracle123@TSprod file=TS_TS_20160908_update.dmp directory=TMP_DIR log=TS_all_exp_2016_09_02.log tables=('TS.TS_record_his','TS.TS_record')
->IN UAT for 12c use @ to access pdb
impdp system/oracle123_@TSuat directory=DUMP_DIR dumpfile=TS_TS_20160908_update.dmp log=TS_all_imp_2016_09_02.log table_exists_action=replace remap_schema=TS:TS
--tables=('TS_record_his','TS_record') table_exists_action=replace remap_schema=TS:TS;
--tables=TS.TS_record_his,TS.TS_record remap_schema=TS:TS;
-> in prod SQL> select * from dba_directories;
OWNER DIRECTORY_NAME ------------------------------ ------------------------------ DIRECTORY_PATH ------------------------------------------------------------------------ SYS TMP_DIR D:\dba\tmp
SYS XMLDIR D:\app\Administrator\product\11.2.0\dbhome_1\rdbms\xml
SYS ORACLE_OCM_CONFIG_DIR D:\app\Administrator\product\11.2.0\dbhome_1/ccr/state
OWNER DIRECTORY_NAME ------------------------------ ------------------------------ DIRECTORY_PATH ------------------------------------------------------------------------ SYS DATA_PUMP_DIR D:\app\Administrator/admin/TSprod/dpdump/
SYS ORACLE_OCM_CONFIG_DIR2 D:\app\Administrator\product\11.2.0\dbhome_1/ccr/state
-> in UAT SQL> select * from dba_directories;
OWNER DIRECTORY_NAME ------------------------------ ------------------------------ DIRECTORY_PATH -------------------------------------------------------------------------------- SYS DUMP_DIR D:\dba\dump
OWNER DIRECTORY_NAME ------------------------------ ------------------------------ DIRECTORY_PATH -------------------------------------------------------------------------------- SYS DATA_PUMP_DIR D:\app\Administrator/admin/TSuat/dpdump/
2.from schema to schema
-Prepare:
c:\app\oracle\dump: datapump
(select username from dba_users where account_status='OPEN' and username not in ('SYS','SYSTEM','OPER','BACKUPDB'))
-character set
SELECT * FROM v$nls_parameters WHERE PARAMETER='NLS_CHARACTERSET';
set NLS_LANG="AMERICAN_AMERICA.AL32UTF8"
select SUM(bytes)/1024/1024 "M" from dba_segments where owner like '%OTD%';
-rollout
in prod (if doing the below in a pdb, make sure the datapump directory was already created manually)
in prod
set NLS_LANG=AMERICAN_AMERICA.AL32UTF8
set system=db
expdp system/oracle123_ file=TS_TS_201702_update.dmp directory=datapump log=TS_all_exp_2017_09_28.log SCHEMAS='%system%usr','%system%patch','%system%query','%system%data' parallel=2
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export user=otdb
expdp system/oracle123_ file=TS_TS_201702_update.dmp directory=datapump log=TS_all_exp_2017_09_28.log SCHEMAS=$user'usr',$user'patch',$user'query',$user'data' parallel=2
in uat
1。notice :
如果data 使用者包括sequence ,那麼在目标端,導入之前,需要手工先drop sequnece ,在導入。
腳本如下:
spool drop_sequence.sql
set head off feedback off
--select 'drop SEQUENCE '||SEQUENCE_OWNER||'.'||SEQUENCE_NAME||';' from dba_sequences where ----------SEQUENCE_OWNER in ('FAMSDATA','SMASDATA');
select 'drop SEQUENCE '||SEQUENCE_OWNER||'.'||SEQUENCE_NAME||';' from dba_sequences where SEQUENCE_OWNER like
('%&DATA%') and SEQUENCE_OWNER not in ('ORDDATA');
2。 (if doing the below in a pdb, make sure the datapump directory was already created manually)
export user=rpms
impdp system/sys_ file=TS_TS_201702_update.dmp directory=datapump SCHEMAS=$user'usr',$user'patch',$user'query',$user'data' table_exists_action=replace parallel=2 log=impdp.log;
expdp system/oracle123@testuat file=TS_TS_20160928_update.dmp directory=DATA_PUMP_DIR log=TS_all_exp_2016_09_28.log SCHEMAS='etestquery','testrusr','testdata' parallel=2;
impdp tuser/tuser file=TS_TS_20160928_update.dmp directory=DATA_PUMP_DIR SCHEMAS='etestquery','testrusr','testdata' table_exists_action=replace parallel=2 log=TS_all_imp_2016_09_28.log;
for 12c use @ to access pdb
impdp tuser/tuser@pdb file=TS_TS_20160928_update.dmp directory=DATA_PUMP_DIR SCHEMAS='etestquery','testrusr','testdata' table_exists_action=replace parallel=2 log=TS_all_imp_2016_09_28.log;
執行@$ORACLE_HOME/rdbms/admin/utlrp.sql腳本編譯資料庫失效對象。
Run the utlrp.sql script as SYS user.
@%ORACLE_HOME%/rdbms/admin/utlrp.sql
3.ps
使用imp進行資料導入時,若表已經存在,要先drop掉表,再進行導入。
而使用impdp完成資料庫導入時,若表已經存在,有四種的處理方式:
1) skip:預設操作
2) replace:先drop表,然後建立表,最後插入資料
3) append:在原來資料的基礎上增加資料
4) truncate:先truncate,然後再插入資料
notice :
select 'drop SEQUENCE '||SEQUENCE_OWNER||'.'||SEQUENCE_NAME||';' from dba_sequences where SEQUENCE_OWNER in ('FAMSDATA','SMASDATA');
6.
隻導入使用者下對象的中繼資料
impdp bys/bys schemas=bys directory=dir_dp file=bys5.dmp logfile=bys6.log content=metadata_only
隻導入使用者下對象的資料
impdp bys/bys schemas=bys directory=dir_dp file=bys5.dmp logfile=bys7.log content=data_only
導入使用者下所有對象及資料: --不寫content=,預設就是ALL
impdp bys/bys schemas=bys directory=dir_dp file=bys5.dmp logfile=bys8.log
7.for 12c
SQL> select name from v$datafile;
NAME
--------------------------------------------------------------------------------
/centrproddb/data/centrprod/system01.dbf
/centrproddb/data/centrprod/pdbseed/system01.dbf
/centrproddb/data/centrprod/sysaux01.dbf
/centrproddb/data/centrprod/pdbseed/sysaux01.dbf
/centrproddb/data/centrprod/undouser01.dbf
/centrproddb/data/centrprod/users01.dbf
/centrproddb/data/centrprod/penrsprod/system01.dbf
/centrproddb/data/centrprod/penrsprod/sysaux01.dbf
/centrproddb/data/centrprod/penrsprod/penrsprod_users01.dbf
/centrproddb/data/centrprod/pipamprod/system01.dbf
/centrproddb/data/centrprod/pipamprod/sysaux01.dbf
/centrproddb/data/centrprod/penrsprod/enrs_data_f01.dbf
/centrproddb/data/centrprod/penrsprod/enrs_idx_f01.dbf
/centrproddb/data/centrprod/pipamprod/ipam_data_f01.dbf
/centrproddb/data/centrprod/pipamprod/ipam_idx_f01.dbf
/centrproddb/data/centrprod/pipamprod/test.dbf
16 rows selected.
SQL> select file_name from dba_data_files;
FILE_NAME
/centrproddb/data/centrprod/users01.d
--exp 帶有query 條件的查詢 ,使用parfile ,可以防止query 條件查詢 導緻的 unix 指令行無法識别的報錯。
step 1:
vi /tmp/test.par
userid=dbdata/dbyida_01
file=/dbuatdblog/table_2017.dmp
log=/dbuatdblog/exp2017.log
tables=db_CONFIRMED_GATE_MOVEMENT
query="where to_char(LAST_UPD_DATE,'yyyy-mm-dd') >= '2017-06-01'"
step 2:
exp parfile=/tmp/test.par
ref
http://blog.csdn.net/kimsoft/article/details/5720814
###new 無需 expdp ,直接走網絡impdp
drop directory schema_imp;
connect / as sysdba
create directory schema_imp as '/db/db01/db_impdp';
grant read,write on directory schema_expdp to public;
impdp_schemas.par:
userid='/ as sysdba'
directory=schema_imp
dumpfile=expdp_schemas_%U.dmp
logfile=impdp_schemas.log
parallel=6
schemas=BBSP,BBSPODS,BCDPETL,DBMONOPR,DBMGR,OVSEE
content=all
network_link=dblink_migup
--- 監控資料庫impdp 任務的進度,檢視表空間的進度
select * from dba_datapump_jobs;
SYS_IMPORT_SCHEMA_02
####sample 0.1 以下案例展示19c pdb 資料庫如何導入,19c 無法使用impdp 導入 "/ as sysdba" ,必須加入連接配接符号impdp qn/[email protected]:1521/testpdb parfile=11g.parfile
感謝iaoxiao樹0
補充:
1 IMPDP+network_link, 直接導入到目标庫中(從dblink對應的遠端庫)
2 EXPDP+network_link,直接将遠端庫上的資料,導出到本地機器上
一、目标端
dblink
CREATE PUBLIC DATABASE LINK conn_old_11g
CONNECT TO system IDENTIFIED BY "oracle" USING '(DESCRIPTION = (ADDRESS_LIST = (ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.66.101)(PORT = 11521))) (CONNECT_DATA = (SERVER = DEDICATED) (SERVICE_NAME = testdb)))';
directory :
create directory QN as '/home/db/oracle/from11g/QN';
建立使用者并賦權限:
SQL> show pdbs
CON_ID CON_NAME OPEN MODE RESTRICTED
---------- ------------------------------ ---------- ----------
2 PDB$SEED READ ONLY NO
3 TESTPDB READ WRITE NO
4 TESTPDB2 MOUNTED
5 MY_APP MOUNTED
6 SALESPDB2 MOUNTED
8 TESTPDB22 MOUNTED
9 SALESACT MOUNTED
10 SALESPDB MOUNTED
11 MY_APP2 MOUNTED
12 SALESACT$SEED MOUNTED
13 MY_APP3 MOUNTED
SQL> alter session set container=testpdb ;
SQL> create user qn identified by qn default tablespace qn ;
User created.
alter user qn quota unlimited on qn;
grant IMP_FULL_DATABASE to qn;
grant read,write on directory SYS.QN to qn ;
impd+network恢複:
恢複的parfile=11g.parfile:
oracle@node1:/home/db/oracle$ cat 11g.parfile
network_link=CONN_OLD_11G
cluster=n
content=metadata_only
schemas=QN,LMIS_HIS,GJIC,IM,GJJS,DC,YSJL,STAT
#job_name=job_qn
directory=QN
logfile=QN.log
logtime=all
parallel=2
#sqlfile=job_qn.sql
table_exists_action=replace
#exclude=statistics
#version=12.1.0.2
remap_tablespace=QN:QN,LMIS:QN,GJIC:QN,IM:QN,GJJS:QN,DC:QN,YSJL:QN,STAT:QN
remap_schema=QN:QN,LMIS:QN,GJIC:QN,IM:QN,GJJS:QN,DC:QN,YSJL:QN,STAT:QN
transform=OID:N
恢複過程:
oracle@node1:/home/db/oracle$ impdp qn/[email protected]:1521/testpdb parfile=11g.parfile
Import: Release 19.0.0.0.0 - Production on Wed Apr 14 15:22:58 2021
Version 19.3.0.0.0
Copyright (c) 1982, 2019, Oracle and/or its affiliates. All rights reserved.
Connected to: Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
14-APR-21 15:23:06.468: Starting "QN"."SYS_IMPORT_SCHEMA_01": qn/********@192.168.66.144:1521/testpdb parfile=11g.parfile
14-APR-21 15:23:09.521: Processing object type SCHEMA_EXPORT/USER
14-APR-21 15:23:10.065: ORA-31684: Object type USER:"QN" already exists
####sample 1
1.puserdb03 /usr/user/data/dump
SELECT * FROM v$nls_parameters WHERE PARAMETER='NLS_CHARACTERSET';
export NLS_LANG=AMERICAN_AMERICA.ZHS16GBK
directory=datapump
dumpfile=expdp_useropr_log_%U.dmp
logfile=expdp_useropr_log.log
parallel=4
filesize=10240M
schemas=useropr
expdp parfile=/usr/user/data/dump
2.root@pnuserdb01
導入資料前關閉限制和觸發器,不關閉的話:有可能報錯 ,感謝 clwangdong8888
使用資料泵導入資料,導入過程中沒有任何錯誤,導入的記錄數完全正常,但應用就是無法使用,取不到資料,找了很久都沒找到原因,最後,發現是由于序列的問題引起的,發現導出和導入的值不一樣,通過與開發溝通才明白,因為在導入資料時要對一些字段定義做了修改,是以先導入空表,然後再導入資料的,在向表插入資料時會插入序列,而序列是由觸發器來産生的,進而導緻兩邊的序列值不一緻,引起記錄的值有差異,找到原因就好辦了,在導入資料前關閉所有的限制和觸發器,
SQL>set heading off
SQL>select 'alter table '||table_name||' disable constraint '||constraint_name||';' from user_constraints;
SQL>alter table table_name disable constraint constraint_name;
SQL>select 'alter trigger '||trigger_name||' disable;' from user_triggers;
SQL>alter trigger trigger_name disable;
cat impdp_usermove.par
logfile=impdp_usermove_log.log
REMAP_SCHEMA=useropr:usermove
content=all
table_exists_action=truncate
impdp parfile=/usr/nuser/data/dump
--done
hagrp -freeze sg_nuser_db -sys pnuserdb01
-undone
hagrp -unfreeze sg_nuser_db -sys pnuserdb01
########sample
########### 前期資料1.8T (提前3天做)
step 0: old metadata
"export NLS_LANG=AMERICAN_AMERICA.ZHS16GBK
create directory schema_exp as '/db/cps/oradata/dump';
cd /db/cps/app/opcps/dba/exp_table
vi expdp_full_norows.par
directory=schema_exp
dumpfile=expdp_full_norows.dmp
logfile=expdp_full_norows.log
full=y
content=metadata_only
"
step 1: old 1.8T data
vi expdp_table_round1.par
dumpfile=expdp__table_round1_%U.dmp
logfile=expdp__table_round1.log
tables=(db.AUP_PAYDETAILFLOW,db.AUP_PAYDETAILFLOW_BAK,db.AUP_PAYDETAILBOOK,db.db_IBPS_TRADEBOK_HIS,db.db_AUPS_MAINTRANSDTL,db.AUP_PAYBATCHBOOK,db.db_IBPS_TRADEBOK)
expdp parfile=expdp_table_round1.par
&
create or replace directory dp_dir as '/home/oracle';
STEP 2: new 1.8T data and index
導入先期需要資料的表 "
chown -R opcps:dba /db/cps/data/dump/*.dmp
create or replace directory schema_imp as '/db/cps/data/dump';
cd /db/cps/data/dump/
vi impdp_user_table_roud1.par
logfile=impdp__table_round1.log
nohup impdp parfile=impdp_user_table_roud1.par &
檢查導出導入日志是否有報錯"
create or replace directory schema_imp as '/db/DD/data/tmp';
####19C https://blog.csdn.net/weixin_33971977/article/details/92144283
impdp dbmgr/'Byscyrj8!user'@DD directory=schema_imp dumpfile=expdp__table_round1_%U.dmp
logfile=impdp.log
#############增量資料 450G 左右 (當天做)
分四批次導出的,總的時間大概是45分鐘
1.step 1 old 已經完成
all_expdp.par是所有業務使用者導出且排除大表的
DDDDD_nopar.par是DDDDD大表中的非分區表的
DDDDD_par.par是DDDDD大表中的分區表的
card_par.par是card大表分區表的
四個批次分别的導出時間,可在log檔案看到
09 April, 2019 18:35:19 --18:51 16 all_expdp.par
09 April, 2019 19:54:18 --20:16 22 DDDDD_nopar.par
09 April, 2019 22:08:11 --22:13 5 DDDDD_par.par
09 April, 2019 22:04:30 --22:05 1 card_par.par
導入時根據導出parfile調整下就可以導入了,導入時記得添加參數CLUSTER=n
[opDD@sDDdb07:/dbatmp/dump]$ pwd
/dbatmp/dump--------dmp所在路徑
[opDD@sDDdb07:/dbatmp/dump]$ ls -rtl *.par
-rw-r--r-- 1 opDD oinstall 21 Apr 09 18:33 stop.par
-rw-r--r-- 1 opDD oinstall 696 Apr 09 18:35 all_expdp.par
-rw-r--r-- 1 opDD oinstall 304 Apr 09 19:53 DDDDD_nopar.par
-rw-r--r-- 1 opDD oinstall 215 Apr 09 22:04 card_par.par
-rw-r--r-- 1 opDD oinstall 2124 Apr 09 22:06 DDDDD_par.par
[opDD@sDDdb07:/dbatmp/dump]$
1.1 all_expdp.par
DUMPFILE=DD_1_%U.dmp
filesize=10G
PARALLEL=8
DIRECTORY=SCHEMA_EXP
LOGFILE=DD_exp.log
JOB_NAME=DD_exp
SCHEMAS=CRDETL,CARD,DBMONOPR,SYMADM,SYMADM_APPL,SYMETL,DDDDD
EXCLUDE=TABLE:"IN ('GL_POST',
'ddd_TRAN_LOG_HIST',
'ddd_TRAN_HIST',
'ddd_BATCH_HIST',
'ddd_CHANNEL_HIST',
'TF_TRAN_HIST',
'ddd_CR_ACCR_HIST',
'ddd_AUDIT_LOG',
'ddd_TRAN_HIST_BAK2012_2014',
'ddd_ACCT_BAL',
'GL_ACCT_BAK',
'NV_MULTI_CONS_HIST',
'ddd_MTH_END',
'ddd_BASE_MTH_END',
'TB_CASH_JOURNAL')"
1.2 DDDDD_nopar.par
DUMPFILE=DDDDD_nopar_%U.dmp
parallel=8
LOGFILE=DDDDD_nopar.log
JOB_NAME=DDDDD_par03
tables=
DDDDD.TB_CASH_JOURNAL
DDDDD.TF_TRAN_HIST
DDDDD.ddd_CR_ACCR_HIST
DDDDD.ddd_ACCT_BAL
DDDDD.NV_MULTI_CONS_HIST
DDDDD.ddd_TRAN_HIST_BAK2012_2014
1.3 vi DDDDD_par.par
DUMPFILE=DDDDD_par_%U.dmp
EXCLUDE=STATISTICS
LOGFILE=DDDDD_par.log
JOB_NAME=DDDDD_par02
TABLES=(
DDDDD.ddd_CHANNEL_HIST:RCH_PART_201903,
DDDDD.ddd_CHANNEL_HIST:RCH_PART_201904,
DDDDD.ddd_MTH_END:RME_PART_201903,
DDDDD.ddd_MTH_END:RME_PART_201904,
DDDDD.GL_ACCT_BAK:GAB_PART_201903,
DDDDD.GL_ACCT_BAK:GAB_PART_201904,
DDDDD.ddd_AUDIT_LOG:RAL_PART_201903,
DDDDD.ddd_AUDIT_LOG:RAL_PART_201904,
DDDDD.GL_POST:KGP_PART_20190301,
DDDDD.GL_POST:KGP_PART_20190302,
DDDDD.GL_POST:KGP_PART_20190303,
DDDDD.GL_POST:KGP_PART_20190304,
DDDDD.GL_POST:KGP_PART_20190305,
DDDDD.GL_POST:KGP_PART_20190306,
DDDDD.GL_POST:KGP_PART_20190307,
DDDDD.GL_POST:KGP_PART_20190308,
DDDDD.GL_POST:KGP_PART_20190309,
DDDDD.GL_POST:KGP_PART_20190310,
DDDDD.GL_POST:KGP_PART_20190311,
DDDDD.GL_POST:KGP_PART_20190312,
DDDDD.GL_POST:KGP_PART_20190313,
DDDDD.GL_POST:KGP_PART_20190314,
DDDDD.GL_POST:KGP_PART_20190315,
DDDDD.GL_POST:KGP_PART_20190316,
DDDDD.GL_POST:KGP_PART_20190317,
DDDDD.GL_POST:KGP_PART_20190318,
DDDDD.GL_POST:KGP_PART_20190319,
DDDDD.GL_POST:KGP_PART_20190320,
DDDDD.GL_POST:KGP_PART_20190321,
DDDDD.GL_POST:KGP_PART_20190322,
DDDDD.GL_POST:KGP_PART_20190323,
DDDDD.GL_POST:KGP_PART_20190324,
DDDDD.GL_POST:KGP_PART_20190325,
DDDDD.GL_POST:KGP_PART_20190326,
DDDDD.GL_POST:KGP_PART_20190327,
DDDDD.GL_POST:KGP_PART_20190328,
DDDDD.GL_POST:KGP_PART_20190329,
DDDDD.GL_POST:KGP_PART_20190330,
DDDDD.GL_POST:KGP_PART_20190331,
DDDDD.GL_POST:KGP_PART_20190401,
DDDDD.GL_POST:KGP_PART_20190402,
DDDDD.GL_POST:KGP_PART_20190403,
DDDDD.GL_POST:KGP_PART_20190404,
DDDDD.GL_POST:KGP_PART_20190405,
DDDDD.GL_POST:KGP_PART_20190406,
DDDDD.GL_POST:KGP_PART_20190407,
DDDDD.GL_POST:KGP_PART_20190408,
DDDDD.GL_POST:KGP_PART_20190409,
DDDDD.ddd_BASE_MTH_END:dddME_PART_201903,
DDDDD.ddd_BASE_MTH_END:dddME_PART_201904,
DDDDD.ddd_TRAN_LOG_HIST:CTLH_PART_201903,
DDDDD.ddd_TRAN_LOG_HIST:CTLH_PART_201904,
DDDDD.ddd_TRAN_HIST:RTH_PART_201903,
DDDDD.ddd_TRAN_HIST:RTH_PART_201904)
1.4 vi card_par.par
userid='card/card1234'
DUMPFILE=card_par_%U.dmp
LOGFILE=card_par.log
JOB_NAME=card_par
TABLES=(ddd_TRAN_LOG_HIST:CTLH_PART_201903,
ddd_TRAN_LOG_HIST:CTLH_PART_201904)
step2. 導入 (并行度加大到20,可以有效加快速度,串行跑)
export NLS_LANG=American_america.AL32UTF8
create or replace directory schema_imp as '/dbatmp/dump';
grant read,write on directory schema_imp to public;
2.1 all_impdp.par ( Apr 10 16:36:55 2019 ~
PARALLEL=20
DIRECTORY=SCHEMA_IMP
LOGFILE=DD_imp.log
JOB_NAME=DD_imp
cluster=n
table_exists_action=replace
nohup impdp parfile=all_impdp.par &
19:12:47 ~ 20:23:59 持續時間:01:11:12
2.2 DDDDD_nopar_imp.par
parallel=20
LOGFILE=DDDDD_nopar_imp.log
JOB_NAME=DDDDD_par03_imp
nohup impdp parfile=DDDDD_nopar_imp.par &
17:09:43 ~ 18:10:54 持續時間:01:01:10
2.3 vi DDDDD_par_imp.par
LOGFILE=DDDDD_par_imp.log
JOB_NAME=DDDDD_par02_imp
nohup impdp parfile=DDDDD_par_imp.par &
nohup impdp parfile=DDDDD_par_imp.par > nohup.out6 &
20:56:28 ~ 21:00:13 持續時間:00:03:45
2.4 vi card_par_imp.par
LOGFILE=card_par_imp.log
JOB_NAME=card_par_imp
nohup impdp parfile=card_par_imp.par &
Apr 10 17:19:51 ~ 17:20:10 2019 elapsed 0 00:00:19
###全局索引失效檢查
select status,owner,table_name,index_name from dba_indexes where table_name in
(
'GL_POST',
'TB_CASH_JOURNAL')
####issue 1 (any data 類型的table引起導入報錯,可以忽略)
card_par_imp.log:ORA-22370: incorrect usage of method Nonexistent Type
DD_imp.log:ORA-22370: incorrect usage of method Nonexistent Type
DDDDD_par_imp.log:ORA-22370: incorrect usage of method Nonexistent Type
ORA-31693: Table data object "DDDDD"."ddd_TRAN_LOG":"CTL_PART3" failed to load/unload and is being skipped due to error:
ORA-29913: error in executing ODCIEXTTABLEFETCH callout
CREATE TABLE "DDDDD"."ddd_TRAN_LOG"
( "SEQ_NO" VARCHAR2(60 CHAR) NOT NULL ENABLE,
"TRAN_DATE" DATE NOT NULL ENABLE,
"SOURCE_TYPE" VARCHAR2(10 CHAR),
"STATUS" VARCHAR2(3 CHAR) NOT NULL ENABLE,
"CHANNEL_DATE" VARCHAR2(8 CHAR),
"MSG_CODE" VARCHAR2(6 CHAR),
"MSG_TYPE" VARCHAR2(6 CHAR),
"TRAN_TYPE" VARCHAR2(6 CHAR),
"BRANCH" VARCHAR2(6 CHAR),
"USER_ID" VARCHAR2(10 CHAR),
"PROGRAM_ID" VARCHAR2(10 CHAR),
"ORG_SYS_ID" VARCHAR2(6 CHAR),
"BUSS_SEQ_NO" VARCHAR2(60 CHAR),
"CONSUMER_ID" VARCHAR2(6 CHAR),
"IN_DATE_TIME" TIMESTAMP (6),
"OUT_DATE_TIME" TIMESTAMP (6),
"RET_CODE" VARCHAR2(10 CHAR),
"RET_MSG" VARCHAR2(512 CHAR),
"LOG_FLAG" VARCHAR2(1 CHAR),
"INPUT_DATA" "SYS"."ANYDATA" ,
"PARTITION_FLAG" NUMBER,
"REFERENCE" VARCHAR2(50 CHAR),
"STACK_MSG" VARCHAR2(2048 CHAR),
"IN_OUT_FLAG" VARCHAR2(3 CHAR),
"HOST_NAME" VARCHAR2(100 CHAR),
"HOST_IP" VARCHAR2(30 CHAR),
"FINANCIAL_TYPE" VARCHAR2(10 CHAR)
#########issue 2 并發度太高,4*20=80 個并發導緻報錯,修改為單個并發度為10,重新發起導數
DDDDD_par_imp.log:ORA-01555: snapshot too old: rollback segment number with name "" too small
DD_imp.log:ORA-01555: snapshot too old: rollback segment number with name "" too small
impdp parfile=DDDDD_par_imp.par
impdp parfile=all_impdp.par
#################附錄
chown -R opcps:dba /bakfs/dump/*.dmp
create or replace directory schema_imp as '/bakfs/dump';
cd /bakfs/dump
vi impdp_schemas.par
schemas=AUTEK,UPPETL,db,DBMONOPR
nohup impdp parfile=impdp_schemas.par &
########sample 32
使用where 語句導入導出符合條件的記錄,還可以導出多張表
Oracle selective export with conditional where clause Oracle Database Tips by Donald Burleson |
Question: I need to create a scaled-down version of my production database for developers. Can I use the export utility (exp and expdp) to restrict the rows that are dumped?
Answer: Yes, you can add a where clause to your export syntax to extract a sub-set of your production rows. In this example, we restrict the export to rows added in the last month:
exp scott/tiger tables=tab1, tab2 query="where mymonth > sysdate - 31"
You can generate syntax for all tables in your schema by building the export syntax with a query against user_tables.
Using the query Parameter to use WHERE Clause During Export
To export part of the data of a specific object, use the query parameter. In the following example, use the query parameter to get only one row from the tbl_two table and exclude table tbl_one:
directory=dir_test
dumpfile=test_tables.dmp
logfile=test_tables.log
exclude=table:"='tbl_one'"
query=test.tbl_two:"where id=1"
[oracle@localhost tmp]$ expdp test/test parfile=/tmp/test.par
. . exported "test"."tbl_two" 4.914 KB 1 rows
To use different restrictions, you can use more than one query parameter in the same parameter file.
炊煙起了;夕陽下了;細雨來了
多調試,互動式程式設計體驗
記錄,獨立思考,對比
感謝轉載作者
修車
國産化
read and connect
匍匐前進,
講故事