
Data Pump Import (impdp) Guide

This guide provides detailed instructions for using Oracle Data Pump Import (impdp) to load data and metadata into Oracle databases efficiently.

Basic syntax:
impdp [username/password[@connect_identifier]] [parameter=value]...
| Parameter | Description | Example |
| --- | --- | --- |
| directory | Directory object for dump files | directory=dp_dir |
| dumpfile | Input dump file name(s) | dumpfile=export_%U.dmp |
| logfile | Import log file | logfile=import.log |
| schemas | Schema(s) to import | schemas=hr,scott |
| tables | Specific tables to import | tables=hr.employees |
| full | Full database import | full=y |
| remap_schema | Schema remapping | remap_schema=hr:hr_test |
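A minimal invocation that combines these core parameters is shown below; it assumes the dp_dir directory object already exists and that hr_export.dmp is present in that directory (all names are examples).
basic_schema_import.sh
#!/bin/bash
# Minimal schema-level import using the core parameters above
impdp system/password \
directory=dp_dir \
dumpfile=hr_export.dmp \
logfile=hr_import.log \
schemas=hr \
job_name=BASIC_SCHEMA_IMPORT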
full_database_import.sh
#!/bin/bash
# Complete database import with parallel processing
export ORACLE_SID=DEV
export DATE=$(date +%Y%m%d_%H%M%S)
impdp system/password \
directory=dp_dir \
dumpfile=full_db_%U.dmp \
logfile=full_db_import_${DATE}.log \
full=y \
parallel=8 \
cluster=n \
table_exists_action=replace \
job_name=FULL_DB_IMPORT_${DATE}
schema_import_remap.sh
#!/bin/bash
# Import schema to different user/tablespace
impdp system/password \
directory=dp_dir \
dumpfile=hr_export.dmp \
logfile=hr_import_test.log \
schemas=hr \
remap_schema=hr:hr_test \
remap_tablespace=users:test_data \
remap_tablespace=hr_idx:test_idx \
transform=segment_attributes:n \
exclude=ref_constraint \
job_name=HR_TEST_IMPORT
table_import_transform.sh
#!/bin/bash
# Import specific tables with transformations
impdp system/password \
directory=dp_dir \
dumpfile=emp_tables.dmp \
logfile=emp_import.log \
tables=hr.employees,hr.departments \
remap_table=hr.employees:hr.employees_backup \
remap_table=hr.departments:hr.departments_backup \
table_exists_action=append \
data_options=skip_constraint_errors \
job_name=EMP_TABLE_IMPORT
partition_import.sh
#!/bin/bash
# Import specific partitions with partition remapping
impdp sales/password \
directory=dp_dir \
dumpfile=sales_2024.dmp \
logfile=sales_import_2024.log \
tables=sales_data:sales_2024_q1 \
partition_options=merge \
table_exists_action=append \
job_name=PARTITION_IMPORT
network_import.sh
#!/bin/bash
# Import directly from source database
impdp system/password@target_db \
network_link=source_db_link \
schemas=hr,finance \
remap_schema=hr:hr_copy \
remap_schema=finance:finance_copy \
parallel=4 \
exclude=statistics \
job_name=NETWORK_IMPORT
metadata_import.sh
#!/bin/bash
# Generate a DDL-only preview of the database structure (with SQLFILE the DDL is written to metadata_preview.sql and nothing is applied to the target)
impdp system/password \
directory=dp_dir \
dumpfile=metadata_only.dmp \
logfile=metadata_import.log \
full=y \
content=metadata_only \
exclude=tablespace,user \
sqlfile=metadata_preview.sql \
job_name=METADATA_IMPORT
data_only_import.sh
#!/bin/bash
# Import only data, skip metadata
impdp hr/password \
directory=dp_dir \
dumpfile=hr_full.dmp \
logfile=data_only_import.log \
schemas=hr \
content=data_only \
table_exists_action=append \
data_options=skip_constraint_errors \
job_name=DATA_ONLY_IMPORT
storage_transform_import.sh
#!/bin/bash
# Import with storage parameter modifications
# Note: remap_directory (12.2+) remaps path prefixes; remap_datafile matches complete file names only
impdp system/password \
directory=dp_dir \
dumpfile=prod_export.dmp \
schemas=app_schema \
transform=storage:n \
transform=pctspace:50 \
transform=segment_attributes:n:table \
transform=oid:n:type \
"remap_directory='/u01/prod/':'/u01/dev/'" \
job_name=STORAGE_TRANSFORM_IMPORT
object_transform_import.sh
#!/bin/bash
# Import with object-level transformations
impdp system/password \
directory=dp_dir \
dumpfile=app_export.dmp \
schemas=app \
transform=disable_archive_logging:y \
transform=inmemory:n \
transform=lob_storage:securefile \
job_name=OBJECT_TRANSFORM_IMPORT
complex_remap_import.sh
#!/bin/bash
# Import with multiple remapping rules
# Create parameter file for complex mappings
cat > import_remap.par << EOF
directory=dp_dir
dumpfile=multi_schema.dmp
logfile=complex_import.log
remap_schema=hr:hr_dev
remap_schema=finance:fin_dev
remap_schema=sales:sales_dev
remap_tablespace=users:dev_data
remap_tablespace=hr_data:dev_data
remap_tablespace=fin_data:dev_data
remap_directory='/prod/oradata/':'/dev/oradata/'
exclude=db_link
exclude=directory
transform=oid:n
job_name=COMPLEX_REMAP_IMPORT
EOF
impdp system/password parfile=import_remap.par
selective_import.sh
#!/bin/bash
# Import only specific object types
# Filter clauses are wrapped so the double quotes reach impdp intact (alternatively, use a parameter file)
impdp system/password \
directory=dp_dir \
dumpfile=full_export.dmp \
schemas=app_schema \
"include=table:\"IN ('CUSTOMERS','ORDERS','PRODUCTS')\"" \
"include=index:\"LIKE 'PK_%'\"" \
"include=constraint:\"LIKE 'FK_%'\"" \
exclude=trigger \
exclude=statistics \
job_name=SELECTIVE_IMPORT
optimized_parallel_import.sh
#!/bin/bash
# Configure for maximum import performance
# Disable logging for faster imports
sqlplus -s system/password << EOF
ALTER TABLESPACE users NOLOGGING;
ALTER TABLESPACE idx_tbs NOLOGGING;
EOF
# Run optimized import
impdp system/password \
directory=dp_dir \
dumpfile=large_export_%U.dmp \
full=y \
parallel=16 \
cluster=n \
transform=disable_archive_logging:y \
metrics=y \
logtime=all \
job_name=OPTIMIZED_IMPORT
# Re-enable logging
sqlplus -s system/password << EOF
ALTER TABLESPACE users LOGGING;
ALTER TABLESPACE idx_tbs LOGGING;
EOF
large_table_import.sh
#!/bin/bash
# Import large tables with optimal settings
# Pre-create tablespace with optimal settings
sqlplus -s system/password << EOF
CREATE TABLESPACE large_data
DATAFILE SIZE 100G AUTOEXTEND ON NEXT 10G
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 10M
SEGMENT SPACE MANAGEMENT AUTO;
EOF
# Import with optimizations
impdp system/password \
directory=dp_dir \
dumpfile=large_table_%U.dmp \
tables=big_schema.huge_table \
parallel=16 \
table_exists_action=replace \
data_options=disable_append_hint \
transform=disable_archive_logging:y \
job_name=LARGE_TABLE_IMPORT
-- monitor_import.sql
-- Monitor running Data Pump import jobs
-- Current import jobs
SELECT owner_name, job_name, operation, job_mode, state,
degree, attached_sessions
FROM dba_datapump_jobs
WHERE operation = 'IMPORT'
ORDER BY job_name;
-- Import progress (Data Pump reports per-item progress through V$SESSION_LONGOPS)
SELECT sl.opname job_name, sl.target_desc, sl.sofar, sl.totalwork, sl.units,
ROUND(sl.sofar/NULLIF(sl.totalwork,0)*100,2) pct_complete
FROM v$session_longops sl
WHERE sl.opname IN (SELECT job_name FROM dba_datapump_jobs
WHERE operation = 'IMPORT')
AND sl.sofar <> sl.totalwork;
-- Sessions attached to Data Pump jobs (master and worker processes)
SELECT dps.job_name, dps.session_type, s.sid, s.serial#, s.status, s.program
FROM dba_datapump_sessions dps
JOIN v$session s ON s.saddr = dps.saddr
ORDER BY dps.job_name, dps.session_type;
control_import_job.sh
#!/bin/bash
# Manage running import jobs
# Function to control import job
control_import() {
local action=$1
local job_name=$2
case $action in
status)
impdp system/password attach=$job_name <<EOF
status
exit
EOF
;;
pause)
impdp system/password attach=$job_name <<EOF
stop_job=immediate
exit
EOF
;;
resume)
impdp system/password attach=$job_name <<EOF
start_job
exit
EOF
;;
parallel)
local degree=$3
impdp system/password attach=$job_name <<EOF
parallel=$degree
exit
EOF
;;
esac
}
# Usage examples
# control_import status IMPORT_JOB_001
# control_import pause IMPORT_JOB_001
# control_import resume IMPORT_JOB_001
# control_import parallel IMPORT_JOB_001 8
-- post_import_validation.sql
-- Validate imported data
-- Compare object counts
SELECT owner, object_type, COUNT(*) object_count
FROM dba_objects
WHERE owner IN ('HR_TEST','FIN_TEST')
GROUP BY owner, object_type
ORDER BY owner, object_type;
-- Check table row counts
SELECT table_name, num_rows
FROM dba_tables
WHERE owner = 'HR_TEST'
ORDER BY table_name;
-- Verify constraints
SELECT constraint_name, constraint_type, status
FROM dba_constraints
WHERE owner = 'HR_TEST'
AND status != 'ENABLED';
-- Check invalid objects
SELECT owner, object_name, object_type
FROM dba_objects
WHERE status = 'INVALID'
AND owner IN ('HR_TEST','FIN_TEST');
validate_import_data.sh
#!/bin/bash
# Comprehensive import validation
SCHEMA=$1
LOG_FILE="import_validation_${SCHEMA}.log"
sqlplus -s system/password > $LOG_FILE << EOF
SET PAGESIZE 1000
SET LINESIZE 200
-- Object summary
PROMPT Object Summary for $SCHEMA
SELECT object_type, COUNT(*) count
FROM dba_objects
WHERE owner = UPPER('$SCHEMA')
GROUP BY object_type;
-- Table analysis
PROMPT Table Statistics
SELECT table_name, num_rows, blocks,
last_analyzed
FROM dba_tables
WHERE owner = UPPER('$SCHEMA')
ORDER BY num_rows DESC;
-- Index status
PROMPT Index Status
SELECT index_name, table_name, status
FROM dba_indexes
WHERE owner = UPPER('$SCHEMA')
AND status != 'VALID';
-- Constraint violations
PROMPT Constraint Status
SELECT constraint_name, table_name,
constraint_type, status
FROM dba_constraints
WHERE owner = UPPER('$SCHEMA')
AND status = 'DISABLED';
EOF
echo "Validation complete. Check $LOG_FILE for results."
encrypted_import.sh
#!/bin/bash
# Import encrypted dump files
impdp system/password \
directory=dp_dir \
dumpfile=encrypted_data.dmp \
logfile=encrypted_import.log \
encryption_password=ComplexPass123! \
schemas=sensitive_data \
remap_schema=sensitive_data:test_sensitive \
transform=oid:n \
job_name=ENCRYPTED_IMPORT
masked_import.sh
#!/bin/bash
# Import with data masking for sensitive columns (the hr_pkg masking functions must already exist in the target database)
impdp system/password \
directory=dp_dir \
dumpfile=prod_data.dmp \
schemas=hr \
remap_schema=hr:hr_test \
remap_data=hr.employees.salary:hr_pkg.mask_salary \
remap_data=hr.employees.ssn:hr_pkg.mask_ssn \
remap_data=hr.employees.email:hr_pkg.mask_email \
job_name=MASKED_IMPORT
automated_schema_refresh.sh
#!/bin/bash
# Automated test environment refresh
# Configuration
SOURCE_DUMP=$1
TARGET_SCHEMA=$2
TARGET_ENV=$3
LOG_DIR="/u01/logs"
EMAIL="${EMAIL:-dba@example.com}"   # notification address used below; placeholder, adjust as needed
# Validate parameters
if [ -z "$SOURCE_DUMP" ] || [ -z "$TARGET_SCHEMA" ] || [ -z "$TARGET_ENV" ]; then
echo "Usage: $0 <source_dump> <target_schema> <target_env>"
exit 1
fi
# Set timestamp
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
LOG_FILE="${LOG_DIR}/refresh_${TARGET_SCHEMA}_${TIMESTAMP}.log"
# Start logging
exec 1> >(tee -a "$LOG_FILE")
exec 2>&1
echo "Starting schema refresh at $(date)"
echo "Source: $SOURCE_DUMP"
echo "Target: ${TARGET_SCHEMA} in ${TARGET_ENV}"
# Drop existing schema
sqlplus -s system/password@${TARGET_ENV} << EOF
DROP USER ${TARGET_SCHEMA} CASCADE;
CREATE USER ${TARGET_SCHEMA} IDENTIFIED BY temp_password
DEFAULT TABLESPACE users
QUOTA UNLIMITED ON users;
GRANT connect, resource TO ${TARGET_SCHEMA};
EOF
# Import schema
impdp system/password@${TARGET_ENV} \
directory=dp_dir \
dumpfile=$SOURCE_DUMP \
schemas=prod_schema \
remap_schema=prod_schema:${TARGET_SCHEMA} \
remap_tablespace=prod_data:users \
exclude=db_link \
exclude=directory \
transform=oid:n \
parallel=4 \
job_name=REFRESH_${TARGET_SCHEMA}_${TIMESTAMP}
# Validate import
if [ $? -eq 0 ]; then
echo "Import completed successfully"
# Run post-import tasks
sqlplus -s system/password@${TARGET_ENV} << EOF
-- Compile invalid objects
EXEC DBMS_UTILITY.compile_schema(schema => UPPER('${TARGET_SCHEMA}'));
-- Gather statistics
EXEC DBMS_STATS.gather_schema_stats(UPPER('${TARGET_SCHEMA}'));
EOF
# Send success notification
echo "Schema refresh completed successfully" | \
mail -s "Refresh Success - ${TARGET_SCHEMA}" $EMAIL
else
echo "Import failed with error"
tail -100 $LOG_FILE | \
mail -s "Refresh Failed - ${TARGET_SCHEMA}" $EMAIL
exit 1
fi
echo "Schema refresh completed at $(date)"
disaster_recovery_import.sh
#!/bin/bash
# Emergency database recovery from backup
# Configuration
BACKUP_DIR="/backup/datapump"
RECOVERY_DATE=$1
if [ -z "$RECOVERY_DATE" ]; then
echo "Usage: $0 <YYYYMMDD>"
exit 1
fi
# Find appropriate backup
BACKUP_FILE=$(ls -t ${BACKUP_DIR}/full_db_${RECOVERY_DATE}_*.dmp 2>/dev/null | head -1)
if [ -z "$BACKUP_FILE" ]; then
echo "ERROR: No backup found for date $RECOVERY_DATE"
exit 1
fi
echo "Starting disaster recovery from $BACKUP_FILE"
# Prepare database for recovery
sqlplus -s / as sysdba << EOF
-- Restrict database access
ALTER SYSTEM ENABLE RESTRICTED SESSION;
-- Clear existing data (careful!)
-- This is a simplified example - real DR would be more complex
EOF
# Run full import
impdp system/password \
directory=dp_dir \
dumpfile=$(basename $BACKUP_FILE) \
full=y \
parallel=16 \
table_exists_action=replace \
"exclude=schema:\"IN ('SYS','SYSTEM')\"" \
logfile=disaster_recovery_${RECOVERY_DATE}.log \
job_name=DISASTER_RECOVERY_${RECOVERY_DATE}
# Post-recovery tasks
sqlplus -s / as sysdba << EOF
-- Compile all schemas
EXEC UTL_RECOMP.recomp_parallel(16);
-- Update statistics
EXEC DBMS_STATS.gather_database_stats(degree=>16);
-- Enable normal access
ALTER SYSTEM DISABLE RESTRICTED SESSION;
EOF
echo "Disaster recovery completed"
-- pre_import_checks.sql
-- Essential checks before import
-- Check tablespace space
SELECT tablespace_name,
ROUND(SUM(bytes)/1024/1024/1024) allocated_gb,
ROUND(SUM(DECODE(autoextensible,'YES',maxbytes,bytes))/1024/1024/1024) max_gb
FROM dba_data_files
GROUP BY tablespace_name
ORDER BY tablespace_name;
-- Check directory object
SELECT directory_name, directory_path
FROM dba_directories
WHERE directory_name = 'DP_DIR';
-- Verify dump file exists
!ls -la /u01/exports/*.dmp
-- Check for running jobs
SELECT job_name, state
FROM dba_datapump_jobs
WHERE state != 'NOT RUNNING';
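The SQL checks above pair well with a quick OS-side look at the dump files themselves. A minimal sketch, assuming the DP_DIR directory object maps to /u01/exports (adjust DP_PATH to your environment):
os_pre_import_checks.sh
#!/bin/bash
# OS-side companion to the SQL pre-import checks
# DP_PATH must match the path behind the DP_DIR directory object
DP_PATH="/u01/exports"
# Free space on the filesystem holding the dump files
df -h "$DP_PATH"
# Size and ownership of the dump files to be imported
ls -lh "$DP_PATH"/*.dmp
# Warn about dump files the current OS user cannot read
for f in "$DP_PATH"/*.dmp; do
[ -r "$f" ] || echo "WARNING: not readable: $f"
done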
import_best_practices.sh
#!/bin/bash
# Template for production imports
# Enable error handling
set -e
set -o pipefail
# Configuration
DUMP_FILE=$1
TARGET_SCHEMA=$2
LOG_FILE="import_${TARGET_SCHEMA}_$(date +%Y%m%d_%H%M%S).log"
# Function for cleanup
cleanup() {
echo "Performing cleanup..."
# Add cleanup tasks here
}
# Set trap for cleanup
trap cleanup EXIT
# Validate inputs
if [ -z "$DUMP_FILE" ] || [ -z "$TARGET_SCHEMA" ]; then
echo "Usage: $0 <dump_file> <target_schema>"
exit 1
fi
# Check if dump file exists
if [ ! -f "$DUMP_FILE" ]; then
echo "ERROR: Dump file not found: $DUMP_FILE"
exit 1
fi
# Run import with best practices
impdp system/password \
directory=dp_dir \
dumpfile=$(basename $DUMP_FILE) \
logfile=$LOG_FILE \
schemas=source_schema \
remap_schema=source_schema:$TARGET_SCHEMA \
table_exists_action=replace \
exclude=statistics \
transform=oid:n \
parallel=4 \
metrics=y \
logtime=all \
job_name=IMPORT_${TARGET_SCHEMA} || IMPDP_RC=$?
# Post-import tasks (exit status captured above so set -e does not abort before we can report it)
if [ "${IMPDP_RC:-0}" -eq 0 ]; then
echo "Running post-import tasks..."
sqlplus -s system/password << EOF
-- Compile schema
EXEC DBMS_UTILITY.compile_schema(UPPER('$TARGET_SCHEMA'));
-- Gather statistics
EXEC DBMS_STATS.gather_schema_stats(UPPER('$TARGET_SCHEMA'), degree=>4);
EOF
echo "Import completed successfully"
else
echo "Import failed - check log file: $LOG_FILE"
exit 1
fi
| Error | Cause | Solution |
| --- | --- | --- |
| ORA-31626 | Job does not exist | Check the job_name parameter |
| ORA-31631 | Privileges required | Grant the IMP_FULL_DATABASE role |
| ORA-31684 | Object already exists | Use table_exists_action |
| ORA-39001 | Invalid argument value | Check parameter syntax |
| ORA-39126 | Worker process error | Check the parallel degree |
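For ORA-31631, the importing user needs the IMP_FULL_DATABASE role, and many parameter errors trace back to a missing or unreadable directory object. A one-time setup sketch; the user name import_admin and the path are placeholders:
grant_import_privs.sh
#!/bin/bash
# One-time setup for a dedicated import user (import_admin and the path are placeholders)
sqlplus -s / as sysdba << EOF
GRANT IMP_FULL_DATABASE TO import_admin;
CREATE OR REPLACE DIRECTORY dp_dir AS '/u01/exports';
GRANT READ, WRITE ON DIRECTORY dp_dir TO import_admin;
EOF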
To troubleshoot a failing job, rerun the import with detailed tracing and metrics enabled:
# Enable detailed tracing
impdp system/password \
directory=dp_dir \
dumpfile=test.dmp \
schemas=hr \
trace=480300 \
metrics=y \
logtime=all \
keep_master=y \
job_name=DEBUG_IMPORT

This comprehensive guide provides everything needed for successful Data Pump import operations in production environments.