# Monday, January 5, 2015
#
# Points to remember in Couchbase
# (NOTE: the two lines above are stray note-taking text unrelated to the
# Oracle script below; commented out so they are not executed as commands.)
#!/bin/bash
#############################################################################
# Script: noncdb2pdb_single_instance.sh
# Purpose: Enhanced Non-CDB to PDB Conversion for Single Instance
# Version: 4.0 Enhanced Edition
# Features:
#   - Full audit trail of all user inputs and SQL executions
#   - New directory with fresh timestamp on every run/retry
#   - Pre-migration capture of sequences, services, triggers, synonyms, TDE, stats
#   - DBMS_PDB.CHECK_PLUG_COMPATIBILITY validation
#   - Parameter comparison with source and target side-by-side
#   - Post-migration capture and comparison
#   - Comprehensive error detection and reporting
#   - Remediation script generation
#############################################################################

# Fail a pipeline when any stage fails. Note: 'set -e' is deliberately not
# used — this script checks critical return codes explicitly and must keep
# running after expected non-zero statuses (declined prompts, probe queries).
set -o pipefail

# Script identity
SCRIPT_NAME="$(basename "$0")"
SCRIPT_VERSION="4.0"
SCRIPT_START_TIME=$(date +%s)  # epoch seconds, for elapsed-time reporting

# Get script directory (absolute path of the directory containing this file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Source all required modules
# NOTE(review): these sources are unchecked — if a module file is missing,
# helper functions (log_*, prompt_*, execute_sql*, ...) will be undefined
# at call time; consider guarding each source with an existence check.
source "${SCRIPT_DIR}/lib/common_functions.sh"
source "${SCRIPT_DIR}/modules/compatibility_check.sh"
source "${SCRIPT_DIR}/modules/parameter_comparison.sh"
source "${SCRIPT_DIR}/modules/pre_migration_capture.sh"
source "${SCRIPT_DIR}/modules/post_migration_capture.sh"
source "${SCRIPT_DIR}/modules/post_migration_setup.sh"
source "${SCRIPT_DIR}/modules/dba_shortcuts.sh"

#############################################################################
# GLOBAL VARIABLES
#############################################################################

# Base directories (CONVERSION_BASE may be overridden via the environment)
CONVERSION_BASE="${CONVERSION_BASE:-/opt/oracle/conversions}"

# Directory paths (will be set during initialization)
CONVERSION_DIR=""
LOG_DIR=""
SNAPSHOT_DIR=""
SCRIPTS_DIR=""
ARTIFACTS_DIR=""
STATE_DIR=""
PARAMS_DIR=""
REPORTS_DIR=""

# Database configuration (filled from CLI flags, discovery, or prompts)
CDB_SID=""
NONCDB_SID=""
PDB_NAME=""
CDB_DIR=""            # directory holding the CDB's datafiles
NONCDB_DIR=""         # directory holding the Non-CDB's datafiles
NEW_LOCATION=""       # destination for PDB datafiles (COPY/MOVE only)
CONVERSION_METHOD=""  # one of: COPY | MOVE | NOCOPY

# TDE configuration
IS_TDE_ENABLED=false
TDE_PASSWORD=""       # collected only when the wallet is password-based
WALLET_LOCATION=""

# Service arrays
declare -A NONCDB_SERVICES     # service name -> network name (dba_services)
declare -A SERVICE_PROPERTIES  # NOTE(review): not populated in this file — confirm use

# Execution modes
PROMPT_USER=true              # false => --auto (no interactive prompts)
TEST_MODE=false               # true  => --test (no actual database changes)
GENERATE_COMMANDS_ONLY=false  # true  => emit commands for DBA review, then exit
SKIP_BACKUP_CHECK=false       # true  => --skip-backup-check (not recommended)
RESUME_MODE=false             # true  => --resume from last checkpoint
AUTO_RETRY=3                  # presumably consumed by execute_sql_with_retry — TODO confirm
RETRY_DELAY=10                # seconds between retries — TODO confirm consumer

# Pre/Post capture directories
PRE_CAPTURE_DIR=""
POST_CAPTURE_DIR=""

# XML descriptor produced by DBMS_PDB.DESCRIBE
XML_DESCRIPTOR_FILE=""

#############################################################################
# ARGUMENT PARSING
#############################################################################

#############################################################################
# Parse command-line options into the script's global configuration.
# Globals (written): TEST_MODE, PROMPT_USER, RESUME_MODE,
#   GENERATE_COMMANDS_ONLY, SKIP_BACKUP_CHECK, DEBUG, CDB_SID, NONCDB_SID,
#   PDB_NAME, CONVERSION_METHOD
# Arguments: the script's "$@"
# Exits: 1 on an unknown option or a value-taking option with no value;
#        0 after --help
#############################################################################
parse_arguments() {
    while [[ $# -gt 0 ]]; do
        case "$1" in
            --test)
                TEST_MODE=true
                shift
                ;;
            --auto)
                PROMPT_USER=false
                shift
                ;;
            --resume)
                RESUME_MODE=true
                shift
                ;;
            --generate-commands)
                GENERATE_COMMANDS_ONLY=true
                shift
                ;;
            --skip-backup-check)
                SKIP_BACKUP_CHECK=true
                shift
                ;;
            --debug)
                DEBUG=true
                shift
                ;;
            --cdb|--noncdb|--pdb-name|--method)
                # These options require a value. Guard against a missing one:
                # 'shift 2' with a single argument left fails WITHOUT
                # consuming "$1", which previously caused an infinite loop
                # (e.g. './script --cdb' would spin forever).
                if [[ $# -lt 2 ]]; then
                    echo "Option $1 requires a value"
                    echo "Use --help for usage information"
                    exit 1
                fi
                case "$1" in
                    --cdb)      CDB_SID="$2" ;;
                    --noncdb)   NONCDB_SID="$2" ;;
                    --pdb-name) PDB_NAME="$2" ;;
                    --method)   CONVERSION_METHOD="$2" ;;
                esac
                shift 2
                ;;
            --help)
                show_help
                exit 0
                ;;
            *)
                echo "Unknown option: $1"
                echo "Use --help for usage information"
                exit 1
                ;;
        esac
    done
}

#############################################################################
# Print the usage/help text to stdout. The heredoc delimiter is unquoted on
# purpose so $SCRIPT_NAME and $SCRIPT_VERSION expand into the output.
#############################################################################
show_help() {
    cat << EOF
================================================================================
$SCRIPT_NAME - Version $SCRIPT_VERSION
Enhanced Non-CDB to PDB Conversion Tool for Single Instance Oracle Databases
================================================================================

USAGE:
    $SCRIPT_NAME [OPTIONS]

OPTIONS:
    --test                  Run in test mode (no actual changes)
    --auto                  Run without user prompts (automated mode)
    --resume                Resume from last checkpoint
    --generate-commands     Generate commands only for DBA review
    --skip-backup-check     Skip backup validation (not recommended)
    --debug                 Enable debug output
    --cdb SID              Specify target CDB SID
    --noncdb SID           Specify source Non-CDB SID
    --pdb-name NAME        Specify target PDB name
    --method METHOD        Conversion method: COPY, MOVE, or NOCOPY
    --help                 Show this help message

FEATURES:
    - Complete audit trail of all user inputs and SQL executions
    - Fresh directory with new timestamp on every run
    - Pre-migration capture: sequences, services, triggers, synonyms, TDE, stats
    - DBMS_PDB.CHECK_PLUG_COMPATIBILITY validation before conversion
    - Parameter comparison between source and target (side-by-side)
    - Post-migration capture and comparison
    - Comprehensive error detection and reporting
    - Remediation script generation for discrepancies

REQUIREMENTS:
    - Oracle Database 19c or higher
    - SYSDBA privileges
    - Sufficient disk space for backups and file copies

EXAMPLES:
    # Interactive mode with all prompts
    $SCRIPT_NAME

    # Automated mode with specified databases
    $SCRIPT_NAME --auto --cdb CDB19C --noncdb ORCL --pdb-name ORCL_PDB

    # Generate commands only for review
    $SCRIPT_NAME --generate-commands --cdb CDB19C --noncdb ORCL

================================================================================
EOF
}

#############################################################################
# INITIALIZATION
#############################################################################

#############################################################################
# Validate the Oracle environment, create a fresh timestamped working
# directory tree, start logging, and record all run parameters.
# Globals (written): CONVERSION_DIR, LOG_DIR, SNAPSHOT_DIR, SCRIPTS_DIR,
#   ARTIFACTS_DIR, STATE_DIR, PARAMS_DIR, REPORTS_DIR, PRE_CAPTURE_DIR,
#   POST_CAPTURE_DIR, XML_DESCRIPTOR_FILE, TIMESTAMP, and (if re-entered
#   by the operator) ORACLE_HOME / PATH.
# Exits: 1 when no valid Oracle environment can be established.
#############################################################################
initialize_conversion() {
    log_section "Initializing Conversion Environment"

    # Validate Oracle environment with user option to continue
    if ! validate_oracle_env; then
        log_warning "Oracle environment validation failed"
        log_info "Current ORACLE_HOME: ${ORACLE_HOME:-not set}"
        log_info "This could cause issues with database operations"

        if [[ "$PROMPT_USER" == "true" ]]; then
            if prompt_user "Do you want to specify ORACLE_HOME and continue?"; then
                prompt_input "Enter ORACLE_HOME path" "/u01/app/oracle/product/19c/dbhome_1" ORACLE_HOME
                export ORACLE_HOME
                export PATH="$ORACLE_HOME/bin:$PATH"

                # Re-validate with the operator-supplied ORACLE_HOME
                if ! validate_oracle_env; then
                    log_error "Oracle environment still invalid after update"
                    if ! prompt_user "Continue anyway (not recommended)?"; then
                        exit 1
                    fi
                fi
            else
                log_error "Cannot proceed without valid Oracle environment"
                exit 1
            fi
        else
            # Auto mode cannot recover interactively — abort.
            log_error "Cannot proceed without valid Oracle environment in auto mode"
            exit 1
        fi
    fi

    # Always create a new directory with fresh timestamp (addresses retry issue)
    # NOTE(review): TIMESTAMP is set here but not referenced below —
    # create_conversion_directory presumably stamps the directory itself; confirm.
    TIMESTAMP=$(date +%Y%m%d_%H%M%S)

    CONVERSION_DIR=$(create_conversion_directory "$NONCDB_SID" "$CDB_SID" "$CONVERSION_BASE" "true")

    # Set all directory paths
    LOG_DIR="${CONVERSION_DIR}/logs"
    SNAPSHOT_DIR="${CONVERSION_DIR}/snapshots"
    SCRIPTS_DIR="${CONVERSION_DIR}/scripts"
    ARTIFACTS_DIR="${CONVERSION_DIR}/artifacts"
    STATE_DIR="${CONVERSION_DIR}/state"
    PARAMS_DIR="${CONVERSION_DIR}/parameters"
    REPORTS_DIR="${CONVERSION_DIR}/reports"
    PRE_CAPTURE_DIR="${SNAPSHOT_DIR}/before"
    POST_CAPTURE_DIR="${SNAPSHOT_DIR}/after"
    XML_DESCRIPTOR_FILE="${ARTIFACTS_DIR}/noncdb_descriptor.xml"

    # Initialize logging
    init_logging "$LOG_DIR" "conversion"

    # Record all user-provided parameters (audit trail)
    log_user_input "SCRIPT PARAMETERS:"
    log_user_input "  CDB_SID: $CDB_SID"
    log_user_input "  NONCDB_SID: $NONCDB_SID"
    log_user_input "  PDB_NAME: $PDB_NAME"
    log_user_input "  CONVERSION_METHOD: $CONVERSION_METHOD"
    log_user_input "  TEST_MODE: $TEST_MODE"
    log_user_input "  PROMPT_USER: $PROMPT_USER"
    log_user_input "  SKIP_BACKUP_CHECK: $SKIP_BACKUP_CHECK"

    # NOTE(review): CONVERSION_ID is not set in this file — presumably set by
    # create_conversion_directory or a sourced module; verify.
    log_info "Conversion ID: $CONVERSION_ID"
    log_info "Working Directory: $CONVERSION_DIR"

    # Create README describing the working-directory layout for operators
    cat > "${CONVERSION_DIR}/README.txt" << EOF
================================================================================
Non-CDB to PDB Conversion
================================================================================
Conversion ID: ${CONVERSION_ID}
Started: $(date)
Source Non-CDB: ${NONCDB_SID}
Target CDB: ${CDB_SID}
Target PDB: ${PDB_NAME}

Directory Structure:
- logs/           : All execution logs including audit trail
- snapshots/      : Before and after database captures
  - before/       : Pre-migration state
  - after/        : Post-migration state
- scripts/        : Generated SQL scripts
- artifacts/      : Conversion artifacts (XML descriptors)
- state/          : Checkpoint and state files
- parameters/     : Parameter comparison files
- reports/        : Comparison and validation reports

Key Log Files:
- logs/conversion_*.log      : Main execution log
- logs/conversion_audit_*.log : Audit trail of all operations
- logs/conversion_sql_*.log   : All SQL executions with output
- logs/conversion_user_input_*.log : All user responses recorded
- logs/conversion_errors_*.log : Error details

================================================================================
EOF

    log_success "Initialization completed"
}

#############################################################################
# DATABASE DISCOVERY
#############################################################################

#############################################################################
# Discover running Oracle instances from their pmon processes, classify each
# as CDB vs Non-CDB, and settle on the target CDB and source Non-CDB (via
# CLI flags, auto-detection, confirmation prompts, or manual entry). Also
# resolves the datafile directory of each selected database.
# Globals (written): CDB_SID, NONCDB_SID, CDB_DIR, NONCDB_DIR, ORACLE_SID
# Exits: 1 when no usable databases can be identified.
#############################################################################
discover_databases() {
    log_section "Discovering Oracle Databases"

    # Find running databases via their pmon background process; ASM and APX
    # instances are filtered out.
    # NOTE(review): 'pgrep -f pmon' would be more robust than a ps|grep chain.
    local pmon_list=$(ps -ef | grep pmon | grep -v grep | grep -v ASM | grep -v APX)
    local db_list=()

    while IFS= read -r line; do
        if [[ -n "$line" ]]; then
            # The SID is everything after the literal "pmon_" in the process name
            local sid=$(echo "$line" | sed 's/.*pmon_//g')
            db_list+=("$sid")
        fi
    done <<< "$pmon_list"

    if [[ ${#db_list[@]} -eq 0 ]]; then
        log_error "No Oracle databases found running"
        log_warning "Make sure Oracle databases are started and ORACLE_HOME is set correctly"
        if ! prompt_user "Do you want to manually specify database SIDs?"; then
            log_error "Cannot proceed without running databases"
            exit 1
        fi
        # Allow manual entry
        prompt_input "Enter target CDB SID" "" CDB_SID
        prompt_input "Enter source Non-CDB SID" "" NONCDB_SID
        if [[ -z "$CDB_SID" ]] || [[ -z "$NONCDB_SID" ]]; then
            log_error "Both CDB and Non-CDB SIDs are required"
            exit 1
        fi
        log_info "Manually specified - Target CDB: $CDB_SID, Source Non-CDB: $NONCDB_SID"
        return 0
    fi

    log_info "Found ${#db_list[@]} running databases: ${db_list[*]}"

    # Classify databases (is_cdb / is_database_running come from sourced libs)
    local cdbs=()
    local noncdbs=()

    for sid in "${db_list[@]}"; do
        log_debug "Checking database: $sid"
        if is_database_running "$sid"; then
            if is_cdb "$sid"; then
                cdbs+=("$sid")
                log_info "  $sid: Container Database (CDB)"
            else
                noncdbs+=("$sid")
                log_info "  $sid: Non-Container Database"
            fi
        else
            log_warning "  $sid: Process found but database check failed"
        fi
    done

    # Select CDB if not specified - ALWAYS allow user to confirm selection
    if [[ -z "$CDB_SID" ]]; then
        if [[ ${#cdbs[@]} -eq 0 ]]; then
            log_warning "No CDB databases detected automatically"
            log_info "This could be due to:"
            log_info "  - Database not open"
            log_info "  - Permission issues"
            log_info "  - Database detection failure"
            if prompt_user "Do you want to manually specify the CDB SID?"; then
                prompt_input "Enter target CDB SID" "" CDB_SID
                if [[ -z "$CDB_SID" ]]; then
                    log_error "CDB SID is required"
                    exit 1
                fi
            else
                log_error "Cannot proceed without a target CDB"
                exit 1
            fi
        elif [[ ${#cdbs[@]} -eq 1 ]]; then
            # Even with single option, confirm with user (unless auto mode)
            if [[ "$PROMPT_USER" == "true" ]]; then
                prompt_confirm_selection "Confirm target CDB" CDB_SID "${cdbs[0]}"
            else
                CDB_SID="${cdbs[0]}"
                log_info "Auto-selected CDB: $CDB_SID"
            fi
        else
            prompt_select "Select target CDB:" CDB_SID "${cdbs[@]}"
        fi
    fi

    # Select Non-CDB if not specified - ALWAYS allow user to confirm selection
    if [[ -z "$NONCDB_SID" ]]; then
        if [[ ${#noncdbs[@]} -eq 0 ]]; then
            log_warning "No Non-CDB databases detected automatically"
            log_info "This could be due to:"
            log_info "  - Database not open"
            log_info "  - Permission issues"
            log_info "  - All databases are already CDBs"
            if prompt_user "Do you want to manually specify the Non-CDB SID?"; then
                prompt_input "Enter source Non-CDB SID" "" NONCDB_SID
                if [[ -z "$NONCDB_SID" ]]; then
                    log_error "Non-CDB SID is required"
                    exit 1
                fi
            else
                log_error "Cannot proceed without a source Non-CDB"
                exit 1
            fi
        elif [[ ${#noncdbs[@]} -eq 1 ]]; then
            # Even with single option, confirm with user (unless auto mode)
            if [[ "$PROMPT_USER" == "true" ]]; then
                prompt_confirm_selection "Confirm source Non-CDB" NONCDB_SID "${noncdbs[0]}"
            else
                NONCDB_SID="${noncdbs[0]}"
                log_info "Auto-selected Non-CDB: $NONCDB_SID"
            fi
        else
            prompt_select "Select source Non-CDB:" NONCDB_SID "${noncdbs[@]}"
        fi
    fi

    log_info "Target CDB: $CDB_SID"
    log_info "Source Non-CDB: $NONCDB_SID"

    # Resolve each database's datafile directory: take the path of the first
    # datafile and strip the filename. Falls back to a prompt on failure.
    export ORACLE_SID="$CDB_SID"
    CDB_DIR=$(execute_sql_query "SELECT SUBSTR(name, 1, INSTR(name, '/', -1)-1) FROM v\$datafile WHERE rownum = 1;" "$CDB_SID" "Get CDB datafile location")
    if [[ -z "$CDB_DIR" ]]; then
        log_warning "Could not determine CDB datafile location automatically"
        prompt_input "Enter CDB datafile directory" "/u01/app/oracle/oradata/$CDB_SID" CDB_DIR
    fi

    export ORACLE_SID="$NONCDB_SID"
    NONCDB_DIR=$(execute_sql_query "SELECT SUBSTR(name, 1, INSTR(name, '/', -1)-1) FROM v\$datafile WHERE rownum = 1;" "$NONCDB_SID" "Get Non-CDB datafile location")
    if [[ -z "$NONCDB_DIR" ]]; then
        log_warning "Could not determine Non-CDB datafile location automatically"
        prompt_input "Enter Non-CDB datafile directory" "/u01/app/oracle/oradata/$NONCDB_SID" NONCDB_DIR
    fi

    log_info "CDB datafile location: $CDB_DIR"
    log_info "Non-CDB datafile location: $NONCDB_DIR"
}

#############################################################################
# CONVERSION PARAMETERS
#############################################################################

#############################################################################
# Collect the remaining conversion parameters: PDB name, conversion method,
# and (for COPY/MOVE) the destination directory for the PDB datafiles.
# Globals (written): PDB_NAME, CONVERSION_METHOD, NEW_LOCATION
# Exits: 1 if the datafile destination directory cannot be created.
#############################################################################
get_conversion_parameters() {
    log_section "Gathering Conversion Parameters"

    # PDB Name (defaults to <source SID>_PDB)
    if [[ -z "$PDB_NAME" ]]; then
        prompt_input "Enter PDB name" "${NONCDB_SID}_PDB" PDB_NAME
    fi

    log_user_input "PDB_NAME confirmed: $PDB_NAME"

    # Conversion Method
    if [[ -z "$CONVERSION_METHOD" ]]; then
        prompt_select "Select conversion method:" CONVERSION_METHOD \
            "COPY - Copy datafiles (safest, requires disk space)" \
            "MOVE - Move datafiles (faster, original files moved)" \
            "NOCOPY - Use existing files (fastest, modifies original)"

        # prompt_select returns the whole menu line; keep only the keyword
        CONVERSION_METHOD=$(echo "$CONVERSION_METHOD" | cut -d' ' -f1)
    fi

    log_user_input "CONVERSION_METHOD confirmed: $CONVERSION_METHOD"

    # COPY/MOVE need a destination directory for the copied/moved datafiles
    if [[ "$CONVERSION_METHOD" != "NOCOPY" ]]; then
        prompt_input "Enter new location for PDB files" "${CDB_DIR}/${PDB_NAME}" NEW_LOCATION

        if [[ ! -d "$NEW_LOCATION" ]]; then
            log_info "Creating directory: $NEW_LOCATION"
            # BUGFIX: abort immediately if the directory cannot be created.
            # Previously a failed mkdir was ignored and the CREATE PLUGGABLE
            # DATABASE step would fail much later with a confusing error.
            if ! mkdir -p "$NEW_LOCATION"; then
                log_error "Failed to create directory: $NEW_LOCATION"
                exit 1
            fi
        fi
    fi

    log_info "Configuration Summary:"
    log_info "  Source Non-CDB: $NONCDB_SID"
    log_info "  Target CDB: $CDB_SID"
    log_info "  Target PDB: $PDB_NAME"
    log_info "  Conversion Method: $CONVERSION_METHOD"
    log_info "  New Location: ${NEW_LOCATION:-N/A}"
}

#############################################################################
# BACKUP VALIDATION
#############################################################################

#############################################################################
# Verify that a full datafile backup completed within the last 7 days exists
# and require the operator to explicitly acknowledge the backup status
# before the conversion proceeds.
# Globals (read): SKIP_BACKUP_CHECK, NONCDB_SID, CONVERSION_DIR
# Exits: 1 if no backup is found and the user declines to continue, or if
#        the user does not acknowledge the backup status.
#############################################################################
validate_backup() {
    log_section "Backup Validation"

    if [[ "$SKIP_BACKUP_CHECK" == "true" ]]; then
        log_warning "Backup validation skipped as requested"
        log_user_input "Backup check skipped by user request"
        return 0
    fi

    export ORACLE_SID="$NONCDB_SID"

    # Check for a full ('D') backup set completed within the last 7 days
    local last_backup=$(execute_sql_query "
        SELECT TO_CHAR(MAX(completion_time), 'YYYY-MM-DD HH24:MI:SS')
        FROM v\$backup_set
        WHERE backup_type = 'D'
        AND completion_time > SYSDATE - 7;
    " "$NONCDB_SID" "Check backup status")

    # Save RMAN backup list for the audit trail.
    # BUGFIX: backup_info/ is not created by initialize_conversion, so the
    # redirection below used to fail and the listing was silently lost —
    # create the directory before writing into it.
    mkdir -p "${CONVERSION_DIR}/backup_info"
    log_info "Capturing RMAN backup information..."
    rman target / <<EOF > "${CONVERSION_DIR}/backup_info/rman_backup_list.txt" 2>&1
LIST BACKUP SUMMARY;
EXIT;
EOF

    if [[ -z "$last_backup" ]] || [[ "$last_backup" == *"no rows"* ]]; then
        log_error "No recent database backup found (within last 7 days)"

        if prompt_user "No recent backup found. Do you want to continue WITHOUT a backup? (DANGEROUS)"; then
            log_warning "User proceeding without backup"
            log_user_input "User acknowledged no backup and chose to proceed"
        else
            log_error "Conversion aborted - backup required"
            exit 1
        fi
    else
        log_success "Last backup completed: $last_backup"
    fi

    # Get explicit user acknowledgment before the structural change
    echo ""
    echo "================================================================================"
    echo "IMPORTANT: BACKUP ACKNOWLEDGMENT REQUIRED"
    echo "================================================================================"
    echo "You are about to convert Non-CDB database '$NONCDB_SID' to a PDB."
    echo "This is a major structural change to your database."
    echo ""
    echo "Last Backup: ${last_backup:-NOT FOUND}"
    echo ""
    echo "Please confirm:"
    echo "1. You have reviewed the backup status"
    echo "2. You understand the risks involved"
    echo "3. You have a recovery plan if something goes wrong"
    echo "================================================================================"

    if ! prompt_user "Do you acknowledge the backup status and want to proceed?"; then
        log_error "User did not acknowledge backup status"
        exit 1
    fi

    log_user_input "User acknowledged backup status and confirmed to proceed"
    log_success "Backup validation completed"
}

#############################################################################
# PRE-MIGRATION PHASE
#############################################################################

#############################################################################
# Pre-migration phase: snapshot the source database, compare init parameters
# between source and target, pause for operator review, handle TDE, and
# record services for later migration. Exits 0 if the operator stops after
# the parameter review.
#############################################################################
run_pre_migration_phase() {
    log_section "Pre-Migration Phase"

    # Step 1: Capture source database state (delegated to the
    # pre_migration_capture module; writes into $PRE_CAPTURE_DIR)
    log_info "Step 1: Capturing source database state..."
    run_pre_migration_capture "$NONCDB_SID" "$PRE_CAPTURE_DIR"

    # Step 2: Capture and display source parameters
    log_info "Step 2: Capturing and comparing parameters..."
    run_parameter_comparison "$NONCDB_SID" "$CDB_SID" "$PDB_NAME" "$PARAMS_DIR"

    # Ask if user wants to proceed after seeing parameters
    echo ""
    echo "Review the parameter differences above."
    echo "After conversion, you can set PDB-level parameters if needed."
    echo ""

    if ! prompt_user "Do you want to proceed with the conversion?"; then
        log_info "User chose to stop after parameter review"
        exit 0
    fi

    # Step 3: Handle TDE/Wallet
    log_info "Step 3: Checking TDE configuration..."
    handle_tde_configuration

    # Step 4: Capture services for migration (fills NONCDB_SERVICES)
    log_info "Step 4: Capturing services..."
    capture_source_services

    log_success "Pre-migration phase completed"
}

#############################################################################
# Detect whether the source database uses TDE and, if so, record the wallet
# details, capture TDE metadata, collect the wallet password (for
# password-based wallets only), and try to open the CDB keystore.
# Globals (written): IS_TDE_ENABLED, WALLET_LOCATION, TDE_PASSWORD, ORACLE_SID
#############################################################################
handle_tde_configuration() {
    log_subsection "TDE/Wallet Configuration"

    export ORACLE_SID="$NONCDB_SID"

    # Check for encrypted tablespaces
    local encrypted_ts=$(execute_sql_query "SELECT COUNT(*) FROM dba_tablespaces WHERE encrypted = 'YES';" "$NONCDB_SID" "Check encrypted tablespaces")

    # Strip whitespace from the sqlplus output before the numeric comparison
    encrypted_ts=$(echo "$encrypted_ts" | tr -d ' ')

    if [[ "$encrypted_ts" -gt 0 ]]; then
        IS_TDE_ENABLED=true
        log_info "TDE is enabled - found $encrypted_ts encrypted tablespaces"

        # Get wallet information
        WALLET_LOCATION=$(execute_sql_query "SELECT wrl_parameter FROM v\$encryption_wallet WHERE rownum = 1;" "$NONCDB_SID" "Get wallet location")
        local wallet_type=$(execute_sql_query "SELECT wallet_type FROM v\$encryption_wallet WHERE rownum = 1;" "$NONCDB_SID" "Get wallet type")

        log_info "Wallet Location: $WALLET_LOCATION"
        log_info "Wallet Type: $wallet_type"

        # Capture TDE info into the pre-migration snapshot
        capture_tde_info "$NONCDB_SID" "$PRE_CAPTURE_DIR"

        # Get password if needed (only password-based wallets prompt)
        if [[ "$wallet_type" == *"PASSWORD"* ]]; then
            prompt_password "Enter TDE wallet password" TDE_PASSWORD
        fi

        # Verify CDB wallet is open — the plug-in will need it
        export ORACLE_SID="$CDB_SID"
        local cdb_wallet=$(execute_sql_query "SELECT status FROM v\$encryption_wallet WHERE rownum = 1;" "$CDB_SID" "Check CDB wallet")

        if [[ "$cdb_wallet" != "OPEN" ]]; then
            log_warning "CDB wallet is not open"

            if [[ -n "$TDE_PASSWORD" ]]; then
                log_info "Opening CDB wallet..."
                # NOTE(review): the wallet password is embedded in the SQL text
                # here and may end up in the SQL audit log — consider redacting.
                execute_sql "ADMINISTER KEY MANAGEMENT SET KEYSTORE OPEN IDENTIFIED BY \"$TDE_PASSWORD\";" "$CDB_SID" "Open CDB wallet"
            fi
        fi
    else
        IS_TDE_ENABLED=false
        log_info "TDE is not enabled in source database"
    fi
}

#############################################################################
# Collect the user-defined services of the source Non-CDB into the global
# associative array NONCDB_SERVICES (service name -> network name). System
# services (SYS%) and the default service named after the SID are excluded;
# spaces are stripped from service names.
#############################################################################
capture_source_services() {
    log_subsection "Capturing Source Services"

    export ORACLE_SID="$NONCDB_SID"

    local service_rows
    service_rows=$(execute_sql_query "
        SELECT name || '|' || network_name
        FROM dba_services
        WHERE name NOT LIKE 'SYS%'
        AND name != UPPER('$NONCDB_SID')
        AND name != LOWER('$NONCDB_SID');
    " "$NONCDB_SID" "Get services")

    # Rebuild the global map from scratch on every invocation
    unset NONCDB_SERVICES
    declare -gA NONCDB_SERVICES

    local name net
    while IFS='|' read -r name net; do
        [[ -z "$name" ]] && continue
        name="${name// /}"  # strip stray whitespace around the service name
        NONCDB_SERVICES["$name"]="$net"
        log_info "Found service: $name (Network: $net)"
    done <<< "$service_rows"

    log_info "Total services found: ${#NONCDB_SERVICES[@]}"
}

#############################################################################
# COMPATIBILITY CHECK PHASE
#############################################################################

#############################################################################
# Put the Non-CDB into READ ONLY mode, generate its XML manifest with
# DBMS_PDB.DESCRIBE, and run the plug-in compatibility check against the
# target CDB. On issues the operator may abort (the database is restarted
# read/write) or proceed anyway. On success the Non-CDB is deliberately
# left READ ONLY for the conversion phase.
#############################################################################
run_compatibility_check() {
    log_section "Compatibility Check Phase"

    # First, we need to generate the XML descriptor
    log_info "Preparing Non-CDB for compatibility check..."

    export ORACLE_SID="$NONCDB_SID"

    # Check current state
    local open_mode=$(get_db_open_mode "$NONCDB_SID")
    log_info "Current database mode: $open_mode"

    if [[ "$open_mode" != "READ ONLY" ]]; then
        # DBMS_PDB.DESCRIBE requires the database to be opened READ ONLY
        log_info "Switching database to READ ONLY mode for descriptor generation..."

        if [[ "$TEST_MODE" != "true" ]]; then
            execute_sql "SHUTDOWN IMMEDIATE;" "$NONCDB_SID" "Shutdown Non-CDB"
            execute_sql "STARTUP MOUNT;" "$NONCDB_SID" "Start Non-CDB in mount"
            execute_sql "ALTER DATABASE OPEN READ ONLY;" "$NONCDB_SID" "Open read only"
        fi
    fi

    # Generate XML descriptor
    log_info "Generating PDB descriptor XML..."

    if [[ "$TEST_MODE" != "true" ]]; then
        execute_sql "
            BEGIN
                DBMS_PDB.DESCRIBE(
                    pdb_descr_file => '${XML_DESCRIPTOR_FILE}'
                );
            END;
            /
        " "$NONCDB_SID" "Generate XML descriptor"

        if [[ ! -f "$XML_DESCRIPTOR_FILE" ]]; then
            log_error "Failed to generate XML descriptor"
            exit 1
        fi

        log_success "XML descriptor generated: $XML_DESCRIPTOR_FILE"
    fi

    # Keep Non-CDB in read-only but don't shut down yet for compatibility check
    # Run compatibility check
    log_info "Running compatibility check..."

    if run_full_compatibility_check "$NONCDB_SID" "$CDB_SID" "$XML_DESCRIPTOR_FILE" "$PDB_NAME" "$REPORTS_DIR"; then
        log_success "Compatibility check passed"
    else
        log_warning "Compatibility check found issues"

        echo ""
        echo "The compatibility check found some issues."
        echo "Please review the reports in: $REPORTS_DIR"
        echo ""

        if ! prompt_user "Do you want to proceed with the conversion despite these issues?"; then
            log_info "User chose to stop after compatibility check"
            # Restore database state (bounce back to read/write before exiting)
            execute_sql "SHUTDOWN IMMEDIATE;" "$NONCDB_SID" "Shutdown"
            execute_sql "STARTUP;" "$NONCDB_SID" "Startup"
            exit 0
        fi

        log_user_input "User chose to proceed despite compatibility issues"
    fi
}

#############################################################################
# CONVERSION PHASE
#############################################################################

#############################################################################
# Conversion phase: obtain final confirmation, shut down the Non-CDB,
# create the PDB from the XML descriptor, run the dictionary conversion,
# and migrate services. In --generate-commands mode this only emits the
# commands for DBA review and exits.
#############################################################################
run_conversion_phase() {
    log_section "Conversion Phase"

    if [[ "$GENERATE_COMMANDS_ONLY" == "true" ]]; then
        generate_conversion_commands
        log_info "Commands generated. Exiting."
        exit 0
    fi

    # Final confirmation — this is the point of no return for the source DB
    echo ""
    echo "================================================================================"
    echo "FINAL CONFIRMATION"
    echo "================================================================================"
    echo "You are about to perform the following conversion:"
    echo ""
    echo "  Source Non-CDB: $NONCDB_SID"
    echo "  Target CDB: $CDB_SID"
    echo "  New PDB: $PDB_NAME"
    echo "  Method: $CONVERSION_METHOD"
    echo ""
    echo "This will:"
    echo "  1. Shut down the Non-CDB database"
    echo "  2. Create a new PDB in the CDB"
    echo "  3. Run noncdb_to_pdb.sql conversion script"
    echo "  4. Migrate services to the new PDB"
    echo "================================================================================"

    if ! prompt_user "Are you sure you want to proceed?"; then
        log_info "User cancelled final confirmation"
        exit 0
    fi

    log_user_input "User confirmed final conversion proceed"

    # Step 1: Ensure Non-CDB is shut down (it should be read-only from compatibility check)
    log_info "Step 1: Shutting down Non-CDB..."

    if [[ "$TEST_MODE" != "true" ]]; then
        export ORACLE_SID="$NONCDB_SID"
        execute_sql "SHUTDOWN IMMEDIATE;" "$NONCDB_SID" "Shutdown Non-CDB"
    fi

    # Step 2: Create PDB
    log_info "Step 2: Creating PDB in target CDB..."
    create_pdb

    # Step 3: Complete conversion
    log_info "Step 3: Completing PDB conversion..."
    complete_pdb_conversion

    # Step 4: Migrate services
    log_info "Step 4: Migrating services..."
    migrate_services

    log_success "Conversion phase completed"
}

#############################################################################
# Build and execute the CREATE PLUGGABLE DATABASE statement from the XML
# descriptor, using the selected file-handling method (COPY/MOVE/NOCOPY)
# and the TDE keystore clause when applicable, then verify the PDB exists.
# Globals (read): CDB_SID, PDB_NAME, XML_DESCRIPTOR_FILE, CONVERSION_METHOD,
#   NONCDB_DIR, NEW_LOCATION, IS_TDE_ENABLED, TDE_PASSWORD, TEST_MODE
# Exits: 1 if creation or verification fails.
#############################################################################
create_pdb() {
    log_subsection "Creating PDB"

    export ORACLE_SID="$CDB_SID"

    # Build CREATE PLUGGABLE DATABASE statement
    local create_sql="CREATE PLUGGABLE DATABASE $PDB_NAME USING '$XML_DESCRIPTOR_FILE'"

    case "$CONVERSION_METHOD" in
        COPY)
            create_sql="$create_sql COPY FILE_NAME_CONVERT=('$NONCDB_DIR','$NEW_LOCATION')"
            ;;
        MOVE)
            create_sql="$create_sql MOVE FILE_NAME_CONVERT=('$NONCDB_DIR','$NEW_LOCATION')"
            ;;
        NOCOPY)
            create_sql="$create_sql NOCOPY"
            ;;
    esac

    if [[ "$IS_TDE_ENABLED" == "true" ]] && [[ -n "$TDE_PASSWORD" ]]; then
        create_sql="$create_sql KEYSTORE IDENTIFIED BY \"$TDE_PASSWORD\""
    fi

    create_sql="$create_sql;"

    log_info "Executing PDB creation..."

    # SECURITY FIX: never write the TDE wallet password to the SQL audit
    # log — log a redacted copy of the statement instead.
    local loggable_sql="$create_sql"
    if [[ -n "$TDE_PASSWORD" ]]; then
        loggable_sql="${create_sql//"$TDE_PASSWORD"/********}"
    fi
    log_sql "CREATE PDB Command: $loggable_sql"

    if [[ "$TEST_MODE" != "true" ]]; then
        if ! execute_sql_with_retry "$create_sql" "$CDB_SID" "Create PDB"; then
            log_error "PDB creation failed"
            exit 1
        fi

        # Verify creation against v$pdbs
        local pdb_exists=$(execute_sql_query "SELECT COUNT(*) FROM v\$pdbs WHERE name = '$PDB_NAME';" "$CDB_SID" "Verify PDB")

        if [[ "$pdb_exists" -eq 0 ]]; then
            log_error "PDB creation verification failed - PDB not found"
            exit 1
        fi

        log_success "PDB $PDB_NAME created successfully"
    fi
}

#############################################################################
# Finish the plug-in: open the new PDB, run Oracle's noncdb_to_pdb.sql
# dictionary conversion inside it, record plug-in violations, bounce the
# PDB, and save its state so it reopens automatically with the CDB.
# No-op (beyond logging) when TEST_MODE is true.
#############################################################################
complete_pdb_conversion() {
    log_subsection "Completing PDB Conversion"

    export ORACLE_SID="$CDB_SID"

    if [[ "$TEST_MODE" != "true" ]]; then
        # Open PDB
        log_info "Opening PDB..."
        execute_sql "ALTER PLUGGABLE DATABASE $PDB_NAME OPEN;" "$CDB_SID" "Open PDB"

        # Run noncdb_to_pdb.sql inside the PDB's container
        log_info "Running noncdb_to_pdb.sql conversion script..."
        log_info "This may take several minutes..."

        # NOTE(review): failures inside noncdb_to_pdb.sql are only surfaced
        # via the violations report below — confirm execute_sql reports them.
        execute_sql "
            ALTER SESSION SET CONTAINER=$PDB_NAME;
            @?/rdbms/admin/noncdb_to_pdb.sql
        " "$CDB_SID" "Run noncdb_to_pdb.sql"

        # Check violations (written to the reports directory)
        log_info "Checking for post-conversion violations..."
        check_pdb_violations "$CDB_SID" "$PDB_NAME" "${REPORTS_DIR}/post_conversion_violations.txt"

        # Restart PDB so it comes up clean after the dictionary conversion
        log_info "Restarting PDB..."
        execute_sql "ALTER PLUGGABLE DATABASE $PDB_NAME CLOSE IMMEDIATE;" "$CDB_SID" "Close PDB"
        execute_sql "ALTER PLUGGABLE DATABASE $PDB_NAME OPEN;" "$CDB_SID" "Open PDB"

        # Save state so the PDB reopens automatically on CDB restart
        log_info "Saving PDB state..."
        execute_sql "ALTER PLUGGABLE DATABASE $PDB_NAME SAVE STATE;" "$CDB_SID" "Save PDB state"

        log_success "PDB conversion completed"
    fi
}

#############################################################################
# Recreate the services captured from the source Non-CDB inside the new
# PDB via DBMS_SERVICE, after asking the operator for confirmation.
# Globals (read): NONCDB_SERVICES, CDB_SID, PDB_NAME, TEST_MODE
# Returns: 0 always; per-service failures are counted and reported.
#############################################################################
migrate_services() {
    log_subsection "Service Migration"

    if [[ ${#NONCDB_SERVICES[@]} -eq 0 ]]; then
        log_info "No services to migrate"
        return 0
    fi

    log_info "Services to migrate: ${#NONCDB_SERVICES[@]}"

    if ! prompt_user "Do you want to migrate services to the new PDB?"; then
        log_info "Service migration skipped by user"
        return 0
    fi

    export ORACLE_SID="$CDB_SID"

    local migrated=0
    local failed=0
    local service network

    for service in "${!NONCDB_SERVICES[@]}"; do
        network="${NONCDB_SERVICES[$service]}"

        log_info "Migrating service: $service"

        if [[ "$TEST_MODE" != "true" ]]; then
            # PL/SQL block: create and start the service inside the PDB;
            # errors are reported via DBMS_OUTPUT rather than raised.
            local plsql="
                ALTER SESSION SET CONTAINER=$PDB_NAME;
                BEGIN
                    DBMS_SERVICE.CREATE_SERVICE(
                        service_name => '$service',
                        network_name => '$network'
                    );
                    DBMS_SERVICE.START_SERVICE('$service');
                    DBMS_OUTPUT.PUT_LINE('Service created and started: $service');
                EXCEPTION
                    WHEN OTHERS THEN
                        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
                END;
                /
            "

            if execute_sql "$plsql" "$CDB_SID" "Create service $service"; then
                log_success "Service $service migrated"
                migrated=$((migrated + 1))
            else
                log_error "Failed to migrate service $service"
                failed=$((failed + 1))
            fi
        fi
    done

    log_info "Service migration results: $migrated succeeded, $failed failed"

    if (( failed > 0 )); then
        log_warning "Some services failed to migrate - manual intervention may be required"
    fi
}

# Write a copy/paste-ready command sheet for performing the conversion
# manually (describe non-CDB, create PDB, run noncdb_to_pdb.sql).
# Globals read: SCRIPTS_DIR, NONCDB_SID, CDB_SID, PDB_NAME,
#               XML_DESCRIPTOR_FILE, NONCDB_DIR, NEW_LOCATION,
#               CONVERSION_METHOD.
# Output: ${SCRIPTS_DIR}/conversion_commands.txt
generate_conversion_commands() {
    log_subsection "Generating Conversion Commands"

    local cmd_file="${SCRIPTS_DIR}/conversion_commands.txt"

    {
        echo "================================================================================"
        echo " CONVERSION COMMANDS FOR MANUAL EXECUTION"
        echo " Generated: $(date '+%Y-%m-%d %H:%M:%S')"
        echo " Source: $NONCDB_SID"
        echo " Target CDB: $CDB_SID"
        echo " Target PDB: $PDB_NAME"
        echo "================================================================================"
        echo ""
        echo "# Step 1: Prepare Non-CDB"
        echo "export ORACLE_SID=$NONCDB_SID"
        echo "sqlplus / as sysdba <<EOF"
        echo "SHUTDOWN IMMEDIATE;"
        echo "STARTUP MOUNT;"
        echo "ALTER DATABASE OPEN READ ONLY;"
        echo "BEGIN"
        echo "    DBMS_PDB.DESCRIBE(pdb_descr_file => '$XML_DESCRIPTOR_FILE');"
        echo "END;"
        echo "/"
        echo "SHUTDOWN IMMEDIATE;"
        echo "EXIT;"
        echo "EOF"
        echo ""
        echo "# Step 2: Create PDB"
        echo "export ORACLE_SID=$CDB_SID"
        echo "sqlplus / as sysdba <<EOF"
        echo "CREATE PLUGGABLE DATABASE $PDB_NAME"
        echo "USING '$XML_DESCRIPTOR_FILE'"
        case "$CONVERSION_METHOD" in
            COPY) echo "COPY FILE_NAME_CONVERT=('$NONCDB_DIR','$NEW_LOCATION');" ;;
            MOVE) echo "MOVE FILE_NAME_CONVERT=('$NONCDB_DIR','$NEW_LOCATION');" ;;
            NOCOPY) echo "NOCOPY;" ;;
            *)
                # FIX: previously an unrecognized method emitted no clause at
                # all, leaving the CREATE PLUGGABLE DATABASE statement without
                # a terminator and producing a broken command sheet.
                echo "-- WARNING: unknown CONVERSION_METHOD '$CONVERSION_METHOD' - add a COPY/MOVE/NOCOPY clause manually"
                echo ";"
                log_warning "Unknown conversion method '$CONVERSION_METHOD' in generated commands" >&2
                ;;
        esac
        echo "EXIT;"
        echo "EOF"
        echo ""
        echo "# Step 3: Complete conversion"
        echo "sqlplus / as sysdba <<EOF"
        echo "ALTER PLUGGABLE DATABASE $PDB_NAME OPEN;"
        echo "ALTER SESSION SET CONTAINER=$PDB_NAME;"
        echo "@?/rdbms/admin/noncdb_to_pdb.sql"
        echo "ALTER PLUGGABLE DATABASE $PDB_NAME CLOSE IMMEDIATE;"
        echo "ALTER PLUGGABLE DATABASE $PDB_NAME OPEN;"
        echo "ALTER PLUGGABLE DATABASE $PDB_NAME SAVE STATE;"
        echo "EXIT;"
        echo "EOF"
        echo ""
        echo "================================================================================"
    } > "$cmd_file"

    log_success "Commands saved to: $cmd_file"
}

#############################################################################
# POST-MIGRATION PHASE
#############################################################################

# Orchestrate all post-conversion work: capture the PDB's state, diff it
# against the pre-migration capture, generate remediation scripts, record
# final parameters, and emit environment-setup files (oratab, set_env,
# cleanup).
# Globals read: CDB_SID, PDB_NAME, POST_CAPTURE_DIR, PRE_CAPTURE_DIR,
#               REPORTS_DIR, SCRIPTS_DIR, PARAMS_DIR, CONVERSION_DIR,
#               ORACLE_HOME, NONCDB_SID.
# NOTE(review): PRE_CAPTURE_DIR / POST_CAPTURE_DIR are not assigned in the
# visible globals - presumably set by initialize_conversion; verify.
# The step functions below come from the sourced modules
# (post_migration_capture.sh, parameter_comparison.sh, post_migration_setup.sh).
run_post_migration_phase() {
    log_section "Post-Migration Phase"

    # Step 1: Capture post-migration state
    log_info "Step 1: Capturing post-migration state..."
    run_post_migration_capture "$CDB_SID" "$PDB_NAME" "$POST_CAPTURE_DIR"

    # Step 2: Compare pre and post states
    log_info "Step 2: Comparing pre and post migration states..."
    compare_captures "$PRE_CAPTURE_DIR" "$POST_CAPTURE_DIR" "$REPORTS_DIR"

    # Step 3: Generate remediation scripts
    log_info "Step 3: Generating remediation scripts..."
    generate_remediation_scripts "$PRE_CAPTURE_DIR" "$POST_CAPTURE_DIR" "${SCRIPTS_DIR}/remediation" "$PDB_NAME"

    # Step 4: Validate parameters
    log_info "Step 4: Validating parameter settings..."
    capture_pdb_parameters "$CDB_SID" "$PDB_NAME" "${PARAMS_DIR}/pdb_params_final.txt"

    # Step 5: Generate post-migration setup files (oratab, set_env, cleanup scripts)
    log_info "Step 5: Generating post-migration environment setup..."
    local setup_dir="${CONVERSION_DIR}/post_setup"
    run_post_migration_setup "$CDB_SID" "$PDB_NAME" "$ORACLE_HOME" "$NONCDB_SID" "$setup_dir"

    log_success "Post-migration phase completed"
}

#############################################################################
# SUMMARY AND CLEANUP
#############################################################################

# Print the end-of-run summary banner to the console and write a short
# completion_summary.txt into the conversion directory.
# Globals read: SCRIPT_START_TIME, NONCDB_SID, CDB_SID, PDB_NAME,
#               CONVERSION_METHOD, TOTAL_ERRORS, TOTAL_WARNINGS,
#               CONVERSION_DIR, LOG_DIR, PRE_CAPTURE_DIR, POST_CAPTURE_DIR,
#               REPORTS_DIR, SCRIPTS_DIR.
show_summary() {
    log_section "Conversion Summary"

    # Break the elapsed wall-clock time into hours/minutes/seconds.
    local end_time=$(date +%s)
    local elapsed=$((end_time - SCRIPT_START_TIME))
    local hours=$((elapsed / 3600))
    local minutes=$(((elapsed % 3600) / 60))
    local seconds=$((elapsed % 60))

    # Unquoted heredoc: shell variables below are expanded at print time;
    # v\$instance keeps a literal $ for the SQL*Plus example.
    cat << EOF

================================================================================
                         CONVERSION COMPLETED
================================================================================

Source Non-CDB:     $NONCDB_SID
Target CDB:         $CDB_SID
Target PDB:         $PDB_NAME
Conversion Method:  $CONVERSION_METHOD

Elapsed Time:       ${hours}h ${minutes}m ${seconds}s
Total Errors:       $TOTAL_ERRORS
Total Warnings:     $TOTAL_WARNINGS

Working Directory:  $CONVERSION_DIR

Key Files:
  - Main Log:       ${LOG_DIR}/conversion_*.log
  - Audit Log:      ${LOG_DIR}/conversion_audit_*.log
  - SQL Log:        ${LOG_DIR}/conversion_sql_*.log
  - User Input Log: ${LOG_DIR}/conversion_user_input_*.log
  - Error Log:      ${LOG_DIR}/conversion_errors_*.log

Reports:
  - Pre-Migration:  $PRE_CAPTURE_DIR
  - Post-Migration: $POST_CAPTURE_DIR
  - Comparison:     $REPORTS_DIR

Scripts:
  - Remediation:    ${SCRIPTS_DIR}/remediation/

Post-Setup Files:
  - Environment:    ${CONVERSION_DIR}/post_setup/set_env_additions.func
  - Oratab Entry:   ${CONVERSION_DIR}/post_setup/oratab_entry.txt
  - Cleanup Script: ${CONVERSION_DIR}/post_setup/cleanup_${NONCDB_SID}_*.sh
  - Checklist:      ${CONVERSION_DIR}/post_setup/cleanup_checklist_${NONCDB_SID}.txt

================================================================================

To verify the PDB:
  export ORACLE_SID=$CDB_SID
  sqlplus / as sysdba
  ALTER SESSION SET CONTAINER=$PDB_NAME;
  SELECT * FROM v\$instance;

================================================================================
EOF

    # Save summary to file
    {
        echo "Conversion completed at: $(date)"
        echo "Elapsed time: ${hours}h ${minutes}m ${seconds}s"
        echo "Errors: $TOTAL_ERRORS"
        echo "Warnings: $TOTAL_WARNINGS"
    } > "${CONVERSION_DIR}/completion_summary.txt"
}

#############################################################################
# MAIN EXECUTION
#############################################################################

# Top-level driver: parse arguments, discover/collect database settings,
# then run the pre-migration, compatibility, conversion and post-migration
# phases in order, finishing with the summary.
# NOTE(review): parse_arguments, discover_databases,
# get_conversion_parameters, initialize_conversion, validate_backup and the
# run_*_phase functions are defined elsewhere in this file or in the sourced
# modules - not visible in this chunk.
main() {
    # Parse command line arguments
    parse_arguments "$@"

    # Display header
    cat << EOF
================================================================================
         Non-CDB to PDB Conversion Tool - Enhanced Edition v${SCRIPT_VERSION}
================================================================================
Starting at: $(date)
User: $(whoami)
Host: $(hostname)
================================================================================

EOF

    # Discover databases if not specified
    if [[ -z "$CDB_SID" ]] || [[ -z "$NONCDB_SID" ]]; then
        discover_databases
    fi

    # Get conversion parameters
    get_conversion_parameters

    # Initialize conversion environment (always creates new directory)
    initialize_conversion

    # Validate backup
    validate_backup

    # Run pre-migration phase
    run_pre_migration_phase

    # Run compatibility check
    run_compatibility_check

    # Run conversion phase
    run_conversion_phase

    # Run post-migration phase
    run_post_migration_phase

    # Show summary
    show_summary

    log_success "Conversion completed successfully"
}

# On Ctrl-C (INT) or TERM, log the interruption and exit with the
# conventional 128+SIGINT status (130).
trap 'log_error "Script interrupted"; exit 130' INT TERM

# Entry point: forward all command-line arguments to main.
main "$@"


#!/bin/bash
#############################################################################
# Library: common_functions.sh
# Purpose: Common functions for Oracle Non-CDB to PDB migration
# Version: 4.0 Enhanced Edition
# Features:
#   - Enhanced logging with full audit trail
#   - User input capture and recording
#   - SQL execution with output capture
#   - Error detection and reporting
#   - Directory management with retry support
#############################################################################

# Prevent multiple sourcing
[[ -n "${_COMMON_FUNCTIONS_LOADED:-}" ]] && return
_COMMON_FUNCTIONS_LOADED=1

#############################################################################
# GLOBAL VARIABLES
#############################################################################

# Script identity
SCRIPT_NAME="${SCRIPT_NAME:-$(basename "$0")}"
SCRIPT_VERSION="${SCRIPT_VERSION:-4.0}"
SCRIPT_START_TIME="${SCRIPT_START_TIME:-$(date +%s)}"

# Timestamps
TIMESTAMP="${TIMESTAMP:-$(date +%Y%m%d_%H%M%S)}"
DATE_STAMP="${DATE_STAMP:-$(date +%Y%m%d)}"

# Counters
TOTAL_ERRORS=0
TOTAL_WARNINGS=0
TOTAL_SQL_EXECUTIONS=0
TOTAL_SQL_TIME=0

# Mode flags
DEBUG="${DEBUG:-false}"
TEST_MODE="${TEST_MODE:-false}"
PROMPT_USER="${PROMPT_USER:-true}"

# Log file handles
declare -g LOG_FILE=""
declare -g AUDIT_LOG=""
declare -g SQL_LOG=""
declare -g USER_INPUT_LOG=""
declare -g ERROR_LOG=""

# Session tracking
declare -g SESSION_ID=""
declare -g CONVERSION_ID=""

#############################################################################
# LOGGING FUNCTIONS
#############################################################################

# Initialize logging system
# Initialize the five per-session log files (main, audit, SQL, user-input,
# error), assign their global path variables, and generate SESSION_ID.
#
# Arguments:
#   $1 - directory to hold the logs (created if missing)
#   $2 - filename prefix (default: "conversion")
# Side effects: sets LOG_FILE, AUDIT_LOG, SQL_LOG, USER_INPUT_LOG,
#               ERROR_LOG, SESSION_ID; truncates/creates all five files.
init_logging() {
    local log_dir="$1"
    local prefix="${2:-conversion}"

    mkdir -p "$log_dir"

    local stem="${log_dir}/${prefix}"
    LOG_FILE="${stem}_${TIMESTAMP}.log"
    AUDIT_LOG="${stem}_audit_${TIMESTAMP}.log"
    SQL_LOG="${stem}_sql_${TIMESTAMP}.log"
    USER_INPUT_LOG="${stem}_user_input_${TIMESTAMP}.log"
    ERROR_LOG="${stem}_errors_${TIMESTAMP}.log"

    # Session ID: host + PID + timestamp makes it unique per run.
    SESSION_ID="$(hostname -s)_$$_${TIMESTAMP}"

    local bar="================================================================================"

    # The main log gets an extended header (user/host/script/Oracle Home).
    {
        echo "$bar"
        echo " CONVERSION LOG"
        echo " Session ID: ${SESSION_ID}"
        echo " Started: $(date '+%Y-%m-%d %H:%M:%S')"
        echo " User: $(whoami)"
        echo " Host: $(hostname)"
        echo " Script: ${SCRIPT_NAME} v${SCRIPT_VERSION}"
        echo " Oracle Home: ${ORACLE_HOME:-not set}"
        echo "$bar"
        echo ""
    } > "$LOG_FILE"

    # The remaining four logs share a short header; write them in a loop.
    local titles=(
        " AUDIT LOG - All Executed Commands and Outputs"
        " SQL EXECUTION LOG"
        " USER INPUT LOG - All User Responses and Selections"
        " ERROR LOG"
    )
    local targets=("$AUDIT_LOG" "$SQL_LOG" "$USER_INPUT_LOG" "$ERROR_LOG")
    local idx
    for idx in "${!titles[@]}"; do
        {
            echo "$bar"
            echo "${titles[$idx]}"
            echo " Session ID: ${SESSION_ID}"
            echo " Started: $(date '+%Y-%m-%d %H:%M:%S')"
            echo "$bar"
            echo ""
        } > "${targets[$idx]}"
    done

    log_info "Logging initialized - Session ID: ${SESSION_ID}"
    log_info "Log directory: ${log_dir}"
}

# Main logging function
# Central logging routine. Writes a timestamped entry to the console
# (ANSI-colorized by level) and appends it to the main log file.
# AUDIT / SQL / USER_INPUT levels are routed ONLY to their dedicated log
# files (early return - they never reach the console or main log).
#
# Arguments:
#   $1  - level: INFO|ERROR|WARNING|SUCCESS|DEBUG|AUDIT|SQL|USER_INPUT
#   $2+ - message text (joined with spaces)
# Side effects: increments TOTAL_ERRORS on ERROR, TOTAL_WARNINGS on WARNING.
log() {
    local level="$1"
    shift
    local message="$*"
    # NOTE(review): %3N (milliseconds) is a GNU date extension - on BSD/macOS
    # date this prints a literal "3N"; confirm target platforms are Linux.
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S.%3N')
    local caller_info=""

    # Get caller information for debugging (function name + line of the
    # caller of the log_* wrapper, hence indices 2 and 1)
    if [[ "$DEBUG" == "true" ]]; then
        caller_info=" [${FUNCNAME[2]:-main}:${BASH_LINENO[1]:-0}]"
    fi

    local log_entry="[${timestamp}] [${level}]${caller_info} ${message}"

    # Console output with colors (no fancy characters)
    case "$level" in
        ERROR)
            echo -e "\033[31m${log_entry}\033[0m"
            ((TOTAL_ERRORS++))
            # Also write to error log
            echo "$log_entry" >> "${ERROR_LOG:-/dev/null}"
            ;;
        WARNING)
            echo -e "\033[33m${log_entry}\033[0m"
            ((TOTAL_WARNINGS++))
            ;;
        SUCCESS)
            echo -e "\033[32m${log_entry}\033[0m"
            ;;
        INFO)
            echo "$log_entry"
            ;;
        DEBUG)
            if [[ "$DEBUG" == "true" ]]; then
                echo -e "\033[36m${log_entry}\033[0m"
            fi
            ;;
        AUDIT)
            # Audit entries go to audit log only
            echo "$log_entry" >> "${AUDIT_LOG:-/dev/null}"
            return
            ;;
        SQL)
            # SQL entries go to SQL log
            echo "$log_entry" >> "${SQL_LOG:-/dev/null}"
            return
            ;;
        USER_INPUT)
            # User input entries go to user input log
            echo "$log_entry" >> "${USER_INPUT_LOG:-/dev/null}"
            return
            ;;
        *)
            echo "$log_entry"
            ;;
    esac

    # Write to main log file (skipped before init_logging has run)
    if [[ -n "$LOG_FILE" ]]; then
        echo "$log_entry" >> "$LOG_FILE"
    fi
}

# Convenience logging functions - thin per-level wrappers so call sites read
# naturally; all delegate to log() with the level pre-filled.
log_info() { log "INFO" "$@"; }
log_error() { log "ERROR" "$@"; }
log_warning() { log "WARNING" "$@"; }
log_success() { log "SUCCESS" "$@"; }
log_debug() { log "DEBUG" "$@"; }
log_audit() { log "AUDIT" "$@"; }
log_sql() { log "SQL" "$@"; }
log_user_input() { log "USER_INPUT" "$@"; }

# Log section header
# Emit a prominent section banner to the console/main log and mirror a
# timestamped SECTION marker into the audit log.
# Arguments: $1 - section title
log_section() {
    local heading="$1"
    local bar="================================================================================"
    local row

    for row in "" "$bar" "  $heading" "$bar"; do
        log_info "$row"
    done

    for row in "" "$bar" "  SECTION: $heading" "  Time: $(date '+%Y-%m-%d %H:%M:%S')" "$bar"; do
        log_audit "$row"
    done
}

# Log subsection header
# Emit a lighter (dashed) banner for a subsection; console/main log only.
# Arguments: $1 - subsection title
log_subsection() {
    local heading="$1"
    local rule="--------------------------------------------------------------------------------"
    local row

    for row in "" "$rule" "  $heading" "$rule"; do
        log_info "$row"
    done
}

#############################################################################
# USER INPUT FUNCTIONS - Captures all user responses
#############################################################################

# Prompt user with yes/no question and record response
# Ask a yes/no question, record both question and answer in the user-input
# and audit logs.
#
# Arguments:
#   $1 - question text
#   $2 - optional default used when the user just presses Enter
# Returns: 0 for yes, 1 for no
# Note: when PROMPT_USER=false the answer is always "yes" (the default
# argument is ignored) - intentional for unattended runs.
prompt_user() {
    local prompt_message="$1"
    local default="${2:-}"

    # Record the question
    log_user_input "QUESTION: $prompt_message"

    if [[ "$PROMPT_USER" == "false" ]]; then
        log_info "AUTO MODE: $prompt_message - Proceeding automatically"
        log_user_input "RESPONSE: [AUTO MODE] yes"
        return 0
    fi

    local response
    while true; do
        # FIX: read -r keeps backslashes literal (plain read mangled them)
        if [[ -n "$default" ]]; then
            read -r -p "$prompt_message (yes/no) [$default]: " response
            response="${response:-$default}"
        else
            read -r -p "$prompt_message (yes/no): " response
        fi

        case "$response" in
            [yY][eE][sS]|[yY])
                log_user_input "RESPONSE: yes"
                log_audit "User confirmed: $prompt_message -> YES"
                return 0
                ;;
            [nN][oO]|[nN])
                log_user_input "RESPONSE: no"
                log_audit "User declined: $prompt_message -> NO"
                return 1
                ;;
            *)
                echo "Please answer yes or no."
                ;;
        esac
    done
}

# Prompt for text input and record response
# Prompt for a line of free-text input, log it, and store it into the named
# variable.
#
# Arguments:
#   $1 - prompt text
#   $2 - optional default (also used automatically when PROMPT_USER=false)
#   $3 - name of the variable to receive the value
prompt_input() {
    local prompt_message="$1"
    local default="${2:-}"
    local variable_name="$3"

    log_user_input "INPUT REQUEST: $prompt_message"

    if [[ "$PROMPT_USER" == "false" ]] && [[ -n "$default" ]]; then
        log_info "AUTO MODE: Using default value: $default"
        log_user_input "INPUT VALUE: [AUTO MODE] $default"
        # FIX: printf -v assigns the value verbatim; the previous eval
        # re-parsed it, so quotes broke and $(...) in input was executed.
        printf -v "$variable_name" '%s' "$default"
        return 0
    fi

    local response
    # FIX: read -r keeps backslashes in the input intact
    if [[ -n "$default" ]]; then
        read -r -p "$prompt_message [$default]: " response
        response="${response:-$default}"
    else
        read -r -p "$prompt_message: " response
    fi

    log_user_input "INPUT VALUE: $response"
    log_audit "User input for '$prompt_message': $response"

    printf -v "$variable_name" '%s' "$response"
}

# Prompt for password (hidden input) and record that it was provided
# Prompt for a password with hidden input; the value itself is never logged,
# only the fact that one was supplied.
#
# Arguments:
#   $1 - prompt text
#   $2 - name of the variable to receive the password
prompt_password() {
    local prompt_message="$1"
    local variable_name="$2"

    log_user_input "PASSWORD REQUEST: $prompt_message"

    local response
    # -s hides the input; FIX: -r keeps backslashes in the password intact
    read -r -s -p "$prompt_message: " response
    echo ""

    log_user_input "PASSWORD PROVIDED: [HIDDEN]"
    log_audit "Password provided for: $prompt_message"

    # FIX: eval on a secret could execute embedded $(...)/backticks and broke
    # on quote characters; printf -v stores the value verbatim.
    printf -v "$variable_name" '%s' "$response"
}

# Prompt with menu selection and record choice
# Always shows options even if only one - user must confirm
# Present a numbered menu, validate the choice, and store the selected
# option text into the named variable. In auto mode (PROMPT_USER=false) a
# single-option menu is selected without asking.
#
# Arguments:
#   $1  - prompt text
#   $2  - name of the variable to receive the selection
#   $3+ - menu options
prompt_select() {
    local prompt_message="$1"
    local variable_name="$2"
    shift 2
    local options=("$@")

    log_user_input "MENU SELECTION: $prompt_message"
    local i  # FIX: was undeclared and leaked into the caller's scope
    for i in "${!options[@]}"; do
        log_user_input "  Option $((i+1)): ${options[$i]}"
    done

    # If in auto mode with a single option, use it
    if [[ "$PROMPT_USER" == "false" ]] && [[ ${#options[@]} -eq 1 ]]; then
        log_info "AUTO MODE: Selecting only available option: ${options[0]}"
        log_user_input "SELECTED: [AUTO MODE] 1 - ${options[0]}"
        # FIX: printf -v replaces eval (quotes/$() in option text were unsafe)
        printf -v "$variable_name" '%s' "${options[0]}"
        return 0
    fi

    echo ""
    echo "$prompt_message"
    local n=1
    local opt
    for opt in "${options[@]}"; do
        echo "  $n) $opt"
        ((n++))
    done

    local choice
    while true; do
        # FIX: read -r keeps backslashes literal
        read -r -p "Enter choice (1-${#options[@]}): " choice
        if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#options[@]} ]]; then
            break
        fi
        echo "Invalid choice. Please enter a number between 1 and ${#options[@]}."
    done

    local selected="${options[$((choice-1))]}"

    log_user_input "SELECTED: $choice - $selected"
    log_audit "User selected option $choice ($selected) for: $prompt_message"

    printf -v "$variable_name" '%s' "$selected"
}

# Prompt to confirm a single selection (always asks even with one option)
# Confirm (or override) a pre-selected value: Enter keeps the current value,
# typing anything else replaces it. Always asks, even for a single option.
#
# Arguments:
#   $1 - prompt text
#   $2 - name of the variable to receive the confirmed value
#   $3 - current/proposed value
prompt_confirm_selection() {
    local prompt_message="$1"
    local variable_name="$2"
    local current_value="$3"

    log_user_input "CONFIRM SELECTION: $prompt_message (Current: $current_value)"

    if [[ "$PROMPT_USER" == "false" ]]; then
        log_info "AUTO MODE: Confirming selection: $current_value"
        log_user_input "CONFIRMED: [AUTO MODE] $current_value"
        # FIX: printf -v replaces eval (unsafe with quotes/$() in the value)
        printf -v "$variable_name" '%s' "$current_value"
        return 0
    fi

    echo ""
    echo "$prompt_message"
    echo "  Current selection: $current_value"
    echo ""
    local new_value  # FIX: was undeclared and leaked into the global scope
    # FIX: read -r keeps backslashes literal
    read -r -p "Press Enter to confirm, or type a different value: " new_value

    if [[ -z "$new_value" ]]; then
        new_value="$current_value"
    fi

    log_user_input "CONFIRMED: $new_value"
    log_audit "User confirmed: $prompt_message -> $new_value"

    printf -v "$variable_name" '%s' "$new_value"
}

#############################################################################
# SQL EXECUTION FUNCTIONS - With Full Output Capture
#############################################################################

# Execute SQL and capture all output
# Run a SQL / PL/SQL snippet through "sqlplus / as sysdba" against a SID,
# with full capture of command, output, duration and status in the SQL log.
#
# Arguments:
#   $1 - SQL text (may be multi-line; must be self-terminating)
#   $2 - ORACLE_SID to target (default: current $ORACLE_SID)
#   $3 - human-readable description for the logs
#   $4 - optional file that receives the raw sqlplus output
# Returns: 0 on success; 1 if sqlplus exits non-zero OR any ORA- appears in
#          the output (WHENEVER SQLERROR CONTINUE means sqlplus itself does
#          not stop on SQL errors, so output scanning is the real check).
# Side effects: exports ORACLE_SID; increments TOTAL_SQL_EXECUTIONS and
#               TOTAL_SQL_TIME; honors TEST_MODE (logs but does not run).
execute_sql() {
    local sql_command="$1"
    local sid="${2:-$ORACLE_SID}"
    local description="${3:-SQL Execution}"
    local output_file="${4:-}"

    ((TOTAL_SQL_EXECUTIONS++))
    local execution_id="SQL_${TOTAL_SQL_EXECUTIONS}_$(date +%H%M%S)"

    log_sql "================================================================================"
    log_sql "Execution ID: $execution_id"
    log_sql "Description: $description"
    log_sql "Oracle SID: $sid"
    log_sql "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
    log_sql "--------------------------------------------------------------------------------"
    log_sql "SQL Command:"
    log_sql "$sql_command"
    log_sql "--------------------------------------------------------------------------------"

    log_debug "Executing SQL [$execution_id]: $description"

    if [[ "$TEST_MODE" == "true" ]]; then
        log_info "[TEST MODE] Would execute SQL: $description"
        log_sql "OUTPUT: [TEST MODE - Not executed]"
        log_sql "================================================================================"
        return 0
    fi

    export ORACLE_SID="$sid"

    local start_time=$(date +%s)
    local temp_output=$(mktemp)

    # Execute with full output capture (stdout+stderr into the temp file;
    # ECHO/FEEDBACK on so the log shows each statement with its result)
    sqlplus -s "/ as sysdba" <<EOF > "$temp_output" 2>&1
SET ECHO ON
SET FEEDBACK ON
SET SERVEROUTPUT ON SIZE UNLIMITED
SET LINESIZE 300
SET PAGESIZE 1000
SET TRIMSPOOL ON
WHENEVER SQLERROR CONTINUE
$sql_command
EXIT;
EOF

    local sql_status=$?
    local end_time=$(date +%s)
    local duration=$((end_time - start_time))
    TOTAL_SQL_TIME=$((TOTAL_SQL_TIME + duration))

    # Capture output
    local output=$(cat "$temp_output")
    rm -f "$temp_output"

    log_sql "OUTPUT:"
    log_sql "$output"
    log_sql "--------------------------------------------------------------------------------"
    log_sql "Exit Status: $sql_status"
    log_sql "Duration: ${duration}s"
    log_sql "================================================================================"

    # Also write to output file if specified
    if [[ -n "$output_file" ]]; then
        echo "$output" > "$output_file"
    fi

    # Check for ORA- errors in output
    # NOTE(review): this matches "ORA-" anywhere, including in query results
    # or DBMS_OUTPUT text - a benign occurrence would be reported as failure.
    if echo "$output" | grep -q "ORA-"; then
        local ora_errors=$(echo "$output" | grep "ORA-")
        log_error "SQL execution encountered Oracle errors:"
        echo "$ora_errors" | while read -r line; do
            log_error "  $line"
        done
        log_audit "SQL Error [$execution_id]: $ora_errors"
        return 1
    fi

    if [[ $sql_status -ne 0 ]]; then
        log_error "SQL execution failed with status $sql_status"
        return 1
    fi

    log_debug "SQL executed successfully in ${duration}s"
    return 0
}

# Execute SQL and return output
# Run a single query via sqlplus and print its bare result on stdout
# (headings/feedback suppressed) for use in command substitution.
#
# Arguments:
#   $1 - SQL query text (must be self-terminating)
#   $2 - ORACLE_SID to target (default: current $ORACLE_SID)
#   $3 - description, logged to the SQL log
# Output: raw query result on stdout (empty string in TEST_MODE)
# NOTE(review): unlike execute_sql, ORA- errors are NOT detected here; a
# failing query returns its error text as the "result".
execute_sql_query() {
    local sql_query="$1"
    local sid="${2:-$ORACLE_SID}"
    local description="${3:-SQL Query}"

    export ORACLE_SID="$sid"

    log_sql "Query [$description]: $sql_query"

    if [[ "$TEST_MODE" == "true" ]]; then
        log_sql "Result: [TEST MODE - Not executed]"
        echo ""
        return 0
    fi

    local result=$(sqlplus -s "/ as sysdba" <<EOF
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 1000 TRIMSPOOL ON
$sql_query
EXIT;
EOF
)

    log_sql "Result: $result"
    echo "$result"
}

# Execute SQL with retry logic
# Run execute_sql up to max_retries times, pausing retry_delay seconds
# between failed attempts.
#
# Arguments:
#   $1 - SQL text
#   $2 - ORACLE_SID (default: current $ORACLE_SID)
#   $3 - description for the logs
#   $4 - maximum attempts (default 3)
#   $5 - seconds to wait between attempts (default 10)
# Returns: 0 as soon as one attempt succeeds, 1 after all attempts fail.
execute_sql_with_retry() {
    local sql_command="$1"
    local sid="${2:-$ORACLE_SID}"
    local description="${3:-SQL Execution}"
    local max_retries="${4:-3}"
    local retry_delay="${5:-10}"

    local attempt
    for (( attempt = 1; attempt <= max_retries; attempt++ )); do
        log_info "Executing SQL (Attempt $attempt/$max_retries): $description"

        if execute_sql "$sql_command" "$sid" "$description"; then
            log_success "SQL executed successfully"
            return 0
        fi

        log_error "SQL execution failed"
        if (( attempt < max_retries )); then
            log_info "Retrying in $retry_delay seconds..."
            sleep "$retry_delay"
        fi
    done

    log_error "SQL execution failed after $max_retries attempts: $description"
    return 1
}

#############################################################################
# DIRECTORY MANAGEMENT - With Retry Support
#############################################################################

# Create conversion directory with proper timestamp handling
# This addresses the issue where retry operations should create new directories
# Create a brand-new timestamped working directory tree for one conversion
# run and print its path on stdout. Log output is routed to stderr so the
# path is the only stdout payload (callers use command substitution).
#
# Arguments:
#   $1 - source (non-CDB) name
#   $2 - target (CDB) name
#   $3 - base directory (default /opt/oracle/conversions)
#   $4 - force a fresh timestamp (default true: every run/retry gets its
#        own directory)
# Side effects: updates the TIMESTAMP and CONVERSION_ID globals.
# Returns: 0 with the path on stdout, 1 if any mkdir fails.
create_conversion_directory() {
    local source_db="$1"
    local target_db="$2"
    local base_dir="${3:-/opt/oracle/conversions}"
    local force_new="${4:-true}"

    # Fresh timestamp for new operations (retry => new directory).
    [[ "$force_new" == "true" ]] && TIMESTAMP=$(date +%Y%m%d_%H%M%S)

    CONVERSION_ID="${source_db}_to_${target_db}_${TIMESTAMP}"
    local target_root="${base_dir}/${CONVERSION_ID}"

    # Collision should be impossible with a unique timestamp, but if it
    # happens, wait one second and regenerate.
    if [[ -d "$target_root" ]]; then
        sleep 1
        TIMESTAMP=$(date +%Y%m%d_%H%M%S)
        CONVERSION_ID="${source_db}_to_${target_db}_${TIMESTAMP}"
        target_root="${base_dir}/${CONVERSION_ID}"
    fi

    # Lay out the standard subdirectory structure.
    local sub
    for sub in "" /logs /snapshots/before /snapshots/after /backup_info \
               /scripts /commands /artifacts /state /parameters \
               /statistics /reports; do
        if ! mkdir -p "${target_root}${sub}"; then
            log_error "Failed to create directory: ${target_root}${sub}" >&2
            return 1
        fi
    done

    log_info "Created conversion directory: $target_root" >&2
    log_audit "Directory created: $target_root"

    echo "$target_root"
}

# Find most recent conversion directory for resume
# Locate the most recent conversion directory for a source/target pair (the
# YYYYMMDD_HHMMSS suffix sorts lexicographically, so a reverse sort puts the
# newest first).
#
# Arguments:
#   $1 - source (non-CDB) name
#   $2 - target (CDB) name
#   $3 - base directory (default /opt/oracle/conversions)
# Output: the directory path on stdout when found.
# Returns: 0 when found, 1 otherwise.
find_latest_conversion_dir() {
    local source_db="$1"
    local target_db="$2"
    local base_dir="${3:-/opt/oracle/conversions}"

    local newest
    newest=$(find "$base_dir" -maxdepth 1 -type d \
        -name "${source_db}_to_${target_db}_*" 2>/dev/null | sort -r | head -1)

    [[ -n "$newest" && -d "$newest" ]] || return 1
    echo "$newest"
}

#############################################################################
# STATE MANAGEMENT
#############################################################################

# Save conversion state
# Persist a resume checkpoint plus a sourceable snapshot of selected
# variables into the state directory.
#
# Arguments:
#   $1  - state directory (must exist)
#   $2  - checkpoint name
#   $3+ - names of variables whose current values should be saved
# Output files: checkpoint.txt and variables.sh inside $1.
save_state() {
    local state_dir="$1"
    local checkpoint="$2"
    shift 2

    # Record the checkpoint marker.
    printf '%s\n' "$checkpoint" > "${state_dir}/checkpoint.txt"

    # Write a self-contained script that re-exports each variable;
    # printf %q shell-quotes values so sourcing round-trips exactly.
    local vars_file="${state_dir}/variables.sh"
    {
        printf '#!/bin/bash\n'
        printf '# Conversion State Variables\n'
        printf '# Saved: %s\n' "$(date '+%Y-%m-%d %H:%M:%S')"
        printf '# Checkpoint: %s\n' "$checkpoint"
        printf '\n'
        local name
        for name in "$@"; do
            printf 'export %s=%s\n' "$name" "$(printf '%q' "${!name}")"
        done
    } > "$vars_file"

    log_debug "State saved at checkpoint: $checkpoint"
    log_audit "Checkpoint saved: $checkpoint"
}

# Load conversion state
# Re-source a previously saved variables.sh from the state directory,
# restoring the exported conversion variables into the current shell.
# Arguments: $1 - state directory
# Returns: 0 when the snapshot existed and was loaded, 1 otherwise.
load_state() {
    local state_dir="$1"
    local vars_file="${state_dir}/variables.sh"

    [[ -f "$vars_file" ]] || return 1

    source "$vars_file"
    log_info "State loaded from: $vars_file"
    return 0
}

# Get current checkpoint
# Print the saved checkpoint name, or "START" when no checkpoint file
# exists yet (fresh run).
# Arguments: $1 - state directory
get_checkpoint() {
    local state_dir="$1"
    local marker="${state_dir}/checkpoint.txt"

    [[ -f "$marker" ]] && { cat "$marker"; return; }
    echo "START"
}

#############################################################################
# ERROR HANDLING
#############################################################################

# Record error with details
# Record a structured error entry (type, message, optional details, and the
# bash call stack) in the error log, then also report it via log_error.
#
# Arguments:
#   $1 - error type/category label
#   $2 - error message
#   $3 - optional multi-line details
# Note: the FUNCNAME/BASH_SOURCE/BASH_LINENO walk starts at index 1 to skip
# this function's own frame; moving this code would change the stack output.
record_error() {
    local error_type="$1"
    local error_message="$2"
    local error_details="${3:-}"

    local error_entry="[$(date '+%Y-%m-%d %H:%M:%S')] [$error_type] $error_message"

    if [[ -n "${ERROR_LOG:-}" ]]; then
        {
            echo "================================================================================"
            echo "$error_entry"
            if [[ -n "$error_details" ]]; then
                echo "Details:"
                echo "$error_details"
            fi
            echo "Call Stack:"
            local i
            for ((i=1; i<${#FUNCNAME[@]}; i++)); do
                echo "  ${FUNCNAME[$i]}() at ${BASH_SOURCE[$i]}:${BASH_LINENO[$((i-1))]}"
            done
            echo "================================================================================"
        } >> "$ERROR_LOG"
    fi

    log_error "$error_message"
}

# Check for errors and report
# Report whether any errors were logged this session.
# Returns: 0 when TOTAL_ERRORS is zero, 1 otherwise (after a warning).
check_errors() {
    (( TOTAL_ERRORS == 0 )) && return 0

    log_warning "Total errors encountered: $TOTAL_ERRORS"
    return 1
}

#############################################################################
# UTILITY FUNCTIONS
#############################################################################

# Display elapsed time
# Print the time elapsed since a given epoch second as HH:MM:SS
# (no trailing newline).
# Arguments: $1 - start epoch seconds (default: SCRIPT_START_TIME)
show_elapsed_time() {
    local since="${1:-$SCRIPT_START_TIME}"
    local total=$(( $(date +%s) - since ))

    printf "%02d:%02d:%02d" \
        $(( total / 3600 )) \
        $(( (total % 3600) / 60 )) \
        $(( total % 60 ))
}

# Format bytes to human readable
format_bytes() {
    local bytes=$1
    if [[ $bytes -ge 1073741824 ]]; then
        echo "$(echo "scale=2; $bytes/1073741824" | bc) GB"
    elif [[ $bytes -ge 1048576 ]]; then
        echo "$(echo "scale=2; $bytes/1048576" | bc) MB"
    elif [[ $bytes -ge 1024 ]]; then
        echo "$(echo "scale=2; $bytes/1024" | bc) KB"
    else
        echo "$bytes bytes"
    fi
}

# Validate Oracle environment
# Sanity-check the Oracle client environment: ORACLE_HOME must be set, must
# be an existing directory, and must contain an executable sqlplus.
# Returns: 0 when valid, 1 (with a logged error) otherwise.
validate_oracle_env() {
    local home="${ORACLE_HOME:-}"

    if [[ -z "$home" ]]; then
        log_error "ORACLE_HOME is not set"
        return 1
    fi

    if [[ ! -d "$home" ]]; then
        log_error "ORACLE_HOME directory does not exist: $home"
        return 1
    fi

    if [[ ! -x "$home/bin/sqlplus" ]]; then
        log_error "sqlplus not found or not executable"
        return 1
    fi

    log_debug "Oracle environment validated: ORACLE_HOME=$home"
    return 0
}

# Check if database is running
# Determine whether an Oracle instance is up by looking for its pmon
# background process (ora_pmon_<SID>) in the process table. The pattern
# anchors the SID at end-of-line or before whitespace to avoid matching
# SIDs that are prefixes of longer ones.
# Arguments: $1 - ORACLE_SID to check
# Returns: 0 when running, 1 otherwise.
is_database_running() {
    local sid="$1"

    if [[ -z "$sid" ]]; then
        log_debug "is_database_running: No SID provided"
        return 1
    fi

    local pmon_pattern="ora_pmon_${sid}$|ora_pmon_${sid}[[:space:]]"

    if ps -ef | grep -v grep | grep -qE "$pmon_pattern"; then
        log_debug "is_database_running: $sid is running"
        return 0
    fi

    log_debug "is_database_running: $sid is NOT running"
    return 1
}

# Get database open mode
# Print the database open mode (e.g. "READ WRITE", "MOUNTED") for a SID by
# querying v$database; delegates output/logging to execute_sql_query.
# Arguments: $1 - ORACLE_SID
get_db_open_mode() {
    local sid="$1"

    execute_sql_query "SELECT open_mode FROM v\$database;" "$sid" "Get database open mode"
}

# Check if database is CDB
# Check if database is CDB
# Returns 0 when V$DATABASE.CDB is 'YES' for the given SID, 1 otherwise
# (including a missing SID or an unreachable instance).
is_cdb() {
    local sid="$1"

    if [[ -z "$sid" ]]; then
        log_debug "is_cdb: No SID provided"
        return 1
    fi

    export ORACLE_SID="$sid"

    # Direct query without using execute_sql_query to avoid logging noise
    local result
    result=$(sqlplus -s "/ as sysdba" <<EOF 2>/dev/null
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 100 TRIMSPOOL ON
SELECT TRIM(cdb) FROM v\$database;
EXIT;
EOF
)

    # Drop every whitespace character (newlines included) via expansion
    result="${result//[[:space:]]/}"

    log_debug "is_cdb: SID=$sid, result='$result'"

    # The test's own status is the function's return value.
    [[ "$result" == "YES" ]]
}

#############################################################################
# CLEANUP TRAP
#############################################################################

cleanup_on_exit() {
    # EXIT-trap handler: log a session summary and write the audit trail.
    #
    # Must capture $? on the very first line, before any other command in
    # this function overwrites the exit status of whatever triggered exit.
    #
    # Reads globals maintained by this library: SESSION_ID, TOTAL_ERRORS,
    # TOTAL_WARNINGS, TOTAL_SQL_EXECUTIONS, TOTAL_SQL_TIME,
    # SCRIPT_START_TIME (via show_elapsed_time's default).
    # NOTE(review): assumes these are initialized before any exit path can
    # fire — confirm init happens unconditionally at source time.
    local exit_code=$?

    log_info ""
    log_info "================================================================================"
    log_info "Session Summary"
    log_info "================================================================================"
    log_info "Session ID: $SESSION_ID"
    log_info "Exit Code: $exit_code"
    log_info "Total Errors: $TOTAL_ERRORS"
    log_info "Total Warnings: $TOTAL_WARNINGS"
    log_info "SQL Executions: $TOTAL_SQL_EXECUTIONS"
    log_info "Total SQL Time: ${TOTAL_SQL_TIME}s"
    log_info "Elapsed Time: $(show_elapsed_time)"
    log_info "================================================================================"

    log_audit "Session ended with exit code: $exit_code"
    log_audit "Total errors: $TOTAL_ERRORS, warnings: $TOTAL_WARNINGS"
}

# Register the EXIT trap so cleanup_on_exit runs on every termination path
# (normal completion, error, or signal-induced exit) and the session summary
# is always emitted to the logs.
trap cleanup_on_exit EXIT

log_debug "common_functions.sh loaded successfully"


#!/bin/bash
#############################################################################
# Module: pre_migration_capture.sh
# Purpose: Capture comprehensive source database state before migration
# Features:
#   - Sequences and their details
#   - Services configuration
#   - Triggers and their states
#   - Synonyms (public and private)
#   - TDE/encryption information
#   - Object-level statistics (separate file due to size)
#   - Database links
#   - Jobs and scheduler configuration
#   - Non-Oracle maintained schemas inventory
#############################################################################

# Source common functions if not already loaded.
# _PRE_CAPTURE_DIR resolves to this module's own directory so the shared
# library can be located relative to it regardless of the caller's CWD.
_PRE_CAPTURE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [[ -z "${_COMMON_FUNCTIONS_LOADED:-}" ]]; then
    _COMMON_FUNCS_PATH="${_PRE_CAPTURE_DIR}/../lib/common_functions.sh"
    if [[ -f "$_COMMON_FUNCS_PATH" ]]; then
        source "$_COMMON_FUNCS_PATH"
    else
        echo "[ERROR] common_functions.sh not found at: $_COMMON_FUNCS_PATH" >&2
        # 'return' succeeds when this file is sourced; when executed
        # directly 'return' is invalid (stderr suppressed), so fall back
        # to 'exit'.
        return 1 2>/dev/null || exit 1
    fi
fi

#############################################################################
# SEQUENCE CAPTURE
#############################################################################

# Capture all non-Oracle-maintained sequences (or those matching an optional
# schema filter) into a pipe-delimited CSV plus a recreation DDL script.
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (must exist)
#   $3 - optional quoted, comma-separated schema list for the IN clause
#
# Fixed: the count was computed as $(... | grep -c "|" || echo "0"); since
# grep -c already prints 0 (while exiting 1) on zero matches, that produced
# a corrupt two-line "0\n0" value. grep -c alone is sufficient.
capture_sequences() {
    local sid="$1"
    local output_dir="$2"
    local schema_filter="${3:-}"

    log_subsection "Capturing Sequences"

    export ORACLE_SID="$sid"

    local where_clause=""
    if [[ -n "$schema_filter" ]]; then
        where_clause="AND sequence_owner IN ($schema_filter)"
    else
        where_clause="AND sequence_owner NOT IN (SELECT username FROM dba_users WHERE oracle_maintained = 'Y')"
    fi

    # Detailed sequence information
    local seq_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT sequence_owner || '|' ||
       sequence_name || '|' ||
       min_value || '|' ||
       max_value || '|' ||
       increment_by || '|' ||
       cycle_flag || '|' ||
       order_flag || '|' ||
       cache_size || '|' ||
       last_number
FROM dba_sequences
WHERE 1=1 $where_clause
ORDER BY sequence_owner, sequence_name;
"

    local sequences
    sequences=$(execute_sql_query "$seq_sql" "$sid" "Capture sequences")

    local seq_file="${output_dir}/sequences.csv"
    {
        echo "# Sequences Capture"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|NAME|MIN_VALUE|MAX_VALUE|INCREMENT|CYCLE|ORDER|CACHE|LAST_NUMBER"
        echo "#"
        echo "$sequences"
    } > "$seq_file"

    # grep -c always prints a count; '|| true' only guards the non-zero
    # exit status when nothing matches.
    local seq_count
    seq_count=$(grep -c "|" <<<"$sequences" || true)
    log_info "Captured $seq_count sequences"
    log_info "Saved to: $seq_file"

    # Also generate DDL for recreation
    local ddl_file="${output_dir}/sequences_ddl.sql"
    {
        echo "-- Sequences Recreation DDL"
        echo "-- Database: $sid"
        echo "-- Generated: $(date '+%Y-%m-%d %H:%M:%S')"
        echo ""
    } > "$ddl_file"

    # Re-read the CSV (header lines start with '#' and are skipped) and
    # emit one CREATE SEQUENCE per data row, restarting at last_number.
    while IFS='|' read -r owner name minval maxval incr cycle ord cache lastnum; do
        [[ "$owner" =~ ^# ]] && continue
        [[ -z "$owner" ]] && continue

        cat >> "$ddl_file" << EOF
-- Sequence: ${owner}.${name}
CREATE SEQUENCE "${owner}"."${name}"
    START WITH ${lastnum}
    INCREMENT BY ${incr}
    MINVALUE ${minval}
    MAXVALUE ${maxval}
    CACHE ${cache}
    $([ "$cycle" == "Y" ] && echo "CYCLE" || echo "NOCYCLE")
    $([ "$ord" == "Y" ] && echo "ORDER" || echo "NOORDER");

EOF
    done < "$seq_file"

    log_info "DDL saved to: $ddl_file"
}

#############################################################################
# SERVICES CAPTURE
#############################################################################

capture_services() {
    local sid="$1"
    local output_dir="$2"

    log_subsection "Capturing Database Services"

    export ORACLE_SID="$sid"

    # Get services from DBA_SERVICES
    local svc_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT name || '|' ||
       network_name || '|' ||
       NVL(TO_CHAR(creation_date, 'YYYY-MM-DD HH24:MI:SS'), 'N/A') || '|' ||
       NVL(failover_method, 'NONE') || '|' ||
       NVL(failover_type, 'NONE') || '|' ||
       NVL(TO_CHAR(failover_retries), '0') || '|' ||
       NVL(TO_CHAR(failover_delay), '0') || '|' ||
       NVL(goal, 'NONE') || '|' ||
       NVL(dtp, 'N') || '|' ||
       enabled || '|' ||
       NVL(aq_ha_notifications, 'NO') || '|' ||
       NVL(clb_goal, 'LONG') || '|' ||
       NVL(stop_option, 'NONE')
FROM dba_services
WHERE name NOT LIKE 'SYS\$%' ESCAPE '\\\\'
  AND name NOT LIKE 'SYS%'
ORDER BY name;
"

    local services=$(execute_sql_query "$svc_sql" "$sid" "Capture services")

    local svc_file="${output_dir}/services.csv"
    {
        echo "# Services Capture"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: NAME|NETWORK_NAME|CREATION_DATE|FAILOVER_METHOD|FAILOVER_TYPE|RETRIES|DELAY|GOAL|DTP|ENABLED|AQ_HA|CLB_GOAL|STOP_OPTION"
        echo "#"
        echo "$services"
    } > "$svc_file"

    local svc_count=$(echo "$services" | grep -c "|" || echo "0")
    log_info "Captured $svc_count services"
    log_info "Saved to: $svc_file"

    # Generate service creation DDL
    local ddl_file="${output_dir}/services_creation.sql"
    {
        echo "-- Services Creation DDL"
        echo "-- Database: $sid"
        echo "-- Generated: $(date '+%Y-%m-%d %H:%M:%S')"
        echo ""
        echo "SET SERVEROUTPUT ON"
        echo ""
    } > "$ddl_file"

    while IFS='|' read -r name network_name creation fo_method fo_type fo_retries fo_delay goal dtp enabled aq_ha clb_goal stop_opt; do
        [[ "$name" =~ ^# ]] && continue
        [[ -z "$name" ]] && continue
        [[ "$name" == "${sid}" ]] && continue  # Skip default service

        cat >> "$ddl_file" << EOF
-- Service: ${name}
BEGIN
    DBMS_SERVICE.CREATE_SERVICE(
        service_name => '${name}',
        network_name => '${network_name}'
    );
    DBMS_OUTPUT.PUT_LINE('Created service: ${name}');
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error creating ${name}: ' || SQLERRM);
END;
/

BEGIN
    DBMS_SERVICE.START_SERVICE('${name}');
    DBMS_OUTPUT.PUT_LINE('Started service: ${name}');
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error starting ${name}: ' || SQLERRM);
END;
/

EOF
    done < "$svc_file"

    log_info "DDL saved to: $ddl_file"
}

#############################################################################
# TRIGGERS CAPTURE
#############################################################################

# Capture trigger metadata for non-Oracle-maintained schemas (or a schema
# filter) to CSV, and generate a script restoring each trigger's
# ENABLE/DISABLE state after migration.
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (must exist)
#   $3 - optional quoted, comma-separated schema list for the IN clause
#
# Fixed: all three counts used "grep -c ... || echo 0", which yields a
# corrupt two-line "0\n0" value on zero matches (grep -c already prints 0).
capture_triggers() {
    local sid="$1"
    local output_dir="$2"
    local schema_filter="${3:-}"

    log_subsection "Capturing Triggers"

    export ORACLE_SID="$sid"

    local where_clause=""
    if [[ -n "$schema_filter" ]]; then
        where_clause="AND owner IN ($schema_filter)"
    else
        where_clause="AND owner NOT IN (SELECT username FROM dba_users WHERE oracle_maintained = 'Y')"
    fi

    # Capture trigger metadata
    local trig_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       trigger_name || '|' ||
       trigger_type || '|' ||
       triggering_event || '|' ||
       table_owner || '|' ||
       base_object_type || '|' ||
       table_name || '|' ||
       column_name || '|' ||
       referencing_names || '|' ||
       when_clause || '|' ||
       status || '|' ||
       description || '|' ||
       action_type || '|' ||
       crossedition || '|' ||
       before_statement || '|' ||
       before_row || '|' ||
       after_row || '|' ||
       after_statement
FROM dba_triggers
WHERE 1=1 $where_clause
ORDER BY owner, trigger_name;
"

    local triggers
    triggers=$(execute_sql_query "$trig_sql" "$sid" "Capture triggers")

    local trig_file="${output_dir}/triggers.csv"
    {
        echo "# Triggers Capture"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|NAME|TYPE|EVENT|TABLE_OWNER|BASE_TYPE|TABLE|COLUMN|REF|WHEN|STATUS|DESC|ACTION_TYPE|CROSSEDITION|BEFORE_STMT|BEFORE_ROW|AFTER_ROW|AFTER_STMT"
        echo "#"
        echo "$triggers"
    } > "$trig_file"

    # grep -c prints 0 on no match; no fallback echo needed.
    local trig_count enabled_count disabled_count
    trig_count=$(grep -c "|" <<<"$triggers" || true)
    log_info "Captured $trig_count triggers"

    # Count by status (STATUS is the 11th pipe-delimited field)
    enabled_count=$(grep -c "|ENABLED|" <<<"$triggers" || true)
    disabled_count=$(grep -c "|DISABLED|" <<<"$triggers" || true)

    log_info "  Enabled:  $enabled_count"
    log_info "  Disabled: $disabled_count"
    log_info "Saved to: $trig_file"

    # Generate trigger status restore script
    local status_file="${output_dir}/triggers_status.sql"
    {
        echo "-- Trigger Status Restoration"
        echo "-- Database: $sid"
        echo "-- Generated: $(date '+%Y-%m-%d %H:%M:%S')"
        echo ""
    } > "$status_file"

    # 'rest' soaks up the trailing fields we do not need for the status DDL.
    while IFS='|' read -r owner name ttype event towner btype table col ref whencl status rest; do
        [[ "$owner" =~ ^# ]] && continue
        [[ -z "$owner" ]] && continue

        if [[ "$status" == "ENABLED" ]]; then
            echo "ALTER TRIGGER \"${owner}\".\"${name}\" ENABLE;" >> "$status_file"
        else
            echo "ALTER TRIGGER \"${owner}\".\"${name}\" DISABLE;" >> "$status_file"
        fi
    done < "$trig_file"

    log_info "Status script saved to: $status_file"
}

#############################################################################
# SYNONYMS CAPTURE
#############################################################################

# Capture PUBLIC plus non-Oracle-maintained private synonyms (or a schema
# filter plus PUBLIC) to CSV, and generate CREATE OR REPLACE SYNONYM DDL.
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (must exist)
#   $3 - optional quoted, comma-separated schema list for the IN clause
#
# Fixed: counts used "grep -c ... || echo 0", which produces a corrupt
# two-line "0\n0" value on zero matches; that value then broke the
# $((syn_count - pub_count)) arithmetic. grep -c alone is correct.
capture_synonyms() {
    local sid="$1"
    local output_dir="$2"
    local schema_filter="${3:-}"

    log_subsection "Capturing Synonyms"

    export ORACLE_SID="$sid"

    local where_clause=""
    if [[ -n "$schema_filter" ]]; then
        where_clause="AND owner IN ($schema_filter, 'PUBLIC')"
    else
        where_clause="AND (owner NOT IN (SELECT username FROM dba_users WHERE oracle_maintained = 'Y') OR owner = 'PUBLIC')"
    fi

    # Capture synonym details
    local syn_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       synonym_name || '|' ||
       table_owner || '|' ||
       table_name || '|' ||
       NVL(db_link, 'LOCAL') || '|' ||
       origin_con_id
FROM dba_synonyms
WHERE 1=1 $where_clause
ORDER BY owner, synonym_name;
"

    local synonyms
    synonyms=$(execute_sql_query "$syn_sql" "$sid" "Capture synonyms")

    local syn_file="${output_dir}/synonyms.csv"
    {
        echo "# Synonyms Capture"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|SYNONYM_NAME|TABLE_OWNER|TABLE_NAME|DB_LINK|ORIGIN_CON_ID"
        echo "#"
        echo "$synonyms"
    } > "$syn_file"

    # grep -c prints 0 on no match; guard only the exit status so the
    # subtraction below always sees clean integers.
    local syn_count pub_count priv_count
    syn_count=$(grep -c "|" <<<"$synonyms" || true)
    pub_count=$(grep -c "^PUBLIC|" <<<"$synonyms" || true)
    priv_count=$((syn_count - pub_count))

    log_info "Captured $syn_count synonyms"
    log_info "  Public:  $pub_count"
    log_info "  Private: $priv_count"
    log_info "Saved to: $syn_file"

    # Generate synonym creation DDL
    local ddl_file="${output_dir}/synonyms_creation.sql"
    {
        echo "-- Synonyms Creation DDL"
        echo "-- Database: $sid"
        echo "-- Generated: $(date '+%Y-%m-%d %H:%M:%S')"
        echo ""
    } > "$ddl_file"

    # PUBLIC vs private and local vs db-link targets need different DDL forms.
    while IFS='|' read -r owner name towner tname dblink origin; do
        [[ "$owner" =~ ^# ]] && continue
        [[ -z "$owner" ]] && continue

        if [[ "$owner" == "PUBLIC" ]]; then
            if [[ "$dblink" != "LOCAL" ]]; then
                echo "CREATE OR REPLACE PUBLIC SYNONYM \"${name}\" FOR \"${towner}\".\"${tname}\"@\"${dblink}\";" >> "$ddl_file"
            else
                echo "CREATE OR REPLACE PUBLIC SYNONYM \"${name}\" FOR \"${towner}\".\"${tname}\";" >> "$ddl_file"
            fi
        else
            if [[ "$dblink" != "LOCAL" ]]; then
                echo "CREATE OR REPLACE SYNONYM \"${owner}\".\"${name}\" FOR \"${towner}\".\"${tname}\"@\"${dblink}\";" >> "$ddl_file"
            else
                echo "CREATE OR REPLACE SYNONYM \"${owner}\".\"${name}\" FOR \"${towner}\".\"${tname}\";" >> "$ddl_file"
            fi
        fi
    done < "$syn_file"

    log_info "DDL saved to: $ddl_file"
}

#############################################################################
# TDE/ENCRYPTION CAPTURE
#############################################################################

capture_tde_info() {
    # Dump TDE/encryption state (wallet, encrypted tablespaces/columns,
    # master keys, TDE init parameters) into one human-readable report,
    # then probe the wallet status.
    #
    # Arguments:
    #   $1 - ORACLE_SID of the source database
    #   $2 - output directory (must exist)
    #
    # Returns:
    #   0 - wallet present and OPEN
    #   1 - wallet present but CLOSED (must be opened before migration)
    #   2 - any other status (e.g. NOT_AVAILABLE / no TDE configured)
    local sid="$1"
    local output_dir="$2"

    log_subsection "Capturing TDE/Encryption Information"

    export ORACLE_SID="$sid"

    local tde_file="${output_dir}/tde_info.txt"

    # Report header (truncates any previous file; sections append below).
    {
        echo "================================================================================"
        echo " TDE/ENCRYPTION INFORMATION"
        echo " Database: $sid"
        echo " Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "================================================================================"
        echo ""
    } > "$tde_file"

    # Wallet Status - use direct sqlplus for reliability
    # (quoted 'EOF' heredocs: no shell expansion, SQL passed verbatim)
    echo "=== WALLET STATUS ===" >> "$tde_file"
    sqlplus -s "/ as sysdba" <<'EOF' >> "$tde_file" 2>&1
SET LINESIZE 200 PAGESIZE 100 FEEDBACK ON
COLUMN wrl_type FORMAT A10
COLUMN wrl_parameter FORMAT A50
COLUMN status FORMAT A15
COLUMN wallet_type FORMAT A15
COLUMN wallet_order FORMAT A10

SELECT wrl_type, wrl_parameter, status, wallet_type, wallet_order
FROM v$encryption_wallet;
EXIT;
EOF

    # Encrypted Tablespaces
    echo "" >> "$tde_file"
    echo "=== ENCRYPTED TABLESPACES ===" >> "$tde_file"
    sqlplus -s "/ as sysdba" <<'EOF' >> "$tde_file" 2>&1
SET LINESIZE 200 PAGESIZE 100 FEEDBACK ON
COLUMN tablespace_name FORMAT A30
COLUMN encrypted FORMAT A10

SELECT tablespace_name, encrypted
FROM dba_tablespaces
WHERE encrypted = 'YES';
EXIT;
EOF

    # Encrypted Columns
    echo "" >> "$tde_file"
    echo "=== ENCRYPTED COLUMNS ===" >> "$tde_file"
    sqlplus -s "/ as sysdba" <<'EOF' >> "$tde_file" 2>&1
SET LINESIZE 200 PAGESIZE 100 FEEDBACK ON
COLUMN owner FORMAT A20
COLUMN table_name FORMAT A30
COLUMN column_name FORMAT A30
COLUMN encryption_alg FORMAT A15

SELECT owner, table_name, column_name, encryption_alg
FROM dba_encrypted_columns
ORDER BY owner, table_name, column_name;
EXIT;
EOF

    # Master Keys
    echo "" >> "$tde_file"
    echo "=== ENCRYPTION KEYS ===" >> "$tde_file"
    sqlplus -s "/ as sysdba" <<'EOF' >> "$tde_file" 2>&1
SET LINESIZE 200 PAGESIZE 100 FEEDBACK ON
COLUMN key_id FORMAT A52
COLUMN activation_time FORMAT A20
COLUMN key_use FORMAT A15

SELECT key_id, TO_CHAR(activation_time, 'YYYY-MM-DD HH24:MI') activation_time, key_use
FROM v$encryption_keys
ORDER BY activation_time;
EXIT;
EOF

    # TDE Parameters
    echo "" >> "$tde_file"
    echo "=== TDE PARAMETERS ===" >> "$tde_file"
    sqlplus -s "/ as sysdba" <<'EOF' >> "$tde_file" 2>&1
SET LINESIZE 200 PAGESIZE 100 FEEDBACK ON
COLUMN name FORMAT A40
COLUMN value FORMAT A50

SELECT name, value
FROM v$parameter
WHERE name IN ('encrypt_new_tablespaces', 'wallet_root', 'tde_configuration')
ORDER BY name;
EXIT;
EOF

    log_info "TDE information saved to: $tde_file"

    # Check if TDE is enabled: take the first wallet row's STATUS.
    # (Unquoted EOF heredoc here so \$ must be escaped for the shell.)
    local tde_status=$(sqlplus -s "/ as sysdba" <<EOF
SET HEADING OFF FEEDBACK OFF PAGESIZE 0
SELECT status FROM v\$encryption_wallet WHERE rownum = 1;
EXIT;
EOF
)
    # Strip all whitespace (sqlplus pads and adds newlines).
    tde_status=$(echo "$tde_status" | tr -d '[:space:]')

    if [[ "$tde_status" == "OPEN" ]]; then
        log_info "TDE Status: ENABLED and OPEN"
        return 0
    elif [[ "$tde_status" == "CLOSED" ]]; then
        log_warning "TDE Status: ENABLED but CLOSED - wallet needs to be opened"
        return 1
    else
        log_info "TDE Status: $tde_status"
        return 2
    fi
}

#############################################################################
# STATISTICS CAPTURE (Large - Separate File)
#############################################################################

# Capture optimizer statistics (table, index, and column summaries) for
# non-Oracle-maintained schemas (or a schema filter) into a dedicated
# statistics/ subdirectory — kept separate from the other captures because
# of potential size.
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (statistics/ is created beneath it)
#   $3 - optional quoted, comma-separated schema list for the IN clause
#
# Fixed: all three counts used "grep -c ... || echo 0", which yields a
# corrupt two-line "0\n0" value on zero matches (grep -c already prints 0).
capture_statistics() {
    local sid="$1"
    local output_dir="$2"
    local schema_filter="${3:-}"

    log_subsection "Capturing Object Statistics"

    export ORACLE_SID="$sid"

    local stats_dir="${output_dir}/statistics"
    mkdir -p "$stats_dir"

    local where_clause=""
    if [[ -n "$schema_filter" ]]; then
        where_clause="AND owner IN ($schema_filter)"
    else
        where_clause="AND owner NOT IN (SELECT username FROM dba_users WHERE oracle_maintained = 'Y')"
    fi

    # Table Statistics
    log_info "Capturing table statistics..."
    local table_stats_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       table_name || '|' ||
       NVL(TO_CHAR(num_rows), 'NULL') || '|' ||
       NVL(TO_CHAR(blocks), 'NULL') || '|' ||
       NVL(TO_CHAR(avg_row_len), 'NULL') || '|' ||
       NVL(TO_CHAR(sample_size), 'NULL') || '|' ||
       NVL(TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS'), 'NEVER') || '|' ||
       NVL(stale_stats, 'NO')
FROM dba_tab_statistics
WHERE 1=1 $where_clause
ORDER BY owner, table_name;
"

    local table_stats
    table_stats=$(execute_sql_query "$table_stats_sql" "$sid" "Capture table statistics")

    {
        echo "# Table Statistics"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|TABLE_NAME|NUM_ROWS|BLOCKS|AVG_ROW_LEN|SAMPLE_SIZE|LAST_ANALYZED|STALE"
        echo "#"
        echo "$table_stats"
    } > "${stats_dir}/table_statistics.csv"

    # grep -c prints 0 on no match; no fallback echo needed.
    local table_count
    table_count=$(grep -c "|" <<<"$table_stats" || true)
    log_info "  Tables: $table_count"

    # Index Statistics
    log_info "Capturing index statistics..."
    local index_stats_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       index_name || '|' ||
       table_owner || '|' ||
       table_name || '|' ||
       NVL(TO_CHAR(num_rows), 'NULL') || '|' ||
       NVL(TO_CHAR(leaf_blocks), 'NULL') || '|' ||
       NVL(TO_CHAR(distinct_keys), 'NULL') || '|' ||
       NVL(TO_CHAR(clustering_factor), 'NULL') || '|' ||
       NVL(TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS'), 'NEVER') || '|' ||
       NVL(stale_stats, 'NO')
FROM dba_ind_statistics
WHERE 1=1 $where_clause
ORDER BY owner, index_name;
"

    local index_stats
    index_stats=$(execute_sql_query "$index_stats_sql" "$sid" "Capture index statistics")

    {
        echo "# Index Statistics"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|INDEX_NAME|TABLE_OWNER|TABLE_NAME|NUM_ROWS|LEAF_BLOCKS|DISTINCT_KEYS|CLUSTERING_FACTOR|LAST_ANALYZED|STALE"
        echo "#"
        echo "$index_stats"
    } > "${stats_dir}/index_statistics.csv"

    local index_count
    index_count=$(grep -c "|" <<<"$index_stats" || true)
    log_info "  Indexes: $index_count"

    # Column Statistics Summary (not detailed histogram due to size)
    log_info "Capturing column statistics summary..."
    local col_stats_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       table_name || '|' ||
       column_name || '|' ||
       NVL(TO_CHAR(num_distinct), 'NULL') || '|' ||
       NVL(TO_CHAR(num_nulls), 'NULL') || '|' ||
       NVL(TO_CHAR(density), 'NULL') || '|' ||
       NVL(histogram, 'NONE') || '|' ||
       NVL(TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS'), 'NEVER')
FROM dba_tab_col_statistics
WHERE 1=1 $where_clause
ORDER BY owner, table_name, column_name;
"

    local col_stats
    col_stats=$(execute_sql_query "$col_stats_sql" "$sid" "Capture column statistics")

    {
        echo "# Column Statistics"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|TABLE_NAME|COLUMN_NAME|NUM_DISTINCT|NUM_NULLS|DENSITY|HISTOGRAM|LAST_ANALYZED"
        echo "#"
        echo "$col_stats"
    } > "${stats_dir}/column_statistics.csv"

    local col_count
    col_count=$(grep -c "|" <<<"$col_stats" || true)
    log_info "  Columns: $col_count"

    log_info "Statistics saved to: $stats_dir"
}

#############################################################################
# DATABASE LINKS CAPTURE
#############################################################################

# Capture all database links to CSV plus a recreation DDL skeleton.
# Passwords cannot be extracted from the dictionary, so the generated DDL
# uses a <password> placeholder that must be filled in manually.
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (must exist)
#
# Fixed: the count used "grep -c ... || echo 0", which yields a corrupt
# two-line "0\n0" value on zero matches (grep -c already prints 0).
capture_db_links() {
    local sid="$1"
    local output_dir="$2"

    log_subsection "Capturing Database Links"

    export ORACLE_SID="$sid"

    local link_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       db_link || '|' ||
       username || '|' ||
       host || '|' ||
       TO_CHAR(created, 'YYYY-MM-DD HH24:MI:SS')
FROM dba_db_links
ORDER BY owner, db_link;
"

    local links
    links=$(execute_sql_query "$link_sql" "$sid" "Capture database links")

    local link_file="${output_dir}/db_links.csv"
    {
        echo "# Database Links Capture"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|DB_LINK|USERNAME|HOST|CREATED"
        echo "# NOTE: Passwords are not captured - must be re-entered when recreating"
        echo "#"
        echo "$links"
    } > "$link_file"

    # grep -c prints 0 on no match; no fallback echo needed.
    local link_count
    link_count=$(grep -c "|" <<<"$links" || true)
    log_info "Captured $link_count database links"
    log_info "Saved to: $link_file"

    # Generate recreation script (without passwords)
    local ddl_file="${output_dir}/db_links_creation.sql"
    {
        echo "-- Database Links Creation DDL"
        echo "-- Database: $sid"
        echo "-- Generated: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "-- NOTE: Passwords must be added manually"
        echo ""
    } > "$ddl_file"

    while IFS='|' read -r owner link user host created; do
        [[ "$owner" =~ ^# ]] && continue
        [[ -z "$owner" ]] && continue

        if [[ "$owner" == "PUBLIC" ]]; then
            echo "CREATE PUBLIC DATABASE LINK \"${link}\" CONNECT TO \"${user}\" IDENTIFIED BY <password> USING '${host}';" >> "$ddl_file"
        else
            echo "CREATE DATABASE LINK \"${link}\" CONNECT TO \"${user}\" IDENTIFIED BY <password> USING '${host}';" >> "$ddl_file"
        fi
    done < "$link_file"

    log_info "DDL saved to: $ddl_file"
}

#############################################################################
# JOBS CAPTURE
#############################################################################

# Capture scheduler job definitions for non-Oracle-maintained schemas (or a
# schema filter) to CSV. Capture only — no recreation DDL is generated here.
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (must exist)
#   $3 - optional quoted, comma-separated schema list for the IN clause
#
# Fixed: the count used "grep -c ... || echo 0", which yields a corrupt
# two-line "0\n0" value on zero matches (grep -c already prints 0).
capture_jobs() {
    local sid="$1"
    local output_dir="$2"
    local schema_filter="${3:-}"

    log_subsection "Capturing Scheduler Jobs"

    export ORACLE_SID="$sid"

    local where_clause=""
    if [[ -n "$schema_filter" ]]; then
        where_clause="AND owner IN ($schema_filter)"
    else
        where_clause="AND owner NOT IN (SELECT username FROM dba_users WHERE oracle_maintained = 'Y')"
    fi

    local job_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       job_name || '|' ||
       job_type || '|' ||
       enabled || '|' ||
       state || '|' ||
       NVL(TO_CHAR(start_date, 'YYYY-MM-DD HH24:MI:SS'), 'N/A') || '|' ||
       NVL(repeat_interval, 'N/A') || '|' ||
       NVL(TO_CHAR(last_start_date, 'YYYY-MM-DD HH24:MI:SS'), 'NEVER') || '|' ||
       NVL(TO_CHAR(next_run_date, 'YYYY-MM-DD HH24:MI:SS'), 'N/A') || '|' ||
       NVL(TO_CHAR(run_count), '0') || '|' ||
       NVL(TO_CHAR(failure_count), '0')
FROM dba_scheduler_jobs
WHERE 1=1 $where_clause
ORDER BY owner, job_name;
"

    local jobs
    jobs=$(execute_sql_query "$job_sql" "$sid" "Capture scheduler jobs")

    local job_file="${output_dir}/scheduler_jobs.csv"
    {
        echo "# Scheduler Jobs Capture"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|JOB_NAME|JOB_TYPE|ENABLED|STATE|START_DATE|REPEAT_INTERVAL|LAST_START|NEXT_RUN|RUN_COUNT|FAILURE_COUNT"
        echo "#"
        echo "$jobs"
    } > "$job_file"

    # grep -c prints 0 on no match; no fallback echo needed.
    local job_count
    job_count=$(grep -c "|" <<<"$jobs" || true)
    log_info "Captured $job_count scheduler jobs"
    log_info "Saved to: $job_file"
}

#############################################################################
# INVALID OBJECTS CAPTURE
#############################################################################

# Capture every INVALID object in the database to CSV and warn when any
# exist (so post-migration comparison can distinguish pre-existing breakage
# from migration-induced breakage).
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (must exist)
#
# Fixed: the count used "grep -c ... || echo 0", producing a corrupt
# two-line "0\n0" value on zero matches; that value then caused an
# arithmetic syntax error in the numeric -gt comparison below.
capture_invalid_objects() {
    local sid="$1"
    local output_dir="$2"

    log_subsection "Capturing Invalid Objects"

    export ORACLE_SID="$sid"

    local invalid_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       object_type || '|' ||
       object_name || '|' ||
       status || '|' ||
       TO_CHAR(created, 'YYYY-MM-DD HH24:MI:SS') || '|' ||
       TO_CHAR(last_ddl_time, 'YYYY-MM-DD HH24:MI:SS')
FROM dba_objects
WHERE status = 'INVALID'
ORDER BY owner, object_type, object_name;
"

    local invalids
    invalids=$(execute_sql_query "$invalid_sql" "$sid" "Capture invalid objects")

    local invalid_file="${output_dir}/invalid_objects.csv"
    {
        echo "# Invalid Objects Capture"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|OBJECT_TYPE|OBJECT_NAME|STATUS|CREATED|LAST_DDL_TIME"
        echo "#"
        echo "$invalids"
    } > "$invalid_file"

    # grep -c prints 0 on no match; no fallback echo needed.
    local invalid_count
    invalid_count=$(grep -c "|" <<<"$invalids" || true)

    if (( invalid_count > 0 )); then
        log_warning "Found $invalid_count invalid objects"
    else
        log_info "No invalid objects found"
    fi

    log_info "Saved to: $invalid_file"
}

#############################################################################
# SCHEMA INVENTORY
#############################################################################

# Capture an inventory of non-Oracle-maintained schemas: one CSV of user
# accounts and one CSV of per-schema object counts by type.
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (must exist)
#
# Fixed: the count used "grep -c ... || echo 0", which yields a corrupt
# two-line "0\n0" value on zero matches (grep -c already prints 0).
capture_schema_inventory() {
    local sid="$1"
    local output_dir="$2"

    log_subsection "Capturing Schema Inventory"

    export ORACLE_SID="$sid"

    # Non-Oracle maintained users
    local users_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT username || '|' ||
       account_status || '|' ||
       default_tablespace || '|' ||
       temporary_tablespace || '|' ||
       TO_CHAR(created, 'YYYY-MM-DD HH24:MI:SS') || '|' ||
       profile || '|' ||
       authentication_type
FROM dba_users
WHERE oracle_maintained = 'N'
ORDER BY username;
"

    local users
    users=$(execute_sql_query "$users_sql" "$sid" "Capture users")

    local users_file="${output_dir}/schema_users.csv"
    {
        echo "# Non-Oracle Maintained Users"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: USERNAME|ACCOUNT_STATUS|DEFAULT_TS|TEMP_TS|CREATED|PROFILE|AUTH_TYPE"
        echo "#"
        echo "$users"
    } > "$users_file"

    # grep -c prints 0 on no match; no fallback echo needed.
    local user_count
    user_count=$(grep -c "|" <<<"$users" || true)
    log_info "Captured $user_count non-Oracle maintained schemas"

    # Object counts per schema
    local obj_count_sql="
SET HEADING OFF FEEDBACK OFF PAGESIZE 0 LINESIZE 500 TRIMSPOOL ON
SELECT owner || '|' ||
       object_type || '|' ||
       COUNT(*)
FROM dba_objects
WHERE owner NOT IN (SELECT username FROM dba_users WHERE oracle_maintained = 'Y')
GROUP BY owner, object_type
ORDER BY owner, object_type;
"

    local obj_counts
    obj_counts=$(execute_sql_query "$obj_count_sql" "$sid" "Capture object counts")

    local obj_file="${output_dir}/schema_object_counts.csv"
    {
        echo "# Object Counts by Schema"
        echo "# Database: $sid"
        echo "# Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "# Format: OWNER|OBJECT_TYPE|COUNT"
        echo "#"
        echo "$obj_counts"
    } > "$obj_file"

    log_info "Saved to: $users_file"
    log_info "Object counts saved to: $obj_file"
}

#############################################################################
# MAIN CAPTURE FUNCTION
#############################################################################

# Orchestrate the full pre-migration capture: run every capture_* function
# against the source SID and write a summary listing the generated files.
#
# Arguments:
#   $1 - ORACLE_SID of the source database
#   $2 - output directory (created if missing)
#   $3 - optional quoted, comma-separated schema list passed to the
#        schema-aware capture functions
#
# Fixed: the summary listed files via "ls -la | awk '{print $NF}'", which
# parses ls output and breaks on paths containing whitespace; plain glob
# loops are used instead.
run_pre_migration_capture() {
    local sid="$1"
    local output_dir="$2"
    local schema_filter="${3:-}"
    local f

    log_section "Pre-Migration Database Capture"

    log_info "Source Database: $sid"
    log_info "Output Directory: $output_dir"
    if [[ -n "$schema_filter" ]]; then
        log_info "Schema Filter: $schema_filter"
    fi

    mkdir -p "$output_dir"

    # Capture all components
    capture_schema_inventory "$sid" "$output_dir"
    capture_sequences "$sid" "$output_dir" "$schema_filter"
    capture_services "$sid" "$output_dir"
    capture_triggers "$sid" "$output_dir" "$schema_filter"
    capture_synonyms "$sid" "$output_dir" "$schema_filter"
    capture_tde_info "$sid" "$output_dir"
    capture_statistics "$sid" "$output_dir" "$schema_filter"
    capture_db_links "$sid" "$output_dir"
    capture_jobs "$sid" "$output_dir" "$schema_filter"
    capture_invalid_objects "$sid" "$output_dir"

    # Generate summary. Unmatched globs are filtered by the -e test, so
    # no error suppression is needed.
    local summary_file="${output_dir}/capture_summary.txt"
    {
        echo "================================================================================"
        echo " PRE-MIGRATION CAPTURE SUMMARY"
        echo " Database: $sid"
        echo " Captured: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "================================================================================"
        echo ""
        echo "Files Generated:"
        for f in "$output_dir"/*.csv "$output_dir"/*.txt "$output_dir"/*.sql; do
            [[ -e "$f" ]] && printf '  %s\n' "$f"
        done
        echo ""
        echo "Statistics Directory:"
        for f in "${output_dir}/statistics"/*.csv; do
            [[ -e "$f" ]] && printf '  %s\n' "$f"
        done
        echo ""
        echo "================================================================================"
    } > "$summary_file"

    log_success "Pre-migration capture completed"
    log_info "Summary: $summary_file"

    return 0
}

#############################################################################
# STANDALONE EXECUTION
#############################################################################

# Standalone entry point: only runs when this file is executed directly,
# not when sourced by the main conversion script.
#
# Fixed:
#   - "shift 2" with a trailing valueless flag (e.g. "--sid" as the last
#     argument) does not shift in bash and looped forever; "|| break" now
#     terminates parsing in that case.
#   - Missing required arguments previously caused a silent exit 0; an
#     error is now reported on stderr with a non-zero exit status.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    echo "Pre-Migration Capture Module"
    echo "============================"
    echo ""
    echo "Usage: $0 --sid <oracle_sid> --output <output_dir> [--schema <schema_list>]"
    echo ""

    SID=""
    OUTPUT_DIR=""
    SCHEMA_FILTER=""

    while [[ $# -gt 0 ]]; do
        case "$1" in
            --sid) SID="${2:-}"; shift 2 || break ;;
            --output) OUTPUT_DIR="${2:-}"; shift 2 || break ;;
            --schema) SCHEMA_FILTER="${2:-}"; shift 2 || break ;;
            *) shift ;;
        esac
    done

    if [[ -n "$SID" && -n "$OUTPUT_DIR" ]]; then
        mkdir -p "$OUTPUT_DIR"
        init_logging "$OUTPUT_DIR" "capture"
        run_pre_migration_capture "$SID" "$OUTPUT_DIR" "$SCHEMA_FILTER"
    else
        echo "[ERROR] Both --sid and --output are required" >&2
        exit 1
    fi
fi



1. Architecture Overview

 Couchbase Cluster Components

  • Data Service (KV): Handles read/write operations and manages the key-value store.

  • Index Service: Manages GSI (Global Secondary Indexes) to support N1QL queries.

  • Query Service: Processes N1QL queries using indexes or views.

  • Search Service: Handles Full-Text Search.

  • Analytics Service: Performs parallel analytics workloads.

  • Eventing Service: Real-time event-driven processing.

🕸️ Typical Production Deployment (2015-era Reference)

+------------------------+
| Load Balancer         |
+-----------+------------+
            |
+-----------v------------+
|   Couchbase Query/API  |  ← 3 Nodes (Query, Index)
+-----------+------------+
            |
+-----------v------------+
|   Couchbase Data Tier  |  ← 5-9 Nodes (KV + Index)
+-----------+------------+
            |
+-----------v------------+
| XDCR/Remote Clusters   |
+------------------------+

2. Kernel & OS-Level Tuning

 Memory and Swapping

  • Disable Transparent Huge Pages (THP):

    echo never > /sys/kernel/mm/transparent_hugepage/enabled
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  • Disable Swap:

    swapoff -a

 File Descriptors

  • Increase limits in /etc/security/limits.conf:

    couchbase soft nofile 100000
    couchbase hard nofile 100000

 Network Tuning

  • Use low-latency network settings:

    sysctl -w net.core.somaxconn=4096
    sysctl -w net.ipv4.tcp_tw_reuse=1
    sysctl -w net.ipv4.ip_local_port_range="10240 65535"

3. Best Practices (Operational)

3.1 Storage

  • Use ext4 or XFS file systems with noatime flag.

  • Place index and data on separate physical volumes for IO separation.

  • Prefer RAID 10 for low-latency workloads.

3.2 Memory

  • Ensure working set fits in memory.

  • Monitor ephemeral buckets and eviction policies (value-only vs full).

  • Monitor Resident Ratio > 95% in healthy clusters.

3.3 Compaction

  • Schedule off-peak automatic compaction.

  • Monitor compaction queues in cbcollect_info or Couchbase Admin UI.

3.4 Backup

  • Use cbbackupmgr for scheduled backups.

  • Store backups off-node and test recovery monthly.

  • Automate with crontab/Ansible and validate integrity.

4. Error Handling & Troubleshooting

4.1 Node Join Failure (HTTP 500)

Resolution:

  • Clear /data/couchbase/ → recreate data and index folders.

  • Ensure required ports (8091, 8092, 11210, etc.) are not in use.

  • Use netstat to validate before restarting services.

4.2 Memcached or Moxi Exit (status 134 or 0)

Diagnosis:

  • Check ns_server.log and babysitter logs.

  • Look for memory exhaustion or kernel panic signatures.

4.3 Auto-failover Recurrence

Fixes:

  • Increase auto-failover timeout to 60s or more.

  • Validate NIC / MTU mismatch and system logs (/var/log/messages).

5. Upgrades & Version Management

 Upgrade Path

  • Follow supported paths:
    2.2 → 3.0.3 → 4.1.0 → 5.x+

 Use Swap Rebalance for Seamless Upgrade

  • Script the following:

    couchbase-cli rebalance \
      --cluster 127.0.0.1:8091 \
      --user admin --password pass \
      --server-remove oldnode \
      --server-add newnode

6. Cross Data Center Replication (XDCR)

  • Set up bidirectional or unidirectional replication.

  • Secure using SSL for XDCR.

  • Monitor lag using:

    couchbase-cli xdcr-replicate --list

XDCR Best Practices

  • Avoid XDCR loops (clusters replicating back to each other).

  • Monitor for large checkpoint backlogs.

  • Enable compression (if supported by version).


7. Monitoring & Automation

Tools

  • cbcollect_info: Cluster diagnostics.

  • cbstats: Node-level stats.

  • Custom scripts: monitor resident ratio, replication lag, memory usage.

Automation Ideas

  • Rebalancing triggers (after node recovery)

  • Scheduled backup + integrity validation

  • Alerting: memory, disk, XDCR lag thresholds


8. Sample Scripts Snippet

#!/bin/bash
# Monitor XDCR lag
CLUSTER="127.0.0.1"
curl -s -u admin:password \
  "http://${CLUSTER}:8091/pools/default/tasks" \
  | jq '.[] | select(.type=="xdcr") | {source, target, replication_lag}'

9. Final Points to Remember

  • Avoid SELECT * — write targeted queries with covering indexes.

  • Use partial indexes (WHERE type = 'txn') to reduce index bloat.

  • Monitor rebalance queue length during cluster changes.

  • Avoid over-committing memory across data and index services.

  • Tune Linux kernel to avoid memory fragmentation and swap usage.


Appendix

A. Relevant Logs to Monitor

  • /opt/couchbase/var/lib/couchbase/logs/ns_server.log

  • babysitter.log, memcached.log, stats.log

B. Key Directories

  • /opt/couchbase/var/lib/couchbase/data/

  • /opt/couchbase/var/lib/couchbase/index/