Monday, January 5, 2015

Points to remember in Oracle (CPU analysis, account locking, and non-CDB to PDB migration)

#!/bin/bash

# filepath: /path/to/cpu_analysis.sh
#
# Collects host CPU capacity figures and per-database CPU usage (via
# sqlplus) and prints a consolidated report. Intermediate results are
# written to files under $LOG_FOLDER.

# Work/log directory; may be overridden by exporting LOG_FOLDER first.
LOG_FOLDER="${LOG_FOLDER:-/x/home/oracle/admin/scripts/logs/cpu_analysis}"
mkdir -p "$LOG_FOLDER"

# Oracle environment variables (update as per your environment).
# Guarded so a missing profile does not emit errors or abort the script.
if [ -f "$HOME/.profile" ]; then
    # shellcheck disable=SC1090
    source "$HOME/.profile"
fi

# Temporary files
TOTAL_CPUS_FILE="$LOG_FOLDER/total_cpus.txt"
SYSTEM_CPU_USAGE_FILE="$LOG_FOLDER/system_cpu_usage.txt"
DATABASE_COUNT_FILE="$LOG_FOLDER/database_count.txt"
DATABASE_LIST_FILE="$LOG_FOLDER/database_list.txt"
DATABASE_CPU_USAGE_FILE="$LOG_FOLDER/database_cpu_usage.txt"
CAPACITY_FILE="$LOG_FOLDER/capacity.txt"
# Function to get total number of CPUs on the host.
# Counts "processor" entries and writes the count to $TOTAL_CPUS_FILE.
# Arguments:
#   $1 - (optional) path to a cpuinfo-style file; defaults to /proc/cpuinfo
get_total_cpus() {
    local cpuinfo="${1:-/proc/cpuinfo}"
    grep -c '^processor' "$cpuinfo" > "$TOTAL_CPUS_FILE"
}

# Function to get system-level CPU usage.
# Writes the overall busy percentage (100 - %idle) to
# $SYSTEM_CPU_USAGE_FILE.
# Fix: the original pattern /all/ matched BOTH the per-interval "all"
# line and the "Average ... all" summary line of `mpstat -P ALL 1 1`,
# so the file got two values and downstream arithmetic broke. Only the
# Average summary line is used now.
get_system_cpu_usage() {
    mpstat -P ALL 1 1 \
        | awk '/^Average/ && $0 ~ /all/ {print 100 - $NF}' \
        > "$SYSTEM_CPU_USAGE_FILE"
}

# Function to get the number of databases on the host.
# Writes a single number to $DATABASE_COUNT_FILE.
# NOTE(review): v$database returns exactly one row for the instance this
# session connects to (chosen by ORACLE_SID from the sourced profile),
# so this count is always 1. Counting all databases on the host would
# require enumerating instances (e.g. /etc/oratab or pmon processes)
# — TODO confirm intended behavior.
get_number_of_databases() {
sqlplus -s / as sysdba <<EOF > "$DATABASE_COUNT_FILE"
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT COUNT(*) FROM v\$database;
EXIT;
EOF
}

# Function to get list of databases.
# Writes one database name per line to $DATABASE_LIST_FILE.
# NOTE: v$database holds one row for the connected instance, so this
# lists only the database ORACLE_SID currently points at.
get_database_list() {
    # Quoted delimiter: the SQL is passed verbatim, so no backslash
    # escape is needed for the $ in v$database.
    sqlplus -s / as sysdba > "$DATABASE_LIST_FILE" <<'SQL'
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT name FROM v$database;
EXIT;
SQL
}

# Function to get CPU usage for each database.
# For each name produced by get_database_list, appends a section with
# the average 'Host CPU Utilization (%)' metric over the last hour to
# $DATABASE_CPU_USAGE_FILE.
# Fix: the original never changed ORACLE_SID, so every sqlplus session
# connected to the same instance and each "Database:" section reported
# identical numbers. Each iteration now targets its own SID.
get_database_cpu_usage() {
    local db
    > "$DATABASE_CPU_USAGE_FILE"
    while IFS= read -r db; do
        [ -n "$db" ] || continue
        echo "Database: $db" >> "$DATABASE_CPU_USAGE_FILE"
        # Subshell keeps the ORACLE_SID override local to this iteration.
        (
            export ORACLE_SID="$db"
            sqlplus -s / as sysdba <<EOF >> "$DATABASE_CPU_USAGE_FILE"
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT ROUND(SUM(VALUE) / COUNT(*), 2) FROM v\$sysmetric WHERE METRIC_NAME = 'Host CPU Utilization (%)' AND BEGIN_TIME > SYSDATE - 1/24;
EXIT;
EOF
        )
    done < "$DATABASE_LIST_FILE"
}

# Function to calculate current available capacity.
# Reads the CPU count and the busy percentage, writes used/available
# capacity expressed in CPUs to $CAPACITY_FILE.
# Fix: the original subtracted a usage *percentage* from the CPU
# *count* (e.g. 8 CPUs - 25 -> "-17"), mixing units. Convert the busy
# percentage into a CPU count first.
calculate_capacity() {
    local total_cpus used_pct used_cpus available_cpus
    total_cpus=$(cat "$TOTAL_CPUS_FILE")
    used_pct=$(cat "$SYSTEM_CPU_USAGE_FILE")
    # awk handles fractional arithmetic (bc is not always installed).
    used_cpus=$(awk -v t="$total_cpus" -v p="$used_pct" \
        'BEGIN {printf "%.2f", t * p / 100}')
    available_cpus=$(awk -v t="$total_cpus" -v u="$used_cpus" \
        'BEGIN {printf "%.2f", t - u}')
    echo "Current available capacity (CPUs): $available_cpus" > "$CAPACITY_FILE"
    echo "Total used capacity (CPUs): $used_cpus" >> "$CAPACITY_FILE"
}

# Function to print the report to stdout.
# Assumes all collector functions have already populated their files.
# Fix: the original printed the raw usage *percentage* on the
# "Number of CPUs currently in use" line; the count is now derived
# from the total CPU count and the percentage.
print_report() {
    local total_cpus used_pct cpus_in_use line
    total_cpus=$(cat "$TOTAL_CPUS_FILE")
    used_pct=$(cat "$SYSTEM_CPU_USAGE_FILE")
    cpus_in_use=$(awk -v t="$total_cpus" -v p="$used_pct" \
        'BEGIN {printf "%.2f", t * p / 100}')
    echo "==================== CPU Analysis Report ===================="
    echo "Date: $(date)"
    echo "-------------------------------------------------------------"
    echo "Total number of CPUs on the host: $total_cpus"
    echo "-------------------------------------------------------------"
    echo "System CPU Usage:"
    echo " - Number of CPUs currently in use: $cpus_in_use"
    echo " - Percentage of CPU being used: ${used_pct}%"
    echo "-------------------------------------------------------------"
    echo "Number of databases on the host: $(cat "$DATABASE_COUNT_FILE")"
    echo "-------------------------------------------------------------"
    echo "Database CPU Usage:"
    # Indent each stored line by one space (no useless cat | while).
    while IFS= read -r line; do
        echo " $line"
    done < "$DATABASE_CPU_USAGE_FILE"
    echo "-------------------------------------------------------------"
    echo "Capacity Analysis:"
    while IFS= read -r line; do
        echo " $line"
    done < "$CAPACITY_FILE"
    echo "============================================================="
}

# Main driver: run every collector in order, then render the report.
main() {
    get_total_cpus           # host CPU count
    get_system_cpu_usage     # overall busy percentage
    get_number_of_databases  # database count
    get_database_list        # database names
    get_database_cpu_usage   # per-database CPU metric
    calculate_capacity       # derived capacity figures
    print_report             # final report to stdout
}

# Run the main function
main

 


-- Lock a fixed list of database accounts and report the outcome of each.
-- ALTER USER is DDL, so it must be issued via EXECUTE IMMEDIATE.
DECLARE
    users_to_lock SYS.ODCIVARCHAR2LIST := SYS.ODCIVARCHAR2LIST('abc', 'def', 'ghi', 'jkl', 'mno');
    -- Renamed from account_status: a PL/SQL variable that shares its
    -- name with the dba_users.account_status column is a name-capture
    -- pitfall inside SQL statements. %TYPE tracks the column definition.
    v_account_status dba_users.account_status%TYPE;
BEGIN
    FOR i IN 1 .. users_to_lock.COUNT LOOP
        BEGIN
            EXECUTE IMMEDIATE 'ALTER USER ' || users_to_lock(i) || ' ACCOUNT LOCK';

            -- Check the account status after attempting to lock
            SELECT account_status INTO v_account_status
            FROM dba_users
            WHERE username = UPPER(users_to_lock(i));

            IF v_account_status = 'LOCKED' THEN
                DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' is now locked.');
            ELSE
                DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' is not locked.');
            END IF;
        EXCEPTION
            WHEN NO_DATA_FOUND THEN
                DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
            WHEN OTHERS THEN
                -- ORA-01918: user does not exist (raised by ALTER USER)
                IF SQLCODE = -1918 THEN
                    DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
                ELSE
                    DBMS_OUTPUT.PUT_LINE('An unexpected error occurred for user ' || users_to_lock(i) || '.');
                END IF;
        END;
    END LOOP;
END;
/

-- Report the account_status of each user in a fixed list.
DECLARE
    users_to_check SYS.ODCIVARCHAR2LIST := SYS.ODCIVARCHAR2LIST('abc', 'def', 'ghi', 'jkl', 'mno');
    -- Renamed from account_status to avoid name capture with the
    -- dba_users.account_status column; %TYPE tracks the column.
    v_account_status dba_users.account_status%TYPE;
BEGIN
    FOR i IN 1 .. users_to_check.COUNT LOOP
        BEGIN
            SELECT account_status INTO v_account_status
            FROM dba_users
            WHERE username = UPPER(users_to_check(i));

            DBMS_OUTPUT.PUT_LINE('User ' || users_to_check(i) || ' is ' || v_account_status || '.');
        EXCEPTION
            WHEN NO_DATA_FOUND THEN
                DBMS_OUTPUT.PUT_LINE('User ' || users_to_check(i) || ' does not exist.');
            WHEN OTHERS THEN
                DBMS_OUTPUT.PUT_LINE('An unexpected error occurred for user ' || users_to_check(i) || '.');
        END;
    END LOOP;
END;
/

 Yes, you can achieve this by checking if the user exists before attempting to lock the account. Here's how you can modify your SQL to handle this and send it as a single statement:


sql
BEGIN
EXECUTE IMMEDIATE 'ALTER USER abc ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User abc does not exist.');
ELSE
RAISE; -- Re-raise other exceptions
END IF;
END;
/

BEGIN
EXECUTE IMMEDIATE 'ALTER USER def ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User def does not exist.');
ELSE
RAISE; -- Re-raise other exceptions
END IF;
END;
/
Explanation:

BEGIN...END; Blocks: We enclose each ALTER USER statement within a PL/SQL block. This allows us to handle exceptions for each user individually.
EXECUTE IMMEDIATE: We use dynamic SQL (EXECUTE IMMEDIATE) to execute the ALTER USER command. This is necessary because we're embedding SQL within PL/SQL.
Exception Handling: The EXCEPTION block catches any errors that occur during the execution of the ALTER USER command.
WHEN OTHERS: This handles all exceptions.
IF SQLCODE = -1918 THEN: We check if the specific error ORA-01918 (user not found) occurred.
DBMS_OUTPUT.PUT_LINE(...): If the user doesn't exist, we print a message to the output (you'll need to enable server output to see this message, e.g., SET SERVEROUTPUT ON; in SQL*Plus or SQL Developer). You could replace this with other actions like logging to a table or raising a different exception.
ELSE RAISE;: For any other exception besides ORA-01918, we re-raise the exception so that it's not swallowed and can be handled by a higher-level error handler if needed.
/: This is a SQL*Plus or SQL Developer command that separates PL/SQL blocks and tells the database to execute the preceding block.
How to send it:

You would send the entire block of code above as a single SQL statement to your $sql1 and $sql2 variables. The database will execute each PL/SQL block sequentially. Because of the error handling, if a user doesn't exist, the code will continue to the next block without stopping. It will only attempt to lock users that actually exist (or at least, it will handle the "user not found" error gracefully if they don't).

Rate this answer:

Mattewada Rajkiran
can you do it in one block for 5 users?


Gemini
Sure, I can consolidate that into a single block for five users! Here's how you can do it:

sql
DECLARE
    users_to_lock SYS.ODCIVARCHAR2LIST :=
        SYS.ODCIVARCHAR2LIST('abc', 'def', 'user3', 'user4', 'user5');
BEGIN
    FOR i IN 1 .. users_to_lock.COUNT LOOP
        BEGIN
            EXECUTE IMMEDIATE 'ALTER USER ' || users_to_lock(i) || ' ACCOUNT LOCK';
        EXCEPTION
            WHEN OTHERS THEN
                IF SQLCODE = -1918 THEN
                    DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
                ELSE
                    RAISE; -- Re-raise other exceptions
                END IF;
        END;
    END LOOP;
END;
/
(Note: PL/SQL FOR loops cannot iterate directly over an inline list literal; a collection such as SYS.ODCIVARCHAR2LIST is required.)
This approach uses a FOR loop to iterate through a list of usernames. Within the loop, it attempts to lock each user's account. The exception handling remains the same, specifically addressing the ORA-01918 error. If a user doesn't exist, it prints a message; otherwise, it moves on to the next user in the list. Now, you can provide all five usernames in a single, concise block!


To execute multiple SQL statements in one go and handle cases where a user might not exist, you can use PL/SQL blocks with conditional logic. Here's how you can achieve this:

BEGIN
-- Attempt to lock user 'abc'
BEGIN
EXECUTE IMMEDIATE 'ALTER USER abc ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User abc does not exist.');
ELSE
RAISE;
END IF;
END;

-- Attempt to lock user 'def'
BEGIN
EXECUTE IMMEDIATE 'ALTER USER def ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User def does not exist.');
ELSE
RAISE;
END IF;
END;
END;
/
Explanation:
PL/SQL Block: The entire operation is wrapped in a PL/SQL block, allowing for procedural logic.
EXECUTE IMMEDIATE: This is used to execute dynamic SQL statements.
Exception Handling: Each ALTER USER statement is wrapped in its own BEGIN...EXCEPTION...END block to handle exceptions individually.
SQLCODE -1918: This specific error code corresponds to ORA-01918, which indicates that the user does not exist. The block checks for this error and handles it gracefully by printing a message.
DBMS_OUTPUT.PUT_LINE: This is used to output messages. Ensure that DBMS_OUTPUT is enabled in your SQL client to see these messages.
Usage:
You can send this entire block to your SQL execution environment (e.g., SQL*Plus, SQL Developer) as a single command.
This approach ensures that each user is only locked if they exist, and it provides feedback if a user does not exist.
Is this helpful?

can you do it in one block for 5 users? instead of multiple $sql1 $sq2, etc.,


Certainly! You can handle multiple users in a single PL/SQL block by iterating over a list of usernames and applying the same logic to each. Here's how you can do it for five users:

DECLARE
users_to_lock SYS.ODCIVARCHAR2LIST := SYS.ODCIVARCHAR2LIST('abc', 'def', 'ghi', 'jkl', 'mno');
BEGIN
FOR i IN 1 .. users_to_lock.COUNT LOOP
BEGIN
EXECUTE IMMEDIATE 'ALTER USER ' || users_to_lock(i) || ' ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
ELSE
RAISE;
END IF;
END;
END LOOP;
END;
/
Explanation:
SYS.ODCIVARCHAR2LIST: This is a collection type that can hold a list of strings. It's used here to store the usernames you want to lock.
FOR LOOP: Iterates over each username in the list.
EXECUTE IMMEDIATE: Dynamically constructs and executes the ALTER USER statement for each username.
Exception Handling: Catches the ORA-01918 error for non-existent users and outputs a message using DBMS_OUTPUT.PUT_LINE.
DBMS_OUTPUT.PUT_LINE: Outputs messages for each user that does not exist. Ensure DBMS_OUTPUT is enabled in your SQL client to see these messages.
Usage:
You can execute this block in your SQL environment as a single command.
This approach efficiently handles multiple users in one block, checking for existence and locking accounts as needed.





To enhance the document for the "move" method in the context of converting a non-CDB to a CDB, we need to outline the specific steps, benefits, and considerations associated with this approach. The "move" method involves relocating datafiles without copying them, similar to the NOCOPY method, but with additional considerations for file management and metadata updates.

Enhanced Document: Non-CDB to CDB Conversion Using MOVE Method

Internal Actions:

  1. CREATE PLUGGABLE DATABASE Statement:

    • This command is used within the CDB to initiate the creation of the new PDB using the MOVE method.
  2. XML File Parsing:

    • Oracle reads and parses the new_pdb.xml file generated earlier to understand the structure and metadata of the non-CDB.
  3. PDB Creation:

    • A new PDB entry is created in the CDB's data dictionary.
  4. Tablespace and Datafile Setup:

    • Based on the XML file:
      • New tablespaces are created within the CDB to match the tablespaces found in the XML.
      • Datafiles are moved from the non-CDB location to the CDB location, updating paths as necessary.
  5. Metadata Update:

    • Oracle updates the metadata in the new PDB to reference the moved datafiles. The path is modified using the file_name_convert clause.
  6. Shared Files:

    • The PDB now uses the moved datafiles, which are no longer associated with the original non-CDB.
  7. Control File:

    • The PDB will get its own control file, separate from the non-CDB.
  8. Object Recreation:

    • Database objects (tables, views, etc.) are recreated inside the PDB based on the XML metadata.
  9. User Accounts:

    • User accounts, roles, and privileges are created and assigned within the PDB.
  10. PDB Status:

    • The new PDB is created in a MOUNTED state. It is not yet OPEN.
  11. Log Files:

    • The new PDB will have its own redo log files, separate from the CDB and other PDBs.
  12. Undo Tablespace:

    • The new PDB will have its own undo tablespace.
  13. Temporary Tablespace:

    • The new PDB will have its own temporary tablespace.
  14. Original Non-CDB Data Files:

    • The original non-CDB data files are now considered to belong to the PDB.

Pros:

  • Speed: The conversion is fast because files are moved rather than copied.
  • Storage Efficiency: No additional storage space is required, as the original datafiles are moved.

Cons:

  • Risk to Original Data: The PDB now uses the moved non-CDB datafiles. Any corruption within the PDB could affect the data within the moved files.
  • Complex Rollback: Rolling back to the original non-CDB is significantly more complex and risky.
  • Interdependency: If the PDB is dropped, the files are also dropped, as they are now considered to belong to the PDB.

Rollback for MOVE

Warning: Rollback for MOVE is complicated and carries a significant risk of data loss if not performed carefully.

  1. Stop and Drop the PDB:

    • If the PDB is open, you must close it.
    • Drop the PDB, including datafiles.
  2. Verification (Critical):

    • Check the original file locations: because the drop was executed including datafiles, the datafiles that originally belonged to the non-CDB were removed as well — after a MOVE conversion they are considered part of the PDB.
  3. Recover from Backup:

    • If the files were dropped, you have to restore them from backup. If you didn't make a backup of the files, there is no way to recover them.
  4. Recover to a New Name:

    • If you don't have a backup of the non-CDB, you can try to start the non-CDB as a new database, with a new name.
  5. Clean Up:

    • Remove any new files created, like log files and undo tablespace.
    • Try to start the non-CDB again.
  6. Phase 3: Post-Creation Cleanup:

    • Run a cleanup to get rid of non-CDB items in the new pluggable database using @$ORACLE_HOME/rdbms/admin/noncdb_to_pdb.sql.

Internal Actions:

  • Script Execution: The noncdb_to_pdb.sql script is designed to remove remnants of the non-CDB structure from the newly created PDB.
  • SYS Connection: This script needs to be run connected as a user with SYSDBA privileges, and connected to the PDB.
  • Script Actions: The specific actions in the script are version-dependent, but they generally include:
    • Invalidating Obsolete Objects: It might drop or invalidate some objects that are no longer relevant in the PDB environment.
    • Removing Non-PDB User: It likely removes the database user SYSBACKUP, as well as other database objects that are no longer used in a PDB environment.
    • Updating Dictionary: It modifies some data dictionary information to correctly reflect the PDB's identity.
    • Resource Management: It modifies some parameters in the database to suit it to the new PDB environment.
    • Cleaning up: Removes data, settings, and items that are no longer used.
    • Recompiles: Recompile invalid objects.

Why it is Important:

This step is vital for a clean conversion. It ensures that the PDB is properly integrated into the CDB environment and doesn't retain unnecessary or conflicting information from the original non-CDB. This cleanup script should be run in the PDB, and not in the CDB.

Method 1: COPY vs Method 2: NOCOPY vs Method 3: MOVE - Summary Table

| Feature | COPY | NOCOPY | MOVE |
| --- | --- | --- | --- |
| Datafiles | Copied to new location. | Reused from the original non-CDB. | Moved from original non-CDB. |
| Speed | Slower (due to file copy). | Faster (no file copy). | Fast (file move). |
| Storage | Requires double the storage initially. | No extra storage initially. | No extra storage initially. |
| Original Data | Isolated. Changes in PDB do not affect non-CDB data. | Shared. Changes in PDB can affect non-CDB data (risky). | Shared. Changes in PDB can affect non-CDB data (risky). |
| Rollback | Easier: Drop PDB, copied files are removed. Original files untouched. | Difficult & Risky: Requires careful manual intervention. Data loss possible. ORA-01122 errors likely. | Difficult & Risky: Requires careful manual intervention. Data loss possible. |
| Risk | Lower risk to original non-CDB data. | Higher risk to original non-CDB data. | Higher risk to original non-CDB data. |
| File removal | If the PDB is dropped, only the copied files are removed. | If the PDB is dropped, all files from the old non-CDB are removed. | If the PDB is dropped, all files from the old non-CDB are removed. |
| Dependencies | No dependencies with the non-CDB data files. | Strong dependency with non-CDB data files. | Strong dependency with non-CDB data files. |
| ORA errors | No errors will happen with the original non-CDB if you shut down the CDB and try to start it. | ORA-01122, ORA-01110, ORA-01204 will happen if you shut down the CDB and try to start the non-CDB. | ORA-01122, ORA-01110, ORA-01204 will happen if you shut down the CDB and try to start the non-CDB. |

Key Points & Considerations:

  • Datafile Management: The MOVE method involves relocating datafiles, which requires careful management to ensure paths are updated correctly.
  • file_name_convert is Essential: This clause is critical for relocating datafiles to the correct location in the CDB.
  • Metadata vs. Data: The DBMS_PDB.DESCRIBE procedure only extracts metadata. The data itself is moved during the CREATE PLUGGABLE DATABASE process.
  • Downtime: The non-CDB is unavailable during the SHUTDOWN and data file move. The time required for the move is directly related to the size of the non-CDB's datafiles.
  • CDB Requirements: The CDB must have sufficient disk space for the new PDB's datafiles, and it must be running and accessible.
  • Non-CDB files: After the conversion, you might decide to keep the non-CDB files as a backup, or remove them (only if you used COPY).
  • Compatibility: The Oracle version of the CDB should be the same or later than the non-CDB.
  • Backup: Always take a backup of the files before using MOVE.

This enhanced document provides a comprehensive overview of the MOVE method for converting a non-CDB to a CDB, highlighting the internal actions, pros and cons, rollback procedures, and key considerations.



Migrating a non-CDB with Transparent Data Encryption (TDE) enabled to a CDB involves several steps to ensure that encryption keys and encrypted data are properly handled. TDE is used to encrypt data at rest, and it's crucial to manage the encryption keys during the migration process. Below are detailed steps and commands to facilitate this migration:

Pre-Migration Steps

  1. Verify TDE Configuration:

    • Ensure that TDE is properly configured in the non-CDB. Check the status of the wallet and the encryption keys.
    SELECT * FROM V$ENCRYPTION_WALLET;
    
  2. Backup the Wallet:

    • Backup the wallet directory from the non-CDB. This is crucial as the wallet contains the encryption keys.
    cp -r $ORACLE_BASE/admin/<non-cdb>/wallet /backup/location/
    
  3. Backup the Database:

    • Perform a full backup of the non-CDB to ensure data safety.

Migration Steps

  1. Prepare the CDB Environment:

    • Ensure that the CDB is configured to use TDE. If not, set up TDE in the CDB.
    ADMINISTER KEY MANAGEMENT CREATE KEYSTORE '/path/to/cdb/wallet' IDENTIFIED BY "password";
    ADMINISTER KEY MANAGEMENT SET KEYSTORE OPEN IDENTIFIED BY "password";
    ADMINISTER KEY MANAGEMENT SET ENCRYPTION KEY IDENTIFIED BY "password" WITH BACKUP;
    
  2. Copy the Wallet:

    • Copy the wallet from the non-CDB to the CDB's wallet location. Caution: if the CDB already has its own keystore (created in step 1), do not overwrite it with a plain file copy — that would discard the CDB master key. Merge or import the non-CDB keys instead (ADMINISTER KEY MANAGEMENT ... IMPORT KEYS); verify the exact procedure against your Oracle version's TDE documentation.
    cp -r /backup/location/wallet /path/to/cdb/wallet
    
  3. Open the Wallet in CDB:

    • Open the wallet in the CDB to access the encryption keys.
    ADMINISTER KEY MANAGEMENT SET KEYSTORE OPEN IDENTIFIED BY "password";
    
  4. Create Pluggable Database:

    • Use the CREATE PLUGGABLE DATABASE command to migrate the non-CDB to a PDB within the CDB.
    CREATE PLUGGABLE DATABASE pdb_name USING '/path/to/new_pdb.xml' NOCOPY
    FILE_NAME_CONVERT = ('/path/to/non-cdb/datafiles', '/path/to/cdb/datafiles');
    
  5. Verify Encryption in PDB:

    • Check that the encryption keys are accessible and that encrypted data is intact in the new PDB.
    SELECT * FROM V$ENCRYPTION_WALLET;
    SELECT TABLESPACE_NAME, ENCRYPTED FROM DBA_TABLESPACES WHERE ENCRYPTED = 'YES';
    

Post-Migration Steps

  1. Close the Wallet:

    • After verifying the migration, close the wallet in the CDB.
    ADMINISTER KEY MANAGEMENT SET KEYSTORE CLOSE IDENTIFIED BY "password";
    
  2. Backup the CDB Wallet:

    • Backup the wallet in the CDB to ensure encryption keys are safe.
    cp -r /path/to/cdb/wallet /backup/location/
    
  3. Test the PDB:

    • Perform tests to ensure that the PDB is functioning correctly and that encrypted data is accessible.
  4. Cleanup:

    • Remove any temporary files or backups that are no longer needed.

Additional Considerations

  • Compatibility: Ensure that the Oracle version of the CDB supports TDE and is compatible with the non-CDB.
  • Security: Maintain security protocols during the migration, especially when handling encryption keys and wallets.
  • Documentation: Document each step of the migration process for future reference and auditing purposes.

By following these detailed steps, you can successfully migrate a non-CDB with TDE enabled to a CDB, ensuring that encryption keys and encrypted data are properly managed throughout the process.