#!/bin/bash
# filepath: /path/to/cpu_analysis.sh
#
# cpu_analysis.sh — report total host CPUs, system CPU usage, and per-database
# CPU usage for Oracle databases on this host, then print a summary report.

# Fail fast on errors and broken pipelines. (-u is deliberately omitted:
# ~/.profile below may reference unset variables.)
set -eo pipefail

# Directory that holds all intermediate files and the final report.
LOG_FOLDER="/x/home/oracle/admin/scripts/logs/cpu_analysis"
mkdir -p "$LOG_FOLDER"

# Oracle environment variables (update as per your environment).
# shellcheck disable=SC1090
source ~/.profile

# Temporary files used to pass data between the collection steps below.
readonly TOTAL_CPUS_FILE="$LOG_FOLDER/total_cpus.txt"
readonly SYSTEM_CPU_USAGE_FILE="$LOG_FOLDER/system_cpu_usage.txt"
readonly DATABASE_COUNT_FILE="$LOG_FOLDER/database_count.txt"
readonly DATABASE_LIST_FILE="$LOG_FOLDER/database_list.txt"
readonly DATABASE_CPU_USAGE_FILE="$LOG_FOLDER/database_cpu_usage.txt"
readonly CAPACITY_FILE="$LOG_FOLDER/capacity.txt"
# Write the total number of logical CPUs on the host to TOTAL_CPUS_FILE.
# (In the original mangled source the function definition was fused onto a
# comment line, so it never existed; reconstructed here.)
get_total_cpus() {
  grep -c ^processor /proc/cpuinfo > "$TOTAL_CPUS_FILE"
}
# Sample system-wide CPU usage with mpstat and write the busy percentage
# (100 - %idle) to SYSTEM_CPU_USAGE_FILE.
#
# Fix: the original pattern /all/ matched BOTH the one-second sample line and
# the trailing "Average:" line, writing two values into the file. Anchor on
# the Average row so exactly one value is produced.
get_system_cpu_usage() {
  mpstat -P ALL 1 1 \
    | awk '/Average/ && /all/ {print 100 - $NF}' > "$SYSTEM_CPU_USAGE_FILE"
}
# Query the connected instance and write a database count to
# DATABASE_COUNT_FILE.
#
# NOTE(review): a single "/ as sysdba" session sees exactly one database, so
# v$database always returns 1 here. To count all databases on the host,
# consider parsing /etc/oratab or counting ora_pmon_* processes — confirm
# the intended semantics with the script owner.
get_number_of_databases() {
  sqlplus -s / as sysdba <<EOF > "$DATABASE_COUNT_FILE"
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT COUNT(*) FROM v\$database;
EXIT;
EOF
}
# Write the name of the connected database to DATABASE_LIST_FILE,
# one name per line, for later per-database processing.
get_database_list() {
  sqlplus -s / as sysdba <<EOF > "$DATABASE_LIST_FILE"
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT name FROM v\$database;
EXIT;
EOF
}
# For each database name in DATABASE_LIST_FILE, append its average
# 'Host CPU Utilization (%)' over the last hour to DATABASE_CPU_USAGE_FILE.
#
# Fixes vs. original: explicit truncate instead of a stray `> file` redirect
# on an assignment, and a read -r loop instead of word-splitting `for db in
# $databases` (database names with unusual characters would break the loop).
get_database_cpu_usage() {
  : > "$DATABASE_CPU_USAGE_FILE"   # start with an empty report file
  while IFS= read -r db; do
    [[ -n "$db" ]] || continue     # skip blank lines from sqlplus output
    echo "Database: $db" >> "$DATABASE_CPU_USAGE_FILE"
    # NOTE(review): this connects to the current ORACLE_SID only; to query
    # each listed database, ORACLE_SID would need to be switched per
    # iteration — confirm intended behavior.
    sqlplus -s / as sysdba <<EOF >> "$DATABASE_CPU_USAGE_FILE"
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT ROUND(SUM(VALUE) / COUNT(*), 2)
FROM v\$sysmetric
WHERE METRIC_NAME = 'Host CPU Utilization (%)'
  AND BEGIN_TIME > SYSDATE - 1/24;
EXIT;
EOF
  done < "$DATABASE_LIST_FILE"
}
# Compute used and available CPU counts and write them to CAPACITY_FILE.
#
# Bug fix: the original did `total_cpus - used_capacity` where used_capacity
# is a busy PERCENTAGE, not a CPU count (e.g. 8 CPUs at 50% busy reported
# "-42" available). Convert the percentage into CPU-equivalents first.
calculate_capacity() {
  local total_cpus used_pct used_cpus available_cpus
  total_cpus=$(cat "$TOTAL_CPUS_FILE")
  used_pct=$(cat "$SYSTEM_CPU_USAGE_FILE")
  # Busy percentage expressed as a number of CPUs.
  used_cpus=$(awk -v t="$total_cpus" -v p="$used_pct" \
    'BEGIN {printf "%.2f", t * p / 100}')
  available_cpus=$(awk -v t="$total_cpus" -v u="$used_cpus" \
    'BEGIN {printf "%.2f", t - u}')
  {
    echo "Current available capacity (CPUs): $available_cpus"
    echo "Total used capacity (CPUs): $used_cpus"
  } > "$CAPACITY_FILE"
}
# Print the final CPU analysis report to stdout, assembled from the
# intermediate files produced by the collection functions.
#
# Bug fix: "Number of CPUs currently in use" previously echoed the raw busy
# PERCENTAGE; it now converts that percentage into a CPU count. Also replaces
# useless `cat | while read` pipelines with direct indentation via sed.
print_report() {
  local total pct used
  total=$(cat "$TOTAL_CPUS_FILE")
  pct=$(cat "$SYSTEM_CPU_USAGE_FILE")
  used=$(awk -v t="$total" -v p="$pct" 'BEGIN {printf "%.2f", t * p / 100}')
  echo "==================== CPU Analysis Report ===================="
  echo "Date: $(date)"
  echo "-------------------------------------------------------------"
  echo "Total number of CPUs on the host: $total"
  echo "-------------------------------------------------------------"
  echo "System CPU Usage:"
  echo " - Number of CPUs currently in use: $used"
  echo " - Percentage of CPU being used: $pct%"
  echo "-------------------------------------------------------------"
  echo "Number of databases on the host: $(cat "$DATABASE_COUNT_FILE")"
  echo "-------------------------------------------------------------"
  echo "Database CPU Usage:"
  sed 's/^/ /' "$DATABASE_CPU_USAGE_FILE"
  echo "-------------------------------------------------------------"
  echo "Capacity Analysis:"
  sed 's/^/ /' "$CAPACITY_FILE"
  echo "============================================================="
}
# Orchestrate the full analysis: gather data, compute capacity, print report.
main() {
  get_total_cpus
  get_system_cpu_usage
  get_number_of_databases
  get_database_list
  get_database_cpu_usage
  calculate_capacity
  print_report
}

# Run the main function, forwarding any script arguments.
main "$@"
DECLARE
  -- Users whose accounts should be locked.
  users_to_lock SYS.ODCIVARCHAR2LIST :=
    SYS.ODCIVARCHAR2LIST('abc', 'def', 'ghi', 'jkl', 'mno');
  -- Renamed from account_status: a PL/SQL variable with the same name as the
  -- selected column is captured by the COLUMN inside the SQL statement —
  -- legal, but a classic name-capture hazard. %TYPE keeps it in sync with
  -- the dictionary definition.
  l_account_status dba_users.account_status%TYPE;
BEGIN
  FOR i IN 1 .. users_to_lock.COUNT LOOP
    BEGIN
      -- DBMS_ASSERT.ENQUOTE_NAME guards the dynamic SQL against identifier
      -- injection should this list ever be populated from external input.
      -- UPPER() preserves the original behavior of unquoted identifiers.
      EXECUTE IMMEDIATE 'ALTER USER '
        || DBMS_ASSERT.ENQUOTE_NAME(UPPER(users_to_lock(i)), FALSE)
        || ' ACCOUNT LOCK';

      -- Confirm the lock actually took effect.
      SELECT account_status
        INTO l_account_status
        FROM dba_users
       WHERE username = UPPER(users_to_lock(i));

      IF l_account_status = 'LOCKED' THEN
        DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' is now locked.');
      ELSE
        DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' is not locked.');
      END IF;
    EXCEPTION
      WHEN NO_DATA_FOUND THEN
        DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
      WHEN OTHERS THEN
        IF SQLCODE = -1918 THEN  -- ORA-01918: user does not exist
          DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
        ELSE
          -- Include SQLERRM so unexpected failures are diagnosable instead of
          -- being silently reduced to a generic message.
          DBMS_OUTPUT.PUT_LINE('An unexpected error occurred for user '
            || users_to_lock(i) || ': ' || SQLERRM);
        END IF;
    END;
  END LOOP;
END;
/
DECLARE
  -- Users whose account status should be reported.
  users_to_check SYS.ODCIVARCHAR2LIST :=
    SYS.ODCIVARCHAR2LIST('abc', 'def', 'ghi', 'jkl', 'mno');
  -- Renamed from account_status: a variable named identically to the selected
  -- column is shadowed by the column inside the SQL statement (name-capture
  -- hazard). %TYPE anchors it to the dictionary column definition.
  l_account_status dba_users.account_status%TYPE;
BEGIN
  FOR i IN 1 .. users_to_check.COUNT LOOP
    BEGIN
      SELECT account_status
        INTO l_account_status
        FROM dba_users
       WHERE username = UPPER(users_to_check(i));

      DBMS_OUTPUT.PUT_LINE('User ' || users_to_check(i) || ' is '
        || l_account_status || '.');
    EXCEPTION
      WHEN NO_DATA_FOUND THEN
        DBMS_OUTPUT.PUT_LINE('User ' || users_to_check(i) || ' does not exist.');
      WHEN OTHERS THEN
        -- Include SQLERRM so unexpected failures are diagnosable.
        DBMS_OUTPUT.PUT_LINE('An unexpected error occurred for user '
          || users_to_check(i) || ': ' || SQLERRM);
    END;
  END LOOP;
END;
/
Note: the PL/SQL blocks above first verify that each user exists (via dba_users and the ORA-01918 handler) before reporting the result, so the entire list can be processed in a single statement per block.
This document describes the "move" method for converting a non-CDB to a PDB within a CDB, covering the specific steps, benefits, and considerations of this approach. The MOVE method relocates datafiles without copying them — similar to the NOCOPY method, but with additional considerations for file management and metadata updates.
Enhanced Document: Non-CDB to CDB Conversion Using MOVE Method
Internal Actions:
CREATE PLUGGABLE DATABASE Statement:
- This command is used within the CDB to initiate the creation of the new PDB using the MOVE method.
XML File Parsing:
- Oracle reads and parses the new_pdb.xml file generated earlier to understand the structure and metadata of the non-CDB.
PDB Creation:
- A new PDB entry is created in the CDB's data dictionary.
Tablespace and Datafile Setup:
- Based on the XML file:
  - New tablespaces are created within the CDB to match the tablespaces found in the XML.
  - Datafiles are moved from the non-CDB location to the CDB location, updating paths as necessary.
Metadata Update:
- Oracle updates the metadata in the new PDB to reference the moved datafiles. The path is modified using the file_name_convert clause.
Shared Files:
- The PDB now uses the moved datafiles, which are no longer associated with the original non-CDB.
Control File:
- The PDB will get its own control file, separate from the non-CDB.
Object Recreation:
- Database objects (tables, views, etc.) are recreated inside the PDB based on the XML metadata.
User Accounts:
- User accounts, roles, and privileges are created and assigned within the PDB.
PDB Status:
- The new PDB is created in a MOUNTED state. It is not yet OPEN.
Log Files:
- The new PDB will have its own redo log files, separate from the CDB and other PDBs.
Undo Tablespace:
- The new PDB will have its own undo tablespace.
Temporary Tablespace:
- The new PDB will have its own temporary tablespace.
Original Non-CDB Data Files:
- The original non-CDB data files are now considered to belong to the PDB.
Pros:
- Speed: The conversion is fast because files are moved rather than copied.
- Storage Efficiency: No additional storage space is required, as the original datafiles are moved.
Cons:
- Risk to Original Data: The PDB now uses the moved non-CDB datafiles. Any corruption within the PDB could affect the data within the moved files.
- Complex Rollback: Rolling back to the original non-CDB is significantly more complex and risky.
- Interdependency: If the PDB is dropped, the files are also dropped, as they are now considered to belong to the PDB.
Rollback for MOVE
Warning: Rollback for MOVE is complicated and carries a significant risk of data loss if not performed carefully.
Stop and Drop the PDB:
- If the PDB is open, you must close it.
- Drop the PDB, including datafiles.
Verification (Critical):
- Check oldspec: Because the DROP was executed INCLUDING DATAFILES, the files under oldspec were removed as well — they were considered part of the PDB.
Recover from Backup:
- If the files were dropped, you have to restore them from backup. If you didn't make a backup of the files, there is no way to recover them.
Recover to a New Name:
- If you don't have a backup of the non-CDB, you can try to start the non-CDB as a new database, with a new name.
Clean Up:
- Remove any new files created, like log files and undo tablespace.
- Try to start the non-CDB again.
Phase 3: Post-Creation Cleanup:
- Run a cleanup to get rid of non-CDB items in the new pluggable database using @$ORACLE_HOME/rdbms/admin/noncdb_to_pdb.sql.
Internal Actions:
- Script Execution: The noncdb_to_pdb.sql script is designed to remove remnants of the non-CDB structure from the newly created PDB.
- SYS Connection: This script needs to be run connected as a user with SYSDBA privileges, and connected to the PDB.
- Script Actions: The specific actions in the script are version-dependent, but they generally include:
- Invalidating Obsolete Objects: It might drop or invalidate some objects that are no longer relevant in the PDB environment.
- Removing Non-PDB User: It likely removes the database user SYSBACKUP, as well as other database objects that are no longer used in a PDB environment.
- Updating Dictionary: It modifies some data dictionary information to correctly reflect the PDB's identity.
- Resource Management: It modifies some parameters in the database to suit it to the new PDB environment.
- Cleaning up: Removes data, settings, and items that are no longer used.
- Recompiles: Recompile invalid objects.
Why it is Important:
This step is vital for a clean conversion. It ensures that the PDB is properly integrated into the CDB environment and doesn't retain unnecessary or conflicting information from the original non-CDB. This cleanup script should be run in the PDB, and not in the CDB.
Method 1: COPY vs Method 2: NOCOPY vs Method 3: MOVE - Summary Table
Feature | COPY | NOCOPY | MOVE |
---|---|---|---|
Datafiles | Copied to new location. | Reused from the original non-CDB. | Moved from original non-CDB. |
Speed | Slower (due to file copy). | Faster (no file copy). | Fast (file move). |
Storage | Requires double the storage initially. | No extra storage initially. | No extra storage initially. |
Original Data | Isolated. Changes in PDB do not affect non-CDB data. | Shared. Changes in PDB can affect non-CDB data (risky). | Shared. Changes in PDB can affect non-CDB data (risky). |
Rollback | Easier: Drop PDB, copied files are removed. Original files untouched. | Difficult & Risky: Requires careful manual intervention. Data loss possible. ORA-01122 errors likely. | Difficult & Risky: Requires careful manual intervention. Data loss possible. |
Risk | Lower risk to original non-CDB data. | Higher risk to original non-CDB data. | Higher risk to original non-CDB data. |
File removal | If the PDB is dropped, only the copied files are removed. | If the PDB is dropped, all files from the old non-CDB are removed. | If the PDB is dropped, all files from the old non-CDB are removed. |
Dependencies | No dependencies with the non-CDB data files. | Strong dependency with non-CDB data files. | Strong dependency with non-CDB data files. |
ORA errors | No errors will happen with the original non-CDB if you shut down the CDB and try to start it. | ORA-01122, ORA-01110, ORA-01204 will happen if you shut down the CDB and try to start the non-CDB. | ORA-01122, ORA-01110, ORA-01204 will happen if you shut down the CDB and try to start the non-CDB. |
Key Points & Considerations:
- Datafile Management: The MOVE method involves relocating datafiles, which requires careful management to ensure paths are updated correctly.
- file_name_convert is Essential: This clause is critical for relocating datafiles to the correct location in the CDB.
- Metadata vs. Data: The DBMS_PDB.DESCRIBE procedure only extracts metadata. The data itself is moved during the CREATE PLUGGABLE DATABASE process.
- Downtime: The non-CDB is unavailable during the SHUTDOWN and data file move. The time required for the move is directly related to the size of the non-CDB's datafiles.
- CDB Requirements: The CDB must have sufficient disk space for the new PDB's datafiles, and it must be running and accessible.
- Non-CDB files: With MOVE, the original file locations no longer contain the datafiles after conversion; only the COPY method leaves the original non-CDB files behind, where you may keep them as a backup or remove them.
- Compatibility: The Oracle version of the CDB should be the same or later than the non-CDB.
- Backup: Always take a backup of the files before using MOVE.
This enhanced document provides a comprehensive overview of the MOVE method for converting a non-CDB to a CDB, highlighting the internal actions, pros and cons, rollback procedures, and key considerations.
Migrating a non-CDB with Transparent Data Encryption (TDE) enabled to a CDB involves several steps to ensure that encryption keys and encrypted data are properly handled. TDE is used to encrypt data at rest, and it's crucial to manage the encryption keys during the migration process. Below are detailed steps and commands to facilitate this migration:
Pre-Migration Steps
Verify TDE Configuration:
- Ensure that TDE is properly configured in the non-CDB. Check the status of the wallet and the encryption keys.
Backup the Wallet:
- Backup the wallet directory from the non-CDB. This is crucial as the wallet contains the encryption keys.
Backup the Database:
- Perform a full backup of the non-CDB to ensure data safety.
Migration Steps
Prepare the CDB Environment:
- Ensure that the CDB is configured to use TDE. If not, set up TDE in the CDB.
Copy the Wallet:
- Copy the wallet from the non-CDB to the CDB's wallet location.
Open the Wallet in CDB:
- Open the wallet in the CDB to access the encryption keys.
Create Pluggable Database:
- Use the CREATE PLUGGABLE DATABASE command to migrate the non-CDB to a PDB within the CDB.
Verify Encryption in PDB:
- Check that the encryption keys are accessible and that encrypted data is intact in the new PDB.
Post-Migration Steps
Close the Wallet:
- After verifying the migration, close the wallet in the CDB.
Backup the CDB Wallet:
- Backup the wallet in the CDB to ensure encryption keys are safe.
Test the PDB:
- Perform tests to ensure that the PDB is functioning correctly and that encrypted data is accessible.
Cleanup:
- Remove any temporary files or backups that are no longer needed.
Additional Considerations
- Compatibility: Ensure that the Oracle version of the CDB supports TDE and is compatible with the non-CDB.
- Security: Maintain security protocols during the migration, especially when handling encryption keys and wallets.
- Documentation: Document each step of the migration process for future reference and auditing purposes.
By following these detailed steps, you can successfully migrate a non-CDB with TDE enabled to a CDB, ensuring that encryption keys and encrypted data are properly managed throughout the process.