Monday, January 5, 2015

Points to remember in Couchbase

#!/bin/bash
# Oracle CPU Capacity Report with Solid Validation
# Version 2.4 - Robust Output Parsing
#
# Produces a point-in-time CPU capacity report for an Oracle database host
# plus a companion log that cross-checks the arithmetic of the metrics.
# Requires: sqlplus (OS-authenticated sysdba), bc, top, nproc or /proc/cpuinfo.

# Timestamped output paths; a fresh pair of files is produced on every run.
REPORT_FILE="/tmp/cpu_capacity_report_$(date +%Y%m%d_%H%M%S).txt"
VALIDATION_FILE="/tmp/cpu_validation_$(date +%Y%m%d_%H%M%S).log"

# Improved SQL execution function
#
# Runs one SQL statement against a given instance and extracts a single
# numeric token from the sqlplus output.
# Arguments: $1 - SQL text (must end with ';')
#            $2 - ORACLE_SID of the target instance
# Outputs:   first field of the first line starting with a digit, or the
#            second field of a line starting with '-' (presumably to catch
#            values after a separator/negative sign -- TODO confirm intent)
# NOTE(review): the heredoc is unquoted, so $sql is expanded by the shell
# and injected verbatim into the sqlplus session; pass trusted SQL only.
get_sql_value() {
local sql=$1
local sid=$2
ORACLE_SID=$sid sqlplus -s / as sysdba <<EOF | awk '/^[[:space:]]*[0-9]/{print $1; exit} /^[[:space:]]*-/{print $2; exit}'
set pagesize 0
set feedback off
set heading off
$sql
EOF
}

# Function to get safe numeric value
#
# Echoes $1 unchanged when it is an optionally signed decimal number;
# otherwise echoes "0" so downstream bc arithmetic never sees garbage
# (e.g. an error message) from a failed query.
safe_number() {
  local candidate=$1
  local numeric_re='^[+-]?[0-9]+\.?[0-9]*$'
  if [[ $candidate =~ $numeric_re ]]; then
    printf '%s\n' "$candidate"
  else
    printf '%s\n' "0"
  fi
}

# Function to validate calculations
#
# Cross-checks the arithmetic consistency of the collected CPU metrics and
# writes a [✓]/[✗] report to $VALIDATION_FILE (truncating it).
# Globals:   VALIDATION_FILE (written)
#            AVAILABLE_CPU, CPU_USAGE_PCT, AVAILABLE_PCT (read when set;
#            otherwise derived from the arguments)
# Arguments: $1 - total CPU cores on the host
#            $2 - cores consumed system-wide
#            $3 - cores consumed by Oracle databases
#            $4 - cores consumed by the OS / everything else
validate_cpu_metrics() {
  local total_cores=$1
  local system_usage=$2
  local db_usage=$3
  local os_usage=$4

  # BUGFIX: the original referenced $available_cores, $system_usage_pct and
  # $available_pct, which were never defined anywhere in the script.  Bind
  # them to the script-level globals when present, else derive them here.
  local available_cores=${AVAILABLE_CPU:-$(awk -v t="$total_cores" -v s="$system_usage" 'BEGIN { printf "%.2f", t - s }')}
  local system_usage_pct=${CPU_USAGE_PCT:-$(awk -v t="$total_cores" -v s="$system_usage" 'BEGIN { printf "%.2f", (t > 0) ? s * 100 / t : 0 }')}
  local available_pct=${AVAILABLE_PCT:-$(awk -v p="$system_usage_pct" 'BEGIN { printf "%.2f", 100 - p }')}

  # Float comparison with a small tolerance: the metrics are sampled at
  # slightly different times, so exact equality (the original bc '==') is
  # too strict -- see the validation notes emitted below.
  _vcm_close() { awk -v a="$1" -v b="$2" 'BEGIN { d = a - b; if (d < 0) d = -d; exit (d <= 0.01) ? 0 : 1 }'; }

  {
    echo "CPU METRICS VALIDATION"
    echo "======================"
    echo "Validation Time: $(date)"
    echo ""
  } > "$VALIDATION_FILE"

  # Rule 1: System + Available = Total
  local calculated_total
  calculated_total=$(awk -v a="$system_usage" -v b="$available_cores" 'BEGIN { printf "%.2f", a + b }')
  if _vcm_close "$calculated_total" "$total_cores"; then
    echo "[✓] System + Available = Total" >> "$VALIDATION_FILE"
    echo " $system_usage (System) + $available_cores (Available) = $total_cores (Total)" >> "$VALIDATION_FILE"
  else
    echo "[✗] System + Available ≠ Total" >> "$VALIDATION_FILE"
    echo " $system_usage + $available_cores = $calculated_total (expected $total_cores)" >> "$VALIDATION_FILE"
  fi

  # Rule 2: DB + OS = System
  local calculated_system
  calculated_system=$(awk -v a="$db_usage" -v b="$os_usage" 'BEGIN { printf "%.2f", a + b }')
  if _vcm_close "$calculated_system" "$system_usage"; then
    echo "[✓] DB + OS = System Usage" >> "$VALIDATION_FILE"
    echo " $db_usage (DB) + $os_usage (OS) = $system_usage (System)" >> "$VALIDATION_FILE"
  else
    echo "[✗] DB + OS ≠ System Usage" >> "$VALIDATION_FILE"
    echo " $db_usage + $os_usage = $calculated_system (expected $system_usage)" >> "$VALIDATION_FILE"
    echo " Difference: $(awk -v a="$system_usage" -v b="$calculated_system" 'BEGIN { printf "%.2f", a - b }') cores unaccounted" >> "$VALIDATION_FILE"
  fi

  # Rule 3: Percentages sum to 100%
  local total_pct
  total_pct=$(awk -v a="$system_usage_pct" -v b="$available_pct" 'BEGIN { printf "%.2f", a + b }')
  if _vcm_close "$total_pct" 100; then
    echo "[✓] System% + Available% = 100%" >> "$VALIDATION_FILE"
    echo " $system_usage_pct% + $available_pct% = 100%" >> "$VALIDATION_FILE"
  else
    echo "[✗] System% + Available% ≠ 100%" >> "$VALIDATION_FILE"
    echo " $system_usage_pct% + $available_pct% = $total_pct%" >> "$VALIDATION_FILE"
  fi

  {
    echo ""
    echo "VALIDATION NOTES:"
    echo "1. Minor discrepancies (<0.5%) may be due to timing differences in metric collection"
    echo "2. Large differences indicate either measurement errors or unaccounted CPU usage"
  } >> "$VALIDATION_FILE"
}

# Get system CPU metrics
# NOTE(review): the awk '{print 100 - $8}' assumes field 8 of top's Cpu(s)
# line is the idle percentage -- layout/locale dependent, confirm on the
# target OS.  cut -d. -f1 truncates to an integer percentage.
TOTAL_CORES=$(nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo)
CPU_USAGE_PCT=$(top -bn1 | grep "Cpu(s)" | awk '{print 100 - $8}' | cut -d. -f1)
# Busy cores system-wide = busy% of total cores (two decimal places).
SYSTEM_CPU=$(echo "scale=2; $CPU_USAGE_PCT * $TOTAL_CORES / 100" | bc)

# Get database CPU usage
# Instance names come from the running pmon background processes,
# e.g. "ora_pmon_ORCL" -> "ORCL" (last '_'-separated field).
DATABASES=$(ps -eo command | grep -E '[p]mon|[_]pmon' | awk -F_ '{print $NF}' | sort | uniq)
TOTAL_DB_CPU=0

# Sum per-instance CPU.  The /100 treats 'CPU Usage Per Sec' as
# centiseconds of CPU per second (hence the CENTISEC name) -- confirm
# against the v$sysmetric documentation for the target release.
for DB in $DATABASES; do
DB_CPU_CENTISEC=$(get_sql_value "SELECT ROUND(SUM(value)) FROM v\$sysmetric WHERE metric_name='CPU Usage Per Sec' AND group_id=2;" "$DB")
DB_CPU_CENTISEC=$(safe_number "$DB_CPU_CENTISEC")
DB_CPU=$(echo "scale=2; $DB_CPU_CENTISEC / 100" | bc)
TOTAL_DB_CPU=$(echo "$TOTAL_DB_CPU + $DB_CPU" | bc)
done

# Calculate derived metrics
OTHER_CPU=$(echo "scale=2; $SYSTEM_CPU - $TOTAL_DB_CPU" | bc)
AVAILABLE_CPU=$(echo "scale=2; $TOTAL_CORES - $SYSTEM_CPU" | bc)
AVAILABLE_PCT=$(echo "scale=2; 100 - $CPU_USAGE_PCT" | bc)

# Run validation
validate_cpu_metrics "$TOTAL_CORES" "$SYSTEM_CPU" "$TOTAL_DB_CPU" "$OTHER_CPU"

# Generate report
# The whole report body is produced inside one brace group and written to
# $REPORT_FILE by a single redirection.
{
echo "ORACLE CPU CAPACITY REPORT WITH VALIDATION"
echo "========================================="
echo "Generated: $(date)"
echo ""
echo "1. SYSTEM RESOURCES"
echo "-------------------"
echo " Total CPU Cores : $TOTAL_CORES"
echo " System CPU Usage : $SYSTEM_CPU cores ($CPU_USAGE_PCT%)"
echo " - Database Usage : $TOTAL_DB_CPU cores"
echo " - OS/Other Usage : $OTHER_CPU cores"
echo " Available Cores : $AVAILABLE_CPU cores ($AVAILABLE_PCT%)"
echo ""
echo "2. VALIDATION SUMMARY"
echo "---------------------"
# Only the one-line [✓]/[✗] verdict markers from the validation log.
grep -E '^\[[✓✗]\]' "$VALIDATION_FILE"
echo ""
echo "3. DETAILED METRICS VALIDATION"
echo "------------------------------"
# Full validation log, verbatim.
cat "$VALIDATION_FILE"
} > "$REPORT_FILE"

echo "Report generated: $REPORT_FILE"
echo "Validation details: $VALIDATION_FILE"


#!/bin/bash
# Oracle CPU Capacity Report with Validation
# Version 2.3 - Includes Cross-Verification
#
# NOTE(review): this is the older v2.3 revision of the v2.4 script kept in
# the same paste; it writes the same style of /tmp report/validation files.

REPORT_FILE="/tmp/cpu_capacity_report_$(date +%Y%m%d_%H%M%S).txt"
VALIDATION_FILE="/tmp/cpu_validation_$(date +%Y%m%d_%H%M%S).log"

# Function to validate calculations
# Cross-checks CPU metric arithmetic and writes [PASS]/[FAIL] lines to
# $VALIDATION_FILE (truncated on each call).
# Globals:   VALIDATION_FILE (written); AVAILABLE_CPU, CPU_USAGE_PCT,
#            AVAILABLE_PCT (read when set, else derived from arguments)
# Arguments: $1 total cores, $2 system cores used, $3 DB cores used,
#            $4 OS/other cores used
validate_cpu_metrics() {
  local total_cores=$1
  local system_usage=$2
  local db_usage=$3
  local os_usage=$4
  # BUGFIX: $available_cores / $system_usage_pct / $available_pct were never
  # defined in the original script; map them to the script globals or derive.
  local available_cores=${AVAILABLE_CPU:-$(awk -v t="$total_cores" -v s="$system_usage" 'BEGIN { printf "%.2f", t - s }')}
  local system_usage_pct=${CPU_USAGE_PCT:-$(awk -v t="$total_cores" -v s="$system_usage" 'BEGIN { printf "%.2f", (t > 0) ? s * 100 / t : 0 }')}
  local available_pct=${AVAILABLE_PCT:-$(awk -v p="$system_usage_pct" 'BEGIN { printf "%.2f", 100 - p }')}

  echo "VALIDATION REPORT" > "$VALIDATION_FILE"
  echo "=================" >> "$VALIDATION_FILE"
  echo "Timestamp: $(date)" >> "$VALIDATION_FILE"
  echo "" >> "$VALIDATION_FILE"

  # Rule 1: System + Available should equal Total.  Comparisons use a
  # 0.01-core tolerance: the inputs are sampled at slightly different
  # moments, so exact equality is too strict.
  local calculated_total
  calculated_total=$(awk -v a="$system_usage" -v b="$available_cores" 'BEGIN { printf "%.2f", a + b }')
  if awk -v a="$calculated_total" -v b="$total_cores" 'BEGIN { d = a - b; if (d < 0) d = -d; exit (d <= 0.01) ? 0 : 1 }'; then
    echo "[PASS] System + Available = Total: $system_usage + $available_cores = $total_cores" >> "$VALIDATION_FILE"
  else
    echo "[FAIL] System + Available ($system_usage + $available_cores) != Total ($total_cores)" >> "$VALIDATION_FILE"
  fi
  # Rule 2: DB + OS should equal System
  local calculated_system
  calculated_system=$(awk -v a="$db_usage" -v b="$os_usage" 'BEGIN { printf "%.2f", a + b }')
  if awk -v a="$calculated_system" -v b="$system_usage" 'BEGIN { d = a - b; if (d < 0) d = -d; exit (d <= 0.01) ? 0 : 1 }'; then
    echo "[PASS] DB + OS = System: $db_usage + $os_usage = $system_usage" >> "$VALIDATION_FILE"
  else
    echo "[FAIL] DB + OS ($db_usage + $os_usage) != System ($system_usage)" >> "$VALIDATION_FILE"
    echo " Difference: $(awk -v a="$system_usage" -v b="$calculated_system" 'BEGIN { printf "%.2f", a - b }') cores" >> "$VALIDATION_FILE"
  fi
  # Rule 3: Percentages should sum to 100%
  local total_pct
  total_pct=$(awk -v a="$system_usage_pct" -v b="$available_pct" 'BEGIN { printf "%.2f", a + b }')
  if awk -v a="$total_pct" 'BEGIN { d = a - 100; if (d < 0) d = -d; exit (d <= 0.01) ? 0 : 1 }'; then
    echo "[PASS] System% + Available% = 100%" >> "$VALIDATION_FILE"
  else
    echo "[FAIL] System% ($system_usage_pct) + Available% ($available_pct) = $total_pct%" >> "$VALIDATION_FILE"
  fi
  echo "" >> "$VALIDATION_FILE"
  echo "Note: Small discrepancies may occur due to timing differences in metric collection" >> "$VALIDATION_FILE"
}

# Get accurate CPU metrics
# Busy% is parsed from top's Cpu(s) line by extracting the idle figure
# before " id" and subtracting it from 100.  NOTE(review): the sed pattern
# depends on top's output layout -- verify on the target procps version.
TOTAL_CORES=$(grep -c ^processor /proc/cpuinfo)
CPU_USAGE_PCT=$(top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1}')
SYSTEM_CPU=$(echo "scale=2; $CPU_USAGE_PCT * $TOTAL_CORES / 100" | bc)

# Get database CPU usage
# Instance list from running pmon processes: "ora_pmon_SID" -> "SID".
DATABASES=$(ps -eo command | grep -E '[p]mon|[_]pmon' | awk -F_ '{print $NF}' | sort | uniq)
TOTAL_DB_CPU=0
# For each instance, fetch 'CPU Usage Per Sec'; the /100 treats the value
# as centiseconds of CPU per second -- confirm against v$sysmetric docs.
for DB in $DATABASES; do
export ORACLE_SID=$DB
DB_CPU_CENTISEC=$(sqlplus -s / as sysdba <<EOF | awk '/^ / {print $1}'
set pagesize 0
set feedback off
set heading off
SELECT ROUND(SUM(value))
FROM v\$sysmetric
WHERE metric_name='CPU Usage Per Sec'
AND group_id=2;
EOF
)
# Guard against empty query output so bc never sees a blank operand.
[ -z "$DB_CPU_CENTISEC" ] && DB_CPU_CENTISEC=0
TOTAL_DB_CPU=$(echo "scale=2; $TOTAL_DB_CPU + ($DB_CPU_CENTISEC / 100)" | bc)
done

# Calculate derived metrics
OTHER_CPU=$(echo "scale=2; $SYSTEM_CPU - $TOTAL_DB_CPU" | bc)
AVAILABLE_CPU=$(echo "scale=2; $TOTAL_CORES - $SYSTEM_CPU" | bc)
AVAILABLE_PCT=$(echo "scale=2; 100 - $CPU_USAGE_PCT" | bc)

# Run validation
# NOTE(review): arguments are unquoted here; fine for numeric values but
# quoting would be safer if any collection step produced an empty string.
validate_cpu_metrics $TOTAL_CORES $SYSTEM_CPU $TOTAL_DB_CPU $OTHER_CPU

# Generate report
# Build the report by appending to $REPORT_FILE section by section.
echo "CPU CAPACITY REPORT WITH VALIDATION" > $REPORT_FILE
echo "===================================" >> $REPORT_FILE
echo "Generated: $(date)" >> $REPORT_FILE
echo "" >> $REPORT_FILE

# Section 1: the raw numbers computed above.
echo "1. SYSTEM RESOURCES" >> $REPORT_FILE
echo "-------------------" >> $REPORT_FILE
echo " Total CPU Cores : $TOTAL_CORES" >> $REPORT_FILE
echo " System CPU Usage : $SYSTEM_CPU cores ($CPU_USAGE_PCT%)" >> $REPORT_FILE
echo " - Database Usage : $TOTAL_DB_CPU cores" >> $REPORT_FILE
echo " - OS/Other Usage : $OTHER_CPU cores" >> $REPORT_FILE
echo " Available Cores : $AVAILABLE_CPU cores ($AVAILABLE_PCT%)" >> $REPORT_FILE
echo "" >> $REPORT_FILE

# Section 2: only the one-line PASS/FAIL verdicts from the validation log.
echo "2. VALIDATION SUMMARY" >> $REPORT_FILE
echo "---------------------" >> $REPORT_FILE
grep -E '^\[(PASS|FAIL)\]' $VALIDATION_FILE >> $REPORT_FILE
echo "" >> $REPORT_FILE

# Section 3: the full validation log, verbatim.
echo "3. DETAILED VALIDATION" >> $REPORT_FILE
echo "----------------------" >> $REPORT_FILE
cat $VALIDATION_FILE >> $REPORT_FILE

echo "" >> $REPORT_FILE
echo "Report generated: $REPORT_FILE" >> $REPORT_FILE
echo "Validation log: $VALIDATION_FILE" >> $REPORT_FILE

# Tell the operator where the files landed.
# NOTE(review): $REPORT_FILE / $VALIDATION_FILE are unquoted throughout
# this section; harmless for these /tmp paths, but quoting would be safer.
echo "CPU report with validation generated:"
echo $REPORT_FILE
echo "Validation details:"
echo $VALIDATION_FILE


RAJ===

#!/bin/bash

# filepath: /path/to/cpu_analysis.sh
#
# CPU analysis report: host CPU count, system usage (mpstat), database
# count/list and per-database CPU (sqlplus), plus a capacity summary.
# Each collector stages its raw result in a file under $LOG_FOLDER, and
# print_report assembles them at the end.

# Set environment variables
LOG_FOLDER="/x/home/oracle/admin/scripts/logs/cpu_analysis"
mkdir -p "$LOG_FOLDER"

# Oracle environment variables (update as per your environment)
# NOTE(review): assumes ~/.profile exports ORACLE_HOME/ORACLE_SID/PATH so
# that sqlplus works -- confirm on the target host.
source ~/.profile

# Temporary files
# One staging file per metric; read back by calculate_capacity/print_report.
TOTAL_CPUS_FILE="$LOG_FOLDER/total_cpus.txt"
SYSTEM_CPU_USAGE_FILE="$LOG_FOLDER/system_cpu_usage.txt"
DATABASE_COUNT_FILE="$LOG_FOLDER/database_count.txt"
DATABASE_LIST_FILE="$LOG_FOLDER/database_list.txt"
DATABASE_CPU_USAGE_FILE="$LOG_FOLDER/database_cpu_usage.txt"
CAPACITY_FILE="$LOG_FOLDER/capacity.txt"

# Function to get total number of CPUs on the host
# Writes the logical CPU count to $TOTAL_CPUS_FILE.  Prefers nproc (works
# even where /proc/cpuinfo is unavailable) and falls back to counting
# "processor" lines, matching the other scripts in this collection.
get_total_cpus() {
  { nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo; } > "$TOTAL_CPUS_FILE"
}

# Function to get system-level CPU usage
# Writes the overall busy percentage (100 - idle) to
# $SYSTEM_CPU_USAGE_FILE using a single 1-second mpstat sample.
# NOTE(review): assumes the last field of mpstat's "all" line is %idle --
# true for common sysstat versions, but the layout varies.  Also note the
# value is a PERCENTAGE, not a CPU count (see calculate_capacity).
get_system_cpu_usage() {
mpstat -P ALL 1 1 | awk '/all/ {print 100 - $NF}' > "$SYSTEM_CPU_USAGE_FILE"
}

# Function to get the number of databases on the host
# Writes COUNT(*) from v$database of the CURRENT instance to
# $DATABASE_COUNT_FILE.
# NOTE(review): v$database exposes one row for the connected instance, so
# this presumably always yields 1 for whatever ORACLE_SID is set -- it does
# not count all databases on the host; confirm intended behavior.
get_number_of_databases() {
sqlplus -s / as sysdba <<EOF > "$DATABASE_COUNT_FILE"
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT COUNT(*) FROM v\$database;
EXIT;
EOF
}

# Function to get list of databases
# Writes the database name from v$database of the current instance to
# $DATABASE_LIST_FILE.
# NOTE(review): like get_number_of_databases, this only sees the instance
# ORACLE_SID points at, not every database on the host -- confirm intent.
get_database_list() {
sqlplus -s / as sysdba <<EOF > "$DATABASE_LIST_FILE"
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT name FROM v\$database;
EXIT;
EOF
}

# Function to get CPU usage for each database
# For every database name in $DATABASE_LIST_FILE, queries the average
# 'Host CPU Utilization (%)' over the last hour and appends it, preceded
# by a "Database: <name>" header, to $DATABASE_CPU_USAGE_FILE.
# NOTE(review): assumes each database name is also its ORACLE_SID -- confirm.
get_database_cpu_usage() {
  local databases db
  databases=$(cat "$DATABASE_LIST_FILE")
  > "$DATABASE_CPU_USAGE_FILE"
  for db in $databases; do
    echo "Database: $db" >> "$DATABASE_CPU_USAGE_FILE"
    # BUGFIX: the original never switched instances, so every iteration
    # queried whichever ORACLE_SID happened to be set.  Point sqlplus at
    # the current database before querying.
    ORACLE_SID=$db sqlplus -s / as sysdba <<EOF >> "$DATABASE_CPU_USAGE_FILE"
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT ROUND(SUM(VALUE) / COUNT(*), 2) FROM v\$sysmetric WHERE METRIC_NAME = 'Host CPU Utilization (%)' AND BEGIN_TIME > SYSDATE - 1/24;
EXIT;
EOF
  done
}

# Function to calculate current available capacity
# Converts the system busy PERCENTAGE (from get_system_cpu_usage) into a
# core count and writes used/available cores to $CAPACITY_FILE.
# BUGFIX: the original computed total_cpus - used_percentage, mixing units
# (a core count minus a percentage), e.g. 8 CPUs at 50% gave "-42" free.
calculate_capacity() {
  local total_cpus used_pct used_cores available_cores
  total_cpus=$(cat "$TOTAL_CPUS_FILE")
  used_pct=$(cat "$SYSTEM_CPU_USAGE_FILE")
  used_cores=$(awk -v t="$total_cpus" -v p="$used_pct" 'BEGIN { printf "%.2f", t * p / 100 }')
  available_cores=$(awk -v t="$total_cpus" -v u="$used_cores" 'BEGIN { printf "%.2f", t - u }')
  echo "Current available capacity (CPUs): $available_cores" > "$CAPACITY_FILE"
  echo "Total used capacity (CPUs): $used_cores" >> "$CAPACITY_FILE"
}

# Function to print the report
# Renders the final human-readable report to stdout from the staging files
# produced by the collector functions.
print_report() {
  # Indent each line of a file by one space (same read semantics as the
  # original cat|while loop: IFS-trimmed, backslash-processed lines).
  _indent_file() {
    while read line; do
      echo " $line"
    done < "$1"
  }
  local rule="-------------------------------------------------------------"

  echo "==================== CPU Analysis Report ===================="
  echo "Date: $(date)"
  echo "$rule"
  echo "Total number of CPUs on the host: $(cat "$TOTAL_CPUS_FILE")"
  echo "$rule"
  echo "System CPU Usage:"
  echo " - Number of CPUs currently in use: $(cat "$SYSTEM_CPU_USAGE_FILE")"
  echo " - Percentage of CPU being used: $(cat "$SYSTEM_CPU_USAGE_FILE")%"
  echo "$rule"
  echo "Number of databases on the host: $(cat "$DATABASE_COUNT_FILE")"
  echo "$rule"
  echo "Database CPU Usage:"
  _indent_file "$DATABASE_CPU_USAGE_FILE"
  echo "$rule"
  echo "Capacity Analysis:"
  _indent_file "$CAPACITY_FILE"
  echo "============================================================="
}

# Main function
# Orchestrates the collection steps in dependency order (CPU count and
# usage first, then database metrics, then the derived capacity), and
# finally prints the consolidated report to stdout.
main() {
# Get total number of CPUs
get_total_cpus

# Get system-level CPU usage
get_system_cpu_usage

# Get number of databases
get_number_of_databases

# Get CPU usage for each database
get_database_list
get_database_cpu_usage

# Calculate current available capacity
calculate_capacity

# Print the report
print_report
}

# Run the main function
main

 


-- Lock a fixed list of database accounts, reporting a per-user result.
-- Run with SET SERVEROUTPUT ON to see the DBMS_OUTPUT messages.
DECLARE
    users_to_lock SYS.ODCIVARCHAR2LIST := SYS.ODCIVARCHAR2LIST('abc', 'def', 'ghi', 'jkl', 'mno');
    -- Receives the post-lock status read back from DBA_USERS.
    -- NOTE(review): the variable shares its name with the DBA_USERS column;
    -- inside the SELECT the column takes precedence so this works, but a
    -- distinct name (e.g. v_account_status) would be clearer.
    account_status VARCHAR2(30);
BEGIN
    FOR i IN 1 .. users_to_lock.COUNT LOOP
        BEGIN
            -- Dynamic SQL is required: ALTER USER is DDL and cannot be
            -- executed statically inside PL/SQL.
            EXECUTE IMMEDIATE 'ALTER USER ' || users_to_lock(i) || ' ACCOUNT LOCK';

            -- Check the account status after attempting to lock
            SELECT account_status INTO account_status
            FROM dba_users
            WHERE username = UPPER(users_to_lock(i));

            IF account_status = 'LOCKED' THEN
                DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' is now locked.');
            ELSE
                DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' is not locked.');
            END IF;
        EXCEPTION
            -- Raised by the status SELECT when no DBA_USERS row matches.
            WHEN NO_DATA_FOUND THEN
                DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
            WHEN OTHERS THEN
                -- ORA-01918: user does not exist (raised by ALTER USER).
                IF SQLCODE = -1918 THEN
                    DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
                ELSE
                    DBMS_OUTPUT.PUT_LINE('An unexpected error occurred for user ' || users_to_lock(i) || '.');
                END IF;
        END;
    END LOOP;
END;
/

-- Report the current account status of a fixed list of users.
-- Requires SELECT access on DBA_USERS; run with SET SERVEROUTPUT ON.
DECLARE
    users_to_check SYS.ODCIVARCHAR2LIST := SYS.ODCIVARCHAR2LIST('abc', 'def', 'ghi', 'jkl', 'mno');
    -- Receives ACCOUNT_STATUS from DBA_USERS (e.g. 'LOCKED' as checked in
    -- the companion lock block above).
    account_status VARCHAR2(30);
BEGIN
    FOR i IN 1 .. users_to_check.COUNT LOOP
        BEGIN
            SELECT account_status INTO account_status
            FROM dba_users
            WHERE username = UPPER(users_to_check(i));

            DBMS_OUTPUT.PUT_LINE('User ' || users_to_check(i) || ' is ' || account_status || '.');
        EXCEPTION
            -- No DBA_USERS row: the user does not exist.
            WHEN NO_DATA_FOUND THEN
                DBMS_OUTPUT.PUT_LINE('User ' || users_to_check(i) || ' does not exist.');
            WHEN OTHERS THEN
                DBMS_OUTPUT.PUT_LINE('An unexpected error occurred for user ' || users_to_check(i) || '.');
        END;
    END LOOP;
END;
/

 Yes, you can achieve this by checking if the user exists before attempting to lock the account. Here's how you can modify your SQL to handle this and send it as a single statement:


sql
BEGIN
EXECUTE IMMEDIATE 'ALTER USER abc ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User abc does not exist.');
ELSE
RAISE; -- Re-raise other exceptions
END IF;
END;
/

BEGIN
EXECUTE IMMEDIATE 'ALTER USER def ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User def does not exist.');
ELSE
RAISE; -- Re-raise other exceptions
END IF;
END;
/
Explanation:

BEGIN...END; Blocks: We enclose each ALTER USER statement within a PL/SQL block. This allows us to handle exceptions for each user individually.
EXECUTE IMMEDIATE: We use dynamic SQL (EXECUTE IMMEDIATE) to execute the ALTER USER command. This is necessary because we're embedding SQL within PL/SQL.
Exception Handling: The EXCEPTION block catches any errors that occur during the execution of the ALTER USER command.
WHEN OTHERS: This handles all exceptions.
IF SQLCODE = -1918 THEN: We check if the specific error ORA-01918 (user not found) occurred.
DBMS_OUTPUT.PUT_LINE(...): If the user doesn't exist, we print a message to the output (you'll need to enable server output to see this message, e.g., SET SERVEROUTPUT ON; in SQL*Plus or SQL Developer). You could replace this with other actions like logging to a table or raising a different exception.
ELSE RAISE;: For any other exception besides ORA-01918, we re-raise the exception so that it's not swallowed and can be handled by a higher-level error handler if needed.
/: This is a SQL*Plus or SQL Developer command that separates PL/SQL blocks and tells the database to execute the preceding block.
How to send it:

You would send the entire block of code above as a single SQL statement to your $sql1 and $sql2 variables. The database will execute each PL/SQL block sequentially. Because of the error handling, if a user doesn't exist, the code will continue to the next block without stopping. It will only attempt to lock users that actually exist (or at least, it will handle the "user not found" error gracefully if they don't).

Rate this answer:

Mattewada Rajkiran
can you do it in one block for 5 users?


Gemini
Sure, I can consolidate that into a single block for five users! Here's how you can do it:

sql
DECLARE
    -- BUGFIX: a PL/SQL FOR loop cannot iterate a parenthesised list of
    -- string literals ('abc', 'def', ...); the usernames must live in a
    -- collection that is indexed numerically.
    users_to_lock SYS.ODCIVARCHAR2LIST :=
        SYS.ODCIVARCHAR2LIST('abc', 'def', 'user3', 'user4', 'user5');
BEGIN
    FOR i IN 1 .. users_to_lock.COUNT LOOP
        BEGIN
            EXECUTE IMMEDIATE 'ALTER USER ' || users_to_lock(i) || ' ACCOUNT LOCK';
        EXCEPTION
            WHEN OTHERS THEN
                -- ORA-01918: user does not exist.
                IF SQLCODE = -1918 THEN
                    DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
                ELSE
                    RAISE; -- Re-raise other exceptions
                END IF;
        END;
    END LOOP;
END;
/
This approach uses a FOR loop to iterate through a list of usernames. Within the loop, it attempts to lock each user's account. The exception handling remains the same, specifically addressing the ORA-01918 error. If a user doesn't exist, it prints a message; otherwise, it moves on to the next user in the list. Now, you can provide all five usernames in a single, concise block!


To execute multiple SQL statements in one go and handle cases where a user might not exist, you can use PL/SQL blocks with conditional logic. Here's how you can achieve this:

BEGIN
-- Attempt to lock user 'abc'
BEGIN
EXECUTE IMMEDIATE 'ALTER USER abc ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User abc does not exist.');
ELSE
RAISE;
END IF;
END;

-- Attempt to lock user 'def'
BEGIN
EXECUTE IMMEDIATE 'ALTER USER def ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User def does not exist.');
ELSE
RAISE;
END IF;
END;
END;
/
Explanation:
PL/SQL Block: The entire operation is wrapped in a PL/SQL block, allowing for procedural logic.
EXECUTE IMMEDIATE: This is used to execute dynamic SQL statements.
Exception Handling: Each ALTER USER statement is wrapped in its own BEGIN...EXCEPTION...END block to handle exceptions individually.
SQLCODE -1918: This specific error code corresponds to ORA-01918, which indicates that the user does not exist. The block checks for this error and handles it gracefully by printing a message.
DBMS_OUTPUT.PUT_LINE: This is used to output messages. Ensure that DBMS_OUTPUT is enabled in your SQL client to see these messages.
Usage:
You can send this entire block to your SQL execution environment (e.g., SQL*Plus, SQL Developer) as a single command.
This approach ensures that each user is only locked if they exist, and it provides feedback if a user does not exist.
Is this helpful?

can you do it in one block for 5 users? instead of multiple $sql1, $sql2, etc.?


Certainly! You can handle multiple users in a single PL/SQL block by iterating over a list of usernames and applying the same logic to each. Here's how you can do it for five users:

DECLARE
users_to_lock SYS.ODCIVARCHAR2LIST := SYS.ODCIVARCHAR2LIST('abc', 'def', 'ghi', 'jkl', 'mno');
BEGIN
FOR i IN 1 .. users_to_lock.COUNT LOOP
BEGIN
EXECUTE IMMEDIATE 'ALTER USER ' || users_to_lock(i) || ' ACCOUNT LOCK';
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -1918 THEN
DBMS_OUTPUT.PUT_LINE('User ' || users_to_lock(i) || ' does not exist.');
ELSE
RAISE;
END IF;
END;
END LOOP;
END;
/
Explanation:
SYS.ODCIVARCHAR2LIST: This is a collection type that can hold a list of strings. It's used here to store the usernames you want to lock.
FOR LOOP: Iterates over each username in the list.
EXECUTE IMMEDIATE: Dynamically constructs and executes the ALTER USER statement for each username.
Exception Handling: Catches the ORA-01918 error for non-existent users and outputs a message using DBMS_OUTPUT.PUT_LINE.
DBMS_OUTPUT.PUT_LINE: Outputs messages for each user that does not exist. Ensure DBMS_OUTPUT is enabled in your SQL client to see these messages.
Usage:
You can execute this block in your SQL environment as a single command.
This approach efficiently handles multiple users in one block, checking for existence and locking accounts as needed.





To enhance the document for the "move" method in the context of converting a non-CDB to a CDB, we need to outline the specific steps, benefits, and considerations associated with this approach. The "move" method involves relocating datafiles without copying them, similar to the NOCOPY method, but with additional considerations for file management and metadata updates.

Enhanced Document: Non-CDB to CDB Conversion Using MOVE Method

Internal Actions:

  1. CREATE PLUGGABLE DATABASE Statement:

    • This command is used within the CDB to initiate the creation of the new PDB using the MOVE method.
  2. XML File Parsing:

    • Oracle reads and parses the new_pdb.xml file generated earlier to understand the structure and metadata of the non-CDB.
  3. PDB Creation:

    • A new PDB entry is created in the CDB's data dictionary.
  4. Tablespace and Datafile Setup:

    • Based on the XML file:
      • New tablespaces are created within the CDB to match the tablespaces found in the XML.
      • Datafiles are moved from the non-CDB location to the CDB location, updating paths as necessary.
  5. Metadata Update:

    • Oracle updates the metadata in the new PDB to reference the moved datafiles. The path is modified using the file_name_convert clause.
  6. Shared Files:

    • The PDB now uses the moved datafiles, which are no longer associated with the original non-CDB.
  7. Control File:

    • The PDB will get its own control file, separate from the non-CDB.
  8. Object Recreation:

    • Database objects (tables, views, etc.) are recreated inside the PDB based on the XML metadata.
  9. User Accounts:

    • User accounts, roles, and privileges are created and assigned within the PDB.
  10. PDB Status:

    • The new PDB is created in a MOUNTED state. It is not yet OPEN.
  11. Log Files:

    • The new PDB will have its own redo log files, separate from the CDB and other PDBs.
  12. Undo Tablespace:

    • The new PDB will have its own undo tablespace.
  13. Temporary Tablespace:

    • The new PDB will have its own temporary tablespace.
  14. Original Non-CDB Data Files:

    • The original non-CDB data files are now considered to belong to the PDB.

Pros:

  • Speed: The conversion is fast because files are moved rather than copied.
  • Storage Efficiency: No additional storage space is required, as the original datafiles are moved.

Cons:

  • Risk to Original Data: The PDB now uses the moved non-CDB datafiles. Any corruption within the PDB could affect the data within the moved files.
  • Complex Rollback: Rolling back to the original non-CDB is significantly more complex and risky.
  • Interdependency: If the PDB is dropped, the files are also dropped, as they are now considered to belong to the PDB.

Rollback for MOVE

Warning: Rollback for MOVE is complicated and carries a significant risk of data loss if not performed carefully.

  1. Stop and Drop the PDB:

    • If the PDB is open, you must close it.
    • Drop the PDB, including datafiles.
  2. Verification (Critical):

    • Check the original file locations (oldspec): because you executed the drop INCLUDING DATAFILES, the files under oldspec were dropped as well — they were considered part of the PDB.
  3. Recover from Backup:

    • If the files were dropped, you have to restore them from backup. If you didn't make a backup of the files, there is no way to recover them.
  4. Recover to a New Name:

    • If you don't have a backup of the non-CDB, you can try to start the non-CDB as a new database, with a new name.
  5. Clean Up:

    • Remove any new files created, like log files and undo tablespace.
    • Try to start the non-CDB again.
  6. Phase 3: Post-Creation Cleanup:

    • Run a cleanup to get rid of non-CDB items in the new pluggable database using @$ORACLE_HOME/rdbms/admin/noncdb_to_pdb.sql.

Internal Actions:

  • Script Execution: The noncdb_to_pdb.sql script is designed to remove remnants of the non-CDB structure from the newly created PDB.
  • SYS Connection: This script needs to be run connected as a user with SYSDBA privileges, and connected to the PDB.
  • Script Actions: The specific actions in the script are version-dependent, but they generally include:
    • Invalidating Obsolete Objects: It might drop or invalidate some objects that are no longer relevant in the PDB environment.
    • Removing Non-PDB User: It likely removes the database user SYSBACKUP, as well as other database objects that are no longer used in a PDB environment.
    • Updating Dictionary: It modifies some data dictionary information to correctly reflect the PDB's identity.
    • Resource Management: It modifies some parameters in the database to suit it to the new PDB environment.
    • Cleaning up: Removes data, settings, and items that are no longer used.
    • Recompiles: Recompile invalid objects.

Why it is Important:

This step is vital for a clean conversion. It ensures that the PDB is properly integrated into the CDB environment and doesn't retain unnecessary or conflicting information from the original non-CDB. This cleanup script should be run in the PDB, and not in the CDB.

Method 1: COPY vs Method 2: NOCOPY vs Method 3: MOVE - Summary Table

| Feature | COPY | NOCOPY | MOVE |
| --- | --- | --- | --- |
| Datafiles | Copied to new location. | Reused from the original non-CDB. | Moved from original non-CDB. |
| Speed | Slower (due to file copy). | Faster (no file copy). | Fast (file move). |
| Storage | Requires double the storage initially. | No extra storage initially. | No extra storage initially. |
| Original Data | Isolated. Changes in PDB do not affect non-CDB data. | Shared. Changes in PDB can affect non-CDB data (risky). | Shared. Changes in PDB can affect non-CDB data (risky). |
| Rollback | Easier: Drop PDB, copied files are removed. Original files untouched. | Difficult & Risky: Requires careful manual intervention. Data loss possible. ORA-01122 errors likely. | Difficult & Risky: Requires careful manual intervention. Data loss possible. |
| Risk | Lower risk to original non-CDB data. | Higher risk to original non-CDB data. | Higher risk to original non-CDB data. |
| File removal | If the PDB is dropped, only the copied files are removed. | If the PDB is dropped, all files from the old non-CDB are removed. | If the PDB is dropped, all files from the old non-CDB are removed. |
| Dependencies | No dependencies with the non-CDB data files. | Strong dependency with non-CDB data files. | Strong dependency with non-CDB data files. |
| ORA errors | No errors will happen with the original non-CDB if you shut down the CDB and try to start it. | ORA-01122, ORA-01110, ORA-01204 will happen if you shut down the CDB and try to start the non-CDB. | ORA-01122, ORA-01110, ORA-01204 will happen if you shut down the CDB and try to start the non-CDB. |

Key Points & Considerations:

  • Datafile Management: The MOVE method involves relocating datafiles, which requires careful management to ensure paths are updated correctly.
  • file_name_convert is Essential: This clause is critical for relocating datafiles to the correct location in the CDB.
  • Metadata vs. Data: The DBMS_PDB.DESCRIBE procedure only extracts metadata. The data itself is moved during the CREATE PLUGGABLE DATABASE process.
  • Downtime: The non-CDB is unavailable during the SHUTDOWN and data file move. The time required for the move is directly related to the size of the non-CDB's datafiles.
  • CDB Requirements: The CDB must have sufficient disk space for the new PDB's datafiles, and it must be running and accessible.
  • Non-CDB files: After the conversion, you might decide to keep the non-CDB files as a backup, or remove them (only if you used COPY).
  • Compatibility: The Oracle version of the CDB should be the same or later than the non-CDB.
  • Backup: Always take a backup of the files before using MOVE.

This enhanced document provides a comprehensive overview of the MOVE method for converting a non-CDB to a CDB, highlighting the internal actions, pros and cons, rollback procedures, and key considerations.



Migrating a non-CDB with Transparent Data Encryption (TDE) enabled to a CDB involves several steps to ensure that encryption keys and encrypted data are properly handled. TDE is used to encrypt data at rest, and it's crucial to manage the encryption keys during the migration process. Below are detailed steps and commands to facilitate this migration:

Pre-Migration Steps

  1. Verify TDE Configuration:

    • Ensure that TDE is properly configured in the non-CDB. Check the status of the wallet and the encryption keys.
    SELECT * FROM V$ENCRYPTION_WALLET;
    
  2. Backup the Wallet:

    • Backup the wallet directory from the non-CDB. This is crucial as the wallet contains the encryption keys.
    cp -r $ORACLE_BASE/admin/<non-cdb>/wallet /backup/location/
    
  3. Backup the Database:

    • Perform a full backup of the non-CDB to ensure data safety.

Migration Steps

  1. Prepare the CDB Environment:

    • Ensure that the CDB is configured to use TDE. If not, set up TDE in the CDB.
    ADMINISTER KEY MANAGEMENT CREATE KEYSTORE '/path/to/cdb/wallet' IDENTIFIED BY "password";
    ADMINISTER KEY MANAGEMENT SET KEYSTORE OPEN IDENTIFIED BY "password";
    ADMINISTER KEY MANAGEMENT SET ENCRYPTION KEY IDENTIFIED BY "password" WITH BACKUP;
    
  2. Copy the Wallet:

    • Copy the wallet from the non-CDB to the CDB's wallet location.
    cp -r /backup/location/wallet /path/to/cdb/wallet
    
  3. Open the Wallet in CDB:

    • Open the wallet in the CDB to access the encryption keys.
    ADMINISTER KEY MANAGEMENT SET KEYSTORE OPEN IDENTIFIED BY "password";
    
  4. Create Pluggable Database:

    • Use the CREATE PLUGGABLE DATABASE command to migrate the non-CDB to a PDB within the CDB.
    CREATE PLUGGABLE DATABASE pdb_name USING '/path/to/new_pdb.xml' NOCOPY
    FILE_NAME_CONVERT = ('/path/to/non-cdb/datafiles', '/path/to/cdb/datafiles');
    
  5. Verify Encryption in PDB:

    • Check that the encryption keys are accessible and that encrypted data is intact in the new PDB.
    SELECT * FROM V$ENCRYPTION_WALLET;
    SELECT TABLESPACE_NAME, ENCRYPTED FROM DBA_TABLESPACES WHERE ENCRYPTED = 'YES';
    

Post-Migration Steps

  1. Close the Wallet:

    • After verifying the migration, close the wallet in the CDB.
    ADMINISTER KEY MANAGEMENT SET KEYSTORE CLOSE IDENTIFIED BY "password";
    
  2. Backup the CDB Wallet:

    • Backup the wallet in the CDB to ensure encryption keys are safe.
    cp -r /path/to/cdb/wallet /backup/location/
    
  3. Test the PDB:

    • Perform tests to ensure that the PDB is functioning correctly and that encrypted data is accessible.
  4. Cleanup:

    • Remove any temporary files or backups that are no longer needed.

Additional Considerations

  • Compatibility: Ensure that the Oracle version of the CDB supports TDE and is compatible with the non-CDB.
  • Security: Maintain security protocols during the migration, especially when handling encryption keys and wallets.
  • Documentation: Document each step of the migration process for future reference and auditing purposes.

By following these detailed steps, you can successfully migrate a non-CDB with TDE enabled to a CDB, ensuring that encryption keys and encrypted data are properly managed throughout the process.



Draj

#!/bin/bash
# Oracle CPU Capacity Report Generator
# Works for both Exadata and non-Exadata systems
# Version 1.0
#
# Produces a multi-section report (host overview, per-database CPU usage,
# OS-level history, Exadata extras) as a timestamped file in $OUTPUT_DIR.

# Configuration
OUTPUT_DIR="/tmp/oracle_cpu_reports"
REPORT_DATE=$(date +%Y%m%d_%H%M%S)
REPORT_FILE="$OUTPUT_DIR/cpu_capacity_report_$REPORT_DATE.txt"
ORACLE_ENV="/etc/oratab" # Default oratab location

# Create output directory
mkdir -p "$OUTPUT_DIR"

# Function to check if Exadata
# Returns 0 (true) when the host looks like an Exadata system, probing in
# order: /etc/oracle-release contents, DMI hardware strings, and the
# presence of the storage-cell software directory.
# BUGFIX: the stderr redirect was attached to grep instead of dmidecode,
# so dmidecode (which needs root, and may be absent) printed errors to the
# terminal; silence dmidecode itself.
is_exadata() {
if grep -q "Exadata" /etc/oracle-release 2>/dev/null ||
dmidecode 2>/dev/null | grep -q "Exadata" ||
[ -d /opt/oracle.cell ]; then
return 0
else
return 1
fi
}

# Function to run SQL and format output
# Echoes the SQL text into the report, then executes it via sqlplus and
# appends the query output to $REPORT_FILE.
# Globals:   REPORT_FILE (appended)
# Arguments: $1 - SQL text
#            $2 - connection mode: "sys" -> / as sysdba, anything else
#                 -> "/ as sysoper"
# NOTE(review): the sysoper fallback is unusual -- confirm it is ever used
# and that the caller's queries are permitted under sysoper.
run_sql() {
local sql=$1
local conn=$2
echo -e "\n$sql" >> "$REPORT_FILE"
if [ "$conn" == "sys" ]; then
sqlplus -s / as sysdba << EOF >> "$REPORT_FILE"
set pagesize 1000
set linesize 200
set heading on
set feedback off
$sql
EOF
else
sqlplus -s "/ as sysoper" << EOF >> "$REPORT_FILE"
set pagesize 1000
set linesize 200
set heading on
set feedback off
$sql
EOF
fi
}

# Start report
echo "Oracle CPU Capacity Report - $(date)" > "$REPORT_FILE"
echo "====================================" >> "$REPORT_FILE"

# System Overview
# Host, CPU, memory, load and top-consumer snapshots from standard tools.
echo -e "\n1. SYSTEM OVERVIEW" >> "$REPORT_FILE"
echo "1.1. Host Information" >> "$REPORT_FILE"
echo "---------------------" >> "$REPORT_FILE"
hostnamectl >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

echo "1.2. CPU Information" >> "$REPORT_FILE"
echo "--------------------" >> "$REPORT_FILE"
lscpu >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

echo "1.3. Memory Information" >> "$REPORT_FILE"
echo "-----------------------" >> "$REPORT_FILE"
free -h >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

echo "1.4. Uptime and Load Average" >> "$REPORT_FILE"
echo "----------------------------" >> "$REPORT_FILE"
uptime >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

# Top 20 processes sorted by CPU consumption (header line included).
echo "1.5. Top Processes by CPU" >> "$REPORT_FILE"
echo "-------------------------" >> "$REPORT_FILE"
ps -eo pid,user,pcpu,pmem,cmd --sort=-pcpu | head -20 >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

# Check if Exadata
# cellcli may not be available on a compute node, hence the fallback text.
if is_exadata; then
echo "1.6. Exadata Detected - Additional Metrics" >> "$REPORT_FILE"
echo "------------------------------------------" >> "$REPORT_FILE"
echo "Exadata storage servers:" >> "$REPORT_FILE"
cellcli -e list cell detail >> "$REPORT_FILE" 2>/dev/null || echo "Could not query cellcli" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"
fi

# Oracle Database CPU Usage
echo -e "\n2. ORACLE DATABASE CPU USAGE" >> "$REPORT_FILE"

# Get list of running databases from oratab
# NOTE(review): oratab lists every CONFIGURED instance, including stopped
# ones -- sqlplus calls for those will put connection errors in the report.
DATABASES=$(grep -v '^#' $ORACLE_ENV | grep -v '^$' | cut -d: -f1)

for DB in $DATABASES; do
export ORACLE_SID=$DB
# NOTE(review): the "2.1." section number repeats for every database.
echo -e "\n2.1. Database: $DB" >> "$REPORT_FILE"
echo "=========================" >> "$REPORT_FILE"
# High-level CPU usage
# NOTE(review): this selects instance_name from v$sysmetric, which does
# not appear to be a column of that view -- confirm the query runs.
SQL_HIGHLEVEL="
SELECT
TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') as report_time,
instance_name,
ROUND(value/100, 2) as cpu_cores_used,
(SELECT ROUND(value/100, 2) FROM v\$sysmetric WHERE metric_name='CPU Usage Per Sec' AND group_id=2) as cpu_cores_used_sysmetric,
(SELECT ROUND(utilization, 2) FROM v\$sysmetric_summary WHERE metric_name='CPU Usage Per Sec') as cpu_pct
FROM v\$sysmetric
WHERE metric_name='CPU Usage Per Sec'
AND group_id=2;"
run_sql "$SQL_HIGHLEVEL" "sys"
# Top SQL by CPU
# Ten highest-CPU statements from v$sqlstats with per-execution stats.
SQL_TOPSQL="
SELECT
sql_id,
executions,
ROUND(elapsed_time/1000000, 2) as elapsed_sec,
ROUND(cpu_time/1000000, 2) as cpu_sec,
ROUND(buffer_gets/decode(executions,0,1,executions)) as buffer_gets_per_exec,
SUBSTR(sql_text, 1, 100) as sql_text_sample
FROM (
SELECT sql_id, executions, elapsed_time, cpu_time, buffer_gets, sql_text,
RANK() OVER (ORDER BY cpu_time DESC) as cpu_rank
FROM v\$sqlstats
WHERE executions > 0)
WHERE cpu_rank <= 10;"
run_sql "$SQL_TOPSQL" "sys"
# CPU usage history
# Samples from v$sysmetric_history restricted to the last hour.
SQL_HISTORY="
SELECT
TO_CHAR(begin_time, 'YYYY-MM-DD HH24:MI') as time,
ROUND(value/100, 2) as cpu_cores_used
FROM v\$sysmetric_history
WHERE metric_name='CPU Usage Per Sec'
AND group_id=2
AND begin_time > SYSDATE - 1/24 -- Last hour
ORDER BY begin_time;"
run_sql "$SQL_HISTORY" "sys"
done

# Detailed CPU Analysis — OS-level history (sar) and per-core view (mpstat).
{
printf '\n%s\n' "3. DETAILED CPU ANALYSIS"
echo "3.1. CPU Usage History (sar)"
echo "----------------------------"
sar -u 2>/dev/null || echo "sar command not available"
echo ""

echo "3.2. CPU Usage by Core"
echo "----------------------"
mpstat -P ALL 2>/dev/null || echo "mpstat command not available"
echo ""
} >> "$REPORT_FILE"

# Exadata-specific reports
if is_exadata; then
{
printf '\n%s\n' "4. EXADATA-SPECIFIC METRICS"
echo "4.1. Cell CPU Utilization"
echo "-------------------------"
cellcli -e "list metriccurrent where name = 'CD_IO_UTIL' or name = 'CD_CPU_UTIL'" 2>/dev/null
echo "4.2. Smart Scan Efficiency"
echo "-------------------------"
} >> "$REPORT_FILE"
SQL_EXADATA="
SELECT
name,
value
FROM v\$sysstat
WHERE name LIKE '%cell%'
AND (name LIKE '%offload%' OR name LIKE '%smart scan%');"
run_sql "$SQL_EXADATA" "sys"
fi

# Recommendations
{
printf '\n%s\n' "5. RECOMMENDATIONS"
echo "5.1. Immediate Actions"
echo "----------------------"
echo "- Review top CPU-consuming SQL statements identified in section 2"
echo "- Check for runaway processes in section 1.5"
echo ""

echo "5.2. Long-term Considerations"
echo "-----------------------------"
echo "- Consider database consolidation if multiple instances show high CPU"
echo "- Evaluate hardware upgrades based on peak CPU usage metrics"
} >> "$REPORT_FILE"
if is_exadata; then
echo "- Review Exadata storage cell CPU utilization for potential rebalancing" >> "$REPORT_FILE"
fi

# Final summary
{
printf '\n%s\n' "REPORT GENERATION COMPLETE"
echo "Report saved to: $REPORT_FILE"
} >> "$REPORT_FILE"

# Print completion message
echo "CPU capacity report generated successfully at $REPORT_FILE"




DRAJ2



#!/bin/bash
# Oracle CPU Capacity Report - Vertical Format
# Works for both Exadata and non-Exadata
# Version 2.0 - Simplified Output
#
# NOTE(review): this script was pasted into the page as a run-on line,
# which destroyed the heredoc blocks and made it unrunnable.  It is
# re-flowed below statement-for-statement; the label alignment inside the
# report strings was lost in the paste and has been reconstructed.

# Configuration
REPORT_FILE="/tmp/cpu_capacity_report_$(date +%Y%m%d_%H%M%S).txt"
ORACLE_ENV="/etc/oratab"

# Function to check if Exadata
is_exadata() {
if grep -q "Exadata" /etc/oracle-release 2>/dev/null || dmidecode | grep -q "Exadata" 2>/dev/null || [ -d /opt/oracle.cell ]; then
return 0
else
return 1
fi
}

# Function to run SQL and get single value
#   $1 - SQL text; $2 - "sys" for sysdba, anything else sysoper.
get_sql_value() {
local sql=$1
local conn=$2
if [ "$conn" == "sys" ]; then
sqlplus -s / as sysdba << EOF | awk '/^ / {print $1}'
set pagesize 0
set feedback off
set heading off
$sql
EOF
else
sqlplus -s "/ as sysoper" << EOF | awk '/^ / {print $1}'
set pagesize 0
set feedback off
set heading off
$sql
EOF
fi
}

# Header
echo "==============================================" > "$REPORT_FILE"
echo " CPU CAPACITY REPORT - $(hostname)" >> "$REPORT_FILE"
echo " Generated: $(date)" >> "$REPORT_FILE"
echo "==============================================" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

# 1. System CPU Overview
echo "1. HOST CPU RESOURCES" >> "$REPORT_FILE"
echo "---------------------" >> "$REPORT_FILE"

# Get CPU info
TOTAL_CORES=$(grep -c ^processor /proc/cpuinfo)
TOTAL_THREADS=$(lscpu | grep 'Thread(s) per core' | awk '{print $4}')
TOTAL_SOCKETS=$(lscpu | grep 'Socket(s)' | awk '{print $2}')
CPU_MODEL=$(lscpu | grep 'Model name' | cut -d':' -f2 | sed 's/^[ \t]*//')

# Get CPU usage (1 min avg)
CPU_LOAD=$(awk '{print $1}' /proc/loadavg)
CPU_USAGE_PCT=$(top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1}')

echo " Hostname : $(hostname)" >> "$REPORT_FILE"
echo " CPU Model : $CPU_MODEL" >> "$REPORT_FILE"
echo " Total Sockets : $TOTAL_SOCKETS" >> "$REPORT_FILE"
echo " Total Cores : $TOTAL_CORES" >> "$REPORT_FILE"
echo " Total Threads : $((TOTAL_CORES * TOTAL_THREADS))" >> "$REPORT_FILE"
echo " Current CPU Usage : $CPU_USAGE_PCT%" >> "$REPORT_FILE"
echo " Load Average (1m) : $CPU_LOAD" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

# 2. Oracle Database CPU Usage
echo "2. ORACLE DATABASE CPU USAGE" >> "$REPORT_FILE"
echo "----------------------------" >> "$REPORT_FILE"

# Get list of running databases (from pmon background processes)
DATABASES=$(ps -ef | grep pmon | grep -v grep | awk -F_ '{print $3}')
TOTAL_DBS=$(echo "$DATABASES" | wc -l)
echo " Total Databases : $TOTAL_DBS" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

TOTAL_DB_CPU=0

for DB in $DATABASES; do
export ORACLE_SID=$DB

# "CPU Usage Per Sec" /100 expresses the metric as CPU cores.
DB_CPU=$(get_sql_value "
SELECT ROUND(SUM(value)/100, 2)
FROM v\$sysmetric
WHERE metric_name='CPU Usage Per Sec'
AND group_id=2;" "sys")

DB_CPU_PCT=$(get_sql_value "
SELECT ROUND(SUM(value)/$TOTAL_CORES, 2)
FROM v\$sysmetric
WHERE metric_name='CPU Usage Per Sec'
AND group_id=2;" "sys")

TOTAL_DB_CPU=$(echo "$TOTAL_DB_CPU + $DB_CPU" | bc)

echo " Database: $DB" >> "$REPORT_FILE"
echo " CPU Cores Used : $DB_CPU" >> "$REPORT_FILE"
echo " CPU Percentage : $DB_CPU_PCT%" >> "$REPORT_FILE"

# Get top consuming SQL for this DB
TOP_SQL=$(get_sql_value "
SELECT sql_id FROM (
SELECT sql_id, cpu_time
FROM v\$sqlstats
ORDER BY cpu_time DESC)
WHERE rownum = 1;" "sys")

echo " Top SQL ID : $TOP_SQL" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"
done

# 3. Capacity Summary
echo "3. CAPACITY SUMMARY" >> "$REPORT_FILE"
echo "-------------------" >> "$REPORT_FILE"

AVAILABLE_CPU=$(echo "$TOTAL_CORES - $TOTAL_DB_CPU" | bc)
AVAILABLE_PCT=$(echo "100 - $CPU_USAGE_PCT" | bc)

echo " Total CPU Cores : $TOTAL_CORES" >> "$REPORT_FILE"
echo " Used by Databases : $TOTAL_DB_CPU" >> "$REPORT_FILE"
echo " Used by OS/Other : $(echo "$CPU_USAGE_PCT*$TOTAL_CORES/100 - $TOTAL_DB_CPU" | bc | awk '{printf "%.2f", $1}')" >> "$REPORT_FILE"
echo " Available Cores : $AVAILABLE_CPU" >> "$REPORT_FILE"
echo " Available Percent : $AVAILABLE_PCT%" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

# 4. Exadata Specific (if detected)
if is_exadata; then
echo "4. EXADATA METRICS" >> "$REPORT_FILE"
echo "------------------" >> "$REPORT_FILE"
CELL_CPU=$(cellcli -e "list metriccurrent CD_CPU_UTIL" 2>/dev/null | awk '{print $4}')
CELL_COUNT=$(cellcli -e "list cell" 2>/dev/null | wc -l)
echo " Storage Cells : $CELL_COUNT" >> "$REPORT_FILE"
echo " Avg Cell CPU : $CELL_CPU%" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"
fi

# 5. Recommendations
echo "5. RECOMMENDATIONS" >> "$REPORT_FILE"
echo "------------------" >> "$REPORT_FILE"
if (( $(echo "$CPU_USAGE_PCT > 80" | bc -l) )); then
echo " * CRITICAL: CPU usage exceeds 80%" >> "$REPORT_FILE"
echo " - Investigate top SQL statements immediately" >> "$REPORT_FILE"
echo " - Consider adding CPU capacity" >> "$REPORT_FILE"
elif (( $(echo "$CPU_USAGE_PCT > 60" | bc -l) )); then
echo " * WARNING: CPU usage exceeds 60%" >> "$REPORT_FILE"
echo " - Review performance of top SQL statements" >> "$REPORT_FILE"
echo " - Plan for capacity expansion" >> "$REPORT_FILE"
else
echo " * CPU utilization is within normal range" >> "$REPORT_FILE"
fi
echo "" >> "$REPORT_FILE"

echo "Report generated: $REPORT_FILE" >> "$REPORT_FILE"

# Display report path
echo ""
echo "CPU capacity report generated:"
echo "$REPORT_FILE"


=== DRAJ4

#!/bin/bash
# Oracle CPU Capacity Report - No Sudo Required
# Works for both Exadata and non-Exadata
# Version 2.1 - No Privileged Commands

# Configuration
REPORT_FILE="/tmp/cpu_capacity_report_$(date +%Y%m%d_%H%M%S).txt"
ORACLE_ENV="/etc/oratab"

# Function to check if Exadata (without privileged commands)
# Two unprivileged markers are consulted: the storage-cell software tree
# and the Exadata tag in /etc/oracle-release.  Returns 0 when either hits.
is_exadata() {
    # Cheapest check first: the cell software directory.
    if [ -d /opt/oracle.cell ]; then
        return 0
    fi
    # Otherwise the grep status (file present and tagged) is our result.
    [ -f /etc/oracle-release ] && grep -q "Exadata" /etc/oracle-release 2>/dev/null
}

# Function to run SQL and get single value
#   $1 - SQL text; $2 - "sys" for a sysdba connection, anything else sysoper.
# Output parsing: with heading/feedback off, sqlplus left-pads numeric
# results, so awk keeps only the first field of lines that start with a
# space.  NOTE(review): a value printed at column 0 would be missed by
# '/^ /' — confirm against the numformat in use before relying on this.
get_sql_value() {
    local sql=$1
    local conn=$2
    if [ "$conn" == "sys" ]; then
        sqlplus -s / as sysdba << EOF | awk '/^ / {print $1}'
set pagesize 0
set feedback off
set heading off
$sql
EOF
    else
        sqlplus -s "/ as sysoper" << EOF | awk '/^ / {print $1}'
set pagesize 0
set feedback off
set heading off
$sql
EOF
    fi
}

# Header
echo "==============================================" > "$REPORT_FILE"
echo " CPU CAPACITY REPORT - $(hostname)" >> "$REPORT_FILE"
echo " Generated: $(date)" >> "$REPORT_FILE"
echo "==============================================" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

# 1. System CPU Overview
echo "1. HOST CPU RESOURCES" >> "$REPORT_FILE"
echo "---------------------" >> "$REPORT_FILE"

# Get CPU info (no sudo needed)
TOTAL_CORES=$(nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo 2>/dev/null || echo "N/A")
CPU_MODEL=$(grep 'model name' /proc/cpuinfo | head -1 | cut -d':' -f2 | sed 's/^[ \t]*//' 2>/dev/null || echo "N/A")

# Get CPU usage (1 min avg) - no privileged commands
CPU_LOAD=$(awk '{print $1}' /proc/loadavg 2>/dev/null || echo "N/A")
CPU_USAGE_PCT=$(top -bn1 2>/dev/null | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1}' 2>/dev/null || echo "N/A")

echo "  Hostname           : $(hostname)" >> "$REPORT_FILE"
echo "  CPU Model          : $CPU_MODEL" >> "$REPORT_FILE"
echo "  Total Cores        : $TOTAL_CORES" >> "$REPORT_FILE"
echo "  Current CPU Usage  : $CPU_USAGE_PCT%" >> "$REPORT_FILE"
echo "  Load Average (1m)  : $CPU_LOAD" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

# 2. Oracle Database CPU Usage
echo "2. ORACLE DATABASE CPU USAGE" >> "$REPORT_FILE"
echo "----------------------------" >> "$REPORT_FILE"

# Get list of running databases (no sudo needed); the bracketed character
# classes keep grep from matching its own command line in the ps snapshot.
DATABASES=$(ps -eo command | grep -E '[p]mon|[_]pmon' | awk -F_ '{print $NF}' | sort | uniq)

# BUG FIX: 'echo "$DATABASES" | wc -l' reported 1 database on an idle host
# because echo always emits a trailing newline.  Count non-empty lines.
TOTAL_DBS=$(printf '%s\n' "$DATABASES" | grep -c .)
echo "  Total Databases    : $TOTAL_DBS" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"

TOTAL_DB_CPU=0

# For each discovered instance: query its core usage, accumulate the total,
# and record its hottest SQL statement.
for DB in $DATABASES; do
    export ORACLE_SID=$DB

    # "CPU Usage Per Sec" /100 expresses the metric as CPU cores.
    DB_CPU=$(get_sql_value "
    SELECT ROUND(SUM(value)/100, 2) 
    FROM v\$sysmetric 
    WHERE metric_name='CPU Usage Per Sec' 
    AND group_id=2;" "sys" 2>/dev/null || echo "N/A")

    # BUG FIX: when sqlplus is unavailable the pipeline still exits 0 with
    # empty output, so the '|| echo N/A' fallback never fired and bc later
    # choked on an empty operand.  Validate the value explicitly.
    if ! [[ "$DB_CPU" =~ ^[0-9]+(\.[0-9]+)?$ ]]; then
        DB_CPU="N/A"
    fi

    if [ "$DB_CPU" != "N/A" ]; then
        TOTAL_DB_CPU=$(echo "$TOTAL_DB_CPU + $DB_CPU" | bc)
        # Guard the percentage: TOTAL_CORES may itself be "N/A".
        if [[ "$TOTAL_CORES" =~ ^[0-9]+$ ]] && [ "$TOTAL_CORES" -gt 0 ]; then
            DB_CPU_PCT=$(echo "scale=2; $DB_CPU * 100 / $TOTAL_CORES" | bc)
        else
            DB_CPU_PCT="N/A"
        fi
    else
        DB_CPU_PCT="N/A"
    fi

    echo "  Database: $DB" >> "$REPORT_FILE"
    echo "    CPU Cores Used    : $DB_CPU" >> "$REPORT_FILE"
    echo "    CPU Percentage    : $DB_CPU_PCT%" >> "$REPORT_FILE"

    # Get top consuming SQL for this DB
    TOP_SQL=$(get_sql_value "
    SELECT sql_id FROM (
        SELECT sql_id, cpu_time 
        FROM v\$sqlstats 
        ORDER BY cpu_time DESC)
    WHERE rownum = 1;" "sys" 2>/dev/null || echo "N/A")
    # Same empty-pipeline pitfall as above: blank means "unknown".
    [ -n "$TOP_SQL" ] || TOP_SQL="N/A"

    echo "    Top SQL ID        : $TOP_SQL" >> "$REPORT_FILE"
    echo "" >> "$REPORT_FILE"
done

# 3. Capacity Summary — derive free headroom from total cores minus what
# the databases are consuming; skip the math if either figure is unknown.
{
    echo "3. CAPACITY SUMMARY"
    echo "-------------------"
} >> "$REPORT_FILE"

if [[ "$TOTAL_CORES" == "N/A" || "$TOTAL_DB_CPU" == "N/A" ]]; then
    {
        echo "  Could not calculate capacity summary"
        echo "  (Missing CPU or database metrics)"
    } >> "$REPORT_FILE"
else
    AVAILABLE_CPU=$(echo "$TOTAL_CORES - $TOTAL_DB_CPU" | bc)
    AVAILABLE_PCT=$(echo "scale=2; 100 - ($TOTAL_DB_CPU * 100 / $TOTAL_CORES)" | bc)
    {
        echo "  Total CPU Cores    : $TOTAL_CORES"
        echo "  Used by Databases  : $TOTAL_DB_CPU"
        echo "  Available Cores    : $AVAILABLE_CPU"
        echo "  Available Percent  : $AVAILABLE_PCT%"
    } >> "$REPORT_FILE"
fi
echo "" >> "$REPORT_FILE"

# 4. Exadata Detection (no privileged commands)
# Only a marker note is emitted: querying cell metrics needs cellcli.
if is_exadata; then
    {
        echo "4. EXADATA DETECTED"
        echo "-------------------"
        echo "  Exadata environment detected"
        echo "  (Detailed cell metrics require cellcli access)"
        echo ""
    } >> "$REPORT_FILE"
fi

# 5. Recommendations — choose the advice tier from the measured usage,
# then write the whole section in one append.
advice_lines=()
if [[ "$CPU_USAGE_PCT" =~ ^[0-9.]+$ ]]; then
    if [ "$(echo "$CPU_USAGE_PCT > 80" | bc -l)" -eq 1 ]; then
        advice_lines+=("  * CRITICAL: CPU usage exceeds 80%")
        advice_lines+=("    - Investigate top SQL statements immediately")
    elif [ "$(echo "$CPU_USAGE_PCT > 60" | bc -l)" -eq 1 ]; then
        advice_lines+=("  * WARNING: CPU usage exceeds 60%")
        advice_lines+=("    - Review performance of top SQL statements")
    else
        advice_lines+=("  * CPU utilization is within normal range")
    fi
else
    advice_lines+=("  * Could not determine CPU usage level")
fi

{
    echo "5. RECOMMENDATIONS"
    echo "------------------"
    printf '%s\n' "${advice_lines[@]}"
    echo ""
    echo "Report generated: $REPORT_FILE"
} >> "$REPORT_FILE"

# Display report path
echo ""
echo "CPU capacity report generated:"
echo "$REPORT_FILE"




#!/bin/bash

# Function to summarize CPU usage per command
# Prints one aggregated row per PID: user, pid, cumulative CPU seconds,
# current %CPU, and the command line.  Relies on convert_to_seconds().
summarize_cpu_per_cmd() {
echo "SUMMARY PER CMD"
echo "Time USER_NAME PID CPU_SEC NB_CPU OS_COMMAND"

# Collect data.  BUG FIX: --no-headers keeps the ps banner row out of the
# numbers (the header used to be aggregated as if it were a process).
ps -eo user,pid,pcpu,time,args --no-headers | while read -r USER PID CPU TIME CMD; do
CPU_SEC=$(convert_to_seconds "$TIME")
echo "$USER $PID $CPU_SEC $CPU $CMD"
done | awk -v interval="$INTERVAL" '
{
# Plain parallel arrays keyed by PID: portable to non-gawk awks, unlike
# the gawk-only arrays-of-arrays the original used.
user[$2] = $1
cpu_sec[$2] += $3
nb_cpu[$2] += $4
# BUG FIX: keep the whole command line, not just its first word.
cmd = $5
for (i = 6; i <= NF; i++) cmd = cmd " " $i
cmdline[$2] = cmd
}
END {
for (pid in user) {
printf "%-10s %10d %10.1f %10.1f %s\n", user[pid], pid, cpu_sec[pid], nb_cpu[pid], cmdline[pid]
}
}'
}

# Function to display metrics explanation
display_metrics_explanation() {
echo "==================== Metrics Explanation ===================="
echo "1. DB_NAME: The name of the database or ASM instance."
echo "2. CPU_SEC: Total CPU time consumed by the database in seconds."
echo "3. NB_CPU: The number of CPUs currently being used by the database."
echo "4. AVG_NB_CPU: The average number of CPUs used by the database over the reporting interval."
echo "5. MAX_NB_CPU: The maximum number of CPUs used by the database during the reporting interval."
echo "6. MIN_NB_CPU: The minimum number of CPUs used by the database during the reporting interval."
echo "7. OS CPU Usage: The percentage of total CPU capacity currently being used by the operating system."
echo "8. Database CPU Usage: The total number of CPUs currently being used by all databases combined."
echo "9. Available Capacity: The number of CPUs available for additional workloads after accounting for OS and database usage."
echo "============================================================="
}

# Main loop
# Emit COUNT reports, INTERVAL seconds apart: per-database summary,
# optional per-user/per-command summaries, then the metrics legend.
# NOTE(review): summarize_cpu_per_db and summarize_cpu_per_user are not
# defined in this fragment — presumably they come from the companion
# script; confirm before running this standalone.
for ((i = 0; i < COUNT; i++)); do
echo "==================== CPU Analysis Report ===================="
echo "Date: $(date)"
echo "-------------------------------------------------------------"

summarize_cpu_per_db
echo "-------------------------------------------------------------"

if [[ "$DISPLAY_USER" == "Y" ]]; then
summarize_cpu_per_user
echo "-------------------------------------------------------------"
fi

if [[ "$DISPLAY_CMD" == "Y" ]]; then
summarize_cpu_per_cmd
echo "-------------------------------------------------------------"
fi

# Display metrics explanation
display_metrics_explanation
echo "-------------------------------------------------------------"

sleep "$INTERVAL"
done



=== 2nd
#!/bin/bash

# Ensure required commands are available before doing any work.
for required_cmd in ps mpstat; do
command -v "$required_cmd" >/dev/null 2>&1 || { echo "$required_cmd command is required but not found. Exiting."; exit 1; }
done

# Default parameters
INTERVAL=1
COUNT=10 # Set a reasonable default value
TOPN=10
DISPLAY_USER="N"
DISPLAY_CMD="N"
SORT_FIELD="CPU_SEC"

# Parse command-line arguments
# Named options (top=, sort_field=, ...) may appear anywhere; bare numbers
# are positional: the first is INTERVAL, the second is COUNT.
# BUG FIX: the old logic used '[ -z "$INTERVAL" ]' to decide which slot a
# number filled, but INTERVAL always has a default, so the first number
# landed in COUNT and INTERVAL could never be overridden.  Track how many
# positional numbers have been seen instead.
POSITIONAL_SEEN=0
for ARG in "$@"; do
case $ARG in
help)
echo "Usage: $0 [Interval [Count]] [top=] [sort_field=] [displayuser=[Y|N]] [displaycmd=[Y|N]]"
echo "Default Interval: 1 second."
echo "Default Count: 10."
echo "Parameters:"
echo " top= Number of rows to display (default: 10)"
echo " sort_field= CPU_SEC|AVG_NB_CPU|MAX_NB_CPU|MIN_NB_CPU (default: CPU_SEC)"
echo " displayuser= Report on user too (Y/N, default: N)"
echo " displaycmd= Report on command too (Y/N, default: N)"
exit 0
;;
top=*)
TOPN="${ARG#*=}"
;;
sort_field=*)
SORT_FIELD="${ARG#*=}"
;;
displayuser=*)
DISPLAY_USER="${ARG#*=}"
;;
displaycmd=*)
DISPLAY_CMD="${ARG#*=}"
;;
*)
# Non-numeric stray arguments are silently ignored, as before.
if [[ "$ARG" =~ ^[0-9]+$ ]]; then
POSITIONAL_SEEN=$((POSITIONAL_SEEN + 1))
if [ "$POSITIONAL_SEEN" -eq 1 ]; then
INTERVAL="$ARG"
else
COUNT="$ARG"
fi
fi
;;
esac
done

# Function to convert time (HH:MM:SS or D-HH:MM:SS) to seconds
# Echoes the total number of seconds, or 0 for an unrecognised format.
convert_to_seconds() {
local TIME=$1
# BUG FIX: the accumulator was named SECONDS; bash's SECONDS keeps its
# "seconds since assignment" stopwatch behaviour even when declared local,
# so the echoed value could drift by one across a second boundary.
local TOTAL=0
local DAYS=0
local H=0
local M=0
local S=0

# Check if the time includes days (e.g., 1-08:00:00)
if [[ "$TIME" =~ ([0-9]+)-([0-9]+):([0-9]+):([0-9]+) ]]; then
DAYS=${BASH_REMATCH[1]}
H=${BASH_REMATCH[2]}
M=${BASH_REMATCH[3]}
S=${BASH_REMATCH[4]}
elif [[ "$TIME" =~ ([0-9]+):([0-9]+):([0-9]+) ]]; then
H=${BASH_REMATCH[1]}
M=${BASH_REMATCH[2]}
S=${BASH_REMATCH[3]}
else
# Invalid time format, return 0 seconds
echo 0
return
fi

# Remove leading zeros and convert to base 10
DAYS=$((10#$DAYS))
H=$((10#$H))
M=$((10#$M))
S=$((10#$S))

# Calculate total seconds
TOTAL=$((DAYS * 86400 + H * 3600 + M * 60 + S))
echo "$TOTAL"
}

# Function to summarize CPU usage per database
# Aggregates cumulative CPU seconds and current %CPU of ora_*/asm_*
# processes, grouped by instance, and writes the grand total of %CPU to
# /tmp/total_db_cpu.txt for calculate_capacity to pick up.
# FIXES: two statements (the sed expression and the END printf) had been
# split mid-token by a line wrap in the paste and are re-joined here; the
# bracketed classes ([o]ra_) stop grep matching its own command line.
summarize_cpu_per_db() {
echo "SUMMARY PER DB"
echo "DB_NAME CPU_SEC NB_CPU AVG_NB_CPU MAX_NB_CPU MIN_NB_CPU"

# Collect data
ps -eo pcpu,time,args | grep -E '[o]ra_|[a]sm_' | while read -r CPU TIME CMD; do
# Reduce each process command line to its instance name; ASM variants
# are normalised to "+ASM".
DB=$(echo "$CMD" | sed -E 's/.*ora_...._//; s/ \(DESCRIPTION=.*//; s/ \(LOCAL=.*//; s/oracle//; s/_.*//; s/.* .*//; s/\+asm/+ASM/; s/asm/+ASM/; s/\+ASM[1-9]*/+ASM/')
[ -z "$DB" ] && continue

CPU_SEC=$(convert_to_seconds "$TIME")
echo "$DB $CPU_SEC $CPU"
done | awk -v interval="$INTERVAL" '
{
db = $1
cpu_sec = $2
nb_cpu = $3
db_stats[db]["cpu_sec"] += cpu_sec
db_stats[db]["nb_cpu"] += nb_cpu
db_stats[db]["count"]++
if (cpu_sec > db_stats[db]["max_cpu"]) db_stats[db]["max_cpu"] = cpu_sec
if (db_stats[db]["min_cpu"] == 0 || cpu_sec < db_stats[db]["min_cpu"]) db_stats[db]["min_cpu"] = cpu_sec
}
END {
total_db_cpu = 0
for (db in db_stats) {
avg_cpu = db_stats[db]["cpu_sec"] / db_stats[db]["count"]
total_db_cpu += db_stats[db]["nb_cpu"]
printf "%-10s %10.1f %10.1f %10.1f %10.1f %10.1f\n", db, db_stats[db]["cpu_sec"], db_stats[db]["nb_cpu"], avg_cpu, db_stats[db]["max_cpu"], db_stats[db]["min_cpu"]
}
print total_db_cpu > "/tmp/total_db_cpu.txt" # Save total DB CPU usage for capacity calculation
}'
}

# Function to calculate OS CPU usage
# Echoes the non-idle CPU percentage (100 - %idle from mpstat).  Echoes 0
# and returns 1 when mpstat output cannot be parsed (tool missing, etc.).
calculate_os_cpu_usage() {
# Use mpstat to get the percentage of idle CPU time
local idle_cpu
idle_cpu=$(mpstat 1 1 2>/dev/null | awk '/all/ {print $NF}' | head -n 1) # Ensure single-line output

# BUG FIX: an empty or non-numeric idle value previously reached bc and
# produced a syntax error plus an empty result downstream.
if ! [[ "$idle_cpu" =~ ^[0-9]+([.,][0-9]+)?$ ]]; then
echo "0"
return 1
fi
# Some locales print a comma decimal separator; bc wants a dot.
idle_cpu=${idle_cpu/,/.}

# Calculate the percentage of CPU used by the OS
echo "100 - $idle_cpu" | bc
}

# Function to calculate total and available CPU capacity
# All reported figures are expressed in CPUs (cores).
calculate_capacity() {
local total_cpus
local os_cpu_usage
local os_cpu_cores
local db_cpu_usage
local normalized_db_cpu_usage
local available_capacity

# Get the total number of CPUs
total_cpus=$(nproc)

# Get the OS CPU usage (percent of total capacity)
os_cpu_usage=$(calculate_os_cpu_usage)

# Get the total database CPU usage (sum of per-process %CPU written by
# summarize_cpu_per_db); default to 0 when no summary has run yet.
db_cpu_usage=$(cat /tmp/total_db_cpu.txt 2>/dev/null)
[ -z "$db_cpu_usage" ] && db_cpu_usage=0

# BUG FIX: the old formula subtracted a *percentage* (os_cpu_usage) from a
# *core count* (total_cpus).  Convert both usage figures to cores first.
# ps reports %CPU as percent of one CPU, so /100 yields cores — assumes
# that semantics of the value in /tmp/total_db_cpu.txt; confirm if the
# producer changes.
normalized_db_cpu_usage=$(echo "$db_cpu_usage / 100" | bc -l)
os_cpu_cores=$(echo "$os_cpu_usage * $total_cpus / 100" | bc -l)

# Calculate the available capacity
available_capacity=$(echo "$total_cpus - $os_cpu_cores - $normalized_db_cpu_usage" | bc -l)

# Display the results
echo "Total CPUs: $total_cpus"
echo "OS CPU Usage: $(printf "%.2f" "$os_cpu_usage")%"
echo "Database CPU Usage: $(printf "%.2f" "$normalized_db_cpu_usage") CPUs"
echo "Available Capacity: $(printf "%.2f" "$available_capacity") CPUs"
}

# Main loop
# Generate COUNT reports, INTERVAL seconds apart.  Each iteration prints
# the per-database summary, optional per-user/per-command summaries, and
# the capacity figures.
# NOTE(review): summarize_cpu_per_user is not defined anywhere in this
# file — running with displayuser=Y will fail; confirm where it lives.
for ((i = 0; i < COUNT; i++)); do
echo "==================== CPU Analysis Report ===================="
echo "Date: $(date)"
echo "-------------------------------------------------------------"

summarize_cpu_per_db
echo "-------------------------------------------------------------"

if [[ "$DISPLAY_USER" == "Y" ]]; then
summarize_cpu_per_user
echo "-------------------------------------------------------------"
fi

if [[ "$DISPLAY_CMD" == "Y" ]]; then
summarize_cpu_per_cmd
echo "-------------------------------------------------------------"
fi

# Calculate and display capacity
calculate_capacity
echo "-------------------------------------------------------------"

sleep "$INTERVAL"
done


The script you provided is a Bash script designed to generate a CPU analysis report. It offers several modes and methods for running the report, based on the command-line arguments and options specified. Here are the different ways you can run this script, along with the modes and methods available:

Command-Line Arguments and Options

  1. Interval and Count:

    • Interval: The time interval in seconds between each report generation. Default is 1 second.
    • Count: The number of times the report is generated. Default is 10.

    Usage: ./script.sh [Interval] [Count]

  2. Top N Rows:

    • top=: Specifies the number of rows to display in the report. Default is 10.

    Usage: ./script.sh top=5

  3. Sort Field:

    • sort_field=: Determines the field by which the report is sorted. Options include CPU_SEC, AVG_NB_CPU, MAX_NB_CPU, and MIN_NB_CPU. Default is CPU_SEC.

    Usage: ./script.sh sort_field=AVG_NB_CPU

  4. Display User Information:

    • displayuser=: Whether to include user information in the report. Options are Y (Yes) or N (No). Default is N.

    Usage: ./script.sh displayuser=Y

  5. Display Command Information:

    • displaycmd=: Whether to include command information in the report. Options are Y (Yes) or N (No). Default is N.

    Usage: ./script.sh displaycmd=Y

  6. Help:

    • help: Displays usage information and exits.

    Usage: ./script.sh help

Combining Options

You can combine these options to customize the report generation. For example:

  • Example 1: Run the report every 2 seconds, 5 times, displaying the top 5 rows, sorted by average number of CPUs, including user and command information.

    ./script.sh 2 5 top=5 sort_field=AVG_NB_CPU displayuser=Y displaycmd=Y
    
  • Example 2: Run the report with default settings but display command information.

    ./script.sh displaycmd=Y
    

Summary




In the script you provided, the CPU time consumed by the database in seconds and other metrics are calculated based on the output of the ps command, which provides information about currently running processes. The reference point for the CPU time consumed is the cumulative CPU time used by each process since it started. This means that the CPU time reported is not limited to a specific time window like the last hour; rather, it reflects the total CPU time consumed by the process from its start until the moment the script is run.

Key Points:

  1. Cumulative CPU Time:

    • The ps command outputs the cumulative CPU time for each process, which is the total time the process has been using the CPU since it started. This is represented in the TIME field in the ps output.
  2. Real-Time Snapshot:

    • The script provides a real-time snapshot of CPU usage at the moment it is run. It does not inherently track or limit the data to a specific historical period like the last hour.
  3. Interval and Count:

    • The script runs in a loop based on the specified INTERVAL and COUNT, which means it can provide repeated snapshots of CPU usage at regular intervals. However, each snapshot is independent and reflects the state at that particular moment.
  4. Aggregation:

    • The script aggregates CPU usage data per database by summing up the CPU time for processes associated with each database. This aggregation is done for each snapshot taken during the script's execution.
  5. Normalization:

    • The script calculates average, maximum, and minimum CPU usage metrics based on the cumulative data collected during its execution.

Considerations:

  • Historical Data: If you need historical data or CPU usage over a specific time window, you would need to implement additional logging or use a monitoring tool that tracks CPU usage over time.
  • Real-Time Monitoring: The script is useful for real-time monitoring and analysis but does not provide historical trends unless modified to store and analyze past data.

In summary, the CPU time consumed by the database as reported by the script is cumulative from the start of each process, and the script provides real-time snapshots of this data at specified intervals.

Is this helpful?

give me what the script is actually doing, logic-wise

Certainly! Let's break down the logic of the script step by step to understand what it is doing:

Script Overview

The script is designed to generate a CPU analysis report for Oracle database processes running on a system. It uses the ps and mpstat commands to gather data about CPU usage and processes, and it provides a summary of CPU usage per database, as well as overall system CPU capacity.

Detailed Logic Breakdown

  1. Command Availability Check:

    • The script first checks if the ps and mpstat commands are available on the system. If either command is missing, the script exits with an error message.
  2. Default Parameters:

    • Sets default values for various parameters such as INTERVAL, COUNT, TOPN, DISPLAY_USER, DISPLAY_CMD, and SORT_FIELD.
  3. Command-Line Argument Parsing:

    • Parses command-line arguments to override default parameters. It supports options for interval, count, top rows, sort field, and whether to display user and command information.
  4. Time Conversion Function:

    • Defines a function convert_to_seconds to convert time from the format HH:MM:SS or D-HH:MM:SS into total seconds. This is used to convert the CPU time reported by ps into seconds.
  5. Summarize CPU Usage Per Database:

    • Collects data using ps to list processes related to Oracle databases (identified by patterns like ora_ and asm_).
    • Extracts the database name from the command line of each process.
    • Converts the CPU time for each process into seconds.
    • Aggregates CPU usage statistics per database, including total CPU time, number of CPUs used, average, maximum, and minimum CPU usage.
  6. Calculate OS CPU Usage:

    • Uses mpstat to determine the percentage of idle CPU time and calculates the percentage of CPU used by the operating system.
  7. Calculate Total and Available CPU Capacity:

    • Determines the total number of CPUs using nproc.
    • Calculates the normalized database CPU usage as a percentage of total CPUs.
    • Computes the available CPU capacity by subtracting OS and database CPU usage from the total CPU capacity.
  8. Main Loop:

    • Runs the report generation in a loop for the specified COUNT times, with a delay of INTERVAL seconds between each iteration.
    • Displays the date and CPU analysis report for each iteration.
    • Calls functions to summarize CPU usage per database, optionally per user and command, and calculates CPU capacity.

Summary

The script provides a real-time analysis of CPU usage by Oracle database processes, summarizing the data per database and calculating overall system CPU capacity. It allows customization through command-line arguments to adjust the frequency, duration, and detail level of the reports. The script is useful for monitoring and analyzing CPU usage patterns in environments running Oracle databases.