Monday, January 5, 2015

Points to remember in Couchbase

 

 

#!/bin/bash

# --- Script Metadata ---
SCRIPT_NAME=$(basename "$0")
SCRIPT_VERSION="1.0"
SCRIPT_EXEC="${SUDO_USER:-$(whoami)}" # user executing the script (falls back to the current user when not run via sudo)
VALIDATION_TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
SUMMARY_REPORT="summary_$(date +'%Y%m%d_%H%M%S').txt"
DETAILED_REPORT="detailed_$(date +'%Y%m%d_%H%M%S').txt"

# --- Function to Execute SQL and Capture Output ---
execute_sql() {
local sql_query="$1"
local report_file="$2"
local output=$(sqlplus -s "/ as sysdba" <<-EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SET LINESIZE 200
$sql_query
EXIT;
EOF
)

# Remove extra spaces
output=$(echo "$output" | tr -s '[:space:]')
# Write output to report
if [[ -n "$report_file" ]]; then
echo "$output" >> "$report_file"
echo -e "\n" >> "$report_file"
fi

echo "$output"
}

is_rac() {
local cluster_database=$(execute_sql "SELECT value FROM v\$parameter WHERE name = 'cluster_database';" "")
if [[ "$cluster_database" == "TRUE" ]]; then
return 0 # RAC
else
return 1 # Single Instance
fi
}

write_report_headers() {
local report_name="$1"
echo "Database Validation Report" > "$report_name"
echo "-------------------------------------" >> "$report_name"
echo "Generated by: $SCRIPT_EXEC" >> "$report_name"
echo "Date/Time: $VALIDATION_TIMESTAMP" >> "$report_name"
echo -e "\n" >> "$report_name"
# DB Details
db_unique_name=$(execute_sql "select db_unique_name from v\$database;" "")
db_status=$(execute_sql "select status from v\$instance;" "")
db_role=$(execute_sql "select database_role from v\$database;" "")

if is_rac; then
echo "Database Type: RAC" >> "$report_name"
else
echo "Database Type: Single Instance" >> "$report_name"
fi
echo "Database Name : $db_unique_name" >> "$report_name"
echo "Database Status : $db_status" >> "$report_name"
echo "Database Role : $db_role" >> "$report_name"
echo -e "\n" >> "$report_name"
}

echo "Starting Database Validation..."
# --- Start Reports ---
write_report_headers "$SUMMARY_REPORT"
write_report_headers "$DETAILED_REPORT"

if is_rac; then
echo "Database is RAC" >> "$SUMMARY_REPORT"
else
echo "Database is Single Instance" >> "$SUMMARY_REPORT"
fi
echo -e "\n" >> "$SUMMARY_REPORT"

# Initialize arrays for failures, successes, and info
failures=()
successes=()
infos=()

# For Future editors of the script, please maintain the comments for others to easily understand and modify this program.
# --- Validation 1: Encryption Wallet ---
echo "--- Encryption Wallet Validation ---" >> "$DETAILED_REPORT"
encryption_wallet_output=$(execute_sql "select * from gv\$encryption_wallet where wrl_type='HSM' and status <> 'OPEN';" "$DETAILED_REPORT")
if [[ -n "$encryption_wallet_output" ]]; then
failures+=("Failure: Encryption_Wallet closed")
else
successes+=("Success: Encryption_Wallet OPEN")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 2: Tablespace Encryption ---
echo "--- Tablespace Encryption Validation ---" >> "$DETAILED_REPORT"
tablespace_encryption_output=$(execute_sql "select tablespace_name, encrypted from dba_tablespaces where encrypted = 'NO' and tablespace_name not in ('SYSTEM','SYSAUX','UNDOTBS1','TEMP','TS_AUDIT_DATA','TS_AUDIT_IND');" "$DETAILED_REPORT")
if [[ -n "$tablespace_encryption_output" ]]; then
failures+=("Failure: Tablespace_name encryption not enabled on the following tableSpaces and their details.")
else
successes+=("Success: Tablespace_name encryption enabled on this database.")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 3: Password File ---
echo "--- Password File Validation ---" >> "$DETAILED_REPORT"
password_file_output=$(execute_sql "select * from v\$passwordfile_info;" "$DETAILED_REPORT")
if [[ "$password_file_output" == *"FILENAME"* ]]; then
if [[ "$password_file_output" == *"NULL"* ]]; then
failures+=("Failure: no password file name")
else
successes+=("Success: passwordfile exists.")
fi

else
failures+=("Failure: no password file name")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 4: Invalid Components ---
echo "--- Invalid Components Validation ---" >> "$DETAILED_REPORT"
invalid_components_output=$(execute_sql "SELECT comp_id, comp_name, STATUS, version FROM dba_registry where status = 'INVALID';" "$DETAILED_REPORT")
if [[ -n "$invalid_components_output" ]]; then
while IFS= read -r line; do
if [[ "$line" == *"comp_name"* ]]; then
continue
fi
comp_name=$(echo "$line" | awk '{print $2}')
failures+=("Failure: $comp_name is Invalid")
done < <(echo "$invalid_components_output")

else
successes+=("Success: No invalid components found.")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 5: Block Change Tracking ---
echo "--- Block Change Tracking Validation ---" >> "$DETAILED_REPORT"
block_change_tracking_output=$(execute_sql "select * from v\$block_change_tracking where Status = 'ENABLED';" "$DETAILED_REPORT")
if [[ -n "$block_change_tracking_output" ]]; then
successes+=("Success: Block Change Tracking is enabled.")
else
failures+=("Failure: Block Change Tracking is disabled.")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 6: NLS Character Set ---
echo "--- NLS Character Set Information ---" >> "$DETAILED_REPORT"
nls_charset_output=$(execute_sql "SELECT * FROM v\$nls_parameters WHERE parameter = 'NLS_CHARACTERSET';" "$DETAILED_REPORT")
infos+=("NLS Character Set Information:")
while IFS= read -r line; do
if [[ "$line" == *"VALUE"* ]]; then
continue
fi
if [[ "$line" != *"NLS_CHARACTERSET"* ]] && [[ "$line" != *"PARAMETER"* ]]; then
infos+=("$line")
fi

done < <(echo "$nls_charset_output")
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 7: DNG Triggers (RAC/Single Instance) ---
echo "--- DNG Triggers Validation ---" >> "$DETAILED_REPORT"
dng_triggers_output=$(execute_sql "select trigger_name from dba_triggers where trigger_name like 'PNC_DNG%';" "$DETAILED_REPORT")
if is_rac; then
infos+=("Note: This is a RAC database, so no DNG startup/switchover services are expected.")
else
if [[ -z "$dng_triggers_output" ]]; then
failures+=("Failure: No DNG startup/switchover services found.")
else
successes+=("Success: DNG startup/switchover services found.")
fi
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 8: RAC Services (if RAC) ---
if is_rac; then
echo "--- RAC Services Validation ---" >> "$DETAILED_REPORT"
rac_services_output=$(srvctl status service | grep RWSV)
if [[ -z "$rac_services_output" ]]; then
failures+=("Failure: RWSV related services are not running.")
echo "$rac_services_output" >> "$DETAILED_REPORT"
else
successes+=("Success: RWSV related services are running.")
echo "$rac_services_output" >> "$DETAILED_REPORT"
fi
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Parameter Validation ---
DEFAULT_PARAMS=(
"audit_file_dest=/u01/home/oracle/audit"
"audit_sys_operations=TRUE"
"audit_trail=OS"
"db_block_size=8192"
"db_files=20000"
"db_recovery_file_dest=+RECO"
"db_recovery_file_dest_size=2147483648000"
"open_cursors=60000"
"processes=5000"
"query_rewrite_enabled=TRUE"
"recyclebin=ON"
"redo_transport_user=PPREDO"
"sql92_security=TRUE"
)

echo "--- Parameter Validation ---" >> "$DETAILED_REPORT"
temp_file=$(mktemp)
sqlplus -s / as sysdba <<-EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SET LINESIZE 200
SET COLSEP '|'
SPOOL "$temp_file"
SELECT NAME || '|' || Value FROM v\$parameter WHERE name IN ('_adg_parselocktimeout', 'audit_file_dest', 'audit_sys_operations', 'audit_trail', 'db_block_size', 'db_create_file_dest', 'db_create_online_log_dest_1', 'db_create_online_log_dest_2', 'db_domain', 'db_files', 'db_recovery_file_dest', 'db_recovery_file_dest_size', 'instance_name', 'log_archive_dest_1', 'open_cursors', 'pga_aggregate_limit', 'pga_aggregate_target', 'processes', 'query_rewrite_enabled', 'recyclebin', 'redo_transport_user', 'sessions', 'sga_max_size', 'sga_target', 'spfile', 'sql92_security') ORDER BY 1;
SPOOL OFF
EXIT
EOF

# Read the fetched parameters into an associative array
declare -A fetched_params
while IFS='|' read -r name value; do
fetched_params["$name"]="$value"
done < "$temp_file"

# Remove the temporary file
rm "$temp_file"

# Compare fetched parameters with default values
for param in "${DEFAULT_PARAMS[@]}"; do
IFS='=' read -r name default_value <<< "$param"
fetched_value="${fetched_params[$name]}"
if [[ "$fetched_value" == "$default_value" ]]; then
successes+=("Success: $name, value=$fetched_value, default value=$default_value")
else
failures+=("Failure: $name, value=$fetched_value, default value should have been $default_value")
fi
done

# Add non-default parameters to info
for name in "${!fetched_params[@]}"; do
if [[ ! " ${DEFAULT_PARAMS[@]} " =~ " $name=" ]]; then
infos+=("Info: $name, value=${fetched_params[$name]}")
fi
done

# Output the results to the summary report
{
echo -e "--- Failures ---"
for failure in "${failures[@]}"; do
echo "$failure"
done

echo -e "\n--- Successes ---"
for success in "${successes[@]}"; do
echo "$success"
done

echo -e "\n--- Info ---"
for info in "${infos[@]}"; do
echo "$info"
done
} >> "$SUMMARY_REPORT"

# Output the results to the detailed report
{
echo -e "--- Failures ---"
for failure in "${failures[@]}"; do
echo "$failure"
done

echo -e "\n--- Successes ---"
for success in "${successes[@]}"; do
echo "$success"
done

echo -e "\n--- Info ---"
for info in "${infos[@]}"; do
echo "$info"
done
} >> "$DETAILED_REPORT"

echo "Database Validation Completed."
echo "Summary Report: $SUMMARY_REPORT"
echo "Detailed Report: $DETAILED_REPORT"

exit 0

===



 #!/bin/bash

# Raj Kiran Mattewada - Dated: March 8th 2025

# Functionalities:
## Database Type Reporting:
## Adds information about whether the database is RAC or single instance to the summary report.
## Validation Checks:
## Encryption Wallet Validation: Checks if the encryption wallet is open.
## Tablespace Encryption Validation: Checks if tablespace encryption is enabled.
## Password File Validation: Checks if a password file exists.
## Invalid Components Validation: Checks for invalid components in the database.
## Block Change Tracking Validation: Checks if block change tracking is enabled.
## NLS Character Set Information: Retrieves and reports the NLS character set information.
## DNG Triggers Validation: Checks for DNG startup/switchover services, with different checks for RAC and single instance databases.
## RAC Services Validation: Checks if RWSV related services are running (only for RAC databases).
## Parameter Validation: Compares current database parameters with default values and categorizes them into failures, successes, and info.
## Summary Report Generation: Writes failures, successes, and info to the summary report in the mentioned order.
## Detailed Report Generation: Writes detailed validation results to the detailed report.
# If you need explanation and understanding of the logic of this script, I have provided it at the bottom of the script.
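# Illustrative layout of the summary report this script produces (actual entries depend on which checks pass or fail):
#   Database Validation Report
#   Generated by / Date/Time / Database Type / Database Name / Status / Role
#   --- Failures ---   (one "Failure: ..." line per failed check)
#   --- Successes ---  (one "Success: ..." line per passed check)
#   --- Info ---       (NLS character set and non-default parameter values)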

SCRIPT_NAME=$(basename "$0")
SCRIPT_VERSION="1.0"
SCRIPT_EXEC="${SUDO_USER:-$(whoami)}" # user executing the script (falls back to the current user when not run via sudo)
VALIDATION_TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
SUMMARY_REPORT="summary_$(date +'%Y%m%d_%H%M%S').txt"
DETAILED_REPORT="detailed_$(date +'%Y%m%d_%H%M%S').txt"

# --- Function to Execute SQL and Capture Output ---
execute_sql() {
local sql_query="$1"
local report_file="$2"
local output=$(sqlplus -s "/ as sysdba" <<-EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SET LINESIZE 200
$sql_query
EXIT;
EOF
)

#remove extra spaces
output=$(echo "$output" | tr -s '[:space:]' )
#write output to report
echo "$output" >> "$report_file"
echo -e "\n" >> "$report_file"

echo "$output"
}

is_rac() {
local cluster_database=$(execute_sql "SELECT value FROM v\$parameter WHERE name = 'cluster_database';" /dev/null)
if [[ "$cluster_database" == "TRUE" ]]; then
return 0 # RAC
else
return 1 # Single Instance
fi
}

write_report_headers(){
local report_name="$1"
echo "Database Validation Report" > "$report_name"
echo "-------------------------------------" >> "$report_name"
echo "Generated by: $SCRIPT_EXEC" >> "$report_name"
echo "Date/Time: $VALIDATION_TIMESTAMP" >> "$report_name"
#echo -e "\n" >> "$report_name"
# DB Details
db_unique_name=$(execute_sql "select db_unique_name from v\$database;" /dev/null)
db_status=$(execute_sql "select status from v\$instance;" /dev/null)
db_role=$(execute_sql "select database_role from v\$database;" /dev/null)

if is_rac; then
echo "Database Type: RAC" >> "$report_name"
else
echo "Database Type: Single Instance" >> "$report_name"
fi
echo "Database Name : $db_unique_name" >> "$report_name"
echo "Database Status : $db_status" >> "$report_name"
echo "Database Role : $db_role" >> "$report_name"
#echo -e "\n" >> "$report_name"
}

echo "Starting Database Validation..."
# --- Start Reports ---
write_report_headers "$SUMMARY_REPORT"
write_report_headers "$DETAILED_REPORT"

if is_rac; then
echo "Database is RAC" >> "$SUMMARY_REPORT"
else
echo "Database is Single Instance" >> "$SUMMARY_REPORT"
fi
echo -e "\n" >> "$SUMMARY_REPORT"

# Initialize arrays for failures, successes, and info
failures=()
successes=()
infos=()


# For Future editors of the script, please maintain the comments for others to easily understand and modify this program.
# --- Validation 1: Encryption Wallet ---
echo "--- Encryption Wallet Validation ---" >> "$DETAILED_REPORT"
encryption_wallet_output=$(execute_sql "select * from gv\$encryption_wallet where wrl_type='HSM' and status <> 'OPEN';" "$DETAILED_REPORT")
if [[ -n "$encryption_wallet_output" ]]; then
failures+=("Failure: Encryption_Wallet closed")
else
successes+=("Success: Encryption_Wallet OPEN")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 2: Tablespace Encryption ---
echo "--- Tablespace Encryption Validation ---" >> "$DETAILED_REPORT"
tablespace_encryption_output=$(execute_sql "select tablespace_name, encrypted from dba_tablespaces where encrypted = 'NO' and tablespace_name not in ('SYSTEM','SYSAUX','UNDOTBS1','TEMP','TS_AUDIT_DATA','TS_AUDIT_IND');" "$DETAILED_REPORT")
if [[ -n "$tablespace_encryption_output" ]]; then
failures+=("Failure: Tablespace_name encryption not enabled on the following tableSpaces and their details.")
else
successes+=("Success: Tablespace_name encryption enabled on this database.")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 3: Password File ---
echo "--- Password File Validation ---" >> "$DETAILED_REPORT"
password_file_output=$(execute_sql "select * from v\$passwordfile_info;" "$DETAILED_REPORT")
if [[ "$password_file_output" == *"FILENAME"* ]]; then
if [[ "$password_file_output" == *"NULL"* ]]; then
failures+=("Failure: no password file name")
else
successes+=("Success: passwordfile exists.")
fi

else
failures+=("Failure: no password file name")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 4: Invalid Components ---
echo "--- Invalid Components Validation ---" >> "$DETAILED_REPORT"
invalid_components_output=$(execute_sql "SELECT comp_id, comp_name, STATUS, version FROM dba_registry where status = 'INVALID';" "$DETAILED_REPORT")
if [[ -n "$invalid_components_output" ]]; then
while IFS= read -r line; do
if [[ "$line" == *"comp_name"* ]]; then
continue
fi
comp_name=$(echo "$line" | awk '{print $2}')
failures+=("Failure: $comp_name is Invalid")
done < <(echo "$invalid_components_output")

else
successes+=("Success: No invalid components found.")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 5: Block Change Tracking ---
echo "--- Block Change Tracking Validation ---" >> "$DETAILED_REPORT"
block_change_tracking_output=$(execute_sql "select * from v\$block_change_tracking where Status = 'ENABLED';" "$DETAILED_REPORT")
if [[ -n "$block_change_tracking_output" ]]; then
successes+=("Success: Block Change Tracking is enabled.")
else
failures+=("Failure: Block Change Tracking is disabled.")
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 6: NLS Character Set ---
echo "--- NLS Character Set Information ---" >> "$DETAILED_REPORT"
nls_charset_output=$(execute_sql "SELECT * FROM v\$nls_parameters WHERE parameter = 'NLS_CHARACTERSET';" "$DETAILED_REPORT")
infos+=("NLS Character Set Information:")
while IFS= read -r line; do
if [[ "$line" == *"VALUE"* ]]; then
continue
fi
if [[ "$line" != *"NLS_CHARACTERSET"* ]] && [[ "$line" != *"PARAMETER"* ]]; then
infos+=("$line")
fi

done < <(echo "$nls_charset_output")
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 7: DNG Triggers (RAC/Single Instance) ---
echo "--- DNG Triggers Validation ---" >> "$DETAILED_REPORT"
dng_triggers_output=$(execute_sql "select trigger_name from dba_triggers where trigger_name like 'PNC_DNG%';" "$DETAILED_REPORT")
if is_rac; then
infos+=("Note: This is a RAC database, so no DNG startup/switchover services are expected.")
else
if [[ -z "$dng_triggers_output" ]]; then
failures+=("Failure: No DNG startup/switchover services found.")
else
successes+=("Success: DNG startup/switchover services found.")
fi
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Validation 8: RAC Services (if RAC) ---
if is_rac; then
echo "--- RAC Services Validation ---" >> "$DETAILED_REPORT"
rac_services_output=$(srvctl status service | grep RWSV)
if [[ -z "$rac_services_output" ]]; then
failures+=("Failure: RWSV related services are not running.")
echo "$rac_services_output" >> "$DETAILED_REPORT"
else
successes+=("Success: RWSV related services are running.")
echo "$rac_services_output" >> "$DETAILED_REPORT"
fi
fi
echo -e "\n" >> "$DETAILED_REPORT"

# --- Parameter Validation ---
DEFAULT_PARAMS=(
"audit_file_dest=/u01/home/oracle/audit"
"audit_sys_operations=TRUE"
"audit_trail=OS"
"db_block_size=8192"
"db_files=20000"
"db_recovery_file_dest=+RECO"
"db_recovery_file_dest_size=2147483648000"
"open_cursors=60000"
"processes=5000"
"query_rewrite_enabled=TRUE"
"recyclebin=ON"
"redo_transport_user=PPREDO"
"sql92_security=TRUE"
)

echo "--- Parameter Validation ---" >> "$DETAILED_REPORT"
temp_file=$(mktemp)
sqlplus -s / as sysdba <<-EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SET LINESIZE 200
SET COLSEP '|'
SPOOL "$temp_file"
SELECT NAME || '|' || Value FROM v\$parameter WHERE name IN ('_adg_parselocktimeout', 'audit_file_dest', 'audit_sys_operations', 'audit_trail', 'db_block_size', 'db_create_file_dest', 'db_create_online_log_dest_1', 'db_create_online_log_dest_2', 'db_domain', 'db_files', 'db_recovery_file_dest', 'db_recovery_file_dest_size', 'instance_name', 'log_archive_dest_1', 'open_cursors', 'pga_aggregate_limit', 'pga_aggregate_target', 'processes', 'query_rewrite_enabled', 'recyclebin', 'redo_transport_user', 'sessions', 'sga_max_size', 'sga_target', 'spfile', 'sql92_security') ORDER BY 1;
SPOOL OFF
EXIT
EOF

# Read the fetched parameters into an associative array
declare -A fetched_params
while IFS='|' read -r name value; do
fetched_params["$name"]="$value"
done < "$temp_file"

# Remove the temporary file
rm "$temp_file"

# Compare fetched parameters with default values
for param in "${DEFAULT_PARAMS[@]}"; do
IFS='=' read -r name default_value <<< "$param"
fetched_value="${fetched_params[$name]}"
if [[ "$fetched_value" == "$default_value" ]]; then
successes+=("Success: $name, value=$fetched_value, default value=$default_value")
else
failures+=("Failure: $name, value=$fetched_value, default value should have been $default_value")
fi
done

# Add non-default parameters to info
for name in "${!fetched_params[@]}"; do
if [[ ! " ${DEFAULT_PARAMS[@]} " =~ " $name=" ]]; then
infos+=("Info: $name, value=${fetched_params[$name]}")
fi
done

# Output the results to the summary report
{
echo -e "--- Failures ---"
for failure in "${failures[@]}"; do
echo "$failure"
done

echo -e "\n--- Successes ---"
for success in "${successes[@]}"; do
echo "$success"
done

echo -e "\n--- Info ---"
for info in "${infos[@]}"; do
echo "$info"
done
} >> "$SUMMARY_REPORT"


echo "Database Validation Completed."
echo "Summary Report: $SUMMARY_REPORT"
echo "Detailed Report: $DETAILED_REPORT"

exit 0


#Validation Checks such as
#Encryption Wallet - Checks if the encryption wallet is open by executing a SQL query. Adds the result to the summary and detailed reports.
#Tablespace Encryption Validation, Password File Validation, etc., are done the same way.

#Parameter Validation
# DEFAULT_PARAMS:
# An array of default parameter values that the script will validate against the current database parameters.
# Parameter Validation Process:
# Executes a SQL query to fetch the current values of specific database parameters.
# Compares the fetched parameters with the default values.
# Categorizes the results into failures, successes, and info.
# Adds the results to the summary and detailed reports.
# Summary Report Generation
# Failures, Successes, and Info:
# Writes failures at the top, successes in the middle, and info at the bottom of the summary report.
# Detailed Report Generation
# Detailed Validation Results:
# Writes detailed validation results to the detailed report.
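
If another standard needs to be enforced later, it only has to be added to DEFAULT_PARAMS in name=value form and, if it is not already fetched, to the IN (...) list of the v$parameter query; a minimal sketch, using cursor_sharing purely as a hypothetical example:

# Hypothetical example: also require cursor_sharing=EXACT
DEFAULT_PARAMS+=("cursor_sharing=EXACT")
# ...and add 'cursor_sharing' to the IN (...) list of the v$parameter query so that
# fetched_params[cursor_sharing] is populated before the comparison loop runs.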




==raj
#!/bin/bash

# Script to automate SQL plan analysis, identification of good/bad plans,
# user confirmation, and SQL Baseline creation/rollback.

# --- Configuration ---
# Set the default value here, you can also change it when executing the script
SQL_ID="1dqhgbyk89v4p" # SQL ID of the problematic statement (default value; can be overridden by a command-line parameter)
DAYS_HISTORY="1" # Number of days of AWR history to analyze
INTERVAL_HOURS="1" # Time interval granularity (in hours)
AWRMS_SQL="awrms.sql" # The name of the awrms script
SQL_USER="your_db_user" # Database user with required privileges
SQL_PASSWORD="your_db_password" # Database user's password
SQL_CONNECT_STRING="your_db_connect_string" # TNS alias or connection string

# --- Helper Functions ---

# Function to check if a command is available
command_exists() {
command -v "$1" >/dev/null 2>&1
}

# Function to log messages
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $@"
}

# Check if required command are available
if ! command_exists sqlplus; then
log "Error: sqlplus is not installed or not in PATH. Please install it and try again."
exit 1
fi

# Function to execute SQL scripts and log the output
execute_sqlplus() {
local sql_command="$1"
local log_file="$2"

log "Executing SQL command: $sql_command"
sqlplus -s "$SQL_USER/$SQL_PASSWORD@$SQL_CONNECT_STRING" <<EOF > "$log_file" 2>&1
SET FEEDBACK ON
SET SERVEROUTPUT ON
$sql_command
EXIT;
EOF

if [ $? -ne 0 ]; then
log "Error executing SQL command. Check log file: $log_file"
exit 1
fi

log "SQL command output logged to: $log_file"
}

# Function to analyze the performance data from awrms.sql
analyze_plan_performance() {
local awrms_output_file="$1"
local -A plan_data # associative array to store per-plan metric totals
local plan_hashes=() # ordered list of distinct plan hash values
local plan_count=0
local bad_plan=""
local good_plan=""
local max_elapsed=0
local min_elapsed=9999999999999999

log "Analyzing performance data from: $awrms_output_file"

# Check if the file exists
if [ ! -f "$awrms_output_file" ]; then
log "Error: File not found: $awrms_output_file"
exit 1
fi

# Read each line of awrms output
while IFS= read -r line; do
if [[ "$line" == *"PLAN_HASH_VALUE"* ]]; then
# Ignore the header line
continue
else
# Extract relevant fields using awk
# NOTE: verify these field positions against the actual awrms.sql output
# (the TIME column counts as two whitespace-separated fields); int() truncates
# decimals so the values can be used in bash integer arithmetic below.
plan_hash=$(echo "$line" | awk '{print $2}')
elapsed_time=$(echo "$line" | awk '{print int($5)}')
cpu_time=$(echo "$line" | awk '{print int($6)}')
buffer_gets=$(echo "$line" | awk '{print int($8)}')
disk_reads=$(echo "$line" | awk '{print int($9)}')
executions=$(echo "$line" | awk '{print int($3)}')
io_wait=$(echo "$line" | awk '{print int($10)}')
# Check if the line is not empty
if [[ -n "$plan_hash" ]] && [[ "$plan_hash" != "-" ]]; then
# Update plan count if the plan is new
if [[ -z "${plan_data["$plan_hash"]}" ]]; then
plan_data["$plan_hash"]=1
plan_hashes+=("$plan_hash")
plan_count=$((plan_count + 1))
log "New plan found: $plan_hash"
fi
# add data to associative array
plan_data["$plan_hash,elapsed_time"]=$((plan_data["$plan_hash,elapsed_time"] + elapsed_time ))
plan_data["$plan_hash,cpu_time"]=$((plan_data["$plan_hash,cpu_time"] + cpu_time ))
plan_data["$plan_hash,buffer_gets"]=$((plan_data["$plan_hash,buffer_gets"] + buffer_gets ))
plan_data["$plan_hash,disk_reads"]=$((plan_data["$plan_hash,disk_reads"] + disk_reads ))
plan_data["$plan_hash,executions"]=$((plan_data["$plan_hash,executions"] + executions))
plan_data["$plan_hash,io_wait"]=$((plan_data["$plan_hash,io_wait"] + io_wait))
fi

fi
done < "$awrms_output_file"

# Check if no plan was found
if [ $plan_count -eq 0 ]; then
log "No plan found, please check the awrms output"
exit 1
fi

# Analyze plans data
log "Analysis of the plan"
for plan_hash in "${plan_hashes[@]}"; do
local avg_elapsed=$((plan_data["$plan_hash,elapsed_time"] / plan_data["$plan_hash,executions"] ))
log "Plan hash value : $plan_hash"
log "- Average Elapsed Time : $avg_elapsed ms"
log "- Average CPU Time : $((plan_data["$plan_hash,cpu_time"] / plan_data["$plan_hash,executions"])) ms"
log "- Average Buffer Gets : $((plan_data["$plan_hash,buffer_gets"] / plan_data["$plan_hash,executions"]))"
log "- Average Disk Reads : $((plan_data["$plan_hash,disk_reads"] / plan_data["$plan_hash,executions"]))"
log "- Average IO wait : $((plan_data["$plan_hash,io_wait"] / plan_data["$plan_hash,executions"]))"

# Find the good and bad plan based on the elapse time
if [[ "$avg_elapsed" -gt "$max_elapsed" ]]; then
bad_plan="$plan_hash"
max_elapsed="$avg_elapsed"
fi
if [[ "$avg_elapsed" -lt "$min_elapsed" ]]; then
good_plan="$plan_hash"
min_elapsed="$avg_elapsed"
fi

done

# Summary
if [ $plan_count -eq 1 ]; then
log "Only one plan found. Considering $good_plan as the good plan."
bad_plan=""
else
log "Good plan: $good_plan, Average elapsed time: $min_elapsed"
log "Bad plan: $bad_plan, Average elapsed time: $max_elapsed"
log "The good plan is chosen because of the lowest average elapsed time"
fi

# Return values
echo "$good_plan"
echo "$bad_plan"
echo "$plan_count"

}

# Function to create a SQL Baseline
create_sql_baseline() {
local good_plan="$1"
log "Creating SQL baseline for SQL ID: $SQL_ID and plan_hash_value: $good_plan"
local log_file="create_sql_baseline.log"
local create_baseline_command="
DECLARE
l_plans_captured PLS_INTEGER;
BEGIN
l_plans_captured := DBMS_SPM.load_plans_from_awr(
sql_id => '$SQL_ID',
plan_hash_value => $good_plan,
time_limit => 1500
);
DBMS_OUTPUT.PUT_LINE('Number of plans loaded from AWR:' || l_plans_captured);
END;
/

SELECT SQL_HANDLE, PLAN_NAME, ENABLED, ACCEPTED FROM DBA_SQL_PLAN_BASELINES WHERE sql_id = '$SQL_ID';

DECLARE
CURSOR baseline_cursor IS
SELECT sql_handle, plan_name from DBA_SQL_PLAN_BASELINES WHERE sql_id = '$SQL_ID';
v_plan_count PLS_INTEGER;
v_altered PLS_INTEGER;
BEGIN
FOR rec IN baseline_cursor LOOP
-- ALTER_SQL_PLAN_BASELINE is a function, so call it directly rather than via EXECUTE IMMEDIATE 'EXEC ...'
v_altered := DBMS_SPM.alter_sql_plan_baseline(sql_handle => rec.sql_handle, plan_name => rec.plan_name, attribute_name => 'ENABLED', attribute_value => 'YES');
v_plan_count := DBMS_SPM.evolve_sql_plan_baseline (sql_handle => rec.sql_handle);
DBMS_OUTPUT.PUT_LINE('Number of baseline plans that evolved for handle ' || rec.sql_handle ||' : '|| v_plan_count );
END LOOP;
END;
/
SELECT SQL_HANDLE, PLAN_NAME, ENABLED, ACCEPTED FROM DBA_SQL_PLAN_BASELINES WHERE sql_id = '$SQL_ID';
"
execute_sqlplus "$create_baseline_command" "$log_file"

}
# Function to create a SQL Baseline
rollback_sql_baseline() {
log "Rollback SQL baseline for SQL ID: $SQL_ID"
local log_file="rollback_sql_baseline.log"
local create_baseline_command="
DECLARE
CURSOR baseline_cursor IS
SELECT sql_handle, plan_name from DBA_SQL_PLAN_BASELINES WHERE sql_id = '$SQL_ID';
v_dropped PLS_INTEGER;
BEGIN
FOR rec IN baseline_cursor LOOP
-- DROP_SQL_PLAN_BASELINE is a function, so call it directly rather than via EXECUTE IMMEDIATE 'EXEC ...'
v_dropped := DBMS_SPM.drop_sql_plan_baseline(sql_handle => rec.sql_handle, plan_name => rec.plan_name);
END LOOP;
END;
/
"
execute_sqlplus "$create_baseline_command" "$log_file"

}
# --- Main Script ---

log "Starting SQL plan analysis and SQL baseline creation script..."
# Handle rollback mode, or override the default SQL_ID if one is passed as a parameter
if [ "$1" == "rollback" ]; then
[ -n "$2" ] && SQL_ID="$2"
rollback_sql_baseline
exit 0
elif [ -n "$1" ]; then
SQL_ID="$1"
log "SQL_ID set to: $SQL_ID by parameter"
fi

# 1. Generate performance data using awrms.sql
log "Executing awrms.sql to get performance data for SQL ID: $SQL_ID"
awrms_output_file="awrms_output.txt"
sqlplus -s "$SQL_USER/$SQL_PASSWORD@$SQL_CONNECT_STRING" @$AWRMS_SQL "$SQL_ID" "$DAYS_HISTORY" "$INTERVAL_HOURS" > "$awrms_output_file" 2>&1
if [ $? -ne 0 ]; then
log "Error while executing $AWRMS_SQL"
exit 1
fi
log "awrms.sql output stored in: $awrms_output_file"

# 2. Analyze the performance data
# analyze_plan_performance logs progress to stdout and prints the good plan, bad plan
# and plan count as its last three lines, so capture its output once and read those.
analysis_output=$(analyze_plan_performance "$awrms_output_file")
good_plan=$(echo "$analysis_output" | tail -n 3 | head -n 1)
bad_plan=$(echo "$analysis_output" | tail -n 3 | head -n 2 | tail -n 1)
plan_count=$(echo "$analysis_output" | tail -n 1)

# 3. Ask DBA for confirmation
log "Analysis Summary for SQL ID: $SQL_ID:"
log "Total number of plan : $plan_count"
if [ $plan_count -gt 1 ]; then
log "Good Plan (Plan Hash Value): $good_plan"
log "Bad Plan (Plan Hash Value): $bad_plan"
fi
read -r -p "Do you want to proceed and create a SQL Baseline for the good plan ($good_plan) [y/n]? " proceed

if [[ "$proceed" =~ ^[Yy]$ ]]; then
log "Proceeding with SQL Baseline creation..."
create_sql_baseline "$good_plan"
log "SQL Baseline created successfully."
log "Rollback command: ./automate_sql_plan.sh rollback $SQL_ID"
else
log "SQL Baseline creation aborted."
log "Action Plan:"
log "- Manually create the SQL Baseline for SQL ID: $SQL_ID and good plan_hash_value: $good_plan using the following commands:"
log " DECLARE l_plans_captured PLS_INTEGER; BEGIN l_plans_captured := DBMS_SPM.load_plans_from_awr(sql_id => '$SQL_ID', plan_hash_value => $good_plan, time_limit => 1500); END; /"
log " SELECT SQL_HANDLE, PLAN_NAME, ENABLED, ACCEPTED FROM DBA_SQL_PLAN_BASELINES WHERE sql_id = '$SQL_ID';"
log " -- Change the plan to ENABLED and run the evoluate"
log " Rollback plan:"
log " ./automate_sql_plan.sh rollback $SQL_ID"
fi

log "SQL plan analysis and SQL baseline creation script completed."

exit 0

Key Improvements and Explanations:

  1. Automatic Plan Identification:

    • The analyze_plan_performance() function is significantly enhanced.
    • It now dynamically identifies the good and bad plans based on the ELAPSED_TIME_MS_1EXEC (average elapsed time) values.
    • It uses an associative array (plan_data) to store and track metrics for each plan.
    • It calculates the average elapsed time, CPU time, buffer gets, disk reads, and IO wait for each plan.
    • It considers the plan with the lowest average elapsed time the good plan.
    • It considers the plan with the highest average elapsed time the bad plan (a minimal sketch of this selection logic follows this list).
    • It handles the case where only one plan is found.
    • It returns the good_plan, the bad_plan, and the total plan_count for the main script to use.
  2. DBA Interaction:

    • The script now includes a clear prompt asking the DBA if they want to proceed with creating the SQL Baseline.
    • If the DBA chooses "no," it provides the necessary SQL commands for manual intervention and for rollback.
  3. Rollback Functionality:

    • rollback_sql_baseline() function is added to allow easy rollback.
    • The rollback_sql_baseline function drops all of the SQL plan baselines related to the given SQL_ID.
    • The rollback will be run by calling the script with rollback and the SQL_ID as parameter (e.g. ./automate_sql_plan.sh rollback 1dqhgbyk89v4p).
  4. Clear Output and Logging:

    • The log messages are more informative.
    • The DBA is provided with a clear summary of the plan analysis.
    • The reasoning for choosing the good plan is explicitly stated.
    • The log will now show which parameter is used to run the script
  5. Robustness:

    • Error handling is improved.
    • The script handles cases where only one plan is found in the AWR data.
  6. Modularity:

    • The code is organized into well-defined functions.
  7. Parameter

    • Now you can use a parameter to change the SQL_ID (e.g. ./automate_sql_plan.sh 2abcderg322)
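
A minimal sketch of that selection logic, kept separate from the full script (the column positions are an assumption and must match the awrms.sql layout in use; here the third whitespace-separated column is the plan hash, the fourth is executions, and the sixth is elapsed ms per execution, as in the sample output further down):

#!/bin/bash
# Minimal sketch: pick the plan hashes with the lowest/highest average elapsed time
# from an awrms-style output file passed as $1.
declare -A total_elapsed total_execs
while read -r _ _ plan_hash execs _ elapsed_ms _; do
[[ "$plan_hash" =~ ^[0-9]+$ ]] || continue   # skip headers, separators, blank lines
total_elapsed[$plan_hash]=$(( ${total_elapsed[$plan_hash]:-0} + ${elapsed_ms%.*} * execs ))
total_execs[$plan_hash]=$(( ${total_execs[$plan_hash]:-0} + execs ))
done < "$1"
good="" bad="" min=999999999999 max=0
for p in "${!total_execs[@]}"; do
avg=$(( ${total_elapsed[$p]} / ${total_execs[$p]} ))
(( avg < min )) && { min=$avg; good=$p; }
(( avg > max )) && { max=$avg; bad=$p; }
done
echo "Good plan: $good (avg ${min} ms)  Bad plan: $bad (avg ${max} ms)"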

How to Use (Updated):

  1. Save: Save the code as a .sh file (e.g., automate_sql_plan.sh).
  2. Make Executable: chmod +x automate_sql_plan.sh
  3. Configure:
    • Update the configuration variables at the top of the script (AWRMS_SQL, SQL_USER, SQL_PASSWORD, SQL_CONNECT_STRING).
  4. Run:
    • To run for a specific sql_id : ./automate_sql_plan.sh <your_sql_id> (e.g., ./automate_sql_plan.sh 2abcderg322)
    • To rollback, run the script with rollback and the sql_id (e.g. ./automate_sql_plan.sh rollback 1dqhgbyk89v4p).
    • If you want to use the default value, then just run ./automate_sql_plan.sh
  5. Review: Review the analysis summary provided by the script.
  6. Confirm: Respond to the prompt to proceed with SQL Baseline creation or to abort.
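
Putting those steps together, a typical session might look like this (sketch only; the SQL_ID and rollback call use the example values from this post):

chmod +x automate_sql_plan.sh
./automate_sql_plan.sh 1dqhgbyk89v4p          # analyze this SQL_ID and, after confirming, create the baseline
./automate_sql_plan.sh rollback 1dqhgbyk89v4p # later, drop the baselines again if needed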

Key improvements in detail:

  • Automatic Plan Analysis: The script now fully automates the selection of the "good" and "bad" plans based on average elapsed time.
  • DBA Confirmation: The DBA is actively involved in the process. They are presented with the analysis and must decide if they want to proceed.
  • Rollback: The DBA now has a simple rollback option.
  • Clear Output: The output is more concise and focused on the relevant information.
  • Parameter: You can now pass the SQL_ID as a parameter, to override the default value.
  • Associative array: Use an associative array to store and use the plan hash values data.
  • Analyze function: The analyze function is improved; it now processes all of the data rather than only checking whether a given plan is present.
  • Summary: The script prints an analysis summary (plan count, good plan, bad plan) and asks for confirmation before creating the baseline.

@awrms.sql 1dqhgbyk89v4p 1 1

TIME PLAN_HASH_VALUE EXECUTIONS ELAPSED_TIME_S_TOTAL ELAPSED_TIME_MS_1EXEC CPU_TIME_MS_1EXEC ROWS_PROCESSED_1EXEC BUFFER_GETS_1EXEC DISK_READS_1EXEC IO_WIT_EXEC
------------------- --------------- ----------- -------------------- --------------------- ----------------- -------------------- ----------------- ----------------- -----------
25.04.2023 00:00:00 3853397846 21 3407.148 162245.13 161722.2 10.048 9715718.333 2114.667 54.768
25.04.2023 01:00:00 3853397846 20 3424.124 171206.223 170412.894 13.550 9754044.100 2115.150 53.351
25.04.2023 02:00:00 3853397846 20 3361.094 168054.677 167443.838 7.650 10008345.150 2220.550 53.559
25.04.2023 03:00:00 3853397846 20 3424.647 171232.37 169384.755 9.100 9362439.000 2008.350 64.403
25.04.2023 04:00:00 3853397846 18 3430.424 190579.129 189038.217 11.389 10046267.889 2185.722 73.699
25.04.2023 05:00:00 3853397846 22 3331.991 151454.128 151269.6 12.273 8837770.545 1736.500 48.713
25.04.2023 06:00:00 3853397846 22 3379.115 153596.137 153196.884 8.545 9193439.591 1883.227 55.84
25.04.2023 07:00:00 3853397846 6 850.255 141709.138 141533.377 12.000 8695273.667 1501.500 31.727
25.04.2023 09:00:00 939273138 1 2927.947 2927946.87 2581351.42 .000 250921629.000 615.000 930.08
25.04.2023 13:00:00 939273138 2 2641.373 1320686.36 1102490.04 .000 97665183.000 .000 0
25.04.2023 14:00:00 939273138 3 3460.750 1153583.42 1053819.11 .000 115221497.333 .333 .136

The SQL plan changed for an insert statement, which then took forever to complete; I switched the SQL plan back to the previous plan, and after that the insert statement ran fine.
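
For reference, the manual equivalent of what the scripts automate is to load the previously good plan from AWR into a SQL plan baseline; a minimal sqlplus sketch mirroring the DBMS_SPM.load_plans_from_awr call used above, with the SQL_ID and plan hash from this example (verify the argument list against your Oracle version):

sqlplus -s "/ as sysdba" <<'EOF'
SET SERVEROUTPUT ON
DECLARE
  l_loaded PLS_INTEGER;
BEGIN
  -- pin the previously good plan (3853397846) for the slow insert's SQL_ID
  l_loaded := DBMS_SPM.load_plans_from_awr(
                sql_id          => '1dqhgbyk89v4p',
                plan_hash_value => 3853397846,
                time_limit      => 1500);
  DBMS_OUTPUT.PUT_LINE('Plans loaded: ' || l_loaded);
END;
/
EXIT
EOF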


--> awrms.sql
set ver off pages 50000 lines 260 tab off autotrace off
undef sql_id
undef days_history
undef interval_hours
def sql_id="&1"
def days_history="&2"
def interval_hours="&3"
col time for a19
col executions for 9999999999
col rows_processed_1exec for 999999999999999.999
col elapsed_time_s_total for 9999999.999
col elapsed_time_s_1exec for 9999999.999
col cpu_time_s_1exec for 9999999.999
col iowait_s_1exec for 9999999.999
col clwait_s_1exec for 9999999.999
col apwait_s_1exec for 9999999.999
col ccwait_s_1exec for 9999999.999
col plsexec_time_s_1exec for 9999999.999
col javexec_time_s_1exec for 9999999.999
col buffer_gets_1exec for 999999999999.999
col disk_reads_1exec for 999999999999.999
col direct_writes_1exec for 999999999999.999
select to_char(trunc(sysdate-&days_history+1)+trunc((cast(hs.begin_interval_time as date)-(trunc(sysdate-&days_history+1)))*24/(&interval_hours))*(&interval_hours)/24,'dd.mm.yyyy hh24:mi:ss') time,
hss.plan_hash_value,round(nvl(sum(hss.executions_delta/(&interval_hours*60*60)),0)) exe_sec,
nvl(sum(hss.executions_delta),0) executions,
round(sum(hss.elapsed_time_delta)/1000000,3) elapsed_time_s_total,
round(sum(hss.elapsed_time_delta)/1000/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) elapsed_time_ms_1exec,
round(sum(hss.cpu_time_delta)/1000/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) cpu_time_ms_1exec,
round(sum(hss.rows_processed_delta)/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) rows_processed_1exec,
round(sum(hss.buffer_gets_delta)/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) buffer_gets_1exec,
round(sum(hss.disk_reads_delta)/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) disk_reads_1exec,
round(sum(hss.IOWAIT_DELTA)/1000/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) IO_wit_exec
from SYS.WRH$_SQLSTAT hss, (select snap_id, min(hs2.begin_interval_time) begin_interval_time from SYS.WRM$_SNAPSHOT hs2 group by snap_id) hs
where hss.sql_id(+)='&sql_id'
and hss.snap_id(+)=hs.snap_id
and hs.begin_interval_time>=trunc(sysdate)-&days_history+1
group by trunc(sysdate-&days_history+1)+trunc((cast(hs.begin_interval_time as date)-(trunc(sysdate-&days_history+1)))*24/(&interval_hours))*(&interval_hours)/24,hss.plan_hash_value having nvl(sum(hss.executions_delta),0) > 0
order by trunc(sysdate-&days_history+1)+trunc((cast(hs.begin_interval_time as date)-(trunc(sysdate-&days_history+1)))*24/(&interval_hours))*(&interval_hours)/24;


#!/bin/bash

# Script to automate SQL plan analysis, identification of good/bad plans, and creation of a SQL Baseline.

# --- Configuration ---
SQL_ID="1dqhgbyk89v4p" # SQL ID of the problematic statement
DAYS_HISTORY="1" # Number of days of AWR history to analyze
INTERVAL_HOURS="1" # Time interval granularity (in hours)
GOOD_PLAN_HASH_VALUE="3853397846" # manually set this value after analyzing the results.
BAD_PLAN_HASH_VALUE="939273138" # manually set this value after analyzing the results.
AWRMS_SQL="awrms.sql" # the name of the awrms script
SQL_USER="your_db_user" # Database user with required privileges
SQL_PASSWORD="your_db_password" # Database user's password
SQL_CONNECT_STRING="your_db_connect_string" # TNS alias or connection string

# --- Helper Functions ---

# Function to check if a command is available
command_exists() {
command -v "$1" >/dev/null 2>&1
}

# Function to log messages
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $@"
}

# Check if required command are available
if ! command_exists sqlplus; then
log "Error: sqlplus is not installed or not in PATH. Please install it and try again."
exit 1
fi

# Function to execute SQL scripts and log the output
execute_sqlplus() {
local sql_command="$1"
local log_file="$2"

log "Executing SQL command: $sql_command"
sqlplus -s "$SQL_USER/$SQL_PASSWORD@$SQL_CONNECT_STRING" <<EOF > "$log_file" 2>&1
SET FEEDBACK ON
SET SERVEROUTPUT ON
$sql_command
EXIT;
EOF

if [ $? -ne 0 ]; then
log "Error executing SQL command. Check log file: $log_file"
exit 1
fi

log "SQL command output logged to: $log_file"
}

# Function to analyze the performance data from awrms.sql
analyze_plan_performance() {
local awrms_output_file="$1"
local good_plan_found="false"
local bad_plan_found="false"

log "Analyzing performance data from: $awrms_output_file"

# Check if the file exists
if [ ! -f "$awrms_output_file" ]; then
log "Error: File not found: $awrms_output_file"
exit 1
fi

# search for good plan hash value and bad plan hash value, if found update the variable value
while IFS= read -r line; do
if [[ "$line" == *"PLAN_HASH_VALUE"* ]]; then
continue
elif [[ "$line" == *"$GOOD_PLAN_HASH_VALUE"* ]]; then
good_plan_found="true"
elif [[ "$line" == *"$BAD_PLAN_HASH_VALUE"* ]]; then
bad_plan_found="true"
fi
done < "$awrms_output_file"

if [ "$good_plan_found" = "true" ]; then
log "Good plan hash value ($GOOD_PLAN_HASH_VALUE) is present in the output."
else
log "Warning: Good plan hash value ($GOOD_PLAN_HASH_VALUE) was not found in the output."
fi
if [ "$bad_plan_found" = "true" ]; then
log "Bad plan hash value ($BAD_PLAN_HASH_VALUE) is present in the output."
else
log "Warning: Bad plan hash value ($BAD_PLAN_HASH_VALUE) was not found in the output."
fi
}

# Function to create a SQL Baseline
create_sql_baseline() {
log "Creating SQL baseline for SQL ID: $SQL_ID and plan_hash_value: $GOOD_PLAN_HASH_VALUE"
local log_file="create_sql_baseline.log"
local create_baseline_command="
DECLARE
l_plans_captured PLS_INTEGER;
BEGIN
l_plans_captured := DBMS_SPM.load_plans_from_awr(
sql_id => '$SQL_ID',
plan_hash_value => $GOOD_PLAN_HASH_VALUE,
time_limit => 1500
);
DBMS_OUTPUT.PUT_LINE('Number of plans loaded from AWR:' || l_plans_captured);
END;
/

SELECT SQL_HANDLE, PLAN_NAME, ENABLED, ACCEPTED FROM DBA_SQL_PLAN_BASELINES WHERE sql_id = '$SQL_ID';

DECLARE
CURSOR baseline_cursor IS
SELECT sql_handle, plan_name from DBA_SQL_PLAN_BASELINES WHERE sql_id = '$SQL_ID';
v_plan_count PLS_INTEGER;
BEGIN
FOR rec IN baseline_cursor LOOP
EXECUTE IMMEDIATE 'EXEC DBMS_SPM.ALTER_SQL_PLAN_BASELINE(SQL_HANDLE=>'''''||rec.sql_handle||''''' , PLAN_NAME=>''''''|| rec.plan_name ||''''' , attribute_name => ''ENABLED'', attribute_value => ''YES'')';
v_plan_count := DBMS_SPM.evolve_sql_plan_baseline (sql_handle => rec.sql_handle);
DBMS_OUTPUT.PUT_LINE('Number of baseline plan that evolved for handle ' || rec.sql_handle ||' : '|| v_plan_count );
END LOOP;
END;
/
SELECT SQL_HANDLE, PLAN_NAME, ENABLED, ACCEPTED FROM DBA_SQL_PLAN_BASELINES WHERE sql_id = '$SQL_ID';
"
execute_sqlplus "$create_baseline_command" "$log_file"
}

# --- Main Script ---

log "Starting SQL plan analysis and SQL baseline creation script..."

# 1. Generate performance data using awrms.sql
log "Executing awrms.sql to get performance data for SQL ID: $SQL_ID"
awrms_output_file="awrms_output.txt"
sqlplus -s "$SQL_USER/$SQL_PASSWORD@$SQL_CONNECT_STRING" @$AWRMS_SQL "$SQL_ID" "$DAYS_HISTORY" "$INTERVAL_HOURS" > "$awrms_output_file" 2>&1
if [ $? -ne 0 ]; then
log "Error while executing $AWRMS_SQL"
exit 1
fi
log "awrms.sql output stored in: $awrms_output_file"

# 2. Analyze the performance data
analyze_plan_performance "$awrms_output_file"

# 3. Create SQL Baseline if the good plan was found.
if [[ $(grep -c "$GOOD_PLAN_HASH_VALUE" "$awrms_output_file") -ge 1 ]]; then
create_sql_baseline
else
log "Skipping SQL baseline creation because the good plan was not found. Please verify the data and adjust the GOOD_PLAN_HASH_VALUE"
exit 1
fi

log "SQL plan analysis and SQL baseline creation script completed."

exit 0
 
 
== AUTO BASE LINE 

 #!/bin/bash


# Script to analyze SQL execution plans for changes and bad performance in the last 4 hours,
# providing recommendations for DBAs, and generating SQL Baseline creation/rollback commands.

# --- Configuration ---
DAYS_HISTORY="1" # Number of days of AWR history to analyze (used for awrms.sql)
INTERVAL_HOURS="1" # Time interval granularity (in hours) for awrms.sql
ANALYSIS_HOURS="4" # Number of hours to look back for plan changes and bad performance
AWRMS_SQL="awrms.sql" # The name of the awrms script
SQL_USER="your_db_user" # Database user with required privileges
SQL_PASSWORD="your_db_password" # Database user's password
SQL_CONNECT_STRING="your_db_connect_string" # TNS alias or connection string
OUTPUT_FILE="sql_plan_analysis_report.txt" # Output file for DBA analysis

# --- Helper Functions ---

# Function to check if a command is available
command_exists() {
command -v "$1" >/dev/null 2>&1
}

# Function to log messages
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $@"
}

# Check if required command are available
if ! command_exists sqlplus; then
log "Error: sqlplus is not installed or not in PATH. Please install it and try again."
exit 1
fi

# Function to execute SQL scripts and log the output
execute_sqlplus() {
local sql_command="$1"
local log_file="$2"

log "Executing SQL command: $sql_command"
sqlplus -s "$SQL_USER/$SQL_PASSWORD@$SQL_CONNECT_STRING" <<EOF > "$log_file" 2>&1
SET FEEDBACK ON
SET SERVEROUTPUT ON
$sql_command
EXIT;
EOF

if [ $? -ne 0 ]; then
log "Error executing SQL command. Check log file: $log_file"
exit 1
fi

log "SQL command output logged to: $log_file"
}

# Function to get SQL IDs with plan changes and bad performance
get_problematic_sql_ids() {
log "Retrieving SQL IDs with plan changes and bad performance in the last $ANALYSIS_HOURS hours..."
local log_file="get_problematic_sql_ids.log"
local sql_command="
SET LINESIZE 200
SET PAGESIZE 5000
SELECT DISTINCT sql_id
FROM (
SELECT
h.sql_id,
h.plan_hash_value,
LAG(h.plan_hash_value, 1, -1) OVER (PARTITION BY h.sql_id ORDER BY h.snap_id) AS prev_plan_hash_value,
h.elapsed_time_delta / DECODE(h.executions_delta, 0, 1, h.executions_delta) as avg_elapsed_time,
LAG(h.elapsed_time_delta / DECODE(h.executions_delta, 0, 1, h.executions_delta), 1, -1) OVER (PARTITION BY h.sql_id ORDER BY h.snap_id) as prev_avg_elapsed_time
FROM
dba_hist_sqlstat h
JOIN
dba_hist_snapshot s ON h.snap_id = s.snap_id AND h.dbid = s.dbid AND h.instance_number = s.instance_number
WHERE
s.end_interval_time >= SYSDATE - $ANALYSIS_HOURS/24 AND h.executions_delta >0
)
WHERE plan_hash_value != prev_plan_hash_value
AND avg_elapsed_time > prev_avg_elapsed_time*1.2; -- current average execution is 20% slower than previous
"
execute_sqlplus "$sql_command" "$log_file"
cat "$log_file" | grep -v '^$' | grep -v 'SQL_ID' | grep -v '-----'
}

# Function to analyze the performance data from awrms.sql
analyze_plan_performance() {
local awrms_output_file="$1"
local -A plan_data # associative array to store per-plan metric totals
local plan_hashes=() # ordered list of distinct plan hash values
local plan_count=0
local bad_plan=""
local good_plan=""
local max_elapsed=0
local min_elapsed=9999999999999999

log "Analyzing performance data from: $awrms_output_file"

# Check if the file exists
if [ ! -f "$awrms_output_file" ]; then
log "Error: File not found: $awrms_output_file"
exit 1
fi

# Read each line of awrms output
while IFS= read -r line; do
if [[ "$line" == *"PLAN_HASH_VALUE"* ]]; then
# Ignore the header line
continue
else
# Extract relevant fields using awk
# NOTE: verify these field positions against the actual awrms.sql output
# (the TIME column counts as two whitespace-separated fields); int() truncates
# decimals so the values can be used in bash integer arithmetic below.
plan_hash=$(echo "$line" | awk '{print $2}')
elapsed_time=$(echo "$line" | awk '{print int($5)}')
cpu_time=$(echo "$line" | awk '{print int($6)}')
buffer_gets=$(echo "$line" | awk '{print int($8)}')
disk_reads=$(echo "$line" | awk '{print int($9)}')
executions=$(echo "$line" | awk '{print int($3)}')
io_wait=$(echo "$line" | awk '{print int($10)}')

# Check if the line is not empty
if [[ -n "$plan_hash" ]] && [[ "$plan_hash" != "-" ]]; then
# Update plan count if the plan is new
if [[ -z "${plan_data["$plan_hash"]}" ]]; then
plan_data["$plan_hash"]=1
plan_hashes+=("$plan_hash")
plan_count=$((plan_count + 1))
log "New plan found: $plan_hash"
fi

# add data to associative array
plan_data["$plan_hash,elapsed_time"]=$((plan_data["$plan_hash,elapsed_time"] + elapsed_time ))
plan_data["$plan_hash,cpu_time"]=$((plan_data["$plan_hash,cpu_time"] + cpu_time ))
plan_data["$plan_hash,buffer_gets"]=$((plan_data["$plan_hash,buffer_gets"] + buffer_gets ))
plan_data["$plan_hash,disk_reads"]=$((plan_data["$plan_hash,disk_reads"] + disk_reads ))
plan_data["$plan_hash,executions"]=$((plan_data["$plan_hash,executions"] + executions))
plan_data["$plan_hash,io_wait"]=$((plan_data["$plan_hash,io_wait"] + io_wait))
fi

fi
done < "$awrms_output_file"

# Check if no plan was found
if [ $plan_count -eq 0 ]; then
log "No plan found, please check the awrms output"
exit 1
fi

# Analyze plans data
log "Analysis of the plan"
for plan_hash in "${plan_hashes[@]}"; do
local avg_elapsed=$((plan_data["$plan_hash,elapsed_time"] / plan_data["$plan_hash,executions"] ))
log "Plan hash value : $plan_hash"
log "- Average Elapsed Time : $avg_elapsed ms"
log "- Average CPU Time : $((plan_data["$plan_hash,cpu_time"] / plan_data["$plan_hash,executions"])) ms"
log "- Average Buffer Gets : $((plan_data["$plan_hash,buffer_gets"] / plan_data["$plan_hash,executions"]))"
log "- Average Disk Reads : $((plan_data["$plan_hash,disk_reads"] / plan_data["$plan_hash,executions"]))"
log "- Average IO wait : $((plan_data["$plan_hash,io_wait"] / plan_data["$plan_hash,executions"]))"

# Find the good and bad plan based on the elapse time
if [[ "$avg_elapsed" -gt "$max_elapsed" ]]; then
bad_plan="$plan_hash"
max_elapsed="$avg_elapsed"
fi
if [[ "$avg_elapsed" -lt "$min_elapsed" ]]; then
good_plan="$plan_hash"
min_elapsed="$avg_elapsed"
fi

done

# Summary
if [ $plan_count -eq 1 ]; then
log "Only one plan found. Considering $good_plan as the good plan."
bad_plan=""
else
log "Good plan: $good_plan, Average elapsed time: $min_elapsed"
log "Bad plan: $bad_plan, Average elapsed time: $max_elapsed"
log "The good plan is chosen because of the lowest average elapsed time"
fi

# Return values
echo "$good_plan"
echo "$bad_plan"
echo "$plan_count"

}
# Function to modify the awrms.sql
modify_awrms_sql() {
log "Modifying the awrms.sql script"
# Replace the contents of the script with the new one.
# Quote the heredoc delimiter so that $ in the SQL (e.g. SYS.WRH$_SQLSTAT) is not expanded by the shell
cat << 'EOF' > "$AWRMS_SQL"
set ver off pages 50000 lines 260 tab off autotrace off
undef sql_id
undef days_history
undef interval_hours
def sql_id="&1"
def days_history="&2"
def interval_hours="&3"
col time for a19
col executions for 9999999999
col rows_processed_1exec for 999999999999999.999
col elapsed_time_s_total for 9999999.999
col elapsed_time_s_1exec for 9999999.999
col cpu_time_s_1exec for 9999999.999
col iowait_s_1exec for 9999999.999
col clwait_s_1exec for 9999999.999
col apwait_s_1exec for 9999999.999
col ccwait_s_1exec for 9999999.999
col plsexec_time_s_1exec for 9999999.999
col javexec_time_s_1exec for 9999999.999
col buffer_gets_1exec for 999999999999.999
col disk_reads_1exec for 999999999999.999
col direct_writes_1exec for 999999999999.999
select to_char(trunc(sysdate-&days_history+1)+trunc((cast(hs.begin_interval_time as date)-(trunc(sysdate-&days_history+1)))*24/(&interval_hours))*(&interval_hours)/24,'dd.mm.yyyy hh24:mi:ss') time,
hss.plan_hash_value,
nvl(sum(hss.executions_delta),0) executions,
round(sum(hss.elapsed_time_delta)/1000000,3) elapsed_time_s_total,
round(sum(hss.elapsed_time_delta)/1000/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) elapsed_time_ms_1exec,
round(sum(hss.cpu_time_delta)/1000/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) cpu_time_ms_1exec,
round(sum(hss.rows_processed_delta)/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) rows_processed_1exec,
round(sum(hss.buffer_gets_delta)/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) buffer_gets_1exec,
round(sum(hss.disk_reads_delta)/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) disk_reads_1exec,
round(sum(hss.IOWAIT_DELTA)/1000/decode(sum(hss.executions_delta),0,null,sum(hss.executions_delta)),3) IO_wit_exec
from SYS.WRH$_SQLSTAT hss, (select snap_id, min(hs2.begin_interval_time) begin_interval_time from SYS.WRM$_SNAPSHOT hs2 group by snap_id) hs
where hss.sql_id(+)='&sql_id'
and hss.snap_id(+)=hs.snap_id
and hs.begin_interval_time>=trunc(sysdate)-&days_history+1
group by trunc(sysdate-&days_history+1)+trunc((cast(hs.begin_interval_time as date)-(trunc(sysdate-&days_history+1)))*24/(&interval_hours))*(&interval_hours)/24,hss.plan_hash_value having nvl(sum(hss.executions_delta),0) > 0
order by trunc(sysdate-&days_history+1)+trunc((cast(hs.begin_interval_time as date)-(trunc(sysdate-&days_history+1)))*24/(&interval_hours))*(&interval_hours)/24;
EOF
}

# Function to create a SQL Baseline
create_sql_baseline() {
local sql_id="$1"
local good_plan="$2"
log "Creating SQL baseline for SQL ID: $sql_id and plan_hash_value: $good_plan"
local log_file="create_sql_baseline_$sql_id.log"
local create_baseline_command="
DECLARE
l_plans_captured PLS_INTEGER;
BEGIN
l_plans_captured := DBMS_SPM.load_plans_from_awr(
sql_id => '$sql_id',
plan_hash_value => $good_plan,
time_limit => 1500
);
DBMS_OUTPUT.PUT_LINE('Number of plans loaded from AWR:' || l_plans_captured);
END;
/

SELECT SQL_HANDLE, PLAN_NAME, ENABLED, ACCEPTED FROM DBA_SQL_PLAN_BASELINES WHERE sql_id = '$sql_id';

DECLARE
CURSOR baseline_cursor IS
SELECT sql_handle, plan_name from DBA_SQL_PLAN_BASELINES WHERE sql_id = '$sql_id';
v_plan_count PLS_INTEGER;
v_altered PLS_INTEGER;
BEGIN
FOR rec IN baseline_cursor LOOP
-- ALTER_SQL_PLAN_BASELINE is a function, so call it directly rather than via EXECUTE IMMEDIATE 'EXEC ...'
v_altered := DBMS_SPM.alter_sql_plan_baseline(sql_handle => rec.sql_handle, plan_name => rec.plan_name, attribute_name => 'ENABLED', attribute_value => 'YES');
v_plan_count := DBMS_SPM.evolve_sql_plan_baseline (sql_handle => rec.sql_handle);
DBMS_OUTPUT.PUT_LINE('Number of baseline plans that evolved for handle ' || rec.sql_handle ||' : '|| v_plan_count );
END LOOP;
END;
/
SELECT SQL_HANDLE, PLAN_NAME, ENABLED, ACCEPTED FROM DBA_SQL_PLAN_BASELINES WHERE sql_id = '$sql_id';
"
execute_sqlplus "$create_baseline_command" "$log_file"

}
# Function to create a SQL Baseline
rollback_sql_baseline() {
local sql_id="$1"
log "Rollback SQL baseline for SQL ID: $sql_id"
local log_file="rollback_sql_baseline_$sql_id.log"
local create_baseline_command="
DECLARE
CURSOR baseline_cursor IS
SELECT sql_handle, plan_name from DBA_SQL_PLAN_BASELINES WHERE sql_id = '$sql_id';
v_dropped PLS_INTEGER;
BEGIN
FOR rec IN baseline_cursor LOOP
-- DROP_SQL_PLAN_BASELINE is a function, so call it directly rather than via EXECUTE IMMEDIATE 'EXEC ...'
v_dropped := DBMS_SPM.drop_sql_plan_baseline(sql_handle => rec.sql_handle, plan_name => rec.plan_name);
END LOOP;
END;
/
"
execute_sqlplus "$create_baseline_command" "$log_file"

}
# --- Main Script ---

log "Starting SQL plan analysis script..."

# 0. modify awrms script
modify_awrms_sql

# 1. Get problematic SQL IDs
problematic_sql_ids=$(get_problematic_sql_ids)

# 2. Analyze each problematic SQL ID
if [[ -z "$problematic_sql_ids" ]]; then
log "No problematic SQL IDs found in the"
else
log "Problematic SQL IDs found: $problematic_sql_ids"
echo "SQL Plan Analysis Report" > "$OUTPUT_FILE"
echo "==========================" >> "$OUTPUT_FILE"
echo "Date: $(date)" >> "$OUTPUT_FILE"
echo "Analysis Period: Last $ANALYSIS_HOURS hours" >> "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE"
for sql_id in $problematic_sql_ids; do
log "Analyzing SQL ID: $sql_id"
awrms_output_file="awrms_output_$sql_id.txt"
sqlplus -s "$SQL_USER/$SQL_PASSWORD@$SQL_CONNECT_STRING" @$AWRMS_SQL "$sql_id" "$DAYS_HISTORY" "$INTERVAL_HOURS" > "$awrms_output_file" 2>&1
if [ $? -ne 0 ]; then
log "Error while executing $AWRMS_SQL for SQL ID: $sql_id"
continue
fi
# analyze_plan_performance logs progress to stdout and prints the good plan, bad plan
# and plan count as its last three lines, so capture its output once and read those.
analysis_output=$(analyze_plan_performance "$awrms_output_file")
good_plan=$(echo "$analysis_output" | tail -n 3 | head -n 1)
bad_plan=$(echo "$analysis_output" | tail -n 3 | head -n 2 | tail -n 1)
plan_count=$(echo "$analysis_output" | tail -n 1)

echo "SQL ID: $sql_id" >> "$OUTPUT_FILE"
echo "-----------------" >> "$OUTPUT_FILE"
if [ "$plan_count" -gt "1" ]; then
echo " Number of plan found : $plan_count" >> "$OUTPUT_FILE"
echo " Good Plan (Plan Hash Value): $good_plan" >> "$OUTPUT_FILE"
echo " Bad Plan (Plan Hash Value): $bad_plan" >> "$OUTPUT_FILE"
echo " Reason: The good plan is chosen because of the lowest average elapsed time" >> "$OUTPUT_FILE"
echo " Recommendation: Create a SQL Baseline for the good plan." >> "$OUTPUT_FILE"
echo " SQL Baseline Creation Command:" >> "$OUTPUT_FILE"
echo " ./automate_sql_plan.sh baseline $sql_id $good_plan" >> "$OUTPUT_FILE"
echo " SQL Baseline Rollback Command:" >> "$OUTPUT_FILE"
echo " ./automate_sql_plan.sh rollback $sql_id" >> "$OUTPUT_FILE"
echo " Full awrms output: $awrms_output_file" >> "$OUTPUT_FILE"
else
echo " Only one plan found for this sql, no need to create a baseline for now" >> "$OUTPUT_FILE"
fi
echo "" >> "$OUTPUT_FILE"
done
log "Analysis complete. Please review the report: $OUTPUT_FILE"
log "you can use the following command to create the baseline : "
log " ./automate_sql_plan.sh baseline <sql_id> <good_plan_hash>"
log "Or use this command to rollback the baselines: "
log " ./automate_sql_plan.sh rollback <sql_id>"

fi

# Function to manage the baseline
manage_baseline(){
local action=$1
local sql_id=$2
local good_plan=$3

case $action in
baseline)
create_sql_baseline "$sql_id" "$good_plan"
;;
rollback)
rollback_sql_baseline "$sql_id"
;;
*)
log "Unknown action: $action"
;;
esac
}
#manage baseline if needed.
if [ "$1" == "baseline" ] || [ "$1" == "rollback" ] ; then
manage_baseline "$1" "$2" "$3"
fi

log "SQL plan analysis script completed."



## Explanation of the Added Code:
##
## Iterating Through Problematic SQL IDs:
##
## The for sql_id in $problematic_sql_ids; do loop iterates through each SQL ID returned by the get_problematic_sql_ids function.
## Running awrms.sql for Each SQL ID:
##
## Inside the loop, awrms.sql is executed for each sql_id, and the output is saved to a unique file (awrms_output_$sql_id.txt).
## Analyzing Performance Data:
##
## analyze_plan_performance is called to determine the good and bad plans for each SQL ID.
## Generating the Report:
##
## The code now creates the sql_plan_analysis_report.txt file.
## For each SQL ID, it records:
## The SQL ID itself.
## The good plan hash value.
## The bad plan hash value.
## Whether only one plan was found (in which case no baseline is recommended for now).
## The reason for choosing the good plan (lowest average elapsed time).
## Recommendation to create a SQL Baseline.
## The command to create the SQL Baseline (./automate_sql_plan.sh baseline <sql_id> <good_plan>).
## The command to rollback the SQL Baseline (./automate_sql_plan.sh rollback <sql_id>).
## The full awrms output file name.
## Managing Baselines:
##
## A manage_baseline function dispatches to the baseline creation or rollback routine; call the script with "baseline" or "rollback" as the first parameter.
## Logging:
##
## Log messages at the end explain how to use this script to create or roll back a baseline.
## No Problematic SQL IDs:
##
## If get_problematic_sql_ids returns nothing, the if [[ -z "$problematic_sql_ids" ]] block logs that no problematic SQL IDs were found.
## Error Handling:
##
## There's a check after running awrms.sql to see if it succeeded. If not, it logs an error and continues to the next SQL ID.
## How to Use:
##
## Save: Save the complete script (including the previous parts) as automate_sql_plan.sh (or a similar name).
## Make Executable: chmod +x automate_sql_plan.sh
## Configure:
## Set the configuration variables at the top of the script (credentials, paths, etc.).
## Run:
## To get the report, run ./automate_sql_plan.sh
## To create a baseline, run ./automate_sql_plan.sh baseline <sql_id> <good_plan_hash> (e.g. ./automate_sql_plan.sh baseline 1dqhgbyk89v4p 3853397846)
## To roll back, run ./automate_sql_plan.sh rollback <sql_id> (e.g. ./automate_sql_plan.sh rollback 1dqhgbyk89v4p)
##
## Review: Check the sql_plan_analysis_report.txt file. It will list the problematic SQL IDs, the good and bad plans, and the SQL commands to create/rollback baselines.
##
## Execute: The DBA can run the generated commands to create the baselines; it is best to test them in a non-production environment first.
##
## This revised script provides an automated way to identify and analyze problematic SQL plans and gives the DBA the commands needed to fix the issue.

 

 

Updated Script: validate_db_standards.sh

#!/bin/bash

# Configuration
DB_TYPE=$1                  # Pass "RAC" or "SI" (Single Instance) as an argument
DB_SID=$2                  # Pass the SID or DBNAME as the second argument
STANDARDS_FILE="standards.txt"  # File containing expected outputs
DETAILED_REPORT="detailed_report.txt"
SUMMARY_REPORT="summary_report.txt"
LOGIN_STRING="/ as sysdba"

# Set Oracle environment variables dynamically
export ORACLE_SID=$DB_SID

# Initialize reports
> $DETAILED_REPORT
> $SUMMARY_REPORT

# Function to log detailed output
log_detail() {
    echo -e "$1" >> $DETAILED_REPORT   # -e so the \n escapes used in compare_with_standards are expanded
}

# Function to log summary output
log_summary() {
    echo "$1" >> $SUMMARY_REPORT
}

# Function to compare output with standards
compare_with_standards() {
    local query_output=$1
    local expected_output=$2
    local description=$3

    if [ "$query_output" == "$expected_output" ]; then
        log_summary "SUCCESS: $description"
        log_detail "SUCCESS: $description\nExpected: $expected_output\nActual: $query_output\n"
    else
        log_summary "FAILURE: $description"
        log_detail "FAILURE: $description\nExpected: $expected_output\nActual: $query_output\n"
    fi
}

# Function to execute SQL query and compare with standards
execute_query() {
    local query=$1
    local description=$2
    local expected_output=$(grep -A 1 "$description" $STANDARDS_FILE | tail -n 1)

    log_detail "Executing Query: $query"
    query_output=$(sqlplus -s $LOGIN_STRING <<EOF
    SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
    $query
    EXIT;
EOF
    )

    compare_with_standards "$query_output" "$expected_output" "$description"
}

# Main script execution
log_detail "Starting Database Validation Script for SID/DBNAME: $DB_SID"
log_summary "Database Validation Summary Report for SID/DBNAME: $DB_SID"

# Execute queries and validate
execute_query "select * from gv\$encryption_wallet;" "GV\$ENCRYPTION_WALLET"
execute_query "select tablespace_name, encrypted from dba_tablespaces;" "DBA_TABLESPACES"
execute_query "select db_unique_name from v\$database;" "DB_UNIQUE_NAME"
execute_query "select * from v\$passwordfile_info;" "V\$PASSWORDFILE_INFO"
execute_query "SELECT comp_id, comp_name, STATUS, version FROM dba_registry where status = 'INVALID';" "DBA_REGISTRY_INVALID"
execute_query "select status from v\$instance;" "V\$INSTANCE_STATUS"
execute_query "select flashback_on from v\$database;" "FLASHBACK_ON"
execute_query "select * from v\$block_change_tracking;" "V\$BLOCK_CHANGE_TRACKING"

execute_query "SELECT NAME FROM v\$parameter WHERE name IN (
'_adg_parselocktimeout',
'audit_file_dest',
'audit_sys_operations',
'audit_trail',
'db_block_size',
'db_create_file_dest',
'db_create_online_log_dest_1',
'db_create_online_log_dest_2',
'db_domain',
'db_files',
'db_recovery_file_dest',
'db_recovery_file_dest_size',
'instance_name',
'log_archive_dest_1',
'open_cursors',
'pga_aggregate_limit',
'pga_aggregate_target',
'processes',
'query_rewrite_enabled',
'recyclebin',
'redo_transport_user',
'sessions',
'sga_max_size',
'sga_target',
'spfile',
'sql92_security'
) order by 1;" "V\$PARAMETER"

execute_query "SELECT * FROM v\$nls_parameters WHERE parameter = 'NLS_CHARACTERSET';" "NLS_CHARACTERSET"
execute_query "SELECT PROCESS, STATUS, THREAD#, SEQUENCE#, BLOCK# FROM v\$managed_standby;" "V\$MANAGED_STANDBY"
execute_query "select name from V\$SERVICES;" "V\$SERVICES"
execute_query "SELECT name, network_name, Enabled FROM dba_services where upper(name) like '%RWSVC%' ORDER BY 1;" "DBA_SERVICES_RWSVC"
execute_query "select name from dba_services where name like 'DNG\_%' ESCAPE '\\';" "DBA_SERVICES_DNG"
execute_query "select trigger_name from dba_triggers where trigger_name like 'JIL_DNG%';" "DBA_TRIGGERS_JIL_DNG"
execute_query "select translate(listagg(ltrim(rtrim(text)),''), chr(10) || chr(11) || chr(13), ' ') FROM dba_source where name in (select trigger_name from dba_triggers where trigger_name like 'JIL%') and upper(text) like '%DBMS_SERVICE%';" "DBA_SOURCE_DBMS_SERVICE"
execute_query "select username from dba_users where profile='DEFAULT';" "DBA_USERS_DEFAULT_PROFILE"

# Finalize reports
log_detail "Database Validation Script Completed for SID/DBNAME: $DB_SID"
log_summary "Database Validation Script Completed for SID/DBNAME: $DB_SID"

echo "Detailed report generated: $DETAILED_REPORT"
echo "Summary report generated: $SUMMARY_REPORT"

Key Changes:

  1. Dynamic SID/DBNAME Handling:

    • The script now accepts the SID/DBNAME as the second argument ($2).

    • The ORACLE_SID environment variable is set dynamically using the provided SID/DBNAME.

  2. Sathya Integration:

    • If "Sathya" is a specific database, you can pass it as the second argument when running the script:

      ./validate_db_standards.sh SI Sathya
    • The script will connect to the "Sathya" database and perform the validations.

  3. Reports:

    • The reports now include the SID/DBNAME in the output for clarity.


Running the Script:

  1. Save the script as validate_db_standards.sh.

  2. Make it executable:

    chmod +x validate_db_standards.sh
  3. Run the script for a specific database:

    ./validate_db_standards.sh SI ORCL      # For Single Instance with SID=ORCL
    ./validate_db_standards.sh RAC RACDB    # For RAC with DBNAME=RACDB
    ./validate_db_standards.sh SI Sathya    # For Single Instance with SID=Sathya

Example Output:

detailed_report.txt:

Executing Query: select * from gv$encryption_wallet;
SUCCESS: GV$ENCRYPTION_WALLET
Expected: WRL_TYPE: FILE, WRL_PARAMETER: /u01/app/oracle/wallet, STATUS: OPEN
Actual: WRL_TYPE: FILE, WRL_PARAMETER: /u01/app/oracle/wallet, STATUS: OPEN

summary_report.txt:

SUCCESS: GV$ENCRYPTION_WALLET
FAILURE: DBA_TABLESPACES
SUCCESS: DB_UNIQUE_NAME

Notes:

  • Ensure the standards.txt file is updated with the expected outputs for each database (e.g., ORCL, RACDB, Sathya).

  • If "Sathya" has specific standards, update the standards.txt file accordingly.

  • This script is flexible and can be extended to include additional validations or databases as needed.
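
One possible way to keep per-database expectations separate (an illustrative tweak, not part of the script above) is to key the standards file on the SID/DBNAME passed as the second argument:

# Illustrative only: use a per-database standards file such as standards_ORCL.txt or standards_Sathya.txt
STANDARDS_FILE="standards_${DB_SID}.txt"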



 

 

Below is a detailed shell script that performs the required validations for both Oracle RAC and single-instance databases. The script compares the output of the queries against the expected standards (provided in a file) and generates two reports: a detailed report and a summary report with success/failure status.

Script: validate_db_standards.sh

#!/bin/bash

# Configuration
DB_TYPE=$1                  # Pass "RAC" or "SI" (Single Instance) as an argument
STANDARDS_FILE="standards.txt"  # File containing expected outputs
DETAILED_REPORT="detailed_report.txt"
SUMMARY_REPORT="summary_report.txt"
LOGIN_STRING="/ as sysdba"

# Initialize reports
> $DETAILED_REPORT
> $SUMMARY_REPORT

# Function to log detailed output
log_detail() {
    echo -e "$1" >> $DETAILED_REPORT   # -e so the \n escapes used in compare_with_standards are expanded
}

# Function to log summary output
log_summary() {
    echo "$1" >> $SUMMARY_REPORT
}

# Function to compare output with standards
compare_with_standards() {
    local query_output=$1
    local expected_output=$2
    local description=$3

    if [ "$query_output" == "$expected_output" ]; then
        log_summary "SUCCESS: $description"
        log_detail "SUCCESS: $description\nExpected: $expected_output\nActual: $query_output\n"
    else
        log_summary "FAILURE: $description"
        log_detail "FAILURE: $description\nExpected: $expected_output\nActual: $query_output\n"
    fi
}

# Function to execute SQL query and compare with standards
execute_query() {
    local query=$1
    local description=$2
    local expected_output=$(grep -A 1 "$description" $STANDARDS_FILE | tail -n 1)

    log_detail "Executing Query: $query"
    query_output=$(sqlplus -s $LOGIN_STRING <<EOF
    SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
    $query
    EXIT;
EOF
    )

    compare_with_standards "$query_output" "$expected_output" "$description"
}

# Main script execution
log_detail "Starting Database Validation Script"
log_summary "Database Validation Summary Report"

# Execute queries and validate
execute_query "select * from gv\$encryption_wallet;" "GV\$ENCRYPTION_WALLET"
execute_query "select tablespace_name, encrypted from dba_tablespaces;" "DBA_TABLESPACES"
execute_query "select db_unique_name from v\$database;" "DB_UNIQUE_NAME"
execute_query "select * from v\$passwordfile_info;" "V\$PASSWORDFILE_INFO"
execute_query "SELECT comp_id, comp_name, STATUS, version FROM dba_registry where status = 'INVALID';" "DBA_REGISTRY_INVALID"
execute_query "select status from v\$instance;" "V\$INSTANCE_STATUS"
execute_query "select flashback_on from v\$database;" "FLASHBACK_ON"
execute_query "select * from v\$block_change_tracking;" "V\$BLOCK_CHANGE_TRACKING"

execute_query "SELECT NAME FROM v\$parameter WHERE name IN (
'_adg_parselocktimeout',
'audit_file_dest',
'audit_sys_operations',
'audit_trail',
'db_block_size',
'db_create_file_dest',
'db_create_online_log_dest_1',
'db_create_online_log_dest_2',
'db_domain',
'db_files',
'db_recovery_file_dest',
'db_recovery_file_dest_size',
'instance_name',
'log_archive_dest_1',
'open_cursors',
'pga_aggregate_limit',
'pga_aggregate_target',
'processes',
'query_rewrite_enabled',
'recyclebin',
'redo_transport_user',
'sessions',
'sga_max_size',
'sga_target',
'spfile',
'sql92_security'
) order by 1;" "V\$PARAMETER"

execute_query "SELECT * FROM v\$nls_parameters WHERE parameter = 'NLS_CHARACTERSET';" "NLS_CHARACTERSET"
execute_query "SELECT PROCESS, STATUS, THREAD#, SEQUENCE#, BLOCK# FROM v\$managed_standby;" "V\$MANAGED_STANDBY"
execute_query "select name from V\$SERVICES;" "V\$SERVICES"
execute_query "SELECT name, network_name, Enabled FROM dba_services where upper(name) like '%RWSVC%' ORDER BY 1;" "DBA_SERVICES_RWSVC"
execute_query "select name from dba_services where name like 'DNG\_%' ESCAPE '\\';" "DBA_SERVICES_DNG"
execute_query "select trigger_name from dba_triggers where trigger_name like 'JIL_DNG%';" "DBA_TRIGGERS_JIL_DNG"
execute_query "select translate(listagg(ltrim(rtrim(text)),''), chr(10) || chr(11) || chr(13), ' ') FROM dba_source where name in (select trigger_name from dba_triggers where trigger_name like 'JIL%') and upper(text) like '%DBMS_SERVICE%';" "DBA_SOURCE_DBMS_SERVICE"
execute_query "select username from dba_users where profile='DEFAULT';" "DBA_USERS_DEFAULT_PROFILE"

# Finalize reports
log_detail "Database Validation Script Completed"
log_summary "Database Validation Script Completed"

echo "Detailed report generated: $DETAILED_REPORT"
echo "Summary report generated: $SUMMARY_REPORT"

How It Works:

  1. Input Arguments:

    • The script takes one argument: RAC or SI (Single Instance).

    • This can be extended to include additional logic specific to RAC or single-instance databases.

  2. Standards File:

    • The standards.txt file contains the expected output for each query.

    • Format:

      GV\$ENCRYPTION_WALLET
      <expected output for gv$encryption_wallet>
      DBA_TABLESPACES
      <expected output for dba_tablespaces>
  3. Reports:

    • Detailed Report: Contains the query, expected output, and actual output.

    • Summary Report: Contains a success/failure status for each validation.

  4. SQL Execution:

    • The script uses sqlplus -s / as sysdba to execute queries.

    • Output is compared against the standards file.

  5. Output:

    • Two files are generated:

      • detailed_report.txt: Contains detailed query outputs and comparisons.

      • summary_report.txt: Contains a summary of success/failure statuses.


Example standards.txt:

GV\$ENCRYPTION_WALLET
WRL_TYPE: FILE, WRL_PARAMETER: /u01/app/oracle/wallet, STATUS: OPEN

DBA_TABLESPACES
SYSTEM    NO
USERS     YES

DB_UNIQUE_NAME
ORCL

Running the Script:

  1. Save the script as validate_db_standards.sh.

  2. Make it executable:

    chmod +x validate_db_standards.sh
  3. Run the script:

    ./validate_db_standards.sh SI  # For Single Instance
    ./validate_db_standards.sh RAC # For RAC

Output Example:

detailed_report.txt:

Executing Query: select * from gv$encryption_wallet;
SUCCESS: GV$ENCRYPTION_WALLET
Expected: WRL_TYPE: FILE, WRL_PARAMETER: /u01/app/oracle/wallet, STATUS: OPEN
Actual: WRL_TYPE: FILE, WRL_PARAMETER: /u01/app/oracle/wallet, STATUS: OPEN

summary_report.txt:

SUCCESS: GV$ENCRYPTION_WALLET
FAILURE: DBA_TABLESPACES
SUCCESS: DB_UNIQUE_NAME

This script ensures that the newly created database adheres to the defined standards and provides clear, formatted reports for validation.


SQL Performance Triaging Runbook



This document provides a comprehensive overview of SQL execution plans, profiles, baselines, reasons for plan changes, and strategies to avoid these changes. For further details or specific implementation guidance, consulting with a database expert or referring to the documentation of your specific DBMS is recommended.

 

Identifying SQL plan changes in Oracle involves several steps, including capturing and comparing execution plans over time. Below are the procedures and SQL queries to help you identify SQL plan changes in Oracle.

Step 1: Enable SQL Plan Baselines

First, ensure that SQL Plan Baselines are enabled. This feature helps in capturing and managing execution plans.

ALTER SYSTEM SET optimizer_capture_sql_plan_baselines = TRUE;
ALTER SYSTEM SET optimizer_use_sql_plan_baselines = TRUE;
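
To confirm that the settings are in effect, you could query v$parameter; a minimal sketch, run from the shell in the same style as the scripts above:

sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT name || ' = ' || value FROM v\$parameter
WHERE name IN ('optimizer_capture_sql_plan_baselines', 'optimizer_use_sql_plan_baselines');
EXIT;
EOF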

Step 2: Capture Baselines

To capture SQL plan baselines, you can use the DBMS_SPM package. Here’s an example of how to capture a baseline for a specific SQL statement:

DECLARE
  l_plans PLS_INTEGER;  -- LOAD_PLANS_FROM_CURSOR_CACHE returns the number of plans loaded
BEGIN
  l_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(
    sql_id => 'your_sql_id'
  );
END;
/

Replace 'your_sql_id' with the SQL ID of the statement you want to capture.
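
To see what was captured, the DBA_SQL_PLAN_BASELINES view can be checked; a minimal sketch, run from the shell as in the other scripts in this document:

sqlplus -s / as sysdba <<EOF
SET LINESIZE 200 PAGESIZE 100
SELECT sql_handle, plan_name, enabled, accepted, origin
FROM dba_sql_plan_baselines
ORDER BY created DESC;
EXIT;
EOF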

Step 3: Monitor Execution Plans

You can monitor execution plans using the DBA_HIST_SQL_PLAN and DBA_HIST_SQLSTAT views. These views store historical execution plan information.

Query to Retrieve Execution Plans

SELECT
  sql_id,
  plan_hash_value,
  COUNT(*) AS plan_count
FROM
  DBA_HIST_SQL_PLAN
WHERE
  sql_id = 'your_sql_id'
GROUP BY
  sql_id,
  plan_hash_value
ORDER BY
  plan_count DESC;

Replace 'your_sql_id' with the SQL ID of the statement you want to monitor.

Step 4: Compare Execution Plans

To compare execution plans, you can use the DBMS_XPLAN package. Here’s an example of how to display the execution plan for a specific SQL ID and plan hash value:

SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY_AWR(
  sql_id => 'your_sql_id',
  plan_hash_value => 'your_plan_hash_value'
));

Replace 'your_sql_id' and 'your_plan_hash_value' with the appropriate values.

Step 5: Identify Plan Changes

To identify changes in execution plans, you can compare the plan_hash_value over time. If the plan_hash_value changes, it indicates that the execution plan has changed.

Query to Identify Plan Changes

SELECT
  sql_id,
  plan_hash_value,
  COUNT(*) AS plan_count,
  MIN(snap_id) AS first_appearance,
  MAX(snap_id) AS last_appearance
FROM
  DBA_HIST_SQL_PLAN
WHERE
  sql_id = 'your_sql_id'
GROUP BY
  sql_id,
  plan_hash_value
ORDER BY
  first_appearance;

Replace 'your_sql_id' with the SQL ID of the statement you want to monitor.
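
To quantify how each plan performed over the same window, DBA_HIST_SQLSTAT can be aggregated per plan hash value; a sketch of the idea (average elapsed time per execution, in milliseconds):

sqlplus -s / as sysdba <<EOF
SET LINESIZE 200 PAGESIZE 100
SELECT sql_id,
       plan_hash_value,
       ROUND(SUM(elapsed_time_delta) / NULLIF(SUM(executions_delta), 0) / 1000, 2) AS avg_elapsed_ms
FROM dba_hist_sqlstat
WHERE sql_id = 'your_sql_id'
GROUP BY sql_id, plan_hash_value
ORDER BY avg_elapsed_ms;
EXIT;
EOF

Replace 'your_sql_id' with the SQL ID of the statement you want to monitor; the plan with the lowest average elapsed time is usually the better candidate for a baseline.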

Step 6: Analyze Plan Changes

Once you have identified that a plan change has occurred, you can analyze the differences between the old and new plans using the DBMS_XPLAN package.

Query to Display Execution Plan Differences

SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY_AWR(
  sql_id => 'your_sql_id',
  plan_hash_value => 'old_plan_hash_value'
));

SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY_AWR(
  sql_id => 'your_sql_id',
  plan_hash_value => 'new_plan_hash_value'
));

Replace 'your_sql_id', 'old_plan_hash_value', and 'new_plan_hash_value' with the appropriate values.

Step 7: Take Corrective Actions

If you identify an undesirable plan change, you can take corrective actions such as:

  1. Accepting a Baseline Plan: Force the optimizer to use a specific plan.
  2. Creating SQL Profiles: Provide additional information to the optimizer.
  3. Using Plan Stability Features: Utilize features like SQL Plan Management (SPM) to maintain plan stability.

Example: Accepting a Baseline Plan

DECLARE
  l_plans PLS_INTEGER;
BEGIN
  l_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(
    sql_id => 'your_sql_id',
    plan_hash_value => 'desired_plan_hash_value'
  );
END;
/

Replace 'your_sql_id' and 'desired_plan_hash_value' with the appropriate values.
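
For the SQL profile option mentioned in Step 7, one common route (not the only one) is the SQL Tuning Advisor; the task and profile names below are illustrative:

sqlplus -s / as sysdba <<EOF
SET LONG 1000000 LINESIZE 200 PAGESIZE 1000
DECLARE
  l_task VARCHAR2(64);
BEGIN
  l_task := DBMS_SQLTUNE.CREATE_TUNING_TASK(sql_id => 'your_sql_id', task_name => 'tune_your_sql_id');
  DBMS_SQLTUNE.EXECUTE_TUNING_TASK(task_name => l_task);
END;
/
SELECT DBMS_SQLTUNE.REPORT_TUNING_TASK('tune_your_sql_id') FROM dual;
-- If the report recommends a SQL profile, accept it:
-- EXEC DBMS_SQLTUNE.ACCEPT_SQL_PROFILE(task_name => 'tune_your_sql_id', name => 'profile_your_sql_id');
EXIT;
EOF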

Conclusion

By following these steps, you can effectively identify and manage SQL plan changes in Oracle. Regular monitoring and proactive management of execution plans can help maintain consistent query performance and avoid unexpected performance regressions.

 

 

 
#!/bin/bash

# Define Oracle environment variables
export ORACLE_HOME=/path/to/oracle_home
export ORACLE_SID=your_oracle_sid
export PATH=$ORACLE_HOME/bin:$PATH

# Define database connection details
DB_USER="sys"
DB_PASS="your_sys_password"
DB_CONN_STRING="your_tns_alias"

# Define email details
EMAIL_SUBJECT="Database Verification and Health Check Report"
EMAIL_RECIPIENTS="recipient1@example.com,recipient2@example.com"
EMAIL_BODY="/tmp/db_health_check_report.txt"

# Function to execute SQL commands and append output to the report
execute_sql() {
    sqlplus -s "$DB_USER/$DB_PASS@$DB_CONN_STRING as sysdba" <<EOF >> $EMAIL_BODY
$1
EOF
}

# Start the report
echo "Database Verification and Health Check Report" > $EMAIL_BODY
echo "=============================================" >> $EMAIL_BODY
echo "" >> $EMAIL_BODY

# Collect tablespace information
echo "Tablespace Information:" >> $EMAIL_BODY
execute_sql "
SELECT tablespace_name, encrypted
FROM dba_tablespaces;
"
echo "" >> $EMAIL_BODY

# Collect encryption wallet information
echo "Encryption Wallet Information:" >> $EMAIL_BODY
execute_sql "
SELECT * FROM gv\$encryption_wallet;
"
echo "" >> $EMAIL_BODY

# Collect database unique name
echo "Database Unique Name:" >> $EMAIL_BODY
execute_sql "
SELECT db_unique_name
FROM v\$database;
"
echo "" >> $EMAIL_BODY

# Collect database name
echo "Database Name:" >> $EMAIL_BODY
execute_sql "
SELECT name
FROM v\$database;
"
echo "" >> $EMAIL_BODY

# Collect password file information
echo "Password File Information:" >> $EMAIL_BODY
execute_sql "
SELECT * FROM v\$passwordfile_info;
"
echo "" >> $EMAIL_BODY

# Collect component status from DBA registry
echo "Component Status from DBA Registry:" >> $EMAIL_BODY
execute_sql "
SELECT con_id, comp_id, comp_name, version_full, status
FROM dba_registry;
"
echo "" >> $EMAIL_BODY

# Collect user profiles
echo "User Profiles:" >> $EMAIL_BODY
execute_sql "
SELECT username, profile
FROM dba_users;
"
echo "" >> $EMAIL_BODY

# Collect database parameters
echo "Database Parameters:" >> $EMAIL_BODY
execute_sql "
SELECT name, value
FROM v\$parameter
WHERE name IN ('db_flashback_retention_target', 'log_archive_format', 'log_archive_dest_1', 'log_archive_dest_2',
               'log_archive_dest_state_1', 'log_archive_dest_state_2', 'db_recovery_file_dest', 'db_recovery_file_dest_size',
               'db_block_size', 'db_files', 'db_name', 'db_unique_name', 'db_domain', 'db_create_file_dest',
               'db_create_online_log_dest_1', 'db_create_online_log_dest_2', 'db_create_online_log_dest_3',
               'db_create_online_log_dest_4', 'db_create_online_log_dest_5', 'db_flash_cache_size')
ORDER BY name;
"
echo "" >> $EMAIL_BODY
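
The report is written to $EMAIL_BODY; to actually send it, a minimal sketch assuming a standard mailx client is available on the host:

# Send the completed report (assumes mailx; EMAIL_RECIPIENTS above is comma-separated)
RECIPIENT_LIST=$(echo "$EMAIL_RECIPIENTS" | tr ',' ' ')
mailx -s "$EMAIL_SUBJECT" $RECIPIENT_LIST < "$EMAIL_BODY"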


Document: Oracle Data Guard Switchover Meeting Notes

Introduction

This document captures the key points and discussions from the recent meeting regarding Oracle Data Guard switchover procedures, known issues, and troubleshooting steps.

Meeting Summary

The meeting focused on the Oracle Data Guard switchover process, particularly addressing issues encountered in previous versions and the steps to ensure a successful switchover. The discussion included the automation of the switchover process, the role of OEM (Oracle Enterprise Manager), and the importance of pre-checks and post-checks.

Key Points Discussed

  1. Historical Issues with Switchover:

    • Oracle 12.1: There were frequent issues where the database on the other side (standby) would not come up as the primary after a switchover. Errors were encountered in the switchover script, and manual intervention was often required.
    • Oracle 12.2: Improvements were noted, with the system providing messages indicating the inability to start the database on the other side. However, the database would automatically start, and the cluster would handle the transition.
  2. Switchover Job Submission:

    • The switchover process involves submitting a job with the primary and standby names matching the OEM targets. This is crucial as the process runs against the OEM configuration.
    • The job is initially submitted with the primary and standby names, and the status is updated in a specific table.
  3. Automation and Monitoring:

    • An automated job runs every three minutes to check the status of the switchover job. If the job is in a running state, it will continue to monitor and update the status.
    • The script used for the switchover is copied to the standby server and executed there. The log files generated provide details on the success or failure of the switchover.
  4. Troubleshooting Common Issues:

    • Firewall Issues: Ensure that the firewall is open between the OEM server and the database servers to allow the switchover script to update the status.
    • Log Files: If the log files are not updated, check the standby host for the script execution status and logs.
    • Primary and Standby Names: Ensure that the job is submitted with the correct primary and standby names as per the OEM configuration.
  5. Known Issues and Solutions:

    • Switchover Failure: In some cases, both databases may be left in standby mode. Manually force the primary database to become the primary again and reconfigure Data Guard.
    • Lag Issues: Ensure that the lag between the primary and standby databases is within acceptable limits before initiating the switchover.
    • Invalid Objects: Compile all invalid objects before performing the switchover (a quick check-and-recompile sketch follows this list).
    • Datafile Status: Ensure all datafiles are online before performing the switchover.
    • Flashback: Verify that Flashback is enabled before performing the switchover.
  6. Best Practices:

    • Pre-Checks: Perform comprehensive pre-checks to ensure the environment is ready for the switchover.
    • Post-Checks: Conduct thorough post-checks to validate the success of the switchover and ensure the new primary and standby databases are functioning correctly.
    • OEM Integration: Ensure that the OEM configuration is accurate and that the switchover job is submitted with the correct details.
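
As a small illustration of the pre-switchover checks called out in the known issues above (run as the oracle user on the primary; a sketch only):

# Quick checks for invalid objects, offline datafiles, and flashback before the switchover
sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF
SELECT 'Invalid objects  : ' || COUNT(*) FROM dba_objects WHERE status = 'INVALID';
SELECT 'Offline datafiles: ' || COUNT(*) FROM v\$datafile WHERE status = 'OFFLINE';
SELECT 'Flashback        : ' || flashback_on FROM v\$database;
EXIT;
EOF
# Recompile invalid objects, if any, before proceeding
sqlplus / as sysdba @?/rdbms/admin/utlrp.sql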

Action Items

  • Firewall Configuration: Verify and ensure that the firewall settings allow communication between the OEM server and the database servers.
  • Log File Monitoring: Regularly monitor the log files for any errors or issues during the switchover process.
  • Training and Knowledge Sharing: Include key team members in troubleshooting sessions to ensure they are familiar with the switchover process and common issues.

Conclusion

The meeting highlighted the importance of thorough pre-checks, post-checks, and monitoring to ensure a successful Oracle Data Guard switchover. By addressing known issues and following best practices, the team can minimize disruptions and ensure a smooth transition during the switchover process.


Document: Oracle Data Guard Switchover Meeting Notes (Continued)

 

Introduction

This document captures the key points and discussions from the recent meeting regarding Oracle Data Guard switchover procedures, known issues, and troubleshooting steps. This section continues from the previous meeting notes, focusing on additional issues, listener configurations, and service management.

Meeting Summary

The meeting continued to address the Oracle Data Guard switchover process, particularly focusing on listener configurations, service management, and known issues that can cause switchover failures. The discussion included the importance of ensuring proper listener configurations, handling service triggers, and troubleshooting common issues.

Key Points Discussed

  1. Listener Configuration Issues:

    • Single Instance Listener: The listener needs to be up on the standby side after the switchover. In previous configurations, the Oracle home was included in the listener configuration, which caused issues during the switchover.
    • Fixing Listener Issues: When encountering errors related to the listener, remove the Oracle home entry from the listener configuration and reload the listener. This ensures that the listener points to the correct Oracle home version. (An example listener.ora entry and the reload commands follow this list.)
  2. Common Listener Errors:

    • Oracle Home Entry: Errors often occur when the listener configuration points to an outdated Oracle home (e.g., 18c instead of 19c). Removing the outdated entry and reloading the listener resolves the issue.
    • Manual Restart: After fixing the listener configuration, manually restart the listener to ensure the database role change is recognized.
  3. Service Management:

    • Service Triggers: Services are managed through triggers that start and stop services based on the database role. For example, after a role change, the primary database will stop read-only services and start read-write services, while the standby database will do the opposite.
    • Service Creation: During provisioning, services are created for application connections and Data Guard management. Ensure that these services are correctly configured and associated with the appropriate triggers.
  4. Known Issues and Solutions:

    • Switchover Failure: Common issues include both databases being left in standby mode. Manually force the primary database to become the primary again and reconfigure Data Guard.
    • Listener Configuration: Ensure that the listener configuration is updated to reflect the correct Oracle home version. This is a frequent source of errors during switchover.
    • Service Triggers: Verify that service triggers are correctly configured and updated during provisioning and migration. This ensures that services are started and stopped correctly based on the database role.
  5. Best Practices:

    • Pre-Checks: Perform comprehensive pre-checks to ensure the environment is ready for the switchover, including verifying listener configurations and service triggers.
    • Post-Checks: Conduct thorough post-checks to validate the success of the switchover and ensure the new primary and standby databases are functioning correctly.
    • OEM Integration: Ensure that the OEM configuration is accurate and that the switchover job is submitted with the correct details.
  6. Troubleshooting Common Issues:

    • Firewall Issues: Ensure that the firewall is configured to allow communication between the OEM server and the database servers.
    • Log Files: If the log files are not updated, check the standby host for the script execution status and logs.
    • Primary and Standby Names: Ensure that the job is submitted with the correct primary and standby names as per the OEM configuration.
    • Service Issues: If services are not starting correctly, check the service triggers and ensure they are correctly configured.
  7. Listener and Service Configuration:

    • Listener Configuration: Ensure that the listener configuration does not include outdated Oracle home entries. Update and reload the listener configuration as needed.
    • Service Triggers: Verify that service triggers are correctly configured to start and stop services based on the database role. This is particularly important for new databases and during migrations.
  8. Provisioning and Migration:

    • Service Configuration: During provisioning and migration, ensure that service triggers are correctly configured and updated. This includes creating new services and updating existing ones.
    • Testing: Test the switchover process thoroughly, including verifying listener configurations and service triggers. This helps identify and resolve issues before they impact production.
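
To illustrate the listener fix described in points 1 and 2 above (the SID and paths below are examples only), a stale static registration that pins an old Oracle home can be removed or corrected in listener.ora, after which the listener is reloaded:

# Example of an outdated static entry in listener.ora (illustrative names/paths):
#   SID_LIST_LISTENER =
#     (SID_LIST =
#       (SID_DESC =
#         (SID_NAME = ORCL)
#         (ORACLE_HOME = /u01/app/oracle/product/18.0.0/dbhome_1)
#       )
#     )
# After removing or correcting the ORACLE_HOME entry, reload and verify:
lsnrctl reload
lsnrctl status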

Action Items

  • Firewall Configuration: Verify and ensure that the firewall settings allow communication between the OEM server and the database servers.
  • Log File Monitoring: Regularly monitor the log files for any errors or issues during the switchover process.
  • Listener Configuration: Ensure that the listener configuration is updated to reflect the correct Oracle home version.
  • Service Trigger Verification: Verify that service triggers are correctly configured and updated during provisioning and migration.
  • Training and Knowledge Sharing: Include key team members in troubleshooting sessions to ensure they are familiar with the switchover process and common issues.

Conclusion

The meeting highlighted the importance of thorough pre-checks, post-checks, and monitoring to ensure a successful Oracle Data Guard switchover. By addressing known issues and following best practices, the team can minimize disruptions and ensure a smooth transition during the switchover process. Proper listener and service configuration, along with comprehensive testing, are critical to the success of the switchover.


This document captures the key points and discussions from the meeting, providing a reference for the Oracle Data Guard switchover process and troubleshooting steps.


Document: Oracle Data Guard Switchover Meeting Notes (Final Portion)

Introduction

This document captures the key points and discussions from the recent meeting regarding Oracle Data Guard switchover procedures, known issues, and troubleshooting steps. This section continues from the previous meeting notes, focusing on the creation of a checklist, verification steps, and additional automation details.

Meeting Summary

The meeting continued to address the Oracle Data Guard switchover process, particularly focusing on the creation of a checklist for pre-checks and post-checks, verification steps after switchover, and the automation process for managing switchover jobs.

Key Points Discussed

  1. Creation of a Checklist:

    • Pre-Check and Post-Check Checklist: It was suggested to create a comprehensive checklist to be used before and after the switchover. This checklist would include all necessary steps to ensure a smooth transition.
    • Human Verification: Despite automation, human verification is still necessary. The checklist should include steps for manually verifying critical configurations, such as service names and listener configurations.
  2. Verification Steps After Switchover:

    • Log Files: After the switchover, it is essential to check the log files for any errors or issues. This includes both the alert logs and the switchover logs.
    • Database Health: Verify the health of the database by checking the status of datafiles, services, and managed recovery processes.
    • Shift Handover: The operations team will take over after the initial verification. This ensures continuous monitoring and handling of any issues that may arise.
  3. Automation and Job Management:

    • Job Submission: The switchover process involves submitting a job with the primary and standby names matching the OEM targets. The job is initially submitted with the primary and standby names, and the status is updated in a specific table.
    • Status Monitoring: An automated job runs every three minutes to check the status of the switchover job. If the job is in a running state, it will continue to monitor and update the status. (A sample cron entry follows this list.)
    • Script Execution: The script used for the switchover is copied to the standby server and executed there. The log files generated provide details on the success or failure of the switchover.
  4. Common Issues and Solutions:

    • Listener Configuration: Ensure that the listener configuration does not include outdated Oracle home entries. Update and reload the listener configuration as needed.
    • Service Triggers: Verify that service triggers are correctly configured and updated during provisioning and migration. This ensures that services are started and stopped correctly based on the database role.
    • Firewall Issues: Ensure that the firewall is configured to allow communication between the OEM server and the database servers.
    • Log File Monitoring: Regularly monitor the log files for any errors or issues during the switchover process.
  5. Best Practices:

    • Pre-Checks: Perform comprehensive pre-checks to ensure the environment is ready for the switchover, including verifying listener configurations and service triggers.
    • Post-Checks: Conduct thorough post-checks to validate the success of the switchover and ensure the new primary and standby databases are functioning correctly.
    • OEM Integration: Ensure that the OEM configuration is accurate and that the switchover job is submitted with the correct details.
  6. Additional Automation Details:

    • Script Details: The switchover script takes arguments for the primary and standby database names and performs the necessary steps to switch roles. The script updates the status in the database and generates logs for monitoring.
    • Failover Script: A failover script is also available for use in disaster recovery scenarios. This script can be used to automate the failover process if needed.
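
As an illustration of the three-minute status poll described above (the script name and paths are hypothetical):

# Hypothetical crontab entry: poll the switchover job status every three minutes
*/3 * * * * /home/oracle/scripts/check_switchover_status.sh >> /home/oracle/logs/switchover_monitor.log 2>&1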

Action Items

  • Create a Checklist: Develop a comprehensive checklist for pre-checks and post-checks to ensure a smooth switchover process.
  • Verify Listener Configuration: Ensure that the listener configuration is updated to reflect the correct Oracle home version.
  • Monitor Log Files: Regularly monitor the log files for any errors or issues during the switchover process.
  • Verify Service Triggers: Ensure that service triggers are correctly configured and updated during provisioning and migration.
  • Training and Knowledge Sharing: Include key team members in troubleshooting sessions to ensure they are familiar with the switchover process and common issues.

Conclusion

The meeting highlighted the importance of thorough pre-checks, post-checks, and monitoring to ensure a successful Oracle Data Guard switchover. By addressing known issues and following best practices, the team can minimize disruptions and ensure a smooth transition during the switchover process. Proper listener and service configuration, along with comprehensive testing, are critical to the success of the switchover.


This document captures the key points and discussions from the meeting, providing a reference for the Oracle Data Guard switchover process and troubleshooting steps.


 #!/bin/bash

LOGFILE="dataguard_precheck.log" exec > >(tee -i $LOGFILE) exec 2>&1 timestamp() { date +"%Y-%m-%d %H:%M:%S" } log() { echo "$(timestamp) - $1" } log "Starting Data Guard Pre-Check Script" # Function to check Data Guard configuration check_dg_config() { log "Checking Data Guard Configuration..." dgmgrl -silent / "show configuration" | grep -i "SUCCESS" if [ $? -ne 0 ]; then log "Data Guard Configuration Check Failed" exit 1 fi log "Data Guard Configuration Check Passed" } # Function to verify archive log shipping check_archive_logs() { log "Verifying Archive Log Shipping..." sqlplus -s / as sysdba <<EOF SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF SELECT DEST_ID, STATUS FROM V\$ARCHIVE_DEST_STATUS WHERE STATUS='VALID'; EXIT; EOF if [ $? -ne 0 ]; then log "Archive Log Shipping Check Failed" exit 1 fi log "Archive Log Shipping Check Passed" } # Function to check database role check_db_role() { log "Checking Database Role..." sqlplus -s / as sysdba <<EOF SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF SELECT DATABASE_ROLE FROM V\$DATABASE; EXIT; EOF if [ $? -ne 0 ]; then log "Database Role Check Failed" exit 1 fi log "Database Role Check Passed" } # Function to check Data Guard Broker configuration check_dg_broker() { log "Checking Data Guard Broker Configuration..." dgmgrl -silent / "show configuration" | grep -i "SUCCESS" if [ $? -ne 0 ]; then log "Data Guard Broker Configuration Check Failed" exit 1 fi log "Data Guard Broker Configuration Check Passed" } # Function to check for active sessions check_active_sessions() { log "Checking for Active Sessions..." sqlplus -s / as sysdba <<EOF SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF SELECT COUNT(*) FROM V\$SESSION WHERE STATUS='ACTIVE'; EXIT; EOF if [ $? -ne 0 ]; then log "Active Sessions Check Failed" exit 1 fi log "Active Sessions Check Passed" } # Function to take backup take_backup() { log "Taking Backup of Primary Database..." rman target / <<EOF BACKUP DATABASE PLUS ARCHIVELOG; EXIT; EOF if [ $? -ne 0 ]; then log "Backup Failed" exit 1 fi log "Backup Completed Successfully" } # Function to check cluster status (RAC only) check_cluster_status() { log "Checking Cluster Status..." crsctl check cluster -all if [ $? -ne 0 ]; then log "Cluster Status Check Failed" exit 1 fi log "Cluster Status Check Passed" } # Function to check services (RAC only) check_services() { log "Checking Services..." srvctl status service -d $ORACLE_SID if [ $? -ne 0 ]; then log "Services Check Failed" exit 1 fi log "Services Check Passed" } # Function to check interconnect (RAC only) check_interconnect() { log "Checking Interconnect..." oifcfg getif if [ $? -ne 0 ]; then log "Interconnect Check Failed" exit 1 fi log "Interconnect Check Passed" } # Function to check OCR and voting disk (RAC only) check_ocr_voting_disk() { log "Checking OCR and Voting Disk..." ocrcheck if [ $? -ne 0 ]; then log "OCR Check Failed" exit 1 fi crsctl query css votedisk if [ $? -ne 0 ]; then log "Voting Disk Check Failed" exit 1 fi log "OCR and Voting Disk Check Passed" } # Function to check for lag check_lag() { log "Checking for Lag..." dgmgrl -silent / "show configuration" | grep -i "lag" if [ $? -ne 0 ]; then log "Lag Check Failed" exit 1 fi log "Lag Check Passed" } # Function to check for invalid objects check_invalid_objects() { log "Checking for Invalid Objects..." sqlplus -s / as sysdba <<EOF SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF SELECT COUNT(*) FROM DBA_OBJECTS WHERE STATUS='INVALID'; EXIT; EOF if [ $? 
-ne 0 ]; then log "Invalid Objects Check Failed" exit 1 fi log "Invalid Objects Check Passed" } # Function to check for offline datafiles check_offline_datafiles() { log "Checking for Offline Datafiles..." sqlplus -s / as sysdba <<EOF SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF SELECT FILE_NAME FROM DBA_DATA_FILES WHERE STATUS='OFFLINE'; EXIT; EOF if [ $? -ne 0 ]; then log "Offline Datafiles Check Failed" exit 1 fi log "Offline Datafiles Check Passed" } # Function to check flashback status check_flashback() { log "Checking Flashback Status..." sqlplus -s / as sysdba <<EOF SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF SELECT FLASHBACK_ON FROM V\$DATABASE; EXIT; EOF if [ $? -ne 0 ]; then log "Flashback Status Check Failed" exit 1 fi log "Flashback Status Check Passed" } # Function to check switchover status check_switchover_status() { log "Checking Switchover Status..." sqlplus -s / as sysdba <<EOF SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF SELECT SWITCHOVER_STATUS FROM V\$DATABASE; EXIT; EOF if [ $? -ne 0 ]; then log "Switchover Status Check Failed" exit 1 fi log "Switchover Status Check Passed" } # Function to check listeners check_listeners() { log "Checking Listeners..." lsnrctl status if [ $? -ne 0 ]; then log "Listeners Check Failed" exit 1 fi log "Listeners Check Passed" } # Function to check temp space check_temp_space() { log "Checking Temp Space..." sqlplus -s / as sysdba <<EOF SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF SELECT TABLESPACE_NAME, BYTES/1024/1024 AS MB FROM DBA_TEMP_FILES; EXIT; EOF if [ $? -ne 0 ]; then log "Temp Space Check Failed" exit 1 fi log "Temp Space Check Passed" } # Main script execution log "Performing Pre-Checks for Data Guard Switchover" check_dg_config check_archive_logs check_db_role check_dg_broker check_active_sessions take_backup check_lag check_invalid_objects check_offline_datafiles check_flashback check_switchover_status check_listeners check_temp_space if [ "$1" == "RAC" ]; then check_cluster_status check_services check_interconnect check_ocr_voting_disk fi log "All Pre-Checks Completed Successfully"
#!/bin/bash

LOGFILE="dataguard_switchover.log"
exec > >(tee -i $LOGFILE)
exec 2>&1

timestamp() {
    date +"%Y-%m-%d %H:%M:%S"
}

log() {
    echo "$(timestamp) - $1"
}

log "Starting Data Guard Switchover Script"

# Function to check switchover status
check_switchover_status() {
    log "Checking Switchover Status..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT SWITCHOVER_STATUS FROM V\$DATABASE;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Switchover Status Check Failed"
        exit 1
    fi
    log "Switchover Status Check Passed"
}

# Function to perform switchover on primary
switchover_primary() {
    log "Performing Switchover on Primary Database..."
    sqlplus -s / as sysdba <<EOF
ALTER DATABASE COMMIT TO SWITCHOVER TO STANDBY WITH SESSION SHUTDOWN;
SHUTDOWN IMMEDIATE;
STARTUP MOUNT;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Switchover on Primary Database Failed"
        exit 1
    fi
    log "Switchover on Primary Database Completed Successfully"
}

# Function to perform switchover on standby
switchover_standby() {
    log "Performing Switchover on Standby Database..."
    sqlplus -s / as sysdba <<EOF
ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY WITH SESSION SHUTDOWN;
SHUTDOWN IMMEDIATE;
STARTUP;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Switchover on Standby Database Failed"
        exit 1
    fi
    log "Switchover on Standby Database Completed Successfully"
}

# Function to start managed recovery on new standby
start_managed_recovery() {
    log "Starting Managed Recovery on New Standby Database..."
    sqlplus -s / as sysdba <<EOF
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT FROM SESSION;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Starting Managed Recovery on New Standby Database Failed"
        exit 1
    fi
    log "Managed Recovery on New Standby Database Started Successfully"
}

# Function to check database role
check_db_role() {
    log "Checking Database Role..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT DATABASE_ROLE FROM V\$DATABASE;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Database Role Check Failed"
        exit 1
    fi
    log "Database Role Check Passed"
}

# Main script execution
log "Performing Switchover for Data Guard"

check_switchover_status

log "Switching Over Primary Database"
switchover_primary

log "Switching Over Standby Database"
switchover_standby

log "Starting Managed Recovery on New Standby Database"
start_managed_recovery

log "Checking Database Role on New Primary"
check_db_role

log "Checking Database Role on New Standby"
check_db_role

log "Data Guard Switchover Completed Successfully"

Usage

  1. Save the script to a file, e.g., dataguard_switchover.sh.
  2. Make the script executable: chmod +x dataguard_switchover.sh.
  3. Run the script: ./dataguard_switchover.sh.

This script will log the output with timestamps to dataguard_switchover.log and will exit if any of the steps fail. Adjust the script as needed for your specific environment and requirements.
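
Since the pre-check script already verifies the Data Guard Broker configuration with dgmgrl, an alternative worth noting is letting the broker drive the role transition itself. The following is only a sketch: stby_db is a placeholder for the standby database name shown by "show configuration" in your environment, and in many setups you would connect with a SYS password rather than / so the broker can restart both databases automatically.

# Sketch only: broker-driven switchover. "stby_db" is a placeholder for the
# standby database name in your broker configuration.
dgmgrl / <<EOF
SHOW CONFIGURATION;
SWITCHOVER TO stby_db;
SHOW CONFIGURATION;
EXIT;
EOF
if [ $? -ne 0 ]; then
    echo "Broker-driven switchover reported a failure; review the dgmgrl output"
    exit 1
fi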

Important Notes

  1. Backup: Ensure you have a recent backup of both the primary and standby databases before performing the switchover.
  2. Testing: Test the script in a non-production environment to ensure it works as expected.
  3. Validation: After the switchover, validate that the new primary and standby databases are functioning correctly.
  4. RAC Considerations: If you are using RAC, additional steps may be required to manage services and instances across the cluster. Adjust the script accordingly.

Additional Steps for RAC

For RAC environments, you may need to include additional steps to manage services and instances. Here are some additional functions you can add to the script for RAC:

# Function to relocate services to new primary
relocate_services() {
    log "Relocating Services to New Primary Database..."
    srvctl relocate service -d $ORACLE_SID -s <service_name> -i <old_primary_instance> -t <new_primary_instance>
    if [ $? -ne 0 ]; then
        log "Relocating Services Failed"
        exit 1
    fi
    log "Services Relocated Successfully"
}

# Function to start instances on new primary
start_instances() {
    log "Starting Instances on New Primary Database..."
    srvctl start instance -d $ORACLE_SID -i <instance_name>
    if [ $? -ne 0 ]; then
        log "Starting Instances Failed"
        exit 1
    fi
    log "Instances Started Successfully"
}

# Function to stop instances on old primary
stop_instances() {
    log "Stopping Instances on Old Primary Database..."
    srvctl stop instance -d $ORACLE_SID -i <instance_name>
    if [ $? -ne 0 ]; then
        log "Stopping Instances Failed"
        exit 1
    fi
    log "Instances Stopped Successfully"
}

Usage for RAC

  1. Add the above functions to the script.
  2. Call these functions at the appropriate points in the script, e.g., after performing the switchover on the primary and standby databases.

By incorporating these additional steps, you can ensure a smooth switchover process for both single instance and RAC environments.
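
For example, the RAC helpers might be wired into the main flow of dataguard_switchover.sh roughly as follows. This is only a sketch: it assumes the functions above have been added to the script and that their <service_name> and <instance_name> placeholders have been replaced with real values for your cluster.

# Sketch only: RAC-specific steps after the role transition. The helper
# functions must have their placeholders filled in before this will work.
if [ "$1" == "RAC" ]; then
    log "Applying RAC-specific steps"
    start_instances      # bring up the remaining instances on the new primary
    relocate_services    # move application services to the new primary
    stop_instances       # stop instances that should no longer run on the old primary
fi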



Shell Script for Post-Checks

#!/bin/bash

LOGFILE="dataguard_postcheck.log"
exec > >(tee -i $LOGFILE)
exec 2>&1

timestamp() {
    date +"%Y-%m-%d %H:%M:%S"
}

log() {
    echo "$(timestamp) - $1"
}

log "Starting Data Guard Post-Check Script"

# Function to check database role
check_db_role() {
    log "Checking Database Role..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT DATABASE_ROLE FROM V\$DATABASE;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Database Role Check Failed"
        exit 1
    fi
    log "Database Role Check Passed"
}

# Function to check switchover status
check_switchover_status() {
    log "Checking Switchover Status..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT SWITCHOVER_STATUS FROM V\$DATABASE;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Switchover Status Check Failed"
        exit 1
    fi
    log "Switchover Status Check Passed"
}

# Function to check alert log for errors
check_alert_log() {
    log "Checking Alert Log for Errors..."
    ALERT_LOG=$(sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT VALUE FROM V\$DIAG_INFO WHERE NAME = 'Diag Trace';
EXIT;
EOF
)
    # Trim stray whitespace returned by sqlplus before building the path
    ALERT_LOG=$(echo "$ALERT_LOG" | tr -d '[:space:]')
    ALERT_LOG_FILE="${ALERT_LOG}/alert_${ORACLE_SID}.log"
    # Scan only the recent portion of the alert log; grepping the whole file
    # would also flag historical ORA- errors from before the switchover.
    tail -1000 "$ALERT_LOG_FILE" | grep -i "ORA-"
    if [ $? -eq 0 ]; then
        log "Errors Found in Alert Log"
        exit 1
    fi
    log "No Errors Found in Alert Log"
}

# Function to check listener status
check_listener() {
    log "Checking Listener Status..."
    lsnrctl status
    if [ $? -ne 0 ]; then
        log "Listener Status Check Failed"
        exit 1
    fi
    log "Listener Status Check Passed"
}

# Function to check services status
check_services() {
    log "Checking Services Status..."
    srvctl status service -d $ORACLE_SID
    if [ $? -ne 0 ]; then
        log "Services Status Check Failed"
        exit 1
    fi
    log "Services Status Check Passed"
}

# Function to check managed recovery on standby
check_managed_recovery() {
    log "Checking Managed Recovery on Standby Database..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT PROCESS, STATUS FROM V\$MANAGED_STANDBY WHERE PROCESS='MRP0';
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Managed Recovery Check Failed"
        exit 1
    fi
    log "Managed Recovery Check Passed"
}

# Function to check datafile status
check_datafile_status() {
    log "Checking Datafile Status..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT FILE_NAME, STATUS FROM DBA_DATA_FILES WHERE STATUS != 'AVAILABLE';
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Datafile Status Check Failed"
        exit 1
    fi
    log "Datafile Status Check Passed"
}

# Function to check temporary tablespace
check_temp_tablespace() {
    log "Checking Temporary Tablespace..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT TABLESPACE_NAME, BYTES/1024/1024 AS MB FROM DBA_TEMP_FILES;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Temporary Tablespace Check Failed"
        exit 1
    fi
    log "Temporary Tablespace Check Passed"
}

# Function to check invalid objects
check_invalid_objects() {
    log "Checking for Invalid Objects..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT COUNT(*) FROM DBA_OBJECTS WHERE STATUS='INVALID';
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Invalid Objects Check Failed"
        exit 1
    fi
    log "Invalid Objects Check Passed"
}

# Function to check flashback status
check_flashback() {
    log "Checking Flashback Status..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT FLASHBACK_ON FROM V\$DATABASE;
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Flashback Status Check Failed"
        exit 1
    fi
    log "Flashback Status Check Passed"
}

# Function to check archive log shipping
check_archive_logs() {
    log "Verifying Archive Log Shipping..."
    sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT DEST_ID, STATUS FROM V\$ARCHIVE_DEST_STATUS WHERE STATUS='VALID';
EXIT;
EOF
    if [ $? -ne 0 ]; then
        log "Archive Log Shipping Check Failed"
        exit 1
    fi
    log "Archive Log Shipping Check Passed"
}

# Function to check lag
check_lag() {
    log "Checking for Lag..."
    dgmgrl -silent / "show configuration" | grep -i "lag"
    if [ $? -ne 0 ]; then
        log "Lag Check Failed"
        exit 1
    fi
    log "Lag Check Passed"
}

# Main script execution
log "Performing Post-Checks for Data Guard Switchover"

check_db_role
check_switchover_status
check_alert_log
check_listener
check_services
check_managed_recovery
check_datafile_status
check_temp_tablespace
check_invalid_objects
check_flashback
check_archive_logs
check_lag

log "All Post-Checks Completed Successfully"

Usage

  1. Save the script to a file, e.g., dataguard_postcheck.sh.
  2. Make the script executable: chmod +x dataguard_postcheck.sh.
  3. Run the script: ./dataguard_postcheck.sh.

This script will log the output with timestamps to dataguard_postcheck.log and will exit if any of the checks fail. Adjust the script as needed for your specific environment and requirements.
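
Note that in all of these checks, [ $? -ne 0 ] only tests whether sqlplus itself exited cleanly; a query that runs but returns an unwanted value (for example a SWITCHOVER_STATUS other than the one you expect) still counts as a pass. A minimal sketch of a stricter variant, which makes sqlplus fail on SQL errors and compares the returned value instead of relying on the exit status alone:

# Sketch only: stricter version of check_switchover_status. WHENEVER SQLERROR
# makes sqlplus return a non-zero exit code on SQL errors, and the captured
# value can then be compared against the expected status.
check_switchover_status_strict() {
    log "Checking Switchover Status..."
    status=$(sqlplus -s / as sysdba <<EOF
WHENEVER SQLERROR EXIT SQL.SQLCODE
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT SWITCHOVER_STATUS FROM V\$DATABASE;
EXIT;
EOF
)
    if [ $? -ne 0 ]; then
        log "Switchover Status Query Failed"
        exit 1
    fi
    log "SWITCHOVER_STATUS is: $status"
    # Compare $status against the value expected for the database being checked
    # (for example TO STANDBY on the primary before a switchover).
}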

Important Notes

  1. Backup: Ensure you have a recent backup of both the primary and standby databases before performing the switchover.
  2. Testing: Test the script in a non-production environment to ensure it works as expected.
  3. Validation: After the switchover, validate that the new primary and standby databases are functioning correctly.
  4. RAC Considerations: If you are using RAC, additional steps may be required to manage services and instances across the cluster. Adjust the script accordingly.

By incorporating these post-checks, you can ensure that the switchover process has been completed successfully and that both the new primary and standby databases are functioning correctly.
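
Because the script connects with / as sysdba, it only examines the local instance. To cover both sides of the configuration after a switchover, one option is a small wrapper that runs it on each host. The sketch below uses placeholder host names and assumes the post-check script has been copied to each host, the oracle OS user is used, and the Oracle environment is set in the login profile.

# Sketch only: new_primary_host and new_standby_host are placeholders for
# your environment; adjust the user, path, and environment setup as needed.
for host in new_primary_host new_standby_host; do
    echo "Running post-checks on $host"
    ssh oracle@"$host" "bash ~/dataguard_postcheck.sh" || exit 1
done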

 
#!/bin/bash

# Scan the SCAN listener logs on every cluster node and report the service
# names that received connections between two dates.

# Function to display usage
usage() {
    echo "Usage:
$0 <start date> <end date>
e.g. $0 20240710 20240711
"
    exit 1
}

# Check if the correct number of arguments are provided
if [ $# -ne 2 ]; then
    usage
fi

start_date=$1
end_date=$2

# Source the profile
source ~/.profile

# Get the list of cluster node host names from the database. GV$INSTANCE
# returns one row per running instance, so this covers every node; the
# original V$INSTANCE query would only return the local instance name.
nodes=$(sqlplus -s / as sysdba <<EOF
SET PAGESIZE 0 FEEDBACK OFF VERIFY OFF HEADING OFF ECHO OFF
SELECT HOST_NAME FROM GV\$INSTANCE;
EXIT;
EOF
)

# Check if nodes are retrieved
if [ -z "$nodes" ]; then
    echo "No nodes found!"
    exit 1
fi

# Function to scan listener logs on a node
scan_listener_logs() {
    local node=$1
    local start_date=$2
    local end_date=$3

    host_name=$(ssh $node "hostname -s")
    scan_name=$(ssh $node "srvctl status scan | grep $host_name | awk '{print \$3}'")

    if [ -z "$scan_name" ]; then
        echo "No scan on this host!"
        return
    fi

    listener_log_dir=$(ssh $node "echo \${ORACLE_BASE}/diag/tnslsnr/${host_name}/listener_${scan_name}/trace")
    tmp_file=/tmp/scan_local_listener_$$.txt

    for f in $(ssh $node "find $listener_log_dir -name '*.log' -ctime -30"); do
        # The log file lives on the remote node, so test its size over ssh;
        # a local [ -s $f ] test would always fail and skip every file.
        if ! ssh $node "test -s $f"; then
            continue
        fi

        f_start_time=$(ssh $node "grep CONNECT_DATA $f | head -1 | awk '{print \$1\" \"\$2}'")
        f_end_time=$(ssh $node "grep CONNECT_DATA $f | tail -1 | awk '{print \$1\" \"\$2}'")
        f_start_time_formatted=$(date -d "$f_start_time" '+%s')
        f_end_time_formatted=$(date -d "$f_end_time" '+%s')
        start_date_formatted=$(date -d "$start_date 00:00:00" '+%s')
        end_date_formatted=$(date -d "$end_date 23:59:59" '+%s')

        if [ $f_start_time_formatted -gt $end_date_formatted ] || [ $f_end_time_formatted -lt $start_date_formatted ]; then
            continue
        fi

        ssh $node "grep -w establish $f > $tmp_file"
        ssh $node "cat $tmp_file | grep -o '(SERVICE_NAME=.*)' | awk -F '[\(=\)]' '{print \$3}' | sort -u"
    done
}

# Loop through each node and scan listener logs
for node in $nodes; do
    scan_listener_logs $node $start_date $end_date
done
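
Usage

  1. Save the script to a file, e.g., scan_listener_services.sh.
  2. Make the script executable: chmod +x scan_listener_services.sh.
  3. Run the script with a start and end date in YYYYMMDD format: ./scan_listener_services.sh 20240710 20240711.

For each cluster node, the script prints the distinct service names that established connections through that node's SCAN listener during the given date range, which is useful for confirming which services were actually in use before a migration or switchover.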