diff --git a/.vscode/settings.json b/.vscode/settings.json index 74b6a3c..becb01c 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -121,6 +121,13 @@ "password": "Cloudpass#34", "connectionString": "ggmichalski_high", "walletLocation": "c:\\_git\\OracleAI\\oracledb1\\Wallet_ggmichalski" + }, + { + "name": "OU_C2D@ggmichalski_high", + "username": "OU_C2D", + "password": "Cloudpass#34", + "connectionString": "ggmichalski_high", + "walletLocation": "c:\\_git\\OracleAI\\oracledb1\\Wallet_ggmichalski" } ] } diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-826/01_MARS_826_export_ADHOC_ADJ_tables.sql b/MARS_Packages/REL01_ADDITIONS/MARS-826/01_MARS_826_export_ADHOC_ADJ_tables.sql index 81183c5..1d8ca40 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-826/01_MARS_826_export_ADHOC_ADJ_tables.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-826/01_MARS_826_export_ADHOC_ADJ_tables.sql @@ -24,7 +24,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_HEADER', - pParallelDegree => 1 + pParallelDegree => 1, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ADHOC_ADJ_HEADER exported'); EXCEPTION @@ -44,7 +45,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_ITEM', - pParallelDegree => 1 + pParallelDegree => 1, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ADHOC_ADJ_ITEM exported'); EXCEPTION @@ -64,7 +66,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_ITEM_HEADER', - pParallelDegree => 1 + pParallelDegree => 1, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ADHOC_ADJ_ITEM_HEADER exported'); EXCEPTION diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-826/02_MARS_826_export_BALANCESHEET_tables.sql b/MARS_Packages/REL01_ADDITIONS/MARS-826/02_MARS_826_export_BALANCESHEET_tables.sql index 
bd9a561..7294564 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-826/02_MARS_826_export_BALANCESHEET_tables.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-826/02_MARS_826_export_BALANCESHEET_tables.sql @@ -29,7 +29,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_BALANCESHEET_HEADER', - pParallelDegree => 4 + pParallelDegree => 4, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_BALANCESHEET_HEADER exported'); EXCEPTION @@ -49,7 +50,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_BALANCESHEET_ITEM', - pParallelDegree => 16 + pParallelDegree => 16, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_BALANCESHEET_ITEM exported'); EXCEPTION diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-826/03_MARS_826_export_CSM_ADJ_tables.sql b/MARS_Packages/REL01_ADDITIONS/MARS-826/03_MARS_826_export_CSM_ADJ_tables.sql index 8a9453d..f86e25f 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-826/03_MARS_826_export_CSM_ADJ_tables.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-826/03_MARS_826_export_CSM_ADJ_tables.sql @@ -24,7 +24,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_HEADER', - pParallelDegree => 1 + pParallelDegree => 1, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_CSM_ADJ_HEADER exported'); EXCEPTION @@ -44,7 +45,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_ITEM', - pParallelDegree => 2 + pParallelDegree => 2, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_CSM_ADJ_ITEM exported'); EXCEPTION @@ -64,7 +66,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_ITEM_HEADER', - pParallelDegree => 2 + pParallelDegree => 2, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: 
LEGACY_CSM_ADJ_ITEM_HEADER exported'); EXCEPTION diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-826/04_MARS_826_export_STANDING_FACILITY_tables.sql b/MARS_Packages/REL01_ADDITIONS/MARS-826/04_MARS_826_export_STANDING_FACILITY_tables.sql index f1931c5..fef4bf9 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-826/04_MARS_826_export_STANDING_FACILITY_tables.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-826/04_MARS_826_export_STANDING_FACILITY_tables.sql @@ -29,7 +29,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_STANDING_FACILITIES', - pParallelDegree => 8 + pParallelDegree => 8, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_STANDING_FACILITY exported'); EXCEPTION @@ -49,7 +50,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_STANDING_FACILITIES_HEADER', - pParallelDegree => 2 + pParallelDegree => 2, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_STANDING_FACILITY_HEADER exported'); EXCEPTION diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-826/05_MARS_826_export_MRR_IND_CURRENT_ACCOUNT_tables.sql b/MARS_Packages/REL01_ADDITIONS/MARS-826/05_MARS_826_export_MRR_IND_CURRENT_ACCOUNT_tables.sql index 5ae57de..6d31e2d 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-826/05_MARS_826_export_MRR_IND_CURRENT_ACCOUNT_tables.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-826/05_MARS_826_export_MRR_IND_CURRENT_ACCOUNT_tables.sql @@ -25,7 +25,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_CURRENT_ACCOUNTS_HEADER', - pParallelDegree => 2 + pParallelDegree => 2, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_MRR_IND_CURRENT_ACCOUNT_HEADER exported'); EXCEPTION @@ -45,7 +46,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_CURRENT_ACCOUNTS_ITEM', - pParallelDegree => 16 + pParallelDegree => 16, + pJobClass => 
'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_MRR_IND_CURRENT_ACCOUNT_ITEM exported'); EXCEPTION diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-826/06_MARS_826_export_FORECAST_tables.sql b/MARS_Packages/REL01_ADDITIONS/MARS-826/06_MARS_826_export_FORECAST_tables.sql index 3cd4a8d..7e0202c 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-826/06_MARS_826_export_FORECAST_tables.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-826/06_MARS_826_export_FORECAST_tables.sql @@ -29,7 +29,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_FORECAST_HEADER', - pParallelDegree => 4 + pParallelDegree => 4, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_FORECAST_HEADER exported'); EXCEPTION @@ -49,7 +50,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_FORECAST_ITEM', - pParallelDegree => 16 + pParallelDegree => 16, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_FORECAST_ITEM exported'); EXCEPTION diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-826/07_MARS_826_export_QR_ADJ_tables.sql b/MARS_Packages/REL01_ADDITIONS/MARS-826/07_MARS_826_export_QR_ADJ_tables.sql index 1767964..cc512ba 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-826/07_MARS_826_export_QR_ADJ_tables.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-826/07_MARS_826_export_QR_ADJ_tables.sql @@ -24,7 +24,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_HEADER', - pParallelDegree => 1 + pParallelDegree => 1, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_QR_ADJ_HEADER exported'); EXCEPTION @@ -44,7 +45,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_ITEM', - pParallelDegree => 4 + pParallelDegree => 4, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_QR_ADJ_ITEM exported'); EXCEPTION @@ -64,7 +66,8 @@ 
BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_ITEM_HEADER', - pParallelDegree => 2 + pParallelDegree => 2, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_QR_ADJ_ITEM_HEADER exported'); EXCEPTION diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-826/08_MARS_826_export_TTS_tables.sql b/MARS_Packages/REL01_ADDITIONS/MARS-826/08_MARS_826_export_TTS_tables.sql index 6dfc97f..8150372 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-826/08_MARS_826_export_TTS_tables.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-826/08_MARS_826_export_TTS_tables.sql @@ -24,7 +24,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_TTS_HEADER', - pParallelDegree => 1 + pParallelDegree => 1, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_TTS_HEADER exported'); EXCEPTION @@ -44,7 +45,8 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/LM/LM_TTS_ITEM', - pParallelDegree => 1 + pParallelDegree => 1, + pJobClass => 'high' ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_TTS_ITEM exported'); EXCEPTION diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/02_MARS_835_PREHOOK_install_DATA_EXPORTER.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/02_MARS_835_PREHOOK_install_DATA_EXPORTER.sql index b7b07d7..0f87db6 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/02_MARS_835_PREHOOK_install_DATA_EXPORTER.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/02_MARS_835_PREHOOK_install_DATA_EXPORTER.sql @@ -1,9 +1,10 @@ -- ============================================================================ -- MARS-835-PREHOOK Installation Script 02: DATA_EXPORTER Package -- ============================================================================ --- Purpose: Deploy updated DATA_EXPORTER package (SPEC + BODY) with parallel processing +-- Purpose: Deploy updated DATA_EXPORTER package (SPEC + 
BODY) v2.8.1 -- Schema: CT_MRDS -- Object: PACKAGE DATA_EXPORTER + -- ============================================================================ SET SERVEROUTPUT ON SIZE UNLIMITED @@ -13,8 +14,8 @@ PROMPT ========================================================================= PROMPT MARS-835-PREHOOK: Installing CT_MRDS.DATA_EXPORTER Package PROMPT ============================================================================ PROMPT Package: CT_MRDS.DATA_EXPORTER -PROMPT Version: 2.2.0 -> 2.4.0 (MINOR) -PROMPT Change: Added parallel processing + Smart Column Mapping for CSV exports +PROMPT Version: 2.2.0 -> 2.8.1 (MINOR) +PROMPT Change: Fixed query in EXPORT_TABLE_DATA - removed A_LOAD_HISTORY join for single file PROMPT ============================================================================ PROMPT diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/03_MARS_835_PREHOOK_update_SOURCE_FILE_RECEIVED_table.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/03_MARS_835_PREHOOK_update_SOURCE_FILE_RECEIVED_table.sql new file mode 100644 index 0000000..707439f --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/03_MARS_835_PREHOOK_update_SOURCE_FILE_RECEIVED_table.sql @@ -0,0 +1,70 @@ +-- ==================================================================== +-- MARS-835-PREHOOK: Update A_SOURCE_FILE_RECEIVED Table Structure +-- ==================================================================== +-- Purpose: +-- 1. Rename column ARCH_FILE_NAME to ARCH_PATH +-- 2. 
Add new column PROCESS_NAME VARCHAR2(200) +-- Author: Grzegorz Michalski +-- Date: 2026-02-13 +-- ==================================================================== + +SET SERVEROUTPUT ON SIZE UNLIMITED +SET TIMING ON + +PROMPT ==================================================================== +PROMPT MARS-835-PREHOOK: Updating A_SOURCE_FILE_RECEIVED table structure +PROMPT ==================================================================== + +-- Check if column ARCH_FILE_NAME exists +DECLARE + v_column_exists NUMBER; + v_process_name_exists NUMBER; +BEGIN + -- Check if ARCH_FILE_NAME exists + SELECT COUNT(*) + INTO v_column_exists + FROM dba_tab_columns + WHERE owner = 'CT_MRDS' + AND table_name = 'A_SOURCE_FILE_RECEIVED' + AND column_name = 'ARCH_FILE_NAME'; + + IF v_column_exists > 0 THEN + DBMS_OUTPUT.PUT_LINE('INFO: Renaming column ARCH_FILE_NAME to ARCH_PATH...'); + EXECUTE IMMEDIATE 'ALTER TABLE CT_MRDS.A_SOURCE_FILE_RECEIVED RENAME COLUMN ARCH_FILE_NAME TO ARCH_PATH'; + DBMS_OUTPUT.PUT_LINE('SUCCESS: Column renamed to ARCH_PATH'); + ELSE + DBMS_OUTPUT.PUT_LINE('INFO: Column ARCH_FILE_NAME does not exist (already renamed or first install)'); + END IF; + + -- Check if PROCESS_NAME already exists + SELECT COUNT(*) + INTO v_process_name_exists + FROM dba_tab_columns + WHERE owner = 'CT_MRDS' + AND table_name = 'A_SOURCE_FILE_RECEIVED' + AND column_name = 'PROCESS_NAME'; + + IF v_process_name_exists = 0 THEN + DBMS_OUTPUT.PUT_LINE('INFO: Adding new column PROCESS_NAME...'); + EXECUTE IMMEDIATE 'ALTER TABLE CT_MRDS.A_SOURCE_FILE_RECEIVED ADD (PROCESS_NAME VARCHAR2(200))'; + DBMS_OUTPUT.PUT_LINE('SUCCESS: Column PROCESS_NAME added'); + + -- Add comment on new column + EXECUTE IMMEDIATE 'COMMENT ON COLUMN CT_MRDS.A_SOURCE_FILE_RECEIVED.PROCESS_NAME IS ''Name of the process that created this record'''; + DBMS_OUTPUT.PUT_LINE('SUCCESS: Comment added to PROCESS_NAME column'); + ELSE + DBMS_OUTPUT.PUT_LINE('INFO: Column PROCESS_NAME already exists'); + END IF; + + 
DBMS_OUTPUT.PUT_LINE('SUCCESS: A_SOURCE_FILE_RECEIVED table structure updated successfully'); + +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('ERROR: Failed to update table structure: ' || SQLERRM); + RAISE; +END; +/ + +PROMPT ==================================================================== +PROMPT A_SOURCE_FILE_RECEIVED Table Update Completed +PROMPT ==================================================================== diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/93_MARS_835_PREHOOK_rollback_SOURCE_FILE_RECEIVED_table.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/93_MARS_835_PREHOOK_rollback_SOURCE_FILE_RECEIVED_table.sql new file mode 100644 index 0000000..489a0e5 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/93_MARS_835_PREHOOK_rollback_SOURCE_FILE_RECEIVED_table.sql @@ -0,0 +1,65 @@ +-- ==================================================================== +-- MARS-835-PREHOOK ROLLBACK: Revert A_SOURCE_FILE_RECEIVED Table Structure +-- ==================================================================== +-- Purpose: +-- 1. Rename column ARCH_PATH back to ARCH_FILE_NAME +-- 2. 
Remove column PROCESS_NAME +-- Author: Grzegorz Michalski +-- Date: 2026-02-13 +-- ==================================================================== + +SET SERVEROUTPUT ON SIZE UNLIMITED +SET TIMING ON + +PROMPT ==================================================================== +PROMPT MARS-835-PREHOOK ROLLBACK: Reverting A_SOURCE_FILE_RECEIVED table +PROMPT ==================================================================== + +DECLARE + v_column_exists NUMBER; + v_process_name_exists NUMBER; +BEGIN + -- Check if ARCH_PATH exists (needs to be renamed back) + SELECT COUNT(*) + INTO v_column_exists + FROM dba_tab_columns + WHERE owner = 'CT_MRDS' + AND table_name = 'A_SOURCE_FILE_RECEIVED' + AND column_name = 'ARCH_PATH'; + + IF v_column_exists > 0 THEN + DBMS_OUTPUT.PUT_LINE('INFO: Renaming column ARCH_PATH back to ARCH_FILE_NAME...'); + EXECUTE IMMEDIATE 'ALTER TABLE CT_MRDS.A_SOURCE_FILE_RECEIVED RENAME COLUMN ARCH_PATH TO ARCH_FILE_NAME'; + DBMS_OUTPUT.PUT_LINE('SUCCESS: Column renamed back to ARCH_FILE_NAME'); + ELSE + DBMS_OUTPUT.PUT_LINE('INFO: Column ARCH_PATH does not exist (already rolled back)'); + END IF; + + -- Check if PROCESS_NAME exists (needs to be dropped) + SELECT COUNT(*) + INTO v_process_name_exists + FROM dba_tab_columns + WHERE owner = 'CT_MRDS' + AND table_name = 'A_SOURCE_FILE_RECEIVED' + AND column_name = 'PROCESS_NAME'; + + IF v_process_name_exists > 0 THEN + DBMS_OUTPUT.PUT_LINE('INFO: Dropping column PROCESS_NAME...'); + EXECUTE IMMEDIATE 'ALTER TABLE CT_MRDS.A_SOURCE_FILE_RECEIVED DROP COLUMN PROCESS_NAME'; + DBMS_OUTPUT.PUT_LINE('SUCCESS: Column PROCESS_NAME dropped'); + ELSE + DBMS_OUTPUT.PUT_LINE('INFO: Column PROCESS_NAME does not exist (already rolled back)'); + END IF; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: A_SOURCE_FILE_RECEIVED table structure rollback completed'); + +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('ERROR: Failed to rollback table structure: ' || SQLERRM); + RAISE; +END; +/ + +PROMPT 
==================================================================== +PROMPT A_SOURCE_FILE_RECEIVED Table Rollback Completed +PROMPT ==================================================================== diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/install_mars835_prehook.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/install_mars835_prehook.sql index 2542116..899b14f 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/install_mars835_prehook.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/install_mars835_prehook.sql @@ -31,6 +31,7 @@ PROMPT ========================================================================= PROMPT PROMPT This script will: PROMPT - Create A_PARALLEL_EXPORT_CHUNKS table with unique timestamp task names +PROMPT - Update A_SOURCE_FILE_RECEIVED table (rename ARCH_FILE_NAME to ARCH_PATH, add PROCESS_NAME column) PROMPT - Update ENV_MANAGER to v3.2.0 (add parallel execution error codes) PROMPT - Update DATA_EXPORTER to v2.4.0 (DBMS_PARALLEL_EXECUTE + Smart Column Mapping) PROMPT - Add pParallelDegree parameter (1-16 threads) to EXPORT_*_BY_DATE procedures @@ -59,25 +60,31 @@ PROMPT ========================================================================= PROMPT PROMPT ========================================================================= -PROMPT Step 2: Deploy ENV_MANAGER Package +PROMPT Step 2: Update A_SOURCE_FILE_RECEIVED Table Structure +PROMPT ========================================================================= +@@03_MARS_835_PREHOOK_update_SOURCE_FILE_RECEIVED_table.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 3: Deploy ENV_MANAGER Package PROMPT ========================================================================= @@01_MARS_835_PREHOOK_install_ENV_MANAGER.sql PROMPT PROMPT ========================================================================= -PROMPT Step 3: Deploy DATA_EXPORTER Package +PROMPT Step 4: Deploy DATA_EXPORTER 
Package PROMPT ========================================================================= @@02_MARS_835_PREHOOK_install_DATA_EXPORTER.sql PROMPT PROMPT ========================================================================= -PROMPT Step 4: Track Package Versions +PROMPT Step 5: Track Package Versions PROMPT ========================================================================= @@track_package_versions.sql PROMPT PROMPT ========================================================================= -PROMPT Step 5: Verify Package Versions +PROMPT Step 6: Verify Package Versions PROMPT ========================================================================= @@verify_packages_version.sql diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql index a5514cb..bd37230 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql @@ -43,6 +43,7 @@ CREATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( FILE_BASE_NAME VARCHAR2(1000), TEMPLATE_TABLE_NAME VARCHAR2(200), MAX_FILE_SIZE NUMBER DEFAULT 104857600 NOT NULL, + JOB_CLASS VARCHAR2(128), STATUS VARCHAR2(30) DEFAULT 'PENDING' NOT NULL, ERROR_MESSAGE VARCHAR2(4000), EXPORT_TIMESTAMP TIMESTAMP, @@ -69,6 +70,7 @@ COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FORMAT_TYPE IS 'Export format COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FILE_BASE_NAME IS 'Base filename for CSV exports (NULL for Parquet)'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.TEMPLATE_TABLE_NAME IS 'Template table name for per-column date format configuration (e.g., CT_ET_TEMPLATES.TABLE_NAME)'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.MAX_FILE_SIZE IS 'Maximum file size in bytes for CSV exports only (e.g., 104857600 = 100MB, 1073741824 = 1GB) - default 100MB (104857600). 
NOTE: Not applicable for PARQUET format (Oracle limitation)'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.JOB_CLASS IS 'Oracle Scheduler job class name for resource management (e.g., ''high'', ''DEFAULT_JOB_CLASS'') - NULL uses default scheduler priority'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.STATUS IS 'Chunk processing status: PENDING (not started), PROCESSING (in progress), COMPLETED (success), FAILED (error) - allows retry of failed partitions only'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.ERROR_MESSAGE IS 'Error message if chunk processing failed (STATUS = FAILED)'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.EXPORT_TIMESTAMP IS 'Timestamp when chunk export was completed (STATUS = COMPLETED)'; diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_SOURCE_FILE_RECEIVED.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_SOURCE_FILE_RECEIVED.sql new file mode 100644 index 0000000..b67d6b5 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_SOURCE_FILE_RECEIVED.sql @@ -0,0 +1,30 @@ +-- ==================================================================== +-- A_SOURCE_FILE_RECEIVED Table +-- ==================================================================== +-- Purpose: Track received files and their processing status +-- ==================================================================== + +CREATE TABLE CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY NUMBER(38,0) NOT NULL ENABLE, + A_SOURCE_FILE_CONFIG_KEY NUMBER(38,0) NOT NULL ENABLE, + SOURCE_FILE_NAME VARCHAR2(1000) NOT NULL, + CHECKSUM VARCHAR2(128), + CREATED TIMESTAMP(6) WITH TIME ZONE, + BYTES NUMBER, + RECEPTION_DATE DATE NOT NULL, + PROCESSING_STATUS VARCHAR2(200), + EXTERNAL_TABLE_NAME VARCHAR2(200), + PARTITION_YEAR VARCHAR2(4), + PARTITION_MONTH VARCHAR2(2), + ARCH_PATH VARCHAR2(1000), + PROCESS_NAME VARCHAR2(200), + CONSTRAINT A_SOURCE_FILE_RECEIVED_PK PRIMARY KEY 
(A_SOURCE_FILE_RECEIVED_KEY), + CONSTRAINT ASFR_A_SOURCE_FILE_CONFIG_KEY_FK FOREIGN KEY(A_SOURCE_FILE_CONFIG_KEY) REFERENCES CT_MRDS.A_SOURCE_FILE_CONFIG(A_SOURCE_FILE_CONFIG_KEY), + CONSTRAINT A_SOURCE_FILE_RECEIVED_CHK CHECK (PROCESSING_STATUS IN ('RECEIVED', 'VALIDATED', 'READY_FOR_INGESTION', 'INGESTED', 'ARCHIVED')) +) TABLESPACE "DATA"; + +-- Unique index for file identification (workaround for TIMESTAMP WITH TIMEZONE constraint limitation) +CREATE UNIQUE INDEX CT_MRDS.A_SOURCE_FILE_RECEIVED_UK1 +ON CT_MRDS.A_SOURCE_FILE_RECEIVED(CHECKSUM, CREATED, BYTES); + +GRANT SELECT, INSERT, UPDATE, DELETE ON CT_MRDS.A_SOURCE_FILE_RECEIVED TO MRDS_LOADER_ROLE; \ No newline at end of file diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb index 20bc5fa..05d97d8 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb @@ -501,6 +501,7 @@ AS vFormat VARCHAR2(20); vFileBaseName VARCHAR2(1000); vMaxFileSize NUMBER; + vJobClass VARCHAR2(128); vParameters VARCHAR2(4000); BEGIN -- Retrieve chunk context from global temporary table @@ -518,7 +519,8 @@ AS CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, - MAX_FILE_SIZE + MAX_FILE_SIZE, + JOB_CLASS INTO vYear, vMonth, @@ -533,7 +535,8 @@ AS vCredentialName, vFormat, vFileBaseName, - vMaxFileSize + vMaxFileSize, + vJobClass FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE CHUNK_ID = pStartId; @@ -602,20 +605,17 @@ AS pKeyColumnName IN VARCHAR2, pBucketArea IN VARCHAR2, pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 default NULL, pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName ) IS - -- Type definition for key 
values - TYPE key_value_tab IS TABLE OF VARCHAR2(4000); - vKeyValues key_value_tab; vCount INTEGER; - vSql VARCHAR2(4000); - vKeyValue VARCHAR2(4000); vQuery VARCHAR2(32767); vUri VARCHAR2(4000); - vDataType VARCHAR2(30); vTableName VARCHAR2(128); vSchemaName VARCHAR2(128); vKeyColumnName VARCHAR2(128); @@ -638,8 +638,11 @@ AS ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 'NULL')||'''' ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' + ,'pProcessName => '''||nvl(pProcessName, 'NULL')||'''' ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' )); ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); @@ -671,16 +674,8 @@ AS IF vCount = 0 THEN RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); - END IF; - -- Get the data type of the key column - SELECT data_type INTO vDataType - FROM all_tab_columns - WHERE table_name = vTableName - AND column_name = vKeyColumnName - AND owner = vSchemaName; - -- Validate template table if provided IF pTemplateTableName IS NOT NULL THEN DECLARE @@ -760,183 +755,174 @@ AS ENV_MANAGER.LOG_PROCESS_EVENT('File registration enabled with config key: ' || vConfigKey, 'INFO', vParameters); END IF; - -- Fetch unique key values from A_LOAD_HISTORY - vSql := 'SELECT DISTINCT L.A_ETL_LOAD_SET_KEY' || - ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || - ' WHERE T.' 
|| DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY'; + -- Construct single query for entire table (no join with A_LOAD_HISTORY - ensures single file output) + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T'; + + -- Construct the URI for the file in OCI Object Storage + vUri := vBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + NVL(pFileName, UPPER(vTableName) || '.csv'); + + ENV_MANAGER.LOG_PROCESS_EVENT('Exporting to single file: ' || vUri, 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Max file size: ' || pMaxFileSize || ' bytes (' || ROUND(pMaxFileSize/1048576, 2) || ' MB)', 'DEBUG', vParameters); + + -- Use DBMS_CLOUD package to export data to the URI + -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 100MB (104857600) + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object( + 'type' VALUE 'CSV', + 'header' VALUE true, + 'quote' VALUE CHR(34), + 'delimiter' VALUE ',', + 'escape' VALUE true, + 'recorddelimiter' VALUE CHR(13)||CHR(10), -- CRLF for Windows + 'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes + ) + ); - ENV_MANAGER.LOG_PROCESS_EVENT('Executing key values query: ' || vSql, 'DEBUG', vParameters); - EXECUTE IMMEDIATE vSql BULK COLLECT INTO vKeyValues; - ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vKeyValues.COUNT || ' unique key values to process', 'DEBUG', vParameters); - - -- Loop over each unique key value - FOR i IN 1 .. 
vKeyValues.COUNT LOOP - vKeyValue := vKeyValues(i); - - -- Construct the query to extract data for the current key value with A_WORKFLOW_HISTORY_KEY mapping - IF vDataType IN ('VARCHAR2', 'CHAR', 'NCHAR', 'NVARCHAR2') THEN - vQuery := 'SELECT ' || vProcessedColumnList || - ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || - ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || - ' AND L.A_ETL_LOAD_SET_KEY = ' || CHR(39) || vKeyValue || CHR(39); - ELSIF vDataType IN ('NUMBER', 'FLOAT', 'BINARY_FLOAT', 'BINARY_DOUBLE') THEN - vQuery := 'SELECT ' || vProcessedColumnList || - ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || - ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || - ' AND L.A_ETL_LOAD_SET_KEY = ' || vKeyValue; - ELSIF vDataType LIKE 'TIMESTAMP%' OR vDataType = 'DATE' THEN - vQuery := 'SELECT ' || vProcessedColumnList || - ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || - ' WHERE T.' 
|| DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || - ' AND L.A_ETL_LOAD_SET_KEY = TO_TIMESTAMP(' || CHR(39) || vKeyValue || CHR(39) ||', ''YYYY-MM-DD HH24:MI:SS.FF'')'; - ELSE - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE); - END IF; - - -- Construct the URI for the file in OCI Object Storage - vUri := vBucketUri || - CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || - sanitizeFilename(vKeyValue) || '.csv'; - - ENV_MANAGER.LOG_PROCESS_EVENT('Processing key value: ' || vKeyValue || ' (' || (i) || '/' || vKeyValues.COUNT || ')', 'DEBUG', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT('Export URI: ' || vUri, 'DEBUG', vParameters); - - -- Use DBMS_CLOUD package to export data to the URI - DBMS_CLOUD.EXPORT_DATA( - credential_name => pCredentialName, - file_uri_list => vUri, - query => vQuery, - format => json_object('type' VALUE 'CSV', 'header' VALUE true) - ); - - -- Register exported file to A_SOURCE_FILE_RECEIVED if requested - IF pRegisterExport THEN - DECLARE - vChecksum VARCHAR2(128); - vCreated TIMESTAMP WITH TIME ZONE; - vBytes NUMBER; - vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix - vSanitizedFileName VARCHAR2(1000); - vFileName VARCHAR2(1000); - vRetryCount NUMBER := 0; - vMaxRetries NUMBER := 1; -- One retry after initial attempt - vRetryDelay NUMBER := 2; -- 2 seconds delay - BEGIN - -- Extract filename from URI (after last '/') - vFileName := SUBSTR(vUri, INSTR(vUri, '/', -1) + 1); - - -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) - vSanitizedFileName := sanitizeFilename(vFileName); - - -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) - -- Example: keyvalue.csv becomes keyvalue_1_20260211T102621591769Z.csv - vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 
1, 0, 'i'); - - -- Try to get file metadata with retry logic - <> - LOOP - BEGIN - SELECT object_name, checksum, created, bytes - INTO vActualFileName, vChecksum, vCreated, vBytes - FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( - credential_name => pCredentialName, - location_uri => vBucketUri - )) - WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' - ORDER BY created DESC, bytes DESC - FETCH FIRST 1 ROW ONLY; - - -- Extract filename only from full path (remove bucket folder prefix) - vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1); - - -- Success - exit retry loop - EXIT metadata_retry_loop; - - EXCEPTION - WHEN NO_DATA_FOUND THEN - vRetryCount := vRetryCount + 1; - - IF vRetryCount <= vMaxRetries THEN - -- Log retry attempt - ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); - - -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) - DBMS_SESSION.SLEEP(vRetryDelay); - ELSE - -- Max retries exceeded - re-raise exception - RAISE; - END IF; - END; - END LOOP metadata_retry_loop; - - -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata - vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; - INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( - A_SOURCE_FILE_RECEIVED_KEY, - A_SOURCE_FILE_CONFIG_KEY, - SOURCE_FILE_NAME, - CHECKSUM, - CREATED, - BYTES, - RECEPTION_DATE, - PROCESSING_STATUS, - PARTITION_YEAR, - PARTITION_MONTH, - ARCH_FILE_NAME - ) VALUES ( - vSourceFileReceivedKey, - NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 - vActualFileName, -- Use actual filename with Oracle suffix - vChecksum, - vCreated, - vBytes, - SYSDATE, - 'INGESTED', - NULL, -- PARTITION_YEAR not used for single-file exports - NULL, -- PARTITION_MONTH not used for single-file 
exports - NULL -- ARCH_FILE_NAME not used for single-file exports - ); - - ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters); - EXCEPTION - WHEN NO_DATA_FOUND THEN - -- File not found after retries - log warning and continue without metadata - ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); - - -- Sanitize filename for fallback INSERT (function cannot be used in SQL) - vSanitizedFileName := sanitizeFilename(vFileName); - - -- Insert without metadata using theoretical filename - vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; - INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( - A_SOURCE_FILE_RECEIVED_KEY, - A_SOURCE_FILE_CONFIG_KEY, - SOURCE_FILE_NAME, - RECEPTION_DATE, - PROCESSING_STATUS, - PARTITION_YEAR, - PARTITION_MONTH, - ARCH_FILE_NAME - ) VALUES ( - vSourceFileReceivedKey, - NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 - vSanitizedFileName, -- Use pre-calculated sanitized filename - SYSDATE, - 'INGESTED', - NULL, -- PARTITION_YEAR not used for single-file exports - NULL, -- PARTITION_MONTH not used for single-file exports - NULL -- ARCH_FILE_NAME not used for single-file exports - ); - - ENV_MANAGER.LOG_PROCESS_EVENT('Registered file without metadata: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vSanitizedFileName, 'DEBUG', vParameters); - END; - END IF; - END LOOP; - - -- Log summary of file registration if enabled + -- Register exported file to A_SOURCE_FILE_RECEIVED if requested IF pRegisterExport THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Registered ' || vKeyValues.COUNT || ' exported files to A_SOURCE_FILE_RECEIVED with config key: ' || vConfigKey, 'INFO', vParameters); + DECLARE + vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix + vSanitizedFileName 
VARCHAR2(1000); + vFileName VARCHAR2(1000); + vRetryCount NUMBER := 0; + vMaxRetries NUMBER := 1; -- One retry after initial attempt + vRetryDelay NUMBER := 2; -- 2 seconds delay + vFilesFound NUMBER := 0; + vTotalBytes NUMBER := 0; + BEGIN + -- Extract filename from URI (after last '/') + vFileName := SUBSTR(vUri, INSTR(vUri, '/', -1) + 1); + + -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) + -- Example: tablename.csv becomes tablename_1_20260211T102621591769Z.csv + vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i'); + + -- Try to get ALL exported files with retry logic + -- Oracle DBMS_CLOUD.EXPORT_DATA can create MULTIPLE files due to: + -- 1. maxfilesize parameter (splits files larger than limit) + -- 2. Automatic parallel processing (especially on large production instances) + -- We must register ALL files, not just the first one + <> + LOOP + BEGIN + -- Register ALL files matching the pattern (cursor loop) + FOR rec IN ( + SELECT object_name, checksum, created, bytes + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => pCredentialName, + location_uri => vBucketUri + )) + WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' + ORDER BY created DESC, bytes DESC + ) LOOP + -- Extract filename only from full path (remove bucket folder prefix) + vActualFileName := SUBSTR(rec.object_name, INSTR(rec.object_name, '/', -1) + 1); + + -- Create A_SOURCE_FILE_RECEIVED record for EACH exported file + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + CHECKSUM, + CREATED, + BYTES, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + 
PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vActualFileName, -- Use actual filename with Oracle suffix + rec.checksum, + rec.created, + rec.bytes, + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL, -- ARCH_PATH not used for single-file exports + pProcessName -- Process name from parameter + ); + + vFilesFound := vFilesFound + 1; + vTotalBytes := vTotalBytes + rec.bytes; + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file ' || vFilesFound || ': FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || rec.bytes || ' bytes', 'INFO', vParameters); + END LOOP; + + -- Check if any files were found + IF vFilesFound = 0 THEN + RAISE NO_DATA_FOUND; + END IF; + + -- Success - exit retry loop + ENV_MANAGER.LOG_PROCESS_EVENT('Total registered: ' || vFilesFound || ' file(s), Total size: ' || vTotalBytes || ' bytes (' || ROUND(vTotalBytes/1048576, 2) || ' MB)', 'INFO', vParameters); + EXIT metadata_retry_loop; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vRetryCount := vRetryCount + 1; + + IF vRetryCount <= vMaxRetries THEN + -- Log retry attempt + ENV_MANAGER.LOG_PROCESS_EVENT('File(s) not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); + + -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) + DBMS_SESSION.SLEEP(vRetryDelay); + ELSE + -- Max retries exceeded - re-raise exception + RAISE; + END IF; + END; + END LOOP metadata_retry_loop; + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- File not found after retries - log warning and continue without metadata + ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); + + -- 
Sanitize filename for fallback INSERT (function cannot be used in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Insert without metadata using theoretical filename + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vSanitizedFileName, -- Use pre-calculated sanitized filename + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL, -- ARCH_PATH not used for single-file exports + pProcessName -- Process name from parameter + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file without metadata: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vSanitizedFileName, 'INFO', vParameters); + END; END IF; ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); @@ -949,10 +935,6 @@ AS vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in column list' ELSE '' END; ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); - WHEN ENV_MANAGER.ERR_UNSUPPORTED_DATA_TYPE THEN - vgMsgTmp := ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE || ' vDataType: '||vDataType; - ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, vgMsgTmp); WHEN OTHERS THEN -- Log complete error details including full stack trace and backtrace ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); @@ -974,6 +956,7 @@ AS pMaxDate IN DATE 
default SYSDATE, pParallelDegree IN NUMBER default 1, pTemplateTableName IN VARCHAR2 default NULL, + pJobClass IN VARCHAR2 default NULL, pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName ) IS @@ -997,6 +980,7 @@ AS ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||'''' ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pJobClass => '''||nvl(pJobClass, 'NULL')||'''' ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' )); ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); @@ -1068,22 +1052,7 @@ AS vChunkId NUMBER; BEGIN ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); - - -- Clean up old completed chunks (>24 hours) to prevent table bloat - -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) - -- This prevents race conditions when multiple exports run simultaneously - DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS - WHERE STATUS = 'COMPLETED' - AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY; - COMMIT; - - ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters); - -- This prevents re-exporting successfully completed partitions - DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'COMPLETED'; - COMMIT; - - ENV_MANAGER.LOG_PROCESS_EVENT('Cleared COMPLETED chunks. FAILED chunks retained for retry.', 'DEBUG', vParameters); - + -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) FOR i IN 1 .. 
vPartitions.COUNT LOOP MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t @@ -1092,10 +1061,10 @@ AS WHEN NOT MATCHED THEN INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, - CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, JOB_CLASS, STATUS) VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, - pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, 'PENDING') + pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, pJobClass, 'PENDING') WHEN MATCHED THEN UPDATE SET TASK_NAME = vTaskName, STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, @@ -1127,14 +1096,24 @@ AS ); -- Execute task in parallel - ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel task: ' || vTaskName, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel task: ' || vTaskName || CASE WHEN pJobClass IS NOT NULL THEN ' with job class: ' || pJobClass ELSE '' END, 'DEBUG', vParameters); - DBMS_PARALLEL_EXECUTE.RUN_TASK( - task_name => vTaskName, - sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', - language_flag => DBMS_SQL.NATIVE, - parallel_level => pParallelDegree - ); + IF pJobClass IS NOT NULL THEN + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree, + job_class => pJobClass + ); + ELSE + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => 
DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + END IF; -- Check for errors DECLARE @@ -1238,6 +1217,8 @@ AS pTemplateTableName IN VARCHAR2 default NULL, pMaxFileSize IN NUMBER default 104857600, pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', + pJobClass IN VARCHAR2 default NULL, pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName ) IS @@ -1275,6 +1256,7 @@ AS ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' + ,'pJobClass => '''||nvl(pJobClass, 'NULL')||'''' ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' )); ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); @@ -1383,10 +1365,10 @@ AS WHEN NOT MATCHED THEN INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, - CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, JOB_CLASS, STATUS) VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, - pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, 'PENDING') + pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, pJobClass, 'PENDING') WHEN MATCHED THEN UPDATE SET TASK_NAME = vTaskName, STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, @@ -1418,14 +1400,24 @@ AS ); -- Execute task in parallel - ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel CSV export task: ' || vTaskName, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel CSV export task: ' || vTaskName || CASE WHEN pJobClass IS NOT NULL 
THEN ' with job class: ' || pJobClass ELSE '' END, 'DEBUG', vParameters); - DBMS_PARALLEL_EXECUTE.RUN_TASK( - task_name => vTaskName, - sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', - language_flag => DBMS_SQL.NATIVE, - parallel_level => pParallelDegree - ); + IF pJobClass IS NOT NULL THEN + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree, + job_class => pJobClass + ); + ELSE + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + END IF; -- Check for errors DECLARE @@ -1584,7 +1576,8 @@ AS PROCESSING_STATUS, PARTITION_YEAR, PARTITION_MONTH, - ARCH_FILE_NAME + ARCH_PATH, + PROCESS_NAME ) VALUES ( vSourceFileReceivedKey, vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup @@ -1596,7 +1589,8 @@ AS 'INGESTED', NULL, -- PARTITION_YEAR not used for CSV exports NULL, -- PARTITION_MONTH not used for CSV exports - NULL -- ARCH_FILE_NAME not used for CSV exports + NULL, -- ARCH_PATH not used for CSV exports + pProcessName -- Process name from parameter ); ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters); @@ -1618,7 +1612,8 @@ AS PROCESSING_STATUS, PARTITION_YEAR, PARTITION_MONTH, - ARCH_FILE_NAME + ARCH_PATH, + PROCESS_NAME ) VALUES ( vSourceFileReceivedKey, vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup @@ -1627,7 +1622,8 @@ AS 'INGESTED', NULL, -- PARTITION_YEAR not used for CSV exports NULL, -- PARTITION_MONTH not used for CSV exports - NULL -- ARCH_FILE_NAME not used for CSV exports + NULL, -- ARCH_PATH not used for CSV 
exports + pProcessName -- Process name from parameter ); END; END LOOP; diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg index dcb7e51..a723db1 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg @@ -9,21 +9,17 @@ AS **/ -- Package Version Information - PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.7.5'; - PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-11 12:15:00'; + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.11.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-18 10:00:00'; PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; -- Version History (last 3-5 changes) VERSION_HISTORY CONSTANT VARCHAR2(4000) := - 'v2.7.5 (2026-02-11): Added pRegisterExport parameter to EXPORT_TABLE_DATA procedure. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) || - 'v2.7.4 (2026-02-11): ACTUAL FILENAME STORAGE - Store real filename with Oracle suffix in SOURCE_FILE_NAME instead of theoretical filename.' || CHR(10) || - 'v2.7.3 (2026-02-11): FIX LIKE pattern for DBMS_CLOUD.LIST_OBJECTS - Removed .csv extension from filename before pattern matching.' || CHR(10) || - 'v2.7.2 (2026-02-11): FIX pRegisterExport in EXPORT_TABLE_DATA_TO_CSV_BY_DATE - Added missing pRegisterExport parameter to EXPORT_SINGLE_PARTITION call.' || CHR(10) || - 'v2.7.1 (2026-02-11): AUTO-LOOKUP A_SOURCE_FILE_CONFIG_KEY - Parse pFolderName to automatically find config key from A_SOURCE_FILE_CONFIG.' || CHR(10) || - 'v2.7.0 (2026-02-10): Added pRegisterExport parameter to EXPORT_TABLE_DATA_TO_CSV_BY_DATE. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) || - 'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. 
SQLERRM properly assigned to vgMsgTmp variable.' || CHR(10) || - 'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Session-safe cleanup with TASK_NAME filtering.' || CHR(10) || - 'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry.' || CHR(10); + 'v2.11.0 (2026-02-18): Added pJobClass parameter to EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE for Oracle Scheduler job class support (resource/priority management).' || CHR(10) || + 'v2.10.1 (2026-02-17): CRITICAL FIX - Remove redundant COMPLETED chunks deletion before parallel export that caused ORA-01403 errors (phantom chunks created by CREATE_CHUNKS_BY_NUMBER_COL).' || CHR(10) || + 'v2.10.0 (2026-02-13): CRITICAL FIX - Register ALL files created by DBMS_CLOUD.EXPORT_DATA (multi-file support due to Oracle parallel processing on large instances). Prevents orphaned files in rollback.' || CHR(10) || + 'v2.9.0 (2026-02-13): Added pProcessName parameter to EXPORT_TABLE_DATA and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures for process tracking in A_SOURCE_FILE_RECEIVED table.' || CHR(10) || + 'v2.8.1 (2026-02-12): FIX query in EXPORT_TABLE_DATA - removed A_LOAD_HISTORY join to ensure single file output (simple SELECT).' || CHR(10); cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); vgMsgTmp VARCHAR2(32000); @@ -71,16 +67,19 @@ AS /** * @name EXPORT_TABLE_DATA * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. - * Exports data into CSV file on OCI infrustructure. + * Exports data into single CSV file on OCI infrastructure. * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' * Supports template table for column order and per-column date formatting. 
- * When pRegisterExport=TRUE, successfully exported files are registered in: + * When pRegisterExport=TRUE, successfully exported file is registered in: * - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata) + * @param pFileName - Optional filename (e.g., 'export.csv'). NULL = auto-generate from table name * @param pTemplateTableName - Optional template table (SCHEMA.TABLE or TABLE) for: * - Column order control (template defines CSV structure) * - Per-column date formatting via FILE_MANAGER.GET_DATE_FORMAT * - NULL = use source table columns in natural order - * @param pRegisterExport - When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED table + * @param pMaxFileSize - Maximum file size in bytes (default 104857600 = 100MB, min 10MB, max 1GB) + * @param pRegisterExport - When TRUE, registers exported CSV file in A_SOURCE_FILE_RECEIVED table + * @param pProcessName - Process name stored in PROCESS_NAME column (default 'DATA_EXPORTER') * @example * begin * DATA_EXPORTER.EXPORT_TABLE_DATA( @@ -89,7 +88,9 @@ AS * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', * pBucketArea => 'DATA', * pFolderName => 'csv_exports', + * pFileName => 'my_export.csv', -- Optional * pTemplateTableName => 'CT_ET_TEMPLATES.MY_TEMPLATE', -- Optional + * pMaxFileSize => 104857600, -- Optional, default 100MB * pRegisterExport => TRUE -- Optional, default FALSE * ); * end; @@ -100,8 +101,11 @@ AS pKeyColumnName IN VARCHAR2, pBucketArea IN VARCHAR2, pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 default NULL, pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName ); @@ -143,6 +147,7 @@ AS pMaxDate IN DATE default SYSDATE, pParallelDegree IN NUMBER default 1, pTemplateTableName IN VARCHAR2 default NULL, + pJobClass IN VARCHAR2 default NULL, pCredentialName IN 
VARCHAR2 default ENV_MANAGER.gvCredentialName ); @@ -158,6 +163,7 @@ AS * File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL) * When pRegisterExport=TRUE, successfully exported files are registered in: * - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata) + * @param pProcessName - Process name stored in PROCESS_NAME column (default 'DATA_EXPORTER') * @example * begin * -- With custom filename @@ -203,6 +209,8 @@ AS pTemplateTableName IN VARCHAR2 default NULL, pMaxFileSize IN NUMBER default 104857600, pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', + pJobClass IN VARCHAR2 default NULL, pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName ); diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_mars835_prehook.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_mars835_prehook.sql index 20e1d38..992ff6d 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_mars835_prehook.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_mars835_prehook.sql @@ -29,6 +29,7 @@ PROMPT MARS-835-PREHOOK: Rollback to Previous Versions PROMPT ========================================================================= PROMPT WARNING: This will reverse all changes from MARS-835-PREHOOK installation! 
PROMPT - Removes A_PARALLEL_EXPORT_CHUNKS table +PROMPT - Reverts A_SOURCE_FILE_RECEIVED table (rename ARCH_PATH to ARCH_FILE_NAME, drop PROCESS_NAME column) PROMPT - Restores ENV_MANAGER v3.1.0 (removes parallel error codes) PROMPT - Restores DATA_EXPORTER v2.1.0 (removes parallel + Smart Column Mapping) PROMPT ========================================================================= @@ -65,13 +66,19 @@ PROMPT ========================================================================= PROMPT PROMPT ========================================================================= -PROMPT Step 3: Track Rollback Version +PROMPT Step 3: Rollback A_SOURCE_FILE_RECEIVED Table Structure +PROMPT ========================================================================= +@@93_MARS_835_PREHOOK_rollback_SOURCE_FILE_RECEIVED_table.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 4: Track Rollback Version PROMPT ========================================================================= @@track_package_versions.sql PROMPT PROMPT ========================================================================= -PROMPT Step 4: Verify Package Versions After Rollback +PROMPT Step 5: Verify Package Versions After Rollback PROMPT ========================================================================= @@verify_packages_version.sql diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/A_SOURCE_FILE_RECEIVED.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/A_SOURCE_FILE_RECEIVED.sql new file mode 100644 index 0000000..4e5bbea --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/A_SOURCE_FILE_RECEIVED.sql @@ -0,0 +1,29 @@ +-- ==================================================================== +-- A_SOURCE_FILE_RECEIVED Table +-- ==================================================================== +-- Purpose: Track received files and their processing status +-- 
==================================================================== + +CREATE TABLE CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY NUMBER(38,0) NOT NULL ENABLE, + A_SOURCE_FILE_CONFIG_KEY NUMBER(38,0) NOT NULL ENABLE, + SOURCE_FILE_NAME VARCHAR2(1000) NOT NULL, + CHECKSUM VARCHAR2(128), + CREATED TIMESTAMP(6) WITH TIME ZONE, + BYTES NUMBER, + RECEPTION_DATE DATE NOT NULL, + PROCESSING_STATUS VARCHAR2(200), + EXTERNAL_TABLE_NAME VARCHAR2(200), + PARTITION_YEAR VARCHAR2(4), + PARTITION_MONTH VARCHAR2(2), + ARCH_FILE_NAME VARCHAR2(1000), + CONSTRAINT A_SOURCE_FILE_RECEIVED_PK PRIMARY KEY (A_SOURCE_FILE_RECEIVED_KEY), + CONSTRAINT ASFR_A_SOURCE_FILE_CONFIG_KEY_FK FOREIGN KEY(A_SOURCE_FILE_CONFIG_KEY) REFERENCES CT_MRDS.A_SOURCE_FILE_CONFIG(A_SOURCE_FILE_CONFIG_KEY), + CONSTRAINT A_SOURCE_FILE_RECEIVED_CHK CHECK (PROCESSING_STATUS IN ('RECEIVED', 'VALIDATED', 'READY_FOR_INGESTION', 'INGESTED', 'ARCHIVED')) +) TABLESPACE "DATA"; + +-- Unique index for file identification (workaround for TIMESTAMP WITH TIMEZONE constraint limitation) +CREATE UNIQUE INDEX CT_MRDS.A_SOURCE_FILE_RECEIVED_UK1 +ON CT_MRDS.A_SOURCE_FILE_RECEIVED(CHECKSUM, CREATED, BYTES); + +GRANT SELECT, INSERT, UPDATE, DELETE ON CT_MRDS.A_SOURCE_FILE_RECEIVED TO MRDS_LOADER_ROLE; \ No newline at end of file diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/DATA_EXPORTER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/DATA_EXPORTER.pkb new file mode 100644 index 0000000..20bc5fa --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/DATA_EXPORTER.pkb @@ -0,0 +1,1698 @@ +create or replace PACKAGE BODY CT_MRDS.DATA_EXPORTER +AS + + ---------------------------------------------------------------------------------------------------- + -- PRIVATE HELPER FUNCTIONS (USED BY MULTIPLE PROCEDURES) + ---------------------------------------------------------------------------------------------------- + + /** 
+ * Sanitizes filename by replacing disallowed characters with underscores + **/ + FUNCTION sanitizeFilename(pFilename IN VARCHAR2) RETURN VARCHAR2 IS + vFilename VARCHAR2(1000); + BEGIN + vFilename := REGEXP_REPLACE(pFilename, '[^a-zA-Z0-9._-]', '_'); + RETURN vFilename; + END sanitizeFilename; + + ---------------------------------------------------------------------------------------------------- + + /** + * Deletes export file from OCI bucket if it exists (used for cleanup before retry) + * Silently ignores if file doesn't exist (ORA-20404) + **/ + PROCEDURE DELETE_FAILED_EXPORT_FILE( + pFileUri IN VARCHAR2, + pCredentialName IN VARCHAR2, + pParameters IN VARCHAR2 + ) IS + BEGIN + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Attempting to delete potentially corrupted file: ' || pFileUri, 'DEBUG', pParameters); + + DBMS_CLOUD.DELETE_OBJECT( + credential_name => pCredentialName, + object_uri => pFileUri + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted existing file (cleanup before retry): ' || pFileUri, 'INFO', pParameters); + EXCEPTION + WHEN OTHERS THEN + -- Object not found is OK (file doesn't exist) + IF SQLCODE = -20404 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('File does not exist (OK): ' || pFileUri, 'DEBUG', pParameters); + ELSE + -- Log but don't fail - export will attempt anyway + ENV_MANAGER.LOG_PROCESS_EVENT('Warning: Could not delete file (will retry export anyway): ' || SQLERRM, 'WARNING', pParameters); + END IF; + END; + END DELETE_FAILED_EXPORT_FILE; + + ---------------------------------------------------------------------------------------------------- + + /** + * Builds query with TO_CHAR for date/timestamp columns using per-column formats + * Retrieves format for each date column from FILE_MANAGER.GET_DATE_FORMAT + **/ + FUNCTION buildQueryWithDateFormats( + pColumnList IN VARCHAR2, + pTableName IN VARCHAR2, + pSchemaName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pTemplateTableName IN VARCHAR2 + ) RETURN VARCHAR2 IS + vResult VARCHAR2(32767); + vColumns 
VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + vAllCols VARCHAR2(32767); + vDataType VARCHAR2(30); + vDateFormat VARCHAR2(200); + vTemplateSchema VARCHAR2(128); + vTemplateTable VARCHAR2(128); + vColExists NUMBER; + BEGIN + -- Build column list if not provided + IF pColumnList IS NULL THEN + -- Use template table for column order when provided + -- Template defines which columns to export and in what order + IF pTemplateTableName IS NOT NULL THEN + -- Parse template table name (SCHEMA.TABLE or just TABLE) + IF INSTR(pTemplateTableName, '.') > 0 THEN + vTemplateSchema := SUBSTR(pTemplateTableName, 1, INSTR(pTemplateTableName, '.') - 1); + vTemplateTable := SUBSTR(pTemplateTableName, INSTR(pTemplateTableName, '.') + 1); + ELSE + vTemplateSchema := pSchemaName; + vTemplateTable := pTemplateTableName; + END IF; + + -- Get columns from TEMPLATE table in template column order + -- Template defines target CSV structure (column order and which columns to include) + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = vTemplateTable + AND owner = vTemplateSchema; + ELSE + -- Get columns from source table when no template + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pTableName + AND owner = pSchemaName; + END IF; + ELSE + vAllCols := pColumnList; + END IF; + + -- Process each column + vColumns := UPPER(REPLACE(vAllCols, ' ', '')); + vPos := 1; + vResult := ''; + + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- When using template table, check if column exists in SOURCE table + -- Template defines target structure, source provides data + -- Skip template columns that don't exist in source (except 
A_WORKFLOW_HISTORY_KEY) + IF pTemplateTableName IS NOT NULL THEN + -- Check if template column exists in SOURCE table + SELECT COUNT(*) INTO vColExists + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + + -- Skip columns that don't exist in source table + -- Exception: A_WORKFLOW_HISTORY_KEY is virtual (mapped from pKeyColumnName) + IF vColExists = 0 AND UPPER(vCurrentCol) != 'A_WORKFLOW_HISTORY_KEY' THEN + vPos := vNextPos + 1; + CONTINUE; + END IF; + END IF; + + -- Get column data type from appropriate table (template or source) + IF pTemplateTableName IS NOT NULL THEN + -- Get data type from template table + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = vTemplateTable + AND column_name = vCurrentCol + AND owner = vTemplateSchema; + ELSE + -- Get data type from source table + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + END IF; + + -- Handle key column alias (template table has A_WORKFLOW_HISTORY_KEY, source table has pKeyColumnName) + IF UPPER(vCurrentCol) = 'A_WORKFLOW_HISTORY_KEY' THEN + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'; + + -- Convert DATE/TIMESTAMP columns to CHAR with specific format + ELSIF vDataType IN ('DATE', 'TIMESTAMP', 'TIMESTAMP WITH TIME ZONE', 'TIMESTAMP WITH LOCAL TIME ZONE') THEN + IF pTemplateTableName IS NOT NULL THEN + vDateFormat := CT_MRDS.FILE_MANAGER.GET_DATE_FORMAT( + pTemplateTableName => pTemplateTableName, + pColumnName => vCurrentCol + ); + ELSE + vDateFormat := ENV_MANAGER.gvDefaultDateFormat; + END IF; + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'TO_CHAR(T.' || vCurrentCol || ', ''' || vDateFormat || ''') AS ' || vCurrentCol; + + -- Other columns as-is with T. 
prefix
+                 ELSE
+                     vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END ||
+                                'T.' || vCurrentCol;
+                 END IF;
+
+                 vPos := vNextPos + 1;
+             END LOOP;
+
+             RETURN vResult;
+         END buildQueryWithDateFormats;
+
+     ----------------------------------------------------------------------------------------------------
+
+     -- Internal shared function to process column list with T. prefix and key column mapping
+     -- Returns a comma-separated SELECT list in which every column carries the "T." table
+     -- alias prefix and the key column is aliased AS A_WORKFLOW_HISTORY_KEY.
+     -- When pColumnList is NULL the list is built from ALL_TAB_COLUMNS in COLUMN_ID order.
+     FUNCTION processColumnList(pColumnList IN VARCHAR2, pTableName IN VARCHAR2, pSchemaName IN VARCHAR2, pKeyColumnName IN VARCHAR2) RETURN VARCHAR2 IS
+         vResult     VARCHAR2(32767);
+         vColumns    VARCHAR2(32767);
+         vPos        PLS_INTEGER;
+         vNextPos    PLS_INTEGER;
+         vCurrentCol VARCHAR2(128);
+         vAllCols    VARCHAR2(32767);
+     BEGIN
+         IF pColumnList IS NULL THEN
+             -- Build list of all columns
+             SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id)
+             INTO vAllCols
+             FROM all_tab_columns
+             WHERE table_name = pTableName
+             AND owner = pSchemaName;
+
+             -- Add T. prefix to all columns
+             vResult := 'T.' || REPLACE(vAllCols, ', ', ', T.');
+
+             -- Replace key column with aliased version (e.g., T.A_ETL_LOAD_SET_KEY_FK AS A_WORKFLOW_HISTORY_KEY)
+             -- NOTE(review): REPLACE matches by prefix - a column whose name merely starts with
+             -- pKeyColumnName (e.g. <KEY>_OLD) would also be rewritten; confirm no such column exists.
+             vResult := REPLACE(vResult, 'T.' || pKeyColumnName, 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY');
+
+             RETURN vResult;
+         END IF;
+
+         -- Remove extra spaces and convert to uppercase
+         vColumns := UPPER(REPLACE(pColumnList, ' ', ''));
+         vPos := 1;
+         vResult := '';
+
+         -- Parse comma-separated column list and add T. prefix
+         WHILE vPos <= LENGTH(vColumns) LOOP
+             vNextPos := INSTR(vColumns, ',', vPos);
+             IF vNextPos = 0 THEN
+                 vNextPos := LENGTH(vColumns) + 1;
+             END IF;
+
+             vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos);
+
+             -- Check if this is the key column (e.g., A_ETL_LOAD_SET_KEY_FK) and add alias
+             IF UPPER(vCurrentCol) = UPPER(pKeyColumnName) THEN
+                 vCurrentCol := 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY';
+             ELSE
+                 -- Add T. prefix if not already present
+                 IF INSTR(vCurrentCol, '.') = 0 THEN
+                     vCurrentCol := 'T.' || vCurrentCol;
+                 END IF;
+             END IF;
+
+             -- Add to result with comma separator
+             IF vResult IS NOT NULL THEN
+                 vResult := vResult || ', ';
+             END IF;
+             vResult := vResult || vCurrentCol;
+
+             vPos := vNextPos + 1;
+         END LOOP;
+
+         RETURN vResult;
+     END processColumnList;
+
+     ----------------------------------------------------------------------------------------------------
+
+     /**
+      * Validates table existence, key column existence, and column list
+      * Raises CODE_TABLE_NOT_EXISTS / CODE_COLUMN_NOT_EXISTS on the first failed check.
+      * NOTE(review): pParameters is accepted but not referenced anywhere in this body.
+      **/
+     PROCEDURE VALIDATE_TABLE_AND_COLUMNS (
+         pSchemaName    IN VARCHAR2,
+         pTableName     IN VARCHAR2,
+         pKeyColumnName IN VARCHAR2,
+         pColumnList    IN VARCHAR2,
+         pParameters    IN VARCHAR2
+     ) IS
+         vCount      INTEGER;
+         vColumns    VARCHAR2(32767);
+         vPos        PLS_INTEGER;
+         vNextPos    PLS_INTEGER;
+         vCurrentCol VARCHAR2(128);
+     BEGIN
+         -- Check if table exists
+         SELECT COUNT(*) INTO vCount
+         FROM all_tables
+         WHERE table_name = pTableName
+         AND owner = pSchemaName;
+
+         IF vCount = 0 THEN
+             RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS);
+         END IF;
+
+         -- Check if key column exists
+         SELECT COUNT(*) INTO vCount
+         FROM all_tab_columns
+         WHERE table_name = pTableName
+         AND column_name = pKeyColumnName
+         AND owner = pSchemaName;
+
+         IF vCount = 0 THEN
+             RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS);
+         END IF;
+
+         -- Validate pColumnList - check if all column names exist in the table
+         IF pColumnList IS NOT NULL THEN
+             vColumns := UPPER(REPLACE(pColumnList, ' ', ''));
+             vPos := 1;
+
+             WHILE vPos <= LENGTH(vColumns) LOOP
+                 vNextPos := INSTR(vColumns, ',', vPos);
+                 IF vNextPos = 0 THEN
+                     vNextPos := LENGTH(vColumns) + 1;
+                 END IF;
+
+                 vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos);
+
+                 -- Remove table alias prefix if present
+                 IF INSTR(vCurrentCol, '.') > 0 THEN
+                     vCurrentCol := SUBSTR(vCurrentCol, INSTR(vCurrentCol, '.') + 1);
+                 END IF;
+
+                 -- Check if column
exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + vPos := vNextPos + 1; + END LOOP; + END IF; + END VALIDATE_TABLE_AND_COLUMNS; + + ---------------------------------------------------------------------------------------------------- + + /** + * Retrieves list of year/month partitions based on date range + **/ + FUNCTION GET_PARTITIONS ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pMinDate IN DATE, + pMaxDate IN DATE, + pParameters IN VARCHAR2 + ) RETURN partition_tab IS + vSql VARCHAR2(32000); + vPartitions partition_tab; + vKeyValuesYear DBMS_SQL.VARCHAR2_TABLE; + vKeyValuesMonth DBMS_SQL.VARCHAR2_TABLE; + vFullTableName VARCHAR2(200); + BEGIN + -- Build fully qualified table name if not already qualified + IF INSTR(pTableName, '.') > 0 THEN + vFullTableName := pTableName; -- Already fully qualified + ELSE + vFullTableName := pSchemaName || '.' || pTableName; + END IF; + + vSql := 'SELECT DISTINCT TO_CHAR(L.LOAD_START,''YYYY'') AS YR, TO_CHAR(L.LOAD_START,''MM'') AS MN + FROM ' || vFullTableName || ' T, CT_ODS.A_LOAD_HISTORY L + WHERE T.' || pKeyColumnName || ' = L.A_ETL_LOAD_SET_KEY + AND L.LOAD_START >= :pMinDate + AND L.LOAD_START < :pMaxDate + ORDER BY YR, MN'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Executing date range query: ' || vSql, 'DEBUG', pParameters); + EXECUTE IMMEDIATE vSql BULK COLLECT INTO vKeyValuesYear, vKeyValuesMonth USING pMinDate, pMaxDate; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vKeyValuesYear.COUNT || ' year/month combinations to export', 'DEBUG', pParameters); + + -- Convert to partition_tab + vPartitions := partition_tab(); + vPartitions.EXTEND(vKeyValuesYear.COUNT); + FOR i IN 1 .. 
vKeyValuesYear.COUNT LOOP + vPartitions(i).year := vKeyValuesYear(i); + vPartitions(i).month := vKeyValuesMonth(i); + END LOOP; + + RETURN vPartitions; + END GET_PARTITIONS; + + ---------------------------------------------------------------------------------------------------- + + /** + * Exports single partition (year/month) to specified format (PARQUET or CSV) + * This is the core worker procedure that will be used for parallel processing in v2.3.0 + **/ + PROCEDURE EXPORT_SINGLE_PARTITION ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pYear IN VARCHAR2, + pMonth IN VARCHAR2, + pBucketUri IN VARCHAR2, + pFolderName IN VARCHAR2, + pProcessedColumns IN VARCHAR2, + pMinDate IN DATE, + pMaxDate IN DATE, + pCredentialName IN VARCHAR2, + pFormat IN VARCHAR2 DEFAULT 'PARQUET', + pFileBaseName IN VARCHAR2 DEFAULT NULL, + pMaxFileSize IN NUMBER DEFAULT 104857600, + pParameters IN VARCHAR2 + ) IS + vQuery VARCHAR2(32767); + vUri VARCHAR2(4000); + vFileName VARCHAR2(1000); + vFullTableName VARCHAR2(200); + BEGIN + -- Build fully qualified table name if not already qualified + IF INSTR(pTableName, '.') > 0 THEN + vFullTableName := pTableName; -- Already fully qualified + ELSE + vFullTableName := pSchemaName || '.' || pTableName; + END IF; + + -- Construct the query to extract data for the current year/month + vQuery := 'SELECT ' || pProcessedColumns || ' + FROM ' || vFullTableName || ' T, CT_ODS.A_LOAD_HISTORY L + WHERE T.' 
|| pKeyColumnName || ' = L.A_ETL_LOAD_SET_KEY + AND TO_CHAR(L.LOAD_START,''YYYY'') = ' || CHR(39) || pYear || CHR(39) || ' + AND TO_CHAR(L.LOAD_START,''MM'') = ' || CHR(39) || pMonth || CHR(39) || ' + AND L.LOAD_START >= TO_DATE(' || CHR(39) || TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS') || CHR(39) || ', ''YYYY-MM-DD HH24:MI:SS'') + AND L.LOAD_START < TO_DATE(' || CHR(39) || TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS') || CHR(39) || ', ''YYYY-MM-DD HH24:MI:SS'')'; + + -- Construct the URI based on format + IF pFormat = 'PARQUET' THEN + -- Parquet: Use Hive-style partitioning + -- Note: maxfilesize is NOT supported for Parquet format (Oracle limitation) + vUri := pBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + 'PARTITION_YEAR=' || sanitizeFilename(pYear) || '/' || + 'PARTITION_MONTH=' || sanitizeFilename(pMonth) || '/' || + sanitizeFilename(pYear) || sanitizeFilename(pMonth) || '.parquet'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parquet export URI: ' || vUri, 'DEBUG', pParameters); + + -- Delete potentially corrupted file from previous failed attempt + -- This prevents Oracle from creating _1 suffixed files on retry + DELETE_FAILED_EXPORT_FILE(vUri, pCredentialName, pParameters); + + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object('type' VALUE 'parquet') + ); + ELSIF pFormat = 'CSV' THEN + -- CSV: Flat file structure with year/month in filename + vFileName := NVL(pFileBaseName, UPPER(pTableName)) || '_' || pYear || pMonth || '.csv'; + vUri := pBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + sanitizeFilename(vFileName); + + ENV_MANAGER.LOG_PROCESS_EVENT('CSV export URI: ' || vUri, 'DEBUG', pParameters); + + -- Delete potentially corrupted file from previous failed attempt + -- This prevents Oracle from creating _1 suffixed files on retry + DELETE_FAILED_EXPORT_FILE(vUri, pCredentialName, pParameters); + + -- 
Use json_object() for CSV export with maxfilesize in bytes (Oracle requirement)
+             -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB
+             -- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M'
+             -- Using 100MB (104857600) to avoid PGA memory issues with large files
+             DBMS_CLOUD.EXPORT_DATA(
+                 credential_name => pCredentialName,
+                 file_uri_list   => vUri,
+                 query           => vQuery,
+                 format          => json_object(
+                     'type' VALUE 'CSV',
+                     'header' VALUE true,
+                     'quote' VALUE CHR(34),
+                     'delimiter' VALUE ',',
+                     'escape' VALUE true,
+                     'recorddelimiter' VALUE CHR(13)||CHR(10), -- CRLF for Windows
+                     'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes (e.g., 104857600 = 100MB)
+                 )
+             );
+         ELSE
+             RAISE_APPLICATION_ERROR(-20001, 'Unsupported format: ' || pFormat || '. Use PARQUET or CSV.');
+         END IF;
+
+         ENV_MANAGER.LOG_PROCESS_EVENT('Processing Year/Month: ' || pYear || '/' || pMonth || ' (Format: ' || pFormat || ')', 'DEBUG', pParameters);
+         ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', pParameters);
+     END EXPORT_SINGLE_PARTITION;
+
+     ----------------------------------------------------------------------------------------------------
+
+     /**
+      * Callback procedure for DBMS_PARALLEL_EXECUTE
+      * Processes single partition (year/month) chunk in parallel task
+      * Called by DBMS_PARALLEL_EXECUTE framework for each chunk
+      **/
+     PROCEDURE EXPORT_PARTITION_PARALLEL (
+         pStartId IN NUMBER,
+         pEndId   IN NUMBER
+     ) IS
+         vYear             VARCHAR2(4);
+         vMonth            VARCHAR2(2);
+         vSchemaName       VARCHAR2(128);
+         vTableName        VARCHAR2(128);
+         vKeyColumnName    VARCHAR2(128);
+         vBucketUri        VARCHAR2(4000);
+         vFolderName       VARCHAR2(1000);
+         vProcessedColumns VARCHAR2(32767);
+         vMinDate          DATE;
+         vMaxDate          DATE;
+         vCredentialName   VARCHAR2(200);
+         vFormat           VARCHAR2(20);
+         vFileBaseName     VARCHAR2(1000);
+         vMaxFileSize      NUMBER;
+         vParameters       VARCHAR2(4000);
+     BEGIN
+         -- Retrieve chunk context from global temporary table
+         SELECT
+             YEAR_VALUE,
+             MONTH_VALUE,
+             SCHEMA_NAME,
+ TABLE_NAME, + KEY_COLUMN_NAME, + BUCKET_URI, + FOLDER_NAME, + PROCESSED_COLUMNS, + MIN_DATE, + MAX_DATE, + CREDENTIAL_NAME, + FORMAT_TYPE, + FILE_BASE_NAME, + MAX_FILE_SIZE + INTO + vYear, + vMonth, + vSchemaName, + vTableName, + vKeyColumnName, + vBucketUri, + vFolderName, + vProcessedColumns, + vMinDate, + vMaxDate, + vCredentialName, + vFormat, + vFileBaseName, + vMaxFileSize + FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE CHUNK_ID = pStartId; + + vParameters := 'Parallel task - Year: ' || vYear || ', Month: ' || vMonth || ', ChunkID: ' || pStartId; + ENV_MANAGER.LOG_PROCESS_EVENT('Starting parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + + -- Mark chunk as PROCESSING + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'PROCESSING', + ERROR_MESSAGE = NULL + WHERE CHUNK_ID = pStartId; + COMMIT; + + -- Call the worker procedure + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vYear, + pMonth => vMonth, + pBucketUri => vBucketUri, + pFolderName => vFolderName, + pProcessedColumns => vProcessedColumns, + pMinDate => vMinDate, + pMaxDate => vMaxDate, + pCredentialName => vCredentialName, + pFormat => vFormat, + pFileBaseName => vFileBaseName, + pMaxFileSize => vMaxFileSize, + pParameters => vParameters + ); + + -- Mark chunk as COMPLETED + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'COMPLETED', + EXPORT_TIMESTAMP = SYSTIMESTAMP, + ERROR_MESSAGE = NULL + WHERE CHUNK_ID = pStartId; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Completed parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Capture error details in variable (SQLERRM cannot be used directly in SQL) + vgMsgTmp := 'Parallel task error for partition ' || vYear || '/' || vMonth || ' (ChunkID: ' || pStartId || '): ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + 
ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + + -- Mark chunk as FAILED with error message + -- Use vgMsgTmp variable instead of SQLERRM directly (Oracle limitation in SQL context) + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'FAILED', + ERROR_MESSAGE = SUBSTR(vgMsgTmp, 1, 4000) + WHERE CHUNK_ID = pStartId; + COMMIT; + + RAISE; + END EXPORT_PARTITION_PARALLEL; + + ---------------------------------------------------------------------------------------------------- + -- MAIN EXPORT PROCEDURES + ---------------------------------------------------------------------------------------------------- + + PROCEDURE EXPORT_TABLE_DATA ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pTemplateTableName IN VARCHAR2 default NULL, + pRegisterExport IN BOOLEAN default FALSE, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + -- Type definition for key values + TYPE key_value_tab IS TABLE OF VARCHAR2(4000); + vKeyValues key_value_tab; + vCount INTEGER; + vSql VARCHAR2(4000); + vKeyValue VARCHAR2(4000); + vQuery VARCHAR2(32767); + vUri VARCHAR2(4000); + vDataType VARCHAR2(30); + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters VARCHAR2(4000); + vBucketUri VARCHAR2(4000); + vProcessedColumnList VARCHAR2(32767); + vCurrentCol VARCHAR2(128); + + -- Variables for file registration (when pRegisterExport=TRUE) + vConfigKey NUMBER; + vSourceKey VARCHAR2(100); + vTableId VARCHAR2(100); + vSlashPos1 NUMBER; + vSlashPos2 NUMBER; + vSourceFileReceivedKey NUMBER; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 
'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Check if table exists + SELECT COUNT(*) INTO vCount + FROM all_tables + WHERE table_name = vTableName + AND owner = vSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + END IF; + + -- Check if key column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = vTableName + AND column_name = vKeyColumnName + AND owner = vSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + + END IF; + + -- Get the data type of the key column + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = vTableName + AND column_name = vKeyColumnName + AND owner = vSchemaName; + + -- Validate template table if provided + IF pTemplateTableName IS NOT NULL THEN + DECLARE + vTemplateSchema VARCHAR2(128); + vTemplateTable VARCHAR2(128); + vTemplateCount NUMBER; + BEGIN + -- Parse template table name (SCHEMA.TABLE or just TABLE) + IF INSTR(pTemplateTableName, '.') > 0 THEN + vTemplateSchema := UPPER(SUBSTR(pTemplateTableName, 1, INSTR(pTemplateTableName, '.') - 1)); + vTemplateTable := UPPER(SUBSTR(pTemplateTableName, INSTR(pTemplateTableName, '.') + 1)); + ELSE + vTemplateSchema := vSchemaName; + vTemplateTable := UPPER(pTemplateTableName); + END IF; + + -- Check if 
template table exists + SELECT COUNT(*) INTO vTemplateCount + FROM all_tables + WHERE table_name = vTemplateTable + AND owner = vTemplateSchema; + + IF vTemplateCount = 0 THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS || ': Template table ' || vTemplateSchema || '.' || vTemplateTable; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Template table validated: ' || vTemplateSchema || '.' || vTemplateTable, 'DEBUG', vParameters); + END; + END IF; + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(NULL, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' 
|| DBMS_ASSERT.simple_sql_name(vTableName); + + -- Lookup A_SOURCE_FILE_CONFIG_KEY based on pFolderName parsing if pRegisterExport is enabled + IF pRegisterExport THEN + -- Format: {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID} + -- Example: 'ODS/CSDB/CSDB_DEBT_DAILY' -> SOURCE_KEY='CSDB', TABLE_ID='CSDB_DEBT_DAILY' + + -- Parse pFolderName to extract SOURCE_KEY and TABLE_ID + vSlashPos1 := INSTR(pFolderName, '/', 1, 1); -- First '/' position + vSlashPos2 := INSTR(pFolderName, '/', 1, 2); -- Second '/' position + + IF vSlashPos1 > 0 AND vSlashPos2 > 0 THEN + -- Extract segment 2 (SOURCE_KEY) and segment 3 (TABLE_ID) + vSourceKey := SUBSTR(pFolderName, vSlashPos1 + 1, vSlashPos2 - vSlashPos1 - 1); + vTableId := SUBSTR(pFolderName, vSlashPos2 + 1); + + -- Find configuration based on SOURCE_KEY and TABLE_ID + BEGIN + SELECT A_SOURCE_FILE_CONFIG_KEY + INTO vConfigKey + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE A_SOURCE_KEY = vSourceKey + AND TABLE_ID = vTableId + AND SOURCE_FILE_TYPE = 'INPUT' + AND ROWNUM = 1; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found config key: ' || vConfigKey || ' for SOURCE=' || vSourceKey || ', TABLE=' || vTableId, 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('No config found for SOURCE=' || vSourceKey || ', TABLE=' || vTableId || ' - using default (-1)', 'INFO', vParameters); + END; + ELSE + -- Cannot parse folder name - use default + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('Cannot parse pFolderName: ' || pFolderName || ' - using default (-1)', 'WARNING', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('File registration enabled with config key: ' || vConfigKey, 'INFO', vParameters); + END IF; + + -- Fetch unique key values from A_LOAD_HISTORY + vSql := 'SELECT DISTINCT L.A_ETL_LOAD_SET_KEY' || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' 
|| DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Executing key values query: ' || vSql, 'DEBUG', vParameters); + EXECUTE IMMEDIATE vSql BULK COLLECT INTO vKeyValues; + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vKeyValues.COUNT || ' unique key values to process', 'DEBUG', vParameters); + + -- Loop over each unique key value + FOR i IN 1 .. vKeyValues.COUNT LOOP + vKeyValue := vKeyValues(i); + + -- Construct the query to extract data for the current key value with A_WORKFLOW_HISTORY_KEY mapping + IF vDataType IN ('VARCHAR2', 'CHAR', 'NCHAR', 'NVARCHAR2') THEN + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || + ' AND L.A_ETL_LOAD_SET_KEY = ' || CHR(39) || vKeyValue || CHR(39); + ELSIF vDataType IN ('NUMBER', 'FLOAT', 'BINARY_FLOAT', 'BINARY_DOUBLE') THEN + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || + ' AND L.A_ETL_LOAD_SET_KEY = ' || vKeyValue; + ELSIF vDataType LIKE 'TIMESTAMP%' OR vDataType = 'DATE' THEN + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' 
|| DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || + ' AND L.A_ETL_LOAD_SET_KEY = TO_TIMESTAMP(' || CHR(39) || vKeyValue || CHR(39) ||', ''YYYY-MM-DD HH24:MI:SS.FF'')'; + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE); + END IF; + + -- Construct the URI for the file in OCI Object Storage + vUri := vBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + sanitizeFilename(vKeyValue) || '.csv'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Processing key value: ' || vKeyValue || ' (' || (i) || '/' || vKeyValues.COUNT || ')', 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export URI: ' || vUri, 'DEBUG', vParameters); + + -- Use DBMS_CLOUD package to export data to the URI + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object('type' VALUE 'CSV', 'header' VALUE true) + ); + + -- Register exported file to A_SOURCE_FILE_RECEIVED if requested + IF pRegisterExport THEN + DECLARE + vChecksum VARCHAR2(128); + vCreated TIMESTAMP WITH TIME ZONE; + vBytes NUMBER; + vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix + vSanitizedFileName VARCHAR2(1000); + vFileName VARCHAR2(1000); + vRetryCount NUMBER := 0; + vMaxRetries NUMBER := 1; -- One retry after initial attempt + vRetryDelay NUMBER := 2; -- 2 seconds delay + BEGIN + -- Extract filename from URI (after last '/') + vFileName := SUBSTR(vUri, INSTR(vUri, '/', -1) + 1); + + -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) + -- Example: keyvalue.csv becomes keyvalue_1_20260211T102621591769Z.csv + vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 
1, 0, 'i');
+
+             -- Try to get file metadata with retry logic
+             <<metadata_retry_loop>>
+             LOOP
+                 BEGIN
+                     SELECT object_name, checksum, created, bytes
+                     INTO vActualFileName, vChecksum, vCreated, vBytes
+                     FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
+                         credential_name => pCredentialName,
+                         location_uri    => vBucketUri
+                     ))
+                     WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%'
+                     ORDER BY created DESC, bytes DESC
+                     FETCH FIRST 1 ROW ONLY;
+
+                     -- Extract filename only from full path (remove bucket folder prefix)
+                     vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1);
+
+                     -- Success - exit retry loop
+                     EXIT metadata_retry_loop;
+
+                 EXCEPTION
+                     WHEN NO_DATA_FOUND THEN
+                         vRetryCount := vRetryCount + 1;
+
+                         IF vRetryCount <= vMaxRetries THEN
+                             -- Log retry attempt
+                             ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters);
+
+                             -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK)
+                             DBMS_SESSION.SLEEP(vRetryDelay);
+                         ELSE
+                             -- Max retries exceeded - re-raise exception
+                             RAISE;
+                         END IF;
+                 END;
+             END LOOP metadata_retry_loop;
+
+             -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata
+             vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL;
+             INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED (
+                 A_SOURCE_FILE_RECEIVED_KEY,
+                 A_SOURCE_FILE_CONFIG_KEY,
+                 SOURCE_FILE_NAME,
+                 CHECKSUM,
+                 CREATED,
+                 BYTES,
+                 RECEPTION_DATE,
+                 PROCESSING_STATUS,
+                 PARTITION_YEAR,
+                 PARTITION_MONTH,
+                 ARCH_FILE_NAME
+             ) VALUES (
+                 vSourceFileReceivedKey,
+                 NVL(vConfigKey, -1), -- Use config key if found, otherwise -1
+                 vActualFileName, -- Use actual filename with Oracle suffix
+                 vChecksum,
+                 vCreated,
+                 vBytes,
+                 SYSDATE,
+                 'INGESTED',
+                 NULL, -- PARTITION_YEAR not used for single-file exports
+                 NULL, -- PARTITION_MONTH not used for single-file
exports + NULL -- ARCH_FILE_NAME not used for single-file exports + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- File not found after retries - log warning and continue without metadata + ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); + + -- Sanitize filename for fallback INSERT (function cannot be used in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Insert without metadata using theoretical filename + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_FILE_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vSanitizedFileName, -- Use pre-calculated sanitized filename + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL -- ARCH_FILE_NAME not used for single-file exports + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file without metadata: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vSanitizedFileName, 'DEBUG', vParameters); + END; + END IF; + END LOOP; + + -- Log summary of file registration if enabled + IF pRegisterExport THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Registered ' || vKeyValues.COUNT || ' exported files to A_SOURCE_FILE_RECEIVED with config key: ' || vConfigKey, 'INFO', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := 
ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in column list' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_UNSUPPORTED_DATA_TYPE THEN + vgMsgTmp := ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE || ' vDataType: '||vDataType; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE EXPORT_TABLE_DATA_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters 
CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vProcessedColumnList VARCHAR2(32767); + vBucketUri VARCHAR2(4000); + vCurrentCol VARCHAR2(128); + vPartitions partition_tab; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pMinDate => '''||nvl(TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Validate table, key column, and column list using shared procedure + VALIDATE_TABLE_AND_COLUMNS(vSchemaName, vTableName, vKeyColumnName, pColumnList, vParameters); + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(pColumnList, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Input column list: ' || NVL(pColumnList, 'NULL (building dynamic list from table metadata)'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', 
vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Validate parallel degree parameter + IF pParallelDegree < 1 OR pParallelDegree > 16 THEN + vgMsgTmp := ENV_MANAGER.MSG_INVALID_PARALLEL_DEGREE || ': ' || pParallelDegree || '. Valid range: 1-16'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + END IF; + + -- Get partitions using shared function + vPartitions := GET_PARTITIONS(vSchemaName, vTableName, vKeyColumnName, pMinDate, pMaxDate, vParameters); + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vPartitions.COUNT || ' partitions to export with parallel degree ' || pParallelDegree, 'INFO', vParameters); + + -- Sequential processing (parallel degree = 1) + IF pParallelDegree = 1 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Using sequential processing (pParallelDegree = 1)', 'DEBUG', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vPartitions(i).year, + pMonth => vPartitions(i).month, + pBucketUri => vBucketUri, + pFolderName => pFolderName, + pProcessedColumns => vProcessedColumnList, + pMinDate => pMinDate, + pMaxDate => pMaxDate, + pCredentialName => pCredentialName, + pFormat => 'PARQUET', + pFileBaseName => NULL, + pMaxFileSize => 104857600, + pParameters => vParameters + ); + END LOOP; + + -- Parallel processing (parallel degree > 1) + ELSE + -- Skip parallel processing if no partitions found + IF vPartitions.COUNT = 0 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('No partitions to export - skipping parallel processing', 'INFO', vParameters); + ELSE + DECLARE + vTaskName VARCHAR2(128) := 'DATA_EXPORT_TASK_' || TO_CHAR(SYSTIMESTAMP, 'YYYYMMDDHH24MISSFF'); + vChunkId NUMBER; + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); + + -- Clean up old completed chunks (>24 hours) to prevent table bloat + -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) + -- This prevents race conditions when multiple exports run simultaneously + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE STATUS = 'COMPLETED' + AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters); + -- This prevents re-exporting successfully completed partitions + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'COMPLETED'; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared COMPLETED chunks. FAILED chunks retained for retry.', 'DEBUG', vParameters); + + -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) + FOR i IN 1 .. 
vPartitions.COUNT LOOP + MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t + USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id) + WHEN NOT MATCHED THEN + INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, 'PENDING') + WHEN MATCHED THEN + UPDATE SET TASK_NAME = vTaskName, + STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; + END LOOP; + COMMIT; + + -- Log chunk statistics + DECLARE + vPendingCount NUMBER; + vFailedCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; + SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + END; + + -- Create parallel task + DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); + + -- Define chunks by number range (1 to partition count) + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + task_name => vTaskName, + table_owner => 'CT_MRDS', + table_name => 'A_PARALLEL_EXPORT_CHUNKS', + table_column => 'CHUNK_ID', + chunk_size => 1 -- Each partition is one chunk + ); + + -- Execute task in parallel + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel task: ' || vTaskName, 'DEBUG', vParameters); + + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt 
=> 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + + -- Check for errors + DECLARE + vErrorCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vErrorCount + FROM USER_PARALLEL_EXECUTE_CHUNKS + WHERE task_name = vTaskName AND status = 'PROCESSED_WITH_ERROR'; + + IF vErrorCount > 0 THEN + vgMsgTmp := 'Parallel execution completed with ' || vErrorCount || ' errors. Check USER_PARALLEL_EXECUTE_CHUNKS for details.'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END IF; + END; + + -- Clean up task + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + + -- Clean up chunks for THIS specific task only (session-safe) + -- CRITICAL: Use TASK_NAME filter to avoid deleting chunks from other active sessions + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel execution completed successfully', 'INFO', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Attempt to drop task on error + BEGIN + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + EXCEPTION + WHEN OTHERS THEN NULL; -- Ignore drop errors + END; + + vgMsgTmp := ENV_MANAGER.MSG_PARALLEL_EXECUTION_FAILED || ': ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END; + END IF; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := 
ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in pColumnList' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_INVALID_PARALLEL_DEGREE THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + WHEN ENV_MANAGER.ERR_PARALLEL_EXECUTION_FAILED THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA_BY_DATE; + + ---------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE + * @desc Exports data to a single CSV file with date filtering. + * Unlike EXPORT_TABLE_DATA_BY_DATE, this procedure creates one CSV file + * instead of multiple Parquet files partitioned by year/month. + * Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY. + * Allows specifying custom column list or uses T.* if pColumnList is NULL. + * Validates that all columns in pColumnList exist in the target table. + * Automatically adds 'T.' prefix to column names in pColumnList. 
+ * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'exports', + * pFileName => 'my_export.csv', + * pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 DEFAULT NULL, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pRegisterExport IN BOOLEAN default FALSE, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vFileBaseName VARCHAR2(4000); + vFileExtension VARCHAR2(10); + vProcessedColumnList VARCHAR2(32767); + vBucketUri VARCHAR2(4000); + vCurrentCol VARCHAR2(128); + vPartitions partition_tab; + vSourceFileReceivedKey NUMBER; + vFileName VARCHAR2(1000); + vFileUri VARCHAR2(4000); + -- Variables for A_SOURCE_FILE_CONFIG lookup + vSourceKey VARCHAR2(100); + vTableId VARCHAR2(200); + vConfigKey NUMBER := -1; + vSlashPos1 NUMBER; + vSlashPos2 NUMBER; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 
'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pMinDate => '''||nvl(TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' + ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Extract base filename and extension or construct default filename + IF pFileName IS NOT NULL THEN + -- Use provided filename + IF INSTR(pFileName, '.') > 0 THEN + vFileBaseName := SUBSTR(pFileName, 1, INSTR(pFileName, '.', -1) - 1); + vFileExtension := SUBSTR(pFileName, INSTR(pFileName, '.', -1)); + ELSE + vFileBaseName := pFileName; + vFileExtension := '.csv'; + END IF; + ELSE + -- Construct default filename: TABLENAME (without extension, will be added by worker) + vFileBaseName := UPPER(pTableName); + vFileExtension := '.csv'; + END IF; + + -- Validate table, key column, and column list using shared procedure + VALIDATE_TABLE_AND_COLUMNS(vSchemaName, vTableName, vKeyColumnName, pColumnList, vParameters); + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(pColumnList, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Input column list: ' || NVL(pColumnList, 'NULL (using 
dynamic column list)'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Validate parallel degree parameter + IF pParallelDegree < 1 OR pParallelDegree > 16 THEN + vgMsgTmp := ENV_MANAGER.MSG_INVALID_PARALLEL_DEGREE || ': ' || pParallelDegree || '. Valid range: 1-16'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + END IF; + + -- Get partitions using shared function + vPartitions := GET_PARTITIONS(vSchemaName, vTableName, vKeyColumnName, pMinDate, pMaxDate, vParameters); + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vPartitions.COUNT || ' year/month combinations to export', 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Date range: ' || TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS') || ' to ' || TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel degree: ' || pParallelDegree, 'INFO', vParameters); + + -- Sequential processing (parallel degree = 1) + IF pParallelDegree = 1 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Using sequential processing (pParallelDegree = 1)', 'DEBUG', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vPartitions(i).year, + pMonth => vPartitions(i).month, + pBucketUri => vBucketUri, + pFolderName => pFolderName, + pProcessedColumns => vProcessedColumnList, + pMinDate => pMinDate, + pMaxDate => pMaxDate, + pCredentialName => pCredentialName, + pFormat => 'CSV', + pFileBaseName => vFileBaseName, + pMaxFileSize => pMaxFileSize, + pParameters => vParameters + ); + END LOOP; + + -- Parallel processing (parallel degree > 1) + ELSE + -- Skip parallel processing if no partitions found + IF vPartitions.COUNT = 0 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('No partitions to export - skipping parallel CSV processing', 'INFO', vParameters); + ELSE + DECLARE + vTaskName VARCHAR2(128) := 'DATA_CSV_EXPORT_TASK_' || TO_CHAR(SYSTIMESTAMP, 'YYYYMMDDHH24MISSFF'); + vChunkId NUMBER; + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); + + -- Clean up old completed chunks (>24 hours) to prevent table bloat + -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) + -- This prevents race conditions when multiple CSV exports run simultaneously + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE STATUS = 'COMPLETED' + AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters); + + -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) + FOR i IN 1 .. 
vPartitions.COUNT LOOP + MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t + USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id) + WHEN NOT MATCHED THEN + INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, 'PENDING') + WHEN MATCHED THEN + UPDATE SET TASK_NAME = vTaskName, + STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; + END LOOP; + COMMIT; + + -- Log chunk statistics + DECLARE + vPendingCount NUMBER; + vFailedCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; + SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + END; + + -- Create parallel task + DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); + + -- Define chunks by number range (1 to partition count) + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + task_name => vTaskName, + table_owner => 'CT_MRDS', + table_name => 'A_PARALLEL_EXPORT_CHUNKS', + table_column => 'CHUNK_ID', + chunk_size => 1 -- Each partition is one chunk + ); + + -- Execute task in parallel + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel CSV export task: ' || vTaskName, 'DEBUG', vParameters); + + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => 
vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + + -- Check for errors + DECLARE + vErrorCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vErrorCount + FROM USER_PARALLEL_EXECUTE_CHUNKS + WHERE task_name = vTaskName AND status = 'PROCESSED_WITH_ERROR'; + + IF vErrorCount > 0 THEN + vgMsgTmp := 'Parallel CSV export completed with ' || vErrorCount || ' errors. Check USER_PARALLEL_EXECUTE_CHUNKS for details.'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END IF; + END; + + -- Clean up task + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + + -- Clean up chunks for THIS specific task only (session-safe) + -- CRITICAL: Use TASK_NAME filter to avoid deleting chunks from other active CSV sessions + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel CSV execution completed successfully', 'INFO', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Attempt to drop task on error + BEGIN + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + EXCEPTION + WHEN OTHERS THEN NULL; -- Ignore drop errors + END; + + vgMsgTmp := ENV_MANAGER.MSG_PARALLEL_EXECUTION_FAILED || ': ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END; + END IF; + END IF; + + -- Note: File registration handled by EXPORT_SINGLE_PARTITION when pRegisterExport=TRUE + -- Each partition calls pRegisterExport logic independently during serial/parallel execution + + -- Register exported files to A_SOURCE_FILE_RECEIVED if requested (after successful export) + IF pRegisterExport THEN + -- Lookup A_SOURCE_FILE_CONFIG_KEY based on 
pFolderName parsing + -- Format: {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID} + -- Example: 'ODS/CSDB/CSDB_DEBT_DAILY' -> SOURCE_KEY='CSDB', TABLE_ID='CSDB_DEBT_DAILY' + + -- Parse pFolderName to extract SOURCE_KEY and TABLE_ID + vSlashPos1 := INSTR(pFolderName, '/', 1, 1); -- First '/' position + vSlashPos2 := INSTR(pFolderName, '/', 1, 2); -- Second '/' position + + IF vSlashPos1 > 0 AND vSlashPos2 > 0 THEN + -- Extract segment 2 (SOURCE_KEY) and segment 3 (TABLE_ID) + vSourceKey := SUBSTR(pFolderName, vSlashPos1 + 1, vSlashPos2 - vSlashPos1 - 1); + vTableId := SUBSTR(pFolderName, vSlashPos2 + 1); + + -- Find configuration based on SOURCE_KEY and TABLE_ID + BEGIN + SELECT A_SOURCE_FILE_CONFIG_KEY + INTO vConfigKey + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE A_SOURCE_KEY = vSourceKey + AND TABLE_ID = vTableId + AND SOURCE_FILE_TYPE = 'INPUT' + AND ROWNUM = 1; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found config key: ' || vConfigKey || ' for SOURCE=' || vSourceKey || ', TABLE=' || vTableId, 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('No config found for SOURCE=' || vSourceKey || ', TABLE=' || vTableId || ' - using default (-1)', 'INFO', vParameters); + END; + ELSE + -- Cannot parse folder name - use default + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('Cannot parse pFolderName: ' || pFolderName || ' - using default (-1)', 'WARNING', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Registering ' || vPartitions.COUNT || ' exported files to A_SOURCE_FILE_RECEIVED with config key: ' || vConfigKey, 'INFO', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + -- Construct filename and URI for this partition + vFileName := NVL(vFileBaseName, UPPER(REPLACE(vTableName, vSchemaName || '.', ''))) || '_' || vPartitions(i).year || vPartitions(i).month || '.csv'; + vFileUri := vBucketUri || CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || sanitizeFilename(vFileName); + + -- Get file metadata from OCI bucket (CHECKSUM, CREATED, BYTES) with retry logic + DECLARE + vChecksum VARCHAR2(128); + vCreated TIMESTAMP WITH TIME ZONE; + vBytes NUMBER; + vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix + vSanitizedFileName VARCHAR2(1000); + vRetryCount NUMBER := 0; + vMaxRetries NUMBER := 1; -- One retry after initial attempt + vRetryDelay NUMBER := 2; -- 2 seconds delay + BEGIN + -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) + -- Example: LEGACY_DEBT_202508.csv becomes LEGACY_DEBT_202508_1_20260211T102621591769Z.csv + vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i'); + + -- Try to get file metadata with retry logic + <> + LOOP + BEGIN + SELECT object_name, checksum, created, bytes + INTO vActualFileName, vChecksum, vCreated, vBytes + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => pCredentialName, + location_uri => vBucketUri + )) + WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' + ORDER BY created DESC, bytes DESC + FETCH FIRST 1 ROW ONLY; + + -- Extract filename only from full path (remove bucket folder prefix) + -- vActualFileName contains: 'ODS/CSDB/CSDB_DEBT/LEGACY_DEBT_202508_1_20260211T111341375171Z.csv' + -- Extract only: 'LEGACY_DEBT_202508_1_20260211T111341375171Z.csv' + vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1); + + -- Success - exit retry 
loop + EXIT metadata_retry_loop; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vRetryCount := vRetryCount + 1; + + IF vRetryCount <= vMaxRetries THEN + -- Log retry attempt + ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); + + -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) + DBMS_SESSION.SLEEP(vRetryDelay); + ELSE + -- Max retries exceeded - re-raise exception + RAISE; + END IF; + END; + END LOOP metadata_retry_loop; + + -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + CHECKSUM, + CREATED, + BYTES, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_FILE_NAME + ) VALUES ( + vSourceFileReceivedKey, + vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup + vActualFileName, -- Use actual filename with Oracle suffix + vChecksum, + vCreated, + vBytes, + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for CSV exports + NULL, -- PARTITION_MONTH not used for CSV exports + NULL -- ARCH_FILE_NAME not used for CSV exports + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- File not found after retries - log warning and continue without metadata + ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); + + -- Sanitize filename for fallback INSERT (function cannot be used in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Insert without metadata + 
vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_FILE_NAME + ) VALUES ( + vSourceFileReceivedKey, + vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup + vSanitizedFileName, -- Fallback: use theoretical filename if actual not found + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for CSV exports + NULL, -- PARTITION_MONTH not used for CSV exports + NULL -- ARCH_FILE_NAME not used for CSV exports + ); + END; + END LOOP; + + COMMIT; + ENV_MANAGER.LOG_PROCESS_EVENT('Successfully registered all ' || vPartitions.COUNT || ' files', 'INFO', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Export completed successfully for ' || vPartitions.COUNT || ' files', 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in pColumnList' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_INVALID_PARALLEL_DEGREE THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + WHEN ENV_MANAGER.ERR_PARALLEL_EXECUTION_FAILED THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and 
backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA_TO_CSV_BY_DATE; + + ---------------------------------------------------------------------------------------------------- + -- VERSION MANAGEMENT FUNCTIONS + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION RETURN VARCHAR2 IS + BEGIN + RETURN PACKAGE_VERSION; + END GET_VERSION; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_BUILD_INFO RETURN VARCHAR2 IS + BEGIN + RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO( + pPackageName => 'DATA_EXPORTER', + pVersion => PACKAGE_VERSION, + pBuildDate => PACKAGE_BUILD_DATE, + pAuthor => PACKAGE_AUTHOR + ); + END GET_BUILD_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2 IS + BEGIN + RETURN ENV_MANAGER.FORMAT_VERSION_HISTORY( + pPackageName => 'DATA_EXPORTER', + pVersionHistory => VERSION_HISTORY + ); + END GET_VERSION_HISTORY; + + ---------------------------------------------------------------------------------------------------- + +END; + +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/DATA_EXPORTER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/DATA_EXPORTER.pkg new file mode 100644 index 0000000..dcb7e51 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/DATA_EXPORTER.pkg @@ -0,0 +1,233 @@ +create or replace PACKAGE CT_MRDS.DATA_EXPORTER +AUTHID CURRENT_USER +AS + /** + * Data Export Package: Provides 
comprehensive data export capabilities to various formats (CSV, Parquet) + * with support for cloud storage integration via Oracle Cloud Infrastructure (OCI). + * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function + * which returns documentation text for confluence page (to Copy-Paste it). + **/ + + -- Package Version Information + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.7.5'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-11 12:15:00'; + PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; + + -- Version History (last 3-5 changes) + VERSION_HISTORY CONSTANT VARCHAR2(4000) := + 'v2.7.5 (2026-02-11): Added pRegisterExport parameter to EXPORT_TABLE_DATA procedure. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) || + 'v2.7.4 (2026-02-11): ACTUAL FILENAME STORAGE - Store real filename with Oracle suffix in SOURCE_FILE_NAME instead of theoretical filename.' || CHR(10) || + 'v2.7.3 (2026-02-11): FIX LIKE pattern for DBMS_CLOUD.LIST_OBJECTS - Removed .csv extension from filename before pattern matching.' || CHR(10) || + 'v2.7.2 (2026-02-11): FIX pRegisterExport in EXPORT_TABLE_DATA_TO_CSV_BY_DATE - Added missing pRegisterExport parameter to EXPORT_SINGLE_PARTITION call.' || CHR(10) || + 'v2.7.1 (2026-02-11): AUTO-LOOKUP A_SOURCE_FILE_CONFIG_KEY - Parse pFolderName to automatically find config key from A_SOURCE_FILE_CONFIG.' || CHR(10) || + 'v2.7.0 (2026-02-10): Added pRegisterExport parameter to EXPORT_TABLE_DATA_TO_CSV_BY_DATE. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) || + 'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. SQLERRM properly assigned to vgMsgTmp variable.' || CHR(10) || + 'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Session-safe cleanup with TASK_NAME filtering.' 
|| CHR(10) || + 'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry.' || CHR(10); + + cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); + vgMsgTmp VARCHAR2(32000); + + --------------------------------------------------------------------------------------------------------------------------- + -- TYPE DEFINITIONS FOR PARTITION HANDLING + --------------------------------------------------------------------------------------------------------------------------- + + /** + * Record type for year/month partition information + **/ + TYPE partition_rec IS RECORD ( + year VARCHAR2(4), + month VARCHAR2(2) + ); + + /** + * Table type for collection of partition records + **/ + TYPE partition_tab IS TABLE OF partition_rec; + + --------------------------------------------------------------------------------------------------------------------------- + -- INTERNAL PARALLEL PROCESSING CALLBACK + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_PARTITION_PARALLEL + * @desc Internal callback procedure for DBMS_PARALLEL_EXECUTE. + * Processes single partition (year/month) chunk in parallel task. + * Called by DBMS_PARALLEL_EXECUTE framework for each chunk. + * This procedure is PUBLIC because DBMS_PARALLEL_EXECUTE requires it, + * but should NOT be called directly by external code. 
+ * @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table) + * @param pEndId - Chunk end ID (same as pStartId for single-row chunks) + **/ + PROCEDURE EXPORT_PARTITION_PARALLEL ( + pStartId IN NUMBER, + pEndId IN NUMBER + ); + + --------------------------------------------------------------------------------------------------------------------------- + -- MAIN EXPORT PROCEDURES + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_TABLE_DATA + * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. + * Exports data into CSV file on OCI infrustructure. + * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * Supports template table for column order and per-column date formatting. + * When pRegisterExport=TRUE, successfully exported files are registered in: + * - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata) + * @param pTemplateTableName - Optional template table (SCHEMA.TABLE or TABLE) for: + * - Column order control (template defines CSV structure) + * - Per-column date formatting via FILE_MANAGER.GET_DATE_FORMAT + * - NULL = use source table columns in natural order + * @param pRegisterExport - When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED table + * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'csv_exports', + * pTemplateTableName => 'CT_ET_TEMPLATES.MY_TEMPLATE', -- Optional + * pRegisterExport => TRUE -- Optional, default FALSE + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pTemplateTableName IN VARCHAR2 default NULL, + pRegisterExport IN BOOLEAN default FALSE, + 
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + + + /** + * @name EXPORT_TABLE_DATA_BY_DATE + * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. + * Exports data into PARQUET files on OCI infrastructure. + * Each YEAR_MONTH pair goes to separate file (implicit partitioning). + * Allows specifying custom column list or uses T.* if pColumnList is NULL. + * Validates that all columns in pColumnList exist in the target table. + * Automatically adds 'T.' prefix to column names in pColumnList. + * Supports parallel partition processing via pParallelDegree parameter (default 1, range 1-16). + * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'parquet_exports', + * pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE, + * pParallelDegree => 8 -- Optional, default 1, range 1-16 + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + + + /** + * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE + * @desc Exports data to separate CSV files partitioned by year and month. + * Creates one CSV file for each year/month combination found in the data. + * Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE, + * but exports to CSV format instead of Parquet. 
+ * Supports parallel partition processing via pParallelDegree parameter (1-16). + * File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL) + * When pRegisterExport=TRUE, successfully exported files are registered in: + * - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata) + * @example + * begin + * -- With custom filename + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'exports', + * pFileName => 'my_export.csv', + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE, + * pParallelDegree => 8, -- Optional, default 1, range 1-16 + * pRegisterExport => TRUE -- Optional, default FALSE, registers to A_SOURCE_FILE_RECEIVED + * ); + * + * -- With auto-generated filename (based on table name only) + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'OU_TOP', + * pTableName => 'AGGREGATED_ALLOTMENT', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'ARCHIVE', + * pFolderName => 'exports', + * pMinDate => DATE '2025-09-01', + * pMaxDate => DATE '2025-09-17', + * pRegisterExport => TRUE -- Registers each export to A_SOURCE_FILE_RECEIVED table + * ); + * -- This will create files like: AGGREGATED_ALLOTMENT_202509.csv, etc. 
+ * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 DEFAULT NULL, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pRegisterExport IN BOOLEAN default FALSE, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + --------------------------------------------------------------------------------------------------------------------------- + -- VERSION MANAGEMENT FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * Returns the current package version number + * return: Version string in format X.Y.Z (e.g., '2.1.0') + **/ + FUNCTION GET_VERSION RETURN VARCHAR2; + + /** + * Returns comprehensive build information including version, date, and author + * return: Formatted string with complete build details + **/ + FUNCTION GET_BUILD_INFO RETURN VARCHAR2; + + /** + * Returns the version history with recent changes + * return: Multi-line string with version history + **/ + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; + +END; + +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/ENV_MANAGER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/ENV_MANAGER.pkb new file mode 100644 index 0000000..856d449 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/ENV_MANAGER.pkb @@ -0,0 +1,1171 @@ +create or replace PACKAGE BODY CT_MRDS.ENV_MANAGER +AS + + 
---------------------------------------------------------------------------------------------------- + + PROCEDURE INIT_ERRORS IS + BEGIN + Errors(CODE_EMPTY_FILEURI_AND_RECKEY) := Error_Record(CODE_EMPTY_FILEURI_AND_RECKEY, MSG_EMPTY_FILEURI_AND_RECKEY); -- -20001 + Errors(CODE_NO_CONFIG_MATCH_FOR_FILEURI) := Error_Record(CODE_NO_CONFIG_MATCH_FOR_FILEURI, MSG_NO_CONFIG_MATCH_FOR_FILEURI); -- -20002 + Errors(CODE_MULTIPLE_MATCH_FOR_SRCFILE) := Error_Record(CODE_MULTIPLE_MATCH_FOR_SRCFILE, MSG_MULTIPLE_MATCH_FOR_SRCFILE); -- -20003 + Errors(CODE_MISSING_COLUMN_DATE_FORMAT) := Error_Record(CODE_MISSING_COLUMN_DATE_FORMAT, MSG_MISSING_COLUMN_DATE_FORMAT); -- -20004 + Errors(CODE_MULTIPLE_COLUMN_DATE_FORMAT) := Error_Record(CODE_MULTIPLE_COLUMN_DATE_FORMAT, MSG_MULTIPLE_COLUMN_DATE_FORMAT); -- -20005 + Errors(CODE_DIDNT_GET_LOAD_OPERATION_ID) := Error_Record(CODE_DIDNT_GET_LOAD_OPERATION_ID, MSG_DIDNT_GET_LOAD_OPERATION_ID); -- -20006 + Errors(CODE_NO_CONFIG_FOR_RECEIVED_FILE) := Error_Record(CODE_NO_CONFIG_FOR_RECEIVED_FILE, MSG_NO_CONFIG_FOR_RECEIVED_FILE); -- -20007 + Errors(CODE_MULTI_CONFIG_FOR_RECEIVED_FILE) := Error_Record(CODE_MULTI_CONFIG_FOR_RECEIVED_FILE, MSG_MULTI_CONFIG_FOR_RECEIVED_FILE); -- -20008 + Errors(CODE_FILE_NOT_FOUND_ON_CLOUD) := Error_Record(CODE_FILE_NOT_FOUND_ON_CLOUD, MSG_FILE_NOT_FOUND_ON_CLOUD); -- -20009 + Errors(CODE_FILE_VALIDATION_FAILED) := Error_Record(CODE_FILE_VALIDATION_FAILED, MSG_FILE_VALIDATION_FAILED); -- -20010 + Errors(CODE_EXCESS_COLUMNS_DETECTED) := Error_Record(CODE_EXCESS_COLUMNS_DETECTED, MSG_EXCESS_COLUMNS_DETECTED); -- -20011 + Errors(CODE_NO_CONFIG_MATCH) := Error_Record(CODE_NO_CONFIG_MATCH, MSG_NO_CONFIG_MATCH); -- -20012 + Errors(CODE_UNKNOWN_PREFIX) := Error_Record(CODE_UNKNOWN_PREFIX, MSG_UNKNOWN_PREFIX); -- -20013 + Errors(CODE_TABLE_NOT_EXISTS) := Error_Record(CODE_TABLE_NOT_EXISTS, MSG_TABLE_NOT_EXISTS); -- -20014 + Errors(CODE_COLUMN_NOT_EXISTS) := Error_Record(CODE_COLUMN_NOT_EXISTS, MSG_COLUMN_NOT_EXISTS); 
-- -20015 + Errors(CODE_UNSUPPORTED_DATA_TYPE) := Error_Record(CODE_UNSUPPORTED_DATA_TYPE, MSG_UNSUPPORTED_DATA_TYPE); -- -20016 + Errors(CODE_MISSING_SOURCE_KEY) := Error_Record(CODE_MISSING_SOURCE_KEY, MSG_MISSING_SOURCE_KEY); -- -20017 + Errors(CODE_NULL_SOURCE_FILE_CONFIG_KEY) := Error_Record(CODE_NULL_SOURCE_FILE_CONFIG_KEY, MSG_NULL_SOURCE_FILE_CONFIG_KEY); -- -20018 + Errors(CODE_DUPLICATED_SOURCE_KEY) := Error_Record(CODE_DUPLICATED_SOURCE_KEY, MSG_DUPLICATED_SOURCE_KEY); -- -20019 + Errors(CODE_MISSING_CONTAINER_CONFIG) := Error_Record(CODE_MISSING_CONTAINER_CONFIG, MSG_MISSING_CONTAINER_CONFIG); -- -20020 + Errors(CODE_MULTIPLE_CONTAINER_ENTRIES) := Error_Record(CODE_MULTIPLE_CONTAINER_ENTRIES, MSG_MULTIPLE_CONTAINER_ENTRIES); -- -20021 + Errors(CODE_WRONG_DESTINATION_PARAM) := Error_Record(CODE_WRONG_DESTINATION_PARAM, MSG_WRONG_DESTINATION_PARAM); -- -20022 + Errors(CODE_FILE_NOT_EXISTS_ON_CLOUD) := Error_Record(CODE_FILE_NOT_EXISTS_ON_CLOUD, MSG_FILE_NOT_EXISTS_ON_CLOUD); -- -20023 + Errors(CODE_FILE_ALREADY_REGISTERED) := Error_Record(CODE_FILE_ALREADY_REGISTERED, MSG_FILE_ALREADY_REGISTERED); -- -20024 + Errors(CODE_WRONG_DATE_TIMESTAMP_FORMAT) := Error_Record(CODE_WRONG_DATE_TIMESTAMP_FORMAT, MSG_WRONG_DATE_TIMESTAMP_FORMAT); -- -20025 + Errors(CODE_ENVIRONMENT_NOT_SET) := Error_Record(CODE_ENVIRONMENT_NOT_SET, MSG_ENVIRONMENT_NOT_SET); -- -20026 + Errors(CODE_CONFIG_VARIABLE_NOT_SET) := Error_Record(CODE_CONFIG_VARIABLE_NOT_SET, MSG_CONFIG_VARIABLE_NOT_SET); -- -20027 + Errors(CODE_NOT_INPUT_SOURCE_FILE_TYPE) := Error_Record(CODE_NOT_INPUT_SOURCE_FILE_TYPE, MSG_NOT_INPUT_SOURCE_FILE_TYPE); -- -20028 + Errors(CODE_EXP_DATA_FOR_ARCH_FAILED) := Error_Record(CODE_EXP_DATA_FOR_ARCH_FAILED, MSG_EXP_DATA_FOR_ARCH_FAILED); -- -20029 + Errors(CODE_RESTORE_FILE_FROM_TRASH) := Error_Record(CODE_RESTORE_FILE_FROM_TRASH, MSG_RESTORE_FILE_FROM_TRASH); -- -20030 + Errors(CODE_CHANGE_STAT_TO_ARCHIVED_FAILED):= Error_Record(CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, 
MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); -- -20031 + Errors(CODE_MOVE_FILE_TO_TRASH_FAILED) := Error_Record(CODE_MOVE_FILE_TO_TRASH_FAILED, MSG_MOVE_FILE_TO_TRASH_FAILED); -- -20032 + Errors(CODE_DROP_EXPORTED_FILES_FAILED) := Error_Record(CODE_DROP_EXPORTED_FILES_FAILED, MSG_DROP_EXPORTED_FILES_FAILED); -- -20033 + Errors(CODE_INVALID_BUCKET_AREA) := Error_Record(CODE_INVALID_BUCKET_AREA, MSG_INVALID_BUCKET_AREA); -- -20034 + Errors(CODE_INVALID_PARALLEL_DEGREE) := Error_Record(CODE_INVALID_PARALLEL_DEGREE, MSG_INVALID_PARALLEL_DEGREE); -- -20110 + Errors(CODE_PARALLEL_EXECUTION_FAILED) := Error_Record(CODE_PARALLEL_EXECUTION_FAILED, MSG_PARALLEL_EXECUTION_FAILED); -- -20111 + + Errors(CODE_UNKNOWN) := Error_Record(CODE_UNKNOWN, MSG_UNKNOWN); -- -20999 + + END INIT_ERRORS; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_DEFAULT_ENV + RETURN VARCHAR2 + IS + vDefaultEnv CT_MRDS.a_file_manager_config.config_variable_value%TYPE; + BEGIN + select config_variable_value + into vDefaultEnv + from CT_MRDS.a_file_manager_config + where lower(environment_id)='default' + and lower(config_variable)='environmentid'; + RETURN vDefaultEnv; + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN NULL; + END; + + ---------------------------------------------------------------------------------------------------- + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE INIT_VARIABLES( + pEnv VARCHAR2 + ) IS + BEGIN + for rec in ( + select + ENVIRONMENT_ID + ,REGION + ,NAMESPACE + ,INBOXBUCKETNAME + ,DATABUCKETNAME + ,ARCHIVEBUCKETNAME + ,CREDENTIALNAME + ,LOGGINGENABLED + ,MINLOGLEVEL + ,DEFAULTDATEFORMAT + ,CONSOLELOGGINGENABLED + from ( + select environment_id, config_variable, config_variable_value from CT_MRDS.A_FILE_MANAGER_CONFIG + where environment_id=pEnv + ) + pivot ( + min(config_variable_value) + for config_variable in ( + 'Region' as Region + 
,'NameSpace' as NameSpace + ,'InboxBucketName' as InboxBucketName + ,'DataBucketName' as DataBucketName + ,'ArchiveBucketName' as ArchiveBucketName + ,'CredentialName' as CredentialName + ,'LoggingEnabled' as LoggingEnabled + ,'MinLogLevel' as MinLogLevel + ,'DefaultDateFormat' as DefaultDateFormat + ,'ConsoleLoggingEnabled' as ConsoleLoggingEnabled) + ) + ) loop + if (rec.NAMESPACE is NULL + or rec.REGION is NULL + or rec.NAMESPACE is NULL + or rec.INBOXBUCKETNAME is NULL + or rec.DATABUCKETNAME is NULL + or rec.ARCHIVEBUCKETNAME is NULL + or rec.CREDENTIALNAME is NULL + ) THEN + vgMsgTmp := MSG_CONFIG_VARIABLE_NOT_SET + ||cgBL||' '||'Details about existing Configuration Variables where environment_id='||pEnv||': ' + ||cgBL||' '||'-------------------------' + ||cgBL||' '||'Region = '||rec.Region + ||cgBL||' '||'NameSpace = '||rec.NameSpace + ||cgBL||' '||'InboxBucketName = '||rec.InboxBucketName + ||cgBL||' '||'DataBucketName = '||rec.DataBucketName + ||cgBL||' '||'ArchiveBucketName = '||rec.ArchiveBucketName + ||cgBL||' '||'CredentialName = '||rec.CredentialName + ; + LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR'); + RAISE_APPLICATION_ERROR(CODE_CONFIG_VARIABLE_NOT_SET, vgMsgTmp); + + elsif (rec.LOGGINGENABLED is NULL + or rec.MINLOGLEVEL is NULL + or rec.DEFAULTDATEFORMAT is NULL + ) THEN + vgMsgTmp := 'Missing configuration variables' + ||cgBL||' '||'Details about existing Configuration Variables where environment_id='||pEnv||': ' + ||cgBL||' '||'-------------------------' + ||cgBL||' '||'LoggingEnabled = '||rec.LoggingEnabled + ||cgBL||' '||'MinLogLevel = '||rec.MinLogLevel + ||cgBL||' '||'DefaultDateFormat = '||rec.DefaultDateFormat + ; + LOG_PROCESS_EVENT(vgMsgTmp, 'WARNING'); + + else + gvNameSpace := rec.NAMESPACE; + gvRegion := rec.REGION; + gvInboxBucketName := rec.INBOXBUCKETNAME; + gvDataBucketName := rec.DATABUCKETNAME; + gvArchiveBucketName := rec.ARCHIVEBUCKETNAME; + gvCredentialName := rec.CREDENTIALNAME; + gvInboxBucketUri := 
'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.INBOXBUCKETNAME||'/o/'; + gvDataBucketUri := 'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.DATABUCKETNAME||'/o/'; + gvArchiveBucketUri := 'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.ARCHIVEBUCKETNAME||'/o/'; + gvLoggingEnabled := rec.LOGGINGENABLED; + gvMinLogLevel := rec.MINLOGLEVEL; + gvDefaultDateFormat := rec.DEFAULTDATEFORMAT; + gvConsoleLoggingEnabled := NVL(rec.CONSOLELOGGINGENABLED, 'ON'); + end if; + end loop; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vgMsgTmp := MSG_CONFIG_VARIABLE_NOT_SET + ||cgBL||' '||'No configuration found for environment_id='||pEnv||' in A_FILE_MANAGER_CONFIG table'; + LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', 'pEnv='||pEnv); + RAISE_APPLICATION_ERROR(CODE_CONFIG_VARIABLE_NOT_SET, vgMsgTmp); + WHEN OTHERS THEN + vgMsgTmp := 'Unexpected error while initializing variables for environment: '||pEnv + ||cgBL||' '||'SQLERRM: '||SQLERRM; + LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', 'pEnv='||pEnv); + RAISE; + END INIT_VARIABLES; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_ERROR_MESSAGE( + pCode PLS_INTEGER + ) RETURN VARCHAR2 + IS + BEGIN + RETURN Errors(pCode).message; + EXCEPTION + WHEN NO_DATA_FOUND THEN + LOG_PROCESS_EVENT('No error message found for pCode='||pCode , 'WARNING', 'pCode='||pCode); + LOG_PROCESS_EVENT('Update ENV_MANAGER package header with new code.' 
, 'WARNING', 'pCode='||pCode); + RETURN NULL; + WHEN OTHERS THEN + LOG_PROCESS_EVENT(MSG_UNKNOWN , 'ERROR', 'pCode='||pCode); + RAISE_APPLICATION_ERROR(CODE_UNKNOWN, MSG_UNKNOWN); + END GET_ERROR_MESSAGE; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_ERROR_STACK( + pFormat VARCHAR2 + ,pCode PLS_INTEGER + ,pSourceFileReceivedKey CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE DEFAULT NULL + ) RETURN VARCHAR2 + IS + vFullErrorCore VARCHAR2(32000); + vFullErrorMsg VARCHAR2(32000); + BEGIN +-- vgErrorMessage := SQLERRM|| cgBL; +-- vgErrorStack := DBMS_UTILITY.FORMAT_ERROR_STACK; +-- vgErrorBacktrace := DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + vFullErrorCore :='Error Message:' + ||cgBL|| SQLERRM|| cgBL + ||'-------------------------------------------------------' + ||cgBL||'Error Stack:' + ||cgBL|| DBMS_UTILITY.FORMAT_ERROR_STACK + ||'-------------------------------------------------------' + ||cgBL||'Error Backtrace:' + ||cgBL|| DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; +-- vFullErrorCore := REGEXP_REPLACE (vFullErrorCore, pCode||': ', pCode||': '||GET_ERROR_MESSAGE(pCode) , 1, 1); + IF (pFormat = 'TABLE') THEN + vFullErrorMsg := vFullErrorCore; + ELSE + vFullErrorMsg := cgBL||'------------------------------------------------------+' + ||cgBL||vFullErrorCore + ||'------------------------------------------------------+'; + END IF; +-- IF pSourceFileReceivedKey is not null THEN +-- vFullErrorMsg := vFullErrorMsg ||cgBL||GET_DET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey,1,1,1); +-- END IF; + + RETURN vFullErrorMsg; + EXCEPTION + WHEN OTHERS THEN + LOG_PROCESS_EVENT(MSG_UNKNOWN , 'ERROR', 'pFormat='||pFormat); + RETURN NULL; + END GET_ERROR_STACK; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION FORMAT_PARAMETERS( + pParameterList SYS.ODCIVARCHAR2LIST + ) RETURN VARCHAR2 IS + vResult VARCHAR2(10000); + BEGIN + FOR i 
IN 1 .. pParameterList.COUNT LOOP +-- dbms_output.put_line('pParameterList(i): '||pParameterList(i)); + if i < pParameterList.COUNT then vResult := vResult || replace(pParameterList(i), '''NULL''', 'NULL') ||' ,'|| cgBL; + else vResult := vResult || replace(pParameterList(i), '''NULL''', 'NULL'); + end if; + END LOOP; + RETURN vResult; + EXCEPTION + WHEN OTHERS THEN + LOG_PROCESS_EVENT('Error while formating parameters.' , 'WARNING'); + RETURN NULL; + END FORMAT_PARAMETERS; + + ---------------------------------------------------------------------------------------------------- + + + + PROCEDURE LOG_PROCESS_EVENT ( + pLogMessage VARCHAR2 + ,pLogLevel VARCHAR2 DEFAULT 'ERROR' + ,pParameters VARCHAR2 DEFAULT NULL + ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER' + ) IS + PRAGMA AUTONOMOUS_TRANSACTION; + + vLoggingEnabled VARCHAR2(10); + vMinLogLevel VARCHAR2(10); + vCallStack VARCHAR2(10000); + vProcedureName VARCHAR2(100); + vProcedureLevel PLS_INTEGER; + vTotalLines PLS_INTEGER; + vCurrentLine PLS_INTEGER; + + -- Map of priority level + TYPE logLevelMap IS TABLE OF NUMBER INDEX BY VARCHAR2(10); + vLogLevels logLevelMap; + + BEGIN + -- Priority logging level (higher -> more important) + vLogLevels('DEBUG') := 1; + vLogLevels('INFO') := 2; + vLogLevels('WARNING') := 3; + vLogLevels('ERROR') := 4; + + -- Check if logging is turned OFF + IF gvLoggingEnabled = 'OFF' THEN + RETURN; + END IF; + -- Check logging level + IF vLogLevels(pLogLevel) < vLogLevels(gvMinLogLevel) THEN + RETURN; + END IF; + + vCallStack := DBMS_UTILITY.FORMAT_CALL_STACK; + vProcedureName := REGEXP_SUBSTR(vCallStack, 'package body\s+\w+\.(\w+\.\w+)', 1, 2, NULL, 1); + vTotalLines := REGEXP_COUNT(vCallStack, CHR(10)) + 1; + vCurrentLine := REGEXP_COUNT(SUBSTR(vCallStack, 1, INSTR(vCallStack, vProcedureName) - 1), CHR(10)) + 1; + vProcedureLevel := (vTotalLines - vCurrentLine + 1) - 3; + vProcedureName := LPAD(vProcedureName, LENGTH(vProcedureName) + 2*vProcedureLevel, ' '); + + INSERT INTO 
CT_MRDS.A_PROCESS_LOG (guid, username, osuser, machine, module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES (guid, gvUsername, gvOsuser, gvMachine, gvModule, pProcessName, vProcedureName, pParameters, pLogLevel, pLogMessage); + + COMMIT; + + -- Also output to console for immediate visibility (if enabled) + IF gvConsoleLoggingEnabled = 'ON' THEN + DBMS_OUTPUT.PUT_LINE('[' || TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') || '] [' || pLogLevel || '] ' || vProcedureName || ': ' || pLogMessage); + END IF; + + END LOG_PROCESS_EVENT; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE LOG_PROCESS_ERROR( + pLogMessage IN VARCHAR2, + pParameters IN VARCHAR2 DEFAULT NULL, + pProcessName IN VARCHAR2 DEFAULT 'FILE_MANAGER' + ) IS + PRAGMA AUTONOMOUS_TRANSACTION; + + vCallStack VARCHAR2(32767); + vErrorStack VARCHAR2(32767); + vErrorBacktrace VARCHAR2(32767); + vAdjustedBacktrace VARCHAR2(32767); + vErrorContext VARCHAR2(4000); + vProcName VARCHAR2(100); + vProcedureLevel PLS_INTEGER; + vTotalLines PLS_INTEGER; + vCurrentLine PLS_INTEGER; + vFullErrorMessage CLOB; + vTimestamp VARCHAR2(30); + vSessionInfo VARCHAR2(1000); + + BEGIN + -- Check if logging is disabled + IF gvLoggingEnabled = 'OFF' THEN + RETURN; + END IF; + + -- Capture all available error information + vErrorStack := DBMS_UTILITY.FORMAT_ERROR_STACK; + vErrorBacktrace := DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + vCallStack := DBMS_UTILITY.FORMAT_CALL_STACK; + vTimestamp := TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS'); + + -- Capture session information for better context + vSessionInfo := 'Session ID: ' || SYS_CONTEXT('USERENV', 'SID') || + ', User: ' || SYS_CONTEXT('USERENV', 'SESSION_USER') || + ', Module: ' || SYS_CONTEXT('USERENV', 'MODULE') || + ', Client Info: ' || NVL(SYS_CONTEXT('USERENV', 'CLIENT_INFO'), 'N/A') || + ', Action: ' || NVL(SYS_CONTEXT('USERENV', 'ACTION'), 'N/A'); + + -- Build error context 
information + vErrorContext := 'Environment: ' || gvEnv || + ', Process: ' || NVL(pProcessName, 'UNKNOWN') || + ', Timestamp: ' || vTimestamp || + ', SQLCODE: ' || SQLCODE || + ', Transaction Active: ' || CASE WHEN DBMS_TRANSACTION.STEP_ID IS NOT NULL THEN 'YES' ELSE 'NO' END; + + -- Extract procedure name and nesting level from call stack + -- Always extract actual procedure name from call stack for precise error location + vProcName := REGEXP_SUBSTR(vCallStack, 'package body\s+\w+\.(\w+\.\w+)', 1, 2, NULL, 1); + + -- If we couldn't extract procedure name from call stack, use provided process name + IF vProcName IS NULL THEN + vProcName := NVL(pProcessName, 'UNKNOWN'); + END IF; + + vTotalLines := REGEXP_COUNT(vCallStack, CHR(10)) + 1; + vCurrentLine := REGEXP_COUNT(SUBSTR(vCallStack, 1, INSTR(vCallStack, vProcName) - 1), CHR(10)) + 1; + vProcedureLevel := (vTotalLines - vCurrentLine + 1) - 3; + vProcName := LPAD(vProcName, LENGTH(vProcName) + 2*vProcedureLevel, ' '); + + -- Enhance line number display to show direct _BODY.sql file line numbers + -- Since packages are now split into separate _SPEC and _BODY files, line numbers map directly + vAdjustedBacktrace := REGEXP_REPLACE(vErrorBacktrace, + 'at "CT_MRDS\.FILE_MANAGER", line ([0-9]+)', + 'at "CT_MRDS.FILE_MANAGER", line \1 (-> FILE_MANAGER_BODY.sql:line \1)', 1, 0, 'i'); + + vAdjustedBacktrace := REGEXP_REPLACE(vAdjustedBacktrace, + 'at "CT_MRDS\.ENV_MANAGER", line ([0-9]+)', + 'at "CT_MRDS.ENV_MANAGER", line \1 (-> ENV_MANAGER_BODY.sql:line \1)', 1, 0, 'i'); + + -- Build comprehensive error message with professional formatting + vFullErrorMessage := 'ERROR REPORT' || cgBL || + '-------------------------------------------------------' || cgBL || + 'ERROR SUMMARY' || cgBL || + ' Message: ' || pLogMessage || cgBL || + ' Context: ' || vErrorContext || cgBL || + '-------------------------------------------------------' || cgBL || + 'SESSION INFORMATION' || cgBL || + ' ' || vSessionInfo || cgBL || + 
'-------------------------------------------------------' || cgBL || + 'ERROR STACK (Oracle Internal)' || cgBL || + vErrorStack || + '-------------------------------------------------------' || cgBL || + 'BACKTRACE INFORMATION (Oracle Internal)' || cgBL || + vErrorBacktrace || + '-------------------------------------------------------' || cgBL || + 'CALL STACK (Execution Path)' || cgBL || + vCallStack || + '-------------------------------------------------------' || cgBL || + 'QUICK REFERENCE' || cgBL || + ' SQLCODE: ' || SQLCODE || cgBL || + ' SQLERRM: ' || SQLERRM || cgBL || + ' Timestamp: ' || vTimestamp || cgBL || + ' Parameters: ' || NVL(pParameters, 'None provided') || cgBL || + '-------------------------------------------------------'; + + -- Insert comprehensive error record into log table + -- Note: LOG_MESSAGE is VARCHAR2(4000), so we'll truncate if needed but include key info + INSERT INTO CT_MRDS.A_PROCESS_LOG (guid, username, osuser, machine, module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES (guid, gvUsername, gvOsuser, gvMachine, gvModule, NVL(pProcessName, 'FILE_MANAGER'), vProcName, pParameters, 'ERROR', + CASE + WHEN LENGTH(vFullErrorMessage) <= 4000 THEN vFullErrorMessage + ELSE SUBSTR(vFullErrorMessage, 1, 3950) || '... 
[TRUNCATED]' + END); + + COMMIT; + + -- Enhanced console output for immediate visibility (if enabled) + IF gvConsoleLoggingEnabled = 'ON' THEN + DBMS_OUTPUT.PUT_LINE('======================================================='); + DBMS_OUTPUT.PUT_LINE('ERROR DETECTED AT: ' || vTimestamp); + DBMS_OUTPUT.PUT_LINE('PROCEDURE: ' || NVL(vProcName, 'UNKNOWN')); + DBMS_OUTPUT.PUT_LINE('MESSAGE: ' || pLogMessage); + DBMS_OUTPUT.PUT_LINE('SQLCODE: ' || SQLCODE || ' | ENVIRONMENT: ' || gvEnv); + -- Extract and show the most relevant file and line number + IF INSTR(vAdjustedBacktrace, '-> ') > 0 THEN + DBMS_OUTPUT.PUT_LINE('SOURCE FILE LOCATION: ' || REGEXP_SUBSTR(vAdjustedBacktrace, '-> [^)]+')); + END IF; + DBMS_OUTPUT.PUT_LINE('FULL DETAILS: Query A_PROCESS_LOG table for complete diagnostic info'); + DBMS_OUTPUT.PUT_LINE('QUERY (This Error): SELECT * FROM CT_MRDS.A_PROCESS_LOG WHERE GUID = ''' || guid || ''' ORDER BY LOG_TIMESTAMP DESC;'); + DBMS_OUTPUT.PUT_LINE('QUERY (Recent All): SELECT * FROM CT_MRDS.A_PROCESS_LOG WHERE LOG_TIMESTAMP >= SYSDATE - 1/1440 ORDER BY LOG_TIMESTAMP DESC;'); + DBMS_OUTPUT.PUT_LINE('======================================================='); + END IF; + + END LOG_PROCESS_ERROR; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION ANALYZE_VALIDATION_ERRORS( + pValidationLogTable VARCHAR2, + pTemplateSchema VARCHAR2, + pTemplateTable VARCHAR2, + pCsvFileUri VARCHAR2 + ) RETURN VARCHAR2 + IS + vAnalysisReport CLOB := ''; + vCsvHeader VARCHAR2(4000); + vExpectedOrder VARCHAR2(4000); + vCsvOrder VARCHAR2(4000); + vErrorDetails VARCHAR2(32000) := ''; + vSolutions VARCHAR2(4000); + vColumnMismatch VARCHAR2(1000); + vErrorCount NUMBER := 0; + vFirstDataError VARCHAR2(1000); + vErrorColumn VARCHAR2(100); + vErrorValue VARCHAR2(500); + vExpectedType VARCHAR2(100); + vTemplateColCount NUMBER := 0; + vCsvColCount NUMBER := 0; + vExcessColumns VARCHAR2(2000); + vCsvFirstLine VARCHAR2(4000); + + 
-- Cursor for template table columns + CURSOR c_template_columns IS + SELECT COLUMN_NAME, DATA_TYPE, COLUMN_ID + FROM ALL_TAB_COLUMNS + WHERE OWNER = UPPER(REGEXP_SUBSTR(pTemplateSchema || '.' || pTemplateTable, '^([^.]+)')) + AND TABLE_NAME = UPPER(REGEXP_SUBSTR(pTemplateSchema || '.' || pTemplateTable, '\.(.+)$', 1, 1, NULL, 1)) + ORDER BY COLUMN_ID; + + BEGIN + -- Build expected column order from template table and count columns + FOR rec IN c_template_columns LOOP + IF vExpectedOrder IS NOT NULL THEN + vExpectedOrder := vExpectedOrder || ', '; + END IF; + vExpectedOrder := vExpectedOrder || rec.COLUMN_NAME; + vTemplateColCount := vTemplateColCount + 1; + END LOOP; + + -- Parse validation log table for errors and CSV structure + BEGIN + -- Try to extract error information from the validation log table + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ' || pValidationLogTable || + ' WHERE record LIKE ''error processing column%''' + INTO vErrorCount; + + -- Get first error details + IF vErrorCount > 0 THEN + EXECUTE IMMEDIATE 'SELECT record FROM ' || pValidationLogTable || + ' WHERE record LIKE ''error processing column%'' AND ROWNUM = 1' + INTO vFirstDataError; + + -- Parse error to extract column name and error type + vErrorColumn := REGEXP_SUBSTR(vFirstDataError, 'error processing column ([A-Z_]+)', 1, 1, NULL, 1); + + -- Try to get the actual error value from ORA-01722 message + BEGIN + EXECUTE IMMEDIATE 'SELECT record FROM ' || pValidationLogTable || + ' WHERE record LIKE ''ORA-01722%'' AND ROWNUM = 1' + INTO vFirstDataError; + vErrorValue := REGEXP_SUBSTR(vFirstDataError, 'string value containing ''([^'']+)''', 1, 1, NULL, 1); + EXCEPTION + WHEN NO_DATA_FOUND THEN + vErrorValue := 'unknown value'; + WHEN OTHERS THEN + vErrorValue := 'parsing error'; + END; + END IF; + + -- Try to extract CSV structure from validation log field definitions + BEGIN + EXECUTE IMMEDIATE ' + SELECT LISTAGG( + REGEXP_SUBSTR(record, ''^\s+([A-Z_]+)\s+'', 1, 1, NULL, 1), + '', '' + ) WITHIN 
GROUP (ORDER BY ROWNUM) + FROM ' || pValidationLogTable || ' + WHERE record LIKE '' %CHAR%'' + AND record NOT LIKE ''%Fields in Data Source%'' + AND REGEXP_SUBSTR(record, ''^\s+([A-Z_]+)\s+'') IS NOT NULL' + INTO vCsvOrder; + + -- Count CSV columns from parsed structure + IF vCsvOrder IS NOT NULL THEN + vCsvColCount := REGEXP_COUNT(vCsvOrder, ',') + 1; + END IF; + + EXCEPTION + WHEN OTHERS THEN + vCsvOrder := 'Unable to determine CSV column order from validation log'; + END; + + -- Alternative method: Try to read first line of CSV directly for column count + IF vCsvColCount = 0 THEN + BEGIN + -- This is a fallback - try to get CSV header from external source if possible + -- Note: This would require DBMS_CLOUD.GET_OBJECT or similar approach + -- For now, we'll rely on the validation log parsing + NULL; + EXCEPTION + WHEN OTHERS THEN + NULL; + END; + END IF; + + EXCEPTION + WHEN OTHERS THEN + vErrorDetails := 'Error analyzing validation log: ' || SQLERRM; + END; + + -- Detect column order mismatch and excess columns + IF vCsvOrder IS NOT NULL AND vExpectedOrder IS NOT NULL THEN + IF UPPER(REPLACE(vCsvOrder, ' ', '')) != UPPER(REPLACE(vExpectedOrder, ' ', '')) THEN + vColumnMismatch := 'YES'; + ELSE + vColumnMismatch := 'NO'; + END IF; + END IF; + + -- Check for excess columns + IF vCsvColCount > vTemplateColCount THEN + -- Try to identify which columns are excess + IF vCsvOrder IS NOT NULL THEN + -- Parse CSV columns and compare with template + DECLARE + vCsvCols SYS.ODCIVARCHAR2LIST; + vTemplateCols SYS.ODCIVARCHAR2LIST; + vExcessFound VARCHAR2(1) := 'N'; + i NUMBER; + BEGIN + -- Split CSV columns + SELECT TRIM(REGEXP_SUBSTR(vCsvOrder, '[^,]+', 1, LEVEL)) + BULK COLLECT INTO vCsvCols + FROM DUAL + CONNECT BY REGEXP_SUBSTR(vCsvOrder, '[^,]+', 1, LEVEL) IS NOT NULL; + + -- Split template columns + SELECT TRIM(REGEXP_SUBSTR(vExpectedOrder, '[^,]+', 1, LEVEL)) + BULK COLLECT INTO vTemplateCols + FROM DUAL + CONNECT BY REGEXP_SUBSTR(vExpectedOrder, '[^,]+', 1, LEVEL) IS 
NOT NULL; + + -- Find excess columns (those in CSV but not in template) + FOR i IN 1..vCsvCols.COUNT LOOP + DECLARE + vFoundInTemplate BOOLEAN := FALSE; + j NUMBER; + BEGIN + -- Check if CSV column exists in template + FOR j IN 1..vTemplateCols.COUNT LOOP + IF UPPER(TRIM(vCsvCols(i))) = UPPER(TRIM(vTemplateCols(j))) THEN + vFoundInTemplate := TRUE; + EXIT; + END IF; + END LOOP; + + -- If not found in template, it's an excess column + IF NOT vFoundInTemplate THEN + IF vExcessFound = 'Y' THEN + vExcessColumns := vExcessColumns || ', '; + END IF; + vExcessColumns := vExcessColumns || vCsvCols(i); + vExcessFound := 'Y'; + END IF; + END; + END LOOP; + EXCEPTION + WHEN OTHERS THEN + vExcessColumns := 'Unable to determine specific excess columns'; + END; + END IF; + END IF; + + -- Build comprehensive analysis report + vAnalysisReport := 'FILE VALIDATION FAILED - DETAILED ANALYSIS' || cgBL || + '=================================================' || cgBL || cgBL; + + -- Column structure analysis + vAnalysisReport := vAnalysisReport || + 'COLUMN STRUCTURE ANALYSIS:' || cgBL || + '---------------------------------------------------' || cgBL || + 'Template Expected Order: ' || vExpectedOrder || cgBL || + 'Template Column Count: ' || vTemplateColCount || cgBL || + 'CSV Detected Order: ' || NVL(vCsvOrder, 'Unknown') || cgBL || + 'CSV Column Count: ' || vCsvColCount || cgBL || cgBL; + + -- Report column count issues + IF vCsvColCount > vTemplateColCount THEN + vAnalysisReport := vAnalysisReport || + 'EXCESS COLUMNS DETECTED!' || cgBL || + 'CSV file has ' || (vCsvColCount - vTemplateColCount) || ' more columns than template allows.' || cgBL; + IF vExcessColumns IS NOT NULL THEN + vAnalysisReport := vAnalysisReport || + 'Excess columns found: ' || vExcessColumns || cgBL; + END IF; + vAnalysisReport := vAnalysisReport || cgBL; + END IF; + + -- Report column order issues + IF vColumnMismatch = 'YES' THEN + vAnalysisReport := vAnalysisReport || + 'COLUMN ORDER MISMATCH DETECTED!' 
|| cgBL || + 'CSV columns are in different order than template expects.' || cgBL || cgBL; + END IF; + + -- Specific error analysis + IF vErrorCount > 0 THEN + vAnalysisReport := vAnalysisReport || + 'SPECIFIC ERRORS FOUND:' || cgBL || + '---------------------------------------------------' || cgBL; + + IF vErrorColumn IS NOT NULL THEN + -- Get expected data type for error column + FOR rec IN c_template_columns LOOP + IF rec.COLUMN_NAME = vErrorColumn THEN + vExpectedType := rec.DATA_TYPE; + EXIT; + END IF; + END LOOP; + + vAnalysisReport := vAnalysisReport || + '1. Column ' || vErrorColumn || ': Expected ' || vExpectedType || + ', received "' || NVL(vErrorValue, 'unknown value') || '" (TEXT)' || cgBL || + ' → CSV position contains different data type than expected' || cgBL; + END IF; + + vAnalysisReport := vAnalysisReport || + 'Total validation errors found: ' || vErrorCount || cgBL || cgBL; + END IF; + + -- Solutions section + vAnalysisReport := vAnalysisReport || + 'SUGGESTED SOLUTIONS:' || cgBL || + '---------------------------------------------------' || cgBL; + + -- Solutions for excess columns + IF vCsvColCount > vTemplateColCount THEN + vAnalysisReport := vAnalysisReport || + 'FOR EXCESS COLUMNS:' || cgBL || + '• Remove extra columns from CSV file' || cgBL || + '• Keep only these columns in this order: ' || vExpectedOrder || cgBL; + IF vExcessColumns IS NOT NULL THEN + vAnalysisReport := vAnalysisReport || + '• Specifically remove: ' || vExcessColumns || cgBL; + END IF; + vAnalysisReport := vAnalysisReport || cgBL; + END IF; + + -- Solutions for column order + IF vColumnMismatch = 'YES' THEN + vAnalysisReport := vAnalysisReport || + 'FOR COLUMN ORDER:' || cgBL || + '• Reorder CSV columns to match template: ' || vExpectedOrder || cgBL || + '• Or update template table column order to match CSV file' || cgBL || cgBL; + END IF; + + -- General solutions + vAnalysisReport := vAnalysisReport || + 'GENERAL RECOMMENDATIONS:' || cgBL || + '• Ensure CSV has exactly ' 
|| vTemplateColCount || ' columns' || cgBL || + '• Verify column names match template table exactly' || cgBL || + '• Check data types in each column match expectations' || cgBL || cgBL; + + -- Validation log reference + vAnalysisReport := vAnalysisReport || + 'TECHNICAL DETAILS:' || cgBL || + '---------------------------------------------------' || cgBL || + 'Validation Log Table: ' || pValidationLogTable || cgBL || + 'Template Table: ' || pTemplateSchema || '.' || pTemplateTable || cgBL || + 'CSV File: ' || pCsvFileUri || cgBL || + 'Query validation details: SELECT * FROM ' || pValidationLogTable || ';' || cgBL; + + RETURN vAnalysisReport; + + EXCEPTION + WHEN OTHERS THEN + RETURN 'Error generating validation analysis: ' || SQLERRM || cgBL || + 'Validation Log Table: ' || pValidationLogTable || cgBL || + 'Check table manually: SELECT * FROM ' || pValidationLogTable || ';'; + END ANALYZE_VALIDATION_ERRORS; + + ---------------------------------------------------------------------------------------------------- + -- PACKAGE VERSION MANAGEMENT FUNCTIONS IMPLEMENTATION + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION + RETURN VARCHAR2 + IS + BEGIN + RETURN PACKAGE_VERSION; + END GET_VERSION; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_BUILD_INFO + RETURN VARCHAR2 + IS + BEGIN + RETURN GET_PACKAGE_VERSION_INFO( + pPackageName => 'ENV_MANAGER', + pVersion => PACKAGE_VERSION, + pBuildDate => PACKAGE_BUILD_DATE, + pAuthor => PACKAGE_AUTHOR + ); + END GET_BUILD_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION_HISTORY + RETURN VARCHAR2 + IS + BEGIN + RETURN FORMAT_VERSION_HISTORY( + pPackageName => 'ENV_MANAGER', + pVersionHistory => VERSION_HISTORY + ); + END GET_VERSION_HISTORY; + + 
---------------------------------------------------------------------------------------------------- + + FUNCTION GET_PACKAGE_VERSION_INFO( + pPackageName VARCHAR2, + pVersion VARCHAR2, + pBuildDate VARCHAR2, + pAuthor VARCHAR2 + ) RETURN VARCHAR2 + IS + BEGIN + RETURN 'Package: ' || pPackageName || cgBL || + 'Version: ' || pVersion || cgBL || + 'Build Date: ' || pBuildDate || cgBL || + 'Author: ' || pAuthor; + END GET_PACKAGE_VERSION_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION FORMAT_VERSION_HISTORY( + pPackageName VARCHAR2, + pVersionHistory VARCHAR2 + ) RETURN VARCHAR2 + IS + BEGIN + RETURN pPackageName || ' Version History:' || cgBL || pVersionHistory; + END FORMAT_VERSION_HISTORY; + + ---------------------------------------------------------------------------------------------------- + -- PACKAGE HASH + CHANGE DETECTION FUNCTIONS IMPLEMENTATION + ---------------------------------------------------------------------------------------------------- + + FUNCTION CALCULATE_PACKAGE_HASH( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageType VARCHAR2 + ) RETURN VARCHAR2 + IS + vSourceCode CLOB; + vHash VARCHAR2(64); + vRawHash RAW(32); + BEGIN + -- Build complete source code from ALL_SOURCE using XMLAGG (no 4000 char limit) + -- CRITICAL: Cannot use LISTAGG due to VARCHAR2 limit + SELECT XMLAGG(XMLELEMENT(E, TEXT) ORDER BY LINE).GETCLOBVAL() + INTO vSourceCode + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = UPPER(pPackageType); + + -- If empty, return NULL + IF vSourceCode IS NULL OR DBMS_LOB.GETLENGTH(vSourceCode) = 0 THEN + RETURN NULL; + END IF; + + -- Calculate SHA256 hash directly from CLOB + -- DBMS_CRYPTO.HASH has overload for CLOB in Oracle 19c+ + vRawHash := DBMS_CRYPTO.HASH( + src => vSourceCode, + typ => DBMS_CRYPTO.HASH_SH256 + ); + + -- Convert to hex string + vHash := LOWER(RAWTOHEX(vRawHash)); + + RETURN 
vHash; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN NULL; + WHEN OTHERS THEN + LOG_PROCESS_ERROR('Error calculating package hash: ' || SQLERRM, + 'pPackageOwner=' || pPackageOwner || ', pPackageName=' || pPackageName); + RETURN NULL; + END CALCULATE_PACKAGE_HASH; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE TRACK_PACKAGE_VERSION( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageVersion VARCHAR2, + pPackageBuildDate VARCHAR2, + pPackageAuthor VARCHAR2 + ) + IS + vHashSpec VARCHAR2(64); + vHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLineCountSpec NUMBER; + vLineCountBody NUMBER; + vChangeDetected CHAR(1) := 'N'; + vChangeMessage VARCHAR2(4000); + vParameters VARCHAR2(4000); + BEGIN + vParameters := FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( + 'pPackageOwner => ''' || pPackageOwner || '''', + 'pPackageName => ''' || pPackageName || '''', + 'pPackageVersion => ''' || pPackageVersion || '''' + )); + + LOG_PROCESS_EVENT('Start TRACK_PACKAGE_VERSION', 'INFO', vParameters); + + -- Calculate current hashes + vHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get line counts + BEGIN + SELECT COUNT(*) + INTO vLineCountSpec + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = 'PACKAGE'; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vLineCountSpec := 0; + END; + + BEGIN + SELECT COUNT(*) + INTO vLineCountBody + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = 'PACKAGE BODY'; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vLineCountBody := 0; + END; + + -- Get last tracked version and hashes + BEGIN + SELECT PACKAGE_VERSION, SOURCE_CODE_HASH_SPEC, SOURCE_CODE_HASH_BODY + INTO vLastVersion, vLastHashSpec, vLastHashBody + FROM 
CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + + -- Check if hash changed but version didn't + IF (vHashSpec != vLastHashSpec OR NVL(vHashBody,'X') != NVL(vLastHashBody,'X')) + AND pPackageVersion = vLastVersion THEN + + vChangeDetected := 'Y'; + vChangeMessage := 'WARNING: Source code changed without version update!' || cgBL || + 'Last Version: ' || vLastVersion || cgBL || + 'Current Version: ' || pPackageVersion || cgBL; + + IF vHashSpec != vLastHashSpec THEN + vChangeMessage := vChangeMessage || + 'SPEC Changed - Hash: ' || SUBSTR(vHashSpec, 1, 16) || '... (was: ' || + SUBSTR(vLastHashSpec, 1, 16) || '...)' || cgBL; + END IF; + + IF NVL(vHashBody,'X') != NVL(vLastHashBody,'X') THEN + vChangeMessage := vChangeMessage || + 'BODY Changed - Hash: ' || SUBSTR(vHashBody, 1, 16) || '... (was: ' || + SUBSTR(NVL(vLastHashBody,'NULL'), 1, 16) || '...)' || cgBL; + END IF; + + vChangeMessage := vChangeMessage || + 'RECOMMENDATION: Update PACKAGE_VERSION constant and PACKAGE_BUILD_DATE'; + + LOG_PROCESS_EVENT(vChangeMessage, 'WARNING', vParameters); + END IF; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- First time tracking this package + vChangeDetected := 'N'; + vChangeMessage := 'First tracking record for this package'; + LOG_PROCESS_EVENT(vChangeMessage, 'INFO', vParameters); + END; + + -- Insert tracking record + INSERT INTO CT_MRDS.A_PACKAGE_VERSION_TRACKING ( + PACKAGE_OWNER, + PACKAGE_NAME, + PACKAGE_TYPE, + PACKAGE_VERSION, + PACKAGE_BUILD_DATE, + PACKAGE_AUTHOR, + SOURCE_CODE_HASH_SPEC, + SOURCE_CODE_HASH_BODY, + LINE_COUNT_SPEC, + LINE_COUNT_BODY, + DETECTED_CHANGE_WITHOUT_VERSION, + CHANGE_DETECTION_MESSAGE + ) VALUES ( + UPPER(pPackageOwner), + UPPER(pPackageName), + 'BOTH', + pPackageVersion, + pPackageBuildDate, + pPackageAuthor, + vHashSpec, + vHashBody, + vLineCountSpec, + vLineCountBody, + vChangeDetected, + vChangeMessage + ); 
+ + COMMIT; + + LOG_PROCESS_EVENT('End TRACK_PACKAGE_VERSION - Record inserted', 'INFO', vParameters); + + EXCEPTION + WHEN OTHERS THEN + LOG_PROCESS_ERROR('Error in TRACK_PACKAGE_VERSION: ' || SQLERRM, vParameters); + RAISE; + END TRACK_PACKAGE_VERSION; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION CHECK_PACKAGE_CHANGES( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2 + IS + vCurrentHashSpec VARCHAR2(64); + vCurrentHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLastTrackingDate TIMESTAMP; + vChangeReport VARCHAR2(4000); + vSpecChanged BOOLEAN := FALSE; + vBodyChanged BOOLEAN := FALSE; + BEGIN + -- Get current hashes + vCurrentHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vCurrentHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get last tracked hashes + BEGIN + SELECT PACKAGE_VERSION, SOURCE_CODE_HASH_SPEC, SOURCE_CODE_HASH_BODY, TRACKING_DATE + INTO vLastVersion, vLastHashSpec, vLastHashBody, vLastTrackingDate + FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN 'Package ' || pPackageOwner || '.' || pPackageName || ' has never been tracked.' || cgBL || + 'Run TRACK_PACKAGE_VERSION to establish baseline.'; + END; + + -- Check for changes + IF vCurrentHashSpec != vLastHashSpec THEN + vSpecChanged := TRUE; + END IF; + + IF NVL(vCurrentHashBody, 'X') != NVL(vLastHashBody, 'X') THEN + vBodyChanged := TRUE; + END IF; + + -- Build report + IF vSpecChanged OR vBodyChanged THEN + vChangeReport := 'WARNING: Package ' || pPackageOwner || '.' || pPackageName || ' has changed!' 
|| cgBL || + '========================================' || cgBL || + 'Last Tracked Version: ' || vLastVersion || cgBL || + 'Last Tracked Date: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + cgBL; + + IF vSpecChanged THEN + vChangeReport := vChangeReport || + 'SPECIFICATION Changed:' || cgBL || + ' Current Hash: ' || SUBSTR(vCurrentHashSpec, 1, 16) || '...' || cgBL || + ' Last Hash: ' || SUBSTR(vLastHashSpec, 1, 16) || '...' || cgBL || + cgBL; + END IF; + + IF vBodyChanged THEN + vChangeReport := vChangeReport || + 'BODY Changed:' || cgBL || + ' Current Hash: ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 16) || '...' || cgBL || + ' Last Hash: ' || SUBSTR(NVL(vLastHashBody, 'NULL'), 1, 16) || '...' || cgBL || + cgBL; + END IF; + + vChangeReport := vChangeReport || + 'RECOMMENDATION:' || cgBL || + '1. Update PACKAGE_VERSION constant' || cgBL || + '2. Update PACKAGE_BUILD_DATE constant' || cgBL || + '3. Add entry to VERSION_HISTORY' || cgBL || + '4. Call TRACK_PACKAGE_VERSION to update tracking'; + ELSE + vChangeReport := 'OK: Package ' || pPackageOwner || '.' || pPackageName || ' has not changed.' 
|| cgBL || + 'Last Tracked: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + 'Version: ' || vLastVersion; + END IF; + + RETURN vChangeReport; + + EXCEPTION + WHEN OTHERS THEN + RETURN 'Error checking package changes: ' || SQLERRM; + END CHECK_PACKAGE_CHANGES; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_PACKAGE_HASH_INFO( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2 + IS + vCurrentHashSpec VARCHAR2(64); + vCurrentHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLastTrackingDate TIMESTAMP; + vLastChangeDetected CHAR(1); + vInfo VARCHAR2(4000); + BEGIN + -- Get current hashes + vCurrentHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vCurrentHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get last tracking info + BEGIN + SELECT PACKAGE_VERSION, + SOURCE_CODE_HASH_SPEC, + SOURCE_CODE_HASH_BODY, + TRACKING_DATE, + DETECTED_CHANGE_WITHOUT_VERSION + INTO vLastVersion, vLastHashSpec, vLastHashBody, vLastTrackingDate, vLastChangeDetected + FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN 'Package: ' || pPackageOwner || '.' || pPackageName || cgBL || + 'Status: Never tracked' || cgBL || + 'Current Hash (SPEC): ' || SUBSTR(vCurrentHashSpec, 1, 16) || '...' || cgBL || + 'Current Hash (BODY): ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 16) || '...'; + END; + + -- Build info report + vInfo := 'Package: ' || pPackageOwner || '.' 
|| pPackageName || cgBL || + 'Current Version: ' || vLastVersion || cgBL || + 'Last Tracked: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + cgBL || + 'Current Hash (SPEC): ' || SUBSTR(vCurrentHashSpec, 1, 32) || '...' || cgBL || + 'Last Hash (SPEC): ' || SUBSTR(vLastHashSpec, 1, 32) || '...' || cgBL; + + IF vCurrentHashBody IS NOT NULL OR vLastHashBody IS NOT NULL THEN + vInfo := vInfo || + 'Current Hash (BODY): ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 32) || '...' || cgBL || + 'Last Hash (BODY): ' || SUBSTR(NVL(vLastHashBody, 'NULL'), 1, 32) || '...' || cgBL; + END IF; + + vInfo := vInfo || cgBL; + + -- Status + IF vCurrentHashSpec = vLastHashSpec AND NVL(vCurrentHashBody, 'X') = NVL(vLastHashBody, 'X') THEN + vInfo := vInfo || 'Status: OK - No changes detected'; + ELSE + vInfo := vInfo || 'Status: CHANGED - Source code modified since last tracking'; + END IF; + + IF vLastChangeDetected = 'Y' THEN + vInfo := vInfo || cgBL || 'Last Tracking Warning: Change detected without version update'; + END IF; + + RETURN vInfo; + + EXCEPTION + WHEN OTHERS THEN + RETURN 'Error getting package hash info: ' || SQLERRM; + END GET_PACKAGE_HASH_INFO; + + ---------------------------------------------------------------------------------------------------- + +BEGIN + INIT_ERRORS; + guid := sys_guid(); + gvUsername := SYS_CONTEXT('USERENV', 'SESSION_USER'); + gvOsuser := SYS_CONTEXT('USERENV', 'OS_USER'); + gvMachine := SYS_CONTEXT('USERENV', 'HOST'); + gvModule := SYS_CONTEXT('USERENV', 'MODULE'); + + -- Get info about EnvironmentID. Without it package cannot proceed further. + -- Information about environment is needed to get proper configuration values + -- It can be set up in two different ways : + -- 1. Set it on session level: execute DBMS_SESSION.SET_IDENTIFIER (client_id => 'dev'); + -- 2. 
Set it on configuration level: Insert into CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID,CONFIG_VARIABLE,CONFIG_VARIABLE_VALUE) values ('default','environment_id','dev'); + -- Session level setup (1.) takes precedence over configuration level one (2.) + + gvEnv := nvl(SYS_CONTEXT ('USERENV', 'CLIENT_IDENTIFIER'), GET_DEFAULT_ENV()); + if gvEnv is null then + dbms_output.put_line(MSG_ENVIRONMENT_NOT_SET); + LOG_PROCESS_EVENT(MSG_ENVIRONMENT_NOT_SET, 'ERROR'); + RAISE_APPLICATION_ERROR(CODE_ENVIRONMENT_NOT_SET, MSG_ENVIRONMENT_NOT_SET); + else + dbms_output.put_line('EnvironmentID set to: '||gvEnv); + end if; + + INIT_VARIABLES(pEnv => gvEnv); +END ENV_MANAGER; + +/ + +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/ENV_MANAGER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/ENV_MANAGER.pkg new file mode 100644 index 0000000..fded944 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.7.5/ENV_MANAGER.pkg @@ -0,0 +1,625 @@ +create or replace PACKAGE CT_MRDS.ENV_MANAGER +AUTHID CURRENT_USER +AS + /** + * General comment for package: Please put comments for functions and procedures as shown in below example. + * It is a standard. + * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function + * which returns documentation text for confluence page (to Copy-Paste it). 
+ **/ + + -- Example comment: + /** + * @name EX_PROCEDURE_NAME + * @desc Procedure description + * @example select ENV_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual; + * @ex_rslt Example Result + **/ + + -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.2.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-12-20 10:00:00'; + PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; + + -- Version History (Latest changes first) + VERSION_HISTORY CONSTANT VARCHAR2(4000) := + '3.2.0 (2025-12-20): Added error codes for parallel execution support (CODE_INVALID_PARALLEL_DEGREE -20110, CODE_PARALLEL_EXECUTION_FAILED -20111)' || CHR(13)||CHR(10) || + '3.1.0 (2025-10-22): Added package hash tracking and automatic change detection system (SHA256 hashing)' || CHR(13)||CHR(10) || + '3.0.0 (2025-10-22): Added package versioning system with centralized version management functions' || CHR(13)||CHR(10) || + '2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function for comprehensive CSV validation analysis' || CHR(13)||CHR(10) || + '2.0.0 (2025-10-01): Added LOG_PROCESS_ERROR procedure with enhanced error diagnostics and stack traces' || CHR(13)||CHR(10) || + '1.5.0 (2025-09-20): Added console logging support with gvConsoleLoggingEnabled configuration' || CHR(13)||CHR(10) || + '1.0.0 (2025-09-01): Initial release with error management and configuration system'; + + TYPE Error_Record IS RECORD ( + code PLS_INTEGER, + message VARCHAR2(4000) + ); + + TYPE tErrorList IS TABLE OF Error_Record INDEX BY PLS_INTEGER; + + Errors tErrorList; + + + guid VARCHAR2(32); + gvEnv VARCHAR2(200); + gvUsername VARCHAR2(128); + gvOsuser VARCHAR2(128); + gvMachine VARCHAR2(64); + gvModule VARCHAR2(64); + + gvNameSpace VARCHAR2(200); + gvRegion VARCHAR2(200); + gvDataBucketName VARCHAR2(200); + gvInboxBucketName VARCHAR2(200); + gvArchiveBucketName VARCHAR2(200); + gvDataBucketUri VARCHAR2(200); + 
gvInboxBucketUri VARCHAR2(200); + gvArchiveBucketUri VARCHAR2(200); + gvCredentialName VARCHAR2(200); + + -- Overwritten by variable "LoggingEnabled" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvLoggingEnabled VARCHAR2(3) := 'ON'; -- 'ON' or 'OFF' + + -- Overwritten by variable "MinLogLevel" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + -- Possible values: DEBUG ,INFO ,WARNING ,ERROR + gvMinLogLevel VARCHAR2(10) := 'DEBUG'; + + -- Overwritten by variable "DefaultDateFormat" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvDefaultDateFormat VARCHAR2(200) := 'DD/MM/YYYY HH24:MI:SS'; + + -- Overwritten by variable "ConsoleLoggingEnabled" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvConsoleLoggingEnabled VARCHAR2(3) := 'ON'; -- 'ON' or 'OFF' + + cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); + + vgSourceFileConfigKey PLS_INTEGER; + + vgMsgTmp VARCHAR2(32000); + --Exceptions + ERR_EMPTY_FILEURI_AND_RECKEY EXCEPTION; + CODE_EMPTY_FILEURI_AND_RECKEY CONSTANT PLS_INTEGER := -20001; + MSG_EMPTY_FILEURI_AND_RECKEY VARCHAR2(4000) := 'Either pFileUri or pSourceFileReceivedKey must be not null'; + PRAGMA EXCEPTION_INIT( ERR_EMPTY_FILEURI_AND_RECKEY + ,CODE_EMPTY_FILEURI_AND_RECKEY); + + + ERR_NO_CONFIG_MATCH_FOR_FILEURI EXCEPTION; + CODE_NO_CONFIG_MATCH_FOR_FILEURI CONSTANT PLS_INTEGER := -20002; + MSG_NO_CONFIG_MATCH_FOR_FILEURI VARCHAR2(4000) := 'No match for source file in A_SOURCE_FILE_CONFIG table' + ||cgBL||' The file provided in parameter: pFileUri does not have ' + ||cgBL||' corresponding configuration in A_SOURCE_FILE_CONFIG table'; + PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_MATCH_FOR_FILEURI + ,CODE_NO_CONFIG_MATCH_FOR_FILEURI); + + ERR_MULTIPLE_MATCH_FOR_SRCFILE EXCEPTION; + CODE_MULTIPLE_MATCH_FOR_SRCFILE CONSTANT PLS_INTEGER := -20003; + MSG_MULTIPLE_MATCH_FOR_SRCFILE VARCHAR2(4000) := 'Multiple matches for source file in A_SOURCE_FILE_CONFIG table'; + PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_MATCH_FOR_SRCFILE + ,CODE_MULTIPLE_MATCH_FOR_SRCFILE); + + 
ERR_MISSING_COLUMN_DATE_FORMAT EXCEPTION; + CODE_MISSING_COLUMN_DATE_FORMAT CONSTANT PLS_INTEGER := -20004; + MSG_MISSING_COLUMN_DATE_FORMAT VARCHAR2(4000) := 'Missing entry in config table: A_COLUMN_DATE_FORMAT primary key(TEMPLATE_TABLE_NAME, COLUMN_NAME)' + ||cgBL||' Remember: each column which data_type IN (''DATE'', ''TIMESTAMP'')' + ||cgBL||' should have DateFormat specified in A_COLUMN_DATE_FORMAT table ' + ||cgBL||' for example: ''YYYY-MM-DD'''; + PRAGMA EXCEPTION_INIT( ERR_MISSING_COLUMN_DATE_FORMAT + ,CODE_MISSING_COLUMN_DATE_FORMAT); + + ERR_MULTIPLE_COLUMN_DATE_FORMAT EXCEPTION; + CODE_MULTIPLE_COLUMN_DATE_FORMAT CONSTANT PLS_INTEGER := -20005; + MSG_MULTIPLE_COLUMN_DATE_FORMAT VARCHAR2(4000) := 'Multiple records for date format in A_COLUMN_DATE_FORMAT table' + ||cgBL||' There should be only one format specified for each DATE/TIMESTAMP column'; + PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_COLUMN_DATE_FORMAT + ,CODE_MULTIPLE_COLUMN_DATE_FORMAT); + + + ERR_DIDNT_GET_LOAD_OPERATION_ID EXCEPTION; + CODE_DIDNT_GET_LOAD_OPERATION_ID CONSTANT PLS_INTEGER := -20006; + MSG_DIDNT_GET_LOAD_OPERATION_ID VARCHAR2(4000) := 'Did not get load operation id from external table validation'; + PRAGMA EXCEPTION_INIT( ERR_DIDNT_GET_LOAD_OPERATION_ID + ,CODE_DIDNT_GET_LOAD_OPERATION_ID); + + ERR_NO_CONFIG_FOR_RECEIVED_FILE EXCEPTION; + CODE_NO_CONFIG_FOR_RECEIVED_FILE CONSTANT PLS_INTEGER := -20007; + MSG_NO_CONFIG_FOR_RECEIVED_FILE VARCHAR2(4000) := 'No match for received source file in A_SOURCE_FILE_CONFIG ' + ||cgBL||' or missing data in A_SOURCE_FILE_RECEIVED table for provided pSourceFileReceivedKey parameter'; + PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_FOR_RECEIVED_FILE + ,CODE_NO_CONFIG_FOR_RECEIVED_FILE); + + ERR_MULTI_CONFIG_FOR_RECEIVED_FILE EXCEPTION; + CODE_MULTI_CONFIG_FOR_RECEIVED_FILE CONSTANT PLS_INTEGER := -20008; + MSG_MULTI_CONFIG_FOR_RECEIVED_FILE VARCHAR2(4000) := 'Multiple matches for received source file in A_SOURCE_FILE_CONFIG'; + PRAGMA EXCEPTION_INIT( 
ERR_MULTI_CONFIG_FOR_RECEIVED_FILE + ,CODE_MULTI_CONFIG_FOR_RECEIVED_FILE); + + ERR_FILE_NOT_FOUND_ON_CLOUD EXCEPTION; + CODE_FILE_NOT_FOUND_ON_CLOUD CONSTANT PLS_INTEGER := -20009; + MSG_FILE_NOT_FOUND_ON_CLOUD VARCHAR2(4000) := 'File not found on the cloud'; + PRAGMA EXCEPTION_INIT( ERR_FILE_NOT_FOUND_ON_CLOUD + ,CODE_FILE_NOT_FOUND_ON_CLOUD); + + ERR_FILE_VALIDATION_FAILED EXCEPTION; + CODE_FILE_VALIDATION_FAILED CONSTANT PLS_INTEGER := -20010; + MSG_FILE_VALIDATION_FAILED VARCHAR2(4000) := 'File validation failed'; + PRAGMA EXCEPTION_INIT( ERR_FILE_VALIDATION_FAILED + ,CODE_FILE_VALIDATION_FAILED); + + ERR_EXCESS_COLUMNS_DETECTED EXCEPTION; + CODE_EXCESS_COLUMNS_DETECTED CONSTANT PLS_INTEGER := -20011; + MSG_EXCESS_COLUMNS_DETECTED VARCHAR2(4000) := 'CSV file contains more columns than template allows'; + PRAGMA EXCEPTION_INIT( ERR_EXCESS_COLUMNS_DETECTED + ,CODE_EXCESS_COLUMNS_DETECTED); + + ERR_NO_CONFIG_MATCH EXCEPTION; + CODE_NO_CONFIG_MATCH CONSTANT PLS_INTEGER := -20012; + MSG_NO_CONFIG_MATCH VARCHAR2(4000) := 'No match for specified parameters in A_SOURCE_FILE_CONFIG table'; + PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_MATCH + ,CODE_NO_CONFIG_MATCH); + + ERR_UNKNOWN_PREFIX EXCEPTION; + CODE_UNKNOWN_PREFIX CONSTANT PLS_INTEGER := -20013; + MSG_UNKNOWN_PREFIX VARCHAR2(4000) := 'Unknown prefix'; + PRAGMA EXCEPTION_INIT( ERR_UNKNOWN_PREFIX + ,CODE_UNKNOWN_PREFIX); + + ERR_TABLE_NOT_EXISTS EXCEPTION; + CODE_TABLE_NOT_EXISTS CONSTANT PLS_INTEGER := -20014; + MSG_TABLE_NOT_EXISTS VARCHAR2(4000) := 'Table does not exist'; + PRAGMA EXCEPTION_INIT( ERR_TABLE_NOT_EXISTS + ,CODE_TABLE_NOT_EXISTS); + + ERR_COLUMN_NOT_EXISTS EXCEPTION; + CODE_COLUMN_NOT_EXISTS CONSTANT PLS_INTEGER := -20015; + MSG_COLUMN_NOT_EXISTS VARCHAR2(4000) := 'Column does not exist in table'; + PRAGMA EXCEPTION_INIT( ERR_COLUMN_NOT_EXISTS + ,CODE_COLUMN_NOT_EXISTS); + + ERR_UNSUPPORTED_DATA_TYPE EXCEPTION; + CODE_UNSUPPORTED_DATA_TYPE CONSTANT PLS_INTEGER := -20016; + MSG_UNSUPPORTED_DATA_TYPE 
VARCHAR2(4000) := 'Unsupported data type'; + PRAGMA EXCEPTION_INIT( ERR_UNSUPPORTED_DATA_TYPE + ,CODE_UNSUPPORTED_DATA_TYPE); + + ERR_MISSING_SOURCE_KEY EXCEPTION; + CODE_MISSING_SOURCE_KEY CONSTANT PLS_INTEGER := -20017; + MSG_MISSING_SOURCE_KEY VARCHAR2(4000) := 'The Source was not found in parent table A_SOURCE'; + PRAGMA EXCEPTION_INIT( ERR_MISSING_SOURCE_KEY + ,CODE_MISSING_SOURCE_KEY); + + ERR_NULL_SOURCE_FILE_CONFIG_KEY EXCEPTION; + CODE_NULL_SOURCE_FILE_CONFIG_KEY CONSTANT PLS_INTEGER := -20018; + MSG_NULL_SOURCE_FILE_CONFIG_KEY VARCHAR2(4000) := 'No entry in A_SOURCE_FILE_CONFIG table for specified A_SOURCE_FILE_CONFIG_KEY'; + PRAGMA EXCEPTION_INIT( ERR_NULL_SOURCE_FILE_CONFIG_KEY + ,CODE_NULL_SOURCE_FILE_CONFIG_KEY); + + ERR_DUPLICATED_SOURCE_KEY EXCEPTION; + CODE_DUPLICATED_SOURCE_KEY CONSTANT PLS_INTEGER := -20019; + MSG_DUPLICATED_SOURCE_KEY VARCHAR2(4000) := 'The Source already exists in the A_SOURCE table'; + PRAGMA EXCEPTION_INIT( ERR_DUPLICATED_SOURCE_KEY + ,CODE_DUPLICATED_SOURCE_KEY); + + ERR_MISSING_CONTAINER_CONFIG EXCEPTION; + CODE_MISSING_CONTAINER_CONFIG CONSTANT PLS_INTEGER := -20020; + MSG_MISSING_CONTAINER_CONFIG VARCHAR2(4000) := 'No match in A_SOURCE_FILE_CONFIG table where SOURCE_FILE_TYPE=''CONTAINER'' and specified SOURCE_FILE_ID'; + PRAGMA EXCEPTION_INIT( ERR_MISSING_CONTAINER_CONFIG + ,CODE_MISSING_CONTAINER_CONFIG); + + ERR_MULTIPLE_CONTAINER_ENTRIES EXCEPTION; + CODE_MULTIPLE_CONTAINER_ENTRIES CONSTANT PLS_INTEGER := -20021; + MSG_MULTIPLE_CONTAINER_ENTRIES VARCHAR2(4000) := 'Multiple matches in A_SOURCE_FILE_CONFIG table where SOURCE_FILE_TYPE=''CONTAINER'' and specified SOURCE_FILE_ID'; + PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_CONTAINER_ENTRIES + ,CODE_MULTIPLE_CONTAINER_ENTRIES); + + ERR_WRONG_DESTINATION_PARAM EXCEPTION; + CODE_WRONG_DESTINATION_PARAM CONSTANT PLS_INTEGER := -20022; + MSG_WRONG_DESTINATION_PARAM VARCHAR2(4000) := 'Wrong destination parameter provided.'; + PRAGMA EXCEPTION_INIT( ERR_WRONG_DESTINATION_PARAM + 
,CODE_WRONG_DESTINATION_PARAM); + + ERR_FILE_NOT_EXISTS_ON_CLOUD EXCEPTION; + CODE_FILE_NOT_EXISTS_ON_CLOUD CONSTANT PLS_INTEGER := -20023; + MSG_FILE_NOT_EXISTS_ON_CLOUD VARCHAR2(4000) := 'File not exists on cloud.'; + PRAGMA EXCEPTION_INIT( ERR_FILE_NOT_EXISTS_ON_CLOUD + ,CODE_FILE_NOT_EXISTS_ON_CLOUD); + + ERR_FILE_ALREADY_REGISTERED EXCEPTION; + CODE_FILE_ALREADY_REGISTERED CONSTANT PLS_INTEGER := -20024; + MSG_FILE_ALREADY_REGISTERED VARCHAR2(4000) := 'File already registered in A_SOURCE_FILE_RECEIVED table.'; + PRAGMA EXCEPTION_INIT( ERR_FILE_ALREADY_REGISTERED + ,CODE_FILE_ALREADY_REGISTERED); + + ERR_WRONG_DATE_TIMESTAMP_FORMAT EXCEPTION; + CODE_WRONG_DATE_TIMESTAMP_FORMAT CONSTANT PLS_INTEGER := -20025; + MSG_WRONG_DATE_TIMESTAMP_FORMAT VARCHAR2(4000) := 'Provided DATE or TIMESTAMP format has errors (possible duplicated codes, ex: ''DD'').'; + PRAGMA EXCEPTION_INIT( ERR_WRONG_DATE_TIMESTAMP_FORMAT + ,CODE_WRONG_DATE_TIMESTAMP_FORMAT); + + ERR_ENVIRONMENT_NOT_SET EXCEPTION; + CODE_ENVIRONMENT_NOT_SET CONSTANT PLS_INTEGER := -20026; + MSG_ENVIRONMENT_NOT_SET VARCHAR2(4000) := 'EnvironmentID not set' + ||cgBL||' Information about environment is needed to get proper configuration values.' + ||cgBL||' It can be set up in two different ways:' + ||cgBL||' 1. Set it on session level: execute DBMS_SESSION.SET_IDENTIFIER (client_id => ''dev'')' + ||cgBL||' 2. Set it on configuration level: Insert into CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID,CONFIG_VARIABLE,CONFIG_VARIABLE_VALUE) values (''default'',''environment_id'',''dev'')' + ||cgBL||' Session level setup (1.) 
takes precedence over configuration level one (2.)' + ; + PRAGMA EXCEPTION_INIT( ERR_ENVIRONMENT_NOT_SET + ,CODE_ENVIRONMENT_NOT_SET); + + + ERR_CONFIG_VARIABLE_NOT_SET EXCEPTION; + CODE_CONFIG_VARIABLE_NOT_SET CONSTANT PLS_INTEGER := -20027; + MSG_CONFIG_VARIABLE_NOT_SET VARCHAR2(4000) := 'Missing configuration value in A_FILE_MANAGER_CONFIG'; + PRAGMA EXCEPTION_INIT( ERR_CONFIG_VARIABLE_NOT_SET + ,CODE_CONFIG_VARIABLE_NOT_SET); + + ERR_NOT_INPUT_SOURCE_FILE_TYPE EXCEPTION; + CODE_NOT_INPUT_SOURCE_FILE_TYPE CONSTANT PLS_INTEGER := -20028; + MSG_NOT_INPUT_SOURCE_FILE_TYPE VARCHAR2(4000) := 'Archival can be executed only for A_SOURCE_FILE_CONFIG_KEY where SOURCE_FILE_TYPE=''INPUT'''; + PRAGMA EXCEPTION_INIT( ERR_NOT_INPUT_SOURCE_FILE_TYPE + ,CODE_NOT_INPUT_SOURCE_FILE_TYPE); + + ERR_EXP_DATA_FOR_ARCH_FAILED EXCEPTION; + CODE_EXP_DATA_FOR_ARCH_FAILED CONSTANT PLS_INTEGER := -20029; + MSG_EXP_DATA_FOR_ARCH_FAILED VARCHAR2(4000) := 'Export data for archival failed.'; + PRAGMA EXCEPTION_INIT( ERR_EXP_DATA_FOR_ARCH_FAILED + ,CODE_EXP_DATA_FOR_ARCH_FAILED); + + ERR_RESTORE_FILE_FROM_TRASH EXCEPTION; + CODE_RESTORE_FILE_FROM_TRASH CONSTANT PLS_INTEGER := -20030; + MSG_RESTORE_FILE_FROM_TRASH VARCHAR2(4000) := 'Unexpected issues occurred during archival process. 
Restoration of exported files failed.'; + PRAGMA EXCEPTION_INIT( ERR_RESTORE_FILE_FROM_TRASH + ,CODE_RESTORE_FILE_FROM_TRASH); + + ERR_CHANGE_STAT_TO_ARCHIVED_FAILED EXCEPTION; + CODE_CHANGE_STAT_TO_ARCHIVED_FAILED CONSTANT PLS_INTEGER := -20031; + MSG_CHANGE_STAT_TO_ARCHIVED_FAILED VARCHAR2(4000) := 'Failed to change file status to: ARCHIVED in A_SOURCE_FILE_RECEIVED table.'; + PRAGMA EXCEPTION_INIT( ERR_CHANGE_STAT_TO_ARCHIVED_FAILED + ,CODE_CHANGE_STAT_TO_ARCHIVED_FAILED); + + ERR_MOVE_FILE_TO_TRASH_FAILED EXCEPTION; + CODE_MOVE_FILE_TO_TRASH_FAILED CONSTANT PLS_INTEGER := -20032; + MSG_MOVE_FILE_TO_TRASH_FAILED VARCHAR2(4000) := 'FAILED to move file to TRASH before DROPPING it.'; + PRAGMA EXCEPTION_INIT( ERR_MOVE_FILE_TO_TRASH_FAILED + ,CODE_MOVE_FILE_TO_TRASH_FAILED); + + ERR_DROP_EXPORTED_FILES_FAILED EXCEPTION; + CODE_DROP_EXPORTED_FILES_FAILED CONSTANT PLS_INTEGER := -20033; + MSG_DROP_EXPORTED_FILES_FAILED VARCHAR2(4000) := 'FAILED to DROP exported files.'; + PRAGMA EXCEPTION_INIT( ERR_DROP_EXPORTED_FILES_FAILED + ,CODE_DROP_EXPORTED_FILES_FAILED); + + ERR_INVALID_BUCKET_AREA EXCEPTION; + CODE_INVALID_BUCKET_AREA CONSTANT PLS_INTEGER := -20034; + MSG_INVALID_BUCKET_AREA VARCHAR2(4000) := 'Invalid bucket area specified. Valid values: INBOX, ODS, DATA, ARCHIVE'; + PRAGMA EXCEPTION_INIT( ERR_INVALID_BUCKET_AREA + ,CODE_INVALID_BUCKET_AREA); + + ERR_INVALID_PARALLEL_DEGREE EXCEPTION; + CODE_INVALID_PARALLEL_DEGREE CONSTANT PLS_INTEGER := -20110; + MSG_INVALID_PARALLEL_DEGREE VARCHAR2(4000) := 'Invalid parallel degree parameter. 
Must be between 1 and 16'; + PRAGMA EXCEPTION_INIT( ERR_INVALID_PARALLEL_DEGREE + ,CODE_INVALID_PARALLEL_DEGREE); + + ERR_PARALLEL_EXECUTION_FAILED EXCEPTION; + CODE_PARALLEL_EXECUTION_FAILED CONSTANT PLS_INTEGER := -20111; + MSG_PARALLEL_EXECUTION_FAILED VARCHAR2(4000) := 'Parallel execution failed'; + PRAGMA EXCEPTION_INIT( ERR_PARALLEL_EXECUTION_FAILED + ,CODE_PARALLEL_EXECUTION_FAILED); + + ERR_UNKNOWN EXCEPTION; + CODE_UNKNOWN CONSTANT PLS_INTEGER := -20999; + MSG_UNKNOWN VARCHAR2(4000) := 'Unknown Error Occured'; + PRAGMA EXCEPTION_INIT( ERR_UNKNOWN + ,CODE_UNKNOWN); + + --------------------------------------------------------------------------------------------------------------------------- + --------------------------------------------------------------------------------------------------------------------------- + + + + + /** + * @name LOG_PROCESS_EVENT + * @desc Insert a new log record into A_PROCESS_LOG table. + * Also outputs to console if gvConsoleLoggingEnabled = 'ON'. + * Respects logging level configuration (gvMinLogLevel). + * @example ENV_MANAGER.LOG_PROCESS_EVENT('Process completed successfully', 'INFO', 'pParam1=value1'); + * @ex_rslt Record inserted into A_PROCESS_LOG table and optionally displayed in console output + **/ + PROCEDURE LOG_PROCESS_EVENT ( + pLogMessage VARCHAR2 + ,pLogLevel VARCHAR2 DEFAULT 'ERROR' + ,pParameters VARCHAR2 DEFAULT NULL + ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER' + ); + + /** + * @name LOG_PROCESS_ERROR + * @desc Insert a detailed error record into A_PROCESS_LOG table with full stack trace, backtrace, and call stack. + * This procedure captures comprehensive error information for debugging purposes while + * allowing clean user-facing error messages to be raised separately. 
+ * @param pLogMessage - Base error message description + * @param pParameters - Procedure parameters for context + * @param pProcessName - Name of the calling process/package + * @ex_rslt Record inserted into A_PROCESS_LOG table with complete error stack information + */ + PROCEDURE LOG_PROCESS_ERROR ( + pLogMessage VARCHAR2 + ,pParameters VARCHAR2 DEFAULT NULL + ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER' + ); + + /** + * @name INIT_ERRORS + * @desc Loads data into Errors array. + * Errors array is a list of Record(Error_Code, Error_Message) index by Error_Code. + * Called automatically during package initialization. + * @example Called automatically when package is first referenced + * @ex_rslt Errors array populated with all error codes and messages + **/ + PROCEDURE INIT_ERRORS; + + + + /** + * @name GET_DEFAULT_ENV + * @desc It returns string with name of default environment. + * Return string is A_FILE_MANAGER_CONFIG.ENVIRONMENT_ID value. + * @example select ENV_MANAGER.GET_DEFAULT_ENV() from dual; + * @ex_rslt dev + **/ + FUNCTION GET_DEFAULT_ENV + RETURN VARCHAR2; + + + + /** + * @name INIT_VARIABLES + * @desc For specified pEnv parameter (A_FILE_MANAGER_CONFIG.ENVIRONMENT_ID) + * Assign values to following global package variables: + * - gvNameSpace + * - gvRegion + * - gvCredentialName + * - gvInboxBucketName + * - gvDataBucketName + * - gvArchiveBucketName + * - gvInboxBucketUri + * - gvDataBucketUri + * - gvArchiveBucketUri + * - gvLoggingEnabled + * - gvMinLogLevel + * - gvDefaultDateFormat + * - gvConsoleLoggingEnabled + **/ + PROCEDURE INIT_VARIABLES( + pEnv VARCHAR2 + ); + + + + /** + * @name GET_ERROR_MESSAGE + * @desc It returns string with error message for specified pCode (Error_Code). 
+ * Error message is take from Errors Array loaded by INIT_ERRORS procedure + * @example select ENV_MANAGER.GET_ERROR_MESSAGE(pCode => -20009) from dual; + * @ex_rslt File not found on the cloud + **/ + FUNCTION GET_ERROR_MESSAGE( + pCode PLS_INTEGER + ) RETURN VARCHAR2; + + + + /** + * @name GET_ERROR_STACK + * @desc It returns string with all possible error stack info. + * Error message is take from Errors Array loaded by INIT_ERRORS procedure + * @example + * select ENV_MANAGER.GET_ERROR_STACK( + * pFormat => 'OUTPUT' + * ,pCode => -20009 + * ,pSourceFileReceivedKey => NULL) + * from dual + * @ex_rslt + * ------------------------------------------------------+ + * Error Message: + * ORA-0000: normal, successful completion + * ------------------------------------------------------- + * Error Stack: + * ------------------------------------------------------- + * Error Backtrace: + * ------------------------------------------------------+ + **/ + FUNCTION GET_ERROR_STACK( + pFormat VARCHAR2 + ,pCode PLS_INTEGER + ,pSourceFileReceivedKey CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE DEFAULT NULL + ) RETURN VARCHAR2; + + /** + * @name FORMAT_PARAMETERS + * @desc Formats parameter list for logging purposes. + * Converts SYS.ODCIVARCHAR2LIST to formatted string with proper NULL handling. + * @example select ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('param1=value1', 'param2=NULL')) from dual; + * @ex_rslt param1=value1 , + * param2=NULL + **/ + FUNCTION FORMAT_PARAMETERS( + pParameterList SYS.ODCIVARCHAR2LIST + ) RETURN VARCHAR2; + + /** + * @name ANALYZE_VALIDATION_ERRORS + * @desc Analyzes CSV validation errors and generates detailed diagnostic report. + * Compares CSV structure with template table and provides specific error analysis. + * Includes suggested solutions for common validation issues. 
+ * @param pValidationLogTable - Name of validation log table (e.g., VALIDATE$242_LOG) + * @param pTemplateSchema - Schema of template table (e.g., CT_ET_TEMPLATES) + * @param pTemplateTable - Name of template table (e.g., MOCK_PROC_TABLE) + * @param pCsvFileUri - URI of CSV file being validated + * @example SELECT ENV_MANAGER.ANALYZE_VALIDATION_ERRORS('VALIDATE$242_LOG', 'CT_ET_TEMPLATES', 'MOCK_PROC_TABLE', 'https://...') FROM DUAL; + * @ex_rslt Detailed validation analysis report with column mismatches and solutions + **/ + FUNCTION ANALYZE_VALIDATION_ERRORS( + pValidationLogTable VARCHAR2, + pTemplateSchema VARCHAR2, + pTemplateTable VARCHAR2, + pCsvFileUri VARCHAR2 + ) RETURN VARCHAR2; + + --------------------------------------------------------------------------------------------------------------------------- + -- PACKAGE VERSION MANAGEMENT FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name GET_VERSION + * @desc Returns the current version number of the ENV_MANAGER package. + * Uses semantic versioning format (MAJOR.MINOR.PATCH). + * @example SELECT ENV_MANAGER.GET_VERSION() FROM DUAL; + * @ex_rslt 3.0.0 + **/ + FUNCTION GET_VERSION RETURN VARCHAR2; + + /** + * @name GET_BUILD_INFO + * @desc Returns comprehensive build information including version, build date, and author. + * Formatted for display in logs or monitoring systems. + * @example SELECT ENV_MANAGER.GET_BUILD_INFO() FROM DUAL; + * @ex_rslt Package: ENV_MANAGER + * Version: 3.0.0 + * Build Date: 2025-10-22 16:00:00 + * Author: Grzegorz Michalski + **/ + FUNCTION GET_BUILD_INFO RETURN VARCHAR2; + + /** + * @name GET_VERSION_HISTORY + * @desc Returns complete version history with all releases and changes. + * Shows evolution of package features over time. 
+ * @example SELECT ENV_MANAGER.GET_VERSION_HISTORY() FROM DUAL; + * @ex_rslt ENV_MANAGER Version History: + * 3.0.0 (2025-10-22): Added package versioning system... + * 2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function... + **/ + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; + + /** + * @name GET_PACKAGE_VERSION_INFO + * @desc Universal function to get formatted version information for any package. + * This centralized function is used by all packages in the system. + * @param pPackageName - Name of the package + * @param pVersion - Version string (MAJOR.MINOR.PATCH format) + * @param pBuildDate - Build date timestamp + * @param pAuthor - Package author name + * @example SELECT ENV_MANAGER.GET_PACKAGE_VERSION_INFO('FILE_MANAGER', '2.1.0', '2025-10-22 15:00:00', 'Grzegorz Michalski') FROM DUAL; + * @ex_rslt Package: FILE_MANAGER + * Version: 2.1.0 + * Build Date: 2025-10-22 15:00:00 + * Author: Grzegorz Michalski + **/ + FUNCTION GET_PACKAGE_VERSION_INFO( + pPackageName VARCHAR2, + pVersion VARCHAR2, + pBuildDate VARCHAR2, + pAuthor VARCHAR2 + ) RETURN VARCHAR2; + + /** + * @name FORMAT_VERSION_HISTORY + * @desc Universal function to format version history for any package. + * Adds package name header and proper formatting. + * @param pPackageName - Name of the package + * @param pVersionHistory - Complete version history text + * @example SELECT ENV_MANAGER.FORMAT_VERSION_HISTORY('FILE_MANAGER', '2.1.0 (2025-10-22): Export procedures...') FROM DUAL; + * @ex_rslt FILE_MANAGER Version History: + * 2.1.0 (2025-10-22): Export procedures... 
+ **/ + FUNCTION FORMAT_VERSION_HISTORY( + pPackageName VARCHAR2, + pVersionHistory VARCHAR2 + ) RETURN VARCHAR2; + + --------------------------------------------------------------------------------------------------------------------------- + -- PACKAGE HASH + CHANGE DETECTION FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name CALCULATE_PACKAGE_HASH + * @desc Calculates SHA256 hash of package source code from ALL_SOURCE. + * Returns hash for both SPEC and BODY (if exists). + * Used for automatic change detection. + * @param pPackageOwner - Schema owner of the package + * @param pPackageName - Name of the package + * @param pPackageType - Type of package code ('PACKAGE' for SPEC, 'PACKAGE BODY' for BODY) + * @example SELECT ENV_MANAGER.CALCULATE_PACKAGE_HASH('CT_MRDS', 'FILE_MANAGER', 'PACKAGE') FROM DUAL; + * @ex_rslt A7B3C5D9E8F1234567890ABCDEF... (64-character SHA256 hash) + **/ + FUNCTION CALCULATE_PACKAGE_HASH( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageType VARCHAR2 -- 'PACKAGE' or 'PACKAGE BODY' + ) RETURN VARCHAR2; + + /** + * @name TRACK_PACKAGE_VERSION + * @desc Records package version and source code hash in A_PACKAGE_VERSION_TRACKING table. + * Automatically detects if source code changed without version update. + * Should be called after every package deployment. 
+ * @param pPackageOwner - Schema owner of the package + * @param pPackageName - Name of the package + * @param pPackageVersion - Current version from PACKAGE_VERSION constant + * @param pPackageBuildDate - Build date from PACKAGE_BUILD_DATE constant + * @param pPackageAuthor - Author from PACKAGE_AUTHOR constant + * @example EXEC ENV_MANAGER.TRACK_PACKAGE_VERSION('CT_MRDS', 'FILE_MANAGER', '3.2.0', '2025-10-22 16:30:00', 'Grzegorz Michalski'); + * @ex_rslt Record inserted into A_PACKAGE_VERSION_TRACKING with change detection status + **/ + PROCEDURE TRACK_PACKAGE_VERSION( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageVersion VARCHAR2, + pPackageBuildDate VARCHAR2, + pPackageAuthor VARCHAR2 + ); + + /** + * @name CHECK_PACKAGE_CHANGES + * @desc Checks if package source code has changed since last tracking. + * Compares current hash with last recorded hash in A_PACKAGE_VERSION_TRACKING. + * Returns detailed change detection report. + * @param pPackageOwner - Schema owner of the package + * @param pPackageName - Name of the package + * @example SELECT ENV_MANAGER.CHECK_PACKAGE_CHANGES('CT_MRDS', 'FILE_MANAGER') FROM DUAL; + * @ex_rslt WARNING: Package changed without version update! + * Last Version: 3.2.0 + * Current Hash (SPEC): A7B3C5D9... + * Last Hash (SPEC): B8C4D6E0... + * RECOMMENDATION: Update PACKAGE_VERSION and PACKAGE_BUILD_DATE + **/ + FUNCTION CHECK_PACKAGE_CHANGES( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2; + + /** + * @name GET_PACKAGE_HASH_INFO + * @desc Returns formatted information about package hash and tracking history. + * Includes current hash, last tracked hash, and change detection status. + * @param pPackageOwner - Schema owner of the package + * @param pPackageName - Name of the package + * @example SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('CT_MRDS', 'FILE_MANAGER') FROM DUAL; + * @ex_rslt Package: CT_MRDS.FILE_MANAGER + * Current Version: 3.2.0 + * Current Hash (SPEC): A7B3C5D9... 
+ * Last Tracked: 2025-10-22 16:30:00 + * Status: OK - No changes detected + **/ + FUNCTION GET_PACKAGE_HASH_INFO( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2; + +END ENV_MANAGER; +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkb new file mode 100644 index 0000000..7a3c90a --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkb @@ -0,0 +1,1669 @@ +create or replace PACKAGE BODY CT_MRDS.DATA_EXPORTER +AS + + ---------------------------------------------------------------------------------------------------- + -- PRIVATE HELPER FUNCTIONS (USED BY MULTIPLE PROCEDURES) + ---------------------------------------------------------------------------------------------------- + + /** + * Sanitizes filename by replacing disallowed characters with underscores + **/ + FUNCTION sanitizeFilename(pFilename IN VARCHAR2) RETURN VARCHAR2 IS + vFilename VARCHAR2(1000); + BEGIN + vFilename := REGEXP_REPLACE(pFilename, '[^a-zA-Z0-9._-]', '_'); + RETURN vFilename; + END sanitizeFilename; + + ---------------------------------------------------------------------------------------------------- + + /** + * Deletes export file from OCI bucket if it exists (used for cleanup before retry) + * Silently ignores if file doesn't exist (ORA-20404) + **/ + PROCEDURE DELETE_FAILED_EXPORT_FILE( + pFileUri IN VARCHAR2, + pCredentialName IN VARCHAR2, + pParameters IN VARCHAR2 + ) IS + BEGIN + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Attempting to delete potentially corrupted file: ' || pFileUri, 'DEBUG', pParameters); + + DBMS_CLOUD.DELETE_OBJECT( + credential_name => pCredentialName, + object_uri => pFileUri + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted existing file (cleanup before retry): ' || pFileUri, 'INFO', pParameters); + EXCEPTION + WHEN OTHERS THEN + -- Object not found is 
OK (file doesn't exist) + IF SQLCODE = -20404 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('File does not exist (OK): ' || pFileUri, 'DEBUG', pParameters); + ELSE + -- Log but don't fail - export will attempt anyway + ENV_MANAGER.LOG_PROCESS_EVENT('Warning: Could not delete file (will retry export anyway): ' || SQLERRM, 'WARNING', pParameters); + END IF; + END; + END DELETE_FAILED_EXPORT_FILE; + + ---------------------------------------------------------------------------------------------------- + + /** + * Builds query with TO_CHAR for date/timestamp columns using per-column formats + * Retrieves format for each date column from FILE_MANAGER.GET_DATE_FORMAT + **/ + FUNCTION buildQueryWithDateFormats( + pColumnList IN VARCHAR2, + pTableName IN VARCHAR2, + pSchemaName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pTemplateTableName IN VARCHAR2 + ) RETURN VARCHAR2 IS + vResult VARCHAR2(32767); + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + vAllCols VARCHAR2(32767); + vDataType VARCHAR2(30); + vDateFormat VARCHAR2(200); + vTemplateSchema VARCHAR2(128); + vTemplateTable VARCHAR2(128); + vColExists NUMBER; + BEGIN + -- Build column list if not provided + IF pColumnList IS NULL THEN + -- Use template table for column order when provided + -- Template defines which columns to export and in what order + IF pTemplateTableName IS NOT NULL THEN + -- Parse template table name (SCHEMA.TABLE or just TABLE) + IF INSTR(pTemplateTableName, '.') > 0 THEN + vTemplateSchema := SUBSTR(pTemplateTableName, 1, INSTR(pTemplateTableName, '.') - 1); + vTemplateTable := SUBSTR(pTemplateTableName, INSTR(pTemplateTableName, '.') + 1); + ELSE + vTemplateSchema := pSchemaName; + vTemplateTable := pTemplateTableName; + END IF; + + -- Get columns from TEMPLATE table in template column order + -- Template defines target CSV structure (column order and which columns to include) + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO 
vAllCols + FROM all_tab_columns + WHERE table_name = vTemplateTable + AND owner = vTemplateSchema; + ELSE + -- Get columns from source table when no template + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pTableName + AND owner = pSchemaName; + END IF; + ELSE + vAllCols := pColumnList; + END IF; + + -- Process each column + vColumns := UPPER(REPLACE(vAllCols, ' ', '')); + vPos := 1; + vResult := ''; + + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- When using template table, check if column exists in SOURCE table + -- Template defines target structure, source provides data + -- Skip template columns that don't exist in source (except A_WORKFLOW_HISTORY_KEY) + IF pTemplateTableName IS NOT NULL THEN + -- Check if template column exists in SOURCE table + SELECT COUNT(*) INTO vColExists + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + + -- Skip columns that don't exist in source table + -- Exception: A_WORKFLOW_HISTORY_KEY is virtual (mapped from pKeyColumnName) + IF vColExists = 0 AND UPPER(vCurrentCol) != 'A_WORKFLOW_HISTORY_KEY' THEN + vPos := vNextPos + 1; + CONTINUE; + END IF; + END IF; + + -- Get column data type from appropriate table (template or source) + IF pTemplateTableName IS NOT NULL THEN + -- Get data type from template table + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = vTemplateTable + AND column_name = vCurrentCol + AND owner = vTemplateSchema; + ELSE + -- Get data type from source table + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + END IF; + + -- Handle key column alias (template table has 
A_WORKFLOW_HISTORY_KEY, source table has pKeyColumnName) + IF UPPER(vCurrentCol) = 'A_WORKFLOW_HISTORY_KEY' THEN + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'; + + -- Convert DATE/TIMESTAMP columns to CHAR with specific format + ELSIF vDataType IN ('DATE', 'TIMESTAMP', 'TIMESTAMP WITH TIME ZONE', 'TIMESTAMP WITH LOCAL TIME ZONE') THEN + IF pTemplateTableName IS NOT NULL THEN + vDateFormat := CT_MRDS.FILE_MANAGER.GET_DATE_FORMAT( + pTemplateTableName => pTemplateTableName, + pColumnName => vCurrentCol + ); + ELSE + vDateFormat := ENV_MANAGER.gvDefaultDateFormat; + END IF; + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'TO_CHAR(T.' || vCurrentCol || ', ''' || vDateFormat || ''') AS ' || vCurrentCol; + + -- Other columns as-is with T. prefix + ELSE + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'T.' || vCurrentCol; + END IF; + + vPos := vNextPos + 1; + END LOOP; + + RETURN vResult; + END buildQueryWithDateFormats; + + ---------------------------------------------------------------------------------------------------- + + -- Internal shared function to process column list with T. prefix and key column mapping + FUNCTION processColumnList(pColumnList IN VARCHAR2, pTableName IN VARCHAR2, pSchemaName IN VARCHAR2, pKeyColumnName IN VARCHAR2) RETURN VARCHAR2 IS + vResult VARCHAR2(32767); + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + vAllCols VARCHAR2(32767); + BEGIN + IF pColumnList IS NULL THEN + -- Build list of all columns + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pTableName + AND owner = pSchemaName; + + -- Add T. prefix to all columns + vResult := 'T.' 
|| REPLACE(vAllCols, ', ', ', T.'); + + -- Replace key column with aliased version (e.g., T.A_ETL_LOAD_SET_KEY_FK AS A_WORKFLOW_HISTORY_KEY) + vResult := REPLACE(vResult, 'T.' || pKeyColumnName, 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'); + + RETURN vResult; + END IF; + + -- Remove extra spaces and convert to uppercase + vColumns := UPPER(REPLACE(pColumnList, ' ', '')); + vPos := 1; + vResult := ''; + + -- Parse comma-separated column list and add T. prefix + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- Check if this is the key column (e.g., A_ETL_LOAD_SET_KEY_FK) and add alias + IF UPPER(vCurrentCol) = UPPER(pKeyColumnName) THEN + vCurrentCol := 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'; + ELSE + -- Add T. prefix if not already present + IF INSTR(vCurrentCol, '.') = 0 THEN + vCurrentCol := 'T.' || vCurrentCol; + END IF; + END IF; + + -- Add to result with comma separator + IF vResult IS NOT NULL THEN + vResult := vResult || ', '; + END IF; + vResult := vResult || vCurrentCol; + + vPos := vNextPos + 1; + END LOOP; + + RETURN vResult; + END processColumnList; + + ---------------------------------------------------------------------------------------------------- + + /** + * Validates table existence, key column existence, and column list + **/ + PROCEDURE VALIDATE_TABLE_AND_COLUMNS ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pColumnList IN VARCHAR2, + pParameters IN VARCHAR2 + ) IS + vCount INTEGER; + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + BEGIN + -- Check if table exists + SELECT COUNT(*) INTO vCount + FROM all_tables + WHERE table_name = pTableName + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, 
ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + END IF; + + -- Check if key column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = pKeyColumnName + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + -- Validate pColumnList - check if all column names exist in the table + IF pColumnList IS NOT NULL THEN + vColumns := UPPER(REPLACE(pColumnList, ' ', '')); + vPos := 1; + + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- Remove table alias prefix if present + IF INSTR(vCurrentCol, '.') > 0 THEN + vCurrentCol := SUBSTR(vCurrentCol, INSTR(vCurrentCol, '.') + 1); + END IF; + + -- Check if column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + vPos := vNextPos + 1; + END LOOP; + END IF; + END VALIDATE_TABLE_AND_COLUMNS; + + ---------------------------------------------------------------------------------------------------- + + /** + * Retrieves list of year/month partitions based on date range + **/ + FUNCTION GET_PARTITIONS ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pMinDate IN DATE, + pMaxDate IN DATE, + pParameters IN VARCHAR2 + ) RETURN partition_tab IS + vSql VARCHAR2(32000); + vPartitions partition_tab; + vKeyValuesYear DBMS_SQL.VARCHAR2_TABLE; + vKeyValuesMonth DBMS_SQL.VARCHAR2_TABLE; + vFullTableName VARCHAR2(200); + BEGIN + -- Build fully qualified table name if not already qualified + IF INSTR(pTableName, '.') > 0 THEN + vFullTableName := pTableName; -- Already 
fully qualified + ELSE + vFullTableName := pSchemaName || '.' || pTableName; + END IF; + + vSql := 'SELECT DISTINCT TO_CHAR(L.LOAD_START,''YYYY'') AS YR, TO_CHAR(L.LOAD_START,''MM'') AS MN + FROM ' || vFullTableName || ' T, CT_ODS.A_LOAD_HISTORY L + WHERE T.' || pKeyColumnName || ' = L.A_ETL_LOAD_SET_KEY + AND L.LOAD_START >= :pMinDate + AND L.LOAD_START < :pMaxDate + ORDER BY YR, MN'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Executing date range query: ' || vSql, 'DEBUG', pParameters); + EXECUTE IMMEDIATE vSql BULK COLLECT INTO vKeyValuesYear, vKeyValuesMonth USING pMinDate, pMaxDate; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vKeyValuesYear.COUNT || ' year/month combinations to export', 'DEBUG', pParameters); + + -- Convert to partition_tab + vPartitions := partition_tab(); + vPartitions.EXTEND(vKeyValuesYear.COUNT); + FOR i IN 1 .. vKeyValuesYear.COUNT LOOP + vPartitions(i).year := vKeyValuesYear(i); + vPartitions(i).month := vKeyValuesMonth(i); + END LOOP; + + RETURN vPartitions; + END GET_PARTITIONS; + + ---------------------------------------------------------------------------------------------------- + + /** + * Exports single partition (year/month) to specified format (PARQUET or CSV) + * This is the core worker procedure that will be used for parallel processing in v2.3.0 + **/ + PROCEDURE EXPORT_SINGLE_PARTITION ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pYear IN VARCHAR2, + pMonth IN VARCHAR2, + pBucketUri IN VARCHAR2, + pFolderName IN VARCHAR2, + pProcessedColumns IN VARCHAR2, + pMinDate IN DATE, + pMaxDate IN DATE, + pCredentialName IN VARCHAR2, + pFormat IN VARCHAR2 DEFAULT 'PARQUET', + pFileBaseName IN VARCHAR2 DEFAULT NULL, + pMaxFileSize IN NUMBER DEFAULT 104857600, + pParameters IN VARCHAR2 + ) IS + vQuery VARCHAR2(32767); + vUri VARCHAR2(4000); + vFileName VARCHAR2(1000); + vFullTableName VARCHAR2(200); + BEGIN + -- Build fully qualified table name if not already qualified + IF INSTR(pTableName, '.') > 0 
THEN + vFullTableName := pTableName; -- Already fully qualified + ELSE + vFullTableName := pSchemaName || '.' || pTableName; + END IF; + + -- Construct the query to extract data for the current year/month + vQuery := 'SELECT ' || pProcessedColumns || ' + FROM ' || vFullTableName || ' T, CT_ODS.A_LOAD_HISTORY L + WHERE T.' || pKeyColumnName || ' = L.A_ETL_LOAD_SET_KEY + AND TO_CHAR(L.LOAD_START,''YYYY'') = ' || CHR(39) || pYear || CHR(39) || ' + AND TO_CHAR(L.LOAD_START,''MM'') = ' || CHR(39) || pMonth || CHR(39) || ' + AND L.LOAD_START >= TO_DATE(' || CHR(39) || TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS') || CHR(39) || ', ''YYYY-MM-DD HH24:MI:SS'') + AND L.LOAD_START < TO_DATE(' || CHR(39) || TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS') || CHR(39) || ', ''YYYY-MM-DD HH24:MI:SS'')'; + + -- Construct the URI based on format + IF pFormat = 'PARQUET' THEN + -- Parquet: Use Hive-style partitioning + -- Note: maxfilesize is NOT supported for Parquet format (Oracle limitation) + vUri := pBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + 'PARTITION_YEAR=' || sanitizeFilename(pYear) || '/' || + 'PARTITION_MONTH=' || sanitizeFilename(pMonth) || '/' || + sanitizeFilename(pYear) || sanitizeFilename(pMonth) || '.parquet'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parquet export URI: ' || vUri, 'DEBUG', pParameters); + + -- Delete potentially corrupted file from previous failed attempt + -- This prevents Oracle from creating _1 suffixed files on retry + DELETE_FAILED_EXPORT_FILE(vUri, pCredentialName, pParameters); + + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object('type' VALUE 'parquet') + ); + ELSIF pFormat = 'CSV' THEN + -- CSV: Flat file structure with year/month in filename + vFileName := NVL(pFileBaseName, UPPER(pTableName)) || '_' || pYear || pMonth || '.csv'; + vUri := pBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + 
sanitizeFilename(vFileName); + + ENV_MANAGER.LOG_PROCESS_EVENT('CSV export URI: ' || vUri, 'DEBUG', pParameters); + + -- Delete potentially corrupted file from previous failed attempt + -- This prevents Oracle from creating _1 suffixed files on retry + DELETE_FAILED_EXPORT_FILE(vUri, pCredentialName, pParameters); + + -- Use json_object() for CSV export with maxfilesize in bytes (Oracle requirement) + -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB + -- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M' + -- Using 100MB (104857600) to avoid PGA memory issues with large files + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object( + 'type' VALUE 'CSV', + 'header' VALUE true, + 'quote' VALUE CHR(34), + 'delimiter' VALUE ',', + 'escape' VALUE true, + 'recorddelimiter' VALUE CHR(13)||CHR(10), -- CRLF dla Windows + 'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes (e.g., 104857600 = 100MB) + ) + ); + ELSE + RAISE_APPLICATION_ERROR(-20001, 'Unsupported format: ' || pFormat || '. 
Use PARQUET or CSV.'); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Processing Year/Month: ' || pYear || '/' || pMonth || ' (Format: ' || pFormat || ')', 'DEBUG', pParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', pParameters); + END EXPORT_SINGLE_PARTITION; + + ---------------------------------------------------------------------------------------------------- + + /** + * Callback procedure for DBMS_PARALLEL_EXECUTE + * Processes single partition (year/month) chunk in parallel task + * Called by DBMS_PARALLEL_EXECUTE framework for each chunk + **/ + PROCEDURE EXPORT_PARTITION_PARALLEL ( + pStartId IN NUMBER, + pEndId IN NUMBER + ) IS + vYear VARCHAR2(4); + vMonth VARCHAR2(2); + vSchemaName VARCHAR2(128); + vTableName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vBucketUri VARCHAR2(4000); + vFolderName VARCHAR2(1000); + vProcessedColumns VARCHAR2(32767); + vMinDate DATE; + vMaxDate DATE; + vCredentialName VARCHAR2(200); + vFormat VARCHAR2(20); + vFileBaseName VARCHAR2(1000); + vMaxFileSize NUMBER; + vParameters VARCHAR2(4000); + BEGIN + -- Retrieve chunk context from global temporary table + SELECT + YEAR_VALUE, + MONTH_VALUE, + SCHEMA_NAME, + TABLE_NAME, + KEY_COLUMN_NAME, + BUCKET_URI, + FOLDER_NAME, + PROCESSED_COLUMNS, + MIN_DATE, + MAX_DATE, + CREDENTIAL_NAME, + FORMAT_TYPE, + FILE_BASE_NAME, + MAX_FILE_SIZE + INTO + vYear, + vMonth, + vSchemaName, + vTableName, + vKeyColumnName, + vBucketUri, + vFolderName, + vProcessedColumns, + vMinDate, + vMaxDate, + vCredentialName, + vFormat, + vFileBaseName, + vMaxFileSize + FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE CHUNK_ID = pStartId; + + vParameters := 'Parallel task - Year: ' || vYear || ', Month: ' || vMonth || ', ChunkID: ' || pStartId; + ENV_MANAGER.LOG_PROCESS_EVENT('Starting parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + + -- Mark chunk as PROCESSING + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'PROCESSING', + ERROR_MESSAGE 
= NULL + WHERE CHUNK_ID = pStartId; + COMMIT; + + -- Call the worker procedure + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vYear, + pMonth => vMonth, + pBucketUri => vBucketUri, + pFolderName => vFolderName, + pProcessedColumns => vProcessedColumns, + pMinDate => vMinDate, + pMaxDate => vMaxDate, + pCredentialName => vCredentialName, + pFormat => vFormat, + pFileBaseName => vFileBaseName, + pMaxFileSize => vMaxFileSize, + pParameters => vParameters + ); + + -- Mark chunk as COMPLETED + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'COMPLETED', + EXPORT_TIMESTAMP = SYSTIMESTAMP, + ERROR_MESSAGE = NULL + WHERE CHUNK_ID = pStartId; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Completed parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Capture error details in variable (SQLERRM cannot be used directly in SQL) + vgMsgTmp := 'Parallel task error for partition ' || vYear || '/' || vMonth || ' (ChunkID: ' || pStartId || '): ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + + -- Mark chunk as FAILED with error message + -- Use vgMsgTmp variable instead of SQLERRM directly (Oracle limitation in SQL context) + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'FAILED', + ERROR_MESSAGE = SUBSTR(vgMsgTmp, 1, 4000) + WHERE CHUNK_ID = pStartId; + COMMIT; + + RAISE; + END EXPORT_PARTITION_PARALLEL; + + ---------------------------------------------------------------------------------------------------- + -- MAIN EXPORT PROCEDURES + ---------------------------------------------------------------------------------------------------- + + PROCEDURE EXPORT_TABLE_DATA ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 default NULL, + 
pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vCount INTEGER; + vQuery VARCHAR2(32767); + vUri VARCHAR2(4000); + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters VARCHAR2(4000); + vBucketUri VARCHAR2(4000); + vProcessedColumnList VARCHAR2(32767); + vCurrentCol VARCHAR2(128); + + -- Variables for file registration (when pRegisterExport=TRUE) + vConfigKey NUMBER; + vSourceKey VARCHAR2(100); + vTableId VARCHAR2(100); + vSlashPos1 NUMBER; + vSlashPos2 NUMBER; + vSourceFileReceivedKey NUMBER; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' + ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' + ,'pProcessName => '''||nvl(pProcessName, 'NULL')||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Check if table exists + SELECT COUNT(*) INTO vCount + FROM all_tables + WHERE table_name = 
vTableName + AND owner = vSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + END IF; + + -- Check if key column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = vTableName + AND column_name = vKeyColumnName + AND owner = vSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + -- Validate template table if provided + IF pTemplateTableName IS NOT NULL THEN + DECLARE + vTemplateSchema VARCHAR2(128); + vTemplateTable VARCHAR2(128); + vTemplateCount NUMBER; + BEGIN + -- Parse template table name (SCHEMA.TABLE or just TABLE) + IF INSTR(pTemplateTableName, '.') > 0 THEN + vTemplateSchema := UPPER(SUBSTR(pTemplateTableName, 1, INSTR(pTemplateTableName, '.') - 1)); + vTemplateTable := UPPER(SUBSTR(pTemplateTableName, INSTR(pTemplateTableName, '.') + 1)); + ELSE + vTemplateSchema := vSchemaName; + vTemplateTable := UPPER(pTemplateTableName); + END IF; + + -- Check if template table exists + SELECT COUNT(*) INTO vTemplateCount + FROM all_tables + WHERE table_name = vTemplateTable + AND owner = vTemplateSchema; + + IF vTemplateCount = 0 THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS || ': Template table ' || vTemplateSchema || '.' || vTemplateTable; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Template table validated: ' || vTemplateSchema || '.' 
|| vTemplateTable, 'DEBUG', vParameters); + END; + END IF; + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(NULL, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Lookup A_SOURCE_FILE_CONFIG_KEY based on pFolderName parsing if pRegisterExport is enabled + IF pRegisterExport THEN + -- Format: {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID} + -- Example: 'ODS/CSDB/CSDB_DEBT_DAILY' -> SOURCE_KEY='CSDB', TABLE_ID='CSDB_DEBT_DAILY' + + -- Parse pFolderName to extract SOURCE_KEY and TABLE_ID + vSlashPos1 := INSTR(pFolderName, '/', 1, 1); -- First '/' position + vSlashPos2 := INSTR(pFolderName, '/', 1, 2); -- Second '/' position + + IF vSlashPos1 > 0 AND vSlashPos2 > 0 THEN + -- Extract segment 2 (SOURCE_KEY) and segment 3 (TABLE_ID) + vSourceKey := SUBSTR(pFolderName, vSlashPos1 + 1, vSlashPos2 - vSlashPos1 - 1); + vTableId := SUBSTR(pFolderName, vSlashPos2 + 1); + + -- Find configuration based on SOURCE_KEY and TABLE_ID + BEGIN + SELECT A_SOURCE_FILE_CONFIG_KEY + INTO vConfigKey + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE A_SOURCE_KEY = vSourceKey + AND TABLE_ID = vTableId + AND SOURCE_FILE_TYPE = 'INPUT' + AND ROWNUM = 1; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found config key: ' || vConfigKey || ' for SOURCE=' || vSourceKey || ', TABLE=' || vTableId, 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('No config found for SOURCE=' || vSourceKey || ', TABLE=' || vTableId || ' - using default (-1)', 'INFO', vParameters); + END; + ELSE + -- 
Cannot parse folder name - use default + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('Cannot parse pFolderName: ' || pFolderName || ' - using default (-1)', 'WARNING', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('File registration enabled with config key: ' || vConfigKey, 'INFO', vParameters); + END IF; + + -- Construct single query for entire table (no join with A_LOAD_HISTORY - ensures single file output) + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T'; + + -- Construct the URI for the file in OCI Object Storage + vUri := vBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + NVL(pFileName, UPPER(vTableName) || '.csv'); + + ENV_MANAGER.LOG_PROCESS_EVENT('Exporting to single file: ' || vUri, 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Max file size: ' || pMaxFileSize || ' bytes (' || ROUND(pMaxFileSize/1048576, 2) || ' MB)', 'DEBUG', vParameters); + + -- Use DBMS_CLOUD package to export data to the URI + -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 100MB (104857600) + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object( + 'type' VALUE 'CSV', + 'header' VALUE true, + 'quote' VALUE CHR(34), + 'delimiter' VALUE ',', + 'escape' VALUE true, + 'recorddelimiter' VALUE CHR(13)||CHR(10), -- CRLF dla Windows + 'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes + ) + ); + + -- Register exported file to A_SOURCE_FILE_RECEIVED if requested + IF pRegisterExport THEN + DECLARE + vChecksum VARCHAR2(128); + vCreated TIMESTAMP WITH TIME ZONE; + vBytes NUMBER; + vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix + vSanitizedFileName VARCHAR2(1000); + vFileName VARCHAR2(1000); + vRetryCount NUMBER := 0; + vMaxRetries NUMBER := 1; -- One retry after initial attempt + 
vRetryDelay NUMBER := 2; -- 2 seconds delay + BEGIN + -- Extract filename from URI (after last '/') + vFileName := SUBSTR(vUri, INSTR(vUri, '/', -1) + 1); + + -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) + -- Example: tablename.csv becomes tablename_1_20260211T102621591769Z.csv + vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i'); + + -- Try to get file metadata with retry logic + <> + LOOP + BEGIN + SELECT object_name, checksum, created, bytes + INTO vActualFileName, vChecksum, vCreated, vBytes + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => pCredentialName, + location_uri => vBucketUri + )) + WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' + ORDER BY created DESC, bytes DESC + FETCH FIRST 1 ROW ONLY; + + -- Extract filename only from full path (remove bucket folder prefix) + vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1); + + -- Success - exit retry loop + EXIT metadata_retry_loop; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vRetryCount := vRetryCount + 1; + + IF vRetryCount <= vMaxRetries THEN + -- Log retry attempt + ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); + + -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) + DBMS_SESSION.SLEEP(vRetryDelay); + ELSE + -- Max retries exceeded - re-raise exception + RAISE; + END IF; + END; + END LOOP metadata_retry_loop; + + -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + 
A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + CHECKSUM, + CREATED, + BYTES, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vActualFileName, -- Use actual filename with Oracle suffix + vChecksum, + vCreated, + vBytes, + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL, -- ARCH_PATH not used for single-file exports + pProcessName -- Process name from parameter + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'INFO', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- File not found after retries - log warning and continue without metadata + ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); + + -- Sanitize filename for fallback INSERT (function cannot be used in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Insert without metadata using theoretical filename + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vSanitizedFileName, -- Use pre-calculated sanitized filename + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL, -- ARCH_PATH not used for single-file exports + pProcessName -- Process 
name from parameter + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file without metadata: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vSanitizedFileName, 'INFO', vParameters); + END; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in column list' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE EXPORT_TABLE_DATA_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); 
+ vKeyColumnName VARCHAR2(128); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vProcessedColumnList VARCHAR2(32767); + vBucketUri VARCHAR2(4000); + vCurrentCol VARCHAR2(128); + vPartitions partition_tab; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pMinDate => '''||nvl(TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Validate table, key column, and column list using shared procedure + VALIDATE_TABLE_AND_COLUMNS(vSchemaName, vTableName, vKeyColumnName, pColumnList, vParameters); + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(pColumnList, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Input column list: ' || NVL(pColumnList, 'NULL (building dynamic list from table metadata)'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date 
columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Validate parallel degree parameter + IF pParallelDegree < 1 OR pParallelDegree > 16 THEN + vgMsgTmp := ENV_MANAGER.MSG_INVALID_PARALLEL_DEGREE || ': ' || pParallelDegree || '. Valid range: 1-16'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + END IF; + + -- Get partitions using shared function + vPartitions := GET_PARTITIONS(vSchemaName, vTableName, vKeyColumnName, pMinDate, pMaxDate, vParameters); + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vPartitions.COUNT || ' partitions to export with parallel degree ' || pParallelDegree, 'INFO', vParameters); + + -- Sequential processing (parallel degree = 1) + IF pParallelDegree = 1 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Using sequential processing (pParallelDegree = 1)', 'DEBUG', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vPartitions(i).year, + pMonth => vPartitions(i).month, + pBucketUri => vBucketUri, + pFolderName => pFolderName, + pProcessedColumns => vProcessedColumnList, + pMinDate => pMinDate, + pMaxDate => pMaxDate, + pCredentialName => pCredentialName, + pFormat => 'PARQUET', + pFileBaseName => NULL, + pMaxFileSize => 104857600, + pParameters => vParameters + ); + END LOOP; + + -- Parallel processing (parallel degree > 1) + ELSE + -- Skip parallel processing if no partitions found + IF vPartitions.COUNT = 0 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('No partitions to export - skipping parallel processing', 'INFO', vParameters); + ELSE + DECLARE + vTaskName VARCHAR2(128) := 'DATA_EXPORT_TASK_' || TO_CHAR(SYSTIMESTAMP, 'YYYYMMDDHH24MISSFF'); + vChunkId NUMBER; + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); + + -- Clean up old completed chunks (>24 hours) to prevent table bloat + -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) + -- This prevents race conditions when multiple exports run simultaneously + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE STATUS = 'COMPLETED' + AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters); + -- NOTE: COMPLETED chunks younger than 24h are intentionally preserved here (session safety, + -- matching the CSV export variant); FAILED chunks are retained and reset to PENDING by the MERGE below + + -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) + FOR i IN 1 .. 
vPartitions.COUNT LOOP + MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t + USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id) + WHEN NOT MATCHED THEN + INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, 'PENDING') + WHEN MATCHED THEN + UPDATE SET TASK_NAME = vTaskName, + STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; + END LOOP; + COMMIT; + + -- Log chunk statistics + DECLARE + vPendingCount NUMBER; + vFailedCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; + SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + END; + + -- Create parallel task + DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); + + -- Define chunks by number range (1 to partition count) + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + task_name => vTaskName, + table_owner => 'CT_MRDS', + table_name => 'A_PARALLEL_EXPORT_CHUNKS', + table_column => 'CHUNK_ID', + chunk_size => 1 -- Each partition is one chunk + ); + + -- Execute task in parallel + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel task: ' || vTaskName, 'DEBUG', vParameters); + + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt 
=> 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + + -- Check for errors + DECLARE + vErrorCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vErrorCount + FROM USER_PARALLEL_EXECUTE_CHUNKS + WHERE task_name = vTaskName AND status = 'PROCESSED_WITH_ERROR'; + + IF vErrorCount > 0 THEN + vgMsgTmp := 'Parallel execution completed with ' || vErrorCount || ' errors. Check USER_PARALLEL_EXECUTE_CHUNKS for details.'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END IF; + END; + + -- Clean up task + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + + -- Clean up chunks for THIS specific task only (session-safe) + -- CRITICAL: Use TASK_NAME filter to avoid deleting chunks from other active sessions + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel execution completed successfully', 'INFO', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Attempt to drop task on error + BEGIN + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + EXCEPTION + WHEN OTHERS THEN NULL; -- Ignore drop errors + END; + + vgMsgTmp := ENV_MANAGER.MSG_PARALLEL_EXECUTION_FAILED || ': ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END; + END IF; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := 
ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in pColumnList' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_INVALID_PARALLEL_DEGREE THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + WHEN ENV_MANAGER.ERR_PARALLEL_EXECUTION_FAILED THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA_BY_DATE; + + ---------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE + * @desc Exports data to a single CSV file with date filtering. + * Unlike EXPORT_TABLE_DATA_BY_DATE, this procedure creates one CSV file + * instead of multiple Parquet files partitioned by year/month. + * Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY. + * Allows specifying custom column list or uses T.* if pColumnList is NULL. + * Validates that all columns in pColumnList exist in the target table. + * Automatically adds 'T.' prefix to column names in pColumnList. 
+ * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'exports', + * pFileName => 'my_export.csv', + * pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 DEFAULT NULL, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vFileBaseName VARCHAR2(4000); + vFileExtension VARCHAR2(10); + vProcessedColumnList VARCHAR2(32767); + vBucketUri VARCHAR2(4000); + vCurrentCol VARCHAR2(128); + vPartitions partition_tab; + vSourceFileReceivedKey NUMBER; + vFileName VARCHAR2(1000); + vFileUri VARCHAR2(4000); + -- Variables for A_SOURCE_FILE_CONFIG lookup + vSourceKey VARCHAR2(100); + vTableId VARCHAR2(200); + vConfigKey NUMBER := -1; + vSlashPos1 NUMBER; + vSlashPos2 NUMBER; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => 
'''||nvl(pFolderName, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pMinDate => '''||nvl(TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' + ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Extract base filename and extension or construct default filename + IF pFileName IS NOT NULL THEN + -- Use provided filename + IF INSTR(pFileName, '.') > 0 THEN + vFileBaseName := SUBSTR(pFileName, 1, INSTR(pFileName, '.', -1) - 1); + vFileExtension := SUBSTR(pFileName, INSTR(pFileName, '.', -1)); + ELSE + vFileBaseName := pFileName; + vFileExtension := '.csv'; + END IF; + ELSE + -- Construct default filename: TABLENAME (without extension, will be added by worker) + vFileBaseName := UPPER(pTableName); + vFileExtension := '.csv'; + END IF; + + -- Validate table, key column, and column list using shared procedure + VALIDATE_TABLE_AND_COLUMNS(vSchemaName, vTableName, vKeyColumnName, pColumnList, vParameters); + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(pColumnList, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + 
ENV_MANAGER.LOG_PROCESS_EVENT('Input column list: ' || NVL(pColumnList, 'NULL (using dynamic column list)'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Validate parallel degree parameter + IF pParallelDegree < 1 OR pParallelDegree > 16 THEN + vgMsgTmp := ENV_MANAGER.MSG_INVALID_PARALLEL_DEGREE || ': ' || pParallelDegree || '. Valid range: 1-16'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + END IF; + + -- Get partitions using shared function + vPartitions := GET_PARTITIONS(vSchemaName, vTableName, vKeyColumnName, pMinDate, pMaxDate, vParameters); + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vPartitions.COUNT || ' year/month combinations to export', 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Date range: ' || TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS') || ' to ' || TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel degree: ' || pParallelDegree, 'INFO', vParameters); + + -- Sequential processing (parallel degree = 1) + IF pParallelDegree = 1 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Using sequential processing (pParallelDegree = 1)', 'DEBUG', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vPartitions(i).year, + pMonth => vPartitions(i).month, + pBucketUri => vBucketUri, + pFolderName => pFolderName, + pProcessedColumns => vProcessedColumnList, + pMinDate => pMinDate, + pMaxDate => pMaxDate, + pCredentialName => pCredentialName, + pFormat => 'CSV', + pFileBaseName => vFileBaseName, + pMaxFileSize => pMaxFileSize, + pParameters => vParameters + ); + END LOOP; + + -- Parallel processing (parallel degree > 1) + ELSE + -- Skip parallel processing if no partitions found + IF vPartitions.COUNT = 0 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('No partitions to export - skipping parallel CSV processing', 'INFO', vParameters); + ELSE + DECLARE + vTaskName VARCHAR2(128) := 'DATA_CSV_EXPORT_TASK_' || TO_CHAR(SYSTIMESTAMP, 'YYYYMMDDHH24MISSFF'); + vChunkId NUMBER; + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); + + -- Clean up old completed chunks (>24 hours) to prevent table bloat + -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) + -- This prevents race conditions when multiple CSV exports run simultaneously + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE STATUS = 'COMPLETED' + AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters); + + -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) + FOR i IN 1 .. 
vPartitions.COUNT LOOP + MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t + USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id) + WHEN NOT MATCHED THEN + INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, 'PENDING') + WHEN MATCHED THEN + UPDATE SET TASK_NAME = vTaskName, + STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; + END LOOP; + COMMIT; + + -- Log chunk statistics + DECLARE + vPendingCount NUMBER; + vFailedCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; + SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + END; + + -- Create parallel task + DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); + + -- Define chunks by number range (1 to partition count) + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + task_name => vTaskName, + table_owner => 'CT_MRDS', + table_name => 'A_PARALLEL_EXPORT_CHUNKS', + table_column => 'CHUNK_ID', + chunk_size => 1 -- Each partition is one chunk + ); + + -- Execute task in parallel + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel CSV export task: ' || vTaskName, 'DEBUG', vParameters); + + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => 
vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + + -- Check for errors + DECLARE + vErrorCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vErrorCount + FROM USER_PARALLEL_EXECUTE_CHUNKS + WHERE task_name = vTaskName AND status = 'PROCESSED_WITH_ERROR'; + + IF vErrorCount > 0 THEN + vgMsgTmp := 'Parallel CSV export completed with ' || vErrorCount || ' errors. Check USER_PARALLEL_EXECUTE_CHUNKS for details.'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END IF; + END; + + -- Clean up task + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + + -- Clean up chunks for THIS specific task only (session-safe) + -- CRITICAL: Use TASK_NAME filter to avoid deleting chunks from other active CSV sessions + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel CSV execution completed successfully', 'INFO', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Attempt to drop task on error + BEGIN + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + EXCEPTION + WHEN OTHERS THEN NULL; -- Ignore drop errors + END; + + vgMsgTmp := ENV_MANAGER.MSG_PARALLEL_EXECUTION_FAILED || ': ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END; + END IF; + END IF; + + -- Note: File registration handled by EXPORT_SINGLE_PARTITION when pRegisterExport=TRUE + -- Each partition calls pRegisterExport logic independently during serial/parallel execution + + -- Register exported files to A_SOURCE_FILE_RECEIVED if requested (after successful export) + IF pRegisterExport THEN + -- Lookup A_SOURCE_FILE_CONFIG_KEY based on 
pFolderName parsing + -- Format: {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID} + -- Example: 'ODS/CSDB/CSDB_DEBT_DAILY' -> SOURCE_KEY='CSDB', TABLE_ID='CSDB_DEBT_DAILY' + + -- Parse pFolderName to extract SOURCE_KEY and TABLE_ID + vSlashPos1 := INSTR(pFolderName, '/', 1, 1); -- First '/' position + vSlashPos2 := INSTR(pFolderName, '/', 1, 2); -- Second '/' position + + IF vSlashPos1 > 0 AND vSlashPos2 > 0 THEN + -- Extract segment 2 (SOURCE_KEY) and segment 3 (TABLE_ID) + vSourceKey := SUBSTR(pFolderName, vSlashPos1 + 1, vSlashPos2 - vSlashPos1 - 1); + vTableId := SUBSTR(pFolderName, vSlashPos2 + 1); + + -- Find configuration based on SOURCE_KEY and TABLE_ID + BEGIN + SELECT A_SOURCE_FILE_CONFIG_KEY + INTO vConfigKey + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE A_SOURCE_KEY = vSourceKey + AND TABLE_ID = vTableId + AND SOURCE_FILE_TYPE = 'INPUT' + AND ROWNUM = 1; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found config key: ' || vConfigKey || ' for SOURCE=' || vSourceKey || ', TABLE=' || vTableId, 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('No config found for SOURCE=' || vSourceKey || ', TABLE=' || vTableId || ' - using default (-1)', 'INFO', vParameters); + END; + ELSE + -- Cannot parse folder name - use default + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('Cannot parse pFolderName: ' || pFolderName || ' - using default (-1)', 'WARNING', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Registering ' || vPartitions.COUNT || ' exported files to A_SOURCE_FILE_RECEIVED with config key: ' || vConfigKey, 'INFO', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + -- Construct filename and URI for this partition + vFileName := NVL(vFileBaseName, UPPER(REPLACE(vTableName, vSchemaName || '.', ''))) || '_' || vPartitions(i).year || vPartitions(i).month || '.csv'; + vFileUri := vBucketUri || CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || sanitizeFilename(vFileName); + + -- Get file metadata from OCI bucket (CHECKSUM, CREATED, BYTES) with retry logic + DECLARE + vChecksum VARCHAR2(128); + vCreated TIMESTAMP WITH TIME ZONE; + vBytes NUMBER; + vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix + vSanitizedFileName VARCHAR2(1000); + vRetryCount NUMBER := 0; + vMaxRetries NUMBER := 1; -- One retry after initial attempt + vRetryDelay NUMBER := 2; -- 2 seconds delay + BEGIN + -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) + -- Example: LEGACY_DEBT_202508.csv becomes LEGACY_DEBT_202508_1_20260211T102621591769Z.csv + vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i'); + + -- Try to get file metadata with retry logic + <<metadata_retry_loop>> + LOOP + BEGIN + SELECT object_name, checksum, created, bytes + INTO vActualFileName, vChecksum, vCreated, vBytes + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => pCredentialName, + location_uri => vBucketUri + )) + WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' + ORDER BY created DESC, bytes DESC + FETCH FIRST 1 ROW ONLY; + + -- Extract filename only from full path (remove bucket folder prefix) + -- vActualFileName contains: 'ODS/CSDB/CSDB_DEBT/LEGACY_DEBT_202508_1_20260211T111341375171Z.csv' + -- Extract only: 'LEGACY_DEBT_202508_1_20260211T111341375171Z.csv' + vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1); + + -- Success - exit retry 
loop + EXIT metadata_retry_loop; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vRetryCount := vRetryCount + 1; + + IF vRetryCount <= vMaxRetries THEN + -- Log retry attempt + ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); + + -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) + DBMS_SESSION.SLEEP(vRetryDelay); + ELSE + -- Max retries exceeded - re-raise exception + RAISE; + END IF; + END; + END LOOP metadata_retry_loop; + + -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + CHECKSUM, + CREATED, + BYTES, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup + vActualFileName, -- Use actual filename with Oracle suffix + vChecksum, + vCreated, + vBytes, + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for CSV exports + NULL, -- PARTITION_MONTH not used for CSV exports + NULL, -- ARCH_PATH not used for CSV exports + pProcessName -- Process name from parameter + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- File not found after retries - log warning and continue without metadata + ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); + + -- Sanitize filename for fallback INSERT (function cannot be used in SQL) + vSanitizedFileName := 
sanitizeFilename(vFileName); + + -- Insert without metadata + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup + vSanitizedFileName, -- Fallback: use theoretical filename if actual not found + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for CSV exports + NULL, -- PARTITION_MONTH not used for CSV exports + NULL, -- ARCH_PATH not used for CSV exports + pProcessName -- Process name from parameter + ); + END; + END LOOP; + + COMMIT; + ENV_MANAGER.LOG_PROCESS_EVENT('Successfully registered all ' || vPartitions.COUNT || ' files', 'INFO', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Export completed successfully for ' || vPartitions.COUNT || ' files', 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in pColumnList' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_INVALID_PARALLEL_DEGREE THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + WHEN ENV_MANAGER.ERR_PARALLEL_EXECUTION_FAILED THEN + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA_TO_CSV_BY_DATE; + + ---------------------------------------------------------------------------------------------------- + -- VERSION MANAGEMENT FUNCTIONS + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION RETURN VARCHAR2 IS + BEGIN + RETURN PACKAGE_VERSION; + END GET_VERSION; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_BUILD_INFO RETURN VARCHAR2 IS + BEGIN + RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO( + pPackageName => 'DATA_EXPORTER', + pVersion => PACKAGE_VERSION, + pBuildDate => PACKAGE_BUILD_DATE, + pAuthor => PACKAGE_AUTHOR + ); + END GET_BUILD_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2 IS + BEGIN + RETURN ENV_MANAGER.FORMAT_VERSION_HISTORY( + pPackageName => 'DATA_EXPORTER', + pVersionHistory => VERSION_HISTORY + ); + END GET_VERSION_HISTORY; + + ---------------------------------------------------------------------------------------------------- + +END; + +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkg new file mode 100644 index 0000000..6b6a4b8 --- /dev/null +++ 
b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkg @@ -0,0 +1,239 @@ +create or replace PACKAGE CT_MRDS.DATA_EXPORTER +AUTHID CURRENT_USER +AS + /** + * Data Export Package: Provides comprehensive data export capabilities to various formats (CSV, Parquet) + * with support for cloud storage integration via Oracle Cloud Infrastructure (OCI). + * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function + * which returns documentation text for confluence page (to Copy-Paste it). + **/ + + -- Package Version Information + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.9.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-13 14:00:00'; + PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; + + -- Version History (last 3-5 changes) + VERSION_HISTORY CONSTANT VARCHAR2(4000) := + 'v2.9.0 (2026-02-13): Added pProcessName parameter to EXPORT_TABLE_DATA and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures for process tracking in A_SOURCE_FILE_RECEIVED table.' || CHR(10) || + 'v2.8.1 (2026-02-12): FIX query in EXPORT_TABLE_DATA - removed A_LOAD_HISTORY join to ensure single file output (simple SELECT).' || CHR(10) || + 'v2.8.0 (2026-02-12): MAJOR REFACTOR - EXPORT_TABLE_DATA now exports to single CSV file instead of partitioning by key values. Added pFileName parameter.' || CHR(10) || + 'v2.7.5 (2026-02-11): Added pRegisterExport parameter to EXPORT_TABLE_DATA procedure. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) || + 'v2.7.4 (2026-02-11): ACTUAL FILENAME STORAGE - Store real filename with Oracle suffix in SOURCE_FILE_NAME instead of theoretical filename.' 
|| CHR(10); + + cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); + vgMsgTmp VARCHAR2(32000); + + --------------------------------------------------------------------------------------------------------------------------- + -- TYPE DEFINITIONS FOR PARTITION HANDLING + --------------------------------------------------------------------------------------------------------------------------- + + /** + * Record type for year/month partition information + **/ + TYPE partition_rec IS RECORD ( + year VARCHAR2(4), + month VARCHAR2(2) + ); + + /** + * Table type for collection of partition records + **/ + TYPE partition_tab IS TABLE OF partition_rec; + + --------------------------------------------------------------------------------------------------------------------------- + -- INTERNAL PARALLEL PROCESSING CALLBACK + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_PARTITION_PARALLEL + * @desc Internal callback procedure for DBMS_PARALLEL_EXECUTE. + * Processes single partition (year/month) chunk in parallel task. + * Called by DBMS_PARALLEL_EXECUTE framework for each chunk. + * This procedure is PUBLIC because DBMS_PARALLEL_EXECUTE requires it, + * but should NOT be called directly by external code. + * @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table) + * @param pEndId - Chunk end ID (same as pStartId for single-row chunks) + **/ + PROCEDURE EXPORT_PARTITION_PARALLEL ( + pStartId IN NUMBER, + pEndId IN NUMBER + ); + + --------------------------------------------------------------------------------------------------------------------------- + -- MAIN EXPORT PROCEDURES + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_TABLE_DATA + * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. + * Exports data into single CSV file on OCI infrastructure. 
+ * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * Supports template table for column order and per-column date formatting. + * When pRegisterExport=TRUE, successfully exported file is registered in: + * - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata) + * @param pFileName - Optional filename (e.g., 'export.csv'). NULL = auto-generate from table name + * @param pTemplateTableName - Optional template table (SCHEMA.TABLE or TABLE) for: + * - Column order control (template defines CSV structure) + * - Per-column date formatting via FILE_MANAGER.GET_DATE_FORMAT + * - NULL = use source table columns in natural order + * @param pMaxFileSize - Maximum file size in bytes (default 104857600 = 100MB, min 10MB, max 1GB) + * @param pRegisterExport - When TRUE, registers exported CSV file in A_SOURCE_FILE_RECEIVED table + * @param pProcessName - Process name stored in PROCESS_NAME column (default 'DATA_EXPORTER') + * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'csv_exports', + * pFileName => 'my_export.csv', -- Optional + * pTemplateTableName => 'CT_ET_TEMPLATES.MY_TEMPLATE', -- Optional + * pMaxFileSize => 104857600, -- Optional, default 100MB + * pRegisterExport => TRUE -- Optional, default FALSE + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 default NULL, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + + + /** + * @name EXPORT_TABLE_DATA_BY_DATE + * @desc Wrapper procedure for 
DBMS_CLOUD.EXPORT_DATA. + * Exports data into PARQUET files on OCI infrastructure. + * Each YEAR_MONTH pair goes to separate file (implicit partitioning). + * Allows specifying custom column list or uses T.* if pColumnList is NULL. + * Validates that all columns in pColumnList exist in the target table. + * Automatically adds 'T.' prefix to column names in pColumnList. + * Supports parallel partition processing via pParallelDegree parameter (default 1, range 1-16). + * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'parquet_exports', + * pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE, + * pParallelDegree => 8 -- Optional, default 1, range 1-16 + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + + + /** + * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE + * @desc Exports data to separate CSV files partitioned by year and month. + * Creates one CSV file for each year/month combination found in the data. + * Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE, + * but exports to CSV format instead of Parquet. + * Supports parallel partition processing via pParallelDegree parameter (1-16). 
+ * File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL) + * When pRegisterExport=TRUE, successfully exported files are registered in: + * - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata) + * @param pProcessName - Process name stored in PROCESS_NAME column (default 'DATA_EXPORTER') + * @example + * begin + * -- With custom filename + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'exports', + * pFileName => 'my_export.csv', + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE, + * pParallelDegree => 8, -- Optional, default 1, range 1-16 + * pRegisterExport => TRUE -- Optional, default FALSE, registers to A_SOURCE_FILE_RECEIVED + * ); + * + * -- With auto-generated filename (based on table name only) + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'OU_TOP', + * pTableName => 'AGGREGATED_ALLOTMENT', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'ARCHIVE', + * pFolderName => 'exports', + * pMinDate => DATE '2025-09-01', + * pMaxDate => DATE '2025-09-17', + * pRegisterExport => TRUE -- Registers each export to A_SOURCE_FILE_RECEIVED table + * ); + * -- This will create files like: AGGREGATED_ALLOTMENT_202509.csv, etc. 
+ * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 DEFAULT NULL, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + --------------------------------------------------------------------------------------------------------------------------- + -- VERSION MANAGEMENT FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * Returns the current package version number + * return: Version string in format X.Y.Z (e.g., '2.1.0') + **/ + FUNCTION GET_VERSION RETURN VARCHAR2; + + /** + * Returns comprehensive build information including version, date, and author + * return: Formatted string with complete build details + **/ + FUNCTION GET_BUILD_INFO RETURN VARCHAR2; + + /** + * Returns the version history with recent changes + * return: Multi-line string with version history + **/ + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; + +END; + +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkb new file mode 100644 index 0000000..856d449 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkb @@ -0,0 +1,1171 @@ +create or replace PACKAGE BODY CT_MRDS.ENV_MANAGER +AS + + 
---------------------------------------------------------------------------------------------------- + + PROCEDURE INIT_ERRORS IS + BEGIN + Errors(CODE_EMPTY_FILEURI_AND_RECKEY) := Error_Record(CODE_EMPTY_FILEURI_AND_RECKEY, MSG_EMPTY_FILEURI_AND_RECKEY); -- -20001 + Errors(CODE_NO_CONFIG_MATCH_FOR_FILEURI) := Error_Record(CODE_NO_CONFIG_MATCH_FOR_FILEURI, MSG_NO_CONFIG_MATCH_FOR_FILEURI); -- -20002 + Errors(CODE_MULTIPLE_MATCH_FOR_SRCFILE) := Error_Record(CODE_MULTIPLE_MATCH_FOR_SRCFILE, MSG_MULTIPLE_MATCH_FOR_SRCFILE); -- -20003 + Errors(CODE_MISSING_COLUMN_DATE_FORMAT) := Error_Record(CODE_MISSING_COLUMN_DATE_FORMAT, MSG_MISSING_COLUMN_DATE_FORMAT); -- -20004 + Errors(CODE_MULTIPLE_COLUMN_DATE_FORMAT) := Error_Record(CODE_MULTIPLE_COLUMN_DATE_FORMAT, MSG_MULTIPLE_COLUMN_DATE_FORMAT); -- -20005 + Errors(CODE_DIDNT_GET_LOAD_OPERATION_ID) := Error_Record(CODE_DIDNT_GET_LOAD_OPERATION_ID, MSG_DIDNT_GET_LOAD_OPERATION_ID); -- -20006 + Errors(CODE_NO_CONFIG_FOR_RECEIVED_FILE) := Error_Record(CODE_NO_CONFIG_FOR_RECEIVED_FILE, MSG_NO_CONFIG_FOR_RECEIVED_FILE); -- -20007 + Errors(CODE_MULTI_CONFIG_FOR_RECEIVED_FILE) := Error_Record(CODE_MULTI_CONFIG_FOR_RECEIVED_FILE, MSG_MULTI_CONFIG_FOR_RECEIVED_FILE); -- -20008 + Errors(CODE_FILE_NOT_FOUND_ON_CLOUD) := Error_Record(CODE_FILE_NOT_FOUND_ON_CLOUD, MSG_FILE_NOT_FOUND_ON_CLOUD); -- -20009 + Errors(CODE_FILE_VALIDATION_FAILED) := Error_Record(CODE_FILE_VALIDATION_FAILED, MSG_FILE_VALIDATION_FAILED); -- -20010 + Errors(CODE_EXCESS_COLUMNS_DETECTED) := Error_Record(CODE_EXCESS_COLUMNS_DETECTED, MSG_EXCESS_COLUMNS_DETECTED); -- -20011 + Errors(CODE_NO_CONFIG_MATCH) := Error_Record(CODE_NO_CONFIG_MATCH, MSG_NO_CONFIG_MATCH); -- -20012 + Errors(CODE_UNKNOWN_PREFIX) := Error_Record(CODE_UNKNOWN_PREFIX, MSG_UNKNOWN_PREFIX); -- -20013 + Errors(CODE_TABLE_NOT_EXISTS) := Error_Record(CODE_TABLE_NOT_EXISTS, MSG_TABLE_NOT_EXISTS); -- -20014 + Errors(CODE_COLUMN_NOT_EXISTS) := Error_Record(CODE_COLUMN_NOT_EXISTS, MSG_COLUMN_NOT_EXISTS); 
-- -20015 + Errors(CODE_UNSUPPORTED_DATA_TYPE) := Error_Record(CODE_UNSUPPORTED_DATA_TYPE, MSG_UNSUPPORTED_DATA_TYPE); -- -20016 + Errors(CODE_MISSING_SOURCE_KEY) := Error_Record(CODE_MISSING_SOURCE_KEY, MSG_MISSING_SOURCE_KEY); -- -20017 + Errors(CODE_NULL_SOURCE_FILE_CONFIG_KEY) := Error_Record(CODE_NULL_SOURCE_FILE_CONFIG_KEY, MSG_NULL_SOURCE_FILE_CONFIG_KEY); -- -20018 + Errors(CODE_DUPLICATED_SOURCE_KEY) := Error_Record(CODE_DUPLICATED_SOURCE_KEY, MSG_DUPLICATED_SOURCE_KEY); -- -20019 + Errors(CODE_MISSING_CONTAINER_CONFIG) := Error_Record(CODE_MISSING_CONTAINER_CONFIG, MSG_MISSING_CONTAINER_CONFIG); -- -20020 + Errors(CODE_MULTIPLE_CONTAINER_ENTRIES) := Error_Record(CODE_MULTIPLE_CONTAINER_ENTRIES, MSG_MULTIPLE_CONTAINER_ENTRIES); -- -20021 + Errors(CODE_WRONG_DESTINATION_PARAM) := Error_Record(CODE_WRONG_DESTINATION_PARAM, MSG_WRONG_DESTINATION_PARAM); -- -20022 + Errors(CODE_FILE_NOT_EXISTS_ON_CLOUD) := Error_Record(CODE_FILE_NOT_EXISTS_ON_CLOUD, MSG_FILE_NOT_EXISTS_ON_CLOUD); -- -20023 + Errors(CODE_FILE_ALREADY_REGISTERED) := Error_Record(CODE_FILE_ALREADY_REGISTERED, MSG_FILE_ALREADY_REGISTERED); -- -20024 + Errors(CODE_WRONG_DATE_TIMESTAMP_FORMAT) := Error_Record(CODE_WRONG_DATE_TIMESTAMP_FORMAT, MSG_WRONG_DATE_TIMESTAMP_FORMAT); -- -20025 + Errors(CODE_ENVIRONMENT_NOT_SET) := Error_Record(CODE_ENVIRONMENT_NOT_SET, MSG_ENVIRONMENT_NOT_SET); -- -20026 + Errors(CODE_CONFIG_VARIABLE_NOT_SET) := Error_Record(CODE_CONFIG_VARIABLE_NOT_SET, MSG_CONFIG_VARIABLE_NOT_SET); -- -20027 + Errors(CODE_NOT_INPUT_SOURCE_FILE_TYPE) := Error_Record(CODE_NOT_INPUT_SOURCE_FILE_TYPE, MSG_NOT_INPUT_SOURCE_FILE_TYPE); -- -20028 + Errors(CODE_EXP_DATA_FOR_ARCH_FAILED) := Error_Record(CODE_EXP_DATA_FOR_ARCH_FAILED, MSG_EXP_DATA_FOR_ARCH_FAILED); -- -20029 + Errors(CODE_RESTORE_FILE_FROM_TRASH) := Error_Record(CODE_RESTORE_FILE_FROM_TRASH, MSG_RESTORE_FILE_FROM_TRASH); -- -20030 + Errors(CODE_CHANGE_STAT_TO_ARCHIVED_FAILED):= Error_Record(CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, 
MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); -- -20031 + Errors(CODE_MOVE_FILE_TO_TRASH_FAILED) := Error_Record(CODE_MOVE_FILE_TO_TRASH_FAILED, MSG_MOVE_FILE_TO_TRASH_FAILED); -- -20032 + Errors(CODE_DROP_EXPORTED_FILES_FAILED) := Error_Record(CODE_DROP_EXPORTED_FILES_FAILED, MSG_DROP_EXPORTED_FILES_FAILED); -- -20033 + Errors(CODE_INVALID_BUCKET_AREA) := Error_Record(CODE_INVALID_BUCKET_AREA, MSG_INVALID_BUCKET_AREA); -- -20034 + Errors(CODE_INVALID_PARALLEL_DEGREE) := Error_Record(CODE_INVALID_PARALLEL_DEGREE, MSG_INVALID_PARALLEL_DEGREE); -- -20110 + Errors(CODE_PARALLEL_EXECUTION_FAILED) := Error_Record(CODE_PARALLEL_EXECUTION_FAILED, MSG_PARALLEL_EXECUTION_FAILED); -- -20111 + + Errors(CODE_UNKNOWN) := Error_Record(CODE_UNKNOWN, MSG_UNKNOWN); -- -20999 + + END INIT_ERRORS; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_DEFAULT_ENV + RETURN VARCHAR2 + IS + vDefaultEnv CT_MRDS.a_file_manager_config.config_variable_value%TYPE; + BEGIN + select config_variable_value + into vDefaultEnv + from CT_MRDS.a_file_manager_config + where lower(environment_id)='default' + and lower(config_variable)='environmentid'; + RETURN vDefaultEnv; + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN NULL; + END; + + ---------------------------------------------------------------------------------------------------- + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE INIT_VARIABLES( + pEnv VARCHAR2 + ) IS + BEGIN + for rec in ( + select + ENVIRONMENT_ID + ,REGION + ,NAMESPACE + ,INBOXBUCKETNAME + ,DATABUCKETNAME + ,ARCHIVEBUCKETNAME + ,CREDENTIALNAME + ,LOGGINGENABLED + ,MINLOGLEVEL + ,DEFAULTDATEFORMAT + ,CONSOLELOGGINGENABLED + from ( + select environment_id, config_variable, config_variable_value from CT_MRDS.A_FILE_MANAGER_CONFIG + where environment_id=pEnv + ) + pivot ( + min(config_variable_value) + for config_variable in ( + 'Region' as Region + 
,'NameSpace' as NameSpace + ,'InboxBucketName' as InboxBucketName + ,'DataBucketName' as DataBucketName + ,'ArchiveBucketName' as ArchiveBucketName + ,'CredentialName' as CredentialName + ,'LoggingEnabled' as LoggingEnabled + ,'MinLogLevel' as MinLogLevel + ,'DefaultDateFormat' as DefaultDateFormat + ,'ConsoleLoggingEnabled' as ConsoleLoggingEnabled) + ) + ) loop + if (rec.NAMESPACE is NULL + or rec.REGION is NULL + or rec.NAMESPACE is NULL + or rec.INBOXBUCKETNAME is NULL + or rec.DATABUCKETNAME is NULL + or rec.ARCHIVEBUCKETNAME is NULL + or rec.CREDENTIALNAME is NULL + ) THEN + vgMsgTmp := MSG_CONFIG_VARIABLE_NOT_SET + ||cgBL||' '||'Details about existing Configuration Variables where environment_id='||pEnv||': ' + ||cgBL||' '||'-------------------------' + ||cgBL||' '||'Region = '||rec.Region + ||cgBL||' '||'NameSpace = '||rec.NameSpace + ||cgBL||' '||'InboxBucketName = '||rec.InboxBucketName + ||cgBL||' '||'DataBucketName = '||rec.DataBucketName + ||cgBL||' '||'ArchiveBucketName = '||rec.ArchiveBucketName + ||cgBL||' '||'CredentialName = '||rec.CredentialName + ; + LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR'); + RAISE_APPLICATION_ERROR(CODE_CONFIG_VARIABLE_NOT_SET, vgMsgTmp); + + elsif (rec.LOGGINGENABLED is NULL + or rec.MINLOGLEVEL is NULL + or rec.DEFAULTDATEFORMAT is NULL + ) THEN + vgMsgTmp := 'Missing configuration variables' + ||cgBL||' '||'Details about existing Configuration Variables where environment_id='||pEnv||': ' + ||cgBL||' '||'-------------------------' + ||cgBL||' '||'LoggingEnabled = '||rec.LoggingEnabled + ||cgBL||' '||'MinLogLevel = '||rec.MinLogLevel + ||cgBL||' '||'DefaultDateFormat = '||rec.DefaultDateFormat + ; + LOG_PROCESS_EVENT(vgMsgTmp, 'WARNING'); + + else + gvNameSpace := rec.NAMESPACE; + gvRegion := rec.REGION; + gvInboxBucketName := rec.INBOXBUCKETNAME; + gvDataBucketName := rec.DATABUCKETNAME; + gvArchiveBucketName := rec.ARCHIVEBUCKETNAME; + gvCredentialName := rec.CREDENTIALNAME; + gvInboxBucketUri := 
'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.INBOXBUCKETNAME||'/o/'; + gvDataBucketUri := 'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.DATABUCKETNAME||'/o/'; + gvArchiveBucketUri := 'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.ARCHIVEBUCKETNAME||'/o/'; + gvLoggingEnabled := rec.LOGGINGENABLED; + gvMinLogLevel := rec.MINLOGLEVEL; + gvDefaultDateFormat := rec.DEFAULTDATEFORMAT; + gvConsoleLoggingEnabled := NVL(rec.CONSOLELOGGINGENABLED, 'ON'); + end if; + end loop; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vgMsgTmp := MSG_CONFIG_VARIABLE_NOT_SET + ||cgBL||' '||'No configuration found for environment_id='||pEnv||' in A_FILE_MANAGER_CONFIG table'; + LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', 'pEnv='||pEnv); + RAISE_APPLICATION_ERROR(CODE_CONFIG_VARIABLE_NOT_SET, vgMsgTmp); + WHEN OTHERS THEN + vgMsgTmp := 'Unexpected error while initializing variables for environment: '||pEnv + ||cgBL||' '||'SQLERRM: '||SQLERRM; + LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', 'pEnv='||pEnv); + RAISE; + END INIT_VARIABLES; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_ERROR_MESSAGE( + pCode PLS_INTEGER + ) RETURN VARCHAR2 + IS + BEGIN + RETURN Errors(pCode).message; + EXCEPTION + WHEN NO_DATA_FOUND THEN + LOG_PROCESS_EVENT('No error message found for pCode='||pCode , 'WARNING', 'pCode='||pCode); + LOG_PROCESS_EVENT('Update ENV_MANAGER package header with new code.' 
, 'WARNING', 'pCode='||pCode); + RETURN NULL; + WHEN OTHERS THEN + LOG_PROCESS_EVENT(MSG_UNKNOWN , 'ERROR', 'pCode='||pCode); + RAISE_APPLICATION_ERROR(CODE_UNKNOWN, MSG_UNKNOWN); + END GET_ERROR_MESSAGE; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_ERROR_STACK( + pFormat VARCHAR2 + ,pCode PLS_INTEGER + ,pSourceFileReceivedKey CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE DEFAULT NULL + ) RETURN VARCHAR2 + IS + vFullErrorCore VARCHAR2(32000); + vFullErrorMsg VARCHAR2(32000); + BEGIN +-- vgErrorMessage := SQLERRM|| cgBL; +-- vgErrorStack := DBMS_UTILITY.FORMAT_ERROR_STACK; +-- vgErrorBacktrace := DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + vFullErrorCore :='Error Message:' + ||cgBL|| SQLERRM|| cgBL + ||'-------------------------------------------------------' + ||cgBL||'Error Stack:' + ||cgBL|| DBMS_UTILITY.FORMAT_ERROR_STACK + ||'-------------------------------------------------------' + ||cgBL||'Error Backtrace:' + ||cgBL|| DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; +-- vFullErrorCore := REGEXP_REPLACE (vFullErrorCore, pCode||': ', pCode||': '||GET_ERROR_MESSAGE(pCode) , 1, 1); + IF (pFormat = 'TABLE') THEN + vFullErrorMsg := vFullErrorCore; + ELSE + vFullErrorMsg := cgBL||'------------------------------------------------------+' + ||cgBL||vFullErrorCore + ||'------------------------------------------------------+'; + END IF; +-- IF pSourceFileReceivedKey is not null THEN +-- vFullErrorMsg := vFullErrorMsg ||cgBL||GET_DET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey,1,1,1); +-- END IF; + + RETURN vFullErrorMsg; + EXCEPTION + WHEN OTHERS THEN + LOG_PROCESS_EVENT(MSG_UNKNOWN , 'ERROR', 'pFormat='||pFormat); + RETURN NULL; + END GET_ERROR_STACK; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION FORMAT_PARAMETERS( + pParameterList SYS.ODCIVARCHAR2LIST + ) RETURN VARCHAR2 IS + vResult VARCHAR2(10000); + BEGIN + FOR i 
IN 1 .. pParameterList.COUNT LOOP +-- dbms_output.put_line('pParameterList(i): '||pParameterList(i)); + if i < pParameterList.COUNT then vResult := vResult || replace(pParameterList(i), '''NULL''', 'NULL') ||' ,'|| cgBL; + else vResult := vResult || replace(pParameterList(i), '''NULL''', 'NULL'); + end if; + END LOOP; + RETURN vResult; + EXCEPTION + WHEN OTHERS THEN + LOG_PROCESS_EVENT('Error while formating parameters.' , 'WARNING'); + RETURN NULL; + END FORMAT_PARAMETERS; + + ---------------------------------------------------------------------------------------------------- + + + + PROCEDURE LOG_PROCESS_EVENT ( + pLogMessage VARCHAR2 + ,pLogLevel VARCHAR2 DEFAULT 'ERROR' + ,pParameters VARCHAR2 DEFAULT NULL + ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER' + ) IS + PRAGMA AUTONOMOUS_TRANSACTION; + + vLoggingEnabled VARCHAR2(10); + vMinLogLevel VARCHAR2(10); + vCallStack VARCHAR2(10000); + vProcedureName VARCHAR2(100); + vProcedureLevel PLS_INTEGER; + vTotalLines PLS_INTEGER; + vCurrentLine PLS_INTEGER; + + -- Map of priority level + TYPE logLevelMap IS TABLE OF NUMBER INDEX BY VARCHAR2(10); + vLogLevels logLevelMap; + + BEGIN + -- Prority logging level (higher -> more important) + vLogLevels('DEBUG') := 1; + vLogLevels('INFO') := 2; + vLogLevels('WARNING') := 3; + vLogLevels('ERROR') := 4; + + -- Check id logging is TURN-OFF + IF gvLoggingEnabled = 'OFF' THEN + RETURN; + END IF; + -- Check logging level + IF vLogLevels(pLogLevel) < vLogLevels(gvMinLogLevel) THEN + RETURN; + END IF; + + vCallStack := DBMS_UTILITY.FORMAT_CALL_STACK; + vProcedureName := REGEXP_SUBSTR(vCallStack, 'package body\s+\w+\.(\w+\.\w+)', 1, 2, NULL, 1); + vTotalLines := REGEXP_COUNT(vCallStack, CHR(10)) + 1; + vCurrentLine := REGEXP_COUNT(SUBSTR(vCallStack, 1, INSTR(vCallStack, vProcedureName) - 1), CHR(10)) + 1; + vProcedureLevel := (vTotalLines - vCurrentLine + 1) - 3; + vProcedureName := LPAD(vProcedureName, LENGTH(vProcedureName) + 2*vProcedureLevel, ' '); + + INSERT INTO 
CT_MRDS.A_PROCESS_LOG (guid, username, osuser, machine, module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES (guid, gvUsername, gvOsuser, gvMachine, gvModule, pProcessName, vProcedureName, pParameters, pLogLevel, pLogMessage); + + COMMIT; + + -- Also output to console for immediate visibility (if enabled) + IF gvConsoleLoggingEnabled = 'ON' THEN + DBMS_OUTPUT.PUT_LINE('[' || TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') || '] [' || pLogLevel || '] ' || vProcedureName || ': ' || pLogMessage); + END IF; + + END LOG_PROCESS_EVENT; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE LOG_PROCESS_ERROR( + pLogMessage IN VARCHAR2, + pParameters IN VARCHAR2 DEFAULT NULL, + pProcessName IN VARCHAR2 DEFAULT 'FILE_MANAGER' + ) IS + PRAGMA AUTONOMOUS_TRANSACTION; + + vCallStack VARCHAR2(32767); + vErrorStack VARCHAR2(32767); + vErrorBacktrace VARCHAR2(32767); + vAdjustedBacktrace VARCHAR2(32767); + vErrorContext VARCHAR2(4000); + vProcName VARCHAR2(100); + vProcedureLevel PLS_INTEGER; + vTotalLines PLS_INTEGER; + vCurrentLine PLS_INTEGER; + vFullErrorMessage CLOB; + vTimestamp VARCHAR2(30); + vSessionInfo VARCHAR2(1000); + + BEGIN + -- Check if logging is disabled + IF gvLoggingEnabled = 'OFF' THEN + RETURN; + END IF; + + -- Capture all available error information + vErrorStack := DBMS_UTILITY.FORMAT_ERROR_STACK; + vErrorBacktrace := DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + vCallStack := DBMS_UTILITY.FORMAT_CALL_STACK; + vTimestamp := TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS'); + + -- Capture session information for better context + vSessionInfo := 'Session ID: ' || SYS_CONTEXT('USERENV', 'SID') || + ', User: ' || SYS_CONTEXT('USERENV', 'SESSION_USER') || + ', Module: ' || SYS_CONTEXT('USERENV', 'MODULE') || + ', Client Info: ' || NVL(SYS_CONTEXT('USERENV', 'CLIENT_INFO'), 'N/A') || + ', Action: ' || NVL(SYS_CONTEXT('USERENV', 'ACTION'), 'N/A'); + + -- Build error context 
information + vErrorContext := 'Environment: ' || gvEnv || + ', Process: ' || NVL(pProcessName, 'UNKNOWN') || + ', Timestamp: ' || vTimestamp || + ', SQLCODE: ' || SQLCODE || + ', Transaction Active: ' || CASE WHEN DBMS_TRANSACTION.STEP_ID IS NOT NULL THEN 'YES' ELSE 'NO' END; + + -- Extract procedure name and nesting level from call stack + -- Always extract actual procedure name from call stack for precise error location + vProcName := REGEXP_SUBSTR(vCallStack, 'package body\s+\w+\.(\w+\.\w+)', 1, 2, NULL, 1); + + -- If we couldn't extract procedure name from call stack, use provided process name + IF vProcName IS NULL THEN + vProcName := NVL(pProcessName, 'UNKNOWN'); + END IF; + + vTotalLines := REGEXP_COUNT(vCallStack, CHR(10)) + 1; + vCurrentLine := REGEXP_COUNT(SUBSTR(vCallStack, 1, INSTR(vCallStack, vProcName) - 1), CHR(10)) + 1; + vProcedureLevel := (vTotalLines - vCurrentLine + 1) - 3; + vProcName := LPAD(vProcName, LENGTH(vProcName) + 2*vProcedureLevel, ' '); + + -- Enhance line number display to show direct _BODY.sql file line numbers + -- Since packages are now split into separate _SPEC and _BODY files, line numbers map directly + vAdjustedBacktrace := REGEXP_REPLACE(vErrorBacktrace, + 'at "CT_MRDS\.FILE_MANAGER", line ([0-9]+)', + 'at "CT_MRDS.FILE_MANAGER", line \1 (-> FILE_MANAGER_BODY.sql:line \1)', 1, 0, 'i'); + + vAdjustedBacktrace := REGEXP_REPLACE(vAdjustedBacktrace, + 'at "CT_MRDS\.ENV_MANAGER", line ([0-9]+)', + 'at "CT_MRDS.ENV_MANAGER", line \1 (-> ENV_MANAGER_BODY.sql:line \1)', 1, 0, 'i'); + + -- Build comprehensive error message with professional formatting + vFullErrorMessage := 'ERROR REPORT' || cgBL || + '-------------------------------------------------------' || cgBL || + 'ERROR SUMMARY' || cgBL || + ' Message: ' || pLogMessage || cgBL || + ' Context: ' || vErrorContext || cgBL || + '-------------------------------------------------------' || cgBL || + 'SESSION INFORMATION' || cgBL || + ' ' || vSessionInfo || cgBL || + 
'-------------------------------------------------------' || cgBL || + 'ERROR STACK (Oracle Internal)' || cgBL || + vErrorStack || + '-------------------------------------------------------' || cgBL || + 'BACKTRACE INFORMATION (Oracle Internal)' || cgBL || + vErrorBacktrace || + '-------------------------------------------------------' || cgBL || + 'CALL STACK (Execution Path)' || cgBL || + vCallStack || + '-------------------------------------------------------' || cgBL || + 'QUICK REFERENCE' || cgBL || + ' SQLCODE: ' || SQLCODE || cgBL || + ' SQLERRM: ' || SQLERRM || cgBL || + ' Timestamp: ' || vTimestamp || cgBL || + ' Parameters: ' || NVL(pParameters, 'None provided') || cgBL || + '-------------------------------------------------------'; + + -- Insert comprehensive error record into log table + -- Note: LOG_MESSAGE is VARCHAR2(4000), so we'll truncate if needed but include key info + INSERT INTO CT_MRDS.A_PROCESS_LOG (guid, username, osuser, machine, module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES (guid, gvUsername, gvOsuser, gvMachine, gvModule, NVL(pProcessName, 'FILE_MANAGER'), vProcName, pParameters, 'ERROR', + CASE + WHEN LENGTH(vFullErrorMessage) <= 4000 THEN vFullErrorMessage + ELSE SUBSTR(vFullErrorMessage, 1, 3950) || '... 
[TRUNCATED]' + END); + + COMMIT; + + -- Enhanced console output for immediate visibility (if enabled) + IF gvConsoleLoggingEnabled = 'ON' THEN + DBMS_OUTPUT.PUT_LINE('======================================================='); + DBMS_OUTPUT.PUT_LINE('ERROR DETECTED AT: ' || vTimestamp); + DBMS_OUTPUT.PUT_LINE('PROCEDURE: ' || NVL(vProcName, 'UNKNOWN')); + DBMS_OUTPUT.PUT_LINE('MESSAGE: ' || pLogMessage); + DBMS_OUTPUT.PUT_LINE('SQLCODE: ' || SQLCODE || ' | ENVIRONMENT: ' || gvEnv); + -- Extract and show the most relevant file and line number + IF INSTR(vAdjustedBacktrace, '-> ') > 0 THEN + DBMS_OUTPUT.PUT_LINE('SOURCE FILE LOCATION: ' || REGEXP_SUBSTR(vAdjustedBacktrace, '-> [^)]+')); + END IF; + DBMS_OUTPUT.PUT_LINE('FULL DETAILS: Query A_PROCESS_LOG table for complete diagnostic info'); + DBMS_OUTPUT.PUT_LINE('QUERY (This Error): SELECT * FROM CT_MRDS.A_PROCESS_LOG WHERE GUID = ''' || guid || ''' ORDER BY LOG_TIMESTAMP DESC;'); + DBMS_OUTPUT.PUT_LINE('QUERY (Recent All): SELECT * FROM CT_MRDS.A_PROCESS_LOG WHERE LOG_TIMESTAMP >= SYSDATE - 1/1440 ORDER BY LOG_TIMESTAMP DESC;'); + DBMS_OUTPUT.PUT_LINE('======================================================='); + END IF; + + END LOG_PROCESS_ERROR; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION ANALYZE_VALIDATION_ERRORS( + pValidationLogTable VARCHAR2, + pTemplateSchema VARCHAR2, + pTemplateTable VARCHAR2, + pCsvFileUri VARCHAR2 + ) RETURN VARCHAR2 + IS + vAnalysisReport CLOB := ''; + vCsvHeader VARCHAR2(4000); + vExpectedOrder VARCHAR2(4000); + vCsvOrder VARCHAR2(4000); + vErrorDetails VARCHAR2(32000) := ''; + vSolutions VARCHAR2(4000); + vColumnMismatch VARCHAR2(1000); + vErrorCount NUMBER := 0; + vFirstDataError VARCHAR2(1000); + vErrorColumn VARCHAR2(100); + vErrorValue VARCHAR2(500); + vExpectedType VARCHAR2(100); + vTemplateColCount NUMBER := 0; + vCsvColCount NUMBER := 0; + vExcessColumns VARCHAR2(2000); + vCsvFirstLine VARCHAR2(4000); + + 
-- Cursor for template table columns + CURSOR c_template_columns IS + SELECT COLUMN_NAME, DATA_TYPE, COLUMN_ID + FROM ALL_TAB_COLUMNS + WHERE OWNER = UPPER(REGEXP_SUBSTR(pTemplateSchema || '.' || pTemplateTable, '^([^.]+)')) + AND TABLE_NAME = UPPER(REGEXP_SUBSTR(pTemplateSchema || '.' || pTemplateTable, '\.(.+)$', 1, 1, NULL, 1)) + ORDER BY COLUMN_ID; + + BEGIN + -- Build expected column order from template table and count columns + FOR rec IN c_template_columns LOOP + IF vExpectedOrder IS NOT NULL THEN + vExpectedOrder := vExpectedOrder || ', '; + END IF; + vExpectedOrder := vExpectedOrder || rec.COLUMN_NAME; + vTemplateColCount := vTemplateColCount + 1; + END LOOP; + + -- Parse validation log table for errors and CSV structure + BEGIN + -- Try to extract error information from the validation log table + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ' || pValidationLogTable || + ' WHERE record LIKE ''error processing column%''' + INTO vErrorCount; + + -- Get first error details + IF vErrorCount > 0 THEN + EXECUTE IMMEDIATE 'SELECT record FROM ' || pValidationLogTable || + ' WHERE record LIKE ''error processing column%'' AND ROWNUM = 1' + INTO vFirstDataError; + + -- Parse error to extract column name and error type + vErrorColumn := REGEXP_SUBSTR(vFirstDataError, 'error processing column ([A-Z_]+)', 1, 1, NULL, 1); + + -- Try to get the actual error value from ORA-01722 message + BEGIN + EXECUTE IMMEDIATE 'SELECT record FROM ' || pValidationLogTable || + ' WHERE record LIKE ''ORA-01722%'' AND ROWNUM = 1' + INTO vFirstDataError; + vErrorValue := REGEXP_SUBSTR(vFirstDataError, 'string value containing ''([^'']+)''', 1, 1, NULL, 1); + EXCEPTION + WHEN NO_DATA_FOUND THEN + vErrorValue := 'unknown value'; + WHEN OTHERS THEN + vErrorValue := 'parsing error'; + END; + END IF; + + -- Try to extract CSV structure from validation log field definitions + BEGIN + EXECUTE IMMEDIATE ' + SELECT LISTAGG( + REGEXP_SUBSTR(record, ''^\s+([A-Z_]+)\s+'', 1, 1, NULL, 1), + '', '' + ) WITHIN 
GROUP (ORDER BY ROWNUM) + FROM ' || pValidationLogTable || ' + WHERE record LIKE '' %CHAR%'' + AND record NOT LIKE ''%Fields in Data Source%'' + AND REGEXP_SUBSTR(record, ''^\s+([A-Z_]+)\s+'') IS NOT NULL' + INTO vCsvOrder; + + -- Count CSV columns from parsed structure + IF vCsvOrder IS NOT NULL THEN + vCsvColCount := REGEXP_COUNT(vCsvOrder, ',') + 1; + END IF; + + EXCEPTION + WHEN OTHERS THEN + vCsvOrder := 'Unable to determine CSV column order from validation log'; + END; + + -- Alternative method: Try to read first line of CSV directly for column count + IF vCsvColCount = 0 THEN + BEGIN + -- This is a fallback - try to get CSV header from external source if possible + -- Note: This would require DBMS_CLOUD.GET_OBJECT or similar approach + -- For now, we'll rely on the validation log parsing + NULL; + EXCEPTION + WHEN OTHERS THEN + NULL; + END; + END IF; + + EXCEPTION + WHEN OTHERS THEN + vErrorDetails := 'Error analyzing validation log: ' || SQLERRM; + END; + + -- Detect column order mismatch and excess columns + IF vCsvOrder IS NOT NULL AND vExpectedOrder IS NOT NULL THEN + IF UPPER(REPLACE(vCsvOrder, ' ', '')) != UPPER(REPLACE(vExpectedOrder, ' ', '')) THEN + vColumnMismatch := 'YES'; + ELSE + vColumnMismatch := 'NO'; + END IF; + END IF; + + -- Check for excess columns + IF vCsvColCount > vTemplateColCount THEN + -- Try to identify which columns are excess + IF vCsvOrder IS NOT NULL THEN + -- Parse CSV columns and compare with template + DECLARE + vCsvCols SYS.ODCIVARCHAR2LIST; + vTemplateCols SYS.ODCIVARCHAR2LIST; + vExcessFound VARCHAR2(1) := 'N'; + i NUMBER; + BEGIN + -- Split CSV columns + SELECT TRIM(REGEXP_SUBSTR(vCsvOrder, '[^,]+', 1, LEVEL)) + BULK COLLECT INTO vCsvCols + FROM DUAL + CONNECT BY REGEXP_SUBSTR(vCsvOrder, '[^,]+', 1, LEVEL) IS NOT NULL; + + -- Split template columns + SELECT TRIM(REGEXP_SUBSTR(vExpectedOrder, '[^,]+', 1, LEVEL)) + BULK COLLECT INTO vTemplateCols + FROM DUAL + CONNECT BY REGEXP_SUBSTR(vExpectedOrder, '[^,]+', 1, LEVEL) IS 
NOT NULL; + + -- Find excess columns (those in CSV but not in template) + FOR i IN 1..vCsvCols.COUNT LOOP + DECLARE + vFoundInTemplate BOOLEAN := FALSE; + j NUMBER; + BEGIN + -- Check if CSV column exists in template + FOR j IN 1..vTemplateCols.COUNT LOOP + IF UPPER(TRIM(vCsvCols(i))) = UPPER(TRIM(vTemplateCols(j))) THEN + vFoundInTemplate := TRUE; + EXIT; + END IF; + END LOOP; + + -- If not found in template, it's an excess column + IF NOT vFoundInTemplate THEN + IF vExcessFound = 'Y' THEN + vExcessColumns := vExcessColumns || ', '; + END IF; + vExcessColumns := vExcessColumns || vCsvCols(i); + vExcessFound := 'Y'; + END IF; + END; + END LOOP; + EXCEPTION + WHEN OTHERS THEN + vExcessColumns := 'Unable to determine specific excess columns'; + END; + END IF; + END IF; + + -- Build comprehensive analysis report + vAnalysisReport := 'FILE VALIDATION FAILED - DETAILED ANALYSIS' || cgBL || + '=================================================' || cgBL || cgBL; + + -- Column structure analysis + vAnalysisReport := vAnalysisReport || + 'COLUMN STRUCTURE ANALYSIS:' || cgBL || + '---------------------------------------------------' || cgBL || + 'Template Expected Order: ' || vExpectedOrder || cgBL || + 'Template Column Count: ' || vTemplateColCount || cgBL || + 'CSV Detected Order: ' || NVL(vCsvOrder, 'Unknown') || cgBL || + 'CSV Column Count: ' || vCsvColCount || cgBL || cgBL; + + -- Report column count issues + IF vCsvColCount > vTemplateColCount THEN + vAnalysisReport := vAnalysisReport || + 'EXCESS COLUMNS DETECTED!' || cgBL || + 'CSV file has ' || (vCsvColCount - vTemplateColCount) || ' more columns than template allows.' || cgBL; + IF vExcessColumns IS NOT NULL THEN + vAnalysisReport := vAnalysisReport || + 'Excess columns found: ' || vExcessColumns || cgBL; + END IF; + vAnalysisReport := vAnalysisReport || cgBL; + END IF; + + -- Report column order issues + IF vColumnMismatch = 'YES' THEN + vAnalysisReport := vAnalysisReport || + 'COLUMN ORDER MISMATCH DETECTED!' 
|| cgBL || + 'CSV columns are in different order than template expects.' || cgBL || cgBL; + END IF; + + -- Specific error analysis + IF vErrorCount > 0 THEN + vAnalysisReport := vAnalysisReport || + 'SPECIFIC ERRORS FOUND:' || cgBL || + '---------------------------------------------------' || cgBL; + + IF vErrorColumn IS NOT NULL THEN + -- Get expected data type for error column + FOR rec IN c_template_columns LOOP + IF rec.COLUMN_NAME = vErrorColumn THEN + vExpectedType := rec.DATA_TYPE; + EXIT; + END IF; + END LOOP; + + vAnalysisReport := vAnalysisReport || + '1. Column ' || vErrorColumn || ': Expected ' || vExpectedType || + ', received "' || NVL(vErrorValue, 'unknown value') || '" (TEXT)' || cgBL || + ' → CSV position contains different data type than expected' || cgBL; + END IF; + + vAnalysisReport := vAnalysisReport || + 'Total validation errors found: ' || vErrorCount || cgBL || cgBL; + END IF; + + -- Solutions section + vAnalysisReport := vAnalysisReport || + 'SUGGESTED SOLUTIONS:' || cgBL || + '---------------------------------------------------' || cgBL; + + -- Solutions for excess columns + IF vCsvColCount > vTemplateColCount THEN + vAnalysisReport := vAnalysisReport || + 'FOR EXCESS COLUMNS:' || cgBL || + '• Remove extra columns from CSV file' || cgBL || + '• Keep only these columns in this order: ' || vExpectedOrder || cgBL; + IF vExcessColumns IS NOT NULL THEN + vAnalysisReport := vAnalysisReport || + '• Specifically remove: ' || vExcessColumns || cgBL; + END IF; + vAnalysisReport := vAnalysisReport || cgBL; + END IF; + + -- Solutions for column order + IF vColumnMismatch = 'YES' THEN + vAnalysisReport := vAnalysisReport || + 'FOR COLUMN ORDER:' || cgBL || + '• Reorder CSV columns to match template: ' || vExpectedOrder || cgBL || + '• Or update template table column order to match CSV file' || cgBL || cgBL; + END IF; + + -- General solutions + vAnalysisReport := vAnalysisReport || + 'GENERAL RECOMMENDATIONS:' || cgBL || + '• Ensure CSV has exactly ' 
|| vTemplateColCount || ' columns' || cgBL || + '• Verify column names match template table exactly' || cgBL || + '• Check data types in each column match expectations' || cgBL || cgBL; + + -- Validation log reference + vAnalysisReport := vAnalysisReport || + 'TECHNICAL DETAILS:' || cgBL || + '---------------------------------------------------' || cgBL || + 'Validation Log Table: ' || pValidationLogTable || cgBL || + 'Template Table: ' || pTemplateSchema || '.' || pTemplateTable || cgBL || + 'CSV File: ' || pCsvFileUri || cgBL || + 'Query validation details: SELECT * FROM ' || pValidationLogTable || ';' || cgBL; + + RETURN vAnalysisReport; + + EXCEPTION + WHEN OTHERS THEN + RETURN 'Error generating validation analysis: ' || SQLERRM || cgBL || + 'Validation Log Table: ' || pValidationLogTable || cgBL || + 'Check table manually: SELECT * FROM ' || pValidationLogTable || ';'; + END ANALYZE_VALIDATION_ERRORS; + + ---------------------------------------------------------------------------------------------------- + -- PACKAGE VERSION MANAGEMENT FUNCTIONS IMPLEMENTATION + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION + RETURN VARCHAR2 + IS + BEGIN + RETURN PACKAGE_VERSION; + END GET_VERSION; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_BUILD_INFO + RETURN VARCHAR2 + IS + BEGIN + RETURN GET_PACKAGE_VERSION_INFO( + pPackageName => 'ENV_MANAGER', + pVersion => PACKAGE_VERSION, + pBuildDate => PACKAGE_BUILD_DATE, + pAuthor => PACKAGE_AUTHOR + ); + END GET_BUILD_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION_HISTORY + RETURN VARCHAR2 + IS + BEGIN + RETURN FORMAT_VERSION_HISTORY( + pPackageName => 'ENV_MANAGER', + pVersionHistory => VERSION_HISTORY + ); + END GET_VERSION_HISTORY; + + 
---------------------------------------------------------------------------------------------------- + + FUNCTION GET_PACKAGE_VERSION_INFO( + pPackageName VARCHAR2, + pVersion VARCHAR2, + pBuildDate VARCHAR2, + pAuthor VARCHAR2 + ) RETURN VARCHAR2 + IS + BEGIN + RETURN 'Package: ' || pPackageName || cgBL || + 'Version: ' || pVersion || cgBL || + 'Build Date: ' || pBuildDate || cgBL || + 'Author: ' || pAuthor; + END GET_PACKAGE_VERSION_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION FORMAT_VERSION_HISTORY( + pPackageName VARCHAR2, + pVersionHistory VARCHAR2 + ) RETURN VARCHAR2 + IS + BEGIN + RETURN pPackageName || ' Version History:' || cgBL || pVersionHistory; + END FORMAT_VERSION_HISTORY; + + ---------------------------------------------------------------------------------------------------- + -- PACKAGE HASH + CHANGE DETECTION FUNCTIONS IMPLEMENTATION + ---------------------------------------------------------------------------------------------------- + + FUNCTION CALCULATE_PACKAGE_HASH( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageType VARCHAR2 + ) RETURN VARCHAR2 + IS + vSourceCode CLOB; + vHash VARCHAR2(64); + vRawHash RAW(32); + BEGIN + -- Build complete source code from ALL_SOURCE using XMLAGG (no 4000 char limit) + -- CRITICAL: Cannot use LISTAGG due to VARCHAR2 limit + SELECT XMLAGG(XMLELEMENT(E, TEXT) ORDER BY LINE).GETCLOBVAL() + INTO vSourceCode + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = UPPER(pPackageType); + + -- If empty, return NULL + IF vSourceCode IS NULL OR DBMS_LOB.GETLENGTH(vSourceCode) = 0 THEN + RETURN NULL; + END IF; + + -- Calculate SHA256 hash directly from CLOB + -- DBMS_CRYPTO.HASH has overload for CLOB in Oracle 19c+ + vRawHash := DBMS_CRYPTO.HASH( + src => vSourceCode, + typ => DBMS_CRYPTO.HASH_SH256 + ); + + -- Convert to hex string + vHash := LOWER(RAWTOHEX(vRawHash)); + + RETURN 
vHash; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN NULL; + WHEN OTHERS THEN + LOG_PROCESS_ERROR('Error calculating package hash: ' || SQLERRM, + 'pPackageOwner=' || pPackageOwner || ', pPackageName=' || pPackageName); + RETURN NULL; + END CALCULATE_PACKAGE_HASH; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE TRACK_PACKAGE_VERSION( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageVersion VARCHAR2, + pPackageBuildDate VARCHAR2, + pPackageAuthor VARCHAR2 + ) + IS + vHashSpec VARCHAR2(64); + vHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLineCountSpec NUMBER; + vLineCountBody NUMBER; + vChangeDetected CHAR(1) := 'N'; + vChangeMessage VARCHAR2(4000); + vParameters VARCHAR2(4000); + BEGIN + vParameters := FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( + 'pPackageOwner => ''' || pPackageOwner || '''', + 'pPackageName => ''' || pPackageName || '''', + 'pPackageVersion => ''' || pPackageVersion || '''' + )); + + LOG_PROCESS_EVENT('Start TRACK_PACKAGE_VERSION', 'INFO', vParameters); + + -- Calculate current hashes + vHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get line counts + BEGIN + SELECT COUNT(*) + INTO vLineCountSpec + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = 'PACKAGE'; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vLineCountSpec := 0; + END; + + BEGIN + SELECT COUNT(*) + INTO vLineCountBody + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = 'PACKAGE BODY'; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vLineCountBody := 0; + END; + + -- Get last tracked version and hashes + BEGIN + SELECT PACKAGE_VERSION, SOURCE_CODE_HASH_SPEC, SOURCE_CODE_HASH_BODY + INTO vLastVersion, vLastHashSpec, vLastHashBody + FROM 
CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + + -- Check if hash changed but version didn't + IF (vHashSpec != vLastHashSpec OR NVL(vHashBody,'X') != NVL(vLastHashBody,'X')) + AND pPackageVersion = vLastVersion THEN + + vChangeDetected := 'Y'; + vChangeMessage := 'WARNING: Source code changed without version update!' || cgBL || + 'Last Version: ' || vLastVersion || cgBL || + 'Current Version: ' || pPackageVersion || cgBL; + + IF vHashSpec != vLastHashSpec THEN + vChangeMessage := vChangeMessage || + 'SPEC Changed - Hash: ' || SUBSTR(vHashSpec, 1, 16) || '... (was: ' || + SUBSTR(vLastHashSpec, 1, 16) || '...)' || cgBL; + END IF; + + IF NVL(vHashBody,'X') != NVL(vLastHashBody,'X') THEN + vChangeMessage := vChangeMessage || + 'BODY Changed - Hash: ' || SUBSTR(vHashBody, 1, 16) || '... (was: ' || + SUBSTR(NVL(vLastHashBody,'NULL'), 1, 16) || '...)' || cgBL; + END IF; + + vChangeMessage := vChangeMessage || + 'RECOMMENDATION: Update PACKAGE_VERSION constant and PACKAGE_BUILD_DATE'; + + LOG_PROCESS_EVENT(vChangeMessage, 'WARNING', vParameters); + END IF; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- First time tracking this package + vChangeDetected := 'N'; + vChangeMessage := 'First tracking record for this package'; + LOG_PROCESS_EVENT(vChangeMessage, 'INFO', vParameters); + END; + + -- Insert tracking record + INSERT INTO CT_MRDS.A_PACKAGE_VERSION_TRACKING ( + PACKAGE_OWNER, + PACKAGE_NAME, + PACKAGE_TYPE, + PACKAGE_VERSION, + PACKAGE_BUILD_DATE, + PACKAGE_AUTHOR, + SOURCE_CODE_HASH_SPEC, + SOURCE_CODE_HASH_BODY, + LINE_COUNT_SPEC, + LINE_COUNT_BODY, + DETECTED_CHANGE_WITHOUT_VERSION, + CHANGE_DETECTION_MESSAGE + ) VALUES ( + UPPER(pPackageOwner), + UPPER(pPackageName), + 'BOTH', + pPackageVersion, + pPackageBuildDate, + pPackageAuthor, + vHashSpec, + vHashBody, + vLineCountSpec, + vLineCountBody, + vChangeDetected, + vChangeMessage + ); 
+ + COMMIT; + + LOG_PROCESS_EVENT('End TRACK_PACKAGE_VERSION - Record inserted', 'INFO', vParameters); + + EXCEPTION + WHEN OTHERS THEN + LOG_PROCESS_ERROR('Error in TRACK_PACKAGE_VERSION: ' || SQLERRM, vParameters); + RAISE; + END TRACK_PACKAGE_VERSION; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION CHECK_PACKAGE_CHANGES( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2 + IS + vCurrentHashSpec VARCHAR2(64); + vCurrentHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLastTrackingDate TIMESTAMP; + vChangeReport VARCHAR2(4000); + vSpecChanged BOOLEAN := FALSE; + vBodyChanged BOOLEAN := FALSE; + BEGIN + -- Get current hashes + vCurrentHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vCurrentHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get last tracked hashes + BEGIN + SELECT PACKAGE_VERSION, SOURCE_CODE_HASH_SPEC, SOURCE_CODE_HASH_BODY, TRACKING_DATE + INTO vLastVersion, vLastHashSpec, vLastHashBody, vLastTrackingDate + FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN 'Package ' || pPackageOwner || '.' || pPackageName || ' has never been tracked.' || cgBL || + 'Run TRACK_PACKAGE_VERSION to establish baseline.'; + END; + + -- Check for changes + IF vCurrentHashSpec != vLastHashSpec THEN + vSpecChanged := TRUE; + END IF; + + IF NVL(vCurrentHashBody, 'X') != NVL(vLastHashBody, 'X') THEN + vBodyChanged := TRUE; + END IF; + + -- Build report + IF vSpecChanged OR vBodyChanged THEN + vChangeReport := 'WARNING: Package ' || pPackageOwner || '.' || pPackageName || ' has changed!' 
|| cgBL || + '========================================' || cgBL || + 'Last Tracked Version: ' || vLastVersion || cgBL || + 'Last Tracked Date: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + cgBL; + + IF vSpecChanged THEN + vChangeReport := vChangeReport || + 'SPECIFICATION Changed:' || cgBL || + ' Current Hash: ' || SUBSTR(vCurrentHashSpec, 1, 16) || '...' || cgBL || + ' Last Hash: ' || SUBSTR(vLastHashSpec, 1, 16) || '...' || cgBL || + cgBL; + END IF; + + IF vBodyChanged THEN + vChangeReport := vChangeReport || + 'BODY Changed:' || cgBL || + ' Current Hash: ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 16) || '...' || cgBL || + ' Last Hash: ' || SUBSTR(NVL(vLastHashBody, 'NULL'), 1, 16) || '...' || cgBL || + cgBL; + END IF; + + vChangeReport := vChangeReport || + 'RECOMMENDATION:' || cgBL || + '1. Update PACKAGE_VERSION constant' || cgBL || + '2. Update PACKAGE_BUILD_DATE constant' || cgBL || + '3. Add entry to VERSION_HISTORY' || cgBL || + '4. Call TRACK_PACKAGE_VERSION to update tracking'; + ELSE + vChangeReport := 'OK: Package ' || pPackageOwner || '.' || pPackageName || ' has not changed.' 
|| cgBL || + 'Last Tracked: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + 'Version: ' || vLastVersion; + END IF; + + RETURN vChangeReport; + + EXCEPTION + WHEN OTHERS THEN + RETURN 'Error checking package changes: ' || SQLERRM; + END CHECK_PACKAGE_CHANGES; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_PACKAGE_HASH_INFO( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2 + IS + vCurrentHashSpec VARCHAR2(64); + vCurrentHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLastTrackingDate TIMESTAMP; + vLastChangeDetected CHAR(1); + vInfo VARCHAR2(4000); + BEGIN + -- Get current hashes + vCurrentHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vCurrentHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get last tracking info + BEGIN + SELECT PACKAGE_VERSION, + SOURCE_CODE_HASH_SPEC, + SOURCE_CODE_HASH_BODY, + TRACKING_DATE, + DETECTED_CHANGE_WITHOUT_VERSION + INTO vLastVersion, vLastHashSpec, vLastHashBody, vLastTrackingDate, vLastChangeDetected + FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN 'Package: ' || pPackageOwner || '.' || pPackageName || cgBL || + 'Status: Never tracked' || cgBL || + 'Current Hash (SPEC): ' || SUBSTR(vCurrentHashSpec, 1, 16) || '...' || cgBL || + 'Current Hash (BODY): ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 16) || '...'; + END; + + -- Build info report + vInfo := 'Package: ' || pPackageOwner || '.' 
|| pPackageName || cgBL || + 'Current Version: ' || vLastVersion || cgBL || + 'Last Tracked: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + cgBL || + 'Current Hash (SPEC): ' || SUBSTR(vCurrentHashSpec, 1, 32) || '...' || cgBL || + 'Last Hash (SPEC): ' || SUBSTR(vLastHashSpec, 1, 32) || '...' || cgBL; + + IF vCurrentHashBody IS NOT NULL OR vLastHashBody IS NOT NULL THEN + vInfo := vInfo || + 'Current Hash (BODY): ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 32) || '...' || cgBL || + 'Last Hash (BODY): ' || SUBSTR(NVL(vLastHashBody, 'NULL'), 1, 32) || '...' || cgBL; + END IF; + + vInfo := vInfo || cgBL; + + -- Status + IF vCurrentHashSpec = vLastHashSpec AND NVL(vCurrentHashBody, 'X') = NVL(vLastHashBody, 'X') THEN + vInfo := vInfo || 'Status: OK - No changes detected'; + ELSE + vInfo := vInfo || 'Status: CHANGED - Source code modified since last tracking'; + END IF; + + IF vLastChangeDetected = 'Y' THEN + vInfo := vInfo || cgBL || 'Last Tracking Warning: Change detected without version update'; + END IF; + + RETURN vInfo; + + EXCEPTION + WHEN OTHERS THEN + RETURN 'Error getting package hash info: ' || SQLERRM; + END GET_PACKAGE_HASH_INFO; + + ---------------------------------------------------------------------------------------------------- + +BEGIN + INIT_ERRORS; + guid := sys_guid(); + gvUsername := SYS_CONTEXT('USERENV', 'SESSION_USER'); + gvOsuser := SYS_CONTEXT('USERENV', 'OS_USER'); + gvMachine := SYS_CONTEXT('USERENV', 'HOST'); + gvModule := SYS_CONTEXT('USERENV', 'MODULE'); + + -- Get info about EnvironmentID. Without it package cannot proceed further. + -- Information about environment is needed to get proper configuration values + -- It can be set up in two different ways : + -- 1. Set it on session level: execute DBMS_SESSION.SET_IDENTIFIER (client_id => 'dev'); + -- 2. 
Set it on configuration level: Insert into CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID,CONFIG_VARIABLE,CONFIG_VARIABLE_VALUE) values ('default','environment_id','dev'); + -- Session level setup (1.) takes precedence over configuration level one (2.) + + gvEnv := nvl(SYS_CONTEXT ('USERENV', 'CLIENT_IDENTIFIER'), GET_DEFAULT_ENV()); + if gvEnv is null then + dbms_output.put_line(MSG_ENVIRONMENT_NOT_SET); + LOG_PROCESS_EVENT(MSG_ENVIRONMENT_NOT_SET, 'ERROR'); + RAISE_APPLICATION_ERROR(CODE_ENVIRONMENT_NOT_SET, MSG_ENVIRONMENT_NOT_SET); + else + dbms_output.put_line('EnvironmentID set to: '||gvEnv); + end if; + + INIT_VARIABLES(pEnv => gvEnv); +END ENV_MANAGER; + +/ + +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkg new file mode 100644 index 0000000..fded944 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkg @@ -0,0 +1,625 @@ +create or replace PACKAGE CT_MRDS.ENV_MANAGER +AUTHID CURRENT_USER +AS + /** + * General comment for package: Please put comments for functions and procedures as shown in below example. + * It is a standard. + * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function + * which returns documentation text for confluence page (to Copy-Paste it). 
+ **/ + + -- Example comment: + /** + * @name EX_PROCEDURE_NAME + * @desc Procedure description + * @example select ENV_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual; + * @ex_rslt Example Result + **/ + + -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.2.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-12-20 10:00:00'; + PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; + + -- Version History (Latest changes first) + VERSION_HISTORY CONSTANT VARCHAR2(4000) := + '3.2.0 (2025-12-20): Added error codes for parallel execution support (CODE_INVALID_PARALLEL_DEGREE -20110, CODE_PARALLEL_EXECUTION_FAILED -20111)' || CHR(13)||CHR(10) || + '3.1.0 (2025-10-22): Added package hash tracking and automatic change detection system (SHA256 hashing)' || CHR(13)||CHR(10) || + '3.0.0 (2025-10-22): Added package versioning system with centralized version management functions' || CHR(13)||CHR(10) || + '2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function for comprehensive CSV validation analysis' || CHR(13)||CHR(10) || + '2.0.0 (2025-10-01): Added LOG_PROCESS_ERROR procedure with enhanced error diagnostics and stack traces' || CHR(13)||CHR(10) || + '1.5.0 (2025-09-20): Added console logging support with gvConsoleLoggingEnabled configuration' || CHR(13)||CHR(10) || + '1.0.0 (2025-09-01): Initial release with error management and configuration system'; + + TYPE Error_Record IS RECORD ( + code PLS_INTEGER, + message VARCHAR2(4000) + ); + + TYPE tErrorList IS TABLE OF Error_Record INDEX BY PLS_INTEGER; + + Errors tErrorList; + + + guid VARCHAR2(32); + gvEnv VARCHAR2(200); + gvUsername VARCHAR2(128); + gvOsuser VARCHAR2(128); + gvMachine VARCHAR2(64); + gvModule VARCHAR2(64); + + gvNameSpace VARCHAR2(200); + gvRegion VARCHAR2(200); + gvDataBucketName VARCHAR2(200); + gvInboxBucketName VARCHAR2(200); + gvArchiveBucketName VARCHAR2(200); + gvDataBucketUri VARCHAR2(200); + 
gvInboxBucketUri VARCHAR2(200); + gvArchiveBucketUri VARCHAR2(200); + gvCredentialName VARCHAR2(200); + + -- Overwritten by variable "LoggingEnabled" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvLoggingEnabled VARCHAR2(3) := 'ON'; -- 'ON' or 'OFF' + + -- Overwritten by variable "MinLogLevel" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + -- Possible values: DEBUG ,INFO ,WARNING ,ERROR + gvMinLogLevel VARCHAR2(10) := 'DEBUG'; + + -- Overwritten by variable "DefaultDateFormat" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvDefaultDateFormat VARCHAR2(200) := 'DD/MM/YYYY HH24:MI:SS'; + + -- Overwritten by variable "ConsoleLoggingEnabled" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvConsoleLoggingEnabled VARCHAR2(3) := 'ON'; -- 'ON' or 'OFF' + + cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); + + vgSourceFileConfigKey PLS_INTEGER; + + vgMsgTmp VARCHAR2(32000); + --Exceptions + ERR_EMPTY_FILEURI_AND_RECKEY EXCEPTION; + CODE_EMPTY_FILEURI_AND_RECKEY CONSTANT PLS_INTEGER := -20001; + MSG_EMPTY_FILEURI_AND_RECKEY VARCHAR2(4000) := 'Either pFileUri or pSourceFileReceivedKey must be not null'; + PRAGMA EXCEPTION_INIT( ERR_EMPTY_FILEURI_AND_RECKEY + ,CODE_EMPTY_FILEURI_AND_RECKEY); + + + ERR_NO_CONFIG_MATCH_FOR_FILEURI EXCEPTION; + CODE_NO_CONFIG_MATCH_FOR_FILEURI CONSTANT PLS_INTEGER := -20002; + MSG_NO_CONFIG_MATCH_FOR_FILEURI VARCHAR2(4000) := 'No match for source file in A_SOURCE_FILE_CONFIG table' + ||cgBL||' The file provided in parameter: pFileUri does not have ' + ||cgBL||' coresponding configuration in A_SOURCE_FILE_CONFIG table'; + PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_MATCH_FOR_FILEURI + ,CODE_NO_CONFIG_MATCH_FOR_FILEURI); + + ERR_MULTIPLE_MATCH_FOR_SRCFILE EXCEPTION; + CODE_MULTIPLE_MATCH_FOR_SRCFILE CONSTANT PLS_INTEGER := -20003; + MSG_MULTIPLE_MATCH_FOR_SRCFILE VARCHAR2(4000) := 'Multiple match for source file in A_SOURCE_FILE_CONFIG table'; + PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_MATCH_FOR_SRCFILE + ,CODE_MULTIPLE_MATCH_FOR_SRCFILE); + + 
ERR_MISSING_COLUMN_DATE_FORMAT EXCEPTION; + CODE_MISSING_COLUMN_DATE_FORMAT CONSTANT PLS_INTEGER := -20004; + MSG_MISSING_COLUMN_DATE_FORMAT VARCHAR2(4000) := 'Missing entry in config table: A_COLUMN_DATE_FORMAT primary key(TEMPLATE_TABLE_NAME, COLUMN_NAME)' + ||cgBL||' Remember: each column which data_type IN (''DATE'', ''TIMESTAMP'')' + ||cgBL||' should have DateFormat specified in A_COLUMN_DATE_FORMAT table ' + ||cgBL||' for example: ''YYYY-MM-DD'''; + PRAGMA EXCEPTION_INIT( ERR_MISSING_COLUMN_DATE_FORMAT + ,CODE_MISSING_COLUMN_DATE_FORMAT); + + ERR_MULTIPLE_COLUMN_DATE_FORMAT EXCEPTION; + CODE_MULTIPLE_COLUMN_DATE_FORMAT CONSTANT PLS_INTEGER := -20005; + MSG_MULTIPLE_COLUMN_DATE_FORMAT VARCHAR2(4000) := 'Multiple records for date format in A_COLUMN_DATE_FORMAT table' + ||cgBL||' There should be only one format specified for each DAT/TIMESTAMP column'; + PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_COLUMN_DATE_FORMAT + ,CODE_MULTIPLE_COLUMN_DATE_FORMAT); + + + ERR_DIDNT_GET_LOAD_OPERATION_ID EXCEPTION; + CODE_DIDNT_GET_LOAD_OPERATION_ID CONSTANT PLS_INTEGER := -20006; + MSG_DIDNT_GET_LOAD_OPERATION_ID VARCHAR2(4000) := 'Didnt get load operation id from external table validation'; + PRAGMA EXCEPTION_INIT( ERR_DIDNT_GET_LOAD_OPERATION_ID + ,CODE_DIDNT_GET_LOAD_OPERATION_ID); + + ERR_NO_CONFIG_FOR_RECEIVED_FILE EXCEPTION; + CODE_NO_CONFIG_FOR_RECEIVED_FILE CONSTANT PLS_INTEGER := -20007; + MSG_NO_CONFIG_FOR_RECEIVED_FILE VARCHAR2(4000) := 'No match for received source file in A_SOURCE_FILE_CONFIG ' + ||cgBL||' or missing data in A_SOURCE_FILE_RECEIVED table for provided pSourceFileReceivedKey parameter'; + PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_FOR_RECEIVED_FILE + ,CODE_NO_CONFIG_FOR_RECEIVED_FILE); + + ERR_MULTI_CONFIG_FOR_RECEIVED_FILE EXCEPTION; + CODE_MULTI_CONFIG_FOR_RECEIVED_FILE CONSTANT PLS_INTEGER := -20008; + MSG_MULTI_CONFIG_FOR_RECEIVED_FILE VARCHAR2(4000) := 'Multiple matchs for received source file in A_SOURCE_FILE_CONFIG'; + PRAGMA EXCEPTION_INIT( 
ERR_MULTI_CONFIG_FOR_RECEIVED_FILE + ,CODE_MULTI_CONFIG_FOR_RECEIVED_FILE); + + ERR_FILE_NOT_FOUND_ON_CLOUD EXCEPTION; + CODE_FILE_NOT_FOUND_ON_CLOUD CONSTANT PLS_INTEGER := -20009; + MSG_FILE_NOT_FOUND_ON_CLOUD VARCHAR2(4000) := 'File not found on the cloud'; + PRAGMA EXCEPTION_INIT( ERR_FILE_NOT_FOUND_ON_CLOUD + ,CODE_FILE_NOT_FOUND_ON_CLOUD); + + ERR_FILE_VALIDATION_FAILED EXCEPTION; + CODE_FILE_VALIDATION_FAILED CONSTANT PLS_INTEGER := -20010; + MSG_FILE_VALIDATION_FAILED VARCHAR2(4000) := 'File validation failed'; + PRAGMA EXCEPTION_INIT( ERR_FILE_VALIDATION_FAILED + ,CODE_FILE_VALIDATION_FAILED); + + ERR_EXCESS_COLUMNS_DETECTED EXCEPTION; + CODE_EXCESS_COLUMNS_DETECTED CONSTANT PLS_INTEGER := -20011; + MSG_EXCESS_COLUMNS_DETECTED VARCHAR2(4000) := 'CSV file contains more columns than template allows'; + PRAGMA EXCEPTION_INIT( ERR_EXCESS_COLUMNS_DETECTED + ,CODE_EXCESS_COLUMNS_DETECTED); + + ERR_NO_CONFIG_MATCH EXCEPTION; + CODE_NO_CONFIG_MATCH CONSTANT PLS_INTEGER := -20012; + MSG_NO_CONFIG_MATCH VARCHAR2(4000) := 'No match for specified parameters in A_SOURCE_FILE_CONFIG table'; + PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_MATCH + ,CODE_NO_CONFIG_MATCH); + + ERR_UNKNOWN_PREFIX EXCEPTION; + CODE_UNKNOWN_PREFIX CONSTANT PLS_INTEGER := -20013; + MSG_UNKNOWN_PREFIX VARCHAR2(4000) := 'Unknown prefix'; + PRAGMA EXCEPTION_INIT( ERR_UNKNOWN_PREFIX + ,CODE_UNKNOWN_PREFIX); + + ERR_TABLE_NOT_EXISTS EXCEPTION; + CODE_TABLE_NOT_EXISTS CONSTANT PLS_INTEGER := -20014; + MSG_TABLE_NOT_EXISTS VARCHAR2(4000) := 'Table does not exist'; + PRAGMA EXCEPTION_INIT( ERR_TABLE_NOT_EXISTS + ,CODE_TABLE_NOT_EXISTS); + + ERR_COLUMN_NOT_EXISTS EXCEPTION; + CODE_COLUMN_NOT_EXISTS CONSTANT PLS_INTEGER := -20015; + MSG_COLUMN_NOT_EXISTS VARCHAR2(4000) := 'Column does not exist in table'; + PRAGMA EXCEPTION_INIT( ERR_COLUMN_NOT_EXISTS + ,CODE_COLUMN_NOT_EXISTS); + + ERR_UNSUPPORTED_DATA_TYPE EXCEPTION; + CODE_UNSUPPORTED_DATA_TYPE CONSTANT PLS_INTEGER := -20016; + MSG_UNSUPPORTED_DATA_TYPE 
VARCHAR2(4000) := 'Unsupported data type'; + PRAGMA EXCEPTION_INIT( ERR_UNSUPPORTED_DATA_TYPE + ,CODE_UNSUPPORTED_DATA_TYPE); + + ERR_MISSING_SOURCE_KEY EXCEPTION; + CODE_MISSING_SOURCE_KEY CONSTANT PLS_INTEGER := -20017; + MSG_MISSING_SOURCE_KEY VARCHAR2(4000) := 'The Source was not found in parent table A_SOURCE'; + PRAGMA EXCEPTION_INIT( ERR_MISSING_SOURCE_KEY + ,CODE_MISSING_SOURCE_KEY); + + ERR_NULL_SOURCE_FILE_CONFIG_KEY EXCEPTION; + CODE_NULL_SOURCE_FILE_CONFIG_KEY CONSTANT PLS_INTEGER := -20018; + MSG_NULL_SOURCE_FILE_CONFIG_KEY VARCHAR2(4000) := 'No entry in A_SOURCE_FILE_CONFIG table for specified A_SOURCE_FILE_CONFIG_KEY'; + PRAGMA EXCEPTION_INIT( ERR_NULL_SOURCE_FILE_CONFIG_KEY + ,CODE_NULL_SOURCE_FILE_CONFIG_KEY); + + ERR_DUPLICATED_SOURCE_KEY EXCEPTION; + CODE_DUPLICATED_SOURCE_KEY CONSTANT PLS_INTEGER := -20019; + MSG_DUPLICATED_SOURCE_KEY VARCHAR2(4000) := 'The Source already exists in the A_SOURCE table'; + PRAGMA EXCEPTION_INIT( ERR_DUPLICATED_SOURCE_KEY + ,CODE_DUPLICATED_SOURCE_KEY); + + ERR_MISSING_CONTAINER_CONFIG EXCEPTION; + CODE_MISSING_CONTAINER_CONFIG CONSTANT PLS_INTEGER := -20020; + MSG_MISSING_CONTAINER_CONFIG VARCHAR2(4000) := 'No match in A_SOURCE_FILE_CONFIG table where SOURCE_FILE_TYPE=''CONTAINER'' and specified SOURCE_FILE_ID'; + PRAGMA EXCEPTION_INIT( ERR_MISSING_CONTAINER_CONFIG + ,CODE_MISSING_CONTAINER_CONFIG); + + ERR_MULTIPLE_CONTAINER_ENTRIES EXCEPTION; + CODE_MULTIPLE_CONTAINER_ENTRIES CONSTANT PLS_INTEGER := -20021; + MSG_MULTIPLE_CONTAINER_ENTRIES VARCHAR2(4000) := 'Multiple matches in A_SOURCE_FILE_CONFIG table where SOURCE_FILE_TYPE=''CONTAINER'' and specified SOURCE_FILE_ID'; + PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_CONTAINER_ENTRIES + ,CODE_MULTIPLE_CONTAINER_ENTRIES); + + ERR_WRONG_DESTINATION_PARAM EXCEPTION; + CODE_WRONG_DESTINATION_PARAM CONSTANT PLS_INTEGER := -20022; + MSG_WRONG_DESTINATION_PARAM VARCHAR2(4000) := 'Wrong destination parameter provided.'; + PRAGMA EXCEPTION_INIT( ERR_WRONG_DESTINATION_PARAM + 
,CODE_WRONG_DESTINATION_PARAM); + + ERR_FILE_NOT_EXISTS_ON_CLOUD EXCEPTION; + CODE_FILE_NOT_EXISTS_ON_CLOUD CONSTANT PLS_INTEGER := -20023; + MSG_FILE_NOT_EXISTS_ON_CLOUD VARCHAR2(4000) := 'File not exists on cloud.'; + PRAGMA EXCEPTION_INIT( ERR_FILE_NOT_EXISTS_ON_CLOUD + ,CODE_FILE_NOT_EXISTS_ON_CLOUD); + + ERR_FILE_ALREADY_REGISTERED EXCEPTION; + CODE_FILE_ALREADY_REGISTERED CONSTANT PLS_INTEGER := -20024; + MSG_FILE_ALREADY_REGISTERED VARCHAR2(4000) := 'File already registered in A_SOURCE_FILE_RECEIVED table.'; + PRAGMA EXCEPTION_INIT( ERR_FILE_ALREADY_REGISTERED + ,CODE_FILE_ALREADY_REGISTERED); + + ERR_WRONG_DATE_TIMESTAMP_FORMAT EXCEPTION; + CODE_WRONG_DATE_TIMESTAMP_FORMAT CONSTANT PLS_INTEGER := -20025; + MSG_WRONG_DATE_TIMESTAMP_FORMAT VARCHAR2(4000) := 'Provided DATE or TIMESTAMP format has errors (possible duplicated codes, ex: ''DD'').'; + PRAGMA EXCEPTION_INIT( ERR_WRONG_DATE_TIMESTAMP_FORMAT + ,CODE_WRONG_DATE_TIMESTAMP_FORMAT); + + ERR_ENVIRONMENT_NOT_SET EXCEPTION; + CODE_ENVIRONMENT_NOT_SET CONSTANT PLS_INTEGER := -20026; + MSG_ENVIRONMENT_NOT_SET VARCHAR2(4000) := 'EnvironmentID not set' + ||cgBL||' Information about environment is needed to get proper configuration values.' + ||cgBL||' It can be set up in two different ways:' + ||cgBL||' 1. Set it on session level: execute DBMS_SESSION.SET_IDENTIFIER (client_id => ''dev'')' + ||cgBL||' 2. Set it on configuration level: Insert into CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID,CONFIG_VARIABLE,CONFIG_VARIABLE_VALUE) values (''default'',''environment_id'',''dev'')' + ||cgBL||' Session level setup (1.) 
takes precedence over configuration level one (2.)' + ; + PRAGMA EXCEPTION_INIT( ERR_ENVIRONMENT_NOT_SET + ,CODE_ENVIRONMENT_NOT_SET); + + + ERR_CONFIG_VARIABLE_NOT_SET EXCEPTION; + CODE_CONFIG_VARIABLE_NOT_SET CONSTANT PLS_INTEGER := -20027; + MSG_CONFIG_VARIABLE_NOT_SET VARCHAR2(4000) := 'Missing configuration value in A_FILE_MANAGER_CONFIG'; + PRAGMA EXCEPTION_INIT( ERR_CONFIG_VARIABLE_NOT_SET + ,CODE_CONFIG_VARIABLE_NOT_SET); + + ERR_NOT_INPUT_SOURCE_FILE_TYPE EXCEPTION; + CODE_NOT_INPUT_SOURCE_FILE_TYPE CONSTANT PLS_INTEGER := -20028; + MSG_NOT_INPUT_SOURCE_FILE_TYPE VARCHAR2(4000) := 'Archival can be executed only for A_SOURCE_FILE_CONFIG_KEY where SOURCE_FILE_TYPE=''INPUT'''; + PRAGMA EXCEPTION_INIT( ERR_NOT_INPUT_SOURCE_FILE_TYPE + ,CODE_NOT_INPUT_SOURCE_FILE_TYPE); + + ERR_EXP_DATA_FOR_ARCH_FAILED EXCEPTION; + CODE_EXP_DATA_FOR_ARCH_FAILED CONSTANT PLS_INTEGER := -20029; + MSG_EXP_DATA_FOR_ARCH_FAILED VARCHAR2(4000) := 'Export data for archival failed.'; + PRAGMA EXCEPTION_INIT( ERR_EXP_DATA_FOR_ARCH_FAILED + ,CODE_EXP_DATA_FOR_ARCH_FAILED); + + ERR_RESTORE_FILE_FROM_TRASH EXCEPTION; + CODE_RESTORE_FILE_FROM_TRASH CONSTANT PLS_INTEGER := -20030; + MSG_RESTORE_FILE_FROM_TRASH VARCHAR2(4000) := 'Unexpected issues occured while archival process. 
Restoration of exported files failed.'; + PRAGMA EXCEPTION_INIT( ERR_RESTORE_FILE_FROM_TRASH + ,CODE_RESTORE_FILE_FROM_TRASH); + + ERR_CHANGE_STAT_TO_ARCHIVED_FAILED EXCEPTION; + CODE_CHANGE_STAT_TO_ARCHIVED_FAILED CONSTANT PLS_INTEGER := -20031; + MSG_CHANGE_STAT_TO_ARCHIVED_FAILED VARCHAR2(4000) := 'Failed to change file status to: ARCHIVED in A_SOURCE_FILE_RECEIVED table.'; + PRAGMA EXCEPTION_INIT( ERR_CHANGE_STAT_TO_ARCHIVED_FAILED + ,CODE_CHANGE_STAT_TO_ARCHIVED_FAILED); + + ERR_MOVE_FILE_TO_TRASH_FAILED EXCEPTION; + CODE_MOVE_FILE_TO_TRASH_FAILED CONSTANT PLS_INTEGER := -20032; + MSG_MOVE_FILE_TO_TRASH_FAILED VARCHAR2(4000) := 'FAILED to move file to TRASH before DROPPING it.'; + PRAGMA EXCEPTION_INIT( ERR_MOVE_FILE_TO_TRASH_FAILED + ,CODE_MOVE_FILE_TO_TRASH_FAILED); + + ERR_DROP_EXPORTED_FILES_FAILED EXCEPTION; + CODE_DROP_EXPORTED_FILES_FAILED CONSTANT PLS_INTEGER := -20033; + MSG_DROP_EXPORTED_FILES_FAILED VARCHAR2(4000) := 'FAILED to move file to TRASH before DROPPING it.'; + PRAGMA EXCEPTION_INIT( ERR_DROP_EXPORTED_FILES_FAILED + ,CODE_DROP_EXPORTED_FILES_FAILED); + + ERR_INVALID_BUCKET_AREA EXCEPTION; + CODE_INVALID_BUCKET_AREA CONSTANT PLS_INTEGER := -20034; + MSG_INVALID_BUCKET_AREA VARCHAR2(4000) := 'Invalid bucket area specified. Valid values: INBOX, ODS, DATA, ARCHIVE'; + PRAGMA EXCEPTION_INIT( ERR_INVALID_BUCKET_AREA + ,CODE_INVALID_BUCKET_AREA); + + ERR_INVALID_PARALLEL_DEGREE EXCEPTION; + CODE_INVALID_PARALLEL_DEGREE CONSTANT PLS_INTEGER := -20110; + MSG_INVALID_PARALLEL_DEGREE VARCHAR2(4000) := 'Invalid parallel degree parameter. 
Must be between 1 and 16'; + PRAGMA EXCEPTION_INIT( ERR_INVALID_PARALLEL_DEGREE + ,CODE_INVALID_PARALLEL_DEGREE); + + ERR_PARALLEL_EXECUTION_FAILED EXCEPTION; + CODE_PARALLEL_EXECUTION_FAILED CONSTANT PLS_INTEGER := -20111; + MSG_PARALLEL_EXECUTION_FAILED VARCHAR2(4000) := 'Parallel execution failed'; + PRAGMA EXCEPTION_INIT( ERR_PARALLEL_EXECUTION_FAILED + ,CODE_PARALLEL_EXECUTION_FAILED); + + ERR_UNKNOWN EXCEPTION; + CODE_UNKNOWN CONSTANT PLS_INTEGER := -20999; + MSG_UNKNOWN VARCHAR2(4000) := 'Unknown Error Occured'; + PRAGMA EXCEPTION_INIT( ERR_UNKNOWN + ,CODE_UNKNOWN); + + --------------------------------------------------------------------------------------------------------------------------- + --------------------------------------------------------------------------------------------------------------------------- + + + + + /** + * @name LOG_PROCESS_EVENT + * @desc Insert a new log record into A_PROCESS_LOG table. + * Also outputs to console if gvConsoleLoggingEnabled = 'ON'. + * Respects logging level configuration (gvMinLogLevel). + * @example ENV_MANAGER.LOG_PROCESS_EVENT('Process completed successfully', 'INFO', 'pParam1=value1'); + * @ex_rslt Record inserted into A_PROCESS_LOG table and optionally displayed in console output + **/ + PROCEDURE LOG_PROCESS_EVENT ( + pLogMessage VARCHAR2 + ,pLogLevel VARCHAR2 DEFAULT 'ERROR' + ,pParameters VARCHAR2 DEFAULT NULL + ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER' + ); + + /** + * @name LOG_PROCESS_ERROR + * @desc Insert a detailed error record into A_PROCESS_LOG table with full stack trace, backtrace, and call stack. + * This procedure captures comprehensive error information for debugging purposes while + * allowing clean user-facing error messages to be raised separately. 
+ * @param pLogMessage - Base error message description + * @param pParameters - Procedure parameters for context + * @param pProcessName - Name of the calling process/package + * @ex_rslt Record inserted into A_PROCESS_LOG table with complete error stack information + */ + PROCEDURE LOG_PROCESS_ERROR ( + pLogMessage VARCHAR2 + ,pParameters VARCHAR2 DEFAULT NULL + ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER' + ); + + /** + * @name INIT_ERRORS + * @desc Loads data into Errors array. + * Errors array is a list of Record(Error_Code, Error_Message) index by Error_Code. + * Called automatically during package initialization. + * @example Called automatically when package is first referenced + * @ex_rslt Errors array populated with all error codes and messages + **/ + PROCEDURE INIT_ERRORS; + + + + /** + * @name GET_DEFAULT_ENV + * @desc It returns string with name of default environment. + * Return string is A_FILE_MANAGER_CONFIG.ENVIRONMENT_ID value. + * @example select ENV_MANAGER.GET_DEFAULT_ENV() from dual; + * @ex_rslt dev + **/ + FUNCTION GET_DEFAULT_ENV + RETURN VARCHAR2; + + + + /** + * @name INIT_VARIABLES + * @desc For specified pEnv parameter (A_FILE_MANAGER_CONFIG.ENVIRONMENT_ID) + * Assign values to following global package variables: + * - gvNameSpace + * - gvRegion + * - gvCredentialName + * - gvInboxBucketName + * - gvDataBucketName + * - gvArchiveBucketName + * - gvInboxBucketUri + * - gvDataBucketUri + * - gvArchiveBucketUri + * - gvLoggingEnabled + * - gvMinLogLevel + * - gvDefaultDateFormat + * - gvConsoleLoggingEnabled + **/ + PROCEDURE INIT_VARIABLES( + pEnv VARCHAR2 + ); + + + + /** + * @name GET_ERROR_MESSAGE + * @desc It returns string with error message for specified pCode (Error_Code). 
+ * Error message is take from Errors Array loaded by INIT_ERRORS procedure + * @example select ENV_MANAGER.GET_ERROR_MESSAGE(pCode => -20009) from dual; + * @ex_rslt File not found on the cloud + **/ + FUNCTION GET_ERROR_MESSAGE( + pCode PLS_INTEGER + ) RETURN VARCHAR2; + + + + /** + * @name GET_ERROR_STACK + * @desc It returns string with all possible error stack info. + * Error message is take from Errors Array loaded by INIT_ERRORS procedure + * @example + * select ENV_MANAGER.GET_ERROR_STACK( + * pFormat => 'OUTPUT' + * ,pCode => -20009 + * ,pSourceFileReceivedKey => NULL) + * from dual + * @ex_rslt + * ------------------------------------------------------+ + * Error Message: + * ORA-0000: normal, successful completion + * ------------------------------------------------------- + * Error Stack: + * ------------------------------------------------------- + * Error Backtrace: + * ------------------------------------------------------+ + **/ + FUNCTION GET_ERROR_STACK( + pFormat VARCHAR2 + ,pCode PLS_INTEGER + ,pSourceFileReceivedKey CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE DEFAULT NULL + ) RETURN VARCHAR2; + + /** + * @name FORMAT_PARAMETERS + * @desc Formats parameter list for logging purposes. + * Converts SYS.ODCIVARCHAR2LIST to formatted string with proper NULL handling. + * @example select ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('param1=value1', 'param2=NULL')) from dual; + * @ex_rslt param1=value1 , + * param2=NULL + **/ + FUNCTION FORMAT_PARAMETERS( + pParameterList SYS.ODCIVARCHAR2LIST + ) RETURN VARCHAR2; + + /** + * @name ANALYZE_VALIDATION_ERRORS + * @desc Analyzes CSV validation errors and generates detailed diagnostic report. + * Compares CSV structure with template table and provides specific error analysis. + * Includes suggested solutions for common validation issues. 
+ * @param pValidationLogTable - Name of validation log table (e.g., VALIDATE$242_LOG) + * @param pTemplateSchema - Schema of template table (e.g., CT_ET_TEMPLATES) + * @param pTemplateTable - Name of template table (e.g., MOCK_PROC_TABLE) + * @param pCsvFileUri - URI of CSV file being validated + * @example SELECT ENV_MANAGER.ANALYZE_VALIDATION_ERRORS('VALIDATE$242_LOG', 'CT_ET_TEMPLATES', 'MOCK_PROC_TABLE', 'https://...') FROM DUAL; + * @ex_rslt Detailed validation analysis report with column mismatches and solutions + **/ + FUNCTION ANALYZE_VALIDATION_ERRORS( + pValidationLogTable VARCHAR2, + pTemplateSchema VARCHAR2, + pTemplateTable VARCHAR2, + pCsvFileUri VARCHAR2 + ) RETURN VARCHAR2; + + --------------------------------------------------------------------------------------------------------------------------- + -- PACKAGE VERSION MANAGEMENT FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name GET_VERSION + * @desc Returns the current version number of the ENV_MANAGER package. + * Uses semantic versioning format (MAJOR.MINOR.PATCH). + * @example SELECT ENV_MANAGER.GET_VERSION() FROM DUAL; + * @ex_rslt 3.0.0 + **/ + FUNCTION GET_VERSION RETURN VARCHAR2; + + /** + * @name GET_BUILD_INFO + * @desc Returns comprehensive build information including version, build date, and author. + * Formatted for display in logs or monitoring systems. + * @example SELECT ENV_MANAGER.GET_BUILD_INFO() FROM DUAL; + * @ex_rslt Package: ENV_MANAGER + * Version: 3.0.0 + * Build Date: 2025-10-22 16:00:00 + * Author: Grzegorz Michalski + **/ + FUNCTION GET_BUILD_INFO RETURN VARCHAR2; + + /** + * @name GET_VERSION_HISTORY + * @desc Returns complete version history with all releases and changes. + * Shows evolution of package features over time. 
+ * @example SELECT ENV_MANAGER.GET_VERSION_HISTORY() FROM DUAL; + * @ex_rslt ENV_MANAGER Version History: + * 3.0.0 (2025-10-22): Added package versioning system... + * 2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function... + **/ + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; + + /** + * @name GET_PACKAGE_VERSION_INFO + * @desc Universal function to get formatted version information for any package. + * This centralized function is used by all packages in the system. + * @param pPackageName - Name of the package + * @param pVersion - Version string (MAJOR.MINOR.PATCH format) + * @param pBuildDate - Build date timestamp + * @param pAuthor - Package author name + * @example SELECT ENV_MANAGER.GET_PACKAGE_VERSION_INFO('FILE_MANAGER', '2.1.0', '2025-10-22 15:00:00', 'Grzegorz Michalski') FROM DUAL; + * @ex_rslt Package: FILE_MANAGER + * Version: 2.1.0 + * Build Date: 2025-10-22 15:00:00 + * Author: Grzegorz Michalski + **/ + FUNCTION GET_PACKAGE_VERSION_INFO( + pPackageName VARCHAR2, + pVersion VARCHAR2, + pBuildDate VARCHAR2, + pAuthor VARCHAR2 + ) RETURN VARCHAR2; + + /** + * @name FORMAT_VERSION_HISTORY + * @desc Universal function to format version history for any package. + * Adds package name header and proper formatting. + * @param pPackageName - Name of the package + * @param pVersionHistory - Complete version history text + * @example SELECT ENV_MANAGER.FORMAT_VERSION_HISTORY('FILE_MANAGER', '2.1.0 (2025-10-22): Export procedures...') FROM DUAL; + * @ex_rslt FILE_MANAGER Version History: + * 2.1.0 (2025-10-22): Export procedures... 
+ **/ + FUNCTION FORMAT_VERSION_HISTORY( + pPackageName VARCHAR2, + pVersionHistory VARCHAR2 + ) RETURN VARCHAR2; + + --------------------------------------------------------------------------------------------------------------------------- + -- PACKAGE HASH + CHANGE DETECTION FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name CALCULATE_PACKAGE_HASH + * @desc Calculates SHA256 hash of package source code from ALL_SOURCE. + * Returns hash for both SPEC and BODY (if exists). + * Used for automatic change detection. + * @param pPackageOwner - Schema owner of the package + * @param pPackageName - Name of the package + * @param pPackageType - Type of package code ('PACKAGE' for SPEC, 'PACKAGE BODY' for BODY) + * @example SELECT ENV_MANAGER.CALCULATE_PACKAGE_HASH('CT_MRDS', 'FILE_MANAGER', 'PACKAGE') FROM DUAL; + * @ex_rslt A7B3C5D9E8F1234567890ABCDEF... (64-character SHA256 hash) + **/ + FUNCTION CALCULATE_PACKAGE_HASH( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageType VARCHAR2 -- 'PACKAGE' or 'PACKAGE BODY' + ) RETURN VARCHAR2; + + /** + * @name TRACK_PACKAGE_VERSION + * @desc Records package version and source code hash in A_PACKAGE_VERSION_TRACKING table. + * Automatically detects if source code changed without version update. + * Should be called after every package deployment. 
+ * @param pPackageOwner - Schema owner of the package + * @param pPackageName - Name of the package + * @param pPackageVersion - Current version from PACKAGE_VERSION constant + * @param pPackageBuildDate - Build date from PACKAGE_BUILD_DATE constant + * @param pPackageAuthor - Author from PACKAGE_AUTHOR constant + * @example EXEC ENV_MANAGER.TRACK_PACKAGE_VERSION('CT_MRDS', 'FILE_MANAGER', '3.2.0', '2025-10-22 16:30:00', 'Grzegorz Michalski'); + * @ex_rslt Record inserted into A_PACKAGE_VERSION_TRACKING with change detection status + **/ + PROCEDURE TRACK_PACKAGE_VERSION( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageVersion VARCHAR2, + pPackageBuildDate VARCHAR2, + pPackageAuthor VARCHAR2 + ); + + /** + * @name CHECK_PACKAGE_CHANGES + * @desc Checks if package source code has changed since last tracking. + * Compares current hash with last recorded hash in A_PACKAGE_VERSION_TRACKING. + * Returns detailed change detection report. + * @param pPackageOwner - Schema owner of the package + * @param pPackageName - Name of the package + * @example SELECT ENV_MANAGER.CHECK_PACKAGE_CHANGES('CT_MRDS', 'FILE_MANAGER') FROM DUAL; + * @ex_rslt WARNING: Package changed without version update! + * Last Version: 3.2.0 + * Current Hash (SPEC): A7B3C5D9... + * Last Hash (SPEC): B8C4D6E0... + * RECOMMENDATION: Update PACKAGE_VERSION and PACKAGE_BUILD_DATE + **/ + FUNCTION CHECK_PACKAGE_CHANGES( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2; + + /** + * @name GET_PACKAGE_HASH_INFO + * @desc Returns formatted information about package hash and tracking history. + * Includes current hash, last tracked hash, and change detection status. + * @param pPackageOwner - Schema owner of the package + * @param pPackageName - Name of the package + * @example SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('CT_MRDS', 'FILE_MANAGER') FROM DUAL; + * @ex_rslt Package: CT_MRDS.FILE_MANAGER + * Current Version: 3.2.0 + * Current Hash (SPEC): A7B3C5D9... 
+ * Last Tracked: 2025-10-22 16:30:00 + * Status: OK - No changes detected + **/ + FUNCTION GET_PACKAGE_HASH_INFO( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2; + +END ENV_MANAGER; +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835/01_MARS_835_install_step1.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835/01_MARS_835_install_step1.sql index 3dbc997..235e387 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835/01_MARS_835_install_step1.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835/01_MARS_835_install_step1.sql @@ -103,11 +103,13 @@ BEGIN pBucketArea => 'DATA', pFolderName => 'ODS/CSDB/CSDB_DEBT', pMinDate => &cutoff_date, - pMaxDate => SYSDATE, + pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE) pParallelDegree => 16, pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT', pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036) - pRegisterExport => TRUE -- Register exported files in A_SOURCE_FILE_RECEIVED with metadata (CHECKSUM, CREATED, BYTES) + pRegisterExport => TRUE, -- Register exported files in A_SOURCE_FILE_RECEIVED with metadata (CHECKSUM, CREATED, BYTES) + pProcessName => 'MARS-835', -- Process identifier for tracking + pJobClass => 'high' -- Oracle Scheduler job class for resource management ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT exported to DATA bucket with template column order'); @@ -128,7 +130,8 @@ BEGIN pFolderName => 'ARCHIVE/CSDB/CSDB_DEBT', pMaxDate => &cutoff_date, pParallelDegree => 16, - pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT' + pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT', + pJobClass => 'high' -- Oracle Scheduler job class for resource management ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT exported to HIST bucket with template column order'); @@ -223,11 +226,13 @@ BEGIN pBucketArea => 'DATA', pFolderName => 'ODS/CSDB/CSDB_DEBT_DAILY', pMinDate => &cutoff_date, - pMaxDate => SYSDATE, + pMaxDate => DATE '9999-12-31', -- 
Include future dates (MAX_LOAD_START can be beyond SYSDATE) pParallelDegree => 16, pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY', pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036) - pRegisterExport => TRUE -- Register exported files in A_SOURCE_FILE_RECEIVED with metadata (CHECKSUM, CREATED, BYTES) + pRegisterExport => TRUE, -- Register exported files in A_SOURCE_FILE_RECEIVED with metadata (CHECKSUM, CREATED, BYTES) + pProcessName => 'MARS-835', -- Process identifier for tracking + pJobClass => 'high' -- Oracle Scheduler job class for resource management ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT_DAILY exported to DATA bucket with template column order'); @@ -248,7 +253,8 @@ BEGIN pFolderName => 'ARCHIVE/CSDB/CSDB_DEBT_DAILY', pMaxDate => &cutoff_date, pParallelDegree => 16, - pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY' + pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY', + pJobClass => 'high' -- Oracle Scheduler job class for resource management ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT_DAILY exported to HIST bucket with template column order'); diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835/02_MARS_835_install_step2.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835/02_MARS_835_install_step2.sql index 3db3bdf..0abaeba 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835/02_MARS_835_install_step2.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835/02_MARS_835_install_step2.sql @@ -33,9 +33,11 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/CSDB/CSDB_INSTR_RAT_FULL', - pMaxDate => SYSDATE, + pMinDate => DATE '1900-01-01', -- Explicit start date for clarity + pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE) pParallelDegree => 8, - pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL' + pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL', + pJobClass => 'high' -- Oracle Scheduler job 
class for resource management ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_INSTR_RAT_FULL exported to HIST bucket with template column order'); @@ -60,9 +62,11 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/CSDB/CSDB_INSTR_DESC_FULL', - pMaxDate => SYSDATE, + pMinDate => DATE '1900-01-01', -- Explicit start date for clarity + pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE) pParallelDegree => 8, - pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL' + pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL', + pJobClass => 'high' -- Oracle Scheduler job class for resource management ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_INSTR_DESC_FULL exported to HIST bucket with template column order'); @@ -87,9 +91,11 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/CSDB/CSDB_ISSUER_RAT_FULL', - pMaxDate => SYSDATE, + pMinDate => DATE '1900-01-01', -- Explicit start date for clarity + pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE) pParallelDegree => 8, - pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL' + pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL', + pJobClass => 'high' -- Oracle Scheduler job class for resource management ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ISSUER_RAT_FULL exported to HIST bucket with template column order'); @@ -114,9 +120,11 @@ BEGIN pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/CSDB/CSDB_ISSUER_DESC_FULL', - pMaxDate => SYSDATE, + pMinDate => DATE '1900-01-01', -- Explicit start date for clarity + pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE) pParallelDegree => 8, - pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL' + pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL', + pJobClass => 'high' -- Oracle Scheduler job 
class for resource management ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ISSUER_DESC_FULL exported to HIST bucket with template column order'); diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835/90_MARS_835_rollback_file_registrations.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835/90_MARS_835_rollback_file_registrations.sql new file mode 100644 index 0000000..1de8581 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835/90_MARS_835_rollback_file_registrations.sql @@ -0,0 +1,54 @@ +--============================================================================================================================= +-- MARS-835 ROLLBACK: Delete File Registration Records +--============================================================================================================================= +-- Purpose: Delete all file registration records from A_SOURCE_FILE_RECEIVED table for MARS-835 process +-- Author: Grzegorz Michalski +-- Date: 2026-02-13 +-- Related: MARS-835 - CSDB Data Export Rollback +--============================================================================================================================= + +SET SERVEROUTPUT ON SIZE UNLIMITED +SET TIMING ON + +PROMPT ======================================================================== +PROMPT ROLLBACK: Deleting file registration records from A_SOURCE_FILE_RECEIVED +PROMPT ======================================================================== + +DECLARE + vRowCount NUMBER := 0; + vStartTime TIMESTAMP := SYSTIMESTAMP; + vEndTime TIMESTAMP; + vElapsedSeconds NUMBER; +BEGIN + DBMS_OUTPUT.PUT_LINE('Deleting all MARS-835 file registrations from A_SOURCE_FILE_RECEIVED...'); + + -- Delete all records for MARS-835 process + DELETE FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE PROCESS_NAME = 'MARS-835'; + + vRowCount := SQL%ROWCOUNT; + COMMIT; + + vEndTime := SYSTIMESTAMP; + vElapsedSeconds := EXTRACT(SECOND FROM (vEndTime - vStartTime)) + + EXTRACT(MINUTE FROM (vEndTime - vStartTime)) * 60 + + EXTRACT(HOUR 
FROM (vEndTime - vStartTime)) * 3600; + + DBMS_OUTPUT.PUT_LINE('========================================================================'); + DBMS_OUTPUT.PUT_LINE('SUCCESS: File registration records deleted'); + DBMS_OUTPUT.PUT_LINE('========================================================================'); + DBMS_OUTPUT.PUT_LINE('Records deleted: ' || vRowCount); + DBMS_OUTPUT.PUT_LINE('Elapsed time: ' || ROUND(vElapsedSeconds, 2) || ' seconds'); + DBMS_OUTPUT.PUT_LINE('========================================================================'); +EXCEPTION + WHEN OTHERS THEN + ROLLBACK; + DBMS_OUTPUT.PUT_LINE('ERROR: Failed to delete file registration records'); + DBMS_OUTPUT.PUT_LINE('Error message: ' || SQLERRM); + RAISE; +END; +/ + +--============================================================================================================================= +-- End of Script +--============================================================================================================================= diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835/91_MARS_835_rollback_step1.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835/91_MARS_835_rollback_step1.sql index 20848a8..b5e283c 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835/91_MARS_835_rollback_step1.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835/91_MARS_835_rollback_step1.sql @@ -22,25 +22,24 @@ DECLARE vDataBucketUri VARCHAR2(500); vHistBucketUri VARCHAR2(500); vCredentialName VARCHAR2(100); + vFileCount NUMBER := 0; BEGIN -- Get bucket URIs and credential vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA'); vHistBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('ARCHIVE'); vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName; - DBMS_OUTPUT.PUT_LINE('Deleting DEBT files from DATA bucket...'); + DBMS_OUTPUT.PUT_LINE('Deleting DEBT CSV files from DATA bucket...'); + DBMS_OUTPUT.PUT_LINE(' Using DBMS_CLOUD.LIST_OBJECTS to scan bucket'); - -- Delete CSV files from DATA bucket (only files 
matching export pattern) - -- Pattern matches: LEGACY_DEBT_YYYYMM.csv OR LEGACY_DEBT_YYYYMM_1_20260122T...Z.csv (Oracle timestamp) + -- Delete CSV files for DEBT from DATA bucket using LIST_OBJECTS FOR rec IN ( - SELECT object_name + SELECT object_name FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( credential_name => vCredentialName, location_uri => vDataBucketUri || 'ODS/CSDB/CSDB_DEBT/' )) - WHERE object_name LIKE 'LEGACY_DEBT_%' - AND object_name LIKE '%.csv' - AND REGEXP_LIKE(object_name, '^LEGACY_DEBT_[0-9]{6}(_[0-9]+_[0-9]{8}T[0-9]{6,}Z)?\.csv$') -- YYYYMM or YYYYMM_1_timestamp + WHERE object_name LIKE 'LEGACY_DEBT%' ) LOOP BEGIN DBMS_CLOUD.DELETE_OBJECT( @@ -48,6 +47,7 @@ BEGIN object_uri => vDataBucketUri || 'ODS/CSDB/CSDB_DEBT/' || rec.object_name ); DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name); + vFileCount := vFileCount + 1; EXCEPTION WHEN OTHERS THEN IF SQLCODE = -20404 THEN @@ -58,19 +58,20 @@ BEGIN END; END LOOP; - DBMS_OUTPUT.PUT_LINE('Deleting DEBT files from HIST bucket...'); + DBMS_OUTPUT.PUT_LINE('SUCCESS: DEBT CSV files deleted from DATA bucket (' || vFileCount || ' file(s))'); - -- Delete Parquet files from HIST bucket (only files matching export pattern) - -- Pattern matches: YYYYMM.parquet OR YYYYMM_1_20260122T...Z.parquet (Oracle timestamp) + DBMS_OUTPUT.PUT_LINE('Deleting DEBT Parquet files from ARCHIVE bucket...'); + DBMS_OUTPUT.PUT_LINE(' Using DBMS_CLOUD.LIST_OBJECTS (Parquet files not registered)'); + vFileCount := 0; + + -- Delete Parquet files from ARCHIVE bucket using DBMS_CLOUD.LIST_OBJECTS FOR rec IN ( - SELECT object_name + SELECT object_name FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( credential_name => vCredentialName, location_uri => vHistBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT/' )) - WHERE object_name LIKE '%PARTITION_YEAR=%' -- Hive-style partitioning folders - AND object_name LIKE '%.parquet' - AND REGEXP_LIKE(object_name, '[0-9]{6}(_[0-9]+_[0-9]{8}T[0-9]{6,}Z)?\.parquet$') -- YYYYMM or YYYYMM_1_timestamp + WHERE object_name NOT LIKE 
'%/' -- Exclude directories ) LOOP BEGIN DBMS_CLOUD.DELETE_OBJECT( @@ -78,6 +79,7 @@ BEGIN object_uri => vHistBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT/' || rec.object_name ); DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name); + vFileCount := vFileCount + 1; EXCEPTION WHEN OTHERS THEN IF SQLCODE = -20404 THEN @@ -88,7 +90,11 @@ BEGIN END; END LOOP; - DBMS_OUTPUT.PUT_LINE('SUCCESS: DEBT files deleted'); + IF vFileCount = 0 THEN + DBMS_OUTPUT.PUT_LINE(' INFO: No DEBT Parquet files found to delete'); + END IF; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: DEBT Parquet files deleted from ARCHIVE bucket (' || vFileCount || ' file(s))'); END; / @@ -104,25 +110,24 @@ DECLARE vDataBucketUri VARCHAR2(500); vHistBucketUri VARCHAR2(500); vCredentialName VARCHAR2(100); + vFileCount NUMBER := 0; BEGIN -- Get bucket URIs and credential vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA'); vHistBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('ARCHIVE'); vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName; - DBMS_OUTPUT.PUT_LINE('Deleting DEBT_DAILY files from DATA bucket...'); + DBMS_OUTPUT.PUT_LINE('Deleting DEBT_DAILY CSV files from DATA bucket...'); + DBMS_OUTPUT.PUT_LINE(' Using DBMS_CLOUD.LIST_OBJECTS to scan bucket'); - -- Delete CSV files from DATA bucket (only files matching export pattern) - -- Pattern matches: LEGACY_DEBT_DAILY_YYYYMM.csv OR LEGACY_DEBT_DAILY_YYYYMM_1_timestamp.csv + -- Delete CSV files for DEBT_DAILY from DATA bucket using LIST_OBJECTS FOR rec IN ( - SELECT object_name + SELECT object_name FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( credential_name => vCredentialName, location_uri => vDataBucketUri || 'ODS/CSDB/CSDB_DEBT_DAILY/' )) - WHERE object_name LIKE 'LEGACY_DEBT_DAILY_%' - AND object_name LIKE '%.csv' - AND REGEXP_LIKE(object_name, '^LEGACY_DEBT_DAILY_[0-9]{6}(_[0-9]+_[0-9]{8}T[0-9]{6,}Z)?\.csv$') -- YYYYMM or YYYYMM_1_timestamp + WHERE object_name LIKE 'LEGACY_DEBT_DAILY%' ) LOOP BEGIN DBMS_CLOUD.DELETE_OBJECT( @@ -130,6 +135,7 @@ BEGIN object_uri => 
vDataBucketUri || 'ODS/CSDB/CSDB_DEBT_DAILY/' || rec.object_name ); DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name); + vFileCount := vFileCount + 1; EXCEPTION WHEN OTHERS THEN IF SQLCODE = -20404 THEN @@ -140,19 +146,20 @@ BEGIN END; END LOOP; - DBMS_OUTPUT.PUT_LINE('Deleting DEBT_DAILY files from HIST bucket...'); + DBMS_OUTPUT.PUT_LINE('SUCCESS: DEBT_DAILY CSV files deleted from DATA bucket (' || vFileCount || ' file(s))'); - -- Delete Parquet files from HIST bucket (only files matching export pattern) - -- Pattern matches: YYYYMM.parquet OR YYYYMM_1_timestamp.parquet + DBMS_OUTPUT.PUT_LINE('Deleting DEBT_DAILY Parquet files from ARCHIVE bucket...'); + DBMS_OUTPUT.PUT_LINE(' Using DBMS_CLOUD.LIST_OBJECTS (Parquet files not registered)'); + vFileCount := 0; + + -- Delete Parquet files from ARCHIVE bucket using DBMS_CLOUD.LIST_OBJECTS FOR rec IN ( - SELECT object_name + SELECT object_name FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( credential_name => vCredentialName, location_uri => vHistBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/' )) - WHERE object_name LIKE '%PARTITION_YEAR=%' -- Hive-style partitioning folders - AND object_name LIKE '%.parquet' - AND REGEXP_LIKE(object_name, '[0-9]{6}(_[0-9]+_[0-9]{8}T[0-9]{6,}Z)?\.parquet$') -- YYYYMM or YYYYMM_1_timestamp + WHERE object_name NOT LIKE '%/' -- Exclude directories ) LOOP BEGIN DBMS_CLOUD.DELETE_OBJECT( @@ -160,6 +167,7 @@ BEGIN object_uri => vHistBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/' || rec.object_name ); DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name); + vFileCount := vFileCount + 1; EXCEPTION WHEN OTHERS THEN IF SQLCODE = -20404 THEN @@ -170,7 +178,11 @@ BEGIN END; END LOOP; - DBMS_OUTPUT.PUT_LINE('SUCCESS: DEBT_DAILY files deleted'); + IF vFileCount = 0 THEN + DBMS_OUTPUT.PUT_LINE(' INFO: No DEBT_DAILY Parquet files found to delete'); + END IF; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: DEBT_DAILY Parquet files deleted from ARCHIVE bucket (' || vFileCount || ' file(s))'); END; / diff --git 
a/MARS_Packages/REL01_ADDITIONS/MARS-835/README.md b/MARS_Packages/REL01_ADDITIONS/MARS-835/README.md deleted file mode 100644 index 9347e91..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835/README.md +++ /dev/null @@ -1,165 +0,0 @@ -# MARS-835: One-Time CSDB Data Export from Operational Database to External Tables - -## Overview -This package performs a one-time bulk export of CSDB data from operational database tables (OU_CSDB schema) to new external tables in OCI buckets. The export uses DATA_EXPORTER v2.4.0 with per-column date format handling to move historical data to either DATA bucket (CSV format) or HIST bucket (Parquet format with Hive-style partitioning). - -**Migration Strategy:** -- **Split Export (2 tables)**: DEBT, DEBT_DAILY - Last 6 months → DATA (CSV), Older data → HIST (Parquet) -- **HIST Only (4 tables)**: INSTR_RAT_FULL, INSTR_DESC_FULL, ISSUER_RAT_FULL, ISSUER_DESC_FULL - All data → HIST (Parquet) - -**Key Transformations:** -- Column rename: `A_ETL_LOAD_SET_FK` → `A_WORKFLOW_HISTORY_KEY` (all tables) -- Column removal: DEBT (2 columns), DEBT_DAILY (6 columns) not required in new structure - -## Contents -- `install_mars835.sql` - Master installation script with SPOOL logging -- `rollback_mars835.sql` - Master rollback script -- `01_MARS_835_*.sql` - Individual installation scripts -- `91_MARS_835_*.sql` - Individual rollback scripts -- `track_package_versions.sql` - Package version tracking -- `verify_packages_version.sql` - Package verification - -## Prerequisites -- Oracle Database 23ai -- ADMIN user access (required for all MARS installations) -- ENV_MANAGER v3.1.0+ -- Required schema privileges - -## Installation - -### Option 1: Master Script (Recommended) -```powershell -# IMPORTANT: Execute as ADMIN user for proper privilege management -Get-Content "MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/install_mars835.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high" - -# Log file created: log/INSTALL_MARS_835__.log -``` - -### Option 2: 
Individual Scripts -```powershell -# IMPORTANT: Execute as ADMIN user -Get-Content "01_MARS_835_*.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high" -Get-Content "02_MARS_835_*.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high" -# ... etc -``` - -## Verification -```sql --- Verify package versions -SELECT PACKAGE_NAME.GET_VERSION() FROM DUAL; - --- Check for errors (ADMIN user checks specific schema) -SELECT * FROM ALL_ERRORS -WHERE OWNER = 'CT_MRDS' -- Replace with target schema - AND NAME = 'PACKAGE_NAME'; - --- Verify no untracked changes -SELECT ENV_MANAGER.CHECK_PACKAGE_CHANGES('CT_MRDS', 'PACKAGE_NAME') FROM DUAL; -``` - -## Rollback -```powershell -# IMPORTANT: Execute as ADMIN user -Get-Content "MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/rollback_mars835.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high" - -**NOTE**: Rollback for data exports is **NOT RECOMMENDED** as it would delete exported files from OCI buckets. Only use rollback if export failed and needs to be restarted. -``` - -## Expected Changes - -### Data Export Summary -**6 CSDB tables exported from OU_CSDB schema:** - -**Group 1: Split DATA + HIST (Time Critical)** -1. **DEBT** - Last 6 months → DATA, Older → HIST -2. **DEBT_DAILY** - Last 6 months → DATA, Older → HIST - -**Group 2: HIST Only (Weekend Bulk)** -3. **INSTR_RAT_FULL** - All data → HIST -4. **INSTR_DESC_FULL** - All data → HIST -5. **ISSUER_RAT_FULL** - All data → HIST -6. 
**ISSUER_DESC_FULL** - All data → HIST - -### Bucket Destinations (DEV environment) -- **DATA Bucket**: `mrds_data_dev/ODS/CSDB/` (CSV format) -- **HIST Bucket**: `mrds_hist_dev/ARCHIVE/CSDB/` (Parquet with partitioning) - -### Column Mappings -- **All tables**: `A_ETL_LOAD_SET_FK` renamed to `A_WORKFLOW_HISTORY_KEY` -- **DEBT**: Removed columns: `IDIRDEPOSITORY`, `VA_BONDDURATION` -- **DEBT_DAILY**: Removed columns: `STEPID`, `PROGRAMNAME`, `PROGRAMCEILING`, `PROGRAMSTATUS`, `ISSUERNACE21SECTOR`, `INSTRUMENTQUOTATIONBASIS` - -## Testing - -### Post-Export Verification - -1. **Verify CSV files in DATA bucket** (DEBT, DEBT_DAILY - last 6 months): -```sql --- Check exported files -SELECT object_name, bytes -FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( - credential_name => 'DEF_CRED_ARN', - location_uri => 'https://objectstorage.region.oraclecloud.com/n/namespace/b/mrds_data_dev/o/ODS/CSDB/' -)) WHERE object_name LIKE '%CSDB_DEBT%'; -``` - -2. **Verify Parquet files in HIST bucket** (all 6 tables): -```sql --- Check archived files with Hive partitioning -SELECT object_name, bytes -FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( - credential_name => 'DEF_CRED_ARN', - location_uri => 'https://objectstorage.region.oraclecloud.com/n/namespace/b/mrds_hist_dev/o/ARCHIVE/CSDB/' -)) WHERE object_name LIKE '%PARTITION_YEAR=%'; -``` - -3. **Validate row counts match source tables**: -```sql --- Compare counts between source and exported data -SELECT COUNT(*) FROM OU_CSDB.DEBT; -SELECT COUNT(*) FROM ODS.CSDB_DEBT_ODS; -- External table pointing to DATA -SELECT COUNT(*) FROM ODS.CSDB_DEBT_ARCHIVE; -- External table pointing to HIST -``` - -4. 
**Verify column mappings**: -```sql --- Check A_WORKFLOW_HISTORY_KEY exists in exported data -SELECT A_WORKFLOW_HISTORY_KEY, COUNT(*) -FROM ODS.CSDB_DEBT_ARCHIVE -GROUP BY A_WORKFLOW_HISTORY_KEY; -``` - -## Known Issues - -### Timing Constraints -- **DATA exports (DEBT, DEBT_DAILY)**: Must execute during parallel old+new loads phase after Production deployment -- **HIST exports (all 6 tables)**: Can run anytime, recommended for weekend bulk execution to avoid interference - -### Environment-Specific Configuration -- Bucket names must be adjusted for each environment: - - DEV: `mrds_data_dev`, `mrds_hist_dev` - - TEST: `mrds_data_test`, `mrds_hist_test` - - PROD: `mrds_data_prod`, `mrds_hist_prod` - -### Data Cutoff Date -- Export scripts use 6-month cutoff date calculated as `ADD_MONTHS(SYSDATE, -6)` -- Verify cutoff aligns with business requirements before execution - -### One-Time Execution -- This is a **ONE-TIME data migration** package -- After successful execution, package should be **deactivated** (moved to REL01_POST_DEACTIVATION) -- Do not re-run unless explicitly required for data refresh - -## Related -- **JIRA**: MARS-835 - CSDB Data Export to External Tables -- **Confluence**: FILE_MANAGER package - MRDS - Technical Team -- **Confluence**: Table Setup Guide for FILE PROCESSOR System -- **Source Schema**: OU_CSDB (Operational Database) -- **Target Schema**: ODS (External Tables) -- **Migration Type**: One-time bulk export (deactivated post-execution) - ---- - -**Author:** Grzegorz Michalski -**Date:** 2025-12-04 -**Version:** 1.0.0 diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835/REQUIRED_EXTERNAL_TABLES.md b/MARS_Packages/REL01_ADDITIONS/MARS-835/REQUIRED_EXTERNAL_TABLES.md deleted file mode 100644 index 0dedb48..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835/REQUIRED_EXTERNAL_TABLES.md +++ /dev/null @@ -1,207 +0,0 @@ -# MARS-835: Required External Tables for Smart Column Mapping - -## Overview -This document lists all external tables required 
for MARS-835 data exports using DATA_EXPORTER v2.4.0 with Smart Column Mapping feature. - -**Purpose**: Smart Column Mapping ensures CSV files are generated with columns in the EXACT order expected by external tables, preventing NULL values due to Oracle's positional CSV mapping. - ---- - -## Required External Tables - -### Group 1: DATA Bucket (CSV Format) - **CRITICAL** - -#### 1. ODS.CSDB_DEBT_DATA_ODS -- **Source Table**: OU_CSDB.LEGACY_DEBT -- **Format**: CSV -- **Bucket**: DATA (mrds_data_dev/ODS/CSDB/CSDB_DEBT/) -- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY (position 2 recommended) -- **Critical**: Must use Smart Column Mapping to avoid NULL values in A_WORKFLOW_HISTORY_KEY - -#### 2. ODS.CSDB_DEBT_DAILY_DATA_ODS -- **Source Table**: OU_CSDB.LEGACY_DEBT_DAILY -- **Format**: CSV -- **Bucket**: DATA (mrds_data_dev/ODS/CSDB/CSDB_DEBT_DAILY/) -- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY (position 2 recommended) -- **Critical**: Must use Smart Column Mapping to avoid NULL values in A_WORKFLOW_HISTORY_KEY - ---- - -### Group 2: ARCHIVE Bucket (Parquet Format) - **RECOMMENDED** - -#### 3. ODS.CSDB_DEBT_ARCHIVE -- **Source Table**: OU_CSDB.LEGACY_DEBT -- **Format**: Parquet with Hive partitioning -- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_DEBT/) -- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY -- **Note**: Parquet uses schema-based mapping (column order less critical but Smart Column Mapping ensures consistency) - -#### 4. ODS.CSDB_DEBT_DAILY_ARCHIVE -- **Source Table**: OU_CSDB.LEGACY_DEBT_DAILY -- **Format**: Parquet with Hive partitioning -- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_DEBT_DAILY/) -- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY - -#### 5. 
ODS.CSDB_INSTR_RAT_FULL_ARCHIVE -- **Source Table**: OU_CSDB.LEGACY_INSTR_RAT_FULL -- **Format**: Parquet with Hive partitioning -- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_INSTR_RAT_FULL/) -- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY - -#### 6. ODS.CSDB_INSTR_DESC_FULL_ARCHIVE -- **Source Table**: OU_CSDB.LEGACY_INSTR_DESC_FULL -- **Format**: Parquet with Hive partitioning -- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_INSTR_DESC_FULL/) -- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY - -#### 7. ODS.CSDB_ISSUER_RAT_FULL_ARCHIVE -- **Source Table**: OU_CSDB.LEGACY_ISSUER_RAT_FULL -- **Format**: Parquet with Hive partitioning -- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_ISSUER_RAT_FULL/) -- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY - -#### 8. ODS.CSDB_ISSUER_DESC_FULL_ARCHIVE -- **Source Table**: OU_CSDB.LEGACY_ISSUER_DESC_FULL -- **Format**: Parquet with Hive partitioning -- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_ISSUER_DESC_FULL/) -- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY - ---- - -## External Table Column Order Requirements - -### **CRITICAL for CSV Tables** (DATA bucket): - -All CSV external tables MUST have **A_WORKFLOW_HISTORY_KEY at position 2**: - -``` -Position 1: A_KEY (NUMBER) -Position 2: A_WORKFLOW_HISTORY_KEY (NUMBER) ← MUST BE HERE! -Position 3+: Other columns in any order -``` - -**Reason**: Oracle External Tables with CSV format use **positional mapping** (ignore header row). If source table has A_ETL_LOAD_SET_FK at position 72, but CSV puts it at position 72 while external table expects A_WORKFLOW_HISTORY_KEY at position 2, the external table will try to read position 2 (which might be a DATE column) as NUMBER → conversion fails → NULL value. - -**Solution**: Smart Column Mapping (v2.4.0) generates CSV columns in EXTERNAL TABLE order, ensuring position 2 has the correct NUMBER value. 
- -### **OPTIONAL for Parquet Tables** (ARCHIVE bucket): - -Parquet format uses **schema-based mapping** (column names). Column order doesn't matter, but Smart Column Mapping provides consistency. - ---- - -## Creation Script Example - -### CSV External Table (CRITICAL - Correct Column Order) - -```sql --- Example: ODS.CSDB_DEBT_DATA_ODS --- IMPORTANT: A_WORKFLOW_HISTORY_KEY must be at position 2! - -BEGIN - ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE( - pTableName => 'CSDB_DEBT_DATA_ODS', - pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE', - pPrefix => 'ODS/CSDB/CSDB_DEBT', - pBucketUri => CT_MRDS.ENV_MANAGER.gvDataBucketUri, - pFormat => 'CSV' -- Uses positional mapping! - ); -END; -/ - --- Verify column order (A_WORKFLOW_HISTORY_KEY should be position 2) -SELECT column_id, column_name, data_type -FROM all_tab_columns -WHERE table_name = 'CSDB_DEBT_DATA_ODS' - AND owner = 'ODS' -ORDER BY column_id; -``` - -### Parquet External Table (Optional Column Order) - -```sql --- Example: ODS.CSDB_DEBT_ARCHIVE --- Column order flexible (schema-based mapping) - -BEGIN - ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE( - pTableName => 'CSDB_DEBT_ARCHIVE', - pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE', - pPrefix => 'ARCHIVE/CSDB/CSDB_DEBT', - pBucketUri => CT_MRDS.ENV_MANAGER.gvArchiveBucketUri, - pFormat => 'PARQUET' -- Uses schema-based mapping - ); -END; -/ -``` - ---- - -## Template Tables Required - -All external tables require corresponding template tables in CT_ET_TEMPLATES schema: - -- `CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE` -- `CT_ET_TEMPLATES.CSDB_DEBT_DAILY_TEMPLATE` -- `CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL_TEMPLATE` -- `CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL_TEMPLATE` -- `CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL_TEMPLATE` -- `CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL_TEMPLATE` - -**Note**: Template tables must be created by ADMIN or CT_ET_TEMPLATES user (MRDS_LOADER cannot create them). 
- ---- - -## Verification Checklist - -Before running MARS-835 exports: - -- [ ] All 8 external tables exist in ODS schema -- [ ] CSV tables (DATA bucket) have A_WORKFLOW_HISTORY_KEY at position 2 -- [ ] Template tables exist in CT_ET_TEMPLATES schema -- [ ] MRDS_LOADER has EXECUTE privilege on ODS.FILE_MANAGER_ODS -- [ ] ODS schema has access to CT_MRDS.ENV_MANAGER for logging -- [ ] DATA_EXPORTER v2.4.0 deployed with Smart Column Mapping feature - ---- - -## Testing Verification - -After export, verify A_WORKFLOW_HISTORY_KEY is not NULL: - -```sql --- CSV tables (should be 100% populated) -SELECT 'CSDB_DEBT_DATA_ODS' AS TABLE_NAME, - COUNT(*) AS TOTAL_ROWS, - COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT, - ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT -FROM ODS.CSDB_DEBT_DATA_ODS; - -SELECT 'CSDB_DEBT_DAILY_DATA_ODS' AS TABLE_NAME, - COUNT(*) AS TOTAL_ROWS, - COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT, - ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT -FROM ODS.CSDB_DEBT_DAILY_DATA_ODS; - --- Parquet tables (should also be 100% populated) -SELECT 'CSDB_DEBT_ARCHIVE' AS TABLE_NAME, - COUNT(*) AS TOTAL_ROWS, - COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT, - ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT -FROM ODS.CSDB_DEBT_ARCHIVE; -``` - -**Expected Result**: SUCCESS_RATE_PCT = 100.00 for all tables - ---- - -## Related Documentation - -- [DATA_EXPORTER v2.4.0 Smart Column Mapping Examples](../MARS-835-PREHOOK/current_version/v2.3.0/DATA_EXPORTER_v2.4.0_Smart_Column_Mapping_Examples.sql) -- [Oracle External Tables Column Order Issue](../../confluence/additions/Oracle_External_Tables_Column_Order_Issue.md) -- [MARS-835 README](README.md) - ---- - -**Last Updated**: 2026-01-09 -**Author**: GitHub Copilot (MARS-835 Update) diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835/rollback_mars835.sql 
b/MARS_Packages/REL01_ADDITIONS/MARS-835/rollback_mars835.sql index a3d5c5c..7d85b79 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835/rollback_mars835.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835/rollback_mars835.sql @@ -59,7 +59,13 @@ PROMPT ========================================================================= PROMPT PROMPT ========================================================================= -PROMPT Step 3: Verify Rollback Completed +PROMPT Step 3: Delete File Registration Records from A_SOURCE_FILE_RECEIVED +PROMPT ========================================================================= +@@90_MARS_835_rollback_file_registrations.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 4: Verify Rollback Completed PROMPT ========================================================================= @@99_MARS_835_verify_rollback.sql diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835/track_package_versions.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835/track_package_versions.sql deleted file mode 100644 index 7a8f647..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835/track_package_versions.sql +++ /dev/null @@ -1,92 +0,0 @@ --- =================================================================== --- Simple Package Version Tracking Script --- =================================================================== --- Purpose: Track specified Oracle package versions --- Author: Grzegorz Michalski --- Date: 2025-12-04 --- Version: 3.1.0 - List-Based Edition --- --- USAGE: --- 1. Edit package list below (add/remove packages as needed) --- 2. 
Include in your install/rollback script: @@track_package_versions.sql --- =================================================================== - -SET SERVEROUTPUT ON; - -DECLARE - TYPE t_package_rec IS RECORD ( - owner VARCHAR2(50), - name VARCHAR2(50), - version VARCHAR2(50) - ); - TYPE t_packages IS TABLE OF t_package_rec; - TYPE t_string_array IS TABLE OF VARCHAR2(100); - - -- =================================================================== - -- PACKAGE LIST - Edit this array to specify packages to track - -- =================================================================== - -- Add or remove entries as needed for your MARS issue - -- Format: 'SCHEMA.PACKAGE_NAME' - -- =================================================================== - vPackageList t_string_array := t_string_array( - 'CT_MRDS.FILE_MANAGER', - 'ODS.FILE_MANAGER_ODS' - ); - -- =================================================================== - - vPackages t_packages := t_packages(); - vVersion VARCHAR2(50); - vCount NUMBER := 0; - vOwner VARCHAR2(50); - vPackageName VARCHAR2(50); - vDotPos NUMBER; -BEGIN - DBMS_OUTPUT.PUT_LINE('========================================'); - DBMS_OUTPUT.PUT_LINE('Package Version Tracking'); - DBMS_OUTPUT.PUT_LINE('========================================'); - - -- Process each package in the list - FOR i IN 1..vPackageList.COUNT LOOP - vDotPos := INSTR(vPackageList(i), '.'); - IF vDotPos > 0 THEN - vOwner := SUBSTR(vPackageList(i), 1, vDotPos - 1); - vPackageName := SUBSTR(vPackageList(i), vDotPos + 1); - - BEGIN - EXECUTE IMMEDIATE 'SELECT ' || vPackageList(i) || '.GET_VERSION() FROM DUAL' - INTO vVersion; - - vPackages.EXTEND; - vPackages(vPackages.COUNT).owner := vOwner; - vPackages(vPackages.COUNT).name := vPackageName; - vPackages(vPackages.COUNT).version := vVersion; - - CT_MRDS.ENV_MANAGER.TRACK_PACKAGE_VERSION( - pPackageOwner => vOwner, - pPackageName => vPackageName, - pPackageVersion => vVersion, - pPackageBuildDate => TO_CHAR(SYSDATE, 'YYYY-MM-DD 
HH24:MI:SS'), - pPackageAuthor => 'Grzegorz Michalski' - ); - vCount := vCount + 1; - EXCEPTION - WHEN OTHERS THEN - DBMS_OUTPUT.PUT_LINE('Error tracking ' || vPackageList(i) || ': ' || SQLERRM); - END; - END IF; - END LOOP; - - -- Display results - IF vPackages.COUNT > 0 THEN - DBMS_OUTPUT.PUT_LINE('Packages tracked: ' || vCount || ' of ' || vPackages.COUNT); - FOR i IN 1..vPackages.COUNT LOOP - DBMS_OUTPUT.PUT_LINE(' ' || vPackages(i).owner || '.' || vPackages(i).name || - ' (v' || vPackages(i).version || ')'); - END LOOP; - ELSE - DBMS_OUTPUT.PUT_LINE('No packages found in list'); - END IF; - - DBMS_OUTPUT.PUT_LINE('========================================'); -END; -/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835/verify_packages_version.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835/verify_packages_version.sql deleted file mode 100644 index e9ef47f..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835/verify_packages_version.sql +++ /dev/null @@ -1,62 +0,0 @@ --- =================================================================== --- Universal Package Version Verification Script --- =================================================================== --- Purpose: Verify all tracked Oracle packages for code changes --- Author: Grzegorz Michalski --- Date: 2025-12-04 --- Version: 1.0.0 --- --- USAGE: --- Include at the end of install/rollback scripts: @@verify_packages_version.sql --- --- OUTPUT: --- - List of all tracked packages with their current status --- - OK: Package has not changed since last tracking --- - WARNING: Package code changed without version update --- =================================================================== - -SET LINESIZE 200 -SET PAGESIZE 1000 -SET FEEDBACK OFF - -PROMPT -PROMPT ======================================== -PROMPT Package Version Verification -PROMPT ======================================== -PROMPT - -COLUMN PACKAGE_OWNER FORMAT A15 -COLUMN PACKAGE_NAME FORMAT A20 -COLUMN VERSION FORMAT A10 -COLUMN STATUS FORMAT A80 
- -SELECT - PACKAGE_OWNER, - PACKAGE_NAME, - PACKAGE_VERSION AS VERSION, - CT_MRDS.ENV_MANAGER.CHECK_PACKAGE_CHANGES(PACKAGE_OWNER, PACKAGE_NAME) AS STATUS -FROM ( - SELECT - PACKAGE_OWNER, - PACKAGE_NAME, - PACKAGE_VERSION, - ROW_NUMBER() OVER (PARTITION BY PACKAGE_OWNER, PACKAGE_NAME ORDER BY TRACKING_DATE DESC) AS RN - FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING -) -WHERE RN = 1 -ORDER BY PACKAGE_OWNER, PACKAGE_NAME; - -PROMPT -PROMPT ======================================== -PROMPT Verification Complete -PROMPT ======================================== -PROMPT -PROMPT Legend: -PROMPT OK - Package has not changed since last tracking -PROMPT WARNING - Package code changed without version update -PROMPT -PROMPT For detailed hash information, use: -PROMPT SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('OWNER', 'PACKAGE') FROM DUAL; -PROMPT ======================================== - -SET FEEDBACK ON diff --git a/MARS_Packages/REL02_POST/MARS-956/.gitignore b/MARS_Packages/REL02_POST/MARS-956/.gitignore new file mode 100644 index 0000000..754791d --- /dev/null +++ b/MARS_Packages/REL02_POST/MARS-956/.gitignore @@ -0,0 +1,5 @@ +# Exclude temporary folders from version control +confluence/ +log/ +test/ +mock_data/ diff --git a/MARS_Packages/REL02_POST/MARS-956/01_MARS_956_export_c2d_mpec_data.sql b/MARS_Packages/REL02_POST/MARS-956/01_MARS_956_export_c2d_mpec_data.sql index aecfce7..b848bed 100644 --- a/MARS_Packages/REL02_POST/MARS-956/01_MARS_956_export_c2d_mpec_data.sql +++ b/MARS_Packages/REL02_POST/MARS-956/01_MARS_956_export_c2d_mpec_data.sql @@ -1,156 +1,534 @@ --- =================================================================== --- MARS-956: Export Historical C2D MPEC Data to DATA Bucket --- =================================================================== --- Purpose: One-time export of historical C2D MPEC delta data from --- OU_C2D operational database to DATA bucket as CSV files --- Method: Using DATA_EXPORTER.EXPORT_TABLE_DATA procedure --- Target: DATA bucket 
with folder structure DATA/C2D/{TABLE_NAME} --- Format: CSV files for complete historical data access --- =================================================================== +-- ===================================================================================== +-- Script: 01_MARS_956_export_c2d_mpec_data.sql +-- Purpose: Export C2D MPEC historical data to ODS bucket +-- Author: Grzegorz Michalski +-- Created: 2026-02-12 +-- MARS Issue: MARS-956 +-- Target: mrds_data_dev/ODS/C2D/ +-- ===================================================================================== -PROMPT ========================================================================= -PROMPT MARS-956: Starting C2D MPEC Historical Data Export -PROMPT ========================================================================= +SET SERVEROUTPUT ON SIZE UNLIMITED; +SET TIMING ON; + +PROMPT ===================================================================================== +PROMPT MARS-956: C2D MPEC Historical Data Export +PROMPT ===================================================================================== PROMPT Export Strategy: -PROMPT - Source: OU_C2D schema tables (operational database) -PROMPT - Target: DATA bucket as CSV files +PROMPT - Source: OU_LEGACY_C2D schema tables (operational database) +PROMPT - Target: ODS bucket as CSV files PROMPT - Method: DATA_EXPORTER.EXPORT_TABLE_DATA -PROMPT - Structure: Must match ODS template tables PROMPT - Registration: Files registered in A_SOURCE_FILE_RECEIVED -PROMPT ========================================================================= +PROMPT - Path Structure: ODS/C2D/C2D_MPEC_*/ +PROMPT ===================================================================================== -- Log export start -INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE, PROCEDURE_PARAMETERS) +INSERT INTO CT_MRDS.A_PROCESS_LOG (PROCESS_NAME, PROCEDURE_NAME, LOG_LEVEL, LOG_MESSAGE, PROCEDURE_PARAMETERS) VALUES ('MARS-956', 
'EXPORT_C2D_MPEC_DATA', 'INFO', 'Starting historical C2D MPEC data export', 'Tables: MPEC_ADMIN, MPEC_CONTENT, MPEC_CONTENT_CRITERION'); -COMMIT; --- =================================================================== --- TABLE 1: OU_C2D.MPEC_ADMIN -> DATA/C2D/C2D_MPEC_ADMIN --- =================================================================== +PROMPT +PROMPT ===================================================================================== +PROMPT PRE-EXPORT CHECK: Verify Existing Files in ODS Bucket +PROMPT ===================================================================================== -PROMPT Exporting Table 1/3: OU_C2D.MPEC_ADMIN -PROMPT Target: mrds_data_dev/DATA/C2D/C2D_MPEC_ADMIN +-- Check 1: MPEC_ADMIN files +DECLARE + vFileCount NUMBER := 0; + vRecordCount NUMBER := 0; + vLocationUri VARCHAR2(1000); +BEGIN + -- Get bucket URI for DATA bucket + vLocationUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA') || 'ODS/C2D/C2D_MPEC_ADMIN/'; + + -- Count existing files + SELECT COUNT(*) + INTO vFileCount + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => 'OCI$RESOURCE_PRINCIPAL', + location_uri => vLocationUri + )) + WHERE object_name NOT LIKE '%/'; -- Exclude directories + + IF vFileCount > 0 THEN + DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE('PRE-EXPORT CHECK: MPEC_ADMIN files already exist in DATA bucket'); + DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE('Location: ' || vLocationUri); + DBMS_OUTPUT.PUT_LINE('Files found: ' || vFileCount); + DBMS_OUTPUT.PUT_LINE(''); + + -- List existing files + DBMS_OUTPUT.PUT_LINE('Existing files:'); + FOR rec IN ( + SELECT object_name, bytes, TO_CHAR(last_modified, 'YYYY-MM-DD HH24:MI:SS') AS modified + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => 'OCI$RESOURCE_PRINCIPAL', + location_uri => vLocationUri + )) + WHERE object_name NOT 
LIKE '%/' + ORDER BY object_name + ) LOOP + DBMS_OUTPUT.PUT_LINE(' - ' || rec.object_name || ' (' || rec.bytes || ' bytes, ' || rec.modified || ')'); + END LOOP; + + -- Count records in external table + BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_ADMIN_ODS' INTO vRecordCount; + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('>>> Records currently readable via external table: ' || vRecordCount); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); + EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('WARNING: Cannot count records in external table'); + DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM); + END; + + DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE(''); + ELSE + DBMS_OUTPUT.PUT_LINE('PRE-EXPORT CHECK: No existing MPEC_ADMIN files found - bucket is clean'); + DBMS_OUTPUT.PUT_LINE(''); + END IF; +END; +/ + +-- Check 2: MPEC_CONTENT files +DECLARE + vFileCount NUMBER := 0; + vRecordCount NUMBER := 0; + vLocationUri VARCHAR2(1000); +BEGIN + vLocationUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA') || 'ODS/C2D/C2D_MPEC_CONTENT/'; + + SELECT COUNT(*) + INTO vFileCount + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => 'OCI$RESOURCE_PRINCIPAL', + location_uri => vLocationUri + )) + WHERE object_name NOT LIKE '%/'; + + IF vFileCount > 0 THEN + DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE('PRE-EXPORT CHECK: MPEC_CONTENT files already exist in DATA bucket'); + DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE('Location: ' || vLocationUri); + 
DBMS_OUTPUT.PUT_LINE('Files found: ' || vFileCount); + DBMS_OUTPUT.PUT_LINE(''); + + DBMS_OUTPUT.PUT_LINE('Existing files:'); + FOR rec IN ( + SELECT object_name, bytes, TO_CHAR(last_modified, 'YYYY-MM-DD HH24:MI:SS') AS modified + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => 'OCI$RESOURCE_PRINCIPAL', + location_uri => vLocationUri + )) + WHERE object_name NOT LIKE '%/' + ORDER BY object_name + ) LOOP + DBMS_OUTPUT.PUT_LINE(' - ' || rec.object_name || ' (' || rec.bytes || ' bytes, ' || rec.modified || ')'); + END LOOP; + + BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_CONTENT_ODS' INTO vRecordCount; + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('>>> Records currently readable via external table: ' || vRecordCount); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); + EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('WARNING: Cannot count records in external table'); + DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM); + END; + + DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE(''); + ELSE + DBMS_OUTPUT.PUT_LINE('PRE-EXPORT CHECK: No existing MPEC_CONTENT files found - bucket is clean'); + DBMS_OUTPUT.PUT_LINE(''); + END IF; +END; +/ + +-- Check 3: MPEC_CONTENT_CRITERION files +DECLARE + vFileCount NUMBER := 0; + vRecordCount NUMBER := 0; + vLocationUri VARCHAR2(1000); +BEGIN + vLocationUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA') || 'ODS/C2D/C2D_MPEC_CONTENT_CRITERION/'; + + SELECT COUNT(*) + INTO vFileCount + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => 'OCI$RESOURCE_PRINCIPAL', + location_uri => vLocationUri + )) + WHERE object_name NOT LIKE '%/'; + + IF vFileCount > 0 THEN + 
DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE('PRE-EXPORT CHECK: MPEC_CONTENT_CRITERION files already exist in DATA bucket'); + DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE('Location: ' || vLocationUri); + DBMS_OUTPUT.PUT_LINE('Files found: ' || vFileCount); + DBMS_OUTPUT.PUT_LINE(''); + + DBMS_OUTPUT.PUT_LINE('Existing files:'); + FOR rec IN ( + SELECT object_name, bytes, TO_CHAR(last_modified, 'YYYY-MM-DD HH24:MI:SS') AS modified + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => 'OCI$RESOURCE_PRINCIPAL', + location_uri => vLocationUri + )) + WHERE object_name NOT LIKE '%/' + ORDER BY object_name + ) LOOP + DBMS_OUTPUT.PUT_LINE(' - ' || rec.object_name || ' (' || rec.bytes || ' bytes, ' || rec.modified || ')'); + END LOOP; + + BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_CONTENT_CRITERION_ODS' INTO vRecordCount; + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('>>> Records currently readable via external table: ' || vRecordCount); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); + EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('WARNING: Cannot count records in external table'); + DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM); + END; + + DBMS_OUTPUT.PUT_LINE('==============================================================================='); + DBMS_OUTPUT.PUT_LINE(''); + ELSE + DBMS_OUTPUT.PUT_LINE('PRE-EXPORT CHECK: No existing MPEC_CONTENT_CRITERION files found - bucket is clean'); + DBMS_OUTPUT.PUT_LINE(''); + END IF; +END; +/ + +PROMPT +PROMPT 
===================================================================================== +PROMPT PRE-EXPORT: Verify Source and Target Table Readiness +PROMPT ===================================================================================== + +-- Check source table counts before export +DECLARE + vAdminRows NUMBER := 0; + vContentRows NUMBER := 0; + vCriterionRows NUMBER := 0; + vTotalSource NUMBER := 0; + vAdminTarget NUMBER := 0; + vContentTarget NUMBER := 0; + vCriterionTarget NUMBER := 0; + vTotalTarget NUMBER := 0; +BEGIN + -- Source table counts + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_ADMIN' INTO vAdminRows; + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_CONTENT' INTO vContentRows; + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_CONTENT_CRITERION' INTO vCriterionRows; + vTotalSource := vAdminRows + vContentRows + vCriterionRows; + + DBMS_OUTPUT.PUT_LINE('Source table record counts (pre-export):'); + DBMS_OUTPUT.PUT_LINE('- MPEC_ADMIN: ' || vAdminRows || ' records'); + DBMS_OUTPUT.PUT_LINE('- MPEC_CONTENT: ' || vContentRows || ' records'); + DBMS_OUTPUT.PUT_LINE('- MPEC_CONTENT_CRITERION: ' || vCriterionRows || ' records'); + DBMS_OUTPUT.PUT_LINE('- TOTAL SOURCE: ' || vTotalSource || ' records'); + + -- Target external table counts (current state) + BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_ADMIN_ODS' INTO vAdminTarget; + EXCEPTION + WHEN OTHERS THEN + IF SQLCODE IN (-29913, -29400) OR SQLERRM LIKE '%KUP-13023%' THEN + vAdminTarget := 0; -- Empty is expected + ELSE + vAdminTarget := -1; -- Error + END IF; + END; + + BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_CONTENT_ODS' INTO vContentTarget; + EXCEPTION + WHEN OTHERS THEN + IF SQLCODE IN (-29913, -29400) OR SQLERRM LIKE '%KUP-13023%' THEN + vContentTarget := 0; + ELSE + vContentTarget := -1; + END IF; + END; + + BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_CONTENT_CRITERION_ODS' INTO vCriterionTarget; + 
EXCEPTION + WHEN OTHERS THEN + IF SQLCODE IN (-29913, -29400) OR SQLERRM LIKE '%KUP-13023%' THEN + vCriterionTarget := 0; + ELSE + vCriterionTarget := -1; + END IF; + END; + + IF vAdminTarget >= 0 AND vContentTarget >= 0 AND vCriterionTarget >= 0 THEN + vTotalTarget := vAdminTarget + vContentTarget + vCriterionTarget; + ELSE + vTotalTarget := -1; -- Error state + END IF; + + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('Target external table record counts (pre-export):'); + DBMS_OUTPUT.PUT_LINE('- C2D_MPEC_ADMIN_ODS: ' || + CASE WHEN vAdminTarget = -1 THEN 'ERROR/INACCESSIBLE' ELSE TO_CHAR(vAdminTarget) END); + DBMS_OUTPUT.PUT_LINE('- C2D_MPEC_CONTENT_ODS: ' || + CASE WHEN vContentTarget = -1 THEN 'ERROR/INACCESSIBLE' ELSE TO_CHAR(vContentTarget) END); + DBMS_OUTPUT.PUT_LINE('- C2D_MPEC_CONTENT_CRITERION_ODS: ' || + CASE WHEN vCriterionTarget = -1 THEN 'ERROR/INACCESSIBLE' ELSE TO_CHAR(vCriterionTarget) END); + DBMS_OUTPUT.PUT_LINE('- TOTAL TARGET: ' || + CASE WHEN vTotalTarget = -1 THEN 'ERROR/INACCESSIBLE' ELSE TO_CHAR(vTotalTarget) END); + + IF vTotalSource > 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: Source tables contain data - ready for export'); + ELSE + DBMS_OUTPUT.PUT_LINE('ERROR: WARNING: No source data found'); + END IF; + + IF vTotalTarget = 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: Target external tables are clean - ready for fresh export'); + ELSIF vTotalTarget > 0 THEN + DBMS_OUTPUT.PUT_LINE('WARNING: Target tables contain ' || vTotalTarget || ' records - may be re-run'); + ELSE + DBMS_OUTPUT.PUT_LINE('ERROR: Cannot access target external tables'); + END IF; + + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('Proceeding with export...'); +END; +/ + +PROMPT +PROMPT ===================================================================================== +PROMPT TABLE 1/3: OU_LEGACY_C2D.MPEC_ADMIN -> ODS/C2D/C2D_MPEC_ADMIN +PROMPT ===================================================================================== BEGIN 
CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA( - pSchemaName => 'OU_C2D', + pSchemaName => 'OU_LEGACY_C2D', pTableName => 'MPEC_ADMIN', pKeyColumnName => 'A_ETL_LOAD_SET_FK', -- ETL key for data lookup - pBucketArea => 'DATA', - pFolderName => 'DATA/C2D/C2D_MPEC_ADMIN', + pBucketArea => 'ODS', + pFolderName => 'ODS/C2D/C2D_MPEC_ADMIN', pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_ADMIN', -- Template for column order - pRegisterExport => TRUE, -- Register files in A_SOURCE_FILE_RECEIVED - pCredentialName => 'DEF_CRED_ARN' + pMaxFileSize => 104857600, -- 100MB max file size + pRegisterExport => TRUE, -- Register files in A_SOURCE_FILE_RECEIVED + pProcessName => 'MARS-956' -- Process identifier for tracking ); - DBMS_OUTPUT.PUT_LINE('✓ MPEC_ADMIN export completed successfully'); + DBMS_OUTPUT.PUT_LINE('SUCCESS: MPEC_ADMIN export completed successfully'); EXCEPTION WHEN OTHERS THEN - DBMS_OUTPUT.PUT_LINE('✗ MPEC_ADMIN export failed: ' || SQLERRM); - -- Log error but continue with other tables - INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE) - VALUES ('MARS-956', 'EXPORT_MPEC_ADMIN', 'ERROR', 'Export failed: ' || SQLERRM); - COMMIT; - RAISE; + DECLARE + vErrorMsg VARCHAR2(4000) := SUBSTR(SQLERRM, 1, 4000); + BEGIN + DBMS_OUTPUT.PUT_LINE('ERROR: MPEC_ADMIN export failed: ' || vErrorMsg); + -- Log error using proper ENV_MANAGER pattern + INSERT INTO CT_MRDS.A_PROCESS_LOG + (guid, Username, Osuser, Machine, Module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES + ('MARS-956', USER, SYS_CONTEXT('USERENV','OS_USER'), SYS_CONTEXT('USERENV','HOST'), + 'MARS-956', 'MARS-956', 'EXPORT_MPEC_ADMIN', NULL, 'ERROR', + 'Export failed: ' || vErrorMsg); + COMMIT; + END; END; / --- =================================================================== --- TABLE 2: OU_C2D.MPEC_CONTENT -> DATA/C2D/C2D_MPEC_CONTENT --- =================================================================== - -PROMPT Exporting Table 
2/3: OU_C2D.MPEC_CONTENT -PROMPT Target: mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT +PROMPT +PROMPT ===================================================================================== +PROMPT TABLE 2/3: OU_LEGACY_C2D.MPEC_CONTENT -> ODS/C2D/C2D_MPEC_CONTENT +PROMPT ===================================================================================== BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA( - pSchemaName => 'OU_C2D', + pSchemaName => 'OU_LEGACY_C2D', pTableName => 'MPEC_CONTENT', - pKeyColumnName => 'A_ETL_LOAD_SET_FK', -- ETL key for data lookup - pBucketArea => 'DATA', - pFolderName => 'DATA/C2D/C2D_MPEC_CONTENT', - pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_CONTENT', -- Template for column order - pRegisterExport => TRUE, -- Register files in A_SOURCE_FILE_RECEIVED - pCredentialName => 'DEF_CRED_ARN' + pKeyColumnName => 'A_ETL_LOAD_SET_FK', + pBucketArea => 'ODS', + pFolderName => 'ODS/C2D/C2D_MPEC_CONTENT', + pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_CONTENT', + pMaxFileSize => 104857600, -- 100MB max file size + pRegisterExport => TRUE, + pProcessName => 'MARS-956' -- Process identifier for tracking ); - DBMS_OUTPUT.PUT_LINE('✓ MPEC_CONTENT export completed successfully'); + DBMS_OUTPUT.PUT_LINE('SUCCESS: MPEC_CONTENT export completed successfully'); EXCEPTION WHEN OTHERS THEN - DBMS_OUTPUT.PUT_LINE('✗ MPEC_CONTENT export failed: ' || SQLERRM); - -- Log error but continue with other tables - INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE) - VALUES ('MARS-956', 'EXPORT_MPEC_CONTENT', 'ERROR', 'Export failed: ' || SQLERRM); - COMMIT; - RAISE; + DECLARE + vErrorMsg VARCHAR2(4000) := SUBSTR(SQLERRM, 1, 4000); + BEGIN + DBMS_OUTPUT.PUT_LINE('ERROR: MPEC_CONTENT export failed: ' || vErrorMsg); + -- Log error using proper ENV_MANAGER pattern + INSERT INTO CT_MRDS.A_PROCESS_LOG + (guid, Username, Osuser, Machine, Module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES + 
('MARS-956', USER, SYS_CONTEXT('USERENV','OS_USER'), SYS_CONTEXT('USERENV','HOST'), + 'MARS-956', 'MARS-956', 'EXPORT_MPEC_CONTENT', NULL, 'ERROR', + 'Export failed: ' || vErrorMsg); + COMMIT; + END; END; / --- =================================================================== --- TABLE 3: OU_C2D.MPEC_CONTENT_CRITERION -> DATA/C2D/C2D_MPEC_CONTENT_CRITERION --- =================================================================== - -PROMPT Exporting Table 3/3: OU_C2D.MPEC_CONTENT_CRITERION -PROMPT Target: mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT_CRITERION +PROMPT +PROMPT ===================================================================================== +PROMPT TABLE 3/3: OU_LEGACY_C2D.MPEC_CONTENT_CRITERION -> ODS/C2D/C2D_MPEC_CONTENT_CRITERION +PROMPT ===================================================================================== BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA( - pSchemaName => 'OU_C2D', + pSchemaName => 'OU_LEGACY_C2D', pTableName => 'MPEC_CONTENT_CRITERION', - pKeyColumnName => 'A_ETL_LOAD_SET_FK', -- ETL key for data lookup - pBucketArea => 'DATA', - pFolderName => 'DATA/C2D/C2D_MPEC_CONTENT_CRITERION', - pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_CONTENT_CRITERION', -- Template for column order - pRegisterExport => TRUE, -- Register files in A_SOURCE_FILE_RECEIVED - pCredentialName => 'DEF_CRED_ARN' + pKeyColumnName => 'A_ETL_LOAD_SET_FK', + pBucketArea => 'ODS', + pFolderName => 'ODS/C2D/C2D_MPEC_CONTENT_CRITERION', + pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_CONTENT_CRITERION', + pMaxFileSize => 104857600, -- 100MB max file size + pRegisterExport => TRUE, + pProcessName => 'MARS-956' -- Process identifier for tracking ); - DBMS_OUTPUT.PUT_LINE('✓ MPEC_CONTENT_CRITERION export completed successfully'); + DBMS_OUTPUT.PUT_LINE('SUCCESS: MPEC_CONTENT_CRITERION export completed successfully'); EXCEPTION WHEN OTHERS THEN - DBMS_OUTPUT.PUT_LINE('✗ MPEC_CONTENT_CRITERION export failed: ' || SQLERRM); - -- Log error - INSERT INTO 
CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE) - VALUES ('MARS-956', 'EXPORT_MPEC_CONTENT_CRITERION', 'ERROR', 'Export failed: ' || SQLERRM); - COMMIT; - RAISE; + DECLARE + vErrorMsg VARCHAR2(4000) := SUBSTR(SQLERRM, 1, 4000); + BEGIN + DBMS_OUTPUT.PUT_LINE('ERROR: MPEC_CONTENT_CRITERION export failed: ' || vErrorMsg); + -- Log error using proper ENV_MANAGER pattern + INSERT INTO CT_MRDS.A_PROCESS_LOG + (guid, Username, Osuser, Machine, Module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES + ('MARS-956', USER, SYS_CONTEXT('USERENV','OS_USER'), SYS_CONTEXT('USERENV','HOST'), + 'MARS-956', 'MARS-956', 'EXPORT_MPEC_CONTENT_CRITERION', NULL, 'ERROR', + 'Export failed: ' || vErrorMsg); + COMMIT; + END; END; / --- =================================================================== --- Export Summary and Verification --- =================================================================== - -PROMPT ========================================================================= +PROMPT +PROMPT ===================================================================================== PROMPT Export Summary - Checking Results -PROMPT ========================================================================= +PROMPT ===================================================================================== -- Log completion -INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE) +INSERT INTO CT_MRDS.A_PROCESS_LOG (PROCESS_NAME, PROCEDURE_NAME, LOG_LEVEL, LOG_MESSAGE) VALUES ('MARS-956', 'EXPORT_C2D_MPEC_DATA', 'INFO', 'All C2D MPEC historical exports completed successfully'); + +PROMPT +PROMPT ===================================================================================== +PROMPT MARS-956 C2D MPEC Export Completed Successfully! 
+PROMPT ===================================================================================== +PROMPT POST-EXPORT: Source vs Target Record Count Comparison +PROMPT ===================================================================================== + +-- Verify record counts after export +DECLARE + vAdminSource NUMBER := 0; + vContentSource NUMBER := 0; + vCriterionSource NUMBER := 0; + vTotalSource NUMBER := 0; + vAdminTarget NUMBER := 0; + vContentTarget NUMBER := 0; + vCriterionTarget NUMBER := 0; + vTotalTarget NUMBER := 0; + vMismatchCount NUMBER := 0; +BEGIN + -- Source table counts + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_ADMIN' INTO vAdminSource; + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_CONTENT' INTO vContentSource; + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_CONTENT_CRITERION' INTO vCriterionSource; + vTotalSource := vAdminSource + vContentSource + vCriterionSource; + + -- Target external table counts + BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_ADMIN_ODS' INTO vAdminTarget; + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_CONTENT_ODS' INTO vContentTarget; + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.C2D_MPEC_CONTENT_CRITERION_ODS' INTO vCriterionTarget; + vTotalTarget := vAdminTarget + vContentTarget + vCriterionTarget; + + DBMS_OUTPUT.PUT_LINE('POST-EXPORT VERIFICATION SUMMARY'); + DBMS_OUTPUT.PUT_LINE('====================================='); + DBMS_OUTPUT.PUT_LINE('Table | Source | Target | Match'); + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------'); + + -- MPEC_ADMIN comparison + DBMS_OUTPUT.PUT_LINE('MPEC_ADMIN | ' || + RPAD(vAdminSource, 8) || ' | ' || + RPAD(vAdminTarget, 8) || ' | ' || + CASE WHEN vAdminSource = vAdminTarget THEN 'OK' ELSE 'MISMATCH' END); + IF vAdminSource != vAdminTarget THEN vMismatchCount := vMismatchCount + 1; END IF; + + -- MPEC_CONTENT comparison + DBMS_OUTPUT.PUT_LINE('MPEC_CONTENT | ' || + 
RPAD(vContentSource, 8) || ' | ' || + RPAD(vContentTarget, 8) || ' | ' || + CASE WHEN vContentSource = vContentTarget THEN 'OK' ELSE 'MISMATCH' END); + IF vContentSource != vContentTarget THEN vMismatchCount := vMismatchCount + 1; END IF; + + -- MPEC_CONTENT_CRITERION comparison + DBMS_OUTPUT.PUT_LINE('MPEC_CONTENT_CRITERION | ' || + RPAD(vCriterionSource, 8) || ' | ' || + RPAD(vCriterionTarget, 8) || ' | ' || + CASE WHEN vCriterionSource = vCriterionTarget THEN 'OK' ELSE 'MISMATCH' END); + IF vCriterionSource != vCriterionTarget THEN vMismatchCount := vMismatchCount + 1; END IF; + + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('TOTAL | ' || + RPAD(vTotalSource, 8) || ' | ' || + RPAD(vTotalTarget, 8) || ' | ' || + CASE WHEN vTotalSource = vTotalTarget THEN 'OK' ELSE 'MISMATCH' END); + + DBMS_OUTPUT.PUT_LINE(''); + IF vMismatchCount = 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: All record counts match - export verified'); + ELSE + DBMS_OUTPUT.PUT_LINE('WARNING: ' || vMismatchCount || ' table(s) have record count mismatches'); + DBMS_OUTPUT.PUT_LINE(' Please review export logs and external table access permissions'); + END IF; + + EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('ERROR: Cannot verify target external tables post-export'); + DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM); + DBMS_OUTPUT.PUT_LINE('Please check external table configuration and ODS bucket access'); + END; +END; +/ + +-- Log export completion +INSERT INTO CT_MRDS.A_PROCESS_LOG (PROCESS_NAME, PROCEDURE_NAME, LOG_LEVEL, LOG_MESSAGE, PROCEDURE_PARAMETERS) +VALUES ('MARS-956', 'EXPORT_C2D_MPEC_DATA', 'INFO', 'Historical C2D MPEC data export completed', + 'Check verification scripts for detailed results'); + COMMIT; --- Display recent export activity -PROMPT Recent Export Activity (last 30 minutes): -SELECT TO_CHAR(EVENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS EXPORT_TIME, - PACKAGE_NAME, - PROCEDURE_NAME, - EVENT_TYPE, - EVENT_MESSAGE 
-FROM CT_MRDS.A_PROCESS_LOG -WHERE PACKAGE_NAME = 'MARS-956' - OR PROCEDURE_NAME LIKE '%DATA_EXPORTER%' - AND EVENT_TIMESTAMP >= SYSTIMESTAMP - INTERVAL '30' MINUTE -ORDER BY EVENT_TIMESTAMP DESC -FETCH FIRST 20 ROWS ONLY; - -PROMPT ========================================================================= -PROMPT MARS-956 Export Completed Successfully! -PROMPT ========================================================================= -PROMPT Next Steps: -PROMPT 1. Verify CSV files created in DATA bucket -PROMPT 2. Check file structure matches template tables -PROMPT 3. Validate row counts match source tables -PROMPT 4. Confirm data available for delta queries -PROMPT ========================================================================= \ No newline at end of file +PROMPT +PROMPT ===================================================================================== +PROMPT MARS-956 C2D MPEC Historical Data Export - COMPLETED +PROMPT +PROMPT Next steps: +PROMPT 1. Run: @02_MARS_956_verify_exports.sql (verify file registration) +PROMPT 2. 
Run: @03_MARS_956_verify_data_integrity.sql (full data verification) +PROMPT ===================================================================================== \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/02_MARS_956_verify_exports.sql b/MARS_Packages/REL02_POST/MARS-956/02_MARS_956_verify_exports.sql new file mode 100644 index 0000000..70dbbd2 --- /dev/null +++ b/MARS_Packages/REL02_POST/MARS-956/02_MARS_956_verify_exports.sql @@ -0,0 +1,190 @@ +-- =================================================================== +-- MARS-956 Verify Exports: Check Export Results and File Creation +-- =================================================================== +-- Purpose: Verify that C2D MPEC export completed successfully +-- Author: Grzegorz Michalski +-- Date: 2026-02-12 + +SET SERVEROUTPUT ON SIZE UNLIMITED +SET TIMING ON + +PROMPT ========================================================================= +PROMPT MARS-956 Export Verification +PROMPT ========================================================================= + +-- Check 1: Verify files were registered in A_SOURCE_FILE_RECEIVED +PROMPT Checking export file registration... 
+DECLARE + vFileCount NUMBER := 0; + vTotalBytes NUMBER := 0; +BEGIN + SELECT COUNT(*), NVL(SUM(BYTES), 0) + INTO vFileCount, vTotalBytes + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE RECEPTION_DATE >= SYSDATE - 1/24 -- Last hour + AND (SOURCE_FILE_NAME LIKE '2001_%' -- MPEC_ADMIN ETL keys + OR SOURCE_FILE_NAME LIKE '2002_%' + OR SOURCE_FILE_NAME LIKE '2003_%' + OR SOURCE_FILE_NAME LIKE '2004_%' + OR SOURCE_FILE_NAME LIKE '2005_%' + OR SOURCE_FILE_NAME LIKE '2006_%' -- MPEC_CONTENT ETL keys + OR SOURCE_FILE_NAME LIKE '2007_%' + OR SOURCE_FILE_NAME LIKE '2008_%' + OR SOURCE_FILE_NAME LIKE '2009_%' -- MPEC_CONTENT_CRITERION ETL keys + OR SOURCE_FILE_NAME LIKE '2010_%'); + + DBMS_OUTPUT.PUT_LINE('SUCCESS: Registered export files: ' || vFileCount); + DBMS_OUTPUT.PUT_LINE('SUCCESS: Total file size: ' || ROUND(vTotalBytes/1024, 2) || ' KB'); + + IF vFileCount = 0 THEN + DBMS_OUTPUT.PUT_LINE('WARNING: No export files found in registration'); + ELSIF vFileCount < 9 THEN + DBMS_OUTPUT.PUT_LINE('WARNING: Expected 9 files (3 tables x 3 ETL keys), found: ' || vFileCount); + ELSE + DBMS_OUTPUT.PUT_LINE('SUCCESS: All expected export files found'); + END IF; +END; +/ + +-- Check 2: Show recent export registrations +PROMPT Recent export file registrations: +SELECT + SUBSTR(SOURCE_FILE_NAME, 1, 40) AS FILE_NAME, + A_SOURCE_FILE_CONFIG_KEY AS CONFIG_KEY, + PROCESSING_STATUS, + ROUND(BYTES/1024, 2) AS SIZE_KB, + TO_CHAR(RECEPTION_DATE, 'HH24:MI:SS') AS TIME_EXPORTED +FROM CT_MRDS.A_SOURCE_FILE_RECEIVED +WHERE RECEPTION_DATE >= SYSDATE - 1/24 -- Last hour + AND (SOURCE_FILE_NAME LIKE '200%') -- ETL keys starting with 200 +ORDER BY RECEPTION_DATE DESC; + +-- Check 3: Verify export process logs +PROMPT Checking export process logs... 
+DECLARE + vLogCount NUMBER := 0; + vErrorCount NUMBER := 0; +BEGIN + SELECT COUNT(*), SUM(CASE WHEN LOG_LEVEL = 'ERROR' THEN 1 ELSE 0 END) + INTO vLogCount, vErrorCount + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME = 'MARS-956' + AND LOG_TIMESTAMP >= SYSTIMESTAMP - INTERVAL '1' HOUR; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: Process log entries: ' || vLogCount); + DBMS_OUTPUT.PUT_LINE('SUCCESS: Error entries: ' || vErrorCount); + + IF vErrorCount > 0 THEN + DBMS_OUTPUT.PUT_LINE('WARNING: ' || vErrorCount || ' errors found in process log'); + ELSE + DBMS_OUTPUT.PUT_LINE('SUCCESS: No errors found in process log'); + END IF; +END; +/ + +-- Check 4: Display recent process logs +PROMPT Recent MARS-956 process logs: +SELECT + TO_CHAR(LOG_TIMESTAMP, 'HH24:MI:SS') AS TIME, + PROCEDURE_NAME, + LOG_LEVEL, + SUBSTR(LOG_MESSAGE, 1, 60) AS MESSAGE +FROM CT_MRDS.A_PROCESS_LOG +WHERE PROCESS_NAME = 'MARS-956' + AND LOG_TIMESTAMP >= SYSTIMESTAMP - INTERVAL '1' HOUR +ORDER BY LOG_TIMESTAMP DESC +FETCH FIRST 10 ROWS ONLY; + +-- Check 5: Cloud bucket file verification (if cloud_wrapper available) +PROMPT Checking cloud bucket files... 
+DECLARE + vCloudFileCount NUMBER := 0; + vCredentialName VARCHAR2(100); + vDataBucketUri VARCHAR2(500); +BEGIN + -- Get bucket URI and credential + vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('ODS'); + vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName; + + DBMS_OUTPUT.PUT_LINE('Checking ODS bucket: ' || vDataBucketUri); + + -- Count files in cloud bucket + BEGIN + FOR rec IN ( + SELECT object_name + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => vCredentialName, + location_uri => vDataBucketUri + )) + WHERE object_name LIKE 'ODS/C2D/C2D_MPEC_%' + ) LOOP + vCloudFileCount := vCloudFileCount + 1; + IF vCloudFileCount <= 5 THEN -- Show first 5 files + DBMS_OUTPUT.PUT_LINE('- ' || rec.object_name); + END IF; + END LOOP; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: Cloud bucket files found: ' || vCloudFileCount); + + IF vCloudFileCount = 0 THEN + DBMS_OUTPUT.PUT_LINE('WARNING: No files found in cloud bucket'); + END IF; + + EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('WARNING: Cannot access cloud bucket: ' || SQLERRM); + END; +END; +/ + +PROMPT +PROMPT ========================================================================= +PROMPT MARS-956 Export Verification Summary +PROMPT ========================================================================= + +-- Final verification summary +DECLARE + vFileRegCount NUMBER := 0; + vCloudFileCount NUMBER := 0; + vLogErrorCount NUMBER := 0; + vOverallStatus VARCHAR2(20); +BEGIN + -- Count registered files + SELECT COUNT(*) + INTO vFileRegCount + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE RECEPTION_DATE >= SYSDATE - 1/24 + AND SOURCE_FILE_NAME LIKE '200%'; + + -- Count process errors + SELECT COUNT(*) + INTO vLogErrorCount + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME = 'MARS-956' + AND LOG_LEVEL = 'ERROR' + AND LOG_TIMESTAMP >= SYSTIMESTAMP - INTERVAL '1' HOUR; + + -- Determine overall status + IF vFileRegCount >= 9 AND vLogErrorCount = 0 THEN + vOverallStatus := 'SUCCESS'; + ELSIF vFileRegCount > 0 
AND vLogErrorCount = 0 THEN + vOverallStatus := 'PARTIAL SUCCESS'; + ELSE + vOverallStatus := 'ISSUES DETECTED'; + END IF; + + DBMS_OUTPUT.PUT_LINE('MARS-956 Export Verification: ' || vOverallStatus); + DBMS_OUTPUT.PUT_LINE('- Registered files: ' || vFileRegCount || ' (expected: 9)'); + DBMS_OUTPUT.PUT_LINE('- Process errors: ' || vLogErrorCount); + + IF vOverallStatus = 'SUCCESS' THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: All validations passed - export successful'); + ELSE + DBMS_OUTPUT.PUT_LINE('WARNING: Some issues detected - review logs'); + END IF; +END; +/ + +PROMPT ========================================================================= +PROMPT Export Verification Completed +PROMPT ========================================================================= \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/03_MARS_956_verify_data_integrity.sql b/MARS_Packages/REL02_POST/MARS-956/03_MARS_956_verify_data_integrity.sql new file mode 100644 index 0000000..89f5d94 --- /dev/null +++ b/MARS_Packages/REL02_POST/MARS-956/03_MARS_956_verify_data_integrity.sql @@ -0,0 +1,354 @@ +-- =================================================================== +-- MARS-956 Verify Data Integrity: Source vs Exported Data Validation +-- =================================================================== +-- Purpose: Verify data integrity between source tables and exported files +-- Author: Grzegorz Michalski +-- Date: 2026-02-12 + +SET SERVEROUTPUT ON SIZE UNLIMITED +SET TIMING ON + +PROMPT ========================================================================= +PROMPT MARS-956 Data Integrity Verification +PROMPT ========================================================================= + +-- Check 1: Source table record counts vs expected ETL keys +PROMPT Checking source table record counts... 
+DECLARE + vAdminRows NUMBER := 0; + vContentRows NUMBER := 0; + vCriterionRows NUMBER := 0; + vTotalRows NUMBER := 0; + vExpectedFiles NUMBER := 9; -- 3 tables x 3 ETL keys average +BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_ADMIN' INTO vAdminRows; + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_CONTENT' INTO vContentRows; + EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM OU_LEGACY_C2D.MPEC_CONTENT_CRITERION' INTO vCriterionRows; + + vTotalRows := vAdminRows + vContentRows + vCriterionRows; + + DBMS_OUTPUT.PUT_LINE('Source table record counts:'); + DBMS_OUTPUT.PUT_LINE('- MPEC_ADMIN: ' || vAdminRows || ' records'); + DBMS_OUTPUT.PUT_LINE('- MPEC_CONTENT: ' || vContentRows || ' records'); + DBMS_OUTPUT.PUT_LINE('- MPEC_CONTENT_CRITERION: ' || vCriterionRows || ' records'); + DBMS_OUTPUT.PUT_LINE('- TOTAL: ' || vTotalRows || ' records'); + + IF vTotalRows > 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: All source tables contain data'); + ELSE + DBMS_OUTPUT.PUT_LINE('ERROR: No data found in source tables'); + END IF; +END; +/ + +-- Check 2: ETL key distribution analysis +PROMPT Checking ETL key distribution... 
+DECLARE + vAdminKeys NUMBER := 0; + vContentKeys NUMBER := 0; + vCriterionKeys NUMBER := 0; + vTotalKeys NUMBER := 0; +BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(DISTINCT A_ETL_LOAD_SET_FK) FROM OU_LEGACY_C2D.MPEC_ADMIN' INTO vAdminKeys; + EXECUTE IMMEDIATE 'SELECT COUNT(DISTINCT A_ETL_LOAD_SET_FK) FROM OU_LEGACY_C2D.MPEC_CONTENT' INTO vContentKeys; + EXECUTE IMMEDIATE 'SELECT COUNT(DISTINCT A_ETL_LOAD_SET_FK) FROM OU_LEGACY_C2D.MPEC_CONTENT_CRITERION' INTO vCriterionKeys; + + SELECT COUNT(DISTINCT etl_key) + INTO vTotalKeys + FROM ( + SELECT A_ETL_LOAD_SET_FK AS etl_key FROM OU_LEGACY_C2D.MPEC_ADMIN + UNION + SELECT A_ETL_LOAD_SET_FK FROM OU_LEGACY_C2D.MPEC_CONTENT + UNION + SELECT A_ETL_LOAD_SET_FK FROM OU_LEGACY_C2D.MPEC_CONTENT_CRITERION + ); + + DBMS_OUTPUT.PUT_LINE('ETL key distribution:'); + DBMS_OUTPUT.PUT_LINE('- MPEC_ADMIN distinct keys: ' || vAdminKeys); + DBMS_OUTPUT.PUT_LINE('- MPEC_CONTENT distinct keys: ' || vContentKeys); + DBMS_OUTPUT.PUT_LINE('- MPEC_CONTENT_CRITERION distinct keys: ' || vCriterionKeys); + DBMS_OUTPUT.PUT_LINE('- Total distinct ETL keys: ' || vTotalKeys); + + IF vTotalKeys > 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: ETL key distribution looks normal'); + ELSE + DBMS_OUTPUT.PUT_LINE('ERROR: No ETL keys found in source data'); + END IF; +END; +/ + +-- Check 3: Template table compatibility verification +PROMPT Checking template table compatibility... 
+DECLARE + vAdminCols NUMBER := 0; + vContentCols NUMBER := 0; + vCriterionCols NUMBER := 0; +BEGIN + -- Check MPEC_ADMIN template compatibility + SELECT COUNT(*) + INTO vAdminCols + FROM all_tab_columns + WHERE owner = 'CT_ET_TEMPLATES' + AND table_name = 'C2D_MPEC_ADMIN'; + + -- Check MPEC_CONTENT template compatibility + SELECT COUNT(*) + INTO vContentCols + FROM all_tab_columns + WHERE owner = 'CT_ET_TEMPLATES' + AND table_name = 'C2D_MPEC_CONTENT'; + + -- Check MPEC_CONTENT_CRITERION template compatibility + SELECT COUNT(*) + INTO vCriterionCols + FROM all_tab_columns + WHERE owner = 'CT_ET_TEMPLATES' + AND table_name = 'C2D_MPEC_CONTENT_CRITERION'; + + DBMS_OUTPUT.PUT_LINE('Template table column counts:'); + DBMS_OUTPUT.PUT_LINE('- C2D_MPEC_ADMIN: ' || vAdminCols || ' columns'); + DBMS_OUTPUT.PUT_LINE('- C2D_MPEC_CONTENT: ' || vContentCols || ' columns'); + DBMS_OUTPUT.PUT_LINE('- C2D_MPEC_CONTENT_CRITERION: ' || vCriterionCols || ' columns'); + + IF vAdminCols > 0 AND vContentCols > 0 AND vCriterionCols > 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: All template tables have defined structure'); + ELSE + DBMS_OUTPUT.PUT_LINE('ERROR: One or more template tables missing columns'); + END IF; +END; +/ + +-- Check 4: Verify A_ETL_LOAD_SET_FK values exist in A_LOAD_HISTORY +PROMPT Checking ETL key references in A_LOAD_HISTORY... 
+DECLARE + vValidKeys NUMBER := 0; + vTotalSourceKeys NUMBER := 0; +BEGIN + -- Count total distinct ETL keys in source tables + SELECT COUNT(DISTINCT etl_key) + INTO vTotalSourceKeys + FROM ( + SELECT A_ETL_LOAD_SET_FK AS etl_key FROM OU_LEGACY_C2D.MPEC_ADMIN + UNION + SELECT A_ETL_LOAD_SET_FK FROM OU_LEGACY_C2D.MPEC_CONTENT + UNION + SELECT A_ETL_LOAD_SET_FK FROM OU_LEGACY_C2D.MPEC_CONTENT_CRITERION + ); + + -- Count how many exist in A_LOAD_HISTORY + SELECT COUNT(DISTINCT etl_key) + INTO vValidKeys + FROM ( + SELECT A_ETL_LOAD_SET_FK AS etl_key FROM OU_LEGACY_C2D.MPEC_ADMIN + UNION + SELECT A_ETL_LOAD_SET_FK FROM OU_LEGACY_C2D.MPEC_CONTENT + UNION + SELECT A_ETL_LOAD_SET_FK FROM OU_LEGACY_C2D.MPEC_CONTENT_CRITERION + ) src + WHERE EXISTS ( + SELECT 1 FROM CT_ODS.A_LOAD_HISTORY h + WHERE h.A_ETL_LOAD_SET_KEY = src.etl_key + ); + + DBMS_OUTPUT.PUT_LINE('ETL key validation:'); + DBMS_OUTPUT.PUT_LINE('- Total distinct ETL keys in source: ' || vTotalSourceKeys); + DBMS_OUTPUT.PUT_LINE('- Valid keys (exist in A_LOAD_HISTORY): ' || vValidKeys); + + IF vValidKeys = vTotalSourceKeys THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: All source ETL keys are valid'); + ELSE + DBMS_OUTPUT.PUT_LINE('ERROR: Some ETL keys may be invalid: ' || (vTotalSourceKeys - vValidKeys)); + END IF; +END; +/ + +PROMPT ===================================================================================== +PROMPT MARS-956 Record Count Verification +PROMPT ===================================================================================== +PROMPT Comparing source table counts with exported external table counts +PROMPT ===================================================================================== + +DECLARE + TYPE t_table_info IS RECORD ( + source_schema VARCHAR2(50), + source_table VARCHAR2(100), + external_table VARCHAR2(100), + description VARCHAR2(200) + ); + TYPE t_table_list IS TABLE OF t_table_info; + + vTables t_table_list; + vSourceCount NUMBER; + vTargetCount NUMBER; + vTotalSourceCount 
NUMBER := 0; + vTotalTargetCount NUMBER := 0; + vMismatchCount NUMBER := 0; + vSql VARCHAR2(4000); + vFileCount NUMBER := 0; + vValidationResult VARCHAR2(100); +BEGIN + DBMS_OUTPUT.PUT_LINE('VERIFICATION TIME: ' || TO_CHAR(SYSTIMESTAMP, 'YYYY-MM-DD HH24:MI:SS')); + DBMS_OUTPUT.PUT_LINE(''); + + -- Initialize table list with C2D MPEC configuration + vTables := t_table_list( + t_table_info('OU_LEGACY_C2D', 'MPEC_ADMIN', 'ODS.C2D_MPEC_ADMIN_ODS', 'MPEC Admin data (ETL keys 2001-2005)'), + t_table_info('OU_LEGACY_C2D', 'MPEC_CONTENT', 'ODS.C2D_MPEC_CONTENT_ODS', 'MPEC Content data (ETL keys 2006-2008)'), + t_table_info('OU_LEGACY_C2D', 'MPEC_CONTENT_CRITERION', 'ODS.C2D_MPEC_CONTENT_CRITERION_ODS', 'MPEC Criterion data (ETL keys 2009-2010)') + ); + + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('Table Name Source Count Target Count Status'); + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------------------------------------'); + + FOR i IN 1..vTables.COUNT LOOP + -- Get source table count + vSql := 'SELECT COUNT(*) FROM ' || vTables(i).source_schema || '.' 
|| vTables(i).source_table; + + BEGIN + EXECUTE IMMEDIATE vSql INTO vSourceCount; + vTotalSourceCount := vTotalSourceCount + vSourceCount; + EXCEPTION + WHEN OTHERS THEN + vSourceCount := -1; + DBMS_OUTPUT.PUT_LINE(RPAD(vTables(i).source_table, 24) || 'ERROR: Cannot access source table'); + CONTINUE; + END; + + -- Get target external table count + vSql := 'SELECT COUNT(*) FROM ' || vTables(i).external_table; + BEGIN + EXECUTE IMMEDIATE vSql INTO vTargetCount; + vTotalTargetCount := vTotalTargetCount + vTargetCount; + EXCEPTION + WHEN OTHERS THEN + -- Handle expected errors for empty external tables + -- ORA-29913: error in executing ODCIEXTTABLEOPEN callout + -- ORA-29400: data cartridge error + -- KUP-13023: nothing matched wildcard query (no files in bucket) + -- NOTE: ORA-30653 (reject limit) is a real data quality error, not treated as empty + IF vSourceCount = 0 OR SQLCODE IN (-29913, -29400) OR SQLERRM LIKE '%KUP-13023%' THEN + vTargetCount := 0; -- Treat as empty (no files exported yet) + ELSE + vTargetCount := -1; -- Real error + END IF; + END; + + -- Display comparison results with thousands separators + DECLARE + vStatus VARCHAR2(20); + vSourceDisplay VARCHAR2(17); + vTargetDisplay VARCHAR2(17); + BEGIN + -- Format source count display + IF vSourceCount = -1 THEN + vSourceDisplay := 'ERROR'; + ELSE + vSourceDisplay := TO_CHAR(vSourceCount, '9,999,999,999'); + END IF; + + -- Format target count display + IF vTargetCount = -1 THEN + vTargetDisplay := 'ERROR'; + ELSE + vTargetDisplay := TO_CHAR(vTargetCount, '9,999,999,999'); + END IF; + + -- Determine status + IF vSourceCount = vTargetCount THEN + vStatus := 'PASS'; + ELSIF vTargetCount = -1 THEN + vStatus := 'ERROR'; + vMismatchCount := vMismatchCount + 1; + ELSIF vSourceCount = -1 THEN + vStatus := 'ERROR'; + vMismatchCount := vMismatchCount + 1; + ELSE + vStatus := 'MISMATCH'; + vMismatchCount := vMismatchCount + 1; + END IF; + + DBMS_OUTPUT.PUT_LINE( + RPAD(vTables(i).source_table, 24) || + 
LPAD(vSourceDisplay, 15) || + LPAD(vTargetDisplay, 15) || ' ' || + vStatus + ); + END; + END LOOP; + + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE( + RPAD('TOTALS', 24) || + LPAD(TO_CHAR(vTotalSourceCount, '9,999,999,999'), 15) || + LPAD(TO_CHAR(vTotalTargetCount, '9,999,999,999'), 15) + ); + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE(''); + + -- Count exported files for additional verification + SELECT COUNT(*) + INTO vFileCount + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE RECEPTION_DATE >= SYSDATE - 1/24 + AND (SOURCE_FILE_NAME LIKE '200_%'); + + DBMS_OUTPUT.PUT_LINE('====================================================================================='); + DBMS_OUTPUT.PUT_LINE('Record Count Verification Summary'); + DBMS_OUTPUT.PUT_LINE('====================================================================================='); + DBMS_OUTPUT.PUT_LINE('Total source records: ' || TO_CHAR(vTotalSourceCount, '9,999,999,999')); + DBMS_OUTPUT.PUT_LINE('Total target records: ' || TO_CHAR(vTotalTargetCount, '9,999,999,999') || ' (exported to ODS)'); + DBMS_OUTPUT.PUT_LINE('Export files registered: ' || vFileCount); + DBMS_OUTPUT.PUT_LINE(''); + + IF vMismatchCount = 0 AND vFileCount > 0 THEN + DBMS_OUTPUT.PUT_LINE('[PASS] VERIFICATION PASSED'); + DBMS_OUTPUT.PUT_LINE(' All record counts match between source and exported data'); + DBMS_OUTPUT.PUT_LINE(' Export completed successfully'); + ELSIF vMismatchCount > 0 THEN + DBMS_OUTPUT.PUT_LINE('[INFO] VERIFICATION COMPLETED WITH MISMATCHES'); + DBMS_OUTPUT.PUT_LINE(' Found ' || vMismatchCount || ' table(s) with count mismatches'); + DBMS_OUTPUT.PUT_LINE(' NOTE: Mismatches may be caused by pre-existing files in buckets (see pre-check)'); + DBMS_OUTPUT.PUT_LINE(' Review export logs and pre-check results before re-running exports'); + ELSE + 
DBMS_OUTPUT.PUT_LINE('[WARN] NO EXPORT DETECTED'); + DBMS_OUTPUT.PUT_LINE(' No files found in export registration'); + DBMS_OUTPUT.PUT_LINE(' Verify export execution completed successfully'); + END IF; + DBMS_OUTPUT.PUT_LINE('====================================================================================='); + DBMS_OUTPUT.PUT_LINE(''); + + DBMS_OUTPUT.PUT_LINE('Legend:'); + DBMS_OUTPUT.PUT_LINE(' PASS - Record counts match (export successful)'); + DBMS_OUTPUT.PUT_LINE(' MISMATCH - Record counts differ (may be pre-existing files or export issue)'); + DBMS_OUTPUT.PUT_LINE(' Check pre-check results to identify pre-existing files'); + DBMS_OUTPUT.PUT_LINE(' ERROR - Cannot access table (verify table exists and permissions)'); + DBMS_OUTPUT.PUT_LINE('====================================================================================='); + + -- Additional ETL key analysis for C2D MPEC data + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('ETL Key Analysis:'); + + DECLARE + vAdminKeys NUMBER; + vContentKeys NUMBER; + vCriterionKeys NUMBER; + BEGIN + EXECUTE IMMEDIATE 'SELECT COUNT(DISTINCT A_ETL_LOAD_SET_FK) FROM OU_LEGACY_C2D.MPEC_ADMIN' INTO vAdminKeys; + EXECUTE IMMEDIATE 'SELECT COUNT(DISTINCT A_ETL_LOAD_SET_FK) FROM OU_LEGACY_C2D.MPEC_CONTENT' INTO vContentKeys; + EXECUTE IMMEDIATE 'SELECT COUNT(DISTINCT A_ETL_LOAD_SET_FK) FROM OU_LEGACY_C2D.MPEC_CONTENT_CRITERION' INTO vCriterionKeys; + + DBMS_OUTPUT.PUT_LINE('- MPEC_ADMIN distinct ETL keys: ' || vAdminKeys || ' (expected: 3 for keys 2001-2005)'); + DBMS_OUTPUT.PUT_LINE('- MPEC_CONTENT distinct ETL keys: ' || vContentKeys || ' (expected: 3 for keys 2006-2008)'); + DBMS_OUTPUT.PUT_LINE('- MPEC_CONTENT_CRITERION distinct ETL keys: ' || vCriterionKeys || ' (expected: 2 for keys 2009-2010)'); + + -- Expected file count = sum of distinct ETL keys per table + DBMS_OUTPUT.PUT_LINE('- Expected export files: ' || (vAdminKeys + vContentKeys + vCriterionKeys)); + DBMS_OUTPUT.PUT_LINE('- Actual export files: ' || 
vFileCount); + END; +END; +/ + +PROMPT ========================================================================= +PROMPT Data Integrity Verification Completed +PROMPT ========================================================================= \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/90_MARS_956_rollback_delete_csv_files.sql b/MARS_Packages/REL02_POST/MARS-956/90_MARS_956_rollback_delete_csv_files.sql new file mode 100644 index 0000000..9e5c924 --- /dev/null +++ b/MARS_Packages/REL02_POST/MARS-956/90_MARS_956_rollback_delete_csv_files.sql @@ -0,0 +1,167 @@ +--============================================================================================================================= +-- MARS-956 ROLLBACK: Delete Exported CSV Files from DATA Bucket +--============================================================================================================================= +-- Purpose: Delete exported CSV files from ODS/C2D bucket folders for MPEC tables +-- WARNING: This will permanently delete exported data files! 
+-- Author: Grzegorz Michalski +-- Date: 2026-02-12 +-- Related: MARS-956 - C2D MPEC Data Export Rollback +--============================================================================================================================= + +SET SERVEROUTPUT ON SIZE UNLIMITED + +PROMPT ======================================================================== +PROMPT ROLLBACK: Deleting C2D_MPEC_ADMIN exported files +PROMPT ======================================================================== +PROMPT WARNING: This will delete files from: +PROMPT - DATA bucket: mrds_data_dev/ODS/C2D/C2D_MPEC_ADMIN/ +PROMPT ======================================================================== + +DECLARE + vDataBucketUri VARCHAR2(500); + vCredentialName VARCHAR2(100); + vFileCount NUMBER := 0; +BEGIN + -- Get bucket URI and credential + vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA'); + vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName; + + DBMS_OUTPUT.PUT_LINE('Deleting C2D_MPEC_ADMIN files from DATA bucket...'); + DBMS_OUTPUT.PUT_LINE(' Using A_SOURCE_FILE_RECEIVED with PROCESS_NAME = ''MARS-956'''); + + -- Delete CSV files registered by MARS-956 process + FOR rec IN ( + SELECT SOURCE_FILE_NAME AS object_name + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE PROCESS_NAME = 'MARS-956' + AND SOURCE_FILE_NAME LIKE '%MPEC_ADMIN%' + ) LOOP + BEGIN + DBMS_CLOUD.DELETE_OBJECT( + credential_name => vCredentialName, + object_uri => vDataBucketUri || 'ODS/C2D/C2D_MPEC_ADMIN/' || rec.object_name + ); + DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name); + vFileCount := vFileCount + 1; + EXCEPTION + WHEN OTHERS THEN + IF SQLCODE = -20404 THEN + DBMS_OUTPUT.PUT_LINE(' Skipped (not found): ' || rec.object_name); + ELSE + RAISE; + END IF; + END; + END LOOP; + + IF vFileCount = 0 THEN + DBMS_OUTPUT.PUT_LINE(' INFO: No files found to delete'); + END IF; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: C2D_MPEC_ADMIN files deleted (' || vFileCount || ' file(s))'); +END; +/ + +PROMPT 
======================================================================== +PROMPT ROLLBACK: Deleting C2D_MPEC_CONTENT exported files +PROMPT ======================================================================== +PROMPT WARNING: This will delete files from: +PROMPT - DATA bucket: mrds_data_dev/ODS/C2D/C2D_MPEC_CONTENT/ +PROMPT ======================================================================== + +DECLARE + vDataBucketUri VARCHAR2(500); + vCredentialName VARCHAR2(100); + vFileCount NUMBER := 0; +BEGIN + -- Get bucket URI and credential + vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA'); + vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName; + + DBMS_OUTPUT.PUT_LINE('Deleting C2D_MPEC_CONTENT files from DATA bucket...'); + DBMS_OUTPUT.PUT_LINE(' Using A_SOURCE_FILE_RECEIVED with PROCESS_NAME = ''MARS-956'''); + + -- Delete CSV files registered by MARS-956 process + FOR rec IN ( + SELECT SOURCE_FILE_NAME AS object_name + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE PROCESS_NAME = 'MARS-956' + AND SOURCE_FILE_NAME LIKE '%MPEC_CONTENT%' + AND SOURCE_FILE_NAME NOT LIKE '%CRITERION%' + ) LOOP + BEGIN + DBMS_CLOUD.DELETE_OBJECT( + credential_name => vCredentialName, + object_uri => vDataBucketUri || 'ODS/C2D/C2D_MPEC_CONTENT/' || rec.object_name + ); + DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name); + vFileCount := vFileCount + 1; + EXCEPTION + WHEN OTHERS THEN + IF SQLCODE = -20404 THEN + DBMS_OUTPUT.PUT_LINE(' Skipped (not found): ' || rec.object_name); + ELSE + RAISE; + END IF; + END; + END LOOP; + + IF vFileCount = 0 THEN + DBMS_OUTPUT.PUT_LINE(' INFO: No files found to delete'); + END IF; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: C2D_MPEC_CONTENT files deleted (' || vFileCount || ' file(s))'); +END; +/ + +PROMPT ======================================================================== +PROMPT ROLLBACK: Deleting C2D_MPEC_CONTENT_CRITERION exported files +PROMPT ======================================================================== +PROMPT 
WARNING: This will delete files from: +PROMPT - DATA bucket: mrds_data_dev/ODS/C2D/C2D_MPEC_CONTENT_CRITERION/ +PROMPT ======================================================================== + +DECLARE + vDataBucketUri VARCHAR2(500); + vCredentialName VARCHAR2(100); + vFileCount NUMBER := 0; +BEGIN + -- Get bucket URI and credential + vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA'); + vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName; + + DBMS_OUTPUT.PUT_LINE('Deleting C2D_MPEC_CONTENT_CRITERION files from DATA bucket...'); + DBMS_OUTPUT.PUT_LINE(' Using A_SOURCE_FILE_RECEIVED with PROCESS_NAME = ''MARS-956'''); + + -- Delete CSV files registered by MARS-956 process + FOR rec IN ( + SELECT SOURCE_FILE_NAME AS object_name + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE PROCESS_NAME = 'MARS-956' + AND SOURCE_FILE_NAME LIKE '%MPEC_CONTENT_CRITERION%' + ) LOOP + BEGIN + DBMS_CLOUD.DELETE_OBJECT( + credential_name => vCredentialName, + object_uri => vDataBucketUri || 'ODS/C2D/C2D_MPEC_CONTENT_CRITERION/' || rec.object_name + ); + DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name); + vFileCount := vFileCount + 1; + EXCEPTION + WHEN OTHERS THEN + IF SQLCODE = -20404 THEN + DBMS_OUTPUT.PUT_LINE(' Skipped (not found): ' || rec.object_name); + ELSE + RAISE; + END IF; + END; + END LOOP; + + IF vFileCount = 0 THEN + DBMS_OUTPUT.PUT_LINE(' INFO: No files found to delete'); + END IF; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: C2D_MPEC_CONTENT_CRITERION files deleted (' || vFileCount || ' file(s))'); +END; +/ + +PROMPT SUCCESS: All CSV file deletion operations completed diff --git a/MARS_Packages/REL02_POST/MARS-956/91_MARS_956_rollback_file_registrations.sql b/MARS_Packages/REL02_POST/MARS-956/91_MARS_956_rollback_file_registrations.sql new file mode 100644 index 0000000..42050d5 --- /dev/null +++ b/MARS_Packages/REL02_POST/MARS-956/91_MARS_956_rollback_file_registrations.sql @@ -0,0 +1,78 @@ +-- =================================================================== 
+-- MARS-956 Rollback Step 1: Delete File Registrations +-- =================================================================== +-- Purpose: Remove MARS-956 export file registrations from A_SOURCE_FILE_RECEIVED +-- Author: Grzegorz Michalski +-- Date: 2026-02-12 + +SET SERVEROUTPUT ON SIZE UNLIMITED +SET TIMING ON + +PROMPT ========================================================================= +PROMPT MARS-956 Rollback Step 1: Delete File Registrations +PROMPT ========================================================================= + +DECLARE + vFileCount NUMBER := 0; + vDeletedCount NUMBER := 0; + vErrorMsg VARCHAR2(4000); +BEGIN + -- Count files to be deleted (using PROCESS_NAME) + SELECT COUNT(*) + INTO vFileCount + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE PROCESS_NAME = 'MARS-956'; + + DBMS_OUTPUT.PUT_LINE('Files to be deleted: ' || vFileCount); + DBMS_OUTPUT.PUT_LINE('Using PROCESS_NAME = ''MARS-956'' filter'); + + IF vFileCount > 0 THEN + -- Show files before deletion + DBMS_OUTPUT.PUT_LINE('Files being removed:'); + FOR rec IN ( + SELECT A_SOURCE_FILE_RECEIVED_KEY, + SUBSTR(SOURCE_FILE_NAME, 1, 60) AS FILE_NAME, + TO_CHAR(RECEPTION_DATE, 'YYYY-MM-DD HH24:MI:SS') AS RECEIVED_TIME, + PROCESS_NAME + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE PROCESS_NAME = 'MARS-956' + ORDER BY RECEPTION_DATE DESC + ) LOOP + DBMS_OUTPUT.PUT_LINE('- ' || rec.FILE_NAME || ' (ID: ' || rec.A_SOURCE_FILE_RECEIVED_KEY || ', Process: ' || rec.PROCESS_NAME || ')'); + END LOOP; + + -- Delete the file registrations using PROCESS_NAME + DELETE FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE PROCESS_NAME = 'MARS-956'; + + vDeletedCount := SQL%ROWCOUNT; + COMMIT; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: Successfully deleted ' || vDeletedCount || ' file registrations'); + + -- Log the rollback action + INSERT INTO CT_MRDS.A_PROCESS_LOG (PROCESS_NAME, PROCEDURE_NAME, LOG_LEVEL, LOG_MESSAGE) + VALUES ('MARS-956-ROLLBACK', 'DELETE_FILE_REGISTRATIONS', 'INFO', + 'Deleted ' || vDeletedCount || ' 
file registrations'); + COMMIT; + + ELSE + DBMS_OUTPUT.PUT_LINE('SUCCESS: No file registrations found to delete'); + END IF; + +EXCEPTION + WHEN OTHERS THEN + ROLLBACK; + vErrorMsg := 'Failed to delete file registrations: ' || SQLERRM; + DBMS_OUTPUT.PUT_LINE('ERROR: Error during file registration deletion: ' || SQLERRM); + -- Log the error + INSERT INTO CT_MRDS.A_PROCESS_LOG (PROCESS_NAME, PROCEDURE_NAME, LOG_LEVEL, LOG_MESSAGE) + VALUES ('MARS-956-ROLLBACK', 'DELETE_FILE_REGISTRATIONS', 'ERROR', vErrorMsg); + COMMIT; + RAISE; +END; +/ + +PROMPT ========================================================================= +PROMPT File Registration Rollback Completed +PROMPT ========================================================================= \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/92_MARS_956_rollback_process_logs.sql b/MARS_Packages/REL02_POST/MARS-956/92_MARS_956_rollback_process_logs.sql new file mode 100644 index 0000000..6e4b82a --- /dev/null +++ b/MARS_Packages/REL02_POST/MARS-956/92_MARS_956_rollback_process_logs.sql @@ -0,0 +1,77 @@ +-- =================================================================== +-- MARS-956 Rollback Step 2: Clean Process Logs +-- =================================================================== +-- Purpose: Remove MARS-956 process logs from A_PROCESS_LOG +-- Author: Grzegorz Michalski +-- Date: 2026-02-12 + +SET SERVEROUTPUT ON SIZE UNLIMITED +SET TIMING ON + +PROMPT ========================================================================= +PROMPT MARS-956 Rollback Step 2: Clean Process Logs +PROMPT ========================================================================= + +DECLARE + vLogCount NUMBER := 0; + vDeletedCount NUMBER := 0; + vErrorMsg VARCHAR2(4000); +BEGIN + -- Count logs to be deleted + SELECT COUNT(*) + INTO vLogCount + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME IN ('MARS-956', 'MARS-956-ROLLBACK') + AND LOG_TIMESTAMP >= SYSDATE - 7; -- Last week (safety) + + 
DBMS_OUTPUT.PUT_LINE('Process log entries to be deleted: ' || vLogCount); + + IF vLogCount > 0 THEN + -- Show recent logs before deletion + DBMS_OUTPUT.PUT_LINE('Recent MARS-956 log entries being removed:'); + FOR rec IN ( + SELECT A_PROCESS_LOG_KEY, + TO_CHAR(LOG_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS LOG_TIME, + PROCEDURE_NAME, + LOG_LEVEL, + SUBSTR(LOG_MESSAGE, 1, 40) AS MESSAGE + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME IN ('MARS-956', 'MARS-956-ROLLBACK') + AND LOG_TIMESTAMP >= SYSDATE - 7 + ORDER BY LOG_TIMESTAMP DESC + FETCH FIRST 10 ROWS ONLY + ) LOOP + DBMS_OUTPUT.PUT_LINE('- ' || rec.LOG_TIME || ' [' || rec.LOG_LEVEL || '] ' || + rec.PROCEDURE_NAME || ': ' || rec.MESSAGE); + END LOOP; + + -- Delete the process logs + DELETE FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME IN ('MARS-956', 'MARS-956-ROLLBACK') + AND LOG_TIMESTAMP >= SYSDATE - 7; + + vDeletedCount := SQL%ROWCOUNT; + COMMIT; + + DBMS_OUTPUT.PUT_LINE('SUCCESS: Successfully deleted ' || vDeletedCount || ' process log entries'); + + ELSE + DBMS_OUTPUT.PUT_LINE('SUCCESS: No process log entries found to delete'); + END IF; + +EXCEPTION + WHEN OTHERS THEN + ROLLBACK; + vErrorMsg := 'Failed to clean process logs: ' || SQLERRM; + DBMS_OUTPUT.PUT_LINE('ERROR: Error during process log cleanup: ' || SQLERRM); + -- Log the error (will remain after rollback for debugging) + INSERT INTO CT_MRDS.A_PROCESS_LOG (PROCESS_NAME, PROCEDURE_NAME, LOG_LEVEL, LOG_MESSAGE) + VALUES ('MARS-956-ROLLBACK', 'CLEANUP_PROCESS_LOGS', 'ERROR', vErrorMsg); + COMMIT; + RAISE; +END; +/ + +PROMPT ========================================================================= +PROMPT Process Log Cleanup Completed +PROMPT ========================================================================= \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/99_MARS_956_verify_rollback.sql b/MARS_Packages/REL02_POST/MARS-956/99_MARS_956_verify_rollback.sql new file mode 100644 index 0000000..f14648e --- /dev/null +++ 
b/MARS_Packages/REL02_POST/MARS-956/99_MARS_956_verify_rollback.sql @@ -0,0 +1,207 @@ +-- =================================================================== +-- MARS-956 Rollback Verification: Confirm Rollback Completion +-- =================================================================== +-- Purpose: Verify that MARS-956 rollback completed successfully +-- Author: Grzegorz Michalski +-- Date: 2026-02-12 + +SET SERVEROUTPUT ON SIZE UNLIMITED +SET TIMING ON + +PROMPT ========================================================================= +PROMPT MARS-956 Rollback Verification +PROMPT ========================================================================= + +-- Check 1: Verify file registrations were removed +PROMPT Checking file registration cleanup... +DECLARE + vRemainingFiles NUMBER := 0; +BEGIN + SELECT COUNT(*) + INTO vRemainingFiles + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE SOURCE_FILE_NAME LIKE '200%' -- ETL keys 2001-2010 + AND RECEPTION_DATE >= SYSDATE - 7; -- Last week + + IF vRemainingFiles = 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: All MARS-956 file registrations successfully removed'); + ELSE + DBMS_OUTPUT.PUT_LINE('WARNING: ' || vRemainingFiles || ' file registrations still exist'); + + -- Show remaining files + FOR rec IN ( + SELECT SUBSTR(SOURCE_FILE_NAME, 1, 50) AS FILE_NAME, + TO_CHAR(RECEPTION_DATE, 'YYYY-MM-DD HH24:MI:SS') AS RECEIVED_TIME + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE SOURCE_FILE_NAME LIKE '200%' + AND RECEPTION_DATE >= SYSDATE - 7 + ) LOOP + DBMS_OUTPUT.PUT_LINE(' Remaining: ' || rec.FILE_NAME); + END LOOP; + END IF; +END; +/ + +-- Check 2: Verify process logs were cleaned +PROMPT Checking process log cleanup... 
+DECLARE + vRemainingLogs NUMBER := 0; +BEGIN + SELECT COUNT(*) + INTO vRemainingLogs + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME = 'MARS-956' + AND LOG_TIMESTAMP >= SYSDATE - 7; -- Last week + + IF vRemainingLogs = 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: All MARS-956 process logs successfully removed'); + ELSE + DBMS_OUTPUT.PUT_LINE('WARNING: ' || vRemainingLogs || ' process log entries still exist'); + + -- Show remaining logs (first few) + FOR rec IN ( + SELECT TO_CHAR(LOG_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS LOG_TIME, + PROCEDURE_NAME, + SUBSTR(LOG_MESSAGE, 1, 40) AS MESSAGE + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME = 'MARS-956' + AND LOG_TIMESTAMP >= SYSDATE - 7 + ORDER BY LOG_TIMESTAMP DESC + FETCH FIRST 3 ROWS ONLY + ) LOOP + DBMS_OUTPUT.PUT_LINE(' Remaining: ' || rec.LOG_TIME || ' ' || rec.PROCEDURE_NAME); + END LOOP; + END IF; +END; +/ + +-- Check 3: Verify cloud bucket cleanup (informational only) +PROMPT Checking cloud bucket status... +DECLARE + vCloudFileCount NUMBER := 0; + vCredentialName VARCHAR2(100); + vDataBucketUri VARCHAR2(500); +BEGIN + -- Get bucket URI and credential + vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('ODS'); + vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName; + + DBMS_OUTPUT.PUT_LINE('Checking ODS bucket: ' || vDataBucketUri); + + -- Count remaining files in cloud bucket + BEGIN + FOR rec IN ( + SELECT object_name + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => vCredentialName, + location_uri => vDataBucketUri + )) + WHERE object_name LIKE 'ODS/C2D/C2D_MPEC_%' + ) LOOP + vCloudFileCount := vCloudFileCount + 1; + IF vCloudFileCount <= 3 THEN -- Show first 3 files + DBMS_OUTPUT.PUT_LINE(' Cloud file: ' || rec.object_name); + END IF; + END LOOP; + + IF vCloudFileCount = 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: No C2D MPEC files found in cloud bucket'); + ELSE + DBMS_OUTPUT.PUT_LINE('INFO: ' || vCloudFileCount || ' C2D MPEC files still in cloud bucket'); + DBMS_OUTPUT.PUT_LINE(' Note: Cloud 
files are not automatically deleted by rollback'); + DBMS_OUTPUT.PUT_LINE(' Manual deletion required if needed'); + END IF; + + EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('WARNING: Cannot check cloud bucket: ' || SQLERRM); + END; +END; +/ + +-- Check 4: Verify rollback logs were created +PROMPT Checking rollback operation logs... +DECLARE + vRollbackLogs NUMBER := 0; +BEGIN + SELECT COUNT(*) + INTO vRollbackLogs + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME = 'MARS-956-ROLLBACK' + AND LOG_TIMESTAMP >= SYSDATE - 1/24; -- Last hour + + IF vRollbackLogs > 0 THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: Rollback operation logs found: ' || vRollbackLogs); + + -- Show recent rollback logs + FOR rec IN ( + SELECT TO_CHAR(LOG_TIMESTAMP, 'HH24:MI:SS') AS LOG_TIME, + PROCEDURE_NAME, + LOG_LEVEL, + SUBSTR(LOG_MESSAGE, 1, 50) AS MESSAGE + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME = 'MARS-956-ROLLBACK' + AND LOG_TIMESTAMP >= SYSDATE - 1/24 + ORDER BY LOG_TIMESTAMP DESC + ) LOOP + DBMS_OUTPUT.PUT_LINE(' ' || rec.LOG_TIME || ' [' || rec.LOG_LEVEL || '] ' || + rec.PROCEDURE_NAME || ': ' || rec.MESSAGE); + END LOOP; + ELSE + DBMS_OUTPUT.PUT_LINE('WARNING: Warning: No rollback operation logs found'); + END IF; +END; +/ + +PROMPT +PROMPT ========================================================================= +PROMPT MARS-956 Rollback Verification Summary +PROMPT ========================================================================= + +DECLARE + vRemainingFiles NUMBER := 0; + vRemainingLogs NUMBER := 0; + vRollbackStatus VARCHAR2(20); +BEGIN + -- Count remaining registrations + SELECT COUNT(*) + INTO vRemainingFiles + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED + WHERE SOURCE_FILE_NAME LIKE '200%' + AND RECEPTION_DATE >= SYSDATE - 7; + + -- Count remaining process logs + SELECT COUNT(*) + INTO vRemainingLogs + FROM CT_MRDS.A_PROCESS_LOG + WHERE PROCESS_NAME = 'MARS-956' + AND LOG_TIMESTAMP >= SYSDATE - 7; + + -- Determine rollback status + IF vRemainingFiles = 0 AND 
vRemainingLogs = 0 THEN + vRollbackStatus := 'COMPLETE'; + ELSIF vRemainingFiles = 0 OR vRemainingLogs = 0 THEN + vRollbackStatus := 'PARTIAL'; + ELSE + vRollbackStatus := 'INCOMPLETE'; + END IF; + + DBMS_OUTPUT.PUT_LINE('MARS-956 Rollback Status: ' || vRollbackStatus); + DBMS_OUTPUT.PUT_LINE('- Remaining file registrations: ' || vRemainingFiles); + DBMS_OUTPUT.PUT_LINE('- Remaining process logs: ' || vRemainingLogs); + + IF vRollbackStatus = 'COMPLETE' THEN + DBMS_OUTPUT.PUT_LINE('SUCCESS: Rollback completed successfully - system clean'); + ELSE + DBMS_OUTPUT.PUT_LINE('WARNING: Rollback incomplete - manual cleanup may be required'); + END IF; + + DBMS_OUTPUT.PUT_LINE(''); + DBMS_OUTPUT.PUT_LINE('Note: Cloud bucket files (OCI) are not automatically removed'); + DBMS_OUTPUT.PUT_LINE(' Use OCI console or DBMS_CLOUD commands for file deletion if needed'); +END; +/ + +PROMPT ========================================================================= +PROMPT Rollback Verification Completed +PROMPT ========================================================================= \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/README.md b/MARS_Packages/REL02_POST/MARS-956/README.md deleted file mode 100644 index e6935f3..0000000 --- a/MARS_Packages/REL02_POST/MARS-956/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# MARS-956: Exporting Historical data for ODS: C2D MPEC (delta) - -## Overview - -**Purpose**: One-time export of historical C2D MPEC delta data from operational database (OU_C2D) to DATA bucket as CSV files. - -**Approach**: Use DATA_EXPORTER export functionality EXPORT_TABLE_DATA for bulk data movement with file registration. 
- -**Input**: Old tables in OU_C2D operational database -**Output**: CSV files in DATA bucket -**Mapping**: Structure must match new ODS template tables - -## Tables to Export - -| Source Table (OU_C2D) | Target Location (DATA) | Export Type | Time Dependency | -|------------------------|-------------------------|-------------|------------------| -| `MPEC_ADMIN` | `mrds_data_dev/DATA/C2D/C2D_MPEC_ADMIN` | CSV to DATA | Sync with REL_02 | -| `MPEC_CONTENT` | `mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT` | CSV to DATA | Sync with REL_02 | -| `MPEC_CONTENT_CRITERION` | `mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT_CRITERION` | CSV to DATA | Sync with REL_02 | - -## Export Strategy - -- **Format**: CSV files in DATA bucket -- **Reason**: Complete history of delta records needed for all queries -- **Method**: `DATA_EXPORTER.EXPORT_TABLE_DATA` procedure -- **Bucket Area**: `'DATA'` -- **Folder Structure**: `'DATA/C2D/{TABLE_NAME}'` -- **File Registration**: Files registered in A_SOURCE_FILE_RECEIVED table - -## Installation Steps - -1. Run master install script: `@install_mars956.sql` -2. Verify exports completed successfully -3. Confirm CSV files created in DATA bucket with expected structure - -## Files Structure - -``` -MARS-956/ -├── README.md # This file -├── install_mars956.sql # Master installation script -├── 01_MARS_956_export_c2d_mpec_data.sql # Export procedures execution -├── track_package_versions.sql # Universal version tracking -├── verify_packages_version.sql # Universal version verification -└── rollback_mars956.sql # Rollback script (if needed) -``` - -## Prerequisites - -- OU_C2D schema access for source tables -- DATA_EXPORTER package v2.7.5+ deployed (with pRegisterExport support) -- DEF_CRED_ARN credentials configured -- DATA bucket accessible - -## Post-Installation Verification - -1. Check export completion in A_PROCESS_LOG -2. Verify CSV files created in DATA bucket -3. Validate file structure matches template tables -4. 
Confirm row counts match source tables -5. Check file registration in A_SOURCE_FILE_RECEIVED table - -## Notes - -- This is a **one-time** data migration -- No package modifications required (uses existing DATA_EXPORTER) -- Export timing critical - must sync with REL_02 deployment -- Complete history required for delta queries \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/install_mars956.sql b/MARS_Packages/REL02_POST/MARS-956/install_mars956.sql index b28dfcd..8fe639d 100644 --- a/MARS_Packages/REL02_POST/MARS-956/install_mars956.sql +++ b/MARS_Packages/REL02_POST/MARS-956/install_mars956.sql @@ -1,128 +1,88 @@ -- =================================================================== --- MARS-956 MASTER INSTALLATION SCRIPT +-- MARS-956 INSTALL SCRIPT: C2D MPEC Data Export to External Tables -- =================================================================== --- Purpose: Export Historical C2D MPEC data from OU_C2D to DATA bucket +-- Purpose: One-time bulk export of 3 C2D MPEC tables from OU_LEGACY_C2D schema +-- to OCI buckets (ODS bucket CSV format) +-- Uses DATA_EXPORTER v2.7.5 with pRegisterExport for file registration -- Author: Grzegorz Michalski --- Date: 2026-02-11 --- --- Requirements: --- - ADMIN user access for MARS installation --- - OU_C2D schema access for source tables --- - DATA_EXPORTER package v2.7.4+ deployed --- - DEF_CRED_ARN credentials configured --- - DATA bucket accessible --- =================================================================== +-- Date: 2026-02-12 --- Dynamic spool file generation +-- Dynamic spool file generation (using SYS_CONTEXT - no DBA privileges required) +-- Log files are automatically created in log/ subdirectory +-- IMPORTANT: Ensure log/ directory exists before SPOOL (use host mkdir) host mkdir log 2>nul -define spoolfile = 'log\install_mars956_' -define timestamp = '' --- Get current timestamp for unique log filename -column current_time new_value timestamp -SELECT TO_CHAR(SYSDATE, 
'YYYYMMDD_HH24MISS') AS current_time FROM dual; +var filename VARCHAR2(100) +BEGIN + :filename := 'log/INSTALL_MARS_956_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; +END; +/ +column filename new_value _filename +select :filename filename from dual; +spool &_filename --- Start logging -spool &spoolfile.×tamp..log +SET ECHO OFF +SET TIMING ON +SET SERVEROUTPUT ON SIZE UNLIMITED +SET PAUSE OFF --- Display environment information -PROMPT ========================================================================= -PROMPT MARS-956 INSTALLATION - Export Historical C2D MPEC Data -PROMPT ========================================================================= -PROMPT Installation Start: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS INSTALL_START FROM DUAL; - -PROMPT Current User: -SELECT USER AS CURRENT_USER FROM DUAL; - -PROMPT Database Info: -SELECT INSTANCE_NAME, VERSION, STATUS FROM V$INSTANCE; +-- Set current schema context (optional - use when modifying packages in specific schema) +-- ALTER SESSION SET CURRENT_SCHEMA = CT_MRDS; PROMPT ========================================================================= -PROMPT Installation Details: -PROMPT - Purpose: One-time export of historical C2D MPEC delta data -PROMPT - Source: OU_C2D schema tables (operational database) -PROMPT - Target: DATA bucket as CSV files -PROMPT - Tables: MPEC_ADMIN, MPEC_CONTENT, MPEC_CONTENT_CRITERION -PROMPT - Method: DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE +PROMPT MARS-956: C2D MPEC Data Export to External Tables (One-Time Migration) +PROMPT ========================================================================= +PROMPT +PROMPT This script will export 3 C2D MPEC tables to OCI buckets: +PROMPT +PROMPT TARGET: ODS Bucket (CSV format): +PROMPT - MPEC_ADMIN +PROMPT - MPEC_CONTENT +PROMPT - MPEC_CONTENT_CRITERION +PROMPT +PROMPT Key Features: +PROMPT - Files registered in A_SOURCE_FILE_RECEIVED for tracking +PROMPT - Template table column 
order matching (CT_ET_TEMPLATES.C2D_MPEC_*) +PROMPT - ODS/C2D bucket path structure PROMPT ========================================================================= -SET SERVEROUTPUT ON SIZE 1000000 -SET LINESIZE 200 -SET PAGESIZE 1000 +-- Confirm installation with user +ACCEPT continue CHAR PROMPT 'Type YES to continue with installation, or Ctrl+C to abort: ' +WHENEVER SQLERROR EXIT SQL.SQLCODE +BEGIN + IF '&continue' IS NULL OR TRIM('&continue') IS NULL OR UPPER(TRIM('&continue')) != 'YES' THEN + RAISE_APPLICATION_ERROR(-20001, 'Installation aborted by user'); + END IF; +END; +/ +WHENEVER SQLERROR CONTINUE -PROMPT -PROMPT Step 1: Verify Prerequisites +PROMPT PROMPT ========================================================================= - --- Verify DATA_EXPORTER package is available -PROMPT Checking DATA_EXPORTER package availability... -SELECT 'DATA_EXPORTER v' || CT_MRDS.DATA_EXPORTER.PACKAGE_VERSION || - ' (Build: ' || CT_MRDS.DATA_EXPORTER.PACKAGE_BUILD_DATE || ')' AS PACKAGE_INFO -FROM DUAL; - --- Verify source tables exist in OU_C2D -PROMPT Checking source tables in OU_C2D schema... -SELECT table_name, num_rows -FROM all_tables -WHERE owner = 'OU_C2D' - AND table_name IN ('MPEC_ADMIN', 'MPEC_CONTENT', 'MPEC_CONTENT_CRITERION') -ORDER BY table_name; - --- Verify template tables exist in CT_ET_TEMPLATES -PROMPT Checking template tables in CT_ET_TEMPLATES schema... 
-SELECT table_name -FROM all_tables -WHERE owner = 'CT_ET_TEMPLATES' - AND table_name IN ('C2D_MPEC_ADMIN', 'C2D_MPEC_CONTENT', 'C2D_MPEC_CONTENT_CRITERION') -ORDER BY table_name; - -PROMPT -PROMPT Step 2: Execute Historical Data Export +PROMPT Step 1: Export C2D MPEC Data to ODS Bucket PROMPT ========================================================================= @@01_MARS_956_export_c2d_mpec_data.sql -PROMPT -PROMPT Step 3: Track Package Versions +PROMPT PROMPT ========================================================================= -@@track_package_versions.sql +PROMPT Step 2: Verify Exports (File Registration Check) +PROMPT ========================================================================= +@@02_MARS_956_verify_exports.sql -PROMPT -PROMPT Step 4: Verify Package Versions +PROMPT PROMPT ========================================================================= -@@verify_packages_version.sql +PROMPT Step 3: Verify Data Integrity (Source vs Exported) +PROMPT ========================================================================= +@@03_MARS_956_verify_data_integrity.sql -PROMPT +PROMPT PROMPT ========================================================================= -PROMPT MARS-956 INSTALLATION SUMMARY +PROMPT MARS-956 Installation - COMPLETED PROMPT ========================================================================= - --- Display final summary -PROMPT Installation Completed: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS INSTALL_END FROM DUAL; - -PROMPT Export Results Summary: -SELECT COUNT(*) AS EXPORT_LOG_ENTRIES, - MIN(EVENT_TIMESTAMP) AS FIRST_EXPORT, - MAX(EVENT_TIMESTAMP) AS LAST_EXPORT -FROM CT_MRDS.A_PROCESS_LOG -WHERE PACKAGE_NAME = 'MARS-956' - AND EVENT_TIMESTAMP >= SYSDATE - 1; -- Last 24 hours - -PROMPT -PROMPT ========================================================================= -PROMPT POST-INSTALLATION TASKS -PROMPT ========================================================================= -PROMPT 1. 
Verify CSV files created in DATA bucket: -PROMPT - mrds_data_dev/ODS/C2D/C2D_MPEC_ADMIN/*.csv -PROMPT - mrds_data_dev/ODS/C2D/C2D_MPEC_CONTENT/*.csv -PROMPT - mrds_data_dev/ODS/C2D/C2D_MPEC_CONTENT_CRITERION/*.csv -PROMPT -PROMPT 2. Check file structure matches template tables -PROMPT 3. Validate row counts match source tables -PROMPT 4. Confirm data available for delta queries -PROMPT 5. Sync deployment timing with REL_02 deployment +PROMPT Check the log file for complete installation details. +PROMPT For rollback, use: rollback_mars956.sql PROMPT ========================================================================= spool off + quit; \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/rollback_mars956.sql b/MARS_Packages/REL02_POST/MARS-956/rollback_mars956.sql index b87e521..e349472 100644 --- a/MARS_Packages/REL02_POST/MARS-956/rollback_mars956.sql +++ b/MARS_Packages/REL02_POST/MARS-956/rollback_mars956.sql @@ -1,85 +1,81 @@ -- =================================================================== --- MARS-956 ROLLBACK SCRIPT +-- MARS-956 ROLLBACK SCRIPT: C2D MPEC Data Export Rollback -- =================================================================== --- Purpose: Rollback/cleanup for MARS-956 C2D MPEC historical data export +-- Purpose: Rollback MARS-956 - Delete exported CSV files and file registrations +-- WARNING: This will DELETE all exported data files and registrations! -- Author: Grzegorz Michalski --- Date: 2026-02-11 --- --- NOTE: This is primarily for cleanup of log entries and tracking data. --- The exported CSV files would need to be manually removed from --- the DATA bucket if rollback is required. 
--- =================================================================== +-- Date: 2026-02-12 --- Start logging -spool rollback_mars956.log +-- Dynamic spool file generation (using SYS_CONTEXT - no DBA privileges required) +-- IMPORTANT: Ensure log/ directory exists before SPOOL (use host mkdir) +host mkdir log 2>nul + +var filename VARCHAR2(100) +BEGIN + :filename := 'log/ROLLBACK_MARS_956_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; +END; +/ +column filename new_value _filename +select :filename filename from dual; +spool &_filename + +SET ECHO OFF +SET TIMING ON +SET SERVEROUTPUT ON SIZE UNLIMITED +SET PAUSE OFF PROMPT ========================================================================= -PROMPT MARS-956 ROLLBACK - Cleanup Historical C2D MPEC Export +PROMPT MARS-956: Rollback C2D MPEC Data Export PROMPT ========================================================================= -PROMPT Rollback Start: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS ROLLBACK_START FROM DUAL; - -SET SERVEROUTPUT ON SIZE 1000000 - -PROMPT -PROMPT Step 1: Review Export Activity -PROMPT ========================================================================= - --- Show what was exported -PROMPT Recent MARS-956 export activity: -SELECT TO_CHAR(EVENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS EXPORT_TIME, - PROCEDURE_NAME, - EVENT_TYPE, - EVENT_MESSAGE -FROM CT_MRDS.A_PROCESS_LOG -WHERE PACKAGE_NAME = 'MARS-956' - OR PROCEDURE_NAME LIKE '%MARS_956%' -ORDER BY EVENT_TIMESTAMP DESC; - -PROMPT -PROMPT Step 2: Cleanup Log Entries (Optional) -PROMPT ========================================================================= - --- Optionally remove MARS-956 log entries (uncomment if needed) -/* -DELETE FROM CT_MRDS.A_PROCESS_LOG -WHERE PACKAGE_NAME = 'MARS-956' - OR PROCEDURE_NAME LIKE '%MARS_956%'; - -PROMPT Deleted log entries: -SELECT SQL%ROWCOUNT AS DELETED_ROWS FROM DUAL; - -COMMIT; -*/ - -PROMPT Log cleanup skipped (uncomment DELETE 
statement if cleanup needed) - -PROMPT -PROMPT Step 3: Manual Steps Required -PROMPT ========================================================================= - -PROMPT ⚠️ MANUAL CLEANUP REQUIRED: +PROMPT WARNING: This will DELETE exported CSV files and file registrations! +PROMPT - ODS bucket: mrds_data_dev/ODS/C2D/ +PROMPT - File registrations: A_SOURCE_FILE_RECEIVED entries PROMPT -PROMPT If complete rollback is needed, manually remove CSV files from DATA bucket: -PROMPT - mrds_data_dev/ODS/C2D/C2D_MPEC_ADMIN/*.csv -PROMPT - mrds_data_dev/ODS/C2D/C2D_MPEC_CONTENT/*.csv -PROMPT - mrds_data_dev/ODS/C2D/C2D_MPEC_CONTENT_CRITERION/*.csv -PROMPT -PROMPT Use OCI CLI or console to remove files: -PROMPT oci os object list --bucket-name mrds_data_dev --prefix "ODS/C2D/C2D_MPEC" -PROMPT oci os object delete --bucket-name mrds_data_dev --name "path/to/file.csv" - -PROMPT -PROMPT ========================================================================= -PROMPT MARS-956 ROLLBACK SUMMARY +PROMPT Only proceed if export failed and needs to be restarted! PROMPT ========================================================================= -PROMPT Rollback Completed: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS ROLLBACK_END FROM DUAL; +-- Confirm rollback with user +ACCEPT continue CHAR PROMPT 'Type YES to continue with rollback, or Ctrl+C to abort: ' +WHENEVER SQLERROR EXIT SQL.SQLCODE +BEGIN + IF '&continue' IS NULL OR TRIM('&continue') IS NULL OR UPPER(TRIM('&continue')) != 'YES' THEN + RAISE_APPLICATION_ERROR(-20001, 'Rollback aborted by user'); + END IF; +END; +/ +WHENEVER SQLERROR CONTINUE -PROMPT -PROMPT Note: This rollback script primarily cleans up log entries. -PROMPT Exported CSV files require manual removal from DATA bucket. 
+PROMPT +PROMPT ========================================================================= +PROMPT Step 1: Delete Exported CSV Files from DATA Bucket +PROMPT ========================================================================= +@@90_MARS_956_rollback_delete_csv_files.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 2: Delete File Registrations +PROMPT ========================================================================= +@@91_MARS_956_rollback_file_registrations.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 3: Clean Process Logs +PROMPT ========================================================================= +@@92_MARS_956_rollback_process_logs.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 4: Verify Rollback Completion +PROMPT ========================================================================= +@@99_MARS_956_verify_rollback.sql + +PROMPT +PROMPT ========================================================================= +PROMPT MARS-956 Rollback - COMPLETED +PROMPT ========================================================================= +PROMPT Check the log file for complete rollback details. 
+PROMPT ========================================================================= spool off + quit; \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/track_package_versions.sql b/MARS_Packages/REL02_POST/MARS-956/track_package_versions.sql deleted file mode 100644 index 8497868..0000000 --- a/MARS_Packages/REL02_POST/MARS-956/track_package_versions.sql +++ /dev/null @@ -1,96 +0,0 @@ --- =================================================================== --- Simple Package Version Tracking Script --- =================================================================== --- Purpose: Track specified Oracle package versions for MARS-956 --- Author: Grzegorz Michalski --- Date: 2026-02-11 --- Version: 3.1.0 - List-Based Edition --- --- USAGE: --- 1. Edit package list below (add/remove packages as needed) --- 2. Include in your install/rollback script: @@track_package_versions.sql --- =================================================================== - -SET SERVEROUTPUT ON; - -DECLARE - TYPE t_package_rec IS RECORD ( - owner VARCHAR2(50), - package_name VARCHAR2(50), - version VARCHAR2(50) - ); - TYPE t_packages IS TABLE OF t_package_rec; - TYPE t_string_array IS TABLE OF VARCHAR2(100); - - -- =================================================================== - -- PACKAGE LIST - Edit this array to specify packages to track - -- =================================================================== - -- MARS-956: Historical C2D MPEC data export - using existing packages - -- No new packages created, tracking existing DATA_EXPORTER usage - -- =================================================================== - vPackageList t_string_array := t_string_array( - 'CT_MRDS.DATA_EXPORTER' - ); - -- =================================================================== - - vPackages t_packages := t_packages(); - vVersion VARCHAR2(50); - vCount NUMBER := 0; - vOwner VARCHAR2(50); - vPackageName VARCHAR2(50); - vDotPos NUMBER; -BEGIN - 
DBMS_OUTPUT.PUT_LINE('========================================'); - DBMS_OUTPUT.PUT_LINE('MARS-956: Package Version Tracking'); - DBMS_OUTPUT.PUT_LINE('========================================'); - - -- Process each package in the list - FOR i IN 1..vPackageList.COUNT LOOP - vDotPos := INSTR(vPackageList(i), '.'); - IF vDotPos > 0 THEN - vOwner := SUBSTR(vPackageList(i), 1, vDotPos - 1); - vPackageName := SUBSTR(vPackageList(i), vDotPos + 1); - - -- Get package version - BEGIN - EXECUTE IMMEDIATE 'SELECT ' || vOwner || '.' || vPackageName || '.GET_VERSION() FROM DUAL' INTO vVersion; - vPackages.EXTEND; - vPackages(vPackages.COUNT).owner := vOwner; - vPackages(vPackages.COUNT).package_name := vPackageName; - vPackages(vPackages.COUNT).version := vVersion; - - -- Track in ENV_MANAGER - BEGIN - CT_MRDS.ENV_MANAGER.TRACK_PACKAGE_VERSION( - pPackageOwner => vOwner, - pPackageName => vPackageName, - pPackageVersion => vVersion, - pPackageBuildDate => TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS'), - pPackageAuthor => 'Grzegorz Michalski' - ); - vCount := vCount + 1; - EXCEPTION - WHEN OTHERS THEN NULL; -- Continue even if tracking fails - END; - EXCEPTION - WHEN OTHERS THEN NULL; -- Skip packages that fail - END; - END IF; - END LOOP; - - DBMS_OUTPUT.PUT_LINE(''); - DBMS_OUTPUT.PUT_LINE('Summary:'); - DBMS_OUTPUT.PUT_LINE('--------'); - DBMS_OUTPUT.PUT_LINE('Packages tracked: ' || vCount || '/' || vPackageList.COUNT); - - IF vPackages.COUNT > 0 THEN - DBMS_OUTPUT.PUT_LINE(''); - DBMS_OUTPUT.PUT_LINE('Tracked Packages:'); - FOR i IN 1..vPackages.COUNT LOOP - DBMS_OUTPUT.PUT_LINE(' ' || vPackages(i).owner || '.' 
|| vPackages(i).package_name || ' v' || vPackages(i).version); - END LOOP; - END IF; - - DBMS_OUTPUT.PUT_LINE('========================================'); -END; -/ \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/validate_export.sql b/MARS_Packages/REL02_POST/MARS-956/validate_export.sql deleted file mode 100644 index e7cbc38..0000000 --- a/MARS_Packages/REL02_POST/MARS-956/validate_export.sql +++ /dev/null @@ -1,182 +0,0 @@ --- =================================================================== --- MARS-956 POST-EXPORT VALIDATION SCRIPT --- =================================================================== --- Purpose: Validate C2D MPEC historical data export results --- Author: Grzegorz Michalski --- Date: 2026-02-11 --- --- Run after MARS-956 installation to verify export success --- =================================================================== - -SET LINESIZE 200 -SET PAGESIZE 1000 -SET SERVEROUTPUT ON SIZE 1000000 - -PROMPT ========================================================================= -PROMPT MARS-956 POST-EXPORT VALIDATION -PROMPT ========================================================================= -PROMPT Validation Start: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS VALIDATION_START FROM DUAL; - -PROMPT -PROMPT 1. 
Export Process Log Review -PROMPT ========================================================================= - --- Check export completion status -PROMPT Recent MARS-956 export activity: -SELECT TO_CHAR(EVENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS EVENT_TIME, - PROCEDURE_NAME, - EVENT_TYPE, - SUBSTR(EVENT_MESSAGE, 1, 80) AS MESSAGE_PREVIEW -FROM CT_MRDS.A_PROCESS_LOG -WHERE PACKAGE_NAME = 'MARS-956' - OR PROCEDURE_NAME LIKE '%MARS_956%' - OR PROCEDURE_NAME LIKE '%DATA_EXPORTER%' -ORDER BY EVENT_TIMESTAMP DESC -FETCH FIRST 20 ROWS ONLY; - --- Check for any errors -PROMPT Export errors (if any): -SELECT TO_CHAR(EVENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS ERROR_TIME, - PROCEDURE_NAME, - EVENT_MESSAGE -FROM CT_MRDS.A_PROCESS_LOG -WHERE (PACKAGE_NAME = 'MARS-956' OR PROCEDURE_NAME LIKE '%MARS_956%') - AND EVENT_TYPE = 'ERROR' - AND EVENT_TIMESTAMP >= SYSDATE - 1; -- Last 24 hours - -PROMPT -PROMPT 2. Source Table Row Counts -PROMPT ========================================================================= - --- Get source table counts for comparison -PROMPT Source table row counts (OU_C2D): -SELECT 'OU_C2D' AS SCHEMA_NAME, - table_name, - num_rows, - TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS') AS STATS_DATE -FROM all_tables -WHERE owner = 'OU_C2D' - AND table_name IN ('MPEC_ADMIN', 'MPEC_CONTENT', 'MPEC_CONTENT_CRITERION') -ORDER BY table_name; - -PROMPT -PROMPT 3. 
Template Table Structure Verification -PROMPT ========================================================================= - --- Verify template tables exist and have proper structure -PROMPT Template tables in CT_ET_TEMPLATES: -SELECT table_name, - num_rows, - TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS') AS STATS_DATE -FROM all_tables -WHERE owner = 'CT_ET_TEMPLATES' - AND table_name IN ('C2D_MPEC_ADMIN', 'C2D_MPEC_CONTENT', 'C2D_MPEC_CONTENT_CRITERION') -ORDER BY table_name; - -PROMPT -PROMPT Template table column counts: -SELECT owner, table_name, COUNT(*) AS COLUMN_COUNT -FROM all_tab_columns -WHERE owner IN ('OU_C2D', 'CT_ET_TEMPLATES') - AND ((owner = 'OU_C2D' AND table_name IN ('MPEC_ADMIN', 'MPEC_CONTENT', 'MPEC_CONTENT_CRITERION')) - OR (owner = 'CT_ET_TEMPLATES' AND table_name IN ('C2D_MPEC_ADMIN', 'C2D_MPEC_CONTENT', 'C2D_MPEC_CONTENT_CRITERION'))) -GROUP BY owner, table_name -ORDER BY table_name, owner; - -PROMPT -PROMPT 4. File Registration Validation -PROMPT ========================================================================= - --- Check if exported files were registered in A_SOURCE_FILE_RECEIVED -PROMPT Registered export files (last 24 hours): -SELECT A_SOURCE_FILE_RECEIVED_KEY, - A_SOURCE_FILE_CONFIG_KEY, - SOURCE_FILE_NAME, - ROUND(BYTES/1024, 2) AS SIZE_KB, - PROCESSING_STATUS, - TO_CHAR(RECEPTION_DATE, 'YYYY-MM-DD HH24:MI:SS') AS REGISTERED_TIME -FROM CT_MRDS.A_SOURCE_FILE_RECEIVED -WHERE RECEPTION_DATE >= SYSDATE - 1 -- Last 24 hours - AND (SOURCE_FILE_NAME LIKE '%MPEC_%' OR A_SOURCE_FILE_CONFIG_KEY IN ( - SELECT A_SOURCE_FILE_CONFIG_KEY - FROM CT_MRDS.A_SOURCE_FILE_CONFIG - WHERE A_SOURCE_KEY = 'C2D' AND TABLE_ID LIKE '%MPEC%' - )) -ORDER BY RECEPTION_DATE DESC; - --- Count registered files per config key -PROMPT File registration summary: -SELECT - CASE WHEN A_SOURCE_FILE_CONFIG_KEY = -1 THEN 'Default (no config)' - ELSE 'Config Key: ' || A_SOURCE_FILE_CONFIG_KEY - END AS CONFIG_INFO, - COUNT(*) AS REGISTERED_FILES -FROM 
CT_MRDS.A_SOURCE_FILE_RECEIVED -WHERE RECEPTION_DATE >= SYSDATE - 1 -- Last 24 hours - AND (SOURCE_FILE_NAME LIKE '%MPEC_%' OR A_SOURCE_FILE_CONFIG_KEY IN ( - SELECT A_SOURCE_FILE_CONFIG_KEY - FROM CT_MRDS.A_SOURCE_FILE_CONFIG - WHERE A_SOURCE_KEY = 'C2D' AND TABLE_ID LIKE '%MPEC%' - )) -GROUP BY A_SOURCE_FILE_CONFIG_KEY -ORDER BY A_SOURCE_FILE_CONFIG_KEY; - -PROMPT -PROMPT 5. Export File Validation Commands -PROMPT ========================================================================= - -PROMPT To validate exported CSV files, use these OCI CLI commands: -PROMPT -PROMPT # List exported files -PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC" -PROMPT -PROMPT # Check file sizes -PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC_ADMIN" -PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC_CONTENT" -PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC_CONTENT_CRITERION" -PROMPT -PROMPT # Download sample file for validation -PROMPT oci os object get --bucket-name mrds_data_dev --name "DATA/C2D/C2D_MPEC_ADMIN/filename.csv" --file sample.csv - -PROMPT -PROMPT 6. Data Quality Checks (Manual) -PROMPT ========================================================================= - -PROMPT Manual verification steps: -PROMPT 1. Download sample CSV files from each folder -PROMPT 2. Verify CSV header matches template table columns -PROMPT 3. Check data formats (especially dates) match expectations -PROMPT 4. Confirm row counts approximately match source tables -PROMPT 5. Validate no empty files were created -PROMPT 6. Test loading sample data into external tables -PROMPT 7. Verify file registration entries in A_SOURCE_FILE_RECEIVED - -PROMPT -PROMPT 7. Next Steps for ODS Integration -PROMPT ========================================================================= - -PROMPT After validation success: -PROMPT 1. 
Configure external tables pointing to CSV files -PROMPT 2. Test external table queries -PROMPT 3. Setup scheduled data refresh processes (if needed) -PROMPT 4. Document file locations and access patterns -PROMPT 5. Coordinate with REL_02 deployment timing - -PROMPT -PROMPT ========================================================================= -PROMPT VALIDATION COMPLETED -PROMPT ========================================================================= -PROMPT Validation End: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS VALIDATION_END FROM DUAL; - -PROMPT -PROMPT Review the output above to confirm: -PROMPT ✓ Export processes completed without errors -PROMPT ✓ Source table row counts are reasonable -PROMPT ✓ Template tables exist and have matching structure -PROMPT ✓ Exported files registered in A_SOURCE_FILE_RECEIVED table -PROMPT ✓ Manual file validation steps are understood -PROMPT -PROMPT If any issues found, check export logs and re-run specific exports if needed. -PROMPT ========================================================================= \ No newline at end of file diff --git a/MARS_Packages/REL02_POST/MARS-956/verify_packages_version.sql b/MARS_Packages/REL02_POST/MARS-956/verify_packages_version.sql deleted file mode 100644 index 510f8b7..0000000 --- a/MARS_Packages/REL02_POST/MARS-956/verify_packages_version.sql +++ /dev/null @@ -1,62 +0,0 @@ --- =================================================================== --- Universal Package Version Verification Script --- =================================================================== --- Purpose: Verify all tracked Oracle packages for code changes (MARS-956) --- Author: Grzegorz Michalski --- Date: 2026-02-11 --- Version: 1.0.0 --- --- USAGE: --- Include at the end of install/rollback scripts: @@verify_packages_version.sql --- --- OUTPUT: --- - List of all tracked packages with their current status --- - OK: Package has not changed since last tracking --- - WARNING: Package code changed 
without version update --- =================================================================== - -SET LINESIZE 200 -SET PAGESIZE 1000 -SET FEEDBACK OFF - -PROMPT -PROMPT ======================================== -PROMPT MARS-956: Package Version Verification -PROMPT ======================================== -PROMPT - -COLUMN PACKAGE_OWNER FORMAT A15 -COLUMN PACKAGE_NAME FORMAT A20 -COLUMN VERSION FORMAT A10 -COLUMN STATUS FORMAT A80 - -SELECT - PACKAGE_OWNER, - PACKAGE_NAME, - PACKAGE_VERSION AS VERSION, - CT_MRDS.ENV_MANAGER.CHECK_PACKAGE_CHANGES(PACKAGE_OWNER, PACKAGE_NAME) AS STATUS -FROM ( - SELECT - PACKAGE_OWNER, - PACKAGE_NAME, - PACKAGE_VERSION, - ROW_NUMBER() OVER (PARTITION BY PACKAGE_OWNER, PACKAGE_NAME ORDER BY TRACKING_DATE DESC) AS RN - FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING -) -WHERE RN = 1 -ORDER BY PACKAGE_OWNER, PACKAGE_NAME; - -PROMPT -PROMPT ======================================== -PROMPT MARS-956: Verification Complete -PROMPT ======================================== -PROMPT -PROMPT Legend: -PROMPT OK - Package has not changed since last tracking -PROMPT WARNING - Package code changed without version update -PROMPT -PROMPT For detailed hash information, use: -PROMPT SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('OWNER', 'PACKAGE') FROM DUAL; -PROMPT ======================================== - -SET FEEDBACK ON \ No newline at end of file diff --git a/README.md b/README.md index 8d6edea..170c118 100644 --- a/README.md +++ b/README.md @@ -42,39 +42,44 @@ sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1056.sql" New-Item -ItemType Directory -Force -Path "log" | Out-Null; Move-Item -Path "*.log" -Destination "log" -Force cd .\MARS_Packages\REL02\MARS-1046 -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars1046.sql" -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1046.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars1046.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1046.sql" 
7z a -pMojeSuperHaslo#123 -mhe=on M1046_arch.7z MARS-1046/ cd .\MARS_Packages\REL01_ADDITIONS\MARS-826-PREHOOK -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars826_prehook.sql" -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars826_prehook.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars826_prehook.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars826_prehook.sql" 7z a -pMojeSuperHaslo#123 -mhe=on M826PH_arch.7z MARS-826-PREHOOK cd .\MARS_Packages\REL01_ADDITIONS\MARS-826 -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars826.sql" -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars826.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars826.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars826.sql" 7z a -pMojeSuperHaslo#123 -mhe=on M826_arch.7z MARS-826\ cd .\MARS_Packages\REL01_ADDITIONS\MARS-835 -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835.sql" -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835.sql" 7z a -pMojeSuperHaslo#123 -mhe=on M835_arch.7z MARS-835 cd .\MARS_Packages\REL01_ADDITIONS\MARS-835-PREHOOK -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835_prehook.sql" -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835_prehook.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835_prehook.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835_prehook.sql" 7z a -pMojeSuperHaslo#123 -mhe=on M835PH_arch.7z MARS-835-PREHOOK - cd .\MARS_Packages\REL03\MARS-1057 -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars1057.sql" -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1057.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars1057.sql" +echo 'yes' | sql 
"ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1057.sql" 7z a -pMojeSuperHaslo#123 -mhe=on M1057_arch.7z MARS-1057 cd .\MARS_Packages\REL01_ADDITIONS\MARS-828 echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars828.sql" echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars828.sql" -7z a -pMojeSuperHaslo#123 -mhe=on M828_arch.7z MARS-828\ \ No newline at end of file +7z a -pMojeSuperHaslo#123 -mhe=on M828_arch.7z MARS-828\ + + +cd .\MARS_Packages\REL02_POST\MARS-956 +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars956.sql" +echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars956.sql" +7z a -pMojeSuperHaslo#123 -mhe=on M956_arch.7z MARS-956