diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/.gitignore b/MARS_Packages/REL01_ADDITIONS/MARS-828/.gitignore deleted file mode 100644 index d8c41ba..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# MARS-828 Package - Git Ignore Rules -# Standard exclusions for MARS deployment packages - -# Confluence documentation (generated, not source) -confluence/ - -# Log files from SPOOL operations -log/ -*.log - -# Test directories and files -test/ -*_test.sql - -# Mock data scripts (development only) -mock_data/ - -# Temporary files -*.tmp -*.bak -*~ - -# OS-specific files -.DS_Store -Thumbs.db diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/01_MARS_828_install_add_archival_strategy_columns.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/01_MARS_828_install_add_archival_strategy_columns.sql deleted file mode 100644 index 7e64f56..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/01_MARS_828_install_add_archival_strategy_columns.sql +++ /dev/null @@ -1,44 +0,0 @@ --- MARS-828: Add archival strategy columns to A_SOURCE_FILE_CONFIG --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- Description: Adds ARCHIVAL_STRATEGY and MINIMUM_AGE_MONTHS columns to support flexible archival strategies - -PROMPT ======================================== -PROMPT MARS-828: Adding archival strategy columns -PROMPT ======================================== - --- Add new columns -ALTER TABLE CT_MRDS.A_SOURCE_FILE_CONFIG ADD ( - ARCHIVAL_STRATEGY VARCHAR2(30) DEFAULT 'THRESHOLD_BASED' NOT NULL, - MINIMUM_AGE_MONTHS NUMBER(3) DEFAULT NULL -); - --- Add check constraint for valid strategies -ALTER TABLE CT_MRDS.A_SOURCE_FILE_CONFIG ADD CONSTRAINT - CHK_ARCHIVAL_STRATEGY CHECK ( - ARCHIVAL_STRATEGY IN ('THRESHOLD_BASED', 'CURRENT_MONTH_ONLY', 'MINIMUM_AGE_MONTHS', 'HYBRID') - ); - --- Add comments -COMMENT ON COLUMN CT_MRDS.A_SOURCE_FILE_CONFIG.ARCHIVAL_STRATEGY IS - 'Archival strategy: THRESHOLD_BASED (days), CURRENT_MONTH_ONLY (exclude 
current month), MINIMUM_AGE_MONTHS (minimum age), HYBRID (combination)'; - -COMMENT ON COLUMN CT_MRDS.A_SOURCE_FILE_CONFIG.MINIMUM_AGE_MONTHS IS - 'Minimum age in months for archival (used with MINIMUM_AGE_MONTHS or HYBRID strategies)'; - --- Verify columns added -SELECT - column_name, - data_type, - data_length, - nullable, - data_default -FROM all_tab_columns -WHERE owner = 'CT_MRDS' - AND table_name = 'A_SOURCE_FILE_CONFIG' - AND column_name IN ('ARCHIVAL_STRATEGY', 'MINIMUM_AGE_MONTHS') -ORDER BY column_id; - -PROMPT ======================================== -PROMPT Archival strategy columns added successfully -PROMPT ======================================== diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/02_MARS_828_install_archival_strategy_trigger.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/02_MARS_828_install_archival_strategy_trigger.sql deleted file mode 100644 index 401ca46..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/02_MARS_828_install_archival_strategy_trigger.sql +++ /dev/null @@ -1,24 +0,0 @@ --- MARS-828: Create validation trigger for archival strategy --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- Description: Validates archival strategy configuration consistency - -PROMPT ======================================== -PROMPT MARS-828: Creating archival strategy validation trigger -PROMPT ======================================== - -@@new_version/TRG_BI_A_SRC_FILE_CFG_ARCH_VAL.sql - --- Verify trigger created -SELECT - trigger_name, - status, - trigger_type, - triggering_event -FROM all_triggers -WHERE owner = 'CT_MRDS' - AND trigger_name = 'TRG_BI_A_SRC_FILE_CFG_ARCH_VAL'; - -PROMPT ======================================== -PROMPT Archival strategy validation trigger created successfully -PROMPT ======================================== diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/03_MARS_828_install_CT_MRDS_FILE_ARCHIVER_SPEC.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/03_MARS_828_install_CT_MRDS_FILE_ARCHIVER_SPEC.sql 
deleted file mode 100644 index 9ff3c4c..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/03_MARS_828_install_CT_MRDS_FILE_ARCHIVER_SPEC.sql +++ /dev/null @@ -1,14 +0,0 @@ --- =================================================================== --- MARS-828: Install FILE_ARCHIVER Package Specification v3.0.0 --- =================================================================== --- Purpose: Deploy updated package specification with version 3.0.0 --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- =================================================================== - -@@new_version/FILE_ARCHIVER.pkg - - -PROMPT ======================================== -PROMPT FILE_ARCHIVER Specification v3.0.0 ready for installation -PROMPT ======================================== diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/04_MARS_828_install_CT_MRDS_FILE_ARCHIVER_BODY.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/04_MARS_828_install_CT_MRDS_FILE_ARCHIVER_BODY.sql deleted file mode 100644 index d7d8792..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/04_MARS_828_install_CT_MRDS_FILE_ARCHIVER_BODY.sql +++ /dev/null @@ -1,14 +0,0 @@ --- =================================================================== --- MARS-828: Install FILE_ARCHIVER Package Body v3.0.0 --- =================================================================== --- Purpose: Deploy updated package body with GET_ARCHIVAL_WHERE_CLAUSE function --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- Changes: --- - Added GET_ARCHIVAL_WHERE_CLAUSE private function --- - Updated ARCHIVE_TABLE_DATA to use strategy-based filtering --- - Updated GATHER_TABLE_STAT to use strategy-based statistics --- =================================================================== - -@@new_version/FILE_ARCHIVER.pkb - diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/05_MARS_828_verify_installation.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/05_MARS_828_verify_installation.sql deleted file mode 100644 index 
c4a4f6e..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/05_MARS_828_verify_installation.sql +++ /dev/null @@ -1,122 +0,0 @@ --- MARS-828: Verify installation --- Author: Grzegorz Michalski --- Date: 2026-01-27 - -PROMPT ======================================== -PROMPT MARS-828: Verification Script -PROMPT ======================================== - -SET SERVEROUTPUT ON SIZE UNLIMITED - --- 1. Verify columns added -PROMPT -PROMPT 1. Verifying A_SOURCE_FILE_CONFIG columns... -SELECT - column_name, - data_type, - nullable, - data_default -FROM all_tab_columns -WHERE owner = 'CT_MRDS' - AND table_name = 'A_SOURCE_FILE_CONFIG' - AND column_name IN ('ARCHIVAL_STRATEGY', 'MINIMUM_AGE_MONTHS') -ORDER BY column_id; - --- 2. Verify constraint -PROMPT -PROMPT 2. Verifying check constraint... -SELECT - constraint_name, - constraint_type, - search_condition -FROM all_constraints -WHERE owner = 'CT_MRDS' - AND table_name = 'A_SOURCE_FILE_CONFIG' - AND constraint_name = 'CHK_ARCHIVAL_STRATEGY'; - --- 3. Verify trigger -PROMPT -PROMPT 3. Verifying validation trigger... -SELECT - trigger_name, - status, - trigger_type -FROM all_triggers -WHERE owner = 'CT_MRDS' - AND trigger_name = 'TRG_BI_A_SRC_FILE_CFG_ARCH_VAL'; - --- 4. Check package compilation status -PROMPT -PROMPT 4. Checking FILE_ARCHIVER package status... -SELECT - object_name, - object_type, - status, - TO_CHAR(last_ddl_time, 'YYYY-MM-DD HH24:MI:SS') as last_ddl_time -FROM all_objects -WHERE owner = 'CT_MRDS' - AND object_name = 'FILE_ARCHIVER' - AND object_type IN ('PACKAGE', 'PACKAGE BODY') -ORDER BY object_type; - --- 5. Check for compilation errors -PROMPT -PROMPT 5. Checking for compilation errors... -SELECT - name, - type, - line, - position, - text -FROM all_errors -WHERE owner = 'CT_MRDS' - AND name = 'FILE_ARCHIVER' -ORDER BY type, sequence; - --- 6. Verify package version -PROMPT -PROMPT 6. Verifying FILE_ARCHIVER version... -SELECT CT_MRDS.FILE_ARCHIVER.GET_VERSION() as package_version FROM DUAL; - --- 7. 
Test trigger validation -PROMPT -PROMPT 7. Testing trigger validation (should fail)... -DECLARE - vTestPassed BOOLEAN := FALSE; -BEGIN - -- This should fail - MINIMUM_AGE_MONTHS strategy without value - INSERT INTO CT_MRDS.A_SOURCE_FILE_CONFIG ( - A_SOURCE_FILE_CONFIG_KEY, - A_SOURCE_KEY, - SOURCE_FILE_TYPE, - SOURCE_FILE_ID, - ARCHIVAL_STRATEGY, - MINIMUM_AGE_MONTHS - ) VALUES ( - -999, - 'TEST', - 'INPUT', - 'TEST', - 'MINIMUM_AGE_MONTHS', - NULL -- Should trigger error - ); - - ROLLBACK; - DBMS_OUTPUT.PUT_LINE('ERROR: Trigger validation did not fire!'); -EXCEPTION - WHEN OTHERS THEN - IF SQLCODE = -20999 THEN - DBMS_OUTPUT.PUT_LINE('SUCCESS: Trigger validation working correctly'); - DBMS_OUTPUT.PUT_LINE('Expected error: ' || SQLERRM); - vTestPassed := TRUE; - ELSE - DBMS_OUTPUT.PUT_LINE('ERROR: Unexpected error: ' || SQLERRM); - END IF; - ROLLBACK; -END; -/ - -PROMPT -PROMPT ======================================== -PROMPT MARS-828: Verification Complete -PROMPT ======================================== diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/91_MARS_828_rollback_FILE_ARCHIVER_SPEC.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/91_MARS_828_rollback_FILE_ARCHIVER_SPEC.sql deleted file mode 100644 index 204e834..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/91_MARS_828_rollback_FILE_ARCHIVER_SPEC.sql +++ /dev/null @@ -1,11 +0,0 @@ --- =================================================================== --- MARS-828: Rollback FILE_ARCHIVER Package Specification to v2.0.0 --- =================================================================== --- Purpose: Restore previous package specification version (pre-MARS-828) --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- WARNING: This removes all MARS-828 version information updates --- =================================================================== - -@@rollback/FILE_ARCHIVER.pkg - diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/92_MARS_828_rollback_FILE_ARCHIVER_BODY.sql 
b/MARS_Packages/REL01_ADDITIONS/MARS-828/92_MARS_828_rollback_FILE_ARCHIVER_BODY.sql deleted file mode 100644 index 9004a2b..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/92_MARS_828_rollback_FILE_ARCHIVER_BODY.sql +++ /dev/null @@ -1,11 +0,0 @@ --- =================================================================== --- MARS-828: Rollback FILE_ARCHIVER Package Body to v2.0.0 --- =================================================================== --- Purpose: Restore previous package body version (pre-MARS-828) --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- WARNING: This removes all MARS-828 archival strategy enhancements --- =================================================================== - -@@rollback/FILE_ARCHIVER.pkb - diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/93_MARS_828_rollback_trigger.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/93_MARS_828_rollback_trigger.sql deleted file mode 100644 index 0289cd4..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/93_MARS_828_rollback_trigger.sql +++ /dev/null @@ -1,20 +0,0 @@ --- MARS-828: Rollback validation trigger --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- Description: Drop archival strategy validation trigger - -PROMPT ======================================== -PROMPT MARS-828: Dropping archival strategy validation trigger -PROMPT ======================================== - -DROP TRIGGER CT_MRDS.TRG_BI_A_SRC_FILE_CFG_ARCH_VAL; - --- Verify trigger dropped -SELECT COUNT(*) as trigger_count -FROM all_triggers -WHERE owner = 'CT_MRDS' - AND trigger_name = 'TRG_BI_A_SRC_FILE_CFG_ARCH_VAL'; - -PROMPT ======================================== -PROMPT Validation trigger dropped successfully -PROMPT ======================================== diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/94_MARS_828_rollback_columns.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/94_MARS_828_rollback_columns.sql deleted file mode 100644 index fc481eb..0000000 --- 
a/MARS_Packages/REL01_ADDITIONS/MARS-828/94_MARS_828_rollback_columns.sql +++ /dev/null @@ -1,30 +0,0 @@ --- MARS-828: Rollback archival strategy columns --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- Description: Remove ARCHIVAL_STRATEGY and MINIMUM_AGE_MONTHS columns - -PROMPT ======================================== -PROMPT MARS-828: Removing archival strategy columns -PROMPT ======================================== - --- Drop check constraint first -ALTER TABLE CT_MRDS.A_SOURCE_FILE_CONFIG -DROP CONSTRAINT CHK_ARCHIVAL_STRATEGY; - --- Drop columns -ALTER TABLE CT_MRDS.A_SOURCE_FILE_CONFIG DROP ( - ARCHIVAL_STRATEGY, - MINIMUM_AGE_MONTHS -); - --- Verify columns dropped -SELECT - column_name -FROM all_tab_columns -WHERE owner = 'CT_MRDS' - AND table_name = 'A_SOURCE_FILE_CONFIG' - AND column_name IN ('ARCHIVAL_STRATEGY', 'MINIMUM_AGE_MONTHS'); - -PROMPT ======================================== -PROMPT Archival strategy columns removed successfully -PROMPT ======================================== diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/README.md b/MARS_Packages/REL01_ADDITIONS/MARS-828/README.md deleted file mode 100644 index 8d4aa72..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/README.md +++ /dev/null @@ -1,269 +0,0 @@ -# MARS-828: Enhanced Archival Strategies - -## Overview - -Implementation of flexible archival strategies for FILE_ARCHIVER package to support business requirements for different data retention policies across source systems. - -### Background - -**Issue**: Current FILE_ARCHIVER v2.0.0 uses fixed threshold-based archival (DAYS_FOR_ARCHIVE_THRESHOLD) which cannot accommodate: -1. **LM/TOP sources**: Archive all data except current month -2. 
**CSDB sources**: Archive only data older than 6 months - -**Root Cause**: Hardcoded WHERE clause in ARCHIVE_TABLE_DATA procedure: -```sql -WHERE extract(day from (systimestamp - workflow_start)) > DAYS_FOR_ARCHIVE_THRESHOLD -``` - -**Solution**: Introduce ARCHIVAL_STRATEGY configuration column with four strategies: -- `THRESHOLD_BASED` - Days-based threshold (backward compatible) -- `CURRENT_MONTH_ONLY` - Keep only current month data -- `MINIMUM_AGE_MONTHS` - Archive data older than X months -- `HYBRID` - Combination of current month and minimum age - -## Changes Made - -### Database Schema Changes -**Table**: CT_MRDS.A_SOURCE_FILE_CONFIG - -**Before**: -```sql --- Only threshold-based configuration available -DAYS_FOR_ARCHIVE_THRESHOLD NUMBER DEFAULT 30 -``` - -**After**: -```sql --- Flexible strategy-based configuration -ARCHIVAL_STRATEGY VARCHAR2(30) DEFAULT 'THRESHOLD_BASED' NOT NULL, -MINIMUM_AGE_MONTHS NUMBER(3), -DAYS_FOR_ARCHIVE_THRESHOLD NUMBER DEFAULT 30, -CONSTRAINT CHK_ARCHIVAL_STRATEGY CHECK (ARCHIVAL_STRATEGY IN ('THRESHOLD_BASED', 'CURRENT_MONTH_ONLY', 'MINIMUM_AGE_MONTHS', 'HYBRID')) -``` - -**New Trigger**: TRG_BI_ARCHIVAL_STRATEGY_VAL -- Validates MINIMUM_AGE_MONTHS required for strategies that need it -- Ensures data integrity before insert/update - -### Package Changes -**Package**: CT_MRDS.FILE_ARCHIVER - -**Version**: 2.0.0 → 3.0.0 (MAJOR - breaking changes to internal logic) - -**Specification Changes**: -- Updated PACKAGE_VERSION: '2.0.0' → '3.0.0' -- Updated VERSION_HISTORY with MARS-828 entry -- Updated PACKAGE_BUILD_DATE to deployment date - -**Body Changes**: -1. **New Function**: GET_ARCHIVAL_WHERE_CLAUSE(pSourceFileConfig) - Returns strategy-specific WHERE clause -2. **Updated**: ARCHIVE_TABLE_DATA - Uses GET_ARCHIVAL_WHERE_CLAUSE for flexible filtering -3. 
**Updated**: GATHER_TABLE_STAT - Uses GET_ARCHIVAL_WHERE_CLAUSE for statistics calculation - -**File Structure**: -- **rollback/** - Backup of FILE_ARCHIVER v2.0.0 (for rollback) -- **new_version/** - Updated FILE_ARCHIVER v3.0.0 (with strategy support) - -## Archival Strategies - -| Strategy | WHERE Clause Logic | Configuration Required | Use Case | -|----------|-------------------|----------------------|----------| -| `THRESHOLD_BASED` | `extract(day from (systimestamp - workflow_start)) > DAYS_FOR_ARCHIVE_THRESHOLD` | DAYS_FOR_ARCHIVE_THRESHOLD | Legacy compatibility | -| `CURRENT_MONTH_ONLY` | `TRUNC(workflow_start, 'MM') < TRUNC(SYSDATE, 'MM')` | None | General sources (LM, TOP) | -| `MINIMUM_AGE_MONTHS` | `workflow_start < ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -X)` | MINIMUM_AGE_MONTHS | CSDB (6 months retention) | -| `HYBRID` | Both CURRENT_MONTH_ONLY AND MINIMUM_AGE_MONTHS | MINIMUM_AGE_MONTHS | Advanced scenarios | - -## Configuration Examples - -```sql --- LM/TOP sources: Archive everything except current month -UPDATE A_SOURCE_FILE_CONFIG -SET ARCHIVAL_STRATEGY = 'CURRENT_MONTH_ONLY', - MINIMUM_AGE_MONTHS = NULL -WHERE A_SOURCE_KEY IN ('LM', 'TOP'); - --- CSDB: Archive only data older than 6 months -UPDATE A_SOURCE_FILE_CONFIG -SET ARCHIVAL_STRATEGY = 'MINIMUM_AGE_MONTHS', - MINIMUM_AGE_MONTHS = 6 -WHERE A_SOURCE_KEY = 'CSDB' - AND TABLE_ID IN ('DEBT', 'DEBT_DAILY'); - --- Legacy sources: Keep existing threshold behavior -UPDATE A_SOURCE_FILE_CONFIG -SET ARCHIVAL_STRATEGY = 'THRESHOLD_BASED', - DAYS_FOR_ARCHIVE_THRESHOLD = 30 -WHERE A_SOURCE_KEY = 'C2D'; - --- Advanced hybrid: Current month + 3 months minimum -UPDATE A_SOURCE_FILE_CONFIG -SET ARCHIVAL_STRATEGY = 'HYBRID', - MINIMUM_AGE_MONTHS = 3 -WHERE A_SOURCE_KEY = 'SPECIAL'; -``` - -## Deployment - -### Prerequisites -- **User**: ADMIN with full privileges -- **Database**: Oracle 23ai -- **ENV_MANAGER**: v3.x or higher -- **FILE_MANAGER**: v3.x or higher -- **FILE_ARCHIVER**: v2.0.0 (will upgrade to 
v3.0.0) -- **Table**: A_SOURCE_FILE_CONFIG must exist - -### Installation Steps - -**Option 1: Master Script (Recommended)** -```powershell -# Navigate to MARS-828 directory -cd .\MARS_Packages\REL01_ADDITIONS\MARS-828 - -# Execute installation (requires ADMIN user) -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars828.sql" - -# Log file created: log/INSTALL_MARS_828__.log -``` - -**Installation Workflow**: -1. **Add Columns** - ARCHIVAL_STRATEGY, MINIMUM_AGE_MONTHS to A_SOURCE_FILE_CONFIG -2. **Create Trigger** - Validation trigger TRG_BI_ARCHIVAL_STRATEGY_VAL -3. **Deploy Package Spec** - FILE_ARCHIVER v3.0.0 specification -4. **Deploy Package Body** - FILE_ARCHIVER v3.0.0 body with GET_ARCHIVAL_WHERE_CLAUSE -5. **Verify Installation** - Check package compilation and structure -6. **Track Version** - Record v3.0.0 in ENV_MANAGER -7. **Verify Packages** - Check for untracked changes using ENV_MANAGER hash verification - -**Option 2: Using Get-Content** -```powershell -Get-Content "MARS_Packages\REL01_ADDITIONS\MARS-828\install_mars828.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high" -``` - -### Verification -```sql --- Check package compilation status -SELECT object_name, object_type, status -FROM all_objects -WHERE owner = 'CT_MRDS' - AND object_name = 'FILE_ARCHIVER' - AND object_type IN ('PACKAGE', 'PACKAGE BODY'); - --- Verify package version -SELECT CT_MRDS.FILE_ARCHIVER.GET_VERSION() FROM DUAL; --- Expected: 3.0.0 - --- Check new columns exist -SELECT column_name, data_type, data_default -FROM all_tab_columns -WHERE owner = 'CT_MRDS' - AND table_name = 'A_SOURCE_FILE_CONFIG' - AND column_name IN ('ARCHIVAL_STRATEGY', 'MINIMUM_AGE_MONTHS'); - --- Test strategy configuration -SELECT A_SOURCE_KEY, TABLE_ID, ARCHIVAL_STRATEGY, MINIMUM_AGE_MONTHS -FROM CT_MRDS.A_SOURCE_FILE_CONFIG -ORDER BY A_SOURCE_KEY, TABLE_ID; -``` - -### Rollback -```powershell -# Execute rollback script (requires ADMIN user) -cd .\MARS_Packages\REL01_ADDITIONS\MARS-828 -sql 
"ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars828.sql" - -# Log file created: log/ROLLBACK_MARS_828__.log -``` - -**Rollback Workflow**: -1. **Restore Package Body** - FILE_ARCHIVER v2.0.0 body -2. **Restore Package Spec** - FILE_ARCHIVER v2.0.0 specification -3. **Drop Trigger** - Remove TRG_BI_ARCHIVAL_STRATEGY_VAL -4. **Drop Columns** - Remove ARCHIVAL_STRATEGY, MINIMUM_AGE_MONTHS -5. **Track Rollback** - Record v2.0.0 restoration in ENV_MANAGER -6. **Verify Packages** - Check package hash consistency - -## Testing - -**Test Scripts**: Located in `test/` folder - -**Main Test Script**: test/test_archival_strategies.sql - -```sql --- Test 1: CURRENT_MONTH_ONLY strategy --- Expected: Archives data from previous months only -SELECT COUNT(*) FROM table WHERE TRUNC(workflow_start, 'MM') < TRUNC(SYSDATE, 'MM'); - --- Test 2: MINIMUM_AGE_MONTHS strategy (6 months) --- Expected: Archives data older than 6 months -SELECT COUNT(*) FROM table WHERE workflow_start < ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -6); - --- Test 3: HYBRID strategy --- Expected: Archives data from previous months AND older than X months -``` - -## Dependencies - -### Required Packages -- **CT_MRDS.ENV_MANAGER** v3.x - Error handling, logging, version tracking -- **CT_MRDS.FILE_MANAGER** v3.x - Bucket URI resolution -- **MRDS_LOADER.cloud_wrapper** - DBMS_CLOUD operations - -### Database Objects -- **Table**: CT_MRDS.A_SOURCE_FILE_CONFIG - Configuration storage -- **Table**: CT_ODS.A_LOAD_HISTORY - Workflow tracking -- **Credential**: DEF_CRED_ARN - OCI bucket access - -## Files Included - -1. **README.md** - This documentation file -2. **.gitignore** - Git exclusions (confluence/, log/, test/, mock_data/) -3. **install_mars828.sql** - Master installation script with SPOOL logging (7 steps) -4. **rollback_mars828.sql** - Master rollback script (6 steps) -5. **01_MARS_828_install_add_archival_strategy_columns.sql** - ALTER TABLE DDL -6. 
**02_MARS_828_install_archival_strategy_trigger.sql** - CREATE TRIGGER DDL -7. **03_MARS_828_install_CT_MRDS_FILE_ARCHIVER_SPEC.sql** - Deploy package specification v3.0.0 -8. **04_MARS_828_install_CT_MRDS_FILE_ARCHIVER_BODY.sql** - Deploy package body v3.0.0 -9. **05_MARS_828_verify_installation.sql** - Verification queries -10. **91_MARS_828_rollback_FILE_ARCHIVER_SPEC.sql** - Restore package specification v2.0.0 -11. **92_MARS_828_rollback_FILE_ARCHIVER_BODY.sql** - Restore package body v2.0.0 -12. **93_MARS_828_rollback_trigger.sql** - DROP TRIGGER -13. **94_MARS_828_rollback_columns.sql** - ALTER TABLE DROP COLUMN -14. **track_package_versions.sql** - Universal version tracking script (Standard) -15. **verify_packages_version.sql** - Universal package verification script (Standard) -16. **test/** - Test scripts and validation scenarios -17. **rollback/** - Backup of FILE_ARCHIVER v2.0.0 (for rollback) -18. **new_version/** - Updated FILE_ARCHIVER v3.0.0 (deployment source) - -## Impact Analysis - -### Backward Compatibility -✅ **FULLY BACKWARD COMPATIBLE**: Default ARCHIVAL_STRATEGY = 'THRESHOLD_BASED' maintains existing behavior for all sources without configuration changes. - -### Affected Procedures -1. **ARCHIVE_TABLE_DATA** - Uses GET_ARCHIVAL_WHERE_CLAUSE for WHERE clause generation -2. **GATHER_TABLE_STAT** - Uses GET_ARCHIVAL_WHERE_CLAUSE for statistics calculation - -### Configuration Migration -No automatic migration required. 
New columns have sensible defaults: -- ARCHIVAL_STRATEGY = 'THRESHOLD_BASED' (maintains current behavior) -- MINIMUM_AGE_MONTHS = NULL (not required for THRESHOLD_BASED) - -## Version History -- **v3.0.0** (2026-01-27): Added flexible archival strategies (CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID) via ARCHIVAL_STRATEGY configuration -- **v2.0.0** (2025-10-01): Initial FILE_ARCHIVER package with threshold-based archival - -## Related JIRA Issues -- **MARS-828**: Enhanced Archival Strategies implementation - -## Author -Created by: Grzegorz Michalski -Date: 2026-01-27 -Schema: CT_MRDS -Package: FILE_ARCHIVER - - -Grzegorz Michalski - -## Date - -2026-01-27 diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/install_mars828.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/install_mars828.sql deleted file mode 100644 index 319bf02..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/install_mars828.sql +++ /dev/null @@ -1,116 +0,0 @@ --- ============================================================================ --- MARS-828 Master Installation Script --- ============================================================================ --- Purpose: Deploy enhanced archival strategies for FILE_ARCHIVER package --- Target Schema: CT_MRDS --- Estimated Time: 2-3 minutes --- Prerequisites: FILE_ARCHIVER v2.0.0, ENV_MANAGER v3.x, ADMIN privileges --- ============================================================================ - -SET SERVEROUTPUT ON SIZE UNLIMITED -SET VERIFY OFF -SET FEEDBACK ON -SET ECHO OFF - -SET SERVEROUTPUT ON SIZE UNLIMITED -SET VERIFY OFF -SET FEEDBACK ON -SET ECHO OFF - --- Create log directory if it doesn't exist -host mkdir log 2>nul - --- Generate dynamic SPOOL filename with timestamp -var filename VARCHAR2(100) -BEGIN - :filename := 'log/INSTALL_MARS_828_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; -END; -/ -column filename new_value _filename -select :filename filename from dual; -spool 
&_filename - -PROMPT -PROMPT ============================================================================ -PROMPT MARS-828 Installation Starting -PROMPT ============================================================================ -PROMPT Package: CT_MRDS.FILE_ARCHIVER -PROMPT Change: Enhanced archival strategies (CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID) -PROMPT Purpose: Flexible archival policies per data source -PROMPT Steps: 7 (DDL, Trigger, Packages, Verify, Track, Verify) -PROMPT Timestamp: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS install_start FROM DUAL; -PROMPT ============================================================================ - --- Confirm installation with user -ACCEPT continue CHAR PROMPT 'Type YES to continue with installation, or Ctrl+C to abort: ' -WHENEVER SQLERROR EXIT SQL.SQLCODE -BEGIN - IF '&continue' IS NULL OR TRIM('&continue') IS NULL OR UPPER(TRIM('&continue')) != 'YES' THEN - RAISE_APPLICATION_ERROR(-20001, 'Installation aborted by user'); - END IF; -END; -/ -WHENEVER SQLERROR CONTINUE - --- Installation steps -PROMPT7: Adding archival strategy columns to A_SOURCE_FILE_CONFIG -PROMPT =================================================================== -@@01_MARS_828_install_add_archival_strategy_columns.sql - -PROMPT -PROMPT Step 2/7: Creating validation trigger -PROMPT ====================================== -@@02_MARS_828_install_archival_strategy_trigger.sql - -PROMPT -PROMPT Step 3/7: Deploying FILE_ARCHIVER Package Specification v3.0.0 -PROMPT =============================================================== -@@03_MARS_828_install_CT_MRDS_FILE_ARCHIVER_SPEC.sql - -PROMPT -PROMPT Step 4/7: Deploying FILE_ARCHIVER Package Body v3.0.0 -PROMPT ====================================================== -@@04_MARS_828_install_CT_MRDS_FILE_ARCHIVER_BODY.sql - -PROMPT -PROMPT Step 5/7: Verifying installation -PROMPT ================================= -@@05_MARS_828_verify_installation.sql - -PROMPT -PROMPT Step 6/7: Tracking 
package versions -PROMPT ==================================== -@@track_package_versions.sql - -PROMPT -PROMPT Step 7/7: Verifying tracked packages -PROMPT ===================================== -@@verify_packages_version.sql - -PROMPT -PROMPT ============================================================================ -PROMPT MARS-828 Installation Completed -PROMPT ============================================================================ -PROMPT Completion Time: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS install_end FROM DUAL; -PROMPT -PROMPT Installation Summary: -PROMPT - Package: CT_MRDS.FILE_ARCHIVER -PROMPT - Version: 2.0.0 -> 3.0.0 (MAJOR) -PROMPT - New Strategies: CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID -PROMPT - Backward Compatible: THRESHOLD_BASED (default) -PROMPT -PROMPT Next Steps: -PROMPT 1. Configure archival strategies per source: -PROMPT UPDATE A_SOURCE_FILE_CONFIG SET ARCHIVAL_STRATEGY = 'CURRENT_MONTH_ONLY' WHERE A_SOURCE_KEY = 'LM'; -PROMPT UPDATE A_SOURCE_FILE_CONFIG SET ARCHIVAL_STRATEGY = 'MINIMUM_AGE_MONTHS', MINIMUM_AGE_MONTHS = 6 WHERE A_SOURCE_KEY = 'CSDB'; -PROMPT 2. Test strategies using test_archival_strategies.sql -PROMPT 3. 
Monitor first archival run -PROMPT -PROMPT Log file: &_filename -PROMPT ============================================================================ - -spool off - -quit; diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/FILE_ARCHIVER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/FILE_ARCHIVER.pkb deleted file mode 100644 index e9f83f7..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/FILE_ARCHIVER.pkb +++ /dev/null @@ -1,537 +0,0 @@ -create or replace PACKAGE BODY CT_MRDS.FILE_ARCHIVER -AS - - ---------------------------------------------------------------------------------------------------- - -- PRIVATE FUNCTION: GET_ARCHIVAL_WHERE_CLAUSE - ---------------------------------------------------------------------------------------------------- - /** - * @name GET_ARCHIVAL_WHERE_CLAUSE - * @desc Private function that generates WHERE clause based on ARCHIVAL_STRATEGY configuration. - * Supports four strategies: THRESHOLD_BASED, CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID. 
- * @param pSourceFileConfig - Source file configuration record with ARCHIVAL_STRATEGY - * @return VARCHAR2 - WHERE clause for filtering archival candidates - **/ - FUNCTION GET_ARCHIVAL_WHERE_CLAUSE( - pSourceFileConfig IN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE - ) RETURN VARCHAR2 - IS - vWhereClause VARCHAR2(4000); - cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); - BEGIN - CASE pSourceFileConfig.ARCHIVAL_STRATEGY - -- Legacy threshold-based strategy (backward compatible) - WHEN 'THRESHOLD_BASED' THEN - vWhereClause := 'extract(day from (systimestamp - workflow_start)) > ' || pSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD; - - -- Archive all data except current month - WHEN 'CURRENT_MONTH_ONLY' THEN - vWhereClause := 'TRUNC(workflow_start, ''MM'') < TRUNC(SYSDATE, ''MM'')'; - - -- Archive only data older than X months - WHEN 'MINIMUM_AGE_MONTHS' THEN - IF pSourceFileConfig.MINIMUM_AGE_MONTHS IS NULL THEN - RAISE_APPLICATION_ERROR(-20001, 'MINIMUM_AGE_MONTHS must be configured for MINIMUM_AGE_MONTHS strategy'); - END IF; - vWhereClause := 'workflow_start < ADD_MONTHS(TRUNC(SYSDATE, ''MM''), -' || pSourceFileConfig.MINIMUM_AGE_MONTHS || ')'; - - -- Hybrid: Current month exclusion AND minimum age requirement - WHEN 'HYBRID' THEN - IF pSourceFileConfig.MINIMUM_AGE_MONTHS IS NULL THEN - RAISE_APPLICATION_ERROR(-20001, 'MINIMUM_AGE_MONTHS must be configured for HYBRID strategy'); - END IF; - vWhereClause := 'TRUNC(workflow_start, ''MM'') < TRUNC(SYSDATE, ''MM'') ' || - 'AND workflow_start < ADD_MONTHS(TRUNC(SYSDATE, ''MM''), -' || pSourceFileConfig.MINIMUM_AGE_MONTHS || ')'; - - ELSE - RAISE_APPLICATION_ERROR(-20002, 'Invalid ARCHIVAL_STRATEGY: ' || pSourceFileConfig.ARCHIVAL_STRATEGY); - END CASE; - - RETURN vWhereClause; - END GET_ARCHIVAL_WHERE_CLAUSE; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_TABLE_STAT(pSourceFileConfigKey IN NUMBER) - RETURN CT_MRDS.A_TABLE_STAT%ROWTYPE - IS - vTableStat 
CT_MRDS.A_TABLE_STAT%ROWTYPE; - vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - vCount PLS_INTEGER; - vSourceFileType CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE; - - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey),NULL))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); - SELECT count(*) , min(SOURCE_FILE_TYPE) - INTO vCount, vSourceFileType - FROM CT_MRDS.A_TABLE_STAT s - JOIN CT_MRDS.A_SOURCE_FILE_CONFIG c - ON s.A_SOURCE_FILE_CONFIG_KEY = c.A_SOURCE_FILE_CONFIG_KEY - WHERE s.A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; - - IF vCount=0 and vSourceFileType='INPUT' THEN - GATHER_TABLE_STAT(pSourceFileConfigKey); - END IF; - - BEGIN - SELECT * - INTO vTableStat - FROM CT_MRDS.A_TABLE_STAT - WHERE A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; --- EXCEPTION --- WHEN NO_DATA_FOUND THEN --- - END; - - ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); - RETURN vTableStat; - - END GET_TABLE_STAT; - - ---------------------------------------------------------------------------------------------------- - - PROCEDURE ARCHIVE_TABLE_DATA ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) - IS - vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; - vTableStat CT_MRDS.A_TABLE_STAT%ROWTYPE; - vQuery VARCHAR2(4000); - vTableName VARCHAR2(200); - vUri VARCHAR2(1000); - vfiles T_FILENAMES; - vFilename VARCHAR2(300); - vOperationId NUMBER := -1; - vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - vArchivalTriggeredBy VARCHAR2(60); -- Possible values: FILES_COUNT, ROWS_COUNT, BYTES_SUM - vUserLoadOperations USER_LOAD_OPERATIONS%ROWTYPE; - vProcessControlStatus VARCHAR2(60) := 'OK'; - - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL'))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); 
- - vSourceFileConfig := FILE_MANAGER.GET_SOURCE_FILE_CONFIG(pSourceFileConfigKey => pSourceFileConfigKey); - vTableStat := GET_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - - if vSourceFileConfig.SOURCE_FILE_TYPE <> 'INPUT' then - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE, 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NOT_INPUT_SOURCE_FILE_TYPE, ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE); - end if; - - if vTableStat.created < sysdate-(vSourceFileConfig.HOURS_TO_EXPIRE_STATISTICS/24) then - GATHER_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - vTableStat := GET_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - end if; - - if vTableStat.OVER_ARCH_THRESOLD_FILE_COUNT >= vSourceFileConfig.FILES_COUNT_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := 'FILES_COUNT'; - elsif vTableStat.OVER_ARCH_THRESOLD_ROW_COUNT >= vSourceFileConfig.ROWS_COUNT_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := vArchivalTriggeredBy||', ROWS_COUNT'; - elsif vTableStat.OVER_ARCH_THRESOLD_SIZE >= vSourceFileConfig.BYTES_SUM_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := vArchivalTriggeredBy||', BYTES_SUM'; - else ENV_MANAGER.LOG_PROCESS_EVENT('Non of archival triggers reached','INFO'); - end if; - - if LENGTH(vArchivalTriggeredBy)>0 THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Archival Triggered By: '||vArchivalTriggeredBy,'INFO'); - vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS'; - - -- Use strategy-based WHERE clause (MARS-828) - vQuery := ' - select t_filename( - file$name - ,file$path - , to_char(h.workflow_start,''yyyy'') - , to_char(h.workflow_start,''mm'') - ) - - from '||vTableName||' s - join CT_MRDS.a_workflow_history h - on s.a_workflow_history_key = h.a_workflow_history_key - where ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig) - ; - - -- Get all files 
that will be archived into "vfiles" collection ("regular data files") - execute immediate vQuery bulk collect into vfiles; - - -- Start EXPORT "regular data files" to parquet and DROP "csv" - FOR ym_loop IN (select distinct year, month from table(vfiles) order by 1,2) LOOP - dbms_output.put_line('year: '||ym_loop.year||' - '||'month: '||ym_loop.month); - vQuery:= - 'select - s.* --- ,r.partition_year --- ,r.partition_month - from '|| vTableName ||' s - join CT_MRDS.A_SOURCE_FILE_RECEIVED r - on s.file$name = r.source_file_name - and r.a_source_file_config_key = '||pSourceFileConfigKey||' - and r.partition_year='''||ym_loop.year||''' - and r.partition_month='''||ym_loop.month||''' - and r.PROCESSING_STATUS = ''INGESTED'' - ' - ; - vUri := FILE_MANAGER.GET_BUCKET_URI('ARCHIVE')||vSourceFileConfig.A_SOURCE_KEY||'/'||vSourceFileConfig.TABLE_ID||'/PARTITION_YEAR='||ym_loop.year||'/PARTITION_MONTH='||ym_loop.month||'/'; - - ENV_MANAGER.LOG_PROCESS_EVENT('Start Archiving for YEAR_MONTH: '||ym_loop.year||'_'||ym_loop.month ,'INFO'); - ENV_MANAGER.LOG_PROCESS_EVENT('Parameter for DBMS_CLOUD.EXPORT_DATA => file_uri_list' ,'DEBUG',vUri); - ENV_MANAGER.LOG_PROCESS_EVENT('Parameter for DBMS_CLOUD.EXPORT_DATA => query' ,'DEBUG',vQuery); - - - - BEGIN - DBMS_CLOUD.EXPORT_DATA( - credential_name => ENV_MANAGER.gvCredentialName, - file_uri_list => vUri||'d' , - format => json_object('type' value 'parquet'), - query => vQuery, - operation_id => vOperationId - ); - EXCEPTION - WHEN OTHERS THEN - vProcessControlStatus :='EXPORT_FAILURE'; - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED); - - END; - - ENV_MANAGER.LOG_PROCESS_EVENT('vOperationId of export: '||vOperationId,'DEBUG'); - - -- Get USER_LOAD_OPERATIONS info - select * - into vUserLoadOperations - from USER_LOAD_OPERATIONS - where id = 
vOperationId; - - IF vUserLoadOperations.STATUS <>'COMPLETED' THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, - ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED ||cgBL|| ' Export ended with status '||vUserLoadOperations.STATUS); - ELSIF vUserLoadOperations.STATUS = 'COMPLETED' and vUserLoadOperations.ROWS_LOADED = 0 THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, - ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED ||cgBL|| ' Zero rows were exported.'); - ELSE - ENV_MANAGER.LOG_PROCESS_EVENT('Data exported to archival file for YEAR_MONTH: 2025_01','INFO', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(FILE_MANAGER.GET_DET_USER_LOAD_OPERATIONS (pOperationId => vOperationId),'DEBUG', vParameters); - END IF; - - SELECT - object_name - into vFilename - from DBMS_CLOUD.LIST_OBJECTS( - credential_name => 'OCI$RESOURCE_PRINCIPAL', - location_uri => vUri) - where TO_UTC_TIMESTAMP_TZ(REGEXP_REPLACE(object_name, '.*(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})(\d{2})(\d{6})Z\.parquet$', '\1-\2-\3T\4:\5:\6.\7')) - between vUserLoadOperations.START_TIME - and vUserLoadOperations.UPDATE_TIME - ; - - -- Try to drop EXPORTED FILES ("regular data files") - BEGIN - FOR f in (select filename, pathname from table(vfiles) where year = ym_loop.year and month = ym_loop.month) loop - - -- first change of status - BEGIN - UPDATE CT_MRDS.A_SOURCE_FILE_RECEIVED r - SET PROCESSING_STATUS = 'ARCHIVED' - ,ARCH_FILE_NAME = vUri||vFilename - WHERE r.a_source_file_config_key= pSourceFileConfigKey - AND r.source_file_name = f.filename - AND r.processing_status = 'INGESTED' - ; - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - vProcessControlStatus := 'CHANGE_STATUS_TO_ARCHIVED_FAILURE'; - END; - EXIT WHEN vProcessControlStatus = 'CHANGE_STATUS_TO_ARCHIVED_FAILURE'; - - -- move file to trash before dropping - BEGIN - DBMS_CLOUD.MOVE_OBJECT(source_credential_name => 
ENV_MANAGER.gvCredentialName, - source_object_uri => f.pathname||'/'||f.filename, - target_object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename, - target_credential_name => ENV_MANAGER.gvCredentialName - ); - ENV_MANAGER.LOG_PROCESS_EVENT('File moved to TRASH.','DEBUG', f.pathname||'/'||f.filename); - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Failed to move file to TRASH.','ERROR', f.pathname||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - rollback; - vProcessControlStatus := 'MOVE_FILE_TO_TRASH_FAILURE'; - END; - EXIT WHEN vProcessControlStatus = 'MOVE_FILE_TO_TRASH_FAILURE'; - commit; - END LOOP; - - -------------------------------------------------------------------- - -- IF All goes fine till this point, we drop files from TRASH (if not then ROLLBACK PART) - IF vProcessControlStatus = 'OK' THEN - FOR f in (select filename, pathname from table(vfiles) where year = ym_loop.year and month = ym_loop.month) LOOP - --Drop file from TRASH - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT('File dropped from TRASH.','DEBUG', f.pathname||'/'||f.filename); - END LOOP; - - --ROLLBACK PART - --ROLLBACK PROCESS in case of FAILURE (restore files from TRASH) - ELSIF vProcessControlStatus = 'MOVE_FILE_TO_TRASH_FAILURE' THEN - FOR f in ( SELECT vf.filename, vf.pathname - FROM TABLE(vfiles) vf - JOIN CT_MRDS.A_SOURCE_FILE_RECEIVED r - ON r.source_file_name = vf.filename - AND r.a_source_file_config_key = pSourceFileConfigKey - AND r.PROCESSING_STATUS = 'ARCHIVED' - AND vf.year = ym_loop.year - AND vf.month = ym_loop.month - ) LOOP - BEGIN - DBMS_CLOUD.MOVE_OBJECT(source_credential_name => ENV_MANAGER.gvCredentialName, - source_object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename, - target_object_uri => 
f.pathname||'/'||f.filename, - target_credential_name => ENV_MANAGER.gvCredentialName - ); - ENV_MANAGER.LOG_PROCESS_EVENT('File restored from TRASH.','DEBUG', f.pathname||'/'||f.filename); - - UPDATE CT_MRDS.A_SOURCE_FILE_RECEIVED r - SET PROCESSING_STATUS = 'INGESTED' - ,ARCH_FILE_NAME = NULL - WHERE r.a_source_file_config_key = pSourceFileConfigKey - AND r.source_file_name = f.filename - ; - - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Failed to restore file from TRASH.','ERROR', replace(f.pathname,'ODS','TRASH')||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - vProcessControlStatus := 'RESTORE_FILE_FROM_TRASH_FAILURE'; - END; - END LOOP; - - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => vUri||vFilename); - ENV_MANAGER.LOG_PROCESS_EVENT('ROLLBACK operation: Archival PARQUET file dropped.','DEBUG', vUri||vFilename); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.MSG_MOVE_FILE_TO_TRASH_FAILED); - - ELSIF vProcessControlStatus = 'CHANGE_STATUS_TO_ARCHIVED_FAILURE' THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED, 'ERROR', vParameters); - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => vUri||vFilename); - ENV_MANAGER.LOG_PROCESS_EVENT('Archival PARQUET file dropped.','DEBUG', vUri||vFilename); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); - - ELSIF vProcessControlStatus = 'RESTORE_FILE_FROM_TRASH_FAILURE' THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Some files were not restored from TRASH. 
Check A_PROCESS_LOG table for details','ERROR'); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_RESTORE_FILE_FROM_TRASH, ENV_MANAGER.MSG_RESTORE_FILE_FROM_TRASH); - END IF; - - EXCEPTION - WHEN ENV_MANAGER.ERR_CHANGE_STAT_TO_ARCHIVED_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); - - WHEN ENV_MANAGER.ERR_MOVE_FILE_TO_TRASH_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.MSG_MOVE_FILE_TO_TRASH_FAILED); - - WHEN ENV_MANAGER.ERR_RESTORE_FILE_FROM_TRASH THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_RESTORE_FILE_FROM_TRASH, ENV_MANAGER.MSG_RESTORE_FILE_FROM_TRASH); - - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Error during archiving process','ERROR'); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DROP_EXPORTED_FILES_FAILED, ENV_MANAGER.MSG_DROP_EXPORTED_FILES_FAILED); - END; - -- END of "Try to drop EXPORTED FILES" - - ENV_MANAGER.LOG_PROCESS_EVENT('All archived files had been dropped.','INFO'); - ENV_MANAGER.LOG_PROCESS_EVENT('End Archiving for YEAR_MONTH: '||ym_loop.year||'_'||ym_loop.month ,'INFO'); - END LOOP; --ym_loop end (YEAR_MONTH) - - COMMIT; - - ELSE - ENV_MANAGER.LOG_PROCESS_EVENT('Non of archival thresholds reached. 
Skip archiving.'||vArchivalTriggeredBy,'INFO'); - END IF; - - ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); - EXCEPTION - WHEN ENV_MANAGER.ERR_NOT_INPUT_SOURCE_FILE_TYPE THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NOT_INPUT_SOURCE_FILE_TYPE, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_EXP_DATA_FOR_ARCH_FAILED THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_CHANGE_STAT_TO_ARCHIVED_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_MOVE_FILE_TO_TRASH_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - END ARCHIVE_TABLE_DATA; - - ---------------------------------------------------------------------------------------------------- - - PROCEDURE GATHER_TABLE_STAT ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) IS - vParameters 
CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - vStats CT_MRDS.A_TABLE_STAT%ROWTYPE; - vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; - vTableName VARCHAR2(200); - vQuery VARCHAR2(32000); - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL'))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); - vSourceFileConfig := FILE_MANAGER.GET_SOURCE_FILE_CONFIG(pSourceFileConfigKey => pSourceFileConfigKey); - - vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS'; - ENV_MANAGER.LOG_PROCESS_EVENT('vTableName','DEBUG',vTableName); - - -- Use strategy-based WHERE clause for statistics (MARS-828) - vQuery := - 'with tmp as ( - select - s.* - ,file$name as filename - ,h.workflow_start - , to_char(h.workflow_start,''yyyy'') as year - , to_char(h.workflow_start,''mm'') as month - from '||vTableName||' s - join CT_MRDS.a_workflow_history h - on s.a_workflow_history_key = h.a_workflow_history_key - ) - , tmp_gr as ( - select - filename, count(*) as row_count_per_file, min(workflow_start) as workflow_start - from tmp - group by filename - ) - select - NULL as A_TABLE_STAT_KEY - ,'||pSourceFileConfigKey||' as A_SOURCE_FILE_CONFIG_KEY - ,'''||vTableName||''' as TABLE_NAME - ,count(*) as FILE_COUNT - ,sum(case when ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig) || ' then 1 else 0 end) as OLD_FILE_COUNT - ,sum (row_count_per_file) as ROW_COUNT - ,sum(case when ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig) || ' then row_count_per_file else 0 end) as OLD_ROW_COUNT - ,sum(r.bytes) as BYTES - ,sum(case when ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig) || ' then r.bytes else 0 end) as OLD_BYTES - ,'||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' as DAYS_FOR_ARCHIVE_THRESHOLD - ,systimestamp as CREATED - from tmp_gr t - join (SELECT * from 
DBMS_CLOUD.LIST_OBJECTS( - credential_name => '''||ENV_MANAGER.gvCredentialName||''', - location_uri => FILE_MANAGER.GET_BUCKET_URI(''ODS'')||''ODS/'||vSourceFileConfig.A_SOURCE_KEY||'/'||vSourceFileConfig.TABLE_ID||'/'' - ) - ) r - on t.filename = r.object_name' - ; - ENV_MANAGER.LOG_PROCESS_EVENT('vQuery','DEBUG',vQuery); - execute immediate vQuery into vStats; - - vStats.A_TABLE_STAT_KEY := CT_MRDS.A_TABLE_STAT_KEY_SEQ.NEXTVAL; - insert into A_TABLE_STAT_HIST values vStats; - delete from A_TABLE_STAT where A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; - insert into A_TABLE_STAT values vStats; - COMMIT; - - ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - END GATHER_TABLE_STAT; - - ---------------------------------------------------------------------------------------------------- - -- PACKAGE VERSION MANAGEMENT FUNCTIONS IMPLEMENTATION - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_VERSION - RETURN VARCHAR2 - IS - BEGIN - RETURN PACKAGE_VERSION; - END GET_VERSION; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_BUILD_INFO - RETURN VARCHAR2 - IS - BEGIN - RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO( - pPackageName => 'FILE_ARCHIVER', - pVersion => PACKAGE_VERSION, - pBuildDate => PACKAGE_BUILD_DATE, - pAuthor => PACKAGE_AUTHOR - ); - END GET_BUILD_INFO; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_VERSION_HISTORY - RETURN VARCHAR2 - IS - BEGIN - RETURN 
ENV_MANAGER.FORMAT_VERSION_HISTORY( - pPackageName => 'FILE_ARCHIVER', - pVersionHistory => VERSION_HISTORY - ); - END GET_VERSION_HISTORY; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION ARCHIVE_TABLE_DATA ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) RETURN PLS_INTEGER - IS - vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL'))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); - ---- - ARCHIVE_TABLE_DATA(pSourceFileConfigKey => pSourceFileConfigKey); - ---- - ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); - RETURN SQLCODE; - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RETURN SQLCODE; - END ARCHIVE_TABLE_DATA; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION GATHER_TABLE_STAT ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) RETURN PLS_INTEGER - IS - vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL'))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); - ---- - GATHER_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - ---- - ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); - RETURN SQLCODE; - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RETURN SQLCODE; - END GATHER_TABLE_STAT; - - 
---------------------------------------------------------------------------------------------------- - -END; - -/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/FILE_ARCHIVER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/FILE_ARCHIVER.pkg deleted file mode 100644 index 2ff8ff2..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/FILE_ARCHIVER.pkg +++ /dev/null @@ -1,116 +0,0 @@ -create or replace PACKAGE CT_MRDS.FILE_ARCHIVER -AUTHID CURRENT_USER -AS - /** - * General comment for package: Please put comments for functions and procedures as shown in below example. - * It is a standard. - * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function - * which returns documentation text for confluence page (to Copy-Paste it). - **/ - - -- Example comment: - /** - * @name EX_PROCEDURE_NAME - * @desc Procedure description - * @example select LOGGING_AND_ERROR_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual; - * @ex_rslt Example Result - **/ - - -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) - PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.1.0'; - PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-01-29 21:00:00'; - PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; - - -- Version History (Latest changes first) - VERSION_HISTORY CONSTANT VARCHAR2(4000) := - '3.1.0 (2026-01-29): Added function overloads for ARCHIVE_TABLE_DATA and GATHER_TABLE_STAT returning SQLCODE for Python library integration' || CHR(13)||CHR(10) || - '3.0.0 (2026-01-27): MARS-828 - Added flexible archival strategies (CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID) via ARCHIVAL_STRATEGY configuration' || CHR(13)||CHR(10) || - '2.0.0 (2025-10-22): Added package versioning system using centralized ENV_MANAGER functions' || CHR(13)||CHR(10) || - '1.5.0 (2025-10-18): Enhanced ARCHIVE_TABLE_DATA with Hive-style partitioning support' || CHR(13)||CHR(10) || - '1.0.0 (2025-09-15): Initial release with table archival and 
statistics gathering'; - - cgBL CONSTANT VARCHAR2(2) := ENV_MANAGER.cgBL; - - /** - * @name ARCHIVE_TABLE_DATA - * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. - * Exports data from table specified by pSourceFileConfigKey(A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY) into PARQUET file on OCI infrustructure. - * Each YEAR_MONTH pair goes to seperate file (implicit partitioning). - **/ - PROCEDURE ARCHIVE_TABLE_DATA ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ); - - /** - * @name ARCHIVE_TABLE_DATA - * @desc Function overload for ARCHIVE_TABLE_DATA procedure. - * Returns SQLCODE for Python library integration. - * Calls the main ARCHIVE_TABLE_DATA procedure and captures execution result. - * @example SELECT FILE_ARCHIVER.ARCHIVE_TABLE_DATA(pSourceFileConfigKey => 123) FROM DUAL; - * @ex_rslt 0 (success) or error code - **/ - FUNCTION ARCHIVE_TABLE_DATA ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) RETURN PLS_INTEGER; - - - - /** - * @name GATHER_TABLE_STAT - * @desc Gather info about EXTERNAL TABLE specified by pSourceFileConfigKey parameter (A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY). - * Data is inserted into A_TABLE_STAT and A_TABLE_STAT_HIST. - **/ - PROCEDURE GATHER_TABLE_STAT ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ); - - /** - * @name GATHER_TABLE_STAT - * @desc Function overload for GATHER_TABLE_STAT procedure. - * Returns SQLCODE for Python library integration. - * Calls the main GATHER_TABLE_STAT procedure and captures execution result. 
- * @example SELECT FILE_ARCHIVER.GATHER_TABLE_STAT(pSourceFileConfigKey => 123) FROM DUAL; - * @ex_rslt 0 (success) or error code - **/ - FUNCTION GATHER_TABLE_STAT ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) RETURN PLS_INTEGER; - - --------------------------------------------------------------------------------------------------------------------------- - -- PACKAGE VERSION MANAGEMENT FUNCTIONS - --------------------------------------------------------------------------------------------------------------------------- - - /** - * @name GET_VERSION - * @desc Returns the current version number of the FILE_ARCHIVER package. - * Uses semantic versioning format (MAJOR.MINOR.PATCH). - * @example SELECT FILE_ARCHIVER.GET_VERSION() FROM DUAL; - * @ex_rslt 2.0.0 - **/ - FUNCTION GET_VERSION RETURN VARCHAR2; - - /** - * @name GET_BUILD_INFO - * @desc Returns comprehensive build information including version, build date, and author. - * Uses centralized ENV_MANAGER.GET_PACKAGE_VERSION_INFO function. - * @example SELECT FILE_ARCHIVER.GET_BUILD_INFO() FROM DUAL; - * @ex_rslt Package: FILE_ARCHIVER - * Version: 2.0.0 - * Build Date: 2025-10-22 16:45:00 - * Author: Grzegorz Michalski - **/ - FUNCTION GET_BUILD_INFO RETURN VARCHAR2; - - /** - * @name GET_VERSION_HISTORY - * @desc Returns complete version history with all releases and changes. - * Uses centralized ENV_MANAGER.FORMAT_VERSION_HISTORY function. - * @example SELECT FILE_ARCHIVER.GET_VERSION_HISTORY() FROM DUAL; - * @ex_rslt FILE_ARCHIVER Version History: - * 2.0.0 (2025-10-22): Added package versioning system... 
- **/ - FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; - -END; - -/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/TRG_BI_A_SRC_FILE_CFG_ARCH_VAL.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/TRG_BI_A_SRC_FILE_CFG_ARCH_VAL.sql deleted file mode 100644 index d25647a..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/new_version/TRG_BI_A_SRC_FILE_CFG_ARCH_VAL.sql +++ /dev/null @@ -1,40 +0,0 @@ --- =================================================================== --- MARS-828: Archival Strategy Validation Trigger --- =================================================================== --- Purpose: Validates archival strategy configuration consistency --- Author: Grzegorz Michalski --- Date: 2026-01-27 --- Version: 1.0.0 --- =================================================================== - -CREATE OR REPLACE TRIGGER CT_MRDS.TRG_BI_A_SRC_FILE_CFG_ARCH_VAL -BEFORE INSERT OR UPDATE ON CT_MRDS.A_SOURCE_FILE_CONFIG -FOR EACH ROW -DECLARE - vErrorMsg VARCHAR2(1000); -BEGIN - -- Validate MINIMUM_AGE_MONTHS required for specific strategies - IF :NEW.ARCHIVAL_STRATEGY IN ('MINIMUM_AGE_MONTHS', 'HYBRID') - AND :NEW.MINIMUM_AGE_MONTHS IS NULL THEN - vErrorMsg := 'MINIMUM_AGE_MONTHS is required for ' || :NEW.ARCHIVAL_STRATEGY || ' strategy'; - RAISE_APPLICATION_ERROR(-20999, vErrorMsg); - END IF; - - -- Validate MINIMUM_AGE_MONTHS is positive - IF :NEW.MINIMUM_AGE_MONTHS IS NOT NULL - AND :NEW.MINIMUM_AGE_MONTHS < 1 THEN - RAISE_APPLICATION_ERROR(-20998, 'MINIMUM_AGE_MONTHS must be greater than 0'); - END IF; - - -- Warn if MINIMUM_AGE_MONTHS set but strategy doesn't use it - IF :NEW.MINIMUM_AGE_MONTHS IS NOT NULL - AND :NEW.ARCHIVAL_STRATEGY NOT IN ('MINIMUM_AGE_MONTHS', 'HYBRID') THEN - -- This is just a warning scenario - allow but could log - NULL; - END IF; - -EXCEPTION - WHEN OTHERS THEN - RAISE; -END; -/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v2.0.0/FILE_ARCHIVER.pkb 
b/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v2.0.0/FILE_ARCHIVER.pkb deleted file mode 100644 index 83edcf3..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v2.0.0/FILE_ARCHIVER.pkb +++ /dev/null @@ -1,443 +0,0 @@ -create or replace PACKAGE BODY CT_MRDS.FILE_ARCHIVER -AS - - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_TABLE_STAT(pSourceFileConfigKey IN NUMBER) - RETURN CT_MRDS.A_TABLE_STAT%ROWTYPE - IS - vTableStat CT_MRDS.A_TABLE_STAT%ROWTYPE; - vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - vCount PLS_INTEGER; - vSourceFileType CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE; - - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey),NULL))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); - SELECT count(*) , min(SOURCE_FILE_TYPE) - INTO vCount, vSourceFileType - FROM CT_MRDS.A_TABLE_STAT s - JOIN CT_MRDS.A_SOURCE_FILE_CONFIG c - ON s.A_SOURCE_FILE_CONFIG_KEY = c.A_SOURCE_FILE_CONFIG_KEY - WHERE s.A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; - - IF vCount=0 and vSourceFileType='INPUT' THEN - GATHER_TABLE_STAT(pSourceFileConfigKey); - END IF; - - BEGIN - SELECT * - INTO vTableStat - FROM CT_MRDS.A_TABLE_STAT - WHERE A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; --- EXCEPTION --- WHEN NO_DATA_FOUND THEN --- - END; - - ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); - RETURN vTableStat; - - END GET_TABLE_STAT; - - ---------------------------------------------------------------------------------------------------- - - PROCEDURE ARCHIVE_TABLE_DATA ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) - IS - vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; - vTableStat CT_MRDS.A_TABLE_STAT%ROWTYPE; - vQuery VARCHAR2(4000); - vTableName VARCHAR2(200); - vUri VARCHAR2(1000); - vfiles T_FILENAMES; - 
vFilename VARCHAR2(300); - vOperationId NUMBER := -1; - vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - vArchivalTriggeredBy VARCHAR2(60); -- Possible values: FILES_COUNT, ROWS_COUNT, BYTES_SUM - vUserLoadOperations USER_LOAD_OPERATIONS%ROWTYPE; - vProcessControlStatus VARCHAR2(60) := 'OK'; - - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL'))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); - - vSourceFileConfig := FILE_MANAGER.GET_SOURCE_FILE_CONFIG(pSourceFileConfigKey => pSourceFileConfigKey); - vTableStat := GET_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - - if vSourceFileConfig.SOURCE_FILE_TYPE <> 'INPUT' then - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE, 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NOT_INPUT_SOURCE_FILE_TYPE, ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE); - end if; - - if vTableStat.created < sysdate-(vSourceFileConfig.HOURS_TO_EXPIRE_STATISTICS/24) then - GATHER_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - vTableStat := GET_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - end if; - - if vTableStat.OVER_ARCH_THRESOLD_FILE_COUNT >= vSourceFileConfig.FILES_COUNT_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := 'FILES_COUNT'; - elsif vTableStat.OVER_ARCH_THRESOLD_ROW_COUNT >= vSourceFileConfig.ROWS_COUNT_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := vArchivalTriggeredBy||', ROWS_COUNT'; - elsif vTableStat.OVER_ARCH_THRESOLD_SIZE >= vSourceFileConfig.BYTES_SUM_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := vArchivalTriggeredBy||', BYTES_SUM'; - else ENV_MANAGER.LOG_PROCESS_EVENT('Non of archival triggers reached','INFO'); - end if; - - if LENGTH(vArchivalTriggeredBy)>0 THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Archival Triggered By: '||vArchivalTriggeredBy,'INFO'); - vTableName := 
DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS'; - vQuery := ' - select t_filename( - file$name - ,file$path - , to_char(h.workflow_start,''yyyy'') - , to_char(h.workflow_start,''mm'') - ) - - from '||vTableName||' s - join CT_MRDS.a_workflow_history h - on s.a_workflow_history_key = h.a_workflow_history_key - where extract(day from (systimestamp - workflow_start)) > '||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD - ; - - -- Get all files that will be archived into "vfiles" collection ("regular data files") - execute immediate vQuery bulk collect into vfiles; - - -- Start EXPORT "regular data files" to parquet and DROP "csv" - FOR ym_loop IN (select distinct year, month from table(vfiles) order by 1,2) LOOP - dbms_output.put_line('year: '||ym_loop.year||' - '||'month: '||ym_loop.month); - vQuery:= - 'select - s.* --- ,r.partition_year --- ,r.partition_month - from '|| vTableName ||' s - join CT_MRDS.A_SOURCE_FILE_RECEIVED r - on s.file$name = r.source_file_name - and r.a_source_file_config_key = '||pSourceFileConfigKey||' - and r.partition_year='''||ym_loop.year||''' - and r.partition_month='''||ym_loop.month||''' - and r.PROCESSING_STATUS = ''INGESTED'' - ' - ; - vUri := FILE_MANAGER.GET_BUCKET_URI('ARCHIVE')||vSourceFileConfig.A_SOURCE_KEY||'/'||vSourceFileConfig.TABLE_ID||'/PARTITION_YEAR='||ym_loop.year||'/PARTITION_MONTH='||ym_loop.month||'/'; - - ENV_MANAGER.LOG_PROCESS_EVENT('Start Archiving for YEAR_MONTH: '||ym_loop.year||'_'||ym_loop.month ,'INFO'); - ENV_MANAGER.LOG_PROCESS_EVENT('Parameter for DBMS_CLOUD.EXPORT_DATA => file_uri_list' ,'DEBUG',vUri); - ENV_MANAGER.LOG_PROCESS_EVENT('Parameter for DBMS_CLOUD.EXPORT_DATA => query' ,'DEBUG',vQuery); - - - - BEGIN - DBMS_CLOUD.EXPORT_DATA( - credential_name => ENV_MANAGER.gvCredentialName, - file_uri_list => vUri||'d' , - format => json_object('type' value 'parquet'), - query => vQuery, - 
operation_id => vOperationId - ); - EXCEPTION - WHEN OTHERS THEN - vProcessControlStatus :='EXPORT_FAILURE'; - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED); - - END; - - ENV_MANAGER.LOG_PROCESS_EVENT('vOperationId of export: '||vOperationId,'DEBUG'); - - -- Get USER_LOAD_OPERATIONS info - select * - into vUserLoadOperations - from USER_LOAD_OPERATIONS - where id = vOperationId; - - IF vUserLoadOperations.STATUS <>'COMPLETED' THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, - ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED ||cgBL|| ' Export ended with status '||vUserLoadOperations.STATUS); - ELSIF vUserLoadOperations.STATUS = 'COMPLETED' and vUserLoadOperations.ROWS_LOADED = 0 THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, - ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED ||cgBL|| ' Zero rows were exported.'); - ELSE - ENV_MANAGER.LOG_PROCESS_EVENT('Data exported to archival file for YEAR_MONTH: 2025_01','INFO', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(FILE_MANAGER.GET_DET_USER_LOAD_OPERATIONS (pOperationId => vOperationId),'DEBUG', vParameters); - END IF; - - SELECT - object_name - into vFilename - from DBMS_CLOUD.LIST_OBJECTS( - credential_name => 'OCI$RESOURCE_PRINCIPAL', - location_uri => vUri) - where TO_UTC_TIMESTAMP_TZ(REGEXP_REPLACE(object_name, '.*(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})(\d{2})(\d{6})Z\.parquet$', '\1-\2-\3T\4:\5:\6.\7')) - between vUserLoadOperations.START_TIME - and vUserLoadOperations.UPDATE_TIME - ; - - -- Try to drop EXPORTED FILES ("regular data files") - BEGIN - FOR f in (select filename, pathname from table(vfiles) where year = ym_loop.year and month = ym_loop.month) loop - - -- first change of status - BEGIN - UPDATE CT_MRDS.A_SOURCE_FILE_RECEIVED r - SET PROCESSING_STATUS = 'ARCHIVED' - ,ARCH_FILE_NAME = 
vUri||vFilename - WHERE r.a_source_file_config_key= pSourceFileConfigKey - AND r.source_file_name = f.filename - AND r.processing_status = 'INGESTED' - ; - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - vProcessControlStatus := 'CHANGE_STATUS_TO_ARCHIVED_FAILURE'; - END; - EXIT WHEN vProcessControlStatus = 'CHANGE_STATUS_TO_ARCHIVED_FAILURE'; - - -- move file to trash before dropping - BEGIN - DBMS_CLOUD.MOVE_OBJECT(source_credential_name => ENV_MANAGER.gvCredentialName, - source_object_uri => f.pathname||'/'||f.filename, - target_object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename, - target_credential_name => ENV_MANAGER.gvCredentialName - ); - ENV_MANAGER.LOG_PROCESS_EVENT('File moved to TRASH.','DEBUG', f.pathname||'/'||f.filename); - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Failed to move file to TRASH.','ERROR', f.pathname||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - rollback; - vProcessControlStatus := 'MOVE_FILE_TO_TRASH_FAILURE'; - END; - EXIT WHEN vProcessControlStatus = 'MOVE_FILE_TO_TRASH_FAILURE'; - commit; - END LOOP; - - -------------------------------------------------------------------- - -- IF All goes fine till this point, we drop files from TRASH (if not then ROLLBACK PART) - IF vProcessControlStatus = 'OK' THEN - FOR f in (select filename, pathname from table(vfiles) where year = ym_loop.year and month = ym_loop.month) LOOP - --Drop file from TRASH - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT('File dropped from TRASH.','DEBUG', f.pathname||'/'||f.filename); - END LOOP; - - --ROLLBACK PART - --ROLLBACK PROCESS in case of FAILURE (restore files from TRASH) - ELSIF vProcessControlStatus 
= 'MOVE_FILE_TO_TRASH_FAILURE' THEN - FOR f in ( SELECT vf.filename, vf.pathname - FROM TABLE(vfiles) vf - JOIN CT_MRDS.A_SOURCE_FILE_RECEIVED r - ON r.source_file_name = vf.filename - AND r.a_source_file_config_key = pSourceFileConfigKey - AND r.PROCESSING_STATUS = 'ARCHIVED' - AND vf.year = ym_loop.year - AND vf.month = ym_loop.month - ) LOOP - BEGIN - DBMS_CLOUD.MOVE_OBJECT(source_credential_name => ENV_MANAGER.gvCredentialName, - source_object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename, - target_object_uri => f.pathname||'/'||f.filename, - target_credential_name => ENV_MANAGER.gvCredentialName - ); - ENV_MANAGER.LOG_PROCESS_EVENT('File restored from TRASH.','DEBUG', f.pathname||'/'||f.filename); - - UPDATE CT_MRDS.A_SOURCE_FILE_RECEIVED r - SET PROCESSING_STATUS = 'INGESTED' - ,ARCH_FILE_NAME = NULL - WHERE r.a_source_file_config_key = pSourceFileConfigKey - AND r.source_file_name = f.filename - ; - - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Failed to restore file from TRASH.','ERROR', replace(f.pathname,'ODS','TRASH')||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - vProcessControlStatus := 'RESTORE_FILE_FROM_TRASH_FAILURE'; - END; - END LOOP; - - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => vUri||vFilename); - ENV_MANAGER.LOG_PROCESS_EVENT('ROLLBACK operation: Archival PARQUET file dropped.','DEBUG', vUri||vFilename); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.MSG_MOVE_FILE_TO_TRASH_FAILED); - - ELSIF vProcessControlStatus = 'CHANGE_STATUS_TO_ARCHIVED_FAILURE' THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED, 'ERROR', vParameters); - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => vUri||vFilename); - ENV_MANAGER.LOG_PROCESS_EVENT('Archival PARQUET file dropped.','DEBUG', 
vUri||vFilename); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); - - ELSIF vProcessControlStatus = 'RESTORE_FILE_FROM_TRASH_FAILURE' THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Some files were not restored from TRASH. Check A_PROCESS_LOG table for details','ERROR'); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_RESTORE_FILE_FROM_TRASH, ENV_MANAGER.MSG_RESTORE_FILE_FROM_TRASH); - END IF; - - EXCEPTION - WHEN ENV_MANAGER.ERR_CHANGE_STAT_TO_ARCHIVED_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); - - WHEN ENV_MANAGER.ERR_MOVE_FILE_TO_TRASH_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.MSG_MOVE_FILE_TO_TRASH_FAILED); - - WHEN ENV_MANAGER.ERR_RESTORE_FILE_FROM_TRASH THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_RESTORE_FILE_FROM_TRASH, ENV_MANAGER.MSG_RESTORE_FILE_FROM_TRASH); - - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Error during archiving process','ERROR'); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DROP_EXPORTED_FILES_FAILED, ENV_MANAGER.MSG_DROP_EXPORTED_FILES_FAILED); - END; - -- END of "Try to drop EXPORTED FILES" - - ENV_MANAGER.LOG_PROCESS_EVENT('All archived files had been dropped.','INFO'); - ENV_MANAGER.LOG_PROCESS_EVENT('End Archiving for YEAR_MONTH: '||ym_loop.year||'_'||ym_loop.month ,'INFO'); - END LOOP; --ym_loop end (YEAR_MONTH) - - COMMIT; - - ELSE - ENV_MANAGER.LOG_PROCESS_EVENT('Non of archival thresholds reached. 
Skip archiving.'||vArchivalTriggeredBy,'INFO'); - END IF; - - ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); - EXCEPTION - WHEN ENV_MANAGER.ERR_NOT_INPUT_SOURCE_FILE_TYPE THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NOT_INPUT_SOURCE_FILE_TYPE, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_EXP_DATA_FOR_ARCH_FAILED THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_CHANGE_STAT_TO_ARCHIVED_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_MOVE_FILE_TO_TRASH_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - END ARCHIVE_TABLE_DATA; - - ---------------------------------------------------------------------------------------------------- - - PROCEDURE GATHER_TABLE_STAT ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) IS - vParameters 
CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - vStats CT_MRDS.A_TABLE_STAT%ROWTYPE; - vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; - vTableName VARCHAR2(200); - vQuery VARCHAR2(32000); - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL'))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); - vSourceFileConfig := FILE_MANAGER.GET_SOURCE_FILE_CONFIG(pSourceFileConfigKey => pSourceFileConfigKey); - - vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS'; - ENV_MANAGER.LOG_PROCESS_EVENT('vTableName','DEBUG',vTableName); - vQuery := - 'with tmp as ( - select - s.* - ,file$name as filename - ,h.workflow_start - , to_char(h.workflow_start,''yyyy'') as year - , to_char(h.workflow_start,''mm'') as month - from '||vTableName||' s - join CT_MRDS.a_workflow_history h - on s.a_workflow_history_key = h.a_workflow_history_key - ) - , tmp_gr as ( - select - filename, count(*) as row_count_per_file, min(workflow_start) as workflow_start - from tmp - group by filename - ) - select - NULL as A_TABLE_STAT_KEY - ,'||pSourceFileConfigKey||' as A_SOURCE_FILE_CONFIG_KEY - ,'''||vTableName||''' as TABLE_NAME - ,count(*) as FILE_COUNT - ,sum(case when extract(day from (systimestamp - workflow_start)) > '||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' then 1 else 0 end) as OLD_FILE_COUNT - ,sum (row_count_per_file) as ROW_COUNT - ,sum(case when extract(day from (systimestamp - workflow_start)) > '||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' then row_count_per_file else 0 end) as OLD_ROW_COUNT - ,sum(r.bytes) as BYTES - ,sum(case when extract(day from (systimestamp - workflow_start)) > '||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' then r.bytes else 0 end) as OLD_BYTES - ,'||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' as 
DAYS_FOR_ARCHIVE_THRESHOLD - ,systimestamp as CREATED - from tmp_gr t - join (SELECT * from DBMS_CLOUD.LIST_OBJECTS( - credential_name => '''||ENV_MANAGER.gvCredentialName||''', - location_uri => FILE_MANAGER.GET_BUCKET_URI(''ODS'')||''ODS/'||vSourceFileConfig.A_SOURCE_KEY||'/'||vSourceFileConfig.TABLE_ID||'/'' - ) - ) r - on t.filename = r.object_name' - ; - ENV_MANAGER.LOG_PROCESS_EVENT('vQuery','DEBUG',vQuery); - execute immediate vQuery into vStats; - - vStats.A_TABLE_STAT_KEY := CT_MRDS.A_TABLE_STAT_KEY_SEQ.NEXTVAL; - insert into A_TABLE_STAT_HIST values vStats; - delete from A_TABLE_STAT where A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; - insert into A_TABLE_STAT values vStats; - COMMIT; - - ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - END GATHER_TABLE_STAT; - - ---------------------------------------------------------------------------------------------------- - -- PACKAGE VERSION MANAGEMENT FUNCTIONS IMPLEMENTATION - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_VERSION - RETURN VARCHAR2 - IS - BEGIN - RETURN PACKAGE_VERSION; - END GET_VERSION; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_BUILD_INFO - RETURN VARCHAR2 - IS - BEGIN - RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO( - pPackageName => 'FILE_ARCHIVER', - pVersion => PACKAGE_VERSION, - pBuildDate => PACKAGE_BUILD_DATE, - pAuthor => PACKAGE_AUTHOR - ); - END GET_BUILD_INFO; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION 
GET_VERSION_HISTORY - RETURN VARCHAR2 - IS - BEGIN - RETURN ENV_MANAGER.FORMAT_VERSION_HISTORY( - pPackageName => 'FILE_ARCHIVER', - pVersionHistory => VERSION_HISTORY - ); - END GET_VERSION_HISTORY; - - ---------------------------------------------------------------------------------------------------- - -END; - -/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v2.0.0/FILE_ARCHIVER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v2.0.0/FILE_ARCHIVER.pkg deleted file mode 100644 index 5185456..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v2.0.0/FILE_ARCHIVER.pkg +++ /dev/null @@ -1,90 +0,0 @@ -create or replace PACKAGE CT_MRDS.FILE_ARCHIVER -AUTHID CURRENT_USER -AS - /** - * General comment for package: Please put comments for functions and procedures as shown in below example. - * It is a standard. - * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function - * which returns documentation text for confluence page (to Copy-Paste it). - **/ - - -- Example comment: - /** - * @name EX_PROCEDURE_NAME - * @desc Procedure description - * @example select LOGGING_AND_ERROR_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual; - * @ex_rslt Example Result - **/ - - -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) - PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.0.0'; - PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-10-22 16:45:00'; - PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; - - -- Version History (Latest changes first) - VERSION_HISTORY CONSTANT VARCHAR2(4000) := - '2.0.0 (2025-10-22): Added package versioning system using centralized ENV_MANAGER functions' || CHR(13)||CHR(10) || - '1.5.0 (2025-10-18): Enhanced ARCHIVE_TABLE_DATA with Hive-style partitioning support' || CHR(13)||CHR(10) || - '1.0.0 (2025-09-15): Initial release with table archival and statistics gathering'; - - cgBL CONSTANT VARCHAR2(2) := ENV_MANAGER.cgBL; - - /** - * @name ARCHIVE_TABLE_DATA - * @desc Wrapper 
procedure for DBMS_CLOUD.EXPORT_DATA. - * Exports data from table specified by pSourceFileConfigKey(A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY) into PARQUET file on OCI infrustructure. - * Each YEAR_MONTH pair goes to seperate file (implicit partitioning). - **/ - PROCEDURE ARCHIVE_TABLE_DATA ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ); - - - - /** - * @name GATHER_TABLE_STAT - * @desc Gather info about EXTERNAL TABLE specified by pSourceFileConfigKey parameter (A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY). - * Data is inserted into A_TABLE_STAT and A_TABLE_STAT_HIST. - **/ - PROCEDURE GATHER_TABLE_STAT ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ); - - --------------------------------------------------------------------------------------------------------------------------- - -- PACKAGE VERSION MANAGEMENT FUNCTIONS - --------------------------------------------------------------------------------------------------------------------------- - - /** - * @name GET_VERSION - * @desc Returns the current version number of the FILE_ARCHIVER package. - * Uses semantic versioning format (MAJOR.MINOR.PATCH). - * @example SELECT FILE_ARCHIVER.GET_VERSION() FROM DUAL; - * @ex_rslt 2.0.0 - **/ - FUNCTION GET_VERSION RETURN VARCHAR2; - - /** - * @name GET_BUILD_INFO - * @desc Returns comprehensive build information including version, build date, and author. - * Uses centralized ENV_MANAGER.GET_PACKAGE_VERSION_INFO function. - * @example SELECT FILE_ARCHIVER.GET_BUILD_INFO() FROM DUAL; - * @ex_rslt Package: FILE_ARCHIVER - * Version: 2.0.0 - * Build Date: 2025-10-22 16:45:00 - * Author: Grzegorz Michalski - **/ - FUNCTION GET_BUILD_INFO RETURN VARCHAR2; - - /** - * @name GET_VERSION_HISTORY - * @desc Returns complete version history with all releases and changes. - * Uses centralized ENV_MANAGER.FORMAT_VERSION_HISTORY function. 
- * @example SELECT FILE_ARCHIVER.GET_VERSION_HISTORY() FROM DUAL; - * @ex_rslt FILE_ARCHIVER Version History: - * 2.0.0 (2025-10-22): Added package versioning system... - **/ - FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; - -END; - -/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v3.0.0/FILE_ARCHIVER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v3.0.0/FILE_ARCHIVER.pkb deleted file mode 100644 index 66cd24b..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v3.0.0/FILE_ARCHIVER.pkb +++ /dev/null @@ -1,495 +0,0 @@ -create or replace PACKAGE BODY CT_MRDS.FILE_ARCHIVER -AS - - ---------------------------------------------------------------------------------------------------- - -- PRIVATE FUNCTION: GET_ARCHIVAL_WHERE_CLAUSE - ---------------------------------------------------------------------------------------------------- - /** - * @name GET_ARCHIVAL_WHERE_CLAUSE - * @desc Private function that generates WHERE clause based on ARCHIVAL_STRATEGY configuration. - * Supports four strategies: THRESHOLD_BASED, CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID. 
- * @param pSourceFileConfig - Source file configuration record with ARCHIVAL_STRATEGY
- * @return VARCHAR2 - WHERE clause for filtering archival candidates
- * @raises -20001 when MINIMUM_AGE_MONTHS is required by the strategy but not configured
- * @raises -20002 when ARCHIVAL_STRATEGY holds an unrecognized value
- **/
- FUNCTION GET_ARCHIVAL_WHERE_CLAUSE(
-     pSourceFileConfig IN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE
- ) RETURN VARCHAR2
- IS
-     -- FIX: removed unused local constant cgBL; it was never referenced here and
-     -- shadowed the package-level cgBL constant.
-     vWhereClause VARCHAR2(4000);
- BEGIN
-     -- Each branch builds a predicate (over workflow_start) that is appended to the
-     -- dynamic archival-candidate query by the caller. MINIMUM_AGE_MONTHS comes from
-     -- a NUMBER column, so concatenating it into the clause cannot inject SQL.
-     CASE pSourceFileConfig.ARCHIVAL_STRATEGY
-         -- Legacy threshold-based strategy (backward compatible)
-         WHEN 'THRESHOLD_BASED' THEN
-             vWhereClause := 'extract(day from (systimestamp - workflow_start)) > ' || pSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD;
-
-         -- Archive all data except current month
-         WHEN 'CURRENT_MONTH_ONLY' THEN
-             vWhereClause := 'TRUNC(workflow_start, ''MM'') < TRUNC(SYSDATE, ''MM'')';
-
-         -- Archive only data older than X months (month-aligned cutoff)
-         WHEN 'MINIMUM_AGE_MONTHS' THEN
-             IF pSourceFileConfig.MINIMUM_AGE_MONTHS IS NULL THEN
-                 RAISE_APPLICATION_ERROR(-20001, 'MINIMUM_AGE_MONTHS must be configured for MINIMUM_AGE_MONTHS strategy');
-             END IF;
-             vWhereClause := 'workflow_start < ADD_MONTHS(TRUNC(SYSDATE, ''MM''), -' || pSourceFileConfig.MINIMUM_AGE_MONTHS || ')';
-
-         -- Hybrid: Current month exclusion AND minimum age requirement
-         WHEN 'HYBRID' THEN
-             IF pSourceFileConfig.MINIMUM_AGE_MONTHS IS NULL THEN
-                 RAISE_APPLICATION_ERROR(-20001, 'MINIMUM_AGE_MONTHS must be configured for HYBRID strategy');
-             END IF;
-             vWhereClause := 'TRUNC(workflow_start, ''MM'') < TRUNC(SYSDATE, ''MM'') ' ||
-                             'AND workflow_start < ADD_MONTHS(TRUNC(SYSDATE, ''MM''), -' || pSourceFileConfig.MINIMUM_AGE_MONTHS || ')';
-
-         ELSE
-             RAISE_APPLICATION_ERROR(-20002, 'Invalid ARCHIVAL_STRATEGY: ' || pSourceFileConfig.ARCHIVAL_STRATEGY);
-     END CASE;
-
-     RETURN vWhereClause;
- END GET_ARCHIVAL_WHERE_CLAUSE;
-
- ----------------------------------------------------------------------------------------------------
-
- FUNCTION GET_TABLE_STAT(pSourceFileConfigKey IN NUMBER)
- RETURN CT_MRDS.A_TABLE_STAT%ROWTYPE
- IS
-     vTableStat
CT_MRDS.A_TABLE_STAT%ROWTYPE;
-     vParameters       CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
-     vCount            PLS_INTEGER;
-     vSourceFileType   CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE;
-
- BEGIN
-     -- FIX: sibling procedures format a missing key as the literal 'NULL';
-     -- nvl(to_char(x), NULL) was a no-op and logged an empty value instead.
-     vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL')));
-     ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters);
-
-     -- BUGFIX: the original INNER JOIN from A_TABLE_STAT to A_SOURCE_FILE_CONFIG
-     -- returned no row exactly when no statistics existed yet, leaving
-     -- vSourceFileType NULL, so the vCount = 0 branch below could never fire and
-     -- stats were never gathered for a fresh INPUT source. Drive the query from the
-     -- config table and LEFT JOIN the stats: the file type is then known even when
-     -- vCount = 0 (COUNT over the stat-side key counts only matched stat rows).
-     SELECT count(s.A_SOURCE_FILE_CONFIG_KEY), min(c.SOURCE_FILE_TYPE)
-       INTO vCount, vSourceFileType
-       FROM CT_MRDS.A_SOURCE_FILE_CONFIG c
-       LEFT JOIN CT_MRDS.A_TABLE_STAT s
-         ON s.A_SOURCE_FILE_CONFIG_KEY = c.A_SOURCE_FILE_CONFIG_KEY
-      WHERE c.A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey;
-
-     -- Lazily gather statistics the first time an INPUT source is queried.
-     IF vCount = 0 and vSourceFileType = 'INPUT' THEN
-         GATHER_TABLE_STAT(pSourceFileConfigKey);
-     END IF;
-
-     -- Raises NO_DATA_FOUND if statistics are still missing (e.g. non-INPUT source);
-     -- removed a commented-out dead exception block that swallowed nothing.
-     SELECT *
-       INTO vTableStat
-       FROM CT_MRDS.A_TABLE_STAT
-      WHERE A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey;
-
-     ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters);
-     RETURN vTableStat;
-
- END GET_TABLE_STAT;
-
- ----------------------------------------------------------------------------------------------------
-
- PROCEDURE ARCHIVE_TABLE_DATA (
-     pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
- )
- IS
-     vSourceFileConfig     CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE;
-     vTableStat            CT_MRDS.A_TABLE_STAT%ROWTYPE;
-     vQuery                VARCHAR2(4000);
-     vTableName            VARCHAR2(200);
-     vUri                  VARCHAR2(1000);
-     vfiles                T_FILENAMES;
-     vFilename             VARCHAR2(300);
-     vOperationId          NUMBER := -1;
-     vParameters           CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
-     vArchivalTriggeredBy  VARCHAR2(60); -- Possible values: FILES_COUNT, ROWS_COUNT, BYTES_SUM
-     vUserLoadOperations   USER_LOAD_OPERATIONS%ROWTYPE;
-     vProcessControlStatus VARCHAR2(60) := 'OK';
-
- BEGIN
-     vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL')));
-     ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
- - vSourceFileConfig := FILE_MANAGER.GET_SOURCE_FILE_CONFIG(pSourceFileConfigKey => pSourceFileConfigKey); - vTableStat := GET_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - - if vSourceFileConfig.SOURCE_FILE_TYPE <> 'INPUT' then - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE, 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NOT_INPUT_SOURCE_FILE_TYPE, ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE); - end if; - - if vTableStat.created < sysdate-(vSourceFileConfig.HOURS_TO_EXPIRE_STATISTICS/24) then - GATHER_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - vTableStat := GET_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey); - end if; - - if vTableStat.OVER_ARCH_THRESOLD_FILE_COUNT >= vSourceFileConfig.FILES_COUNT_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := 'FILES_COUNT'; - elsif vTableStat.OVER_ARCH_THRESOLD_ROW_COUNT >= vSourceFileConfig.ROWS_COUNT_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := vArchivalTriggeredBy||', ROWS_COUNT'; - elsif vTableStat.OVER_ARCH_THRESOLD_SIZE >= vSourceFileConfig.BYTES_SUM_OVER_ARCHIVE_THRESHOLD then vArchivalTriggeredBy := vArchivalTriggeredBy||', BYTES_SUM'; - else ENV_MANAGER.LOG_PROCESS_EVENT('Non of archival triggers reached','INFO'); - end if; - - if LENGTH(vArchivalTriggeredBy)>0 THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Archival Triggered By: '||vArchivalTriggeredBy,'INFO'); - vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS'; - - -- Use strategy-based WHERE clause (MARS-828) - vQuery := ' - select t_filename( - file$name - ,file$path - , to_char(h.workflow_start,''yyyy'') - , to_char(h.workflow_start,''mm'') - ) - - from '||vTableName||' s - join CT_MRDS.a_workflow_history h - on s.a_workflow_history_key = h.a_workflow_history_key - where ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig) - ; - - -- Get all files 
that will be archived into "vfiles" collection ("regular data files") - execute immediate vQuery bulk collect into vfiles; - - -- Start EXPORT "regular data files" to parquet and DROP "csv" - FOR ym_loop IN (select distinct year, month from table(vfiles) order by 1,2) LOOP - dbms_output.put_line('year: '||ym_loop.year||' - '||'month: '||ym_loop.month); - vQuery:= - 'select - s.* --- ,r.partition_year --- ,r.partition_month - from '|| vTableName ||' s - join CT_MRDS.A_SOURCE_FILE_RECEIVED r - on s.file$name = r.source_file_name - and r.a_source_file_config_key = '||pSourceFileConfigKey||' - and r.partition_year='''||ym_loop.year||''' - and r.partition_month='''||ym_loop.month||''' - and r.PROCESSING_STATUS = ''INGESTED'' - ' - ; - vUri := FILE_MANAGER.GET_BUCKET_URI('ARCHIVE')||vSourceFileConfig.A_SOURCE_KEY||'/'||vSourceFileConfig.TABLE_ID||'/PARTITION_YEAR='||ym_loop.year||'/PARTITION_MONTH='||ym_loop.month||'/'; - - ENV_MANAGER.LOG_PROCESS_EVENT('Start Archiving for YEAR_MONTH: '||ym_loop.year||'_'||ym_loop.month ,'INFO'); - ENV_MANAGER.LOG_PROCESS_EVENT('Parameter for DBMS_CLOUD.EXPORT_DATA => file_uri_list' ,'DEBUG',vUri); - ENV_MANAGER.LOG_PROCESS_EVENT('Parameter for DBMS_CLOUD.EXPORT_DATA => query' ,'DEBUG',vQuery); - - - - BEGIN - DBMS_CLOUD.EXPORT_DATA( - credential_name => ENV_MANAGER.gvCredentialName, - file_uri_list => vUri||'d' , - format => json_object('type' value 'parquet'), - query => vQuery, - operation_id => vOperationId - ); - EXCEPTION - WHEN OTHERS THEN - vProcessControlStatus :='EXPORT_FAILURE'; - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED); - - END; - - ENV_MANAGER.LOG_PROCESS_EVENT('vOperationId of export: '||vOperationId,'DEBUG'); - - -- Get USER_LOAD_OPERATIONS info - select * - into vUserLoadOperations - from USER_LOAD_OPERATIONS - where id = 
vOperationId; - - IF vUserLoadOperations.STATUS <>'COMPLETED' THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, - ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED ||cgBL|| ' Export ended with status '||vUserLoadOperations.STATUS); - ELSIF vUserLoadOperations.STATUS = 'COMPLETED' and vUserLoadOperations.ROWS_LOADED = 0 THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, - ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED ||cgBL|| ' Zero rows were exported.'); - ELSE - ENV_MANAGER.LOG_PROCESS_EVENT('Data exported to archival file for YEAR_MONTH: 2025_01','INFO', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(FILE_MANAGER.GET_DET_USER_LOAD_OPERATIONS (pOperationId => vOperationId),'DEBUG', vParameters); - END IF; - - SELECT - object_name - into vFilename - from DBMS_CLOUD.LIST_OBJECTS( - credential_name => 'OCI$RESOURCE_PRINCIPAL', - location_uri => vUri) - where TO_UTC_TIMESTAMP_TZ(REGEXP_REPLACE(object_name, '.*(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})(\d{2})(\d{6})Z\.parquet$', '\1-\2-\3T\4:\5:\6.\7')) - between vUserLoadOperations.START_TIME - and vUserLoadOperations.UPDATE_TIME - ; - - -- Try to drop EXPORTED FILES ("regular data files") - BEGIN - FOR f in (select filename, pathname from table(vfiles) where year = ym_loop.year and month = ym_loop.month) loop - - -- first change of status - BEGIN - UPDATE CT_MRDS.A_SOURCE_FILE_RECEIVED r - SET PROCESSING_STATUS = 'ARCHIVED' - ,ARCH_FILE_NAME = vUri||vFilename - WHERE r.a_source_file_config_key= pSourceFileConfigKey - AND r.source_file_name = f.filename - AND r.processing_status = 'INGESTED' - ; - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - vProcessControlStatus := 'CHANGE_STATUS_TO_ARCHIVED_FAILURE'; - END; - EXIT WHEN vProcessControlStatus = 'CHANGE_STATUS_TO_ARCHIVED_FAILURE'; - - -- move file to trash before dropping - BEGIN - DBMS_CLOUD.MOVE_OBJECT(source_credential_name => 
ENV_MANAGER.gvCredentialName, - source_object_uri => f.pathname||'/'||f.filename, - target_object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename, - target_credential_name => ENV_MANAGER.gvCredentialName - ); - ENV_MANAGER.LOG_PROCESS_EVENT('File moved to TRASH.','DEBUG', f.pathname||'/'||f.filename); - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Failed to move file to TRASH.','ERROR', f.pathname||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - rollback; - vProcessControlStatus := 'MOVE_FILE_TO_TRASH_FAILURE'; - END; - EXIT WHEN vProcessControlStatus = 'MOVE_FILE_TO_TRASH_FAILURE'; - commit; - END LOOP; - - -------------------------------------------------------------------- - -- IF All goes fine till this point, we drop files from TRASH (if not then ROLLBACK PART) - IF vProcessControlStatus = 'OK' THEN - FOR f in (select filename, pathname from table(vfiles) where year = ym_loop.year and month = ym_loop.month) LOOP - --Drop file from TRASH - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT('File dropped from TRASH.','DEBUG', f.pathname||'/'||f.filename); - END LOOP; - - --ROLLBACK PART - --ROLLBACK PROCESS in case of FAILURE (restore files from TRASH) - ELSIF vProcessControlStatus = 'MOVE_FILE_TO_TRASH_FAILURE' THEN - FOR f in ( SELECT vf.filename, vf.pathname - FROM TABLE(vfiles) vf - JOIN CT_MRDS.A_SOURCE_FILE_RECEIVED r - ON r.source_file_name = vf.filename - AND r.a_source_file_config_key = pSourceFileConfigKey - AND r.PROCESSING_STATUS = 'ARCHIVED' - AND vf.year = ym_loop.year - AND vf.month = ym_loop.month - ) LOOP - BEGIN - DBMS_CLOUD.MOVE_OBJECT(source_credential_name => ENV_MANAGER.gvCredentialName, - source_object_uri => replace(f.pathname,'ODS','TRASH')||'/'||f.filename, - target_object_uri => 
f.pathname||'/'||f.filename, - target_credential_name => ENV_MANAGER.gvCredentialName - ); - ENV_MANAGER.LOG_PROCESS_EVENT('File restored from TRASH.','DEBUG', f.pathname||'/'||f.filename); - - UPDATE CT_MRDS.A_SOURCE_FILE_RECEIVED r - SET PROCESSING_STATUS = 'INGESTED' - ,ARCH_FILE_NAME = NULL - WHERE r.a_source_file_config_key = pSourceFileConfigKey - AND r.source_file_name = f.filename - ; - - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Failed to restore file from TRASH.','ERROR', replace(f.pathname,'ODS','TRASH')||'/'||f.filename); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - vProcessControlStatus := 'RESTORE_FILE_FROM_TRASH_FAILURE'; - END; - END LOOP; - - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => vUri||vFilename); - ENV_MANAGER.LOG_PROCESS_EVENT('ROLLBACK operation: Archival PARQUET file dropped.','DEBUG', vUri||vFilename); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.MSG_MOVE_FILE_TO_TRASH_FAILED); - - ELSIF vProcessControlStatus = 'CHANGE_STATUS_TO_ARCHIVED_FAILURE' THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED, 'ERROR', vParameters); - DBMS_CLOUD.DELETE_OBJECT(credential_name => ENV_MANAGER.gvCredentialName, - object_uri => vUri||vFilename); - ENV_MANAGER.LOG_PROCESS_EVENT('Archival PARQUET file dropped.','DEBUG', vUri||vFilename); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); - - ELSIF vProcessControlStatus = 'RESTORE_FILE_FROM_TRASH_FAILURE' THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Some files were not restored from TRASH. 
Check A_PROCESS_LOG table for details','ERROR'); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_RESTORE_FILE_FROM_TRASH, ENV_MANAGER.MSG_RESTORE_FILE_FROM_TRASH); - END IF; - - EXCEPTION - WHEN ENV_MANAGER.ERR_CHANGE_STAT_TO_ARCHIVED_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); - - WHEN ENV_MANAGER.ERR_MOVE_FILE_TO_TRASH_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.MSG_MOVE_FILE_TO_TRASH_FAILED); - - WHEN ENV_MANAGER.ERR_RESTORE_FILE_FROM_TRASH THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_RESTORE_FILE_FROM_TRASH, ENV_MANAGER.MSG_RESTORE_FILE_FROM_TRASH); - - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Error during archiving process','ERROR'); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DROP_EXPORTED_FILES_FAILED, ENV_MANAGER.MSG_DROP_EXPORTED_FILES_FAILED); - END; - -- END of "Try to drop EXPORTED FILES" - - ENV_MANAGER.LOG_PROCESS_EVENT('All archived files had been dropped.','INFO'); - ENV_MANAGER.LOG_PROCESS_EVENT('End Archiving for YEAR_MONTH: '||ym_loop.year||'_'||ym_loop.month ,'INFO'); - END LOOP; --ym_loop end (YEAR_MONTH) - - COMMIT; - - ELSE - ENV_MANAGER.LOG_PROCESS_EVENT('Non of archival thresholds reached. 
Skip archiving.'||vArchivalTriggeredBy,'INFO'); - END IF; - - ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); - EXCEPTION - WHEN ENV_MANAGER.ERR_NOT_INPUT_SOURCE_FILE_TYPE THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NOT_INPUT_SOURCE_FILE_TYPE , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NOT_INPUT_SOURCE_FILE_TYPE, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_EXP_DATA_FOR_ARCH_FAILED THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_EXP_DATA_FOR_ARCH_FAILED , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXP_DATA_FOR_ARCH_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_CHANGE_STAT_TO_ARCHIVED_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN ENV_MANAGER.ERR_MOVE_FILE_TO_TRASH_FAILED THEN - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MOVE_FILE_TO_TRASH_FAILED, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - - END ARCHIVE_TABLE_DATA; - - ---------------------------------------------------------------------------------------------------- - - PROCEDURE GATHER_TABLE_STAT ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ) IS - vParameters 
CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; - vStats CT_MRDS.A_TABLE_STAT%ROWTYPE; - vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; - vTableName VARCHAR2(200); - vQuery VARCHAR2(32000); - BEGIN - vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL'))); - ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); - vSourceFileConfig := FILE_MANAGER.GET_SOURCE_FILE_CONFIG(pSourceFileConfigKey => pSourceFileConfigKey); - - vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS'; - ENV_MANAGER.LOG_PROCESS_EVENT('vTableName','DEBUG',vTableName); - - -- Use strategy-based WHERE clause for statistics (MARS-828) - vQuery := - 'with tmp as ( - select - s.* - ,file$name as filename - ,h.workflow_start - , to_char(h.workflow_start,''yyyy'') as year - , to_char(h.workflow_start,''mm'') as month - from '||vTableName||' s - join CT_MRDS.a_workflow_history h - on s.a_workflow_history_key = h.a_workflow_history_key - ) - , tmp_gr as ( - select - filename, count(*) as row_count_per_file, min(workflow_start) as workflow_start - from tmp - group by filename - ) - select - NULL as A_TABLE_STAT_KEY - ,'||pSourceFileConfigKey||' as A_SOURCE_FILE_CONFIG_KEY - ,'''||vTableName||''' as TABLE_NAME - ,count(*) as FILE_COUNT - ,sum(case when ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig) || ' then 1 else 0 end) as OLD_FILE_COUNT - ,sum (row_count_per_file) as ROW_COUNT - ,sum(case when ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig) || ' then row_count_per_file else 0 end) as OLD_ROW_COUNT - ,sum(r.bytes) as BYTES - ,sum(case when ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig) || ' then r.bytes else 0 end) as OLD_BYTES - ,'||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' as DAYS_FOR_ARCHIVE_THRESHOLD - ,systimestamp as CREATED - from tmp_gr t - join (SELECT * from 
DBMS_CLOUD.LIST_OBJECTS( - credential_name => '''||ENV_MANAGER.gvCredentialName||''', - location_uri => FILE_MANAGER.GET_BUCKET_URI(''ODS'')||''ODS/'||vSourceFileConfig.A_SOURCE_KEY||'/'||vSourceFileConfig.TABLE_ID||'/'' - ) - ) r - on t.filename = r.object_name' - ; - ENV_MANAGER.LOG_PROCESS_EVENT('vQuery','DEBUG',vQuery); - execute immediate vQuery into vStats; - - vStats.A_TABLE_STAT_KEY := CT_MRDS.A_TABLE_STAT_KEY_SEQ.NEXTVAL; - insert into A_TABLE_STAT_HIST values vStats; - delete from A_TABLE_STAT where A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; - insert into A_TABLE_STAT values vStats; - COMMIT; - - ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); - EXCEPTION - WHEN OTHERS THEN - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); - END GATHER_TABLE_STAT; - - ---------------------------------------------------------------------------------------------------- - -- PACKAGE VERSION MANAGEMENT FUNCTIONS IMPLEMENTATION - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_VERSION - RETURN VARCHAR2 - IS - BEGIN - RETURN PACKAGE_VERSION; - END GET_VERSION; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_BUILD_INFO - RETURN VARCHAR2 - IS - BEGIN - RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO( - pPackageName => 'FILE_ARCHIVER', - pVersion => PACKAGE_VERSION, - pBuildDate => PACKAGE_BUILD_DATE, - pAuthor => PACKAGE_AUTHOR - ); - END GET_BUILD_INFO; - - ---------------------------------------------------------------------------------------------------- - - FUNCTION GET_VERSION_HISTORY - RETURN VARCHAR2 - IS - BEGIN - RETURN 
ENV_MANAGER.FORMAT_VERSION_HISTORY( - pPackageName => 'FILE_ARCHIVER', - pVersionHistory => VERSION_HISTORY - ); - END GET_VERSION_HISTORY; - - ---------------------------------------------------------------------------------------------------- - -END; - -/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v3.0.0/FILE_ARCHIVER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v3.0.0/FILE_ARCHIVER.pkg deleted file mode 100644 index e7267d9..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback/v3.0.0/FILE_ARCHIVER.pkg +++ /dev/null @@ -1,91 +0,0 @@ -create or replace PACKAGE CT_MRDS.FILE_ARCHIVER -AUTHID CURRENT_USER -AS - /** - * General comment for package: Please put comments for functions and procedures as shown in below example. - * It is a standard. - * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function - * which returns documentation text for confluence page (to Copy-Paste it). - **/ - - -- Example comment: - /** - * @name EX_PROCEDURE_NAME - * @desc Procedure description - * @example select LOGGING_AND_ERROR_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual; - * @ex_rslt Example Result - **/ - - -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) - PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.0.0'; - PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-01-27 12:00:00'; - PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; - - -- Version History (Latest changes first) - VERSION_HISTORY CONSTANT VARCHAR2(4000) := - '3.0.0 (2026-01-27): MARS-828 - Added flexible archival strategies (CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID) via ARCHIVAL_STRATEGY configuration' || CHR(13)||CHR(10) || - '2.0.0 (2025-10-22): Added package versioning system using centralized ENV_MANAGER functions' || CHR(13)||CHR(10) || - '1.5.0 (2025-10-18): Enhanced ARCHIVE_TABLE_DATA with Hive-style partitioning support' || CHR(13)||CHR(10) || - '1.0.0 (2025-09-15): Initial release with table archival and statistics 
gathering'; - - cgBL CONSTANT VARCHAR2(2) := ENV_MANAGER.cgBL; - - /** - * @name ARCHIVE_TABLE_DATA - * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. - * Exports data from table specified by pSourceFileConfigKey(A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY) into PARQUET file on OCI infrustructure. - * Each YEAR_MONTH pair goes to seperate file (implicit partitioning). - **/ - PROCEDURE ARCHIVE_TABLE_DATA ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ); - - - - /** - * @name GATHER_TABLE_STAT - * @desc Gather info about EXTERNAL TABLE specified by pSourceFileConfigKey parameter (A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY). - * Data is inserted into A_TABLE_STAT and A_TABLE_STAT_HIST. - **/ - PROCEDURE GATHER_TABLE_STAT ( - pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE - ); - - --------------------------------------------------------------------------------------------------------------------------- - -- PACKAGE VERSION MANAGEMENT FUNCTIONS - --------------------------------------------------------------------------------------------------------------------------- - - /** - * @name GET_VERSION - * @desc Returns the current version number of the FILE_ARCHIVER package. - * Uses semantic versioning format (MAJOR.MINOR.PATCH). - * @example SELECT FILE_ARCHIVER.GET_VERSION() FROM DUAL; - * @ex_rslt 2.0.0 - **/ - FUNCTION GET_VERSION RETURN VARCHAR2; - - /** - * @name GET_BUILD_INFO - * @desc Returns comprehensive build information including version, build date, and author. - * Uses centralized ENV_MANAGER.GET_PACKAGE_VERSION_INFO function. - * @example SELECT FILE_ARCHIVER.GET_BUILD_INFO() FROM DUAL; - * @ex_rslt Package: FILE_ARCHIVER - * Version: 2.0.0 - * Build Date: 2025-10-22 16:45:00 - * Author: Grzegorz Michalski - **/ - FUNCTION GET_BUILD_INFO RETURN VARCHAR2; - - /** - * @name GET_VERSION_HISTORY - * @desc Returns complete version history with all releases and changes. 
- * Uses centralized ENV_MANAGER.FORMAT_VERSION_HISTORY function. - * @example SELECT FILE_ARCHIVER.GET_VERSION_HISTORY() FROM DUAL; - * @ex_rslt FILE_ARCHIVER Version History: - * 2.0.0 (2025-10-22): Added package versioning system... - **/ - FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; - -END; - -/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback_mars828.sql b/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback_mars828.sql deleted file mode 100644 index 86bd778..0000000 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/rollback_mars828.sql +++ /dev/null @@ -1,114 +0,0 @@ --- =================================================================== --- MARS-828: Enhanced Archival Strategies Rollback --- =================================================================== --- Purpose: Rollback all changes from MARS-828 installation --- Author: Grzegorz Michalski --- Date: 2026-01-27 - --- Dynamic spool file generation (using SYS_CONTEXT - no DBA privileges required) --- IMPORTANT: Ensure log/ directory exists before SPOOL (use host mkdir) -host mkdir log 2>nul - -var filename VARCHAR2(100) -BEGIN - :filename := 'log/ROLLBACK_MARS_828_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; -END; -/ -column filename new_value _filename -select :filename filename from dual; -spool &_filename - -SET ECHO OFF -SET TIMING ON -SET SERVEROUTPUT ON SIZE UNLIMITED -SET LINESIZE 200 -SET PAGESIZE 1000 -SET PAUSE OFF - -PROMPT -PROMPT ============================================================================ -PROMPT MARS-828 Rollback Starting -PROMPT ============================================================================ -PROMPT WARNING: This will restore FILE_ARCHIVER to v2.0.0 -PROMPT -PROMPT CRITICAL IMPACT: -PROMPT 1. All archival strategies revert to THRESHOLD_BASED -PROMPT 2. ARCHIVAL_STRATEGY and MINIMUM_AGE_MONTHS columns will be dropped -PROMPT 3. Validation trigger will be removed -PROMPT 4. 
Reconfigure archival thresholds after rollback -PROMPT -PROMPT Timestamp: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS rollback_start FROM DUAL; -PROMPT ============================================================================ - --- Confirm rollback with user -ACCEPT continue CHAR PROMPT 'Type YES to continue with rollback, or Ctrl+C to abort: ' -WHENEVER SQLERROR EXIT SQL.SQLCODE -BEGIN - IF '&continue' IS NULL OR TRIM('&continue') IS NULL OR UPPER(TRIM('&continue')) != 'YES' THEN - RAISE_APPLICATION_ERROR(-20001, 'Rollback aborted by user'); - END IF; -END; -/ -WHENEVER SQLERROR CONTINUE - --- Rollback steps (in reverse order) -PROMPT -PROMPT Step 1/6: Restoring FILE_ARCHIVER Package Specification v2.0.0 -PROMPT =============================================================== -@@91_MARS_828_rollback_FILE_ARCHIVER_SPEC.sql - -PROMPT -PROMPT Step 2/6: Restoring FILE_ARCHIVER Package Body v2.0.0 -PROMPT ====================================================== -@@92_MARS_828_rollback_FILE_ARCHIVER_BODY.sql - -PROMPT -PROMPT Step 3/6: Dropping validation trigger -PROMPT ====================================== -@@93_MARS_828_rollback_trigger.sql - -PROMPT -PROMPT Step 4/6: Dropping archival strategy columns -PROMPT ============================================= -@@94_MARS_828_rollback_columns.sql - -PROMPT -PROMPT Step 5/6: Tracking rollback version -PROMPT ==================================== -@@track_package_versions.sql - -PROMPT -PROMPT Step 6/6: Verifying tracked packages -PROMPT ===================================== -@@verify_packages_version.sql - --- Verify rollback -PROMPT -PROMPT Verification: Package Compilation Status -PROMPT ========================================= -SELECT object_name, object_type, status, last_ddl_time -FROM all_objects -WHERE owner = 'CT_MRDS' - AND object_name = 'FILE_ARCHIVER' - AND object_type IN ('PACKAGE', 'PACKAGE BODY') -ORDER BY object_type; - -PROMPT -PROMPT 
============================================================================ -PROMPT MARS-828 Rollback Completed -PROMPT ============================================================================ -PROMPT Completion Time: -SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS rollback_end FROM DUAL; -PROMPT -PROMPT Rollback Summary: -PROMPT - Package: CT_MRDS.FILE_ARCHIVER -PROMPT - Restored Version: 2.0.0 (THRESHOLD_BASED archival only) -PROMPT - Removed Features: CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID strategies -PROMPT -PROMPT Log file: &_filename -PROMPT ============================================================================ - -spool off - -quit; diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826-PREHOOK/new_version/DATA_EXPORTER.pkg b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826-PREHOOK/new_version/DATA_EXPORTER.pkg index 9de3821..a6dfefb 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826-PREHOOK/new_version/DATA_EXPORTER.pkg +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826-PREHOOK/new_version/DATA_EXPORTER.pkg @@ -16,7 +16,6 @@ AS -- Version History (last 3-5 changes) VERSION_HISTORY CONSTANT VARCHAR2(4000) := 'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY, added consistent column mapping and dynamic column list to EXPORT_TABLE_DATA procedure, enhanced DEBUG logging for all export operations' || CHR(10) || - 'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY' || CHR(10) || 'v2.1.0 (2025-10-22): Added version tracking and PARTITION_YEAR/PARTITION_MONTH support' || CHR(10) || 'v2.0.0 (2025-10-01): Separated export functionality from FILE_MANAGER package' || CHR(10) || 'v1.0.0 (2025-09-15): Initial implementation within FILE_MANAGER package' || CHR(10); diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/01_MARS_826_export_ADHOC_ADJ_tables.sql 
b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/01_MARS_826_export_ADHOC_ADJ_tables.sql index 6a447c8..81183c5 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/01_MARS_826_export_ADHOC_ADJ_tables.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/01_MARS_826_export_ADHOC_ADJ_tables.sql @@ -23,7 +23,8 @@ BEGIN pTableName => 'LEGACY_ADHOC_ADJ_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_HEADER' + pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_HEADER', + pParallelDegree => 1 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ADHOC_ADJ_HEADER exported'); EXCEPTION @@ -42,7 +43,8 @@ BEGIN pTableName => 'LEGACY_ADHOC_ADJ_ITEM', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_ITEM' + pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_ITEM', + pParallelDegree => 1 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ADHOC_ADJ_ITEM exported'); EXCEPTION @@ -61,7 +63,8 @@ BEGIN pTableName => 'LEGACY_ADHOC_ADJ_ITEM_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_ITEM_HEADER' + pFolderName => 'ARCHIVE/LM/LM_ADHOC_ADJUSTMENTS_ITEM_HEADER', + pParallelDegree => 1 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ADHOC_ADJ_ITEM_HEADER exported'); EXCEPTION diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/02_MARS_826_export_BALANCESHEET_tables.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/02_MARS_826_export_BALANCESHEET_tables.sql index 7269c80..bd9a561 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/02_MARS_826_export_BALANCESHEET_tables.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/02_MARS_826_export_BALANCESHEET_tables.sql @@ -28,7 +28,8 @@ BEGIN pTableName => 'LEGACY_BALANCESHEET_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_KEY', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_BALANCESHEET_HEADER' + pFolderName => 
'ARCHIVE/LM/LM_BALANCESHEET_HEADER', + pParallelDegree => 4 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_BALANCESHEET_HEADER exported'); EXCEPTION @@ -47,7 +48,8 @@ BEGIN pTableName => 'LEGACY_BALANCESHEET_ITEM', pKeyColumnName => 'A_ETL_LOAD_SET_KEY', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_BALANCESHEET_ITEM' + pFolderName => 'ARCHIVE/LM/LM_BALANCESHEET_ITEM', + pParallelDegree => 16 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_BALANCESHEET_ITEM exported'); EXCEPTION diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/03_MARS_826_export_CSM_ADJ_tables.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/03_MARS_826_export_CSM_ADJ_tables.sql index 81c0321..8a9453d 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/03_MARS_826_export_CSM_ADJ_tables.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/03_MARS_826_export_CSM_ADJ_tables.sql @@ -23,7 +23,8 @@ BEGIN pTableName => 'LEGACY_CSM_ADJ_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_HEADER' + pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_HEADER', + pParallelDegree => 1 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_CSM_ADJ_HEADER exported'); EXCEPTION @@ -42,7 +43,8 @@ BEGIN pTableName => 'LEGACY_CSM_ADJ_ITEM', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_ITEM' + pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_ITEM', + pParallelDegree => 2 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_CSM_ADJ_ITEM exported'); EXCEPTION @@ -61,7 +63,8 @@ BEGIN pTableName => 'LEGACY_CSM_ADJ_ITEM_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_ITEM_HEADER' + pFolderName => 'ARCHIVE/LM/LM_CSM_ADJUSTMENTS_ITEM_HEADER', + pParallelDegree => 2 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_CSM_ADJ_ITEM_HEADER exported'); EXCEPTION diff --git 
a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/04_MARS_826_export_STANDING_FACILITY_tables.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/04_MARS_826_export_STANDING_FACILITY_tables.sql index cd20dde..f1931c5 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/04_MARS_826_export_STANDING_FACILITY_tables.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/04_MARS_826_export_STANDING_FACILITY_tables.sql @@ -28,7 +28,8 @@ BEGIN pTableName => 'LEGACY_STANDING_FACILITY', pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_STANDING_FACILITIES' + pFolderName => 'ARCHIVE/LM/LM_STANDING_FACILITIES', + pParallelDegree => 8 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_STANDING_FACILITY exported'); EXCEPTION @@ -47,7 +48,8 @@ BEGIN pTableName => 'LEGACY_STANDING_FACILITY_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_STANDING_FACILITIES_HEADER' + pFolderName => 'ARCHIVE/LM/LM_STANDING_FACILITIES_HEADER', + pParallelDegree => 2 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_STANDING_FACILITY_HEADER exported'); EXCEPTION diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/05_MARS_826_export_MRR_IND_CURRENT_ACCOUNT_tables.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/05_MARS_826_export_MRR_IND_CURRENT_ACCOUNT_tables.sql index 9bb569a..5ae57de 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/05_MARS_826_export_MRR_IND_CURRENT_ACCOUNT_tables.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/05_MARS_826_export_MRR_IND_CURRENT_ACCOUNT_tables.sql @@ -24,7 +24,8 @@ BEGIN pTableName => 'LEGACY_MRR_IND_CURRENT_ACCOUNT_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_KEY', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_CURRENT_ACCOUNTS_HEADER' + pFolderName => 'ARCHIVE/LM/LM_CURRENT_ACCOUNTS_HEADER', + pParallelDegree => 2 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_MRR_IND_CURRENT_ACCOUNT_HEADER exported'); EXCEPTION @@ -43,7 +44,8 @@ BEGIN 
pTableName => 'LEGACY_MRR_IND_CURRENT_ACCOUNT_ITEM', pKeyColumnName => 'A_ETL_LOAD_SET_KEY', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_CURRENT_ACCOUNTS_ITEM' + pFolderName => 'ARCHIVE/LM/LM_CURRENT_ACCOUNTS_ITEM', + pParallelDegree => 16 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_MRR_IND_CURRENT_ACCOUNT_ITEM exported'); EXCEPTION diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/06_MARS_826_export_FORECAST_tables.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/06_MARS_826_export_FORECAST_tables.sql index 2c3c791..3cd4a8d 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/06_MARS_826_export_FORECAST_tables.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/06_MARS_826_export_FORECAST_tables.sql @@ -28,7 +28,8 @@ BEGIN pTableName => 'LEGACY_FORECAST_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_FORECAST_HEADER' + pFolderName => 'ARCHIVE/LM/LM_FORECAST_HEADER', + pParallelDegree => 4 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_FORECAST_HEADER exported'); EXCEPTION @@ -47,7 +48,8 @@ BEGIN pTableName => 'LEGACY_FORECAST_ITEM', pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_FORECAST_ITEM' + pFolderName => 'ARCHIVE/LM/LM_FORECAST_ITEM', + pParallelDegree => 16 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_FORECAST_ITEM exported'); EXCEPTION diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/07_MARS_826_export_QR_ADJ_tables.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/07_MARS_826_export_QR_ADJ_tables.sql index e07db41..1767964 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/07_MARS_826_export_QR_ADJ_tables.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/07_MARS_826_export_QR_ADJ_tables.sql @@ -23,7 +23,8 @@ BEGIN pTableName => 'LEGACY_QR_ADJ_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_HEADER' + pFolderName => 
'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_HEADER', + pParallelDegree => 1 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_QR_ADJ_HEADER exported'); EXCEPTION @@ -42,7 +43,8 @@ BEGIN pTableName => 'LEGACY_QR_ADJ_ITEM', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_ITEM' + pFolderName => 'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_ITEM', + pParallelDegree => 4 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_QR_ADJ_ITEM exported'); EXCEPTION @@ -61,7 +63,8 @@ BEGIN pTableName => 'LEGACY_QR_ADJ_ITEM_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_ITEM_HEADER' + pFolderName => 'ARCHIVE/LM/LM_QRE_ADJUSTMENTS_ITEM_HEADER', + pParallelDegree => 2 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_QR_ADJ_ITEM_HEADER exported'); EXCEPTION diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/08_MARS_826_export_TTS_tables.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/08_MARS_826_export_TTS_tables.sql index d954fae..6dfc97f 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/08_MARS_826_export_TTS_tables.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-826/08_MARS_826_export_TTS_tables.sql @@ -23,7 +23,8 @@ BEGIN pTableName => 'LEGACY_TTS_HEADER', pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_TTS_HEADER' + pFolderName => 'ARCHIVE/LM/LM_TTS_HEADER', + pParallelDegree => 1 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_TTS_HEADER exported'); EXCEPTION @@ -42,7 +43,8 @@ BEGIN pTableName => 'LEGACY_TTS_ITEM', pKeyColumnName => 'A_ETL_LOAD_SET_FK', pBucketArea => 'ARCHIVE', - pFolderName => 'ARCHIVE/LM/LM_TTS_ITEM' + pFolderName => 'ARCHIVE/LM/LM_TTS_ITEM', + pParallelDegree => 1 ); DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_TTS_ITEM exported'); EXCEPTION diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/README.md b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/README.md deleted file mode 
100644 index 9822832..0000000 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# MARS-835-PREHOOK: Parallel Processing for DATA_EXPORTER - -## Overview -Implements parallel partition processing for DATA_EXPORTER package using **DBMS_PARALLEL_EXECUTE** framework. - -## Changes Summary - -### ENV_MANAGER v3.1.0 → v3.2.0 -- Added `CODE_INVALID_PARALLEL_DEGREE` (-20110) error code -- Added `CODE_PARALLEL_EXECUTION_FAILED` (-20111) error code -- Added corresponding message constants and exception declarations - -### DATA_EXPORTER v2.2.0 → v2.3.0 -- Added `pParallelDegree` parameter to `EXPORT_TABLE_DATA_BY_DATE` (default: 1, range: 1-16) -- Added `pParallelDegree` parameter to `EXPORT_TABLE_DATA_TO_CSV_BY_DATE` (default: 1, range: 1-16) -- Implemented `EXPORT_PARTITION_PARALLEL` callback procedure for DBMS_PARALLEL_EXECUTE -- Created global temporary table `A_PARALLEL_EXPORT_CHUNKS` for chunk management -- Sequential processing when `pParallelDegree = 1` (default - safest option) -- Parallel processing via DBMS_PARALLEL_EXECUTE when `pParallelDegree > 1` -- Automatic error detection and reporting through `USER_PARALLEL_EXECUTE_CHUNKS` - -## Installation - -### Prerequisites -- Oracle Database 23ai or higher (DBMS_PARALLEL_EXECUTE support) -- ADMIN privileges for table creation -- CT_MRDS schema for package deployment - -### Installation Command -```powershell -cd .\MARS_Packages\REL01_POST_DEACTIVATION\MARS-835-PREHOOK -echo "YES" | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835_prehook.sql" -``` - -## Usage Examples - -### Parallel Export (8 threads) -```sql -BEGIN - CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE( - pSchemaName => 'OU_TOP', - pTableName => 'AGGREGATED_ALLOTMENT', - pKeyColumnName => 'A_WORKFLOW_HISTORY_KEY', - pBucketArea => 'ARCHIVE', - pFolderName => 'parallel_export', - pMinDate => DATE '2020-01-01', - pMaxDate => SYSDATE, - pParallelDegree => 8 - ); -END; -/ -``` - -### Sequential 
Export -```sql -BEGIN - CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE( - pSchemaName => 'OU_TOP', - pTableName => 'AGGREGATED_ALLOTMENT', - pKeyColumnName => 'A_WORKFLOW_HISTORY_KEY', - pBucketArea => 'DATA', - pFolderName => 'sequential_export', - pParallelDegree => 1 -- Sequential - ); -END; -/ -``` - -## Test Results -✅ Installation successful -✅ ENV_MANAGER v3.2.0 compiled -✅ DATA_EXPORTER v2.3.0 compiled -✅ Zero partition handling works correctly -✅ DBMS_PARALLEL_EXECUTE framework verified - -## Rollback -```powershell -sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835_prehook.sql" -``` - -## Author -Grzegorz Michalski - 2025-12-20 diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql index 2a4821a..a5514cb 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql @@ -43,6 +43,9 @@ CREATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( FILE_BASE_NAME VARCHAR2(1000), TEMPLATE_TABLE_NAME VARCHAR2(200), MAX_FILE_SIZE NUMBER DEFAULT 104857600 NOT NULL, + STATUS VARCHAR2(30) DEFAULT 'PENDING' NOT NULL, + ERROR_MESSAGE VARCHAR2(4000), + EXPORT_TIMESTAMP TIMESTAMP, CREATED_DATE TIMESTAMP DEFAULT SYSTIMESTAMP NOT NULL ); @@ -66,4 +69,7 @@ COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FORMAT_TYPE IS 'Export format COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FILE_BASE_NAME IS 'Base filename for CSV exports (NULL for Parquet)'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.TEMPLATE_TABLE_NAME IS 'Template table name for per-column date format configuration (e.g., CT_ET_TEMPLATES.TABLE_NAME)'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.MAX_FILE_SIZE IS 'Maximum file size in bytes for CSV exports only (e.g., 104857600 = 100MB, 1073741824 = 
1GB) - default 100MB (104857600). NOTE: Not applicable for PARQUET format (Oracle limitation)'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.STATUS IS 'Chunk processing status: PENDING (not started), PROCESSING (in progress), COMPLETED (success), FAILED (error) - allows retry of failed partitions only'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.ERROR_MESSAGE IS 'Error message if chunk processing failed (STATUS = FAILED)'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.EXPORT_TIMESTAMP IS 'Timestamp when chunk export was completed (STATUS = COMPLETED)'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.CREATED_DATE IS 'Timestamp when chunk was created'; diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb index 6a37469..6734d50 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb @@ -17,6 +17,39 @@ AS ---------------------------------------------------------------------------------------------------- + /** + * Deletes export file from OCI bucket if it exists (used for cleanup before retry) + * Silently ignores if file doesn't exist (ORA-20404) + **/ + PROCEDURE DELETE_FAILED_EXPORT_FILE( + pFileUri IN VARCHAR2, + pCredentialName IN VARCHAR2, + pParameters IN VARCHAR2 + ) IS + BEGIN + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Attempting to delete potentially corrupted file: ' || pFileUri, 'DEBUG', pParameters); + + DBMS_CLOUD.DELETE_OBJECT( + credential_name => pCredentialName, + object_uri => pFileUri + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted existing file (cleanup before retry): ' || pFileUri, 'INFO', pParameters); + EXCEPTION + WHEN OTHERS THEN + -- Object not found is OK (file doesn't exist) + IF SQLCODE = -20404 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('File does not exist 
(OK): ' || pFileUri, 'DEBUG', pParameters); + ELSE + -- Log but don't fail - export will attempt anyway + ENV_MANAGER.LOG_PROCESS_EVENT('Warning: Could not delete file (will retry export anyway): ' || SQLERRM, 'WARNING', pParameters); + END IF; + END; + END DELETE_FAILED_EXPORT_FILE; + + ---------------------------------------------------------------------------------------------------- + /** * Builds query with TO_CHAR for date/timestamp columns using per-column formats * Retrieves format for each date column from FILE_MANAGER.GET_DATE_FORMAT @@ -394,6 +427,10 @@ AS ENV_MANAGER.LOG_PROCESS_EVENT('Parquet export URI: ' || vUri, 'DEBUG', pParameters); + -- Delete potentially corrupted file from previous failed attempt + -- This prevents Oracle from creating _1 suffixed files on retry + DELETE_FAILED_EXPORT_FILE(vUri, pCredentialName, pParameters); + DBMS_CLOUD.EXPORT_DATA( credential_name => pCredentialName, file_uri_list => vUri, @@ -409,6 +446,10 @@ AS ENV_MANAGER.LOG_PROCESS_EVENT('CSV export URI: ' || vUri, 'DEBUG', pParameters); + -- Delete potentially corrupted file from previous failed attempt + -- This prevents Oracle from creating _1 suffixed files on retry + DELETE_FAILED_EXPORT_FILE(vUri, pCredentialName, pParameters); + -- Use json_object() for CSV export with maxfilesize in bytes (Oracle requirement) -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB -- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M' @@ -499,6 +540,13 @@ AS vParameters := 'Parallel task - Year: ' || vYear || ', Month: ' || vMonth || ', ChunkID: ' || pStartId; ENV_MANAGER.LOG_PROCESS_EVENT('Starting parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + -- Mark chunk as PROCESSING + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'PROCESSING', + ERROR_MESSAGE = NULL + WHERE CHUNK_ID = pStartId; + COMMIT; + -- Call the worker procedure EXPORT_SINGLE_PARTITION( pSchemaName => vSchemaName, @@ -518,11 +566,29 
@@ AS pParameters => vParameters ); + -- Mark chunk as COMPLETED + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'COMPLETED', + EXPORT_TIMESTAMP = SYSTIMESTAMP, + ERROR_MESSAGE = NULL + WHERE CHUNK_ID = pStartId; + COMMIT; + ENV_MANAGER.LOG_PROCESS_EVENT('Completed parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); EXCEPTION WHEN OTHERS THEN + -- Capture error details in variable (SQLERRM cannot be used directly in SQL) vgMsgTmp := 'Parallel task error for partition ' || vYear || '/' || vMonth || ' (ChunkID: ' || pStartId || '): ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + + -- Mark chunk as FAILED with error message + -- Use vgMsgTmp variable instead of SQLERRM directly (Oracle limitation in SQL context) + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'FAILED', + ERROR_MESSAGE = SUBSTR(vgMsgTmp, 1, 4000) + WHERE CHUNK_ID = pStartId; + COMMIT; + RAISE; END EXPORT_PARTITION_PARALLEL; @@ -798,23 +864,50 @@ AS BEGIN ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); - -- Clear any existing chunks from previous runs (TRUNCATE to avoid PK violations) - EXECUTE IMMEDIATE 'TRUNCATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS'; + -- Clean up old completed chunks (>24 hours) to prevent table bloat + -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) + -- This prevents race conditions when multiple exports run simultaneously + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE STATUS = 'COMPLETED' + AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY; + COMMIT; - -- Populate chunks table + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). 
Active session chunks preserved.', 'DEBUG', vParameters); + -- This prevents re-exporting successfully completed partitions + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'COMPLETED'; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared COMPLETED chunks. FAILED chunks retained for retry.', 'DEBUG', vParameters); + + -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) FOR i IN 1 .. vPartitions.COUNT LOOP - INSERT INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( - CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, - BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, - CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE - ) VALUES ( - i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, - vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, - pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600 - ); + MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t + USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id) + WHEN NOT MATCHED THEN + INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, 'PENDING') + WHEN MATCHED THEN + UPDATE SET TASK_NAME = vTaskName, + STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; END LOOP; + COMMIT; - ENV_MANAGER.LOG_PROCESS_EVENT('Populated ' || 
vPartitions.COUNT || ' chunks for parallel export', 'DEBUG', vParameters); + -- Log chunk statistics + DECLARE + vPendingCount NUMBER; + vFailedCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; + SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + END; -- Create parallel task DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); @@ -856,7 +949,8 @@ AS -- Clean up task DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); - -- Clean up chunks for this task + -- Clean up chunks for THIS specific task only (session-safe) + -- CRITICAL: Use TASK_NAME filter to avoid deleting chunks from other active sessions DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; COMMIT; @@ -1055,23 +1149,45 @@ AS BEGIN ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); - -- Clear any existing chunks from previous runs (TRUNCATE to avoid PK violations) - EXECUTE IMMEDIATE 'TRUNCATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS'; + -- Clean up old completed chunks (>24 hours) to prevent table bloat + -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) + -- This prevents race conditions when multiple CSV exports run simultaneously + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE STATUS = 'COMPLETED' + AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY; + COMMIT; - -- Populate chunks table + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters); + + -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) FOR i IN 1 .. 
vPartitions.COUNT LOOP - INSERT INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( - CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, - BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, - CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE - ) VALUES ( - i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, - vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, - pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize - ); + MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t + USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id) + WHEN NOT MATCHED THEN + INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, 'PENDING') + WHEN MATCHED THEN + UPDATE SET TASK_NAME = vTaskName, + STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; END LOOP; + COMMIT; - ENV_MANAGER.LOG_PROCESS_EVENT('Populated ' || vPartitions.COUNT || ' chunks for parallel CSV export', 'DEBUG', vParameters); + -- Log chunk statistics + DECLARE + vPendingCount NUMBER; + vFailedCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; + SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: 
PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + END; -- Create parallel task DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); @@ -1113,7 +1229,8 @@ AS -- Clean up task DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); - -- Clean up chunks for this task + -- Clean up chunks for THIS specific task only (session-safe) + -- CRITICAL: Use TASK_NAME filter to avoid deleting chunks from other active CSV sessions DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; COMMIT; diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg index 8ce3624..1209d84 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg @@ -9,12 +9,16 @@ AS **/ -- Package Version Information - PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.5.0'; - PACKAGE_BUILD_DATE CONSTANT VARCHAR2(19) := '2026-01-26 13:30:00'; + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.6.3'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(19) := '2026-01-28 19:30:00'; PACKAGE_AUTHOR CONSTANT VARCHAR2(50) := 'MRDS Development Team'; -- Version History (last 3-5 changes) VERSION_HISTORY CONSTANT VARCHAR2(4000) := + 'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. SQLERRM and DBMS_UTILITY.FORMAT_ERROR_BACKTRACE cannot be used directly in SQL UPDATE statements. Now properly assigned to vgMsgTmp variable before UPDATE.' || CHR(10) || + 'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Changed DELETE to filter by age (>24h) instead of deleting all COMPLETED chunks. Prevents concurrent sessions from deleting each other chunks. Session-safe cleanup with TASK_NAME filtering. 
Enables true parallel execution of multiple export jobs.' || CHR(10) || + 'v2.6.1 (2026-01-28): Added DELETE_FAILED_EXPORT_FILE procedure to clean up partial/corrupted files before retry. When partition fails mid-export, partial file is deleted before retry to prevent Oracle from creating _1 suffixed duplicates. Ensures clean retry without orphaned files in OCI bucket.' || CHR(10) || + 'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry. System now restarts ONLY failed partitions instead of re-exporting all data. Added ERROR_MESSAGE and EXPORT_TIMESTAMP columns for better error handling and monitoring. Prevents duplicate file creation when parallel tasks fail (e.g., 22 partitions with 16 threads, 3 failures no longer duplicates 19 successful exports).' || CHR(10) || 'v2.5.0 (2026-01-26): Added recorddelimiter parameter with CRLF (CHR(13)||CHR(10)) for CSV exports to ensure Windows-compatible line endings. Improves cross-platform compatibility when CSV files are opened in Windows applications (Notepad, Excel).' || CHR(10) || 'v2.4.0 (2026-01-11): Added pTemplateTableName parameter for per-column date format configuration. Implements dynamic query building with TO_CHAR for each date/timestamp column using FILE_MANAGER.GET_DATE_FORMAT. Supports 3-tier hierarchy: column-specific, template DEFAULT, global fallback. Eliminates single dateformat limitation of DBMS_CLOUD.EXPORT_DATA.' || CHR(10) || 'v2.3.0 (2025-12-20): Added parallel partition processing using DBMS_PARALLEL_EXECUTE. New pParallelDegree parameter (1-16, default 1) for EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures. Each year/month partition processed in separate thread for improved performance.' 
|| CHR(10) || diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/rollback_version/v2.5.0/DATA_EXPORTER.pkb b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/rollback_version/v2.5.0/DATA_EXPORTER.pkb new file mode 100644 index 0000000..6a37469 --- /dev/null +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/rollback_version/v2.5.0/DATA_EXPORTER.pkb @@ -0,0 +1,1196 @@ +create or replace PACKAGE BODY CT_MRDS.DATA_EXPORTER +AS + + ---------------------------------------------------------------------------------------------------- + -- PRIVATE HELPER FUNCTIONS (USED BY MULTIPLE PROCEDURES) + ---------------------------------------------------------------------------------------------------- + + /** + * Sanitizes filename by replacing disallowed characters with underscores + **/ + FUNCTION sanitizeFilename(pFilename IN VARCHAR2) RETURN VARCHAR2 IS + vFilename VARCHAR2(1000); + BEGIN + vFilename := REGEXP_REPLACE(pFilename, '[^a-zA-Z0-9._-]', '_'); + RETURN vFilename; + END sanitizeFilename; + + ---------------------------------------------------------------------------------------------------- + + /** + * Builds query with TO_CHAR for date/timestamp columns using per-column formats + * Retrieves format for each date column from FILE_MANAGER.GET_DATE_FORMAT + **/ + FUNCTION buildQueryWithDateFormats( + pColumnList IN VARCHAR2, + pTableName IN VARCHAR2, + pSchemaName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pTemplateTableName IN VARCHAR2 + ) RETURN VARCHAR2 IS + vResult VARCHAR2(32767); + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + vAllCols VARCHAR2(32767); + vDataType VARCHAR2(30); + vDateFormat VARCHAR2(200); + vTemplateSchema VARCHAR2(128); + vTemplateTable VARCHAR2(128); + vColExists NUMBER; + BEGIN + -- Build column list if not provided + IF pColumnList IS NULL THEN + -- Use template table for column order when provided + -- Template defines which columns to export and in 
what order + IF pTemplateTableName IS NOT NULL THEN + -- Parse template table name (SCHEMA.TABLE or just TABLE) + IF INSTR(pTemplateTableName, '.') > 0 THEN + vTemplateSchema := SUBSTR(pTemplateTableName, 1, INSTR(pTemplateTableName, '.') - 1); + vTemplateTable := SUBSTR(pTemplateTableName, INSTR(pTemplateTableName, '.') + 1); + ELSE + vTemplateSchema := pSchemaName; + vTemplateTable := pTemplateTableName; + END IF; + + -- Get columns from TEMPLATE table in template column order + -- Template defines target CSV structure (column order and which columns to include) + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = vTemplateTable + AND owner = vTemplateSchema; + ELSE + -- Get columns from source table when no template + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pTableName + AND owner = pSchemaName; + END IF; + ELSE + vAllCols := pColumnList; + END IF; + + -- Process each column + vColumns := UPPER(REPLACE(vAllCols, ' ', '')); + vPos := 1; + vResult := ''; + + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- When using template table, check if column exists in SOURCE table + -- Template defines target structure, source provides data + -- Skip template columns that don't exist in source (except A_WORKFLOW_HISTORY_KEY) + IF pTemplateTableName IS NOT NULL THEN + -- Check if template column exists in SOURCE table + SELECT COUNT(*) INTO vColExists + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + + -- Skip columns that don't exist in source table + -- Exception: A_WORKFLOW_HISTORY_KEY is virtual (mapped from pKeyColumnName) + IF vColExists = 0 AND UPPER(vCurrentCol) != 
'A_WORKFLOW_HISTORY_KEY' THEN + vPos := vNextPos + 1; + CONTINUE; + END IF; + END IF; + + -- Get column data type from appropriate table (template or source) + IF pTemplateTableName IS NOT NULL THEN + -- Get data type from template table + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = vTemplateTable + AND column_name = vCurrentCol + AND owner = vTemplateSchema; + ELSE + -- Get data type from source table + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + END IF; + + -- Handle key column alias (template table has A_WORKFLOW_HISTORY_KEY, source table has pKeyColumnName) + IF UPPER(vCurrentCol) = 'A_WORKFLOW_HISTORY_KEY' THEN + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'; + + -- Convert DATE/TIMESTAMP columns to CHAR with specific format + ELSIF vDataType IN ('DATE', 'TIMESTAMP', 'TIMESTAMP WITH TIME ZONE', 'TIMESTAMP WITH LOCAL TIME ZONE') THEN + IF pTemplateTableName IS NOT NULL THEN + vDateFormat := CT_MRDS.FILE_MANAGER.GET_DATE_FORMAT( + pTemplateTableName => pTemplateTableName, + pColumnName => vCurrentCol + ); + ELSE + vDateFormat := ENV_MANAGER.gvDefaultDateFormat; + END IF; + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'TO_CHAR(T.' || vCurrentCol || ', ''' || vDateFormat || ''') AS ' || vCurrentCol; + + -- Other columns as-is with T. prefix + ELSE + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'T.' || vCurrentCol; + END IF; + + vPos := vNextPos + 1; + END LOOP; + + RETURN vResult; + END buildQueryWithDateFormats; + + ---------------------------------------------------------------------------------------------------- + + -- Internal shared function to process column list with T. 
prefix and key column mapping + FUNCTION processColumnList(pColumnList IN VARCHAR2, pTableName IN VARCHAR2, pSchemaName IN VARCHAR2, pKeyColumnName IN VARCHAR2) RETURN VARCHAR2 IS + vResult VARCHAR2(32767); + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + vAllCols VARCHAR2(32767); + BEGIN + IF pColumnList IS NULL THEN + -- Build list of all columns + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pTableName + AND owner = pSchemaName; + + -- Add T. prefix to all columns + vResult := 'T.' || REPLACE(vAllCols, ', ', ', T.'); + + -- Replace key column with aliased version (e.g., T.A_ETL_LOAD_SET_KEY_FK AS A_WORKFLOW_HISTORY_KEY) + vResult := REPLACE(vResult, 'T.' || pKeyColumnName, 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'); + + RETURN vResult; + END IF; + + -- Remove extra spaces and convert to uppercase + vColumns := UPPER(REPLACE(pColumnList, ' ', '')); + vPos := 1; + vResult := ''; + + -- Parse comma-separated column list and add T. prefix + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- Check if this is the key column (e.g., A_ETL_LOAD_SET_KEY_FK) and add alias + IF UPPER(vCurrentCol) = UPPER(pKeyColumnName) THEN + vCurrentCol := 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'; + ELSE + -- Add T. prefix if not already present + IF INSTR(vCurrentCol, '.') = 0 THEN + vCurrentCol := 'T.' 
|| vCurrentCol; + END IF; + END IF; + + -- Add to result with comma separator + IF vResult IS NOT NULL THEN + vResult := vResult || ', '; + END IF; + vResult := vResult || vCurrentCol; + + vPos := vNextPos + 1; + END LOOP; + + RETURN vResult; + END processColumnList; + + ---------------------------------------------------------------------------------------------------- + + /** + * Validates table existence, key column existence, and column list + **/ + PROCEDURE VALIDATE_TABLE_AND_COLUMNS ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pColumnList IN VARCHAR2, + pParameters IN VARCHAR2 + ) IS + vCount INTEGER; + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + BEGIN + -- Check if table exists + SELECT COUNT(*) INTO vCount + FROM all_tables + WHERE table_name = pTableName + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + END IF; + + -- Check if key column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = pKeyColumnName + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + -- Validate pColumnList - check if all column names exist in the table + IF pColumnList IS NOT NULL THEN + vColumns := UPPER(REPLACE(pColumnList, ' ', '')); + vPos := 1; + + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- Remove table alias prefix if present + IF INSTR(vCurrentCol, '.') > 0 THEN + vCurrentCol := SUBSTR(vCurrentCol, INSTR(vCurrentCol, '.') + 1); + END IF; + + -- Check if column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = 
pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + vPos := vNextPos + 1; + END LOOP; + END IF; + END VALIDATE_TABLE_AND_COLUMNS; + + ---------------------------------------------------------------------------------------------------- + + /** + * Retrieves list of year/month partitions based on date range + **/ + FUNCTION GET_PARTITIONS ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pMinDate IN DATE, + pMaxDate IN DATE, + pParameters IN VARCHAR2 + ) RETURN partition_tab IS + vSql VARCHAR2(32000); + vPartitions partition_tab; + vKeyValuesYear DBMS_SQL.VARCHAR2_TABLE; + vKeyValuesMonth DBMS_SQL.VARCHAR2_TABLE; + vFullTableName VARCHAR2(200); + BEGIN + -- Build fully qualified table name if not already qualified + IF INSTR(pTableName, '.') > 0 THEN + vFullTableName := pTableName; -- Already fully qualified + ELSE + vFullTableName := pSchemaName || '.' || pTableName; + END IF; + + vSql := 'SELECT DISTINCT TO_CHAR(L.LOAD_START,''YYYY'') AS YR, TO_CHAR(L.LOAD_START,''MM'') AS MN + FROM ' || vFullTableName || ' T, CT_ODS.A_LOAD_HISTORY L + WHERE T.' || pKeyColumnName || ' = L.A_ETL_LOAD_SET_KEY + AND L.LOAD_START >= :pMinDate + AND L.LOAD_START < :pMaxDate + ORDER BY YR, MN'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Executing date range query: ' || vSql, 'DEBUG', pParameters); + EXECUTE IMMEDIATE vSql BULK COLLECT INTO vKeyValuesYear, vKeyValuesMonth USING pMinDate, pMaxDate; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vKeyValuesYear.COUNT || ' year/month combinations to export', 'DEBUG', pParameters); + + -- Convert to partition_tab + vPartitions := partition_tab(); + vPartitions.EXTEND(vKeyValuesYear.COUNT); + FOR i IN 1 .. 
vKeyValuesYear.COUNT LOOP + vPartitions(i).year := vKeyValuesYear(i); + vPartitions(i).month := vKeyValuesMonth(i); + END LOOP; + + RETURN vPartitions; + END GET_PARTITIONS; + + ---------------------------------------------------------------------------------------------------- + + /** + * Exports single partition (year/month) to specified format (PARQUET or CSV) + * This is the core worker procedure that will be used for parallel processing in v2.3.0 + **/ + PROCEDURE EXPORT_SINGLE_PARTITION ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pYear IN VARCHAR2, + pMonth IN VARCHAR2, + pBucketUri IN VARCHAR2, + pFolderName IN VARCHAR2, + pProcessedColumns IN VARCHAR2, + pMinDate IN DATE, + pMaxDate IN DATE, + pCredentialName IN VARCHAR2, + pFormat IN VARCHAR2 DEFAULT 'PARQUET', + pFileBaseName IN VARCHAR2 DEFAULT NULL, + pMaxFileSize IN NUMBER DEFAULT 104857600, + pParameters IN VARCHAR2 + ) IS + vQuery VARCHAR2(32767); + vUri VARCHAR2(4000); + vFileName VARCHAR2(1000); + vFullTableName VARCHAR2(200); + BEGIN + -- Build fully qualified table name if not already qualified + IF INSTR(pTableName, '.') > 0 THEN + vFullTableName := pTableName; -- Already fully qualified + ELSE + vFullTableName := pSchemaName || '.' || pTableName; + END IF; + + -- Construct the query to extract data for the current year/month + vQuery := 'SELECT ' || pProcessedColumns || ' + FROM ' || vFullTableName || ' T, CT_ODS.A_LOAD_HISTORY L + WHERE T.' 
|| pKeyColumnName || ' = L.A_ETL_LOAD_SET_KEY + AND TO_CHAR(L.LOAD_START,''YYYY'') = ' || CHR(39) || pYear || CHR(39) || ' + AND TO_CHAR(L.LOAD_START,''MM'') = ' || CHR(39) || pMonth || CHR(39) || ' + AND L.LOAD_START >= TO_DATE(' || CHR(39) || TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS') || CHR(39) || ', ''YYYY-MM-DD HH24:MI:SS'') + AND L.LOAD_START < TO_DATE(' || CHR(39) || TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS') || CHR(39) || ', ''YYYY-MM-DD HH24:MI:SS'')'; + + -- Construct the URI based on format + IF pFormat = 'PARQUET' THEN + -- Parquet: Use Hive-style partitioning + -- Note: maxfilesize is NOT supported for Parquet format (Oracle limitation) + vUri := pBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + 'PARTITION_YEAR=' || sanitizeFilename(pYear) || '/' || + 'PARTITION_MONTH=' || sanitizeFilename(pMonth) || '/' || + sanitizeFilename(pYear) || sanitizeFilename(pMonth) || '.parquet'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parquet export URI: ' || vUri, 'DEBUG', pParameters); + + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object('type' VALUE 'parquet') + ); + ELSIF pFormat = 'CSV' THEN + -- CSV: Flat file structure with year/month in filename + vFileName := NVL(pFileBaseName, UPPER(pTableName)) || '_' || pYear || pMonth || '.csv'; + vUri := pBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + sanitizeFilename(vFileName); + + ENV_MANAGER.LOG_PROCESS_EVENT('CSV export URI: ' || vUri, 'DEBUG', pParameters); + + -- Use json_object() for CSV export with maxfilesize in bytes (Oracle requirement) + -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB + -- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M' + -- Using 100MB (104857600) to avoid PGA memory issues with large files + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => 
vQuery, + format => json_object( + 'type' VALUE 'CSV', + 'header' VALUE true, + 'quote' VALUE CHR(34), + 'delimiter' VALUE ',', + 'escape' VALUE true, + 'recorddelimiter' VALUE CHR(13)||CHR(10), -- CRLF dla Windows + 'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes (e.g., 104857600 = 100MB) + ) + ); + ELSE + RAISE_APPLICATION_ERROR(-20001, 'Unsupported format: ' || pFormat || '. Use PARQUET or CSV.'); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Processing Year/Month: ' || pYear || '/' || pMonth || ' (Format: ' || pFormat || ')', 'DEBUG', pParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', pParameters); + END EXPORT_SINGLE_PARTITION; + + ---------------------------------------------------------------------------------------------------- + + /** + * Callback procedure for DBMS_PARALLEL_EXECUTE + * Processes single partition (year/month) chunk in parallel task + * Called by DBMS_PARALLEL_EXECUTE framework for each chunk + **/ + PROCEDURE EXPORT_PARTITION_PARALLEL ( + pStartId IN NUMBER, + pEndId IN NUMBER + ) IS + vYear VARCHAR2(4); + vMonth VARCHAR2(2); + vSchemaName VARCHAR2(128); + vTableName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vBucketUri VARCHAR2(4000); + vFolderName VARCHAR2(1000); + vProcessedColumns VARCHAR2(32767); + vMinDate DATE; + vMaxDate DATE; + vCredentialName VARCHAR2(200); + vFormat VARCHAR2(20); + vFileBaseName VARCHAR2(1000); + vMaxFileSize NUMBER; + vParameters VARCHAR2(4000); + BEGIN + -- Retrieve chunk context from global temporary table + SELECT + YEAR_VALUE, + MONTH_VALUE, + SCHEMA_NAME, + TABLE_NAME, + KEY_COLUMN_NAME, + BUCKET_URI, + FOLDER_NAME, + PROCESSED_COLUMNS, + MIN_DATE, + MAX_DATE, + CREDENTIAL_NAME, + FORMAT_TYPE, + FILE_BASE_NAME, + MAX_FILE_SIZE + INTO + vYear, + vMonth, + vSchemaName, + vTableName, + vKeyColumnName, + vBucketUri, + vFolderName, + vProcessedColumns, + vMinDate, + vMaxDate, + vCredentialName, + vFormat, + vFileBaseName, + vMaxFileSize + FROM 
CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE CHUNK_ID = pStartId; + + vParameters := 'Parallel task - Year: ' || vYear || ', Month: ' || vMonth || ', ChunkID: ' || pStartId; + ENV_MANAGER.LOG_PROCESS_EVENT('Starting parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + + -- Call the worker procedure + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vYear, + pMonth => vMonth, + pBucketUri => vBucketUri, + pFolderName => vFolderName, + pProcessedColumns => vProcessedColumns, + pMinDate => vMinDate, + pMaxDate => vMaxDate, + pCredentialName => vCredentialName, + pFormat => vFormat, + pFileBaseName => vFileBaseName, + pMaxFileSize => vMaxFileSize, + pParameters => vParameters + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Completed parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + EXCEPTION + WHEN OTHERS THEN + vgMsgTmp := 'Parallel task error for partition ' || vYear || '/' || vMonth || ' (ChunkID: ' || pStartId || '): ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE; + END EXPORT_PARTITION_PARALLEL; + + ---------------------------------------------------------------------------------------------------- + -- MAIN EXPORT PROCEDURES + ---------------------------------------------------------------------------------------------------- + + PROCEDURE EXPORT_TABLE_DATA ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + -- Type definition for key values + TYPE key_value_tab IS TABLE OF VARCHAR2(4000); + vKeyValues key_value_tab; + vCount INTEGER; + vSql VARCHAR2(4000); + vKeyValue VARCHAR2(4000); + vQuery VARCHAR2(32767); + vUri VARCHAR2(4000); + vDataType VARCHAR2(30); + vTableName 
VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters VARCHAR2(4000); + vBucketUri VARCHAR2(4000); + vProcessedColumnList VARCHAR2(32767); + vCurrentCol VARCHAR2(128); + vAllColumnsList VARCHAR2(32767); + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Check if table exists + SELECT COUNT(*) INTO vCount + FROM all_tables + WHERE table_name = vTableName + AND owner = vSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + END IF; + + -- Check if key column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = vTableName + AND column_name = vKeyColumnName + AND owner = vSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + + END IF; + + -- Get the data type of the key column + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = vTableName + AND column_name = vKeyColumnName + AND owner = vSchemaName; + + -- Build list of all columns for the table (including key column for aliasing) + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllColumnsList + 
FROM all_tab_columns + WHERE table_name = vTableName + AND owner = vSchemaName; + + -- Process column list to add T. prefix and alias key column as A_WORKFLOW_HISTORY_KEY + vProcessedColumnList := processColumnList(vAllColumnsList, vTableName, vSchemaName, vKeyColumnName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Dynamic column list built: ' || vAllColumnsList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with T. prefix: ' || vProcessedColumnList, 'DEBUG', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + -- Fetch unique key values from A_LOAD_HISTORY + vSql := 'SELECT DISTINCT L.A_ETL_LOAD_SET_KEY' || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Executing key values query: ' || vSql, 'DEBUG', vParameters); + EXECUTE IMMEDIATE vSql BULK COLLECT INTO vKeyValues; + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vKeyValues.COUNT || ' unique key values to process', 'DEBUG', vParameters); + + -- Loop over each unique key value + FOR i IN 1 .. vKeyValues.COUNT LOOP + vKeyValue := vKeyValues(i); + + -- Construct the query to extract data for the current key value with A_WORKFLOW_HISTORY_KEY mapping + IF vDataType IN ('VARCHAR2', 'CHAR', 'NCHAR', 'NVARCHAR2') THEN + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || + ' AND L.A_ETL_LOAD_SET_KEY = ' || CHR(39) || vKeyValue || CHR(39); + ELSIF vDataType IN ('NUMBER', 'FLOAT', 'BINARY_FLOAT', 'BINARY_DOUBLE') THEN + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' 
|| DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || + ' AND L.A_ETL_LOAD_SET_KEY = ' || vKeyValue; + ELSIF vDataType LIKE 'TIMESTAMP%' OR vDataType = 'DATE' THEN + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || + ' AND L.A_ETL_LOAD_SET_KEY = TO_TIMESTAMP(' || CHR(39) || vKeyValue || CHR(39) ||', ''YYYY-MM-DD HH24:MI:SS.FF'')'; + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE); + END IF; + + -- Construct the URI for the file in OCI Object Storage + vUri := vBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + sanitizeFilename(vKeyValue) || '.csv'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Processing key value: ' || vKeyValue || ' (' || (i) || '/' || vKeyValues.COUNT || ')', 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export URI: ' || vUri, 'DEBUG', vParameters); + + -- Use DBMS_CLOUD package to export data to the URI + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object('type' VALUE 'CSV', 'header' VALUE true) + ); + END LOOP; + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in column list' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 
'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_UNSUPPORTED_DATA_TYPE THEN + vgMsgTmp := ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE || ' vDataType: '||vDataType; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE EXPORT_TABLE_DATA_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vProcessedColumnList VARCHAR2(32767); + vBucketUri VARCHAR2(4000); + vCurrentCol VARCHAR2(128); + vPartitions partition_tab; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => 
'''||nvl(pFolderName, 'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pMinDate => '''||nvl(TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Validate table, key column, and column list using shared procedure + VALIDATE_TABLE_AND_COLUMNS(vSchemaName, vTableName, vKeyColumnName, pColumnList, vParameters); + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(pColumnList, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Input column list: ' || NVL(pColumnList, 'NULL (building dynamic list from table metadata)'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Validate parallel degree parameter + IF pParallelDegree < 1 OR pParallelDegree > 16 THEN + vgMsgTmp := ENV_MANAGER.MSG_INVALID_PARALLEL_DEGREE || ': ' || pParallelDegree || '. 
Valid range: 1-16'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + END IF; + + -- Get partitions using shared function + vPartitions := GET_PARTITIONS(vSchemaName, vTableName, vKeyColumnName, pMinDate, pMaxDate, vParameters); + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vPartitions.COUNT || ' partitions to export with parallel degree ' || pParallelDegree, 'INFO', vParameters); + + -- Sequential processing (parallel degree = 1) + IF pParallelDegree = 1 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Using sequential processing (pParallelDegree = 1)', 'DEBUG', vParameters); + + FOR i IN 1 .. vPartitions.COUNT LOOP + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vPartitions(i).year, + pMonth => vPartitions(i).month, + pBucketUri => vBucketUri, + pFolderName => pFolderName, + pProcessedColumns => vProcessedColumnList, + pMinDate => pMinDate, + pMaxDate => pMaxDate, + pCredentialName => pCredentialName, + pFormat => 'PARQUET', + pFileBaseName => NULL, + pMaxFileSize => 104857600, + pParameters => vParameters + ); + END LOOP; + + -- Parallel processing (parallel degree > 1) + ELSE + -- Skip parallel processing if no partitions found + IF vPartitions.COUNT = 0 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('No partitions to export - skipping parallel processing', 'INFO', vParameters); + ELSE + DECLARE + vTaskName VARCHAR2(128) := 'DATA_EXPORT_TASK_' || TO_CHAR(SYSTIMESTAMP, 'YYYYMMDDHH24MISSFF'); + vChunkId NUMBER; + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); + + -- Clear any existing chunks from previous runs (TRUNCATE to avoid PK violations) + EXECUTE IMMEDIATE 'TRUNCATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS'; + + -- Populate chunks table + FOR i IN 1 .. 
vPartitions.COUNT LOOP + INSERT INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( + CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE + ) VALUES ( + i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600 + ); + END LOOP; + + ENV_MANAGER.LOG_PROCESS_EVENT('Populated ' || vPartitions.COUNT || ' chunks for parallel export', 'DEBUG', vParameters); + + -- Create parallel task + DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); + + -- Define chunks by number range (1 to partition count) + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + task_name => vTaskName, + table_owner => 'CT_MRDS', + table_name => 'A_PARALLEL_EXPORT_CHUNKS', + table_column => 'CHUNK_ID', + chunk_size => 1 -- Each partition is one chunk + ); + + -- Execute task in parallel + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel task: ' || vTaskName, 'DEBUG', vParameters); + + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + + -- Check for errors + DECLARE + vErrorCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vErrorCount + FROM USER_PARALLEL_EXECUTE_CHUNKS + WHERE task_name = vTaskName AND status = 'PROCESSED_WITH_ERROR'; + + IF vErrorCount > 0 THEN + vgMsgTmp := 'Parallel execution completed with ' || vErrorCount || ' errors. 
Check USER_PARALLEL_EXECUTE_CHUNKS for details.'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END IF; + END; + + -- Clean up task + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + + -- Clean up chunks for this task + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel execution completed successfully', 'INFO', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Attempt to drop task on error + BEGIN + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + EXCEPTION + WHEN OTHERS THEN NULL; -- Ignore drop errors + END; + + vgMsgTmp := ENV_MANAGER.MSG_PARALLEL_EXECUTION_FAILED || ': ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END; + END IF; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in pColumnList' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_INVALID_PARALLEL_DEGREE THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + WHEN ENV_MANAGER.ERR_PARALLEL_EXECUTION_FAILED THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + WHEN 
OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA_BY_DATE; + + ---------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE + * @desc Exports data to a single CSV file with date filtering. + * Unlike EXPORT_TABLE_DATA_BY_DATE, this procedure creates one CSV file + * instead of multiple Parquet files partitioned by year/month. + * Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY. + * Allows specifying custom column list or uses T.* if pColumnList is NULL. + * Validates that all columns in pColumnList exist in the target table. + * Automatically adds 'T.' prefix to column names in pColumnList. 
+ * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'exports', + * pFileName => 'my_export.csv', + * pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 DEFAULT NULL, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vFileBaseName VARCHAR2(4000); + vFileExtension VARCHAR2(10); + vProcessedColumnList VARCHAR2(32767); + vBucketUri VARCHAR2(4000); + vCurrentCol VARCHAR2(128); + vPartitions partition_tab; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pMinDate => '''||nvl(TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 
'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Extract base filename and extension or construct default filename + IF pFileName IS NOT NULL THEN + -- Use provided filename + IF INSTR(pFileName, '.') > 0 THEN + vFileBaseName := SUBSTR(pFileName, 1, INSTR(pFileName, '.', -1) - 1); + vFileExtension := SUBSTR(pFileName, INSTR(pFileName, '.', -1)); + ELSE + vFileBaseName := pFileName; + vFileExtension := '.csv'; + END IF; + ELSE + -- Construct default filename: TABLENAME (without extension, will be added by worker) + vFileBaseName := UPPER(pTableName); + vFileExtension := '.csv'; + END IF; + + -- Validate table, key column, and column list using shared procedure + VALIDATE_TABLE_AND_COLUMNS(vSchemaName, vTableName, vKeyColumnName, pColumnList, vParameters); + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(pColumnList, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Input column list: ' || NVL(pColumnList, 'NULL (using dynamic column list)'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) 
|| '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Validate parallel degree parameter + IF pParallelDegree < 1 OR pParallelDegree > 16 THEN + vgMsgTmp := ENV_MANAGER.MSG_INVALID_PARALLEL_DEGREE || ': ' || pParallelDegree || '. Valid range: 1-16'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + END IF; + + -- Get partitions using shared function + vPartitions := GET_PARTITIONS(vSchemaName, vTableName, vKeyColumnName, pMinDate, pMaxDate, vParameters); + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vPartitions.COUNT || ' year/month combinations to export', 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Date range: ' || TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS') || ' to ' || TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel degree: ' || pParallelDegree, 'INFO', vParameters); + + -- Sequential processing (parallel degree = 1) + IF pParallelDegree = 1 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Using sequential processing (pParallelDegree = 1)', 'DEBUG', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vPartitions(i).year, + pMonth => vPartitions(i).month, + pBucketUri => vBucketUri, + pFolderName => pFolderName, + pProcessedColumns => vProcessedColumnList, + pMinDate => pMinDate, + pMaxDate => pMaxDate, + pCredentialName => pCredentialName, + pFormat => 'CSV', + pFileBaseName => vFileBaseName, + pMaxFileSize => pMaxFileSize, + pParameters => vParameters + ); + END LOOP; + + -- Parallel processing (parallel degree > 1) + ELSE + -- Skip parallel processing if no partitions found + IF vPartitions.COUNT = 0 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('No partitions to export - skipping parallel CSV processing', 'INFO', vParameters); + ELSE + DECLARE + vTaskName VARCHAR2(128) := 'DATA_CSV_EXPORT_TASK_' || TO_CHAR(SYSTIMESTAMP, 'YYYYMMDDHH24MISSFF'); + vChunkId NUMBER; + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); + + -- Clear any existing chunks from previous runs (TRUNCATE to avoid PK violations) + EXECUTE IMMEDIATE 'TRUNCATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS'; + + -- Populate chunks table + FOR i IN 1 .. 
vPartitions.COUNT LOOP + INSERT INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( + CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE + ) VALUES ( + i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize + ); + END LOOP; + + ENV_MANAGER.LOG_PROCESS_EVENT('Populated ' || vPartitions.COUNT || ' chunks for parallel CSV export', 'DEBUG', vParameters); + + -- Create parallel task + DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); + + -- Define chunks by number range (1 to partition count) + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + task_name => vTaskName, + table_owner => 'CT_MRDS', + table_name => 'A_PARALLEL_EXPORT_CHUNKS', + table_column => 'CHUNK_ID', + chunk_size => 1 -- Each partition is one chunk + ); + + -- Execute task in parallel + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel CSV export task: ' || vTaskName, 'DEBUG', vParameters); + + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + + -- Check for errors + DECLARE + vErrorCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vErrorCount + FROM USER_PARALLEL_EXECUTE_CHUNKS + WHERE task_name = vTaskName AND status = 'PROCESSED_WITH_ERROR'; + + IF vErrorCount > 0 THEN + vgMsgTmp := 'Parallel CSV export completed with ' || vErrorCount || ' errors. 
Check USER_PARALLEL_EXECUTE_CHUNKS for details.'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END IF; + END; + + -- Clean up task + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + + -- Clean up chunks for this task + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel CSV execution completed successfully', 'INFO', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Attempt to drop task on error + BEGIN + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + EXCEPTION + WHEN OTHERS THEN NULL; -- Ignore drop errors + END; + + vgMsgTmp := ENV_MANAGER.MSG_PARALLEL_EXECUTION_FAILED || ': ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END; + END IF; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Export completed successfully for ' || vPartitions.COUNT || ' files', 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in pColumnList' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_INVALID_PARALLEL_DEGREE THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + WHEN 
ENV_MANAGER.ERR_PARALLEL_EXECUTION_FAILED THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA_TO_CSV_BY_DATE; + + ---------------------------------------------------------------------------------------------------- + -- VERSION MANAGEMENT FUNCTIONS + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION RETURN VARCHAR2 IS + BEGIN + RETURN PACKAGE_VERSION; + END GET_VERSION; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_BUILD_INFO RETURN VARCHAR2 IS + BEGIN + RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO( + pPackageName => 'DATA_EXPORTER', + pVersion => PACKAGE_VERSION, + pBuildDate => PACKAGE_BUILD_DATE, + pAuthor => PACKAGE_AUTHOR + ); + END GET_BUILD_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2 IS + BEGIN + RETURN ENV_MANAGER.FORMAT_VERSION_HISTORY( + pPackageName => 'DATA_EXPORTER', + pVersionHistory => VERSION_HISTORY + ); + END GET_VERSION_HISTORY; + + ---------------------------------------------------------------------------------------------------- + +END; + +/ diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/rollback_version/v2.5.0/DATA_EXPORTER.pkg b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/rollback_version/v2.5.0/DATA_EXPORTER.pkg new file mode 100644 index 0000000..8ce3624 --- /dev/null 
+++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/rollback_version/v2.5.0/DATA_EXPORTER.pkg @@ -0,0 +1,214 @@ +create or replace PACKAGE CT_MRDS.DATA_EXPORTER +AUTHID CURRENT_USER +AS + /** + * Data Export Package: Provides comprehensive data export capabilities to various formats (CSV, Parquet) + * with support for cloud storage integration via Oracle Cloud Infrastructure (OCI). + * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function + * which returns documentation text for confluence page (to Copy-Paste it). + **/ + + -- Package Version Information + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.5.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(19) := '2026-01-26 13:30:00'; + PACKAGE_AUTHOR CONSTANT VARCHAR2(50) := 'MRDS Development Team'; + + -- Version History (last 3-5 changes) + VERSION_HISTORY CONSTANT VARCHAR2(4000) := + 'v2.5.0 (2026-01-26): Added recorddelimiter parameter with CRLF (CHR(13)||CHR(10)) for CSV exports to ensure Windows-compatible line endings. Improves cross-platform compatibility when CSV files are opened in Windows applications (Notepad, Excel).' || CHR(10) || + 'v2.4.0 (2026-01-11): Added pTemplateTableName parameter for per-column date format configuration. Implements dynamic query building with TO_CHAR for each date/timestamp column using FILE_MANAGER.GET_DATE_FORMAT. Supports 3-tier hierarchy: column-specific, template DEFAULT, global fallback. Eliminates single dateformat limitation of DBMS_CLOUD.EXPORT_DATA.' || CHR(10) || + 'v2.3.0 (2025-12-20): Added parallel partition processing using DBMS_PARALLEL_EXECUTE. New pParallelDegree parameter (1-16, default 1) for EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures. Each year/month partition processed in separate thread for improved performance.' || CHR(10) || + 'v2.2.0 (2025-12-19): DRY refactoring - extracted shared helper functions (sanitizeFilename, VALIDATE_TABLE_AND_COLUMNS, GET_PARTITIONS, EXPORT_SINGLE_PARTITION worker procedure). 
Reduced code duplication by ~400 lines. Prepared architecture for v2.3.0 parallel processing.' || CHR(10) || + 'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY, added consistent column mapping and dynamic column list to EXPORT_TABLE_DATA procedure, enhanced DEBUG logging for all export operations' || CHR(10) || + 'v2.1.0 (2025-10-22): Added version tracking and PARTITION_YEAR/PARTITION_MONTH support' || CHR(10) || + 'v2.0.0 (2025-10-01): Separated export functionality from FILE_MANAGER package' || CHR(10); + + cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); + vgMsgTmp VARCHAR2(32000); + + --------------------------------------------------------------------------------------------------------------------------- + -- TYPE DEFINITIONS FOR PARTITION HANDLING + --------------------------------------------------------------------------------------------------------------------------- + + /** + * Record type for year/month partition information + **/ + TYPE partition_rec IS RECORD ( + year VARCHAR2(4), + month VARCHAR2(2) + ); + + /** + * Table type for collection of partition records + **/ + TYPE partition_tab IS TABLE OF partition_rec; + + --------------------------------------------------------------------------------------------------------------------------- + -- INTERNAL PARALLEL PROCESSING CALLBACK + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_PARTITION_PARALLEL + * @desc Internal callback procedure for DBMS_PARALLEL_EXECUTE. + * Processes single partition (year/month) chunk in parallel task. + * Called by DBMS_PARALLEL_EXECUTE framework for each chunk. + * This procedure is PUBLIC because DBMS_PARALLEL_EXECUTE requires it, + * but should NOT be called directly by external code. 
+ * @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table) + * @param pEndId - Chunk end ID (same as pStartId for single-row chunks) + **/ + PROCEDURE EXPORT_PARTITION_PARALLEL ( + pStartId IN NUMBER, + pEndId IN NUMBER + ); + + --------------------------------------------------------------------------------------------------------------------------- + -- MAIN EXPORT PROCEDURES + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_TABLE_DATA + * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. + * Exports data into CSV file on OCI infrustructure. + * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'csv_exports' + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + + + /** + * @name EXPORT_TABLE_DATA_BY_DATE + * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. + * Exports data into PARQUET files on OCI infrustructure. + * Each YEAR_MONTH pair goes to seperate file (implicit partitioning). + * Allows specifying custom column list or uses T.* if pColumnList is NULL. + * Validates that all columns in pColumnList exist in the target table. + * Automatically adds 'T.' prefix to column names in pColumnList. + * Supports parallel partition processing via pParallelDegree parameter (default 1, range 1-16). 
+ * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'parquet_exports', + * pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE, + * pParallelDegree => 8 -- Optional, default 1, range 1-16 + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + + + /** + * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE + * @desc Exports data to separate CSV files partitioned by year and month. + * Creates one CSV file for each year/month combination found in the data. + * Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE, + * but exports to CSV format instead of Parquet. + * Supports parallel partition processing via pParallelDegree parameter (1-16). 
+ * File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL) + * @example + * begin + * -- With custom filename + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'exports', + * pFileName => 'my_export.csv', + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE, + * pParallelDegree => 8 -- Optional, default 1, range 1-16 + * ); + * + * -- With auto-generated filename (based on table name only) + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'OU_TOP', + * pTableName => 'AGGREGATED_ALLOTMENT', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'ARCHIVE', + * pFolderName => 'exports', + * pMinDate => DATE '2025-09-01', + * pMaxDate => DATE '2025-09-17' + * ); + * -- This will create files like: AGGREGATED_ALLOTMENT_202509.csv, etc. + * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 DEFAULT NULL, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ); + + --------------------------------------------------------------------------------------------------------------------------- + -- VERSION MANAGEMENT FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * Returns the current package version number + * return: Version string in format X.Y.Z (e.g., 
'2.1.0') + **/ + FUNCTION GET_VERSION RETURN VARCHAR2; + + /** + * Returns comprehensive build information including version, date, and author + * return: Formatted string with complete build details + **/ + FUNCTION GET_BUILD_INFO RETURN VARCHAR2; + + /** + * Returns the version history with recent changes + * return: Multi-line string with version history + **/ + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; + +END; + +/ diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/01_MARS_835_install_step1.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/01_MARS_835_install_step1.sql index 759acea..0a16f07 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/01_MARS_835_install_step1.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/01_MARS_835_install_step1.sql @@ -14,7 +14,7 @@ SET SERVEROUTPUT ON SIZE UNLIMITED SET TIMING ON -DEFINE cutoff_date = "ADD_MONTHS(SYSDATE, -6)" +DEFINE cutoff_date = "TRUNC(ADD_MONTHS(SYSDATE, -6), 'MM')" PROMPT ======================================================================== PROMPT Exporting CSDB.DEBT - Split DATA + HIST @@ -69,7 +69,11 @@ BEGIN BEGIN EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.CSDB_DEBT_ODS' INTO vRecordCount; DBMS_OUTPUT.PUT_LINE(''); - DBMS_OUTPUT.PUT_LINE('Records currently readable via external table: ' || vRecordCount); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('>>> Records currently readable via external table: ' || vRecordCount); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); EXCEPTION WHEN OTHERS THEN DBMS_OUTPUT.PUT_LINE(''); @@ -100,7 +104,7 @@ BEGIN pFolderName => 'ODS/CSDB/CSDB_DEBT', pMinDate => &cutoff_date, pMaxDate => SYSDATE, - pParallelDegree => 8, + pParallelDegree => 16, pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT', pMaxFileSize => 104857600 -- 100MB in bytes 
(safe for parallel execution, avoids ORA-04036) ); @@ -122,7 +126,7 @@ BEGIN pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/CSDB/CSDB_DEBT', pMaxDate => &cutoff_date, - pParallelDegree => 8, + pParallelDegree => 16, pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT' ); @@ -184,7 +188,11 @@ BEGIN BEGIN EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ODS.CSDB_DEBT_DAILY_ODS' INTO vRecordCount; DBMS_OUTPUT.PUT_LINE(''); - DBMS_OUTPUT.PUT_LINE('Records currently readable via external table: ' || vRecordCount); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('>>> Records currently readable via external table: ' || vRecordCount); + DBMS_OUTPUT.PUT_LINE('>>>'); + DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------'); EXCEPTION WHEN OTHERS THEN DBMS_OUTPUT.PUT_LINE(''); @@ -215,7 +223,7 @@ BEGIN pFolderName => 'ODS/CSDB/CSDB_DEBT_DAILY', pMinDate => &cutoff_date, pMaxDate => SYSDATE, - pParallelDegree => 8, + pParallelDegree => 16, pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY', pMaxFileSize => 104857600 -- 100MB in bytes (safe for parallel execution, avoids ORA-04036) ); @@ -237,7 +245,7 @@ BEGIN pBucketArea => 'ARCHIVE', pFolderName => 'ARCHIVE/CSDB/CSDB_DEBT_DAILY', pMaxDate => &cutoff_date, - pParallelDegree => 8, + pParallelDegree => 16, pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY' ); diff --git a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/04_MARS_835_verify_record_counts.sql b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/04_MARS_835_verify_record_counts.sql index d36bdac..88781c5 100644 --- a/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/04_MARS_835_verify_record_counts.sql +++ b/MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/04_MARS_835_verify_record_counts.sql @@ -52,9 +52,9 @@ BEGIN t_table_info('OU_CSDB', 'LEGACY_ISSUER_DESC_FULL', NULL, 'ODS.CSDB_ISSUER_DESC_FULL_ARCHIVE', FALSE, 
TRUE) ); - DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------------'); - DBMS_OUTPUT.PUT_LINE('Table Name Source Count DATA Count HIST Count Status'); - DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('Table Name Source Count DATA Count HIST Count Status'); + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------------------------------------'); FOR i IN 1..vTables.COUNT LOOP -- Get source table count @@ -78,7 +78,18 @@ BEGIN vTotalDataCount := vTotalDataCount + vDataCount; EXCEPTION WHEN OTHERS THEN - vDataCount := -1; + -- If source table is empty (0 records), no files were exported + -- External table returns error, treat as 0 + -- Acceptable error codes: + -- ORA-29913: error in executing ODCIEXTTABLEOPEN callout + -- ORA-29400: data cartridge error + -- KUP-13023: nothing matched wildcard query (no files in bucket) + -- NOTE: ORA-30653 (reject limit) is a real data quality error, not treated as empty + IF vSourceCount = 0 OR SQLCODE IN (-29913, -29400) OR SQLERRM LIKE '%KUP-13023%' THEN + vDataCount := 0; + ELSE + vDataCount := -1; + END IF; END; ELSE vDataCount := NULL; @@ -91,14 +102,25 @@ BEGIN vTotalHistCount := vTotalHistCount + vHistCount; EXCEPTION WHEN OTHERS THEN - vHistCount := -1; + -- If source table is empty (0 records), no files were exported + -- External table returns error, treat as 0 + -- Acceptable error codes: + -- ORA-29913: error in executing ODCIEXTTABLEOPEN callout + -- ORA-29400: data cartridge error + -- KUP-13023: nothing matched wildcard query (no files in bucket) + -- NOTE: ORA-30653 (reject limit) is a real data quality error, not treated as empty + IF vSourceCount = 0 OR SQLCODE IN (-29913, -29400) OR SQLERRM LIKE '%KUP-13023%' THEN + vHistCount := 0; + 
ELSE + vHistCount := -1; + END IF; END; -- Display results DECLARE vStatus VARCHAR2(20); - vDataDisplay VARCHAR2(15); - vHistDisplay VARCHAR2(15); + vDataDisplay VARCHAR2(17); + vHistDisplay VARCHAR2(17); BEGIN -- Format DATA count display IF vDataCount IS NULL THEN @@ -106,14 +128,14 @@ BEGIN ELSIF vDataCount = -1 THEN vDataDisplay := 'ERROR'; ELSE - vDataDisplay := TO_CHAR(vDataCount, '999,999,999'); + vDataDisplay := TO_CHAR(vDataCount, '9,999,999,999'); END IF; -- Format HIST count display IF vHistCount = -1 THEN vHistDisplay := 'ERROR'; ELSE - vHistDisplay := TO_CHAR(vHistCount, '999,999,999'); + vHistDisplay := TO_CHAR(vHistCount, '9,999,999,999'); END IF; -- Determine status @@ -143,27 +165,30 @@ BEGIN DBMS_OUTPUT.PUT_LINE( RPAD(vTables(i).source_table, 24) || - LPAD(TO_CHAR(vSourceCount, '999,999,999'), 13) || - LPAD(vDataDisplay, 13) || - LPAD(vHistDisplay, 13) || ' ' || + LPAD(TO_CHAR(vSourceCount, '9,999,999,999'), 15) || + LPAD(vDataDisplay, 15) || + LPAD(vHistDisplay, 15) || ' ' || vStatus ); END; END LOOP; - DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------------'); - DBMS_OUTPUT.PUT_LINE('TOTALS:' || LPAD(TO_CHAR(vTotalSourceCount, '999,999,999'), 17) || - LPAD(TO_CHAR(vTotalDataCount, '999,999,999'), 13) || - LPAD(TO_CHAR(vTotalHistCount, '999,999,999'), 13)); - DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------------------------------------'); + DBMS_OUTPUT.PUT_LINE( + RPAD('TOTALS', 24) || + LPAD(TO_CHAR(vTotalSourceCount, '9,999,999,999'), 15) || + LPAD(TO_CHAR(vTotalDataCount, '9,999,999,999'), 15) || + LPAD(TO_CHAR(vTotalHistCount, '9,999,999,999'), 15) + ); + DBMS_OUTPUT.PUT_LINE('-----------------------------------------------------------------------------------------'); DBMS_OUTPUT.PUT_LINE(''); 
DBMS_OUTPUT.PUT_LINE('====================================================================================='); DBMS_OUTPUT.PUT_LINE('Record Count Verification Summary'); DBMS_OUTPUT.PUT_LINE('====================================================================================='); - DBMS_OUTPUT.PUT_LINE('Total source records: ' || TO_CHAR(vTotalSourceCount, '999,999,999')); - DBMS_OUTPUT.PUT_LINE('Total DATA records: ' || TO_CHAR(vTotalDataCount, '999,999,999') || ' (last 6 months)'); - DBMS_OUTPUT.PUT_LINE('Total HIST records: ' || TO_CHAR(vTotalHistCount, '999,999,999') || ' (historical + full exports)'); + DBMS_OUTPUT.PUT_LINE('Total source records: ' || TO_CHAR(vTotalSourceCount, '9,999,999,999')); + DBMS_OUTPUT.PUT_LINE('Total DATA records: ' || TO_CHAR(vTotalDataCount, '9,999,999,999') || ' (last 6 months)'); + DBMS_OUTPUT.PUT_LINE('Total HIST records: ' || TO_CHAR(vTotalHistCount, '9,999,999,999') || ' (historical + full exports)'); DBMS_OUTPUT.PUT_LINE(''); IF vMismatchCount = 0 THEN diff --git a/MARS_Packages/REL02/MARS-1046/.gitignore b/MARS_Packages/REL02/MARS-1046/.gitignore deleted file mode 100644 index 69c2e82..0000000 --- a/MARS_Packages/REL02/MARS-1046/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -confluence/ -log/ -test/ -mock_data/ diff --git a/MARS_Packages/REL02/MARS-1046/92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql b/MARS_Packages/REL02/MARS-1046/91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql similarity index 90% rename from MARS_Packages/REL02/MARS-1046/92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql rename to MARS_Packages/REL02/MARS-1046/91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql index 47ac9c4..b53a116 100644 --- a/MARS_Packages/REL02/MARS-1046/92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql +++ b/MARS_Packages/REL02/MARS-1046/91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql @@ -1,6 +1,5 @@ 
--============================================================================================================================= --- Script: 92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql --- MARS-1046: Rollback FILE_MANAGER Package Body to Version 3.3.0 (Step 2) +-- MARS-1046: Rollback FILE_MANAGER Package Body to Version 3.3.0 --============================================================================================================================= -- Purpose: Restore FILE_MANAGER package body from MARS-1056 (version 3.3.0) -- Author: Grzegorz Michalski @@ -11,7 +10,7 @@ SET SERVEROUTPUT ON PROMPT ======================================================================== -PROMPT MARS-1046 ROLLBACK Step 2: Restoring FILE_MANAGER Package Body to v3.3.0 +PROMPT MARS-1046 ROLLBACK: Restoring CT_MRDS.FILE_MANAGER Package Body to v3.3.0 PROMPT ======================================================================== -- Execute FILE_MANAGER body from current_version (MARS-1056 backup) diff --git a/MARS_Packages/REL02/MARS-1046/91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql b/MARS_Packages/REL02/MARS-1046/92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql similarity index 89% rename from MARS_Packages/REL02/MARS-1046/91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql rename to MARS_Packages/REL02/MARS-1046/92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql index 1149b63..70d0029 100644 --- a/MARS_Packages/REL02/MARS-1046/91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql +++ b/MARS_Packages/REL02/MARS-1046/92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql @@ -1,6 +1,5 @@ --============================================================================================================================= --- Script: 91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql --- MARS-1046: Rollback FILE_MANAGER Package Specification to Version 3.3.0 (Step 1) +-- MARS-1046: Rollback FILE_MANAGER Package Specification to Version 3.3.0 
--============================================================================================================================= -- Purpose: Restore FILE_MANAGER package specification from MARS-1056 (version 3.3.0) -- Author: Grzegorz Michalski @@ -11,7 +10,7 @@ SET SERVEROUTPUT ON PROMPT ======================================================================== -PROMPT MARS-1046 ROLLBACK Step 1: Restoring FILE_MANAGER Package Spec to v3.3.0 +PROMPT MARS-1046 ROLLBACK: Restoring CT_MRDS.FILE_MANAGER Package Spec to v3.3.0 PROMPT ======================================================================== -- Execute FILE_MANAGER specification from current_version (MARS-1056 backup) diff --git a/MARS_Packages/REL02/MARS-1046/README.md b/MARS_Packages/REL02/MARS-1046/README.md index 168bcee..c3901ea 100644 --- a/MARS_Packages/REL02/MARS-1046/README.md +++ b/MARS_Packages/REL02/MARS-1046/README.md @@ -10,8 +10,8 @@ This package fixes parsing of ISO 8601 datetime formats with milliseconds and ti **Example**: - **CSV Data**: `2012-03-02T14:16:23.798+01:00` -- **Configured Format**: `YYYY-MM-DDTHH24:MI:SS.FF3TZH:TZM` (FAILS) -- **Required Format**: `YYYY-MM-DD"T"HH24:MI:SS.FF3TZH:TZM` (WORKS) +- **Configured Format**: `YYYY-MM-DDTHH24:MI:SS.FF3TZH:TZM` ❌ (fails) +- **Required Format**: `YYYY-MM-DD"T"HH24:MI:SS.FF3TZH:TZM` ✅ (works) **Root Cause**: Oracle external table FIELD_LIST requires literal characters (like 'T') to be enclosed in double quotes. The configured format in `A_COLUMN_DATE_FORMAT` table does not include these quotes, causing parsing failures. 
diff --git a/MARS_Packages/REL02/MARS-1046/install_mars1046.sql b/MARS_Packages/REL02/MARS-1046/install_mars1046.sql index ba0e8a2..5473a52 100644 --- a/MARS_Packages/REL02/MARS-1046/install_mars1046.sql +++ b/MARS_Packages/REL02/MARS-1046/install_mars1046.sql @@ -21,10 +21,9 @@ -- =================================================================== -- Dynamic spool file generation (using SYS_CONTEXT - no DBA privileges required) -host mkdir log 2>nul var filename VARCHAR2(100) BEGIN - :filename := 'log/INSTALL_MARS_1046_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; + :filename := 'INSTALL_MARS_1046_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; END; / column filename new_value _filename @@ -41,7 +40,7 @@ PROMPT MARS-1046: ISO 8601 Date Format Fix for FILE_MANAGER PROMPT ========================================================================= PROMPT PROMPT This script will: -PROMPT - Update FILE_MANAGER package specification (3.3.0 -> 3.3.1) +PROMPT - Update FILE_MANAGER package specification (3.3.0 → 3.3.1) PROMPT - Update FILE_MANAGER package body with NORMALIZE_DATE_FORMAT function PROMPT - Fix parsing of ISO 8601 formats: YYYY-MM-DDTHH24:MI:SS.FF3TZH:TZM PROMPT - Track package version in A_PACKAGE_VERSION_TRACKING diff --git a/MARS_Packages/REL02/MARS-1046/rollback_mars1046.sql b/MARS_Packages/REL02/MARS-1046/rollback_mars1046.sql index 8cc4295..9d88456 100644 --- a/MARS_Packages/REL02/MARS-1046/rollback_mars1046.sql +++ b/MARS_Packages/REL02/MARS-1046/rollback_mars1046.sql @@ -11,10 +11,9 @@ -- =================================================================== -- Dynamic spool file generation (using SYS_CONTEXT - no DBA privileges required) -host mkdir log 2>nul var filename VARCHAR2(100) BEGIN - :filename := 'log/ROLLBACK_MARS_1046_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; + :filename := 
'ROLLBACK_MARS_1046_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; END; / column filename new_value _filename @@ -50,27 +49,27 @@ WHENEVER SQLERROR CONTINUE PROMPT PROMPT ========================================================================= -PROMPT Step 1: Restore FILE_MANAGER Package Specification (v3.3.0) +PROMPT Step 1: Restore FILE_MANAGER Package Body (v3.3.0) PROMPT ========================================================================= -@@91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql +@@91_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql PROMPT PROMPT ========================================================================= -PROMPT Step 2: Restore FILE_MANAGER Package Body (v3.3.0) +PROMPT Step 2: Restore FILE_MANAGER Package Specification (v3.3.0) PROMPT ========================================================================= -@@92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_BODY.sql +@@92_MARS_1046_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql PROMPT PROMPT ========================================================================= PROMPT Step 3: Track Rollback Version PROMPT ========================================================================= -@@track_package_versions.sql +@@test/track_package_versions.sql PROMPT PROMPT ========================================================================= PROMPT Step 4: Verify Package Status PROMPT ========================================================================= -@@verify_packages_version.sql +@@test/verify_packages_version.sql PROMPT PROMPT ========================================================================= diff --git a/MARS_Packages/REL03/MARS-1057/.gitignore b/MARS_Packages/REL03/MARS-1057/.gitignore new file mode 100644 index 0000000..da39485 --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/.gitignore @@ -0,0 +1,6 @@ +# Exclude temporary folders from version control +confluence/ +log/ +test/ +mock_data/ +*.log diff --git 
a/MARS_Packages/REL03/MARS-1057/01_MARS_1057_install_CT_MRDS_FILE_MANAGER_SPEC.sql b/MARS_Packages/REL03/MARS-1057/01_MARS_1057_install_CT_MRDS_FILE_MANAGER_SPEC.sql new file mode 100644 index 0000000..0bdd884 --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/01_MARS_1057_install_CT_MRDS_FILE_MANAGER_SPEC.sql @@ -0,0 +1,29 @@ +--============================================================================================================================= +-- MARS-1057: Install FILE_MANAGER Package Specification v3.4.0 +--============================================================================================================================= +-- Purpose: Deploy FILE_MANAGER package specification with new batch external table creation procedures +-- Author: Grzegorz Michalski +-- Date: 2025-11-27 +-- Related: MARS-1057 Batch External Table Creation +--============================================================================================================================= + +SET SERVEROUTPUT ON + +PROMPT ======================================================================== +PROMPT Installing FILE_MANAGER Package Specification v3.4.0 +PROMPT ======================================================================== + +@@new_version/FILE_MANAGER.pkg + +-- Verify compilation status (check specific schema when installing as ADMIN) +SELECT object_name, object_type, status +FROM ALL_OBJECTS +WHERE OWNER = 'CT_MRDS' + AND object_name = 'FILE_MANAGER' + AND object_type = 'PACKAGE'; + +PROMPT SUCCESS: FILE_MANAGER package specification installed + +--============================================================================================================================= +-- End of Script +--============================================================================================================================= diff --git a/MARS_Packages/REL03/MARS-1057/02_MARS_1057_install_CT_MRDS_FILE_MANAGER_BODY.sql 
b/MARS_Packages/REL03/MARS-1057/02_MARS_1057_install_CT_MRDS_FILE_MANAGER_BODY.sql new file mode 100644 index 0000000..2f4e22f --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/02_MARS_1057_install_CT_MRDS_FILE_MANAGER_BODY.sql @@ -0,0 +1,36 @@ +--============================================================================================================================= +-- MARS-1057: Install FILE_MANAGER Package Body v3.4.0 +--============================================================================================================================= +-- Purpose: Deploy FILE_MANAGER package body with implementation of batch external table creation procedures +-- Author: Grzegorz Michalski +-- Date: 2025-11-27 +-- Related: MARS-1057 Batch External Table Creation +--============================================================================================================================= + +SET SERVEROUTPUT ON + +PROMPT ======================================================================== +PROMPT Installing FILE_MANAGER Package Body v3.4.0 +PROMPT ======================================================================== + +@@new_version/FILE_MANAGER.pkb + +-- Verify compilation status (check specific schema when installing as ADMIN) +SELECT object_name, object_type, status +FROM ALL_OBJECTS +WHERE OWNER = 'CT_MRDS' + AND object_name = 'FILE_MANAGER' + AND object_type = 'PACKAGE BODY'; + +-- Check for compilation errors +SELECT * +FROM ALL_ERRORS +WHERE OWNER = 'CT_MRDS' + AND NAME = 'FILE_MANAGER' + AND TYPE = 'PACKAGE BODY'; + +PROMPT SUCCESS: FILE_MANAGER package body installed + +--============================================================================================================================= +-- End of Script +--============================================================================================================================= diff --git a/MARS_Packages/REL03/MARS-1057/91_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql 
b/MARS_Packages/REL03/MARS-1057/91_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql new file mode 100644 index 0000000..f5f2fb7 --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/91_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql @@ -0,0 +1,29 @@ +--============================================================================================================================= +-- MARS-1057: Rollback FILE_MANAGER Package Specification to v3.3.0 +--============================================================================================================================= +-- Purpose: Restore FILE_MANAGER package specification to version before MARS-1057 changes +-- Author: Grzegorz Michalski +-- Date: 2025-11-27 +-- Related: MARS-1057 Batch External Table Creation (ROLLBACK) +--============================================================================================================================= + +SET SERVEROUTPUT ON + +PROMPT ======================================================================== +PROMPT Rolling back FILE_MANAGER Package Specification to v3.3.0 +PROMPT ======================================================================== + +@@current_version/FILE_MANAGER.pkg + +-- Verify compilation status (check specific schema when installing as ADMIN) +SELECT object_name, object_type, status +FROM ALL_OBJECTS +WHERE OWNER = 'CT_MRDS' + AND object_name = 'FILE_MANAGER' + AND object_type = 'PACKAGE'; + +PROMPT SUCCESS: FILE_MANAGER package specification rolled back to v3.3.0 + +--============================================================================================================================= +-- End of Script +--============================================================================================================================= diff --git a/MARS_Packages/REL03/MARS-1057/92_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_BODY.sql b/MARS_Packages/REL03/MARS-1057/92_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_BODY.sql new file mode 100644 index 
0000000..b4291e8 --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/92_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_BODY.sql @@ -0,0 +1,36 @@ +--============================================================================================================================= +-- MARS-1057: Rollback FILE_MANAGER Package Body to v3.3.0 +--============================================================================================================================= +-- Purpose: Restore FILE_MANAGER package body to version before MARS-1057 changes +-- Author: Grzegorz Michalski +-- Date: 2025-11-27 +-- Related: MARS-1057 Batch External Table Creation (ROLLBACK) +--============================================================================================================================= + +SET SERVEROUTPUT ON + +PROMPT ======================================================================== +PROMPT Rolling back FILE_MANAGER Package Body to v3.3.0 +PROMPT ======================================================================== + +@@current_version/FILE_MANAGER.pkb + +-- Verify compilation status (check specific schema when installing as ADMIN) +SELECT object_name, object_type, status +FROM ALL_OBJECTS +WHERE OWNER = 'CT_MRDS' + AND object_name = 'FILE_MANAGER' + AND object_type = 'PACKAGE BODY'; + +-- Check for compilation errors +SELECT * +FROM ALL_ERRORS +WHERE OWNER = 'CT_MRDS' + AND NAME = 'FILE_MANAGER' + AND TYPE = 'PACKAGE BODY'; + +PROMPT SUCCESS: FILE_MANAGER package body rolled back to v3.3.0 + +--============================================================================================================================= +-- End of Script +--============================================================================================================================= diff --git a/MARS_Packages/REL03/MARS-1057/README.md b/MARS_Packages/REL03/MARS-1057/README.md new file mode 100644 index 0000000..cf2f016 --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/README.md @@ -0,0 
+1,292 @@ +# MARS-1057: Batch External Table Creation + +## Overview +This MARS package adds batch external table creation capabilities to the FILE_MANAGER package, enabling automatic creation of external table sets (INBOX, ODS, ARCHIVE) based on A_SOURCE_FILE_CONFIG metadata. + +**Jira Issue:** MARS-1057 +**Package Version:** FILE_MANAGER 3.4.0 +**Author:** Grzegorz Michalski +**Date:** 2025-11-27 + +## Contents +- `install_mars1057.sql` - Master installation script with SPOOL logging +- `rollback_mars1057.sql` - Master rollback script +- `01_MARS_1057_install_CT_MRDS_FILE_MANAGER_SPEC.sql` - Install package specification +- `02_MARS_1057_install_CT_MRDS_FILE_MANAGER_BODY.sql` - Install package body +- `91_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_BODY.sql` - Rollback package body +- `92_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql` - Rollback package specification +- `track_package_versions.sql` - Universal package version tracking +- `verify_packages_version.sql` - Universal package verification +- `current_version/` - FILE_MANAGER v3.3.0 (before MARS-1057) +- `new_version/` - FILE_MANAGER v3.4.0 (after MARS-1057) +- `.gitignore` - Git exclusions for temporary files + +## Prerequisites +- Oracle Database 23ai +- FILE_MANAGER package v3.3.0 installed +- ENV_MANAGER package v3.1.0+ with version tracking +- ADMIN user access for deployment +- ODS.FILE_MANAGER_ODS package available + +## New Features + +### 1. CREATE_EXTERNAL_TABLES_SET +Creates a complete set of 3 external tables (INBOX, ODS, ARCHIVE) for a single configuration from A_SOURCE_FILE_CONFIG. 
+
+**Signature:**
+```sql
+PROCEDURE CREATE_EXTERNAL_TABLES_SET (
+    pSourceFileConfigKey IN NUMBER,
+    pRecreate IN BOOLEAN DEFAULT FALSE
+);
+```
+
+**Example:**
+```sql
+BEGIN
+    FILE_MANAGER.CREATE_EXTERNAL_TABLES_SET(
+        pSourceFileConfigKey => 123,
+        pRecreate => FALSE
+    );
+END;
+/
+```
+
+**Features:**
+- Automatic table naming: `{TABLE_ID}_{INBOX|ODS|ARCHIVE}`
+- Official path patterns compliance
+- Optional drop and recreate
+- Full ENV_MANAGER logging
+- Error handling with detailed messages
+
+### 2. CREATE_EXTERNAL_TABLES_BATCH
+Creates external table sets for multiple configurations based on filter criteria.
+
+**Signature:**
+```sql
+PROCEDURE CREATE_EXTERNAL_TABLES_BATCH (
+    pSourceKey IN VARCHAR2 DEFAULT NULL,
+    pSourceFileId IN VARCHAR2 DEFAULT NULL,
+    pTableId IN VARCHAR2 DEFAULT NULL,
+    pRecreate IN BOOLEAN DEFAULT FALSE
+);
+```
+
+**Examples:**
+```sql
+-- All external tables for C2D source
+BEGIN
+    FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH(
+        pSourceKey => 'C2D'
+    );
+END;
+/
+
+-- Recreate all external tables
+BEGIN
+    FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH(
+        pRecreate => TRUE
+    );
+END;
+/
+
+-- Specific table across all sources
+BEGIN
+    FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH(
+        pTableId => 'A_UC_DISSEM_METADATA_LOADS'
+    );
+END;
+/
+```
+
+**Features:**
+- Filters only INPUT type files
+- Continues processing on errors
+- Returns summary (Total/Processed/Failed)
+- Comprehensive logging
+
+## Installation
+
+### Option 1: Master Script (Recommended)
+```powershell
+# IMPORTANT: Execute as ADMIN user
+Get-Content "MARS_Packages/REL03/MARS-1057/install_mars1057.sql" | sql "ADMIN/password@service"
+
+# Log file created: log/INSTALL_MARS_1057_<date>_<time>.log
+```
+
+**Installation Steps:**
+1. Install FILE_MANAGER package specification v3.4.0
+2. Install FILE_MANAGER package body v3.4.0
+3. Track version in A_PACKAGE_VERSION_TRACKING
+4. Verify all tracked packages for untracked changes
+
+### Option 2: Individual Scripts
+```powershell
+# IMPORTANT: Execute as ADMIN user
+Get-Content "01_MARS_1057_install_CT_MRDS_FILE_MANAGER_SPEC.sql" | sql "ADMIN/password@service"
+Get-Content "02_MARS_1057_install_CT_MRDS_FILE_MANAGER_BODY.sql" | sql "ADMIN/password@service"
+Get-Content "track_package_versions.sql" | sql "ADMIN/password@service"
+Get-Content "verify_packages_version.sql" | sql "ADMIN/password@service"
+```
+
+## Verification
+```sql
+-- Check package version
+SELECT CT_MRDS.FILE_MANAGER.GET_VERSION() FROM DUAL;
+-- Expected: 3.4.0
+
+-- Check for errors (ADMIN user checks specific schema)
+SELECT * FROM ALL_ERRORS
+WHERE OWNER = 'CT_MRDS' AND NAME = 'FILE_MANAGER';
+-- Expected: No rows
+
+-- Verify new procedures exist
+SELECT procedure_name
+FROM ALL_PROCEDURES
+WHERE OWNER = 'CT_MRDS'
+  AND object_name = 'FILE_MANAGER'
+  AND procedure_name IN ('CREATE_EXTERNAL_TABLES_SET', 'CREATE_EXTERNAL_TABLES_BATCH');
+-- Expected: 2 rows
+
+-- Check for untracked changes
+SELECT CT_MRDS.ENV_MANAGER.CHECK_PACKAGE_CHANGES('CT_MRDS', 'FILE_MANAGER') FROM DUAL;
+-- Expected: OK: Package CT_MRDS.FILE_MANAGER has not changed.
+```
+
+## Rollback
+```powershell
+# IMPORTANT: Execute as ADMIN user
+Get-Content "MARS_Packages/REL03/MARS-1057/rollback_mars1057.sql" | sql "ADMIN/password@service"
+```
+
+**Rollback restores:**
+- FILE_MANAGER package specification v3.3.0
+- FILE_MANAGER package body v3.3.0
+- Removes CREATE_EXTERNAL_TABLES_SET procedure
+- Removes CREATE_EXTERNAL_TABLES_BATCH procedure
+
+## Expected Changes
+- **FILE_MANAGER package**: v3.3.0 → v3.4.0
+- **New procedures**: CREATE_EXTERNAL_TABLES_SET, CREATE_EXTERNAL_TABLES_BATCH
+- **SPEC size**: +3.3 KB (declaration and documentation)
+- **BODY size**: +8.6 KB (implementation with logging)
+
+## Testing
+
+### Test 1: Create Single Set
+```sql
+BEGIN
+    FILE_MANAGER.CREATE_EXTERNAL_TABLES_SET(
+        pSourceFileConfigKey => 123
+    );
+END;
+/
+
+-- Verify tables created
+SELECT table_name
+FROM ALL_TABLES
+WHERE OWNER = 'ODS'
+  AND (table_name LIKE '%_INBOX'
+   OR table_name LIKE '%_ODS'
+   OR table_name LIKE '%_ARCHIVE');
+```
+
+### Test 2: Batch Creation
+```sql
+BEGIN
+    FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH(
+        pSourceKey => 'C2D'
+    );
+END;
+/
+
+-- Check process log for results
+SELECT *
+FROM CT_MRDS.A_PROCESS_LOG
+WHERE LOG_TIMESTAMP > SYSDATE - INTERVAL '1' HOUR
+  AND PROCEDURE_NAME LIKE '%CREATE_EXTERNAL_TABLES%'
+ORDER BY LOG_TIMESTAMP DESC;
+```
+
+### Test 3: Recreate Existing Tables
+```sql
+BEGIN
+    FILE_MANAGER.CREATE_EXTERNAL_TABLES_SET(
+        pSourceFileConfigKey => 123,
+        pRecreate => TRUE
+    );
+END;
+/
+```
+
+## Usage Examples
+
+### Example 1: Setup All External Tables for New Source
+```sql
+-- 1. Add source configuration
+CALL FILE_MANAGER.ADD_SOURCE('LM', 'Liquidity Management');
+
+-- 2. 
Add file configurations +CALL FILE_MANAGER.ADD_SOURCE_FILE_CONFIG( + pSourceKey => 'LM', + pSourceFileType => 'INPUT', + pSourceFileId => 'STANDING_FACILITIES', + pSourceFileDesc => 'Standing Facilities Data', + pSourceFileNamePattern => 'SF_*.csv', + pTableId => 'STANDING_FACILITIES', + pTemplateTableName => 'CT_ET_TEMPLATES.LM_STANDING_FACILITIES', + pEncoding => 'UTF8' +); + +-- 3. Create all external tables for this source +BEGIN + FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH( + pSourceKey => 'LM' + ); +END; +/ +``` + +### Example 2: Recreate All External Tables +```sql +-- Useful after bucket URI changes or template table modifications +BEGIN + FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH( + pRecreate => TRUE + ); +END; +/ +``` + +### Example 3: Create Tables for Specific File Type +```sql +BEGIN + FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH( + pSourceFileId => 'UC_DISSEM' + ); +END; +/ +``` + +## Known Issues +None + +## Dependencies +- **CT_MRDS.ENV_MANAGER** - Logging and error handling +- **ODS.FILE_MANAGER_ODS** - AUTHID DEFINER wrapper for external table creation +- **CT_MRDS.A_SOURCE_FILE_CONFIG** - Source file configuration metadata +- **CT_ET_TEMPLATES schema** - Template table definitions + +## Related +- **MARS-1056** - VARCHAR2 CHAR/BYTE semantics fix +- **MARS-1049** - CSV encoding support +- **Package Deployment Guide** - Standard deployment procedures +- **Tables Setup Guide** - External table configuration guide + +## Notes +- All installations must be executed as ADMIN user +- Use `ALL_*` views instead of `USER_*` views for verification +- Master scripts include SPOOL logging for audit trail +- ACCEPT validation prevents accidental execution +- Follows official path patterns: INBOX (3-level), ODS (2-level), ARCHIVE (2-level) diff --git a/MARS_Packages/REL03/MARS-1057/current_version/FILE_MANAGER.pkb b/MARS_Packages/REL03/MARS-1057/current_version/FILE_MANAGER.pkb new file mode 100644 index 0000000..b2f338b --- /dev/null +++ 
b/MARS_Packages/REL03/MARS-1057/current_version/FILE_MANAGER.pkb @@ -0,0 +1,2008 @@ +create or replace PACKAGE BODY CT_MRDS.FILE_MANAGER +AS + + ---------------------------------------------------------------------------------------------------- + -- PRIVATE FUNCTION: NORMALIZE_DATE_FORMAT + ---------------------------------------------------------------------------------------------------- + /** + * Purpose: Normalize Oracle date format strings for use in external tables + * + * Problem: ISO 8601 formats like 'YYYY-MM-DDTHH24:MI:SS.FF3TZH:TZM' fail because + * literal character 'T' must be enclosed in double quotes for Oracle + * external table DATE column definitions. + * + * Solution: Detect unquoted 'T' separator and wrap it in double quotes + * + * Parameters: + * pDateFormat - Original date format from A_COLUMN_DATE_FORMAT table + * + * Returns: Normalized format with quoted 'T' if applicable + * + * Examples: + * Input: 'YYYY-MM-DDTHH24:MI:SS.FF3TZH:TZM' + * Output: 'YYYY-MM-DD"T"HH24:MI:SS.FF3TZH:TZM' + * + * Input: 'DD/MM/YYYY HH24:MI:SS' (no T) + * Output: 'DD/MM/YYYY HH24:MI:SS' (unchanged) + * + * Input: 'YYYY-MM-DD"T"HH24:MI:SS' (already quoted) + * Output: 'YYYY-MM-DD"T"HH24:MI:SS' (unchanged) + * + * Author: Grzegorz Michalski + * Date: 2025-11-27 + * Version: 1.0.0 (MARS-1046) + */ + FUNCTION NORMALIZE_DATE_FORMAT(pDateFormat VARCHAR2) RETURN VARCHAR2 IS + vNormalizedFormat VARCHAR2(500); + BEGIN + -- Return NULL if input is NULL + IF pDateFormat IS NULL THEN + RETURN NULL; + END IF; + + vNormalizedFormat := pDateFormat; + + -- Check if 'T' separator exists and is NOT already quoted + -- Pattern: [YMD]T[HM] (date component + T + time component) + IF INSTR(vNormalizedFormat, '"T"') = 0 AND + REGEXP_LIKE(vNormalizedFormat, '[YMD]T[HM]') THEN + + -- Wrap 'T' in double quotes using regex replace + -- Pattern matches: (date format char) + T + (time format char) + -- Replacement: \1 + "T" + \2 + vNormalizedFormat := REGEXP_REPLACE(vNormalizedFormat, 
'([YMD])T([HM])', '\1"T"\2'); + END IF; + + RETURN vNormalizedFormat; + + EXCEPTION + WHEN OTHERS THEN + -- If normalization fails, return original format (safety fallback) + RETURN pDateFormat; + END NORMALIZE_DATE_FORMAT; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_SOURCE_FILE_CONFIG(pFileUri IN VARCHAR2 DEFAULT NULL + , pSourceFileReceivedKey IN NUMBER DEFAULT NULL + , pSourceFileConfigKey IN NUMBER DEFAULT NULL) + RETURN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE + IS + vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pFileUri => '''||nvl(pFileUri,'NULL')||'''' + ,'pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey),'NULL') + ,'pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey),'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + + BEGIN + IF pFileUri IS NOT NULL THEN + SELECT * + INTO vSourceFileConfig + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE REGEXP_LIKE(pFileUri, A_SOURCE_KEY||'/'||SOURCE_FILE_ID||'/'||TABLE_ID||'/'||SOURCE_FILE_NAME_PATTERN); + ELSIF pSourceFileReceivedKey IS NOT NULL THEN + SELECT T.* + INTO vSourceFileConfig + FROM CT_MRDS.A_SOURCE_FILE_CONFIG T, CT_MRDS.A_SOURCE_FILE_RECEIVED R + WHERE T.A_SOURCE_FILE_CONFIG_KEY = R.A_SOURCE_FILE_CONFIG_KEY + AND R.A_SOURCE_FILE_RECEIVED_KEY = pSourceFileReceivedKey; + ELSIF pSourceFileConfigKey IS NOT NULL THEN + SELECT * + INTO vSourceFileConfig + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EMPTY_FILEURI_AND_RECKEY, ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY); + END IF; + -- Set global package variable vgSourceFileConfigKey - used in error messages + vgSourceFileConfigKey := vSourceFileConfig.A_SOURCE_FILE_CONFIG_KEY; + EXCEPTION + WHEN 
ENV_MANAGER.ERR_EMPTY_FILEURI_AND_RECKEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EMPTY_FILEURI_AND_RECKEY, ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY); + + WHEN NO_DATA_FOUND THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_MATCH_FOR_FILEURI, ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI); + + WHEN TOO_MANY_ROWS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MULTIPLE_MATCH_FOR_SRCFILE, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_MATCH_FOR_SRCFILE, ENV_MANAGER.MSG_MULTIPLE_MATCH_FOR_SRCFILE); + + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_UNKNOWN, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END; + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + RETURN vSourceFileConfig; + + END GET_SOURCE_FILE_CONFIG; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey IN NUMBER DEFAULT NULL) + -- + -- Get source file received info + -- + RETURN tSourceFileReceived + IS + vSourceFileReceived tSourceFileReceived; + vBucket VARCHAR2(400); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey),'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + BEGIN + SELECT R.A_SOURCE_FILE_RECEIVED_KEY, R.A_SOURCE_FILE_CONFIG_KEY, + 'INBOX'||'/'||T.A_SOURCE_KEY||'/'||T.SOURCE_FILE_ID||'/'||T.TABLE_ID||'/' as SOURCE_FILE_PREFIX_INBOX, + 'ODS'||'/'||T.A_SOURCE_KEY||'/'||T.TABLE_ID||'/' as 
SOURCE_FILE_PREFIX_ODS, + 'QUARANTINE'||'/'||T.A_SOURCE_KEY||'/'||T.TABLE_ID||'/' as SOURCE_FILE_PREFIX_QUARANTINE, + 'ARCHIVE'||'/'||T.A_SOURCE_KEY||'/'||T.SOURCE_FILE_ID||'/' as SOURCE_FILE_PREFIX_ARCHIVE, + R.SOURCE_FILE_NAME, + R.RECEPTION_DATE, R.PROCESSING_STATUS, R.EXTERNAL_TABLE_NAME + INTO vSourceFileReceived + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED R, CT_MRDS.A_SOURCE_FILE_CONFIG T + WHERE R.A_SOURCE_FILE_CONFIG_KEY = T.A_SOURCE_FILE_CONFIG_KEY + AND A_SOURCE_FILE_RECEIVED_KEY = pSourceFileReceivedKey; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE); + WHEN TOO_MANY_ROWS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MULTI_CONFIG_FOR_RECEIVED_FILE, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTI_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_MULTI_CONFIG_FOR_RECEIVED_FILE); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + END; + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + + RETURN vSourceFileReceived; + + END GET_SOURCE_FILE_RECEIVED_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION REGISTER_SOURCE_FILE_RECEIVED(pSourceFileReceivedName IN VARCHAR2) + RETURN PLS_INTEGER + -- + -- Register a newly received source file A_SOURCE_FILE_RECEIVED + -- This overload automatically determines source file type from the file name + -- + IS + vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + vSourceFileReceivedKey PLS_INTEGER; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := 
ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedName => '''||nvl(pSourceFileReceivedName, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO',vParameters); + + vSourceFileConfig := GET_SOURCE_FILE_CONFIG(pSourceFileReceivedName); + vSourceFileReceivedKey := REGISTER_SOURCE_FILE_RECEIVED(pSourceFileReceivedName, vSourceFileConfig); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + RETURN vSourceFileReceivedKey; + EXCEPTION + + WHEN ENV_MANAGER.ERR_EMPTY_FILEURI_AND_RECKEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EMPTY_FILEURI_AND_RECKEY, ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY); + + WHEN ENV_MANAGER.ERR_NO_CONFIG_MATCH_FOR_FILEURI THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_MATCH_FOR_FILEURI, ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI); + + WHEN ENV_MANAGER.ERR_FILE_NOT_EXISTS_ON_CLOUD THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_EXISTS_ON_CLOUD, ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD); + + WHEN ENV_MANAGER.ERR_FILE_ALREADY_REGISTERED THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_FILE_ALREADY_REGISTERED, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_ALREADY_REGISTERED, ENV_MANAGER.MSG_FILE_ALREADY_REGISTERED); + + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_UNKNOWN, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END REGISTER_SOURCE_FILE_RECEIVED; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION REGISTER_SOURCE_FILE_RECEIVED( + pSourceFileReceivedName IN VARCHAR2 + ,pSourceFileConfig IN 
CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE) + RETURN PLS_INTEGER + -- + -- Register a newly received source file A_SOURCE_FILE_RECEIVED + -- + IS + vExternalTableName VARCHAR2(200); + vDirName VARCHAR2(1000); + vFileName VARCHAR2(1000); + vChecksum A_SOURCE_FILE_RECEIVED.CHECKSUM%TYPE; + vCreated A_SOURCE_FILE_RECEIVED.CREATED%TYPE; + vBytes A_SOURCE_FILE_RECEIVED.BYTES%TYPE; + vSourceFileReceivedKey PLS_INTEGER; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vRow CT_MRDS.A_SOURCE_FILE_RECEIVED%ROWTYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedName => '''||nvl(pSourceFileReceivedName, 'NULL')||'''' + ,'pSourceFileConfig => '||'tSourceFileConfig record type')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + vDirName := REGEXP_SUBSTR(pSourceFileReceivedName, '(.*/)(.*)', 1, 1, NULL, 1); + -- Remove prefix from file name + vFileName := REGEXP_SUBSTR(pSourceFileReceivedName,'[^/]*$'); + + ENV_MANAGER.LOG_PROCESS_EVENT('gvCredentialName','DEBUG',ENV_MANAGER.gvCredentialName); + ENV_MANAGER.LOG_PROCESS_EVENT('gvInboxBucketUri','DEBUG',ENV_MANAGER.gvInboxBucketUri); + ENV_MANAGER.LOG_PROCESS_EVENT('vDirName','DEBUG',vDirName); + + SELECT + checksum, created, bytes + INTO + vChecksum, vCreated, vBytes + FROM DBMS_CLOUD.LIST_OBJECTS(ENV_MANAGER.gvCredentialName, + ENV_MANAGER.gvInboxBucketUri || vDirName + ) + WHERE object_name = vFileName + ; + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + vExternalTableName := REPLACE( + REGEXP_SUBSTR(pSourceFileConfig.TEMPLATE_TABLE_NAME||'_'||vSourceFileReceivedKey, + '\..*'), + '.',''); + + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED + (A_SOURCE_FILE_RECEIVED_KEY, A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, RECEPTION_DATE, + PROCESSING_STATUS, EXTERNAL_TABLE_NAME, + CHECKSUM, CREATED, BYTES) + VALUES (vSourceFileReceivedKey, pSourceFileConfig.A_SOURCE_FILE_CONFIG_KEY, + vFileName, SYSDATE, + 'RECEIVED', 
vExternalTableName, + vChecksum, vCreated, vBytes); + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + RETURN vSourceFileReceivedKey; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vgMsgTmp := ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD + ||cgBL||' '||'File: '||ENV_MANAGER.gvInboxBucketUri || vDirName || vFileName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_EXISTS_ON_CLOUD, vgMsgTmp); + + WHEN DUP_VAL_ON_INDEX THEN + select * into vRow + from CT_MRDS.A_SOURCE_FILE_RECEIVED + where CHECKSUM = vChecksum + and CREATED = vCreated + and BYTES = vBytes + ; + vgMsgTmp := ENV_MANAGER.MSG_FILE_ALREADY_REGISTERED + ||cgBL||' '||'Details about existing File: ' + ||cgBL||' '||'-------------------------' + ||cgBL||' '||'A_SOURCE_FILE_RECEIVED_KEY = '||vRow.A_SOURCE_FILE_RECEIVED_KEY + ||cgBL||' '||'A_SOURCE_FILE_CONFIG_KEY = '||vRow.A_SOURCE_FILE_CONFIG_KEY + ||cgBL||' '||'SOURCE_FILE_NAME = '||vRow.SOURCE_FILE_NAME + ||cgBL||' '||'CHECKSUM = '||vRow.CHECKSUM + ||cgBL||' '||'CREATED = '||vRow.CREATED + ||cgBL||' '||'BYTES = '||vRow.BYTES + ||cgBL||' '||'RECEPTION_DATE = '||vRow.RECEPTION_DATE + ||cgBL||' '||'PROCESSING_STATUS = '||vRow.PROCESSING_STATUS + ||cgBL||' '||'EXTERNAL_TABLE_NAME = '||vRow.EXTERNAL_TABLE_NAME + ||cgBL||' '||'-------------------------' + ||cgBL||' '||'There cannot be two files with the same values for (CHECKSUM, CREATED, BYTES)' + ; + + +-- vChecksum, vCreated, vBytes + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_ALREADY_REGISTERED, vgMsgTmp); + + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END 
REGISTER_SOURCE_FILE_RECEIVED; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey IN PLS_INTEGER, pStatus IN VARCHAR2) + -- + -- Change status of file in the A_SOURCE_FILE_RECEIVED table + -- + IS + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey),'NULL') + ,'pStatus => '''||nvl(pStatus, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + UPDATE CT_MRDS.A_SOURCE_FILE_RECEIVED + SET PROCESSING_STATUS=pStatus + WHERE A_SOURCE_FILE_RECEIVED_KEY=pSourceFileReceivedKey; + COMMIT; + ENV_MANAGER.LOG_PROCESS_EVENT('File status changed to '||pStatus,'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + + END SET_SOURCE_FILE_RECEIVED_STATUS; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_EXTERNAL_TABLE_COLUMNS(pTargetTableTemplate IN VARCHAR2) + RETURN CLOB + -- + -- Create list of columns for DBMS_CLOUD.CREATE_EXTERNAL_TABLE from existing template table + -- + IS + vColumnList CLOB; + vTableName VARCHAR2(200); + vSchemaName VARCHAR2(200); + BEGIN + vSchemaName := REPLACE(REGEXP_SUBSTR(pTargetTableTemplate,'.*\.'),'.',''); + vTableName := REPLACE(REGEXP_SUBSTR(pTargetTableTemplate,'\..*'),'.',''); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'SQLTERMINATOR', True); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'COLLATION_CLAUSE', 'NEVER'); + 
DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'REF_CONSTRAINTS', False); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'STORAGE', False); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'TABLESPACE', False); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'SEGMENT_ATTRIBUTES', False); + vColumnList := RTRIM( + LTRIM( + REGEXP_SUBSTR(DBMS_METADATA.GET_DDL('TABLE', vTableName, vSchemaName),'\(.*\)',1,1,'mn'), + '('), + ')'); + RETURN vColumnList; + END GET_EXTERNAL_TABLE_COLUMNS; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE CREATE_EXTERNAL_TABLE ( + pTableName IN VARCHAR2, + pTemplateTableName IN VARCHAR2, + pPrefix IN VARCHAR2, + pBucketUri IN VARCHAR2 DEFAULT ENV_MANAGER.gvInboxBucketUri, + pFileName IN VARCHAR2 DEFAULT NULL, + pDelimiter IN VARCHAR2 DEFAULT ',', + pEncoding IN VARCHAR2 DEFAULT NULL -- MARS-1049: NEW PARAMETER FOR FILE ENCODING + ) + -- + -- Create external table for a single source file to validate the file structure + -- + IS + vTableName VARCHAR2(200); + vColumnList CLOB; + vFieldList CLOB; + vFormat VARCHAR2(200); + + vPrefix VARCHAR2(200); + vFileName VARCHAR2(1000); + vFileExtension VARCHAR2(200); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pPrefix => '''||nvl(pPrefix, 'NULL')||'''' + ,'pBucketUri => '''||nvl(pBucketUri, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 'NULL')||'''' + ,'pDelimiter => '''||nvl(pDelimiter, 'NULL')||'''' + ,'pEncoding => '''||nvl(pEncoding, 'NULL')||'''' -- MARS-1049: NOWY + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + -- Strip off leading and trailing slashes from prefix + vPrefix := TRIM(BOTH '/' FROM pPrefix); + + -- Generate column and field list from template table + GENERATE_EXTERNAL_TABLE_PARAMS (pTemplateTableName, 
vColumnList, vFieldList); + + --vFormat evaluation based on pBucketUri first, then pPrefix + -- Archive bucket should use parquet + implicit partitioning regardless of prefix + IF INSTR(pBucketUri, ENV_MANAGER.gvArchiveBucketName)>0 THEN + vFormat := '{"type": "parquet" + ,"implicit_partition_type": "hive" + ,"implicit_partition_columns":["PARTITION_YEAR","PARTITION_MONTH"]}'; + vColumnList := vColumnList||cgBL||' , "PARTITION_YEAR" varchar2(4)'||cgBL||', "PARTITION_MONTH" varchar2(2)'; + vFieldList := NULL; + vFileExtension := '.parquet'; + + -- For INBOX, ODS, and other ARCHIVE prefixes (not in archive bucket) use CSV + ELSIF SUBSTR(pPrefix,1,5) = 'INBOX' OR SUBSTR(pPrefix,1,3) = 'ODS' + OR SUBSTR(pPrefix,1,7) = 'ARCHIVE' + THEN + -- MARS-1049: Create format with encoding if specified + IF pDelimiter = '|' THEN + IF pEncoding IS NOT NULL AND LENGTH(TRIM(pEncoding)) > 0 THEN + vFormat := json_object( + 'delimiter' VALUE '|', + 'skipheaders' VALUE '1', + 'characterset' VALUE pEncoding + ); + ELSE + vFormat := json_object('delimiter' VALUE '|', 'skipheaders' VALUE '1'); + END IF; + ELSE + IF pEncoding IS NOT NULL AND LENGTH(TRIM(pEncoding)) > 0 THEN + vFormat := json_object( + 'type' VALUE 'CSV', + 'skipheaders' VALUE '1', + 'ignoremissingcolumns' VALUE 'true', + 'characterset' VALUE pEncoding + ); + ELSE + vFormat := json_object('type' VALUE 'CSV', 'skipheaders' VALUE '1', 'ignoremissingcolumns' value 'true'); + END IF; + END IF; + + vFileExtension := '.csv'; + + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN_PREFIX, ENV_MANAGER.MSG_UNKNOWN_PREFIX); + END IF; + + -- No filename give: Match all csv files + IF pFileName IS NOT NULL THEN + vFileName := pFileName; + ELSE + vFileName := pBucketUri||vPrefix||'/*'||vFileExtension; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('pTableName', 'DEBUG', pTableName); + ENV_MANAGER.LOG_PROCESS_EVENT('ENV_MANAGER.vpCredentialName', 'DEBUG', ENV_MANAGER.gvCredentialName); + ENV_MANAGER.LOG_PROCESS_EVENT('vFileName', 
'DEBUG', vFileName); + ENV_MANAGER.LOG_PROCESS_EVENT('vColumnList', 'DEBUG', vColumnList); + ENV_MANAGER.LOG_PROCESS_EVENT('vFieldList', 'DEBUG', vFieldList); + ENV_MANAGER.LOG_PROCESS_EVENT('vFormat', 'DEBUG', vFormat); + + -- Pre-validation: Check CSV column count for CSV files only + IF SUBSTR(pPrefix,1,5) = 'INBOX' AND pFileName IS NOT NULL THEN + DECLARE + vCsvFirstLine VARCHAR2(4000); + vCsvColCount NUMBER := 0; + vTemplateColCount NUMBER := 0; + vExcessColumns VARCHAR2(2000); + + -- Get template column count + CURSOR c_template_count IS + SELECT COUNT(*) as col_count + FROM ALL_TAB_COLUMNS + WHERE OWNER = UPPER(REPLACE(REGEXP_SUBSTR(pTemplateTableName,'.*\.'),'.','')) + AND TABLE_NAME = UPPER(REGEXP_REPLACE(pTemplateTableName,'^.*\.','')); + + BEGIN + -- Get template column count + FOR rec IN c_template_count LOOP + vTemplateColCount := rec.col_count; + END LOOP; + + -- Read first line of CSV to count columns + BEGIN + SELECT UTL_RAW.CAST_TO_VARCHAR2( + DBMS_LOB.SUBSTR( + DBMS_CLOUD.GET_OBJECT( + credential_name => ENV_MANAGER.gvCredentialName, + object_uri => pFileName + ), + 4000, 1 + ) + ) INTO vCsvFirstLine FROM DUAL; + + -- Count commas in header line + 1 for total columns + vCsvColCount := REGEXP_COUNT(REGEXP_SUBSTR(vCsvFirstLine, '[^'||chr(10)||']*'), ',') + 1; + + ENV_MANAGER.LOG_PROCESS_EVENT('CSV Column Count: ' || vCsvColCount || ', Template Column Count: ' || vTemplateColCount, 'INFO', vParameters); + + -- Check for excess columns + IF vCsvColCount > vTemplateColCount THEN + vgMsgTmp := ENV_MANAGER.MSG_EXCESS_COLUMNS_DETECTED + ||cgBL||'EXCESS COLUMNS DETECTED!' + ||cgBL||'CSV file has ' || vCsvColCount || ' columns but template expects only ' || vTemplateColCount + ||cgBL||'Excess columns: ' || (vCsvColCount - vTemplateColCount) + ||cgBL||'CSV header: ' || SUBSTR(REGEXP_SUBSTR(vCsvFirstLine, '[^'||chr(10)||']*'), 1, 200) + ||cgBL||'POSSIBLE SOLUTIONS:' + ||cgBL||' 1. Remove excess columns from CSV file before processing' + ||cgBL||' 2. 
Add excess columns to template table: ' || pTemplateTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXCESS_COLUMNS_DETECTED, vgMsgTmp); + END IF; + + EXCEPTION + WHEN ENV_MANAGER.ERR_EXCESS_COLUMNS_DETECTED THEN + RAISE; -- Re-raise the excess columns error + WHEN ENV_MANAGER.ERR_FILE_VALIDATION_FAILED THEN + RAISE; -- Re-raise the validation error + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Warning: Could not perform pre-validation column count check: ' || SQLERRM, 'WARN', vParameters); + -- Continue with normal processing if pre-validation fails + END; + END; + END IF; + + DBMS_CLOUD.CREATE_EXTERNAL_TABLE( + TABLE_NAME => pTableName, + CREDENTIAL_NAME => ENV_MANAGER.gvCredentialName, + FILE_URI_LIST => vFileName, + COLUMN_LIST => vColumnList, + FIELD_LIST => vFieldList, + FORMAT => vFormat + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_EXCESS_COLUMNS_DETECTED THEN + RAISE; -- Re-raise the excess columns error with specific code -20011 + WHEN ENV_MANAGER.ERR_UNKNOWN_PREFIX THEN + vgMsgTmp := ENV_MANAGER.MSG_UNKNOWN_PREFIX || ': ' || pPrefix; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN_PREFIX, vgMsgTmp); + WHEN ENV_MANAGER.ERR_MISSING_COLUMN_DATE_FORMAT THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MISSING_COLUMN_DATE_FORMAT, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_COLUMN_DATE_FORMAT, ENV_MANAGER.MSG_MISSING_COLUMN_DATE_FORMAT); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END CREATE_EXTERNAL_TABLE; + + 
---------------------------------------------------------------------------------------------------- + + PROCEDURE CREATE_EXTERNAL_TABLE(pSourceFileReceivedKey IN NUMBER) + -- + -- Create external table for a single source file to validate the file structure + -- + IS + vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + vSourceFileReceived tSourceFileReceived; + vTableName VARCHAR2(200); + vFileName VARCHAR2(1000); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey), 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + vSourceFileConfig := GET_SOURCE_FILE_CONFIG(pSourceFileReceivedKey => pSourceFileReceivedKey); + vSourceFileReceived := GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + vTableName := vSourceFileConfig.TEMPLATE_TABLE_NAME; + + vFileName := ENV_MANAGER.gvInboxBucketUri ||vSourceFileReceived.SOURCE_FILE_PREFIX_INBOX||vSourceFileReceived.SOURCE_FILE_NAME; + + CREATE_EXTERNAL_TABLE( + pTableName => vSourceFileReceived.EXTERNAL_TABLE_NAME, + pTemplateTableName => vSourceFileConfig.TEMPLATE_TABLE_NAME, + pPrefix => vSourceFileReceived.SOURCE_FILE_PREFIX_INBOX, + pBucketUri => ENV_MANAGER.gvInboxBucketUri, + pFileName => vFileName, + pDelimiter => ',', + pEncoding => vSourceFileConfig.ENCODING -- MARS-1049: NOWY PARAMETR + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + END CREATE_EXTERNAL_TABLE; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE VALIDATE_SOURCE_FILE_RECEIVED(pSourceFileReceivedKey IN NUMBER) + -- + -- Check the structure of the received file using DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE + -- + IS + vSourceFileReceived tSourceFileReceived; + vOperationId NUMBER := -1; + vBadfileTable USER_LOAD_OPERATIONS.BADFILE_TABLE%TYPE; + vStatus USER_LOAD_OPERATIONS.STATUS%TYPE; + 
vErrors NUMBER := 0; + vNumRows NUMBER := 0; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey), 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + vSourceFileReceived := GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + ENV_MANAGER.LOG_PROCESS_EVENT('vSourceFileReceived.EXTERNAL_TABLE_NAME: '||vSourceFileReceived.EXTERNAL_TABLE_NAME,'DEBUG', vParameters); + BEGIN + DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE(vSourceFileReceived.EXTERNAL_TABLE_NAME, vOperationId); + EXCEPTION + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_FILE_VALIDATION_FAILED, vParameters, 'FILE_MANAGER'); + + -- Call detailed validation error analysis and log the results + DECLARE + vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + vValidationLogTable VARCHAR2(200); + vTemplateSchema VARCHAR2(200); + vTemplateTable VARCHAR2(200); + vCsvFileUri VARCHAR2(2000); + vAnalysisResult VARCHAR2(32000); + vFailedOperationId NUMBER; + BEGIN + -- Get source file configuration + vSourceFileConfig := GET_SOURCE_FILE_CONFIG(pSourceFileReceivedKey => pSourceFileReceivedKey); + + -- Extract template schema and table from template table name + vTemplateSchema := REPLACE(REGEXP_SUBSTR(vSourceFileConfig.TEMPLATE_TABLE_NAME,'.*\.'),'.',''); + vTemplateTable := REPLACE(REGEXP_SUBSTR(vSourceFileConfig.TEMPLATE_TABLE_NAME,'\..*'),'.',''); + + -- Construct CSV file URI + vCsvFileUri := ENV_MANAGER.gvInboxBucketUri || vSourceFileReceived.SOURCE_FILE_PREFIX_INBOX || vSourceFileReceived.SOURCE_FILE_NAME; + + -- Find the failed validation operation ID + SELECT MAX(ID) INTO vFailedOperationId + FROM USER_LOAD_OPERATIONS + WHERE TABLE_NAME = vSourceFileReceived.EXTERNAL_TABLE_NAME + AND TYPE = 'VALIDATE' + AND STATUS != 'COMPLETED'; + + -- Get validation 
log table name + IF vFailedOperationId IS NOT NULL THEN + SELECT LOGFILE_TABLE INTO vValidationLogTable + FROM USER_LOAD_OPERATIONS + WHERE ID = vFailedOperationId; + + -- Call detailed error analysis + vAnalysisResult := ENV_MANAGER.ANALYZE_VALIDATION_ERRORS( + pValidationLogTable => vValidationLogTable, + pTemplateSchema => vTemplateSchema, + pTemplateTable => vTemplateTable, + pCsvFileUri => vCsvFileUri + ); + + -- Log detailed analysis results + ENV_MANAGER.LOG_PROCESS_EVENT('DETAILED VALIDATION ERROR ANALYSIS:', 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(vAnalysisResult, 'ERROR', vParameters); + END IF; + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Error during validation analysis: ' || SQLERRM, 'ERROR', vParameters); + END; + + MOVE_FILE(pSourceFileReceivedKey => pSourceFileReceivedKey, pDestination => 'QUARANTINE'); + -- Ensure the status change is committed before raising exception + COMMIT; + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_VALIDATION_FAILED, ENV_MANAGER.MSG_FILE_VALIDATION_FAILED); + END; + ENV_MANAGER.LOG_PROCESS_EVENT('vOperationId of validation: '||vOperationId,'DEBUG', vParameters); + IF vOperationId = -1 + THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID); + END IF; + + SELECT BADFILE_TABLE, ROWS_LOADED, STATUS + INTO vBadfileTable, vNumRows, vStatus + FROM USER_LOAD_OPERATIONS + WHERE ID = vOperationId; + +-- DBMS_OUTPUT.PUT_LINE(vStatus); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => pSourceFileReceivedKey, pStatus => 'VALIDATED'); + ENV_MANAGER.LOG_PROCESS_EVENT('File status changed to VALIDATED','DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_DIDNT_GET_LOAD_OPERATION_ID THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID, 'ERROR', vParameters); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => 
pSourceFileReceivedKey, pStatus => 'VALIDATION_FAILED'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID); + + WHEN ENV_MANAGER.ERR_FILE_VALIDATION_FAILED THEN + vgMsgTmp := ENV_MANAGER.MSG_FILE_VALIDATION_FAILED; + ENV_MANAGER.LOG_PROCESS_ERROR(vgMsgTmp, vParameters, 'FILE_MANAGER'); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => pSourceFileReceivedKey, pStatus => 'VALIDATION_FAILED'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_VALIDATION_FAILED, vgMsgTmp); + + WHEN OTHERS THEN + IF SQLCODE = -20404 THEN + vgMsgTmp := ENV_MANAGER.MSG_FILE_NOT_FOUND_ON_CLOUD||cgBL||SQLERRM; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_FOUND_ON_CLOUD, vgMsgTmp); + + ELSIF SQLCODE = -20003 THEN + execute immediate 'select LISTAGG(record, '''||cgBL||''') from (select * from '||REGEXP_SUBSTR(SQLERRM, '"([^"]+)"."([^"]+)"')||' order by rownum desc) where rownum <=2' + into vgMsgTmp; + vgMsgTmp := ENV_MANAGER.MSG_FILE_VALIDATION_FAILED||cgBL||SQLERRM||cgBL||vgMsgTmp; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_VALIDATION_FAILED, vgMsgTmp); +-- ELSIF SQLCODE = -20000 THEN + -- TO_DO Add additional info about current config + -- ENV_MANAGER.MSG_FILE_VALIDATION_FAILED := ENV_MANAGER.MSG_FILE_VALIDATION_FAILED||cgBL||SQLERRM||cgBL||FILE_MANAGER.OUTPUT_SOURCE_FILE_CONFIG_INFO( ..config key value.. 
); +-- dbms_output.put_line(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); +-- ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_WRONG_DATE_TIMESTAMP_FORMAT, 'ERROR', vParameters); +-- ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); +-- RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DATE_TIMESTAMP_FORMAT, ENV_MANAGER.MSG_WRONG_DATE_TIMESTAMP_FORMAT); + + ELSE + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_UNKNOWN, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END IF; + END VALIDATE_SOURCE_FILE_RECEIVED; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION VALIDATE_EXTERNAL_TABLE(pTableName IN VARCHAR2) + RETURN VARCHAR2 + -- + -- wrapper for DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE + -- + IS + vOperationId NUMBER := -1; + vBadfileTable USER_LOAD_OPERATIONS.BADFILE_TABLE%TYPE; + vLogfileTable USER_LOAD_OPERATIONS.LOGFILE_TABLE%TYPE; + vStatus USER_LOAD_OPERATIONS.STATUS%TYPE; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vDetails clob; + TYPE TCURSOR is REF CURSOR; + vCursor TCURSOR; + vQuery VARCHAR2(1000); + vRecord VARCHAR2(10000); + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pTableName => '''||nvl(pTableName, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE(pTableName, vOperationId); + ENV_MANAGER.LOG_PROCESS_EVENT('vOperationId of validation: '||vOperationId,'DEBUG', vParameters); + IF vOperationId = -1 + THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID); + END IF; + + SELECT decode(STATUS, 'COMPLETED', 'PASSED', STATUS), LOGFILE_TABLE, BADFILE_TABLE + INTO vStatus, vLogfileTable, vBadfileTable 
+ FROM USER_LOAD_OPERATIONS + WHERE ID = vOperationId; + + RETURN vStatus; + EXCEPTION + WHEN ENV_MANAGER.ERR_DIDNT_GET_LOAD_OPERATION_ID THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID); + + WHEN OTHERS THEN + SELECT decode(STATUS, 'COMPLETED', 'PASSED', STATUS), LOGFILE_TABLE, BADFILE_TABLE + INTO vStatus, vLogfileTable, vBadfileTable + FROM USER_LOAD_OPERATIONS + WHERE ID = vOperationId; + vQuery := 'select record from ( + select + nvl(l.record,''----------------------------------------------------'') as record + ,rownum as lp + ,max(case when nvl(instr(l.record, ''error'' ),0) > 0 then rownum else 0 end) over (partition by 1) as ENV_MANAGER.ERR_row + from '||vLogfileTable||' l + ) + where lp >= ENV_MANAGER.ERR_row + order by rownum'; + + vDetails := vStatus||cgBL||'----------------------------------------------------'||cgBL; + + OPEN vCursor for vQuery; + loop + fetch vCursor into vRecord; + EXIT WHEN vCursor%NOTFOUND; + vDetails := vDetails ||vRecord ||cgBL; +-- for i in loop +-- vDetails := vDetails ||i.record ||cgBL; + end loop; + CLOSE vCursor; + vDetails := vDetails||'More details can be found in below tables:'||cgBL|| + ' SELECT * FROM USER_LOAD_OPERATIONS WHERE ID = '||vOperationId||';'||cgBL|| + ' SELECT * FROM '||vLogfileTable||';'||cgBL|| + ' SELECT * FROM '||vBadfileTable||';' + ; + + RETURN vDetails; + + END VALIDATE_EXTERNAL_TABLE; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION S_VALIDATE_EXTERNAL_TABLE(pTableName IN VARCHAR2) + RETURN VARCHAR2 + -- + -- Simple + -- + IS + vCount PLS_INTEGER; + BEGIN + execute immediate 'select count(1) from '||pTableName into vCount; + IF vCount >= 0 + THEN + RETURN 'PASSED'; + END IF; + + RETURN 'FAILED'; + EXCEPTION + WHEN OTHERS THEN + RETURN 'FAILED'; + END 
S_VALIDATE_EXTERNAL_TABLE; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE DROP_EXTERNAL_TABLE(pSourceFileReceivedKey IN NUMBER) + -- + -- Drop external table created to validate the file structure + -- + IS + vSourceFileReceived tSourceFileReceived; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey), 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + vSourceFileReceived := GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + EXECUTE IMMEDIATE 'DROP TABLE '||vSourceFileReceived.EXTERNAL_TABLE_NAME; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + END DROP_EXTERNAL_TABLE; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE COPY_FILE(pSourceFileReceivedKey IN NUMBER, pDestination IN VARCHAR2) + -- + -- Possible pDestination values are: 'ODS' or 'ARCHIVE' + -- + IS + vSourceFileReceivedInfo tSourceFileReceived; + vSourceObject VARCHAR2(2000); + vTargetObject VARCHAR2(2000); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; +-- vStatus VARCHAR2(20); + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => ' ||nvl(to_char(pSourceFileReceivedKey), 'NULL'), + 'pDestination => '''||nvl(pDestination, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + vSourceFileReceivedInfo := 
GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + + IF pDestination = 'ODS' THEN + vSourceObject := ENV_MANAGER.gvInboxBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_INBOX ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vTargetObject := ENV_MANAGER.gvDataBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_ODS ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM, ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM); + END IF; + + DBMS_CLOUD.COPY_OBJECT(source_credential_name => ENV_MANAGER.gvCredentialName, + source_object_uri => vSourceObject, + target_object_uri => vTargetObject, + target_credential_name => ENV_MANAGER.gvCredentialName + ); + ENV_MANAGER.LOG_PROCESS_EVENT('File copied to '||pDestination||' target location','DEBUG', vTargetObject); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END COPY_FILE; + + ---------------------------------------------------------------------------------------------------- + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE MOVE_FILE(pSourceFileReceivedKey IN NUMBER, pDestination IN VARCHAR2) + -- + -- Possible pDestination values are: 'ODS' or 'ARCHIVE' + -- + IS + vSourceFileReceivedInfo tSourceFileReceived; + vSourceObject VARCHAR2(2000); + vTargetObject VARCHAR2(2000); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vStatus VARCHAR2(20); + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => ' ||nvl(to_char(pSourceFileReceivedKey), 'NULL'), + 'pDestination => '''||nvl(pDestination, 'NULL')||'''')); + 
ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + + vSourceFileReceivedInfo := GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + + IF pDestination = 'ODS' THEN + vSourceObject := ENV_MANAGER.gvInboxBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_INBOX ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vTargetObject := ENV_MANAGER.gvDataBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_ODS ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vStatus := 'READY_FOR_INGESTION'; + ELSIF pDestination = 'QUARANTINE' THEN + vSourceObject := ENV_MANAGER.gvInboxBucketUri ||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_INBOX ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vTargetObject := ENV_MANAGER.gvInboxBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_QUARANTINE||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vStatus := 'VALIDATION_FAILED'; + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM, ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM); + END IF; + + DBMS_CLOUD.MOVE_OBJECT(source_credential_name => ENV_MANAGER.gvCredentialName, + source_object_uri => vSourceObject, + target_object_uri => vTargetObject, + target_credential_name => ENV_MANAGER.gvCredentialName + ); + ENV_MANAGER.LOG_PROCESS_EVENT('File moved to '||pDestination||' target location','DEBUG', vTargetObject); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => pSourceFileReceivedKey, pStatus => vStatus); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_WRONG_DESTINATION_PARAM THEN + ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM := ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM + ||cgBL||' '||'Possible parameters are: ''ODS'' or ''ARCHIVE''' + ||cgBL||' '||'Provided destination parameter: '''||pDestination||''''; + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM, ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM); + WHEN 
ENV_MANAGER.ERR_NO_CONFIG_FOR_RECEIVED_FILE THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END MOVE_FILE; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE DELETE_FOLDER_CONTENTS(pBucketArea IN VARCHAR2, pFolderPrefix IN VARCHAR2) + -- + -- Delete all files from specified folder in cloud storage + -- pBucketArea: 'INBOX', 'DATA', 'ARCHIVE' + -- pFolderPrefix: folder path within bucket (e.g., 'C2D/UC_DISSEM/UC_NMA_DISSEM/') + -- + IS + vBucketUri VARCHAR2(2000); + vFolderUri VARCHAR2(2000); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vFilesDeleted PLS_INTEGER := 0; + vObjectName VARCHAR2(4000); + vFullObjectUri VARCHAR2(4000); + + -- Cursor to list all objects in the folder + CURSOR c_objects IS + SELECT object_name + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => ENV_MANAGER.gvCredentialName, + location_uri => vBucketUri + )) + WHERE object_name IS NOT NULL + AND object_name LIKE pFolderPrefix || '%'; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( + 'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''', + 'pFolderPrefix => '''||nvl(pFolderPrefix, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area + vBucketUri := GET_BUCKET_URI(pBucketArea); + + ENV_MANAGER.LOG_PROCESS_EVENT('Listing objects in bucket with prefix: ' || pFolderPrefix, 'DEBUG', vBucketUri); + + -- List and delete all objects in the 
folder + FOR obj_rec IN c_objects LOOP + vObjectName := obj_rec.object_name; + vFullObjectUri := vBucketUri || vObjectName; + + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Deleting object', 'DEBUG', vFullObjectUri); + + DBMS_CLOUD.DELETE_OBJECT( + credential_name => ENV_MANAGER.gvCredentialName, + object_uri => vFullObjectUri + ); + + vFilesDeleted := vFilesDeleted + 1; + ENV_MANAGER.LOG_PROCESS_EVENT('Object deleted successfully', 'DEBUG', vObjectName); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Error deleting object: ' || vObjectName || ' - ' || SQLERRM, 'ERROR', vParameters); + -- Continue with next file instead of stopping the whole process + END; + END LOOP; + + ENV_MANAGER.LOG_PROCESS_EVENT('Total files deleted: ' || vFilesDeleted, 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO', vParameters); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Error in DELETE_FOLDER_CONTENTS: ' || SQLERRM, 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END DELETE_FOLDER_CONTENTS; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE PROCESS_SOURCE_FILE(pSourceFileReceivedName IN VARCHAR2) + -- + -- Ubmrella procedure that calls + -- - REGISTER_SOURCE_FILE_RECEIVED + -- - CREATE_EXTERNAL_TABLE + -- - VALIDATE_SOURCE_FILE_RECEIVED + -- - DROP_EXTERNAL_TABLE + -- - MOVE_FILE + IS + vSourceFileId NUMBER; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedName => '||nvl(pSourceFileReceivedName, 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + ---- + vSourceFileId := REGISTER_SOURCE_FILE_RECEIVED(pSourceFileReceivedName); + CREATE_EXTERNAL_TABLE(vSourceFileId); 
+ VALIDATE_SOURCE_FILE_RECEIVED(vSourceFileId); + DROP_EXTERNAL_TABLE(vSourceFileId); +-- COPY_FILE(vSourceFileId, 'ODS'); + MOVE_FILE(vSourceFileId, 'ODS'); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => vSourceFileId, pStatus => 'READY_FOR_INGESTION'); + + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + -- -20001 + WHEN ENV_MANAGER.ERR_EMPTY_FILEURI_AND_RECKEY THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EMPTY_FILEURI_AND_RECKEY, ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY); + -- -20002 + WHEN ENV_MANAGER.ERR_NO_CONFIG_MATCH_FOR_FILEURI THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_MATCH_FOR_FILEURI, ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI); + -- -20003 + WHEN ENV_MANAGER.ERR_MULTIPLE_MATCH_FOR_SRCFILE THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MULTIPLE_MATCH_FOR_SRCFILE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_MATCH_FOR_SRCFILE, ENV_MANAGER.MSG_MULTIPLE_MATCH_FOR_SRCFILE); + -- -20004 + WHEN ENV_MANAGER.ERR_MISSING_COLUMN_DATE_FORMAT THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MISSING_COLUMN_DATE_FORMAT), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_COLUMN_DATE_FORMAT, ENV_MANAGER.MSG_MISSING_COLUMN_DATE_FORMAT); + -- -20005 + WHEN ENV_MANAGER.ERR_MULTIPLE_COLUMN_DATE_FORMAT THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MULTIPLE_COLUMN_DATE_FORMAT), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_COLUMN_DATE_FORMAT, ENV_MANAGER.MSG_MULTIPLE_COLUMN_DATE_FORMAT); + -- -20006 + WHEN 
ENV_MANAGER.ERR_DIDNT_GET_LOAD_OPERATION_ID THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID); + -- -20007 + WHEN ENV_MANAGER.ERR_NO_CONFIG_FOR_RECEIVED_FILE THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE); + -- -20008 + WHEN ENV_MANAGER.ERR_MULTI_CONFIG_FOR_RECEIVED_FILE THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MULTI_CONFIG_FOR_RECEIVED_FILE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTI_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_MULTI_CONFIG_FOR_RECEIVED_FILE); + -- -20009 + WHEN ENV_MANAGER.ERR_FILE_NOT_FOUND_ON_CLOUD THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_FILE_NOT_FOUND_ON_CLOUD), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_FOUND_ON_CLOUD, ENV_MANAGER.MSG_FILE_NOT_FOUND_ON_CLOUD); + -- -20010 + WHEN ENV_MANAGER.ERR_FILE_VALIDATION_FAILED THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_FILE_VALIDATION_FAILED), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_VALIDATION_FAILED, ENV_MANAGER.MSG_FILE_VALIDATION_FAILED); + -- -20011 + WHEN ENV_MANAGER.ERR_NO_CONFIG_MATCH THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_NO_CONFIG_MATCH), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_MATCH, ENV_MANAGER.MSG_NO_CONFIG_MATCH); + -- -20012 + WHEN ENV_MANAGER.ERR_UNKNOWN_PREFIX THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 
'TABLE', pCode=> ENV_MANAGER.CODE_UNKNOWN_PREFIX), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN_PREFIX, ENV_MANAGER.MSG_UNKNOWN_PREFIX); + -- -20013 + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_TABLE_NOT_EXISTS), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + -- -20014 + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_COLUMN_NOT_EXISTS), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + -- -20015 + WHEN ENV_MANAGER.ERR_UNSUPPORTED_DATA_TYPE THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE); + -- -20016 + WHEN ENV_MANAGER.ERR_MISSING_SOURCE_KEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MISSING_SOURCE_KEY), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_SOURCE_KEY, ENV_MANAGER.MSG_MISSING_SOURCE_KEY); + -- -20017 + WHEN ENV_MANAGER.ERR_NULL_SOURCE_FILE_CONFIG_KEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_NULL_SOURCE_FILE_CONFIG_KEY), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NULL_SOURCE_FILE_CONFIG_KEY, ENV_MANAGER.MSG_NULL_SOURCE_FILE_CONFIG_KEY); + -- -20018 + WHEN ENV_MANAGER.ERR_DUPLICATED_SOURCE_KEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_DUPLICATED_SOURCE_KEY), 'ERROR', vParameters); + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DUPLICATED_SOURCE_KEY, ENV_MANAGER.MSG_DUPLICATED_SOURCE_KEY); + -- -20019 + WHEN ENV_MANAGER.ERR_MISSING_CONTAINER_CONFIG THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MISSING_CONTAINER_CONFIG), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_CONTAINER_CONFIG, ENV_MANAGER.MSG_MISSING_CONTAINER_CONFIG); + -- -20020 + WHEN ENV_MANAGER.ERR_MULTIPLE_CONTAINER_ENTRIES THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MULTIPLE_CONTAINER_ENTRIES), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_CONTAINER_ENTRIES, ENV_MANAGER.MSG_MULTIPLE_CONTAINER_ENTRIES); + -- -20021 + WHEN ENV_MANAGER.ERR_WRONG_DESTINATION_PARAM THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM, ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM); + -- -20022 + WHEN ENV_MANAGER.ERR_FILE_NOT_EXISTS_ON_CLOUD THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_EXISTS_ON_CLOUD, ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD); + -- -20023 + WHEN ENV_MANAGER.ERR_FILE_ALREADY_REGISTERED THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_FILE_ALREADY_REGISTERED), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_ALREADY_REGISTERED, ENV_MANAGER.MSG_FILE_ALREADY_REGISTERED); + -- -20024 + WHEN ENV_MANAGER.ERR_WRONG_DATE_TIMESTAMP_FORMAT THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_WRONG_DATE_TIMESTAMP_FORMAT), 'ERROR', vParameters); + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DATE_TIMESTAMP_FORMAT, ENV_MANAGER.MSG_WRONG_DATE_TIMESTAMP_FORMAT); + + -- -20011 + WHEN ENV_MANAGER.ERR_EXCESS_COLUMNS_DETECTED THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_EXCESS_COLUMNS_DETECTED, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXCESS_COLUMNS_DETECTED, ENV_MANAGER.MSG_EXCESS_COLUMNS_DETECTED); + + -- -20999 + WHEN ENV_MANAGER.ERR_UNKNOWN THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_UNKNOWN), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + + + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN, 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END PROCESS_SOURCE_FILE; + + +---------------------------------------------------------------------------------------------------- + + FUNCTION PROCESS_SOURCE_FILE(pSourceFileReceivedName IN VARCHAR2) + RETURN PLS_INTEGER + IS + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedName => '||nvl(pSourceFileReceivedName, 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + ---- + PROCESS_SOURCE_FILE(pSourceFileReceivedName => pSourceFileReceivedName); + ---- + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + RETURN SQLCODE; + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RETURN SQLCODE; + END PROCESS_SOURCE_FILE; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION 
GET_DATE_FORMAT( + pTemplateTableName IN VARCHAR2, + pColumnName IN VARCHAR2 + ) + RETURN VARCHAR2 + IS + vDateFormat A_COLUMN_DATE_FORMAT.DATE_FORMAT%TYPE; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vColumnName VARCHAR2(200); + vGetDefault BOOLEAN := FALSE; + BEGIN + vColumnName := trim(pColumnName); + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pColumnName => '''||nvl(vColumnName, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + BEGIN + SELECT DATE_FORMAT + INTO vDateFormat + FROM CT_MRDS.A_COLUMN_DATE_FORMAT F + WHERE F.TEMPLATE_TABLE_NAME = pTemplateTableName + AND F.COLUMN_NAME = vColumnName; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vGetDefault := TRUE; + WHEN TOO_MANY_ROWS THEN + -- Below error should not happened because: + -- Unique constraint added on table A_COLUMN_DATE_FORMAT on columns: (TEMPLATE_TABLE_NAME, COLUMN_NAME) + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_COLUMN_DATE_FORMAT, ENV_MANAGER.MSG_MULTIPLE_COLUMN_DATE_FORMAT); + END; + IF vGetDefault THEN + BEGIN + SELECT DATE_FORMAT + INTO vDateFormat + FROM CT_MRDS.A_COLUMN_DATE_FORMAT F + WHERE F.TEMPLATE_TABLE_NAME = pTemplateTableName + AND F.COLUMN_NAME = 'DEFAULT'; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vDateFormat := ENV_MANAGER.gvDefaultDateFormat; + END; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + RETURN vDateFormat; + END GET_DATE_FORMAT; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE GENERATE_EXTERNAL_TABLE_PARAMS ( + + pTemplateTableName IN VARCHAR2, + pColumnList OUT CLOB, + pFieldList OUT CLOB + ) + IS + vSchemaName VARCHAR2(200); + vTableName VARCHAR2(200); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vMaxColumnNameLength PLS_INTEGER := 0; + vColType varchar2(200); + BEGIN + vParameters := 
ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pFieldList = '''||nvl(pFieldList, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + vSchemaName := REPLACE(REGEXP_SUBSTR(pTemplateTableName,'.*\.'),'.',''); + ENV_MANAGER.LOG_PROCESS_EVENT('vSchemaName','DEBUG', vSchemaName); + vTableName := REPLACE(REGEXP_SUBSTR(pTemplateTableName,'\..*'),'.',''); + ENV_MANAGER.LOG_PROCESS_EVENT('vTableName','DEBUG', vTableName); + FOR rec IN ( + SELECT + '"'||column_name||'"' as quoted_column_name, + column_name, + data_type, + data_length, + char_length, -- MARS-1056: Added for CHAR/BYTE semantics + char_used, -- MARS-1056: Added for CHAR/BYTE semantics + data_precision, + data_scale, + column_id, + max(length(column_name)+1) over (partition by table_name) as max_column_name_length + FROM all_tab_columns + WHERE table_name = UPPER(vTableName) + AND owner = NVL(UPPER(vSchemaName), USER) + ORDER BY column_id + ) LOOP + -- Build the column_list string + rec.quoted_column_name := rpad(rec.quoted_column_name, rec.max_column_name_length+2, ' '); + + vColType := + CASE + -- MARS-1056: Fixed VARCHAR2 definition logic to preserve CHAR/BYTE semantics + WHEN rec.data_type = 'VARCHAR2' THEN + CASE + WHEN rec.char_used = 'C' THEN + rec.quoted_column_name || ' VARCHAR2(' || rec.char_length || ' CHAR)' + WHEN rec.char_used = 'B' THEN + rec.quoted_column_name || ' VARCHAR2(' || rec.data_length || ' BYTE)' + ELSE + -- Fallback for NULL char_used (should not occur but handle gracefully) + rec.quoted_column_name || ' VARCHAR2(' || rec.data_length || ')' + END + -- Other character types (preserve original logic) + WHEN rec.data_type IN ('CHAR', 'NCHAR', 'NVARCHAR2') THEN + rec.quoted_column_name || ' ' || rec.data_type || '(' || rec.data_length || ')' + WHEN rec.data_type = 'NUMBER' THEN + rec.quoted_column_name || ' ' || rec.data_type || + 
CASE + WHEN rec.data_precision IS NOT NULL AND rec.data_scale IS NOT NULL THEN + '(' || rec.data_precision || ',' || rec.data_scale || ')' + WHEN rec.data_precision IS NOT NULL THEN + '(' || rec.data_precision || ')' + ELSE + '' + END + WHEN rec.data_type IN ('RAW') THEN + rec.quoted_column_name || ' ' || rec.data_type || '(' || rec.data_length || ')' + WHEN REGEXP_SUBSTR(rec.data_type, '^[A-Z]+') IN ('DATE', 'TIMESTAMP') THEN + rec.quoted_column_name || ' ' || rec.data_type + ELSE + rec.quoted_column_name || ' ' || rec.data_type + END; + pColumnList := pColumnList ||vColType ||cgBL|| ','; + -- Build the field_list string + -- Note: field_list uses CHAR() for CSV field definitions - this is correct behavior + pFieldList := pFieldList || + CASE + WHEN rec.data_type = 'DATE' THEN + -- MARS-1046: DATE format - wrap with NORMALIZE_DATE_FORMAT to fix ISO 8601 'T' separator + rec.quoted_column_name || ' DATE ' || CHR(39) || NORMALIZE_DATE_FORMAT(GET_DATE_FORMAT(pTemplateTableName => pTemplateTableName, pColumnName => rec.column_name)) || CHR(39) + WHEN rec.data_type LIKE 'TIMESTAMP%WITH TIME ZONE' THEN + -- MARS-1046: TIMESTAMP WITH TIME ZONE format for ISO 8601 with fractional seconds and timezone + -- Syntax: column_name CHAR(length) DATE_FORMAT TIMESTAMP WITH TIME ZONE MASK "format" + -- Use fixed length of 50 for ISO 8601 format (e.g., "2012-03-02T14:16:23.798+01:00" = 29 chars) + rec.quoted_column_name || ' CHAR(50) DATE_FORMAT TIMESTAMP WITH TIME ZONE MASK ' || CHR(39) || NORMALIZE_DATE_FORMAT(GET_DATE_FORMAT(pTemplateTableName => pTemplateTableName, pColumnName => rec.column_name)) || CHR(39) + WHEN REGEXP_SUBSTR(rec.data_type, '^[A-Z]+') = 'TIMESTAMP' THEN + -- Other TIMESTAMP types (without timezone) + rec.quoted_column_name || ' TIMESTAMP ' || CHR(39) || NORMALIZE_DATE_FORMAT(GET_DATE_FORMAT(pTemplateTableName => pTemplateTableName, pColumnName => rec.column_name)) || CHR(39) + WHEN rec.data_type IN ('CHAR', 'NCHAR', 'VARCHAR2', 'NVARCHAR2') THEN + -- For CSV 
-- field definitions, use data_length for CHAR() specification
                    rec.quoted_column_name || ' CHAR(' || rec.data_length || ')'
                ELSE
                    rec.quoted_column_name
            END ||cgBL|| ',';

        END LOOP;

        -- Remove the trailing comma and space from the strings
        pColumnList := ' '||RTRIM(pColumnList, ',');
        pFieldList := ' '||RTRIM(pFieldList, ',');
        ENV_MANAGER.LOG_PROCESS_EVENT('vColumnList', 'DEBUG', pColumnList);
        -- TO_DO !!!
        -- Add check if pColumnList/pFieldList is empty or not
        -- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

        -- Output the generated column_list and field_list
        ENV_MANAGER.LOG_PROCESS_EVENT('column_list' ,'DEBUG',pColumnList);
        ENV_MANAGER.LOG_PROCESS_EVENT('field_list' ,'DEBUG',pFieldList);
        ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters);

    EXCEPTION
--        WHEN ENV_MANAGER.ERR_MISSING_COLUMN_DATE_FORMAT THEN
--            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MISSING_COLUMN_DATE_FORMAT, 'ERROR', vParameters);
--            RAISE_ERROR(ENV_MANAGER.CODE_MISSING_COLUMN_DATE_FORMAT);
        WHEN OTHERS THEN
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters);
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));
    END GENERATE_EXTERNAL_TABLE_PARAMS;

    ----------------------------------------------------------------------------------------------------

    -- Inserts one source-system row into CT_MRDS.A_SOURCE and commits immediately.
    --   pSourceKey  : value for A_SOURCE.A_SOURCE_KEY.
    --   pSourceName : value for A_SOURCE.SOURCE_NAME.
    -- On a duplicate key (DUP_VAL_ON_INDEX) logs MSG_DUPLICATED_SOURCE_KEY and raises
    -- CODE_DUPLICATED_SOURCE_KEY; any other error is logged and re-raised as CODE_UNKNOWN.
    PROCEDURE ADD_SOURCE (
        pSourceKey IN CT_MRDS.A_SOURCE.A_SOURCE_KEY%TYPE,
        pSourceName IN CT_MRDS.A_SOURCE.SOURCE_NAME%TYPE
    ) IS
        -- NOTE(review): vParameters is never assigned here, so error logs carry NULL parameters.
        vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
    BEGIN
        INSERT INTO CT_MRDS.A_SOURCE(A_SOURCE_KEY, SOURCE_NAME) VALUES (pSourceKey, pSourceName);
        COMMIT; -- commits immediately; caller cannot roll this back
    EXCEPTION
        WHEN DUP_VAL_ON_INDEX THEN
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_DUPLICATED_SOURCE_KEY, 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DUPLICATED_SOURCE_KEY, ENV_MANAGER.MSG_DUPLICATED_SOURCE_KEY);
        WHEN OTHERS THEN
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters);
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));
    END ADD_SOURCE;

    ----------------------------------------------------------------------------------------------------

    -- Deletes a source system and all dependent configuration in three committed passes:
    --   1) container file configs (CONTAINER_FILE_KEY IS NOT NULL) + their received-file rows,
    --   2) parent file configs (CONTAINER_FILE_KEY IS NULL) + their received-file rows,
    --   3) the A_SOURCE row itself.
    -- A_COLUMN_DATE_FORMAT rows for a template table are deleted only when no OTHER
    -- source system references the same TEMPLATE_TABLE_NAME.
    PROCEDURE DELETE_SOURCE_CASCADE (
        pSourceKey IN CT_MRDS.A_SOURCE.A_SOURCE_KEY%TYPE
    ) IS
        vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
        vSharedTemplateCount PLS_INTEGER := 0;
    BEGIN
        vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceKey => '''||nvl(pSourceKey, 'NULL')||''''));
        ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);

        -- First pass: Delete container files (those that have CONTAINER_FILE_KEY set)
        for rec in (select A_SOURCE_FILE_CONFIG_KEY, TEMPLATE_TABLE_NAME from CT_MRDS.A_SOURCE_FILE_CONFIG
                     where A_SOURCE_KEY = pSourceKey AND CONTAINER_FILE_KEY IS NOT NULL
                     ORDER BY A_SOURCE_FILE_CONFIG_KEY) loop
            -- Delete processed file records
            delete from CT_MRDS.A_SOURCE_FILE_RECEIVED WHERE A_SOURCE_FILE_CONFIG_KEY = rec.A_SOURCE_FILE_CONFIG_KEY;
            ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_SOURCE_FILE_RECEIVED records for container config key: '||rec.A_SOURCE_FILE_CONFIG_KEY,'DEBUG', vParameters);

            -- Check if TEMPLATE_TABLE_NAME is shared with other source systems before deleting date formats
            IF rec.TEMPLATE_TABLE_NAME IS NOT NULL THEN
                SELECT COUNT(DISTINCT A_SOURCE_KEY)
                INTO vSharedTemplateCount
                FROM CT_MRDS.A_SOURCE_FILE_CONFIG
                WHERE TEMPLATE_TABLE_NAME = rec.TEMPLATE_TABLE_NAME
                AND A_SOURCE_KEY <> pSourceKey; -- Exclude current source being deleted

                -- Only delete date formats if template table is not shared with
-- other sources
                IF vSharedTemplateCount = 0 THEN
                    delete from CT_MRDS.A_COLUMN_DATE_FORMAT WHERE TEMPLATE_TABLE_NAME = rec.TEMPLATE_TABLE_NAME;
                    ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_COLUMN_DATE_FORMAT records for template: '||rec.TEMPLATE_TABLE_NAME,'DEBUG', vParameters);
                ELSE
                    ENV_MANAGER.LOG_PROCESS_EVENT('Skipping A_COLUMN_DATE_FORMAT deletion - template table '||rec.TEMPLATE_TABLE_NAME||' is shared with '||vSharedTemplateCount||' other source systems','WARNING', vParameters);
                END IF;
            END IF;

            -- Delete container file configuration
            delete from CT_MRDS.A_SOURCE_FILE_CONFIG WHERE A_SOURCE_FILE_CONFIG_KEY = rec.A_SOURCE_FILE_CONFIG_KEY;
            ENV_MANAGER.LOG_PROCESS_EVENT('Deleted container A_SOURCE_FILE_CONFIG record for config key: '||rec.A_SOURCE_FILE_CONFIG_KEY,'DEBUG', vParameters);
        end loop;
        COMMIT; -- Commit container deletions

        -- Second pass: Delete parent files (those that do NOT have CONTAINER_FILE_KEY set)
        for rec in (select A_SOURCE_FILE_CONFIG_KEY, TEMPLATE_TABLE_NAME from CT_MRDS.A_SOURCE_FILE_CONFIG
                     where A_SOURCE_KEY = pSourceKey AND CONTAINER_FILE_KEY IS NULL
                     ORDER BY A_SOURCE_FILE_CONFIG_KEY) loop
            -- Delete processed file records
            delete from CT_MRDS.A_SOURCE_FILE_RECEIVED WHERE A_SOURCE_FILE_CONFIG_KEY = rec.A_SOURCE_FILE_CONFIG_KEY;
            ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_SOURCE_FILE_RECEIVED records for parent config key: '||rec.A_SOURCE_FILE_CONFIG_KEY,'DEBUG', vParameters);

            -- Check if TEMPLATE_TABLE_NAME is shared with other source systems before deleting date formats
            IF rec.TEMPLATE_TABLE_NAME IS NOT NULL THEN
                SELECT COUNT(DISTINCT A_SOURCE_KEY)
                INTO vSharedTemplateCount
                FROM CT_MRDS.A_SOURCE_FILE_CONFIG
                WHERE TEMPLATE_TABLE_NAME = rec.TEMPLATE_TABLE_NAME
                AND A_SOURCE_KEY <> pSourceKey; -- Exclude current source being deleted

                -- Only delete date formats if template table is not shared with other sources
                IF vSharedTemplateCount = 0 THEN
                    delete from CT_MRDS.A_COLUMN_DATE_FORMAT WHERE TEMPLATE_TABLE_NAME = rec.TEMPLATE_TABLE_NAME;
                    ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_COLUMN_DATE_FORMAT records for template: '||rec.TEMPLATE_TABLE_NAME,'DEBUG', vParameters);
                ELSE
                    ENV_MANAGER.LOG_PROCESS_EVENT('Skipping A_COLUMN_DATE_FORMAT deletion - template table '||rec.TEMPLATE_TABLE_NAME||' is shared with '||vSharedTemplateCount||' other source systems','WARNING', vParameters);
                END IF;
            END IF;

            -- Delete parent file configuration
            delete from CT_MRDS.A_SOURCE_FILE_CONFIG WHERE A_SOURCE_FILE_CONFIG_KEY = rec.A_SOURCE_FILE_CONFIG_KEY;
            ENV_MANAGER.LOG_PROCESS_EVENT('Deleted parent A_SOURCE_FILE_CONFIG record for config key: '||rec.A_SOURCE_FILE_CONFIG_KEY,'DEBUG', vParameters);
        end loop;
        COMMIT; -- Commit parent deletions

        -- Delete source system record
        DELETE FROM CT_MRDS.A_SOURCE where A_SOURCE_KEY = pSourceKey;
        ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_SOURCE record for source key: '||pSourceKey,'DEBUG', vParameters);
        COMMIT; -- Final commit for source deletion

        ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO', vParameters);

    EXCEPTION
        WHEN OTHERS THEN
            -- NOTE(review): earlier passes are already committed; a failure here leaves a partial delete.
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN, 'ERROR', vParameters);
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));
    END DELETE_SOURCE_CASCADE;

    ----------------------------------------------------------------------------------------------------

    -- Returns the A_SOURCE_FILE_CONFIG_KEY of the CONTAINER-type config row matching
    -- pSourceFileId, or NULL (with a WARNING log) when no such row exists.
    -- Raises CODE_MULTIPLE_CONTAINER_ENTRIES when more than one CONTAINER row matches.
    FUNCTION GET_CONTAINER_SOURCE_FILE_CONFIG_KEY (
        pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE
    ) RETURN PLS_INTEGER
    IS
        vSourceFileConfigKey PLS_INTEGER;
        vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
    BEGIN
        vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileId => '||nvl(to_char(pSourceFileId), 'NULL')));
        ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG',
vParameters);
        SELECT A_SOURCE_FILE_CONFIG_KEY
        INTO vSourceFileConfigKey
        FROM CT_MRDS.A_SOURCE_FILE_CONFIG
        WHERE SOURCE_FILE_ID = pSourceFileId
        AND SOURCE_FILE_TYPE = 'CONTAINER';
        ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters);
        RETURN vSourceFileConfigKey;

    EXCEPTION
        WHEN NO_DATA_FOUND THEN
            -- NOTE(review): writes the message into an ENV_MANAGER package variable before logging it.
            ENV_MANAGER.MSG_MISSING_CONTAINER_CONFIG := 'No match in A_SOURCE_FILE_CONFIG where SOURCE_FILE_TYPE=''CONTAINER'' and SOURCE_FILE_ID = '''||pSourceFileId||'''';
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MISSING_CONTAINER_CONFIG, 'WARNING', vParameters);
            RETURN NULL;
        WHEN TOO_MANY_ROWS THEN
            ENV_MANAGER.MSG_MULTIPLE_CONTAINER_ENTRIES := 'GET_CONTAINER_SOURCE_FILE_CONFIG_KEY: Multiple SOURCE_FILE_TYPE=''CONTAINER'' matches for SOURCE_FILE_ID: '||pSourceFileId||' in A_SOURCE_FILE_CONFIG';
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MULTIPLE_CONTAINER_ENTRIES, 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_CONTAINER_ENTRIES, ENV_MANAGER.MSG_MULTIPLE_CONTAINER_ENTRIES);
        WHEN OTHERS THEN
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters);
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN);

    END GET_CONTAINER_SOURCE_FILE_CONFIG_KEY;

    ----------------------------------------------------------------------------------------------------

    -- Returns the A_SOURCE_FILE_CONFIG_KEY matching the (pSourceFileType, pSourceFileId,
    -- pTableId) triple, or NULL when there is no match.
    -- Raises CODE_MULTIPLE_MATCH_FOR_SRCFILE when the triple is ambiguous.
    FUNCTION GET_SOURCE_FILE_CONFIG_KEY (
        pSourceFileType IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE DEFAULT 'INPUT'
        ,pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE
        ,pTableId IN CT_MRDS.A_SOURCE_FILE_CONFIG.TABLE_ID%TYPE
    ) RETURN PLS_INTEGER
    IS
        vSourceFileConfigKey PLS_INTEGER;
        vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
    BEGIN
        vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileType => '''||nvl(pSourceFileType, 'NULL')||''''
                                                                          ,'pSourceFileId => '''||nvl(pSourceFileId, 'NULL')||''''
                                                                          ,'pTableId => '''||nvl(pTableId, 'NULL')||''''));
        ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters);
        SELECT A_SOURCE_FILE_CONFIG_KEY
        INTO vSourceFileConfigKey
        FROM CT_MRDS.A_SOURCE_FILE_CONFIG
        WHERE SOURCE_FILE_TYPE = pSourceFileType
        AND SOURCE_FILE_ID = pSourceFileId
        AND TABLE_ID = pTableId;

        ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG', vParameters);
        RETURN vSourceFileConfigKey;

    EXCEPTION
        WHEN NO_DATA_FOUND THEN
            RETURN NULL;
        WHEN TOO_MANY_ROWS THEN
            vgMsgTmp := ENV_MANAGER.MSG_MULTIPLE_MATCH_FOR_SRCFILE
                ||cgBL||' '||'GET_SOURCE_FILE_CONFIG_KEY: Multiple matches in A_SOURCE_FILE_CONFIG table WHERE'
                ||cgBL||' '||'SOURCE_FILE_TYPE: '||pSourceFileType
                ||cgBL||' '||'SOURCE_FILE_ID: '||pSourceFileId
                ||cgBL||' '||'TABLE_ID: '||pTableId;
            ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_MATCH_FOR_SRCFILE, vgMsgTmp);
        WHEN OTHERS THEN
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters);
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN);

    END GET_SOURCE_FILE_CONFIG_KEY;

    ----------------------------------------------------------------------------------------------------

    -- Registers one file-configuration row in CT_MRDS.A_SOURCE_FILE_CONFIG and commits.
    PROCEDURE ADD_SOURCE_FILE_CONFIG (
        pSourceKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_KEY%TYPE
        ,pSourceFileType IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE
        ,pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE
        ,pSourceFileDesc IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_DESC%TYPE
        ,pSourceFileNamePattern IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_NAME_PATTERN%TYPE
        ,pTableId IN CT_MRDS.A_SOURCE_FILE_CONFIG.TABLE_ID%TYPE DEFAULT NULL
        ,pTemplateTableName IN
CT_MRDS.A_SOURCE_FILE_CONFIG.TEMPLATE_TABLE_NAME%TYPE DEFAULT NULL
        ,pContainerFileKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.CONTAINER_FILE_KEY%TYPE DEFAULT NULL
        ,pEncoding IN CT_MRDS.A_SOURCE_FILE_CONFIG.ENCODING%TYPE DEFAULT NULL -- MARS-1049: new parameter
    ) IS
        -- NOTE(review): vSourceFileConfigKey and vSourceKeyExists are declared but never used here.
        vSourceFileConfigKey PLS_INTEGER;
        vSourceKeyExists PLS_INTEGER := 0;
        vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;

    BEGIN
        vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSourceKey => '''||nvl(to_char(pSourceKey), 'NULL')||''''
                                                                          ,'pSourceFileType => '''||nvl(to_char(pSourceFileType), 'NULL')||''''
                                                                          ,'pSourceFileId => '''||nvl(to_char(pSourceFileId), 'NULL')||''''
                                                                          ,'pSourceFileDesc => '''||nvl(to_char(pSourceFileDesc), 'NULL')||''''
                                                                          ,'pSourceFileNamePattern => '''||nvl(to_char(pSourceFileNamePattern), 'NULL')||''''
                                                                          ,'pTableId => '''||nvl(to_char(pTableId), 'NULL')||''''
                                                                          ,'pTemplateTableName => '''||nvl(to_char(pTemplateTableName), 'NULL')||''''
                                                                          ,'pContainerFileKey => '''||nvl(to_char(pContainerFileKey), 'NULL')||''''
                                                                          ,'pEncoding => '''||nvl(to_char(pEncoding), 'NULL')||'''' -- MARS-1049: new
                                                                          ));
        ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
        INSERT INTO CT_MRDS.A_SOURCE_FILE_CONFIG(A_SOURCE_KEY, SOURCE_FILE_TYPE, SOURCE_FILE_ID, SOURCE_FILE_DESC, SOURCE_FILE_NAME_PATTERN, TABLE_ID, TEMPLATE_TABLE_NAME, CONTAINER_FILE_KEY, ENCODING)
        VALUES (pSourceKey, pSourceFileType, pSourceFileId, pSourceFileDesc, pSourceFileNamePattern, pTableId, pTemplateTableName, pContainerFileKey, pEncoding);
        COMMIT;
        ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters);
    EXCEPTION

        WHEN OTHERS THEN
            -- ORA-02291: integrity constraint violated - parent key (no matching A_SOURCE row)
            IF SQLCODE = -2291 THEN
                ENV_MANAGER.MSG_MISSING_SOURCE_KEY := 'The Source with A_SOURCE_KEY: '''||pSourceKey||''' not found in parent table A_SOURCE.'
                    ||cgBL||'First add a record to A_SOURCE:'
                    ||cgBL||'call file_manager.add_source(pSourceKey => '''||pSourceKey||''', pSourceName => ''...'')';
                ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MISSING_SOURCE_KEY, 'ERROR', vParameters);
                RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_SOURCE_KEY, ENV_MANAGER.MSG_MISSING_SOURCE_KEY);
            ELSE
                ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters);
                ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
                RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));
            END IF;

    END ADD_SOURCE_FILE_CONFIG;

    ----------------------------------------------------------------------------------------------------

    -- Registers a per-column date/timestamp format for a template table in
    -- CT_MRDS.A_COLUMN_DATE_FORMAT and commits.
    PROCEDURE ADD_COLUMN_DATE_FORMAT (
        pTemplateTableName IN CT_MRDS.A_COLUMN_DATE_FORMAT.TEMPLATE_TABLE_NAME%TYPE
        ,pColumnName IN CT_MRDS.A_COLUMN_DATE_FORMAT.COLUMN_NAME%TYPE
        ,pDateFormat IN CT_MRDS.A_COLUMN_DATE_FORMAT.DATE_FORMAT%TYPE
    ) IS
        vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
    BEGIN
        vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||''''
                                                                          ,'pColumnName => '''||nvl(pColumnName, 'NULL')||''''
                                                                          ,'pDateFormat => '''||nvl(pDateFormat, 'NULL')||''''
                                                                          ));

        ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters);
        INSERT INTO CT_MRDS.A_COLUMN_DATE_FORMAT(TEMPLATE_TABLE_NAME, COLUMN_NAME, DATE_FORMAT)
        VALUES (pTemplateTableName, pColumnName, pDateFormat);
        COMMIT;

        ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters);
    EXCEPTION
        WHEN OTHERS THEN
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN, 'ERROR', vParameters);
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat =>
'OUTPUT', pCode=> SQLCODE));
    END ADD_COLUMN_DATE_FORMAT;

    ----------------------------------------------------------------------------------------------------

    -- Maps a logical bucket area name to its configured object-store URI.
    -- Accepts 'INBOX', 'ODS', 'DATA' (ODS and DATA share gvDataBucketUri) and 'ARCHIVE';
    -- any other value raises CODE_INVALID_BUCKET_AREA.
    FUNCTION GET_BUCKET_URI(pBucketArea VARCHAR2)
    RETURN VARCHAR2
    IS
    BEGIN
        CASE pBucketArea
            WHEN 'INBOX' THEN RETURN ENV_MANAGER.gvInboxBucketUri;
            WHEN 'ODS' THEN RETURN ENV_MANAGER.gvDataBucketUri;
            WHEN 'DATA' THEN RETURN ENV_MANAGER.gvDataBucketUri;
            WHEN 'ARCHIVE' THEN RETURN ENV_MANAGER.gvArchiveBucketUri;
            ELSE
                RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_BUCKET_AREA,
                    ENV_MANAGER.MSG_INVALID_BUCKET_AREA || ' Provided: ''' || pBucketArea || '''');
        END CASE;
    END;

    ----------------------------------------------------------------------------------------------------

    -- Builds a human-readable report about one A_SOURCE_FILE_CONFIG row, optionally
    -- including its container config and the A_COLUMN_DATE_FORMAT entries of its
    -- template table.
    FUNCTION GET_DET_SOURCE_FILE_CONFIG_INFO (
        pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
        ,pIncludeContainerInfo IN PLS_INTEGER DEFAULT 1
        ,pIncludeColumnFormatInfo IN PLS_INTEGER DEFAULT 1
    ) RETURN VARCHAR2
    ----
    -- Function to get info about File Configuration entry
    IS
        vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE;
        vContainerFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE;
        vCount PLS_INTEGER := 0;
        addColHeader BOOLEAN := TRUE;
        vMsgTmp VARCHAR2(32000):= '';
        CURSOR cColumnFormat(vTableName A_COLUMN_DATE_FORMAT.TEMPLATE_TABLE_NAME%TYPE) IS
            SELECT *
            FROM CT_MRDS.A_COLUMN_DATE_FORMAT
            WHERE TEMPLATE_TABLE_NAME = vTableName;
        vColumnDateFormat A_COLUMN_DATE_FORMAT%ROWTYPE;

        -- Local helper: renders one config row as a multi-line text block,
        -- indented by pLevel and headed by pTypeInfo.
        FUNCTION FORMAT_CONFIG( pSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE,
                                pTypeInfo VARCHAR2 DEFAULT 'File Configuration',
                                pLevel VARCHAR2 DEFAULT '')
        RETURN VARCHAR2
        IS
            vMsg VARCHAR2(9999);
        BEGIN
            vMsg := ''
                ||cgBL||pLevel||''||'Details about '||pTypeInfo||':'
                ||cgBL||pLevel||''||'--------------------------------'
                ||cgBL||pLevel||'A_SOURCE_FILE_CONFIG_KEY = '||pSourceFileConfig.A_SOURCE_FILE_CONFIG_KEY
                ||cgBL||pLevel||'A_SOURCE_KEY = '||pSourceFileConfig.A_SOURCE_KEY
                ||cgBL||pLevel||'SOURCE_FILE_TYPE = '||pSourceFileConfig.SOURCE_FILE_TYPE
                ||cgBL||pLevel||'SOURCE_FILE_ID = '||pSourceFileConfig.SOURCE_FILE_ID
                ||cgBL||pLevel||'SOURCE_FILE_DESC = '||pSourceFileConfig.SOURCE_FILE_DESC
                ||cgBL||pLevel||'SOURCE_FILE_NAME_PATTERN = '||pSourceFileConfig.SOURCE_FILE_NAME_PATTERN
                ||cgBL||pLevel||'TABLE_ID = '||pSourceFileConfig.TABLE_ID
                ||cgBL||pLevel||'TEMPLATE_TABLE_NAME = '||pSourceFileConfig.TEMPLATE_TABLE_NAME
                ||cgBL||pLevel||'CONTAINER_FILE_KEY = '||pSourceFileConfig.CONTAINER_FILE_KEY
                ||cgBL||pLevel||'ODS_SCHEMA_NAME = '||pSourceFileConfig.ODS_SCHEMA_NAME
                ||cgBL||pLevel||'DAYS_FOR_ARCHIVE_THRESHOLD = '||pSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD
                ||cgBL||pLevel||'FILES_COUNT_OVER_ARCHIVE_THRESHOLD = '||pSourceFileConfig.FILES_COUNT_OVER_ARCHIVE_THRESHOLD
                ||cgBL||pLevel||'BYTES_SUM_OVER_ARCHIVE_THRESHOLD = '||pSourceFileConfig.BYTES_SUM_OVER_ARCHIVE_THRESHOLD
                ||cgBL||pLevel||'ROWS_COUNT_OVER_ARCHIVE_THRESHOLD = '||pSourceFileConfig.ROWS_COUNT_OVER_ARCHIVE_THRESHOLD
                ||cgBL||pLevel||'HOURS_TO_EXPIRE_STATISTICS = '||pSourceFileConfig.HOURS_TO_EXPIRE_STATISTICS

                ||cgBL||pLevel||''||'--------------------------------'
            ;
            RETURN vMsg;
        END FORMAT_CONFIG;

    BEGIN
        vMsgTmp := '';
        -- Get Main Config
        BEGIN
            SELECT *
            INTO vSourceFileConfig
            FROM CT_MRDS.A_SOURCE_FILE_CONFIG
            WHERE A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey;

            vMsgTmp := FORMAT_CONFIG(pSourceFileConfig => vSourceFileConfig, pTypeInfo => 'File Configuration');
        EXCEPTION
            WHEN NO_DATA_FOUND THEN
                vMsgTmp := 'There is no config entry in A_SOURCE_FILE_CONFIG where A_SOURCE_FILE_CONFIG_KEY = '||pSourceFileConfigKey;
                RETURN vMsgTmp;
        END;

        -- Get Container Config
        IF pIncludeContainerInfo > 0 THEN
            BEGIN
                SELECT *
                INTO vContainerFileConfig
                FROM CT_MRDS.A_SOURCE_FILE_CONFIG
                WHERE A_SOURCE_FILE_CONFIG_KEY = vSourceFileConfig.CONTAINER_FILE_KEY;

                vMsgTmp := vMsgTmp || cgBL || FORMAT_CONFIG(pSourceFileConfig => vContainerFileConfig, pTypeInfo => 'related Container Config', pLevel => ' ');

            EXCEPTION
                WHEN NO_DATA_FOUND THEN
                    vMsgTmp := vMsgTmp|| cgBL || 'There is no CONTAINER config entry in A_SOURCE_FILE_CONFIG.';
                    -- RETURN vMsgTmp;
            END;
        END IF;

        -- Get Column Date Format Config
        IF pIncludeColumnFormatInfo > 0 THEN
            BEGIN
                OPEN cColumnFormat(vTableName => vSourceFileConfig.TEMPLATE_TABLE_NAME);
                LOOP
                    FETCH cColumnFormat INTO vColumnDateFormat;
                    -- Emit the section header once, on the first fetched row only.
                    IF cColumnFormat%FOUND AND addColHeader THEN
                        vCount := 1;
                        vMsgTmp := vMsgTmp||cgBL|| cgBL || ' Column Date Format config entries:';
                        vMsgTmp := vMsgTmp||cgBL||''||' --------------------------------';
                        addColHeader := FALSE;
                    END IF;
                    EXIT WHEN cColumnFormat%NOTFOUND;
                    vMsgTmp := vMsgTmp
                        ||cgBL||' TEMPLATE_TABLE_NAME = '||vColumnDateFormat.TEMPLATE_TABLE_NAME
                        ||cgBL||' COLUMN_NAME = '||vColumnDateFormat.COLUMN_NAME
                        ||cgBL||' DATE_FORMAT = '||vColumnDateFormat.DATE_FORMAT
                        ||cgBL||''||' --------------------------------';
                END LOOP;
                If vCount=0 THEN
                    vMsgTmp := vMsgTmp || cgBL || 'There is no Column Date Format config entries in A_COLUMN_DATE_FORMAT where TEMPLATE_TABLE_NAME = '||NVL(vSourceFileConfig.TEMPLATE_TABLE_NAME,'NULL');
                END IF;
                -- NOTE(review): the cursor stays open if an exception escapes before this CLOSE.
                CLOSE cColumnFormat;
            END;
        END IF;

        RETURN vMsgTmp;
    EXCEPTION

        WHEN OTHERS THEN
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));

    END GET_DET_SOURCE_FILE_CONFIG_INFO;

    ----------------------------------------------------------------------------------------------------

    -- Builds a human-readable report about one A_SOURCE_FILE_RECEIVED row; optionally
    -- appends the related config / container / column-format details.
    FUNCTION GET_DET_SOURCE_FILE_RECEIVED_INFO (
        pSourceFileReceivedKey IN CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE
        ,pIncludeConfigInfo IN PLS_INTEGER DEFAULT 1
        ,pIncludeContainerInfo IN PLS_INTEGER DEFAULT 1
        ,pIncludeColumnFormatInfo IN PLS_INTEGER DEFAULT 1
    ) RETURN VARCHAR2
    ----
    -- Function to get info about File Received entry
    IS
vSourceFileReceived CT_MRDS.A_SOURCE_FILE_RECEIVED%ROWTYPE;
        vMsgTmp VARCHAR2(32000):= '';
    BEGIN

        BEGIN
            SELECT *
            INTO vSourceFileReceived
            FROM CT_MRDS.A_SOURCE_FILE_RECEIVED
            WHERE A_SOURCE_FILE_RECEIVED_KEY = pSourceFileReceivedKey;

            vMsgTmp := ''
                ||cgBL||''||'Details about received file:'
                ||cgBL||''||'--------------------------------'
                ||cgBL||'A_SOURCE_FILE_RECEIVED_KEY = '||vSourceFileReceived.A_SOURCE_FILE_RECEIVED_KEY
                ||cgBL||'A_SOURCE_FILE_CONFIG_KEY = '||vSourceFileReceived.A_SOURCE_FILE_CONFIG_KEY
                ||cgBL||'SOURCE_FILE_NAME = '||vSourceFileReceived.SOURCE_FILE_NAME
                ||cgBL||'CHECKSUM = '||vSourceFileReceived.CHECKSUM
                ||cgBL||'CREATED = '||vSourceFileReceived.CREATED
                ||cgBL||'BYTES = '||vSourceFileReceived.BYTES
                ||cgBL||'RECEPTION_DATE = '||vSourceFileReceived.RECEPTION_DATE
                ||cgBL||'PROCESSING_STATUS = '||vSourceFileReceived.PROCESSING_STATUS
                ||cgBL||'EXTERNAL_TABLE_NAME = '||vSourceFileReceived.EXTERNAL_TABLE_NAME
                ||cgBL||'PARTITION_YEAR = '||vSourceFileReceived.PARTITION_YEAR
                ||cgBL||'PARTITION_MONTH = '||vSourceFileReceived.PARTITION_MONTH
                ||cgBL||''||'--------------------------------'
            ;
        EXCEPTION
            WHEN NO_DATA_FOUND THEN
                vMsgTmp := 'There is no data in A_SOURCE_FILE_RECEIVED where A_SOURCE_FILE_RECEIVED_KEY = '||pSourceFileReceivedKey;
                RETURN vMsgTmp;
        END;

        -- Optionally append the full configuration report for this file's config key.
        IF pIncludeConfigInfo>0 THEN
            vMsgTmp := vMsgTmp || cgBL || CT_MRDS.FILE_MANAGER.GET_DET_SOURCE_FILE_CONFIG_INFO(vSourceFileReceived.A_SOURCE_FILE_CONFIG_KEY,pIncludeContainerInfo,pIncludeColumnFormatInfo);
        END IF;
        RETURN vMsgTmp;
    EXCEPTION

        WHEN OTHERS THEN
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));

    END GET_DET_SOURCE_FILE_RECEIVED_INFO;

    ----------------------------------------------------------------------------------------------------

    -- Renders one USER_LOAD_OPERATIONS row (matched by ID) as a multi-line text report.
    FUNCTION GET_DET_USER_LOAD_OPERATIONS (
        pOperationId PLS_INTEGER
    ) RETURN VARCHAR2
    ----
    -- Function to get info about a USER_LOAD_OPERATIONS entry
    IS
        vUserLoadOperations USER_LOAD_OPERATIONS%ROWTYPE;
        vMsgTmp VARCHAR2(32000):= '';
    BEGIN

        BEGIN
            SELECT *
            INTO vUserLoadOperations
            FROM USER_LOAD_OPERATIONS
            WHERE id = pOperationId;

            vMsgTmp := ''
                ||''||'Details about USER_LOAD_OPERATIONS where ID = '||pOperationId
                ||cgBL||''||'--------------------------------'
                ||cgBL||'ID = '||vUserLoadOperations.ID
                ||cgBL||'TYPE = '||vUserLoadOperations.TYPE
                ||cgBL||'SID = '||vUserLoadOperations.SID
                ||cgBL||'SERIAL# = '||vUserLoadOperations.SERIAL#
                ||cgBL||'START_TIME = '||vUserLoadOperations.START_TIME
                ||cgBL||'UPDATE_TIME = '||vUserLoadOperations.UPDATE_TIME
                ||cgBL||'STATUS = '||vUserLoadOperations.STATUS
                ||cgBL||'OWNER_NAME = '||vUserLoadOperations.OWNER_NAME
                ||cgBL||'TABLE_NAME = '||vUserLoadOperations.TABLE_NAME
                ||cgBL||'PARTITION_NAME = '||vUserLoadOperations.PARTITION_NAME
                ||cgBL||'SUBPARTITION_NAME = '||vUserLoadOperations.SUBPARTITION_NAME
                ||cgBL||'FILE_URI_LIST = '||vUserLoadOperations.FILE_URI_LIST
                ||cgBL||'ROWS_LOADED = '||vUserLoadOperations.ROWS_LOADED
                ||cgBL||'LOGFILE_TABLE = '||vUserLoadOperations.LOGFILE_TABLE
                ||cgBL||'BADFILE_TABLE = '||vUserLoadOperations.BADFILE_TABLE
                ||cgBL||'STATUS_TABLE = '||vUserLoadOperations.STATUS_TABLE
                ||cgBL||'TEMPEXT_TABLE = '||vUserLoadOperations.TEMPEXT_TABLE
                ||cgBL||'CREDENTIAL_NAME = '||vUserLoadOperations.CREDENTIAL_NAME
                ||cgBL||'EXPIRATION_TIME = '||vUserLoadOperations.EXPIRATION_TIME
                ||cgBL||''||'--------------------------------'
            ;
        EXCEPTION
            WHEN NO_DATA_FOUND THEN
                vMsgTmp := 'There is no data in USER_LOAD_OPERATIONS where ID = '||pOperationId;
                RETURN vMsgTmp;
        END;

        RETURN vMsgTmp;
    EXCEPTION

        WHEN OTHERS THEN
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));

    END GET_DET_USER_LOAD_OPERATIONS;

----------------------------------------------------------------------------------------------------

    -- Convenience overload: derives all inputs for ENV_MANAGER.ANALYZE_VALIDATION_ERRORS
    -- (log table, template schema/table, CSV URI) from one A_SOURCE_FILE_RECEIVED key
    -- and returns that function's report text. On any error a descriptive error string
    -- is RETURNED (not raised).
    FUNCTION ANALYZE_VALIDATION_ERRORS(
        pSourceFileReceivedKey IN NUMBER
    ) RETURN VARCHAR2
    IS
        vSourceFileReceived CT_MRDS.A_SOURCE_FILE_RECEIVED%ROWTYPE;
        vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE;
        vValidationLogTable VARCHAR2(128);
        vTemplateSchema VARCHAR2(128);
        vTemplateTable VARCHAR2(128);
        vCsvFileUri VARCHAR2(4000);
        vParameters VARCHAR2(4000);
        vResult VARCHAR2(32000);
    BEGIN
        vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST(
            'pSourceFileReceivedKey => ' || pSourceFileReceivedKey
        ));
        ENV_MANAGER.LOG_PROCESS_EVENT('Start', 'DEBUG', vParameters);

        -- Get file and config information
        BEGIN
            -- Get source file received data first
            SELECT *
            INTO vSourceFileReceived
            FROM CT_MRDS.A_SOURCE_FILE_RECEIVED sfr
            WHERE sfr.A_SOURCE_FILE_RECEIVED_KEY = pSourceFileReceivedKey;

            -- Get source file config data
            SELECT *
            INTO vSourceFileConfig
            FROM CT_MRDS.A_SOURCE_FILE_CONFIG sfc
            WHERE sfc.A_SOURCE_FILE_CONFIG_KEY = vSourceFileReceived.A_SOURCE_FILE_CONFIG_KEY;
        EXCEPTION
            WHEN NO_DATA_FOUND THEN
                ENV_MANAGER.LOG_PROCESS_ERROR('Source file or config not found for key: ' || pSourceFileReceivedKey, vParameters);
                RETURN 'Error: Source file with key ' || pSourceFileReceivedKey || ' not found in A_SOURCE_FILE_RECEIVED table';
        END;

        -- Extract template schema and table from template table name
        vTemplateSchema := REGEXP_SUBSTR(vSourceFileConfig.TEMPLATE_TABLE_NAME, '^([^.]+)');
        vTemplateTable := REGEXP_SUBSTR(vSourceFileConfig.TEMPLATE_TABLE_NAME, '\.(.+)$', 1, 1, NULL, 1);

        -- Build CSV file URI
        vCsvFileUri := ENV_MANAGER.gvInboxBucketUri ||
                       'INBOX/' || vSourceFileConfig.A_SOURCE_KEY || '/' ||
                       vSourceFileConfig.SOURCE_FILE_ID || '/' ||
                       vSourceFileConfig.TABLE_ID || '/' ||
                       vSourceFileReceived.SOURCE_FILE_NAME;

        -- Find validation log table (most recent one with errors)
        -- Look for validation log tables related to this external table
        BEGIN
            -- NOTE(review): this selects the highest-numbered VALIDATE$n_LOG table in the
            -- whole schema, not necessarily the one produced for THIS file's external
            -- table - confirm this is acceptable for concurrent loads.
            SELECT table_name
            INTO vValidationLogTable
            FROM (
                SELECT table_name,
                       REGEXP_SUBSTR(table_name, '\$([0-9]+)_', 1, 1, NULL, 1) as log_number
                FROM USER_TABLES
                WHERE table_name LIKE 'VALIDATE$%_LOG'
                ORDER BY TO_NUMBER(log_number) DESC
            )
            WHERE ROWNUM = 1;
        EXCEPTION
            WHEN NO_DATA_FOUND THEN
                -- If no validation log tables found, use a default name
                vValidationLogTable := 'VALIDATE$999_LOG';
        END;

        ENV_MANAGER.LOG_PROCESS_EVENT('Calling ENV_MANAGER.ANALYZE_VALIDATION_ERRORS with parameters: ' ||
            'LogTable=' || vValidationLogTable ||
            ', Schema=' || vTemplateSchema ||
            ', Table=' || vTemplateTable ||
            ', URI=' || SUBSTR(vCsvFileUri, 1, 100) || '...', 'DEBUG', vParameters);

        -- Call the main function with derived parameters
        vResult := ENV_MANAGER.ANALYZE_VALIDATION_ERRORS(
            pValidationLogTable => vValidationLogTable,
            pTemplateSchema => vTemplateSchema,
            pTemplateTable => vTemplateTable,
            pCsvFileUri => vCsvFileUri
        );

        ENV_MANAGER.LOG_PROCESS_EVENT('End', 'DEBUG', vParameters);
        RETURN vResult;

    EXCEPTION
        WHEN OTHERS THEN
            ENV_MANAGER.LOG_PROCESS_ERROR('Error in ANALYZE_VALIDATION_ERRORS: ' || SQLERRM, vParameters);
            RETURN 'Error analyzing validation errors: ' || SQLERRM ||
                   cgBL || 'Source File Key: ' || pSourceFileReceivedKey ||
                   cgBL || 'Check A_SOURCE_FILE_RECEIVED and A_SOURCE_FILE_CONFIG tables for data integrity.';
    END ANALYZE_VALIDATION_ERRORS;

    ----------------------------------------------------------------------------------------------------
    -- PACKAGE VERSION MANAGEMENT FUNCTIONS IMPLEMENTATION
    ----------------------------------------------------------------------------------------------------

    -- Returns the PACKAGE_VERSION constant (semantic version string).
    FUNCTION GET_VERSION
    RETURN VARCHAR2
    IS
    BEGIN
        RETURN PACKAGE_VERSION;
    END GET_VERSION;

    ----------------------------------------------------------------------------------------------------

    -- Returns a formatted build-info string assembled by ENV_MANAGER from the
    -- package version constants.
    FUNCTION GET_BUILD_INFO
    RETURN
VARCHAR2
    IS
    BEGIN
        RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO(
            pPackageName => 'FILE_MANAGER',
            pVersion => PACKAGE_VERSION,
            pBuildDate => PACKAGE_BUILD_DATE,
            pAuthor => PACKAGE_AUTHOR
        );
    END GET_BUILD_INFO;

    ----------------------------------------------------------------------------------------------------

    -- Returns the package VERSION_HISTORY constant formatted by ENV_MANAGER.
    FUNCTION GET_VERSION_HISTORY
    RETURN VARCHAR2
    IS
    BEGIN
        RETURN ENV_MANAGER.FORMAT_VERSION_HISTORY(
            pPackageName => 'FILE_MANAGER',
            pVersionHistory => VERSION_HISTORY
        );
    END GET_VERSION_HISTORY;

    ----------------------------------------------------------------------------------------------------

END;

-- FIX: the script previously ended the package body with TWO slash terminators;
-- in SQL*Plus a bare '/' re-executes the current buffer, so the CREATE PACKAGE BODY
-- DDL ran twice. A single '/' is sufficient.
/

diff --git a/MARS_Packages/REL03/MARS-1057/current_version/FILE_MANAGER.pkg b/MARS_Packages/REL03/MARS-1057/current_version/FILE_MANAGER.pkg
new file mode 100644
index 0000000..43cda3d
--- /dev/null
+++ b/MARS_Packages/REL03/MARS-1057/current_version/FILE_MANAGER.pkg
@@ -0,0 +1,637 @@
create or replace PACKAGE CT_MRDS.FILE_MANAGER
AUTHID CURRENT_USER
AS
    /**
     * General comment for package: Please put comments for functions and procedures as shown in below example.
     * It is a standard.
     * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function
     * which returns documentation text for confluence page (to Copy-Paste it).
+ **/ + + -- Example comment: + /** + * @name EX_PROCEDURE_NAME + * @desc Procedure description + * @example select FILE_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual; + * @ex_rslt Example Result + **/ + + -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.3.1'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-11-27 14:00:00'; + PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; + + -- Version History (Latest changes first) + VERSION_HISTORY CONSTANT VARCHAR2(4000) := + '3.3.1 (2025-11-27): MARS-1046 - Fixed ISO 8601 datetime format parsing with milliseconds and timezone (e.g., 2012-03-02T14:16:23.798+01:00)' || CHR(13)||CHR(10) || + '3.3.0 (2025-11-26): MARS-1056 - Fixed VARCHAR2 definitions in GENERATE_EXTERNAL_TABLE_PARAMS to preserve CHAR/BYTE semantics from template tables' || CHR(13)||CHR(10) || + '3.2.1 (2025-11-24): MARS-1049 - Added pEncoding parameter support for CSV character set specification' || CHR(13)||CHR(10) || + '3.2.0 (2025-10-22): Added package versioning system using centralized ENV_MANAGER functions' || CHR(13)||CHR(10) || + '3.1.0 (2025-10-20): Enhanced PROCESS_SOURCE_FILE with 6-step validation workflow' || CHR(13)||CHR(10) || + '3.0.0 (2025-10-15): Separated export procedures into dedicated DATA_EXPORTER package' || CHR(13)||CHR(10) || + '2.5.0 (2025-10-10): Added DELETE_SOURCE_CASCADE for safe configuration removal' || CHR(13)||CHR(10) || + '2.0.0 (2025-09-25): Added official path patterns support (INBOX 3-level, ODS 2-level, ARCHIVE 2-level)' || CHR(13)||CHR(10) || + '1.0.0 (2025-09-01): Initial release with file processing and validation capabilities'; + + TYPE tSourceFileReceived IS RECORD + ( + A_SOURCE_FILE_RECEIVED_KEY CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE, + A_SOURCE_FILE_CONFIG_KEY CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_CONFIG_KEY%TYPE, + SOURCE_FILE_PREFIX_INBOX VARCHAR2(430), + SOURCE_FILE_PREFIX_ODS 
VARCHAR2(430), + SOURCE_FILE_PREFIX_QUARANTINE VARCHAR2(430), + SOURCE_FILE_PREFIX_ARCHIVE VARCHAR2(430), + SOURCE_FILE_NAME CT_MRDS.A_SOURCE_FILE_RECEIVED.SOURCE_FILE_NAME%TYPE, + RECEPTION_DATE CT_MRDS.A_SOURCE_FILE_RECEIVED.RECEPTION_DATE%TYPE, + PROCESSING_STATUS CT_MRDS.A_SOURCE_FILE_RECEIVED.PROCESSING_STATUS%TYPE, + EXTERNAL_TABLE_NAME CT_MRDS.A_SOURCE_FILE_RECEIVED.EXTERNAL_TABLE_NAME%TYPE + ); + + cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); + vgSourceFileConfigKey PLS_INTEGER; + vgMsgTmp VARCHAR2(32000); + + + + --------------------------------------------------------------------------------------------------------------------------- + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name GET_SOURCE_FILE_CONFIG + * @desc Get source file type by matching the source file name against source file type naming patterns + * or by specifying the id of a received source file. + * @example ... + * @ex_rslt "CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE" + **/ + FUNCTION GET_SOURCE_FILE_CONFIG(pFileUri IN VARCHAR2 DEFAULT NULL + , pSourceFileReceivedKey IN NUMBER DEFAULT NULL + , pSourceFileConfigKey IN NUMBER DEFAULT NULL) + RETURN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + + + + /** + * @name REGISTER_SOURCE_FILE_RECEIVED + * @desc Register a newly received source file in A_SOURCE_FILE_RECEIVED table. + * This overload automatically determines source file type from the file name. + * It returns the value of A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY column for newly added record. 
+ * @example vSourceFileReceivedKey := FILE_MANAGER.REGISTER_SOURCE_FILE_RECEIVED(pSourceFileReceivedName => 'INBOX/C2D/UC_DISSEM/UC_NMA_DISSEM/UC_NMA_DISSEM-277740.csv'); + * @ex_rslt 3245 + **/ + FUNCTION REGISTER_SOURCE_FILE_RECEIVED ( + pSourceFileReceivedName IN VARCHAR2 + ) + RETURN PLS_INTEGER; + + + + /** + * @name REGISTER_SOURCE_FILE_RECEIVED + * @desc Register a new source file in A_SOURCE_FILE_RECEIVED table based on pSourceFileReceivedName and pSourceFileConfig. + * Then it returns the value of A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY column for newly added record. + * @example vSourceFileReceivedKey := FILE_MANAGER.REGISTER_SOURCE_FILE_RECEIVED( + * pSourceFileReceivedName => 'INBOX/C2D/UC_DISSEM/UC_NMA_DISSEM/UC_NMA_DISSEM-277740.csv' + * ,pSourceFileConfig => ...A_SOURCE_FILE_CONFIG%ROWTYPE... ); + * @ex_rslt 3245 + **/ + FUNCTION REGISTER_SOURCE_FILE_RECEIVED ( + pSourceFileReceivedName IN VARCHAR2, + pSourceFileConfig IN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE + ) + RETURN PLS_INTEGER; + + + + /** + * @name SET_SOURCE_FILE_RECEIVED_STATUS + * @desc Set status of file in A_SOURCE_FILE_RECEIVED table - PROCESSING_STATUS column + * based on A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY + * and provided value of pStatus parameter + * @example exec FILE_MANAGER.SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => 377, pStatus => 'READY_FOR_INGESTION'); + **/ + PROCEDURE SET_SOURCE_FILE_RECEIVED_STATUS( + pSourceFileReceivedKey IN PLS_INTEGER, + pStatus IN VARCHAR2 + ); + + + + /** + * @name GET_EXTERNAL_TABLE_COLUMNS + * @desc Function used to get string with all table columns definitions based on pTargetTableTemplate "TEMPLATE TABLE" name. + * It is used for creating "EXTERNAL TABLE" using CREATE_EXTERNAL_TABLE procedure. 
+ * @example select FILE_MANAGER.GET_EXTERNAL_TABLE_COLUMNS(pTargetTableTemplate => 'CT_ET_TEMPLATES.LM_STANDING_FACILITIES_HEADER') from dual; + * @ex_rslt "A_KEY" NUMBER(38,0) NOT NULL ENABLE, + * "A_WORKFLOW_HISTORY_KEY" NUMBER(38,0) NOT NULL ENABLE, + * "REV_NUMBER" NUMBER(28,0), + * "REF_DATE" DATE, + * "FREE_TEXT" VARCHAR2(1000 CHAR), + * "MLF_BS_TOTAL" NUMBER(28,10), + * "DF_BS_TOTAL" NUMBER(28,10), + * "MLF_SF_TOTAL" NUMBER(28,10), + * "DF_SF_TOTAL" NUMBER(28,10) + **/ + FUNCTION GET_EXTERNAL_TABLE_COLUMNS ( + pTargetTableTemplate IN VARCHAR2 + ) + RETURN CLOB; + + + + /** + * @name CREATE_EXTERNAL_TABLE + * @desc A wrapper procedure for DBMS_CLOUD.CREATE_EXTERNAL_TABLE which creates External Table + * MARS-1049: Added pEncoding parameter for CSV character set specification + * @param pEncoding - Character set encoding for CSV files (e.g., 'UTF8', 'WE8MSWIN1252') + * If provided, adds CHARACTERSET clause to external table definition + * @example + * begin + * FILE_MANAGER.CREATE_EXTERNAL_TABLE( + * pTableName => 'STANDING_FACILITIES_HEADER', + * pTemplateTableName => 'CT_ET_TEMPLATES.LM_STANDING_FACILITIES_HEADER', + * pPrefix => 'ODS/LM/STANDING_FACILITIES_HEADER/', + * pBucketUri => 'https://objectstorage.eu-frankfurt-1.oraclecloud.com/n/frcnomajoc7v/b/mrds_data_tst/o/', + * pFileName => NULL, + * pDelimiter => ',', + * pEncoding => 'UTF8' + * ); + * end; + **/ + PROCEDURE CREATE_EXTERNAL_TABLE ( + pTableName IN VARCHAR2, + pTemplateTableName IN VARCHAR2, + pPrefix IN VARCHAR2, + pBucketUri IN VARCHAR2 DEFAULT ENV_MANAGER.gvInboxBucketUri, + pFileName IN VARCHAR2 DEFAULT NULL, + pDelimiter IN VARCHAR2 DEFAULT ',', + pEncoding IN VARCHAR2 DEFAULT NULL -- MARS-1049: NEW PARAMETER + ); + + + + /** + * @name CREATE_EXTERNAL_TABLE + * @desc Creates External Table for single file provided by + * pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * @example exec FILE_MANAGER.CREATE_EXTERNAL_TABLE(pSourceFileReceivedKey => 
377); + **/ + PROCEDURE CREATE_EXTERNAL_TABLE ( + pSourceFileReceivedKey IN NUMBER + ); + + + + /** + * @name VALIDATE_SOURCE_FILE_RECEIVED + * @desc A wrapper procedure for DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE + * It validates External Table built upon a single file + * provided by pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * @example exec FILE_MANAGER.VALIDATE_SOURCE_FILE_RECEIVED(pSourceFileReceivedKey => 377); + **/ + PROCEDURE VALIDATE_SOURCE_FILE_RECEIVED + ( + pSourceFileReceivedKey IN NUMBER + ); + + + /** + * @name VALIDATE_EXTERNAL_TABLE + * @desc A wrapper function for DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE. + * It validates External Table provided by parameter pTableName. + * It returns: PASSED or FAILED. + * @example + * declare + * vStatus VARCHAR2(100); + * begin + * vStatus := FILE_MANAGER.VALIDATE_EXTERNAL_TABLE(pTableName => 'STANDING_FACILITIES_HEADER'); + * DBMS_OUTPUT.PUT_LINE('vStatus = '||vStatus); + * end; + * + * @ex_rslt FAILED + **/ + FUNCTION VALIDATE_EXTERNAL_TABLE(pTableName IN VARCHAR2) + RETURN VARCHAR2; + + + /** + * @name S_VALIDATE_EXTERNAL_TABLE + * @desc A function which checks if SELECT query returns any rows. + * It tries to select the External Table provided by parameter pTableName. + * It returns: PASSED or FAILED. 
+ * @example + * declare + * vStatus VARCHAR2(100); + * begin + * vStatus := FILE_MANAGER.S_VALIDATE_EXTERNAL_TABLE(pTableName => 'STANDING_FACILITIES_HEADER'); + * DBMS_OUTPUT.PUT_LINE('vStatus = '||vStatus); + * end; + * + * @ex_rslt PASSED + **/ + FUNCTION S_VALIDATE_EXTERNAL_TABLE(pTableName IN VARCHAR2) + RETURN VARCHAR2; + + + + /** + * @name DROP_EXTERNAL_TABLE + * @desc It drops External Table for single file provided by + * pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * @example exec FILE_MANAGER.DROP_EXTERNAL_TABLE(pSourceFileReceivedKey => 377); + **/ + PROCEDURE DROP_EXTERNAL_TABLE ( + pSourceFileReceivedKey IN NUMBER + ); + + + + /** + * @name COPY_FILE + * @desc It copies file provided by + * pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * into destination provided by pDestination parameter. + * pDestination parameter allowed values are: 'ODS' + * @example exec FILE_MANAGER.COPY_FILE(pSourceFileReceivedKey => 377, pDestination => 'ODS'); + **/ + PROCEDURE COPY_FILE( + pSourceFileReceivedKey IN NUMBER, + pDestination IN VARCHAR2 + ); + + + + + /** + * @name MOVE_FILE + * @desc It moves file provided by + * pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * into destination provided by pDestination parameter. + * pDestination parameter allowed values are: 'ODS', 'QUARANTINE' + * @example exec FILE_MANAGER.MOVE_FILE(pSourceFileReceivedKey => 377, pDestination => 'ODS'); + **/ + PROCEDURE MOVE_FILE( + pSourceFileReceivedKey IN NUMBER, + pDestination IN VARCHAR2 + ); + + + + /** + * @name DELETE_FOLDER_CONTENTS + * @desc It deletes all files from specified folder in the cloud storage. + * The procedure lists all objects in the specified folder prefix and deletes them one by one. 
+ * pBucketArea parameter specifies which bucket to use: 'INBOX', 'DATA', 'ARCHIVE' + * pFolderPrefix parameter specifies the folder path within the bucket (e.g., 'C2D/UC_DISSEM/UC_NMA_DISSEM/') + * @example exec FILE_MANAGER.DELETE_FOLDER_CONTENTS(pBucketArea => 'INBOX', pFolderPrefix => 'C2D/UC_DISSEM/UC_NMA_DISSEM/'); + **/ + PROCEDURE DELETE_FOLDER_CONTENTS( + pBucketArea IN VARCHAR2, + pFolderPrefix IN VARCHAR2 + ); + + + + + /** + * @name PROCESS_SOURCE_FILE + * @desc It processes the file provided by pSourceFileReceivedName parameter. + * Umbrella procedure that calls: + * - REGISTER_SOURCE_FILE_RECEIVED; + * - CREATE_EXTERNAL_TABLE; + * - VALIDATE_SOURCE_FILE_RECEIVED; + * - DROP_EXTERNAL_TABLE; + * - MOVE_FILE; + * @example exec FILE_MANAGER.PROCESS_SOURCE_FILE(pSourceFileReceivedName => 'INBOX/C2D/UC_DISSEM/UC_NMA_DISSEM/UC_NMA_DISSEM-277740.csv'); + **/ + PROCEDURE PROCESS_SOURCE_FILE(pSourceFileReceivedName IN VARCHAR2) + ; + + + + /** + * @name PROCESS_SOURCE_FILE + * @desc It processes the file provided by pSourceFileReceivedName parameter and returns a processing result value. + * It returns (success/failure) => 0 / -(value). + * Umbrella function that calls PROCESS_SOURCE_FILE procedure. + * @example + * declare + * vResult PLS_INTEGER; + * begin + * vResult := CT_MRDS.FILE_MANAGER.PROCESS_SOURCE_FILE(PSOURCEFILERECEIVEDNAME => 'INBOX/C2D/UC_DISSEM/UC_NMA_DISSEM/UC_NMA_DISSEM-277740.csv'); + * DBMS_OUTPUT.PUT_LINE('vResult = ' || vResult); + * end; + * @ex_rslt 0 + * -20021 + **/ + FUNCTION PROCESS_SOURCE_FILE(pSourceFileReceivedName IN VARCHAR2) + RETURN PLS_INTEGER; + + + + /** + * @name GET_DATE_FORMAT + * @desc Returns date format for specified template table name and column name. + * Date is taken from configuration A_COLUMN_DATE_FORMAT table. 
+ * @example select FILE_MANAGER.GET_DATE_FORMAT( + * pTemplateTableName => 'STANDING_FACILITIES_HEADER', + * pColumnName => 'SNAPSHOT_DATE') + * from dual; + * @ex_rslt DD/MM/YYYY HH24:MI:SS + **/ + FUNCTION GET_DATE_FORMAT( + pTemplateTableName IN VARCHAR2, + pColumnName IN VARCHAR2 + ) RETURN VARCHAR2; + + + + /** + * @name GENERATE_EXTERNAL_TABLE_PARAMS + * @desc It builds two strings: pColumnList and pFieldList for specified Template Table name, by parameter: pTemplateTableName. + * @example + * declare + * vColumnList CLOB; + * vFieldList CLOB; + * begin + * FILE_MANAGER.GENERATE_EXTERNAL_TABLE_PARAMS ( + * pTemplateTableName => 'CT_ET_TEMPLATES.LM_STANDING_FACILITIES_HEADER' + * ,pColumnList => vColumnList + * ,pFieldList => vFieldList + * ); + * DBMS_OUTPUT.PUT_LINE('vColumnList = '||vColumnList); + * DBMS_OUTPUT.PUT_LINE('vFieldList = '||vFieldList); + * end; + * / + **/ + PROCEDURE GENERATE_EXTERNAL_TABLE_PARAMS ( + pTemplateTableName IN VARCHAR2, + pColumnList OUT CLOB, + pFieldList OUT CLOB + ); + + + + + + /** + * @name ADD_SOURCE + * @desc Insert a new record to A_SOURCE table. + * pSourceKey is a PRIMARY KEY value. + **/ + PROCEDURE ADD_SOURCE ( + pSourceKey IN CT_MRDS.A_SOURCE.A_SOURCE_KEY%TYPE, + pSourceName IN CT_MRDS.A_SOURCE.SOURCE_NAME%TYPE + ); + + + + /** + * @name DELETE_SOURCE_CASCADE + * @desc Safely deletes a SOURCE specified by pSourceKey parameter from A_SOURCE table and all dependent tables: + * - A_SOURCE_FILE_CONFIG + * - A_SOURCE_FILE_RECEIVED + * - A_COLUMN_DATE_FORMAT (only if template table is not shared with other source systems) + * The procedure checks if template tables are shared before deleting date format configurations. + * If a template table is used by multiple source systems, date formats are preserved. 
+ * @example CALL CT_MRDS.FILE_MANAGER.DELETE_SOURCE_CASCADE(pSourceKey => 'TEST_SYS'); + **/ + PROCEDURE DELETE_SOURCE_CASCADE ( + pSourceKey IN CT_MRDS.A_SOURCE.A_SOURCE_KEY%TYPE + ); + + + + /** + * @name GET_CONTAINER_SOURCE_FILE_CONFIG_KEY + * @desc For specified parameter pSourceFileId (A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID) + * it returns A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY for related CONTAINER record. + * @example select FILE_MANAGER.GET_CONTAINER_SOURCE_FILE_CONFIG_KEY( + * pSourceFileId => 'UC_DISSEM') + * from dual; + * @ex_rslt 126 + **/ + FUNCTION GET_CONTAINER_SOURCE_FILE_CONFIG_KEY ( + pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE + ) RETURN PLS_INTEGER; + + + + /** + * @name GET_SOURCE_FILE_CONFIG_KEY + * @desc For specified input parameters, + * it returns A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY. + * @example select FILE_MANAGER.GET_SOURCE_FILE_CONFIG_KEY ( + * pSourceFileType => 'INPUT' + * ,pSourceFileId => 'UC_DISSEM' + * ,pTableId => 'UC_NMA_DISSEM') + * from dual; + * @ex_rslt 126 + **/ + FUNCTION GET_SOURCE_FILE_CONFIG_KEY ( + pSourceFileType IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE DEFAULT 'INPUT' + ,pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE + ,pTableId IN CT_MRDS.A_SOURCE_FILE_CONFIG.TABLE_ID%TYPE + ) RETURN PLS_INTEGER; + + + + /** + * @name ADD_SOURCE_FILE_CONFIG + * @desc Insert a new record to A_SOURCE_FILE_CONFIG table. + * MARS-1049: Added pEncoding parameter for CSV character set specification. 
+ * @param pEncoding - Character set encoding for CSV files (e.g., 'UTF8', 'WE8MSWIN1252', 'EE8ISO8859P2') + * If NULL, no CHARACTERSET clause is added to external table definitions + * @example CALL CT_MRDS.FILE_MANAGER.ADD_SOURCE_FILE_CONFIG( + * pSourceKey => 'C2D', pSourceFileType => 'INPUT', + * pSourceFileId => 'UC_DISSEM', pTableId => 'METADATA_LOADS', + * pTemplateTableName => 'CT_ET_TEMPLATES.C2D_A_UC_DISSEM_METADATA_LOADS', + * pEncoding => 'UTF8' + * ); + **/ + PROCEDURE ADD_SOURCE_FILE_CONFIG ( + pSourceKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_KEY%TYPE + ,pSourceFileType IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE + ,pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE + ,pSourceFileDesc IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_DESC%TYPE + ,pSourceFileNamePattern IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_NAME_PATTERN%TYPE + ,pTableId IN CT_MRDS.A_SOURCE_FILE_CONFIG.TABLE_ID%TYPE DEFAULT NULL + ,pTemplateTableName IN CT_MRDS.A_SOURCE_FILE_CONFIG.TEMPLATE_TABLE_NAME%TYPE DEFAULT NULL + ,pContainerFileKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.CONTAINER_FILE_KEY%TYPE DEFAULT NULL + ,pEncoding IN CT_MRDS.A_SOURCE_FILE_CONFIG.ENCODING%TYPE DEFAULT NULL -- MARS-1049: NEW PARAMETER + ); + + + + /** + * @name ADD_COLUMN_DATE_FORMAT + * @desc Insert a new record to A_COLUMN_DATE_FORMAT table. + **/ + PROCEDURE ADD_COLUMN_DATE_FORMAT ( + pTemplateTableName IN CT_MRDS.A_COLUMN_DATE_FORMAT.TEMPLATE_TABLE_NAME%TYPE + ,pColumnName IN CT_MRDS.A_COLUMN_DATE_FORMAT.COLUMN_NAME%TYPE + ,pDateFormat IN CT_MRDS.A_COLUMN_DATE_FORMAT.DATE_FORMAT%TYPE + ); + + + + /** + * @name GET_BUCKET_URI + * @desc Function used to get string with bucket http url. 
+ * Possible input values for pBucketArea are: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * @example select FILE_MANAGER.GET_BUCKET_URI(pBucketArea => 'ODS') from dual; + * @ex_rslt https://objectstorage.eu-frankfurt-1.oraclecloud.com/n/frcnomajoc7v/b/mrds_data_tst/o/ + **/ + FUNCTION GET_BUCKET_URI(pBucketArea VARCHAR2) + RETURN VARCHAR2; + + + + /** + * @name GET_DET_SOURCE_FILE_CONFIG_INFO + * @desc Function returns details about A_SOURCE_FILE_CONFIG record + * for specified pSourceFileConfigKey (A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY). + * If pIncludeContainerInfo is <> 0 it returns additional info about related Container config record (A_SOURCE_FILE_CONFIG) + * If pIncludeColumnFormatInfo is <> 0 it returns additional info about related ColumnFormat config record (A_COLUMN_DATE_FORMAT) + * @example select FILE_MANAGER.GET_DET_SOURCE_FILE_CONFIG_INFO ( + * pSourceFileConfigKey => 128 + * ,pIncludeContainerInfo => 1 + * ,pIncludeColumnFormatInfo => 1 + * ) from dual; + * @ex_rslt + * Details about File Configuration: + * -------------------------------- + * A_SOURCE_FILE_CONFIG_KEY = 128 + * A_SOURCE_KEY = C2D + * ... + * -------------------------------- + * + * Details about related Container Config: + * -------------------------------- + * A_SOURCE_FILE_CONFIG_KEY = 126 + * A_SOURCE_KEY = C2D + * ... + * -------------------------------- + * + * Column Date Format config entries: + * -------------------------------- + * TEMPLATE_TABLE_NAME = CT_ET_TEMPLATES.C2D_UC_MA_DISSEM + * ... 
+ * -------------------------------- + **/ + FUNCTION GET_DET_SOURCE_FILE_CONFIG_INFO ( + pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE + ,pIncludeContainerInfo IN PLS_INTEGER DEFAULT 1 + ,pIncludeColumnFormatInfo IN PLS_INTEGER DEFAULT 1 + ) RETURN VARCHAR2; + + + + /** + * @name GET_DET_SOURCE_FILE_RECEIVED_INFO + * @desc Function returns details about A_SOURCE_FILE_RECEIVED record + * for specified pSourceFileReceivedKey (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY). + * If pIncludeConfigInfo is <> 0 it returns additional info about related Container config record (A_SOURCE_FILE_CONFIG) + * If pIncludeContainerInfo is <> 0 it returns additional info about related Container config record (A_SOURCE_FILE_CONFIG) + * If pIncludeColumnFormatInfo is <> 0 it returns additional info about related ColumnFormat config record (A_COLUMN_DATE_FORMAT) + * @example select FILE_MANAGER.GET_DET_SOURCE_FILE_RECEIVED_INFO ( + * pSourceFileReceivedKey => 377 + * ,pIncludeConfigInfo => 1 + * ,pIncludeContainerInfo => 1 + * ,pIncludeColumnFormatInfo => 1 + * ) from dual; + * + **/ + FUNCTION GET_DET_SOURCE_FILE_RECEIVED_INFO ( + pSourceFileReceivedKey IN CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE + ,pIncludeConfigInfo IN PLS_INTEGER DEFAULT 1 + ,pIncludeContainerInfo IN PLS_INTEGER DEFAULT 1 + ,pIncludeColumnFormatInfo IN PLS_INTEGER DEFAULT 1 + ) RETURN VARCHAR2; + + + + /** + * @name GET_DET_USER_LOAD_OPERATIONS + * @desc Function returns details from USER_LOAD_OPERATIONS table + * for specified pOperationId. 
+ * @example select FILE_MANAGER.GET_DET_USER_LOAD_OPERATIONS (pOperationId => 3608) from dual; + * @ex_rslt + * Details about USER_LOAD_OPERATIONS where ID = 3608 + * -------------------------------- + * ID = 3608 + * TYPE = VALIDATE + * SID = 31260 + * SERIAL# = 52915 + * START_TIME = 2025-05-20 10.08.24.436983 EUROPE/BELGRADE + * UPDATE_TIME = 2025-05-20 10.08.24.458643 EUROPE/BELGRADE + * STATUS = FAILED + * OWNER_NAME = CT_MRDS + * TABLE_NAME = STANDING_FACILITIES_HEADER + * PARTITION_NAME = + * SUBPARTITION_NAME = + * FILE_URI_LIST = + * ROWS_LOADED = + * LOGFILE_TABLE = VALIDATE$3608_LOG + * BADFILE_TABLE = VALIDATE$3608_BAD + * STATUS_TABLE = + * TEMPEXT_TABLE = + * CREDENTIAL_NAME = + * EXPIRATION_TIME = 2025-05-22 10.08.24.436983000 EUROPE/BELGRADE + * -------------------------------- + **/ + FUNCTION GET_DET_USER_LOAD_OPERATIONS ( + pOperationId PLS_INTEGER + ) RETURN VARCHAR2; + + /** + * @name ANALYZE_VALIDATION_ERRORS + * @desc Wrapper function that analyzes validation errors for a source file using its received key. + * Automatically derives template schema, table name, CSV URI and validation log table + * from file metadata and calls ENV_MANAGER.ANALYZE_VALIDATION_ERRORS. + * @example SELECT FILE_MANAGER.ANALYZE_VALIDATION_ERRORS(63) FROM DUAL; + * @ex_rslt Detailed validation analysis report with column mismatches and solutions + **/ + FUNCTION ANALYZE_VALIDATION_ERRORS( + pSourceFileReceivedKey IN NUMBER + ) RETURN VARCHAR2; + + --------------------------------------------------------------------------------------------------------------------------- + -- PACKAGE VERSION MANAGEMENT FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name GET_VERSION + * @desc Returns the current version number of the FILE_MANAGER package. + * Uses semantic versioning format (MAJOR.MINOR.PATCH). 
+ * @example SELECT FILE_MANAGER.GET_VERSION() FROM DUAL; + * @ex_rslt 3.2.0 + **/ + FUNCTION GET_VERSION RETURN VARCHAR2; + + /** + * @name GET_BUILD_INFO + * @desc Returns comprehensive build information including version, build date, and author. + * Uses centralized ENV_MANAGER.GET_PACKAGE_VERSION_INFO function. + * @example SELECT FILE_MANAGER.GET_BUILD_INFO() FROM DUAL; + * @ex_rslt Package: FILE_MANAGER + * Version: 3.2.0 + * Build Date: 2025-10-22 16:30:00 + * Author: Grzegorz Michalski + **/ + FUNCTION GET_BUILD_INFO RETURN VARCHAR2; + + /** + * @name GET_VERSION_HISTORY + * @desc Returns complete version history with all releases and changes. + * Uses centralized ENV_MANAGER.FORMAT_VERSION_HISTORY function. + * @example SELECT FILE_MANAGER.GET_VERSION_HISTORY() FROM DUAL; + * @ex_rslt FILE_MANAGER Version History: + * 3.2.0 (2025-10-22): Added package versioning system... + **/ + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; + +END; + +/ + +/ diff --git a/MARS_Packages/REL03/MARS-1057/install_mars1057.sql b/MARS_Packages/REL03/MARS-1057/install_mars1057.sql new file mode 100644 index 0000000..7119fd9 --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/install_mars1057.sql @@ -0,0 +1,91 @@ +-- =================================================================== +-- MARS-1057 INSTALL SCRIPT: Batch External Table Creation +-- =================================================================== +-- Purpose: Install FILE_MANAGER v3.4.0 with batch external table creation procedures +-- Author: Grzegorz Michalski +-- Date: 2025-11-27 +-- Version: 3.4.0 + +-- Dynamic spool file generation (using SYS_CONTEXT - no DBA privileges required) +-- Log files are automatically created in log/ subdirectory +-- IMPORTANT: Ensure log/ directory exists before SPOOL (use host mkdir) +host mkdir log 2>nul + +var filename VARCHAR2(100) +BEGIN + :filename := 'log/INSTALL_MARS_1057_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; +END; 
+/ +column filename new_value _filename +select :filename filename from dual; +spool &_filename + +SET ECHO OFF +SET TIMING ON +SET SERVEROUTPUT ON SIZE UNLIMITED +SET PAUSE OFF + +-- Set current schema context (optional - use when modifying packages in specific schema) +-- ALTER SESSION SET CURRENT_SCHEMA = CT_MRDS; + +PROMPT ========================================================================= +PROMPT MARS-1057: Batch External Table Creation +PROMPT ========================================================================= +PROMPT +PROMPT This script will install FILE_MANAGER package v3.4.0 with new features: +PROMPT - CREATE_EXTERNAL_TABLES_SET procedure +PROMPT - CREATE_EXTERNAL_TABLES_BATCH procedure +PROMPT +PROMPT Changes: +PROMPT - FILE_MANAGER package specification (3.3.0 -> 3.4.0) +PROMPT - FILE_MANAGER package body (3.3.0 -> 3.4.0) +PROMPT +PROMPT Expected Duration: 1-2 minutes +PROMPT ========================================================================= + +-- Confirm installation with user +ACCEPT continue CHAR PROMPT 'Type YES to continue with installation, or Ctrl+C to abort: ' +WHENEVER SQLERROR EXIT SQL.SQLCODE +BEGIN + IF '&continue' IS NULL OR TRIM('&continue') IS NULL OR UPPER(TRIM('&continue')) != 'YES' THEN + RAISE_APPLICATION_ERROR(-20001, 'Installation aborted by user'); + END IF; +END; +/ +WHENEVER SQLERROR CONTINUE + +PROMPT +PROMPT ========================================================================= +PROMPT Step 1: Install FILE_MANAGER Package Specification v3.4.0 +PROMPT ========================================================================= +@@01_MARS_1057_install_CT_MRDS_FILE_MANAGER_SPEC.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 2: Install FILE_MANAGER Package Body v3.4.0 +PROMPT ========================================================================= +@@02_MARS_1057_install_CT_MRDS_FILE_MANAGER_BODY.sql + +PROMPT +PROMPT 
========================================================================= +PROMPT Step 3: Track Package Version +PROMPT ========================================================================= +@@track_package_versions.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 4: Verify All Tracked Packages +PROMPT ========================================================================= +@@verify_packages_version.sql + +PROMPT +PROMPT ========================================================================= +PROMPT MARS-1057 Installation - COMPLETED +PROMPT ========================================================================= +PROMPT Check the log file for complete installation details. +PROMPT Log file: log/INSTALL_MARS_1057__.log +PROMPT ========================================================================= + +spool off + +quit; diff --git a/MARS_Packages/REL03/MARS-1057/new_version/FILE_MANAGER.pkb b/MARS_Packages/REL03/MARS-1057/new_version/FILE_MANAGER.pkb new file mode 100644 index 0000000..baf7e98 --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/new_version/FILE_MANAGER.pkb @@ -0,0 +1,2151 @@ +create or replace PACKAGE BODY CT_MRDS.FILE_MANAGER +AS + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_SOURCE_FILE_CONFIG(pFileUri IN VARCHAR2 DEFAULT NULL + , pSourceFileReceivedKey IN NUMBER DEFAULT NULL + , pSourceFileConfigKey IN NUMBER DEFAULT NULL) + RETURN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE + IS + vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pFileUri => '''||nvl(pFileUri,'NULL')||'''' + ,'pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey),'NULL') + ,'pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey),'NULL'))); + 
ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + + BEGIN + IF pFileUri IS NOT NULL THEN + SELECT * + INTO vSourceFileConfig + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE REGEXP_LIKE(pFileUri, A_SOURCE_KEY||'/'||SOURCE_FILE_ID||'/'||TABLE_ID||'/'||SOURCE_FILE_NAME_PATTERN); + ELSIF pSourceFileReceivedKey IS NOT NULL THEN + SELECT T.* + INTO vSourceFileConfig + FROM CT_MRDS.A_SOURCE_FILE_CONFIG T, CT_MRDS.A_SOURCE_FILE_RECEIVED R + WHERE T.A_SOURCE_FILE_CONFIG_KEY = R.A_SOURCE_FILE_CONFIG_KEY + AND R.A_SOURCE_FILE_RECEIVED_KEY = pSourceFileReceivedKey; + ELSIF pSourceFileConfigKey IS NOT NULL THEN + SELECT * + INTO vSourceFileConfig + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey; + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EMPTY_FILEURI_AND_RECKEY, ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY); + END IF; + -- Set global package variable vgSourceFileConfigKey - used in error messages + vgSourceFileConfigKey := vSourceFileConfig.A_SOURCE_FILE_CONFIG_KEY; + EXCEPTION + WHEN ENV_MANAGER.ERR_EMPTY_FILEURI_AND_RECKEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EMPTY_FILEURI_AND_RECKEY, ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY); + + WHEN NO_DATA_FOUND THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_MATCH_FOR_FILEURI, ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI); + + WHEN TOO_MANY_ROWS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MULTIPLE_MATCH_FOR_SRCFILE, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_MATCH_FOR_SRCFILE, ENV_MANAGER.MSG_MULTIPLE_MATCH_FOR_SRCFILE); + + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_UNKNOWN, vParameters, 'FILE_MANAGER'); + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END; + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + RETURN vSourceFileConfig; + + END GET_SOURCE_FILE_CONFIG; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey IN NUMBER DEFAULT NULL) + -- + -- Get source file received info + -- + RETURN tSourceFileReceived + IS + vSourceFileReceived tSourceFileReceived; + vBucket VARCHAR2(400); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey),'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + BEGIN + SELECT R.A_SOURCE_FILE_RECEIVED_KEY, R.A_SOURCE_FILE_CONFIG_KEY, + 'INBOX'||'/'||T.A_SOURCE_KEY||'/'||T.SOURCE_FILE_ID||'/'||T.TABLE_ID||'/' as SOURCE_FILE_PREFIX_INBOX, + 'ODS'||'/'||T.A_SOURCE_KEY||'/'||T.TABLE_ID||'/' as SOURCE_FILE_PREFIX_ODS, + 'QUARANTINE'||'/'||T.A_SOURCE_KEY||'/'||T.TABLE_ID||'/' as SOURCE_FILE_PREFIX_QUARANTINE, + 'ARCHIVE'||'/'||T.A_SOURCE_KEY||'/'||T.SOURCE_FILE_ID||'/' as SOURCE_FILE_PREFIX_ARCHIVE, + R.SOURCE_FILE_NAME, + R.RECEPTION_DATE, R.PROCESSING_STATUS, R.EXTERNAL_TABLE_NAME + INTO vSourceFileReceived + FROM CT_MRDS.A_SOURCE_FILE_RECEIVED R, CT_MRDS.A_SOURCE_FILE_CONFIG T + WHERE R.A_SOURCE_FILE_CONFIG_KEY = T.A_SOURCE_FILE_CONFIG_KEY + AND A_SOURCE_FILE_RECEIVED_KEY = pSourceFileReceivedKey; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE); + WHEN TOO_MANY_ROWS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MULTI_CONFIG_FOR_RECEIVED_FILE, 'ERROR', vParameters); + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTI_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_MULTI_CONFIG_FOR_RECEIVED_FILE); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + END; + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + + RETURN vSourceFileReceived; + + END GET_SOURCE_FILE_RECEIVED_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION REGISTER_SOURCE_FILE_RECEIVED(pSourceFileReceivedName IN VARCHAR2) + RETURN PLS_INTEGER + -- + -- Register a newly received source file A_SOURCE_FILE_RECEIVED + -- This overload automatically determines source file type from the file name + -- + IS + vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + vSourceFileReceivedKey PLS_INTEGER; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedName => '''||nvl(pSourceFileReceivedName, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO',vParameters); + + vSourceFileConfig := GET_SOURCE_FILE_CONFIG(pSourceFileReceivedName); + vSourceFileReceivedKey := REGISTER_SOURCE_FILE_RECEIVED(pSourceFileReceivedName, vSourceFileConfig); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + RETURN vSourceFileReceivedKey; + EXCEPTION + + WHEN ENV_MANAGER.ERR_EMPTY_FILEURI_AND_RECKEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EMPTY_FILEURI_AND_RECKEY, ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY); + + WHEN ENV_MANAGER.ERR_NO_CONFIG_MATCH_FOR_FILEURI THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI, 'ERROR', vParameters); + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_MATCH_FOR_FILEURI, ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI); + + WHEN ENV_MANAGER.ERR_FILE_NOT_EXISTS_ON_CLOUD THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_EXISTS_ON_CLOUD, ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD); + + WHEN ENV_MANAGER.ERR_FILE_ALREADY_REGISTERED THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_FILE_ALREADY_REGISTERED, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_ALREADY_REGISTERED, ENV_MANAGER.MSG_FILE_ALREADY_REGISTERED); + + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_UNKNOWN, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END REGISTER_SOURCE_FILE_RECEIVED; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION REGISTER_SOURCE_FILE_RECEIVED( + pSourceFileReceivedName IN VARCHAR2 + ,pSourceFileConfig IN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE) + RETURN PLS_INTEGER + -- + -- Register a newly received source file A_SOURCE_FILE_RECEIVED + -- + IS + vExternalTableName VARCHAR2(200); + vDirName VARCHAR2(1000); + vFileName VARCHAR2(1000); + vChecksum A_SOURCE_FILE_RECEIVED.CHECKSUM%TYPE; + vCreated A_SOURCE_FILE_RECEIVED.CREATED%TYPE; + vBytes A_SOURCE_FILE_RECEIVED.BYTES%TYPE; + vSourceFileReceivedKey PLS_INTEGER; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vRow CT_MRDS.A_SOURCE_FILE_RECEIVED%ROWTYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedName => '''||nvl(pSourceFileReceivedName, 'NULL')||'''' + ,'pSourceFileConfig => '||'tSourceFileConfig record type')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + vDirName := REGEXP_SUBSTR(pSourceFileReceivedName, '(.*/)(.*)', 1, 1, NULL, 1); + -- Remove prefix from file name + 
vFileName := REGEXP_SUBSTR(pSourceFileReceivedName,'[^/]*$'); + + ENV_MANAGER.LOG_PROCESS_EVENT('gvCredentialName','DEBUG',ENV_MANAGER.gvCredentialName); + ENV_MANAGER.LOG_PROCESS_EVENT('gvInboxBucketUri','DEBUG',ENV_MANAGER.gvInboxBucketUri); + ENV_MANAGER.LOG_PROCESS_EVENT('vDirName','DEBUG',vDirName); + + SELECT + checksum, created, bytes + INTO + vChecksum, vCreated, vBytes + FROM DBMS_CLOUD.LIST_OBJECTS(ENV_MANAGER.gvCredentialName, + ENV_MANAGER.gvInboxBucketUri || vDirName + ) + WHERE object_name = vFileName + ; + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + vExternalTableName := REPLACE( + REGEXP_SUBSTR(pSourceFileConfig.TEMPLATE_TABLE_NAME||'_'||vSourceFileReceivedKey, + '\..*'), + '.',''); + + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED + (A_SOURCE_FILE_RECEIVED_KEY, A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, RECEPTION_DATE, + PROCESSING_STATUS, EXTERNAL_TABLE_NAME, + CHECKSUM, CREATED, BYTES) + VALUES (vSourceFileReceivedKey, pSourceFileConfig.A_SOURCE_FILE_CONFIG_KEY, + vFileName, SYSDATE, + 'RECEIVED', vExternalTableName, + vChecksum, vCreated, vBytes); + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + RETURN vSourceFileReceivedKey; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vgMsgTmp := ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD + ||cgBL||' '||'File: '||ENV_MANAGER.gvInboxBucketUri || vDirName || vFileName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_EXISTS_ON_CLOUD, vgMsgTmp); + + WHEN DUP_VAL_ON_INDEX THEN + select * into vRow + from CT_MRDS.A_SOURCE_FILE_RECEIVED + where CHECKSUM = vChecksum + and CREATED = vCreated + and BYTES = vBytes + ; + vgMsgTmp := ENV_MANAGER.MSG_FILE_ALREADY_REGISTERED + ||cgBL||' '||'Details about existing File: ' + ||cgBL||' '||'-------------------------' + ||cgBL||' '||'A_SOURCE_FILE_RECEIVED_KEY = '||vRow.A_SOURCE_FILE_RECEIVED_KEY + ||cgBL||' '||'A_SOURCE_FILE_CONFIG_KEY = 
'||vRow.A_SOURCE_FILE_CONFIG_KEY + ||cgBL||' '||'SOURCE_FILE_NAME = '||vRow.SOURCE_FILE_NAME + ||cgBL||' '||'CHECKSUM = '||vRow.CHECKSUM + ||cgBL||' '||'CREATED = '||vRow.CREATED + ||cgBL||' '||'BYTES = '||vRow.BYTES + ||cgBL||' '||'RECEPTION_DATE = '||vRow.RECEPTION_DATE + ||cgBL||' '||'PROCESSING_STATUS = '||vRow.PROCESSING_STATUS + ||cgBL||' '||'EXTERNAL_TABLE_NAME = '||vRow.EXTERNAL_TABLE_NAME + ||cgBL||' '||'-------------------------' + ||cgBL||' '||'There cannot be two files with the same values for (CHECKSUM, CREATED, BYTES)' + ; + + +-- vChecksum, vCreated, vBytes + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_ALREADY_REGISTERED, vgMsgTmp); + + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END REGISTER_SOURCE_FILE_RECEIVED; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey IN PLS_INTEGER, pStatus IN VARCHAR2) + -- + -- Change status of file in the A_SOURCE_FILE_RECEIVED table + -- + IS + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey),'NULL') + ,'pStatus => '''||nvl(pStatus, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + UPDATE CT_MRDS.A_SOURCE_FILE_RECEIVED + SET PROCESSING_STATUS=pStatus + WHERE A_SOURCE_FILE_RECEIVED_KEY=pSourceFileReceivedKey; + COMMIT; + ENV_MANAGER.LOG_PROCESS_EVENT('File status changed to '||pStatus,'INFO', vParameters); + 
ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + + END SET_SOURCE_FILE_RECEIVED_STATUS; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_EXTERNAL_TABLE_COLUMNS(pTargetTableTemplate IN VARCHAR2) + RETURN CLOB + -- + -- Create list of columns for DBMS_CLOUD.CREATE_EXTERNAL_TABLE from existing template table + -- + IS + vColumnList CLOB; + vTableName VARCHAR2(200); + vSchemaName VARCHAR2(200); + BEGIN + vSchemaName := REPLACE(REGEXP_SUBSTR(pTargetTableTemplate,'.*\.'),'.',''); + vTableName := REPLACE(REGEXP_SUBSTR(pTargetTableTemplate,'\..*'),'.',''); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'SQLTERMINATOR', True); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'COLLATION_CLAUSE', 'NEVER'); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'REF_CONSTRAINTS', False); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'STORAGE', False); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'TABLESPACE', False); + DBMS_METADATA.SET_TRANSFORM_PARAM(-1, 'SEGMENT_ATTRIBUTES', False); + vColumnList := RTRIM( + LTRIM( + REGEXP_SUBSTR(DBMS_METADATA.GET_DDL('TABLE', vTableName, vSchemaName),'\(.*\)',1,1,'mn'), + '('), + ')'); + RETURN vColumnList; + END GET_EXTERNAL_TABLE_COLUMNS; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE CREATE_EXTERNAL_TABLE ( + pTableName IN VARCHAR2, + pTemplateTableName IN VARCHAR2, + pPrefix IN VARCHAR2, + pBucketUri IN VARCHAR2 DEFAULT ENV_MANAGER.gvInboxBucketUri, + pFileName IN VARCHAR2 DEFAULT NULL, + pDelimiter IN VARCHAR2 DEFAULT ',', + pEncoding IN VARCHAR2 DEFAULT NULL -- MARS-1049: NEW PARAMETER FOR FILE ENCODING + ) + -- + 
-- Create external table for a single source file to validate the file structure + -- + IS + vTableName VARCHAR2(200); + vColumnList CLOB; + vFieldList CLOB; + vFormat VARCHAR2(200); + + vPrefix VARCHAR2(200); + vFileName VARCHAR2(1000); + vFileExtension VARCHAR2(200); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pPrefix => '''||nvl(pPrefix, 'NULL')||'''' + ,'pBucketUri => '''||nvl(pBucketUri, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 'NULL')||'''' + ,'pDelimiter => '''||nvl(pDelimiter, 'NULL')||'''' + ,'pEncoding => '''||nvl(pEncoding, 'NULL')||'''' -- MARS-1049: NOWY + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + -- Strip off leading and trailing slashes from prefix + vPrefix := TRIM(BOTH '/' FROM pPrefix); + + -- Generate column and field list from template table + GENERATE_EXTERNAL_TABLE_PARAMS (pTemplateTableName, vColumnList, vFieldList); + + --vFormat evaluation based on pBucketUri first, then pPrefix + -- Archive bucket should use parquet + implicit partitioning regardless of prefix + IF INSTR(pBucketUri, ENV_MANAGER.gvArchiveBucketName)>0 THEN + vFormat := '{"type": "parquet" + ,"implicit_partition_type": "hive" + ,"implicit_partition_columns":["PARTITION_YEAR","PARTITION_MONTH"]}'; + vColumnList := vColumnList||cgBL||' , "PARTITION_YEAR" varchar2(4)'||cgBL||', "PARTITION_MONTH" varchar2(2)'; + vFieldList := NULL; + vFileExtension := '.parquet'; + + -- For INBOX, ODS, and other ARCHIVE prefixes (not in archive bucket) use CSV + ELSIF SUBSTR(pPrefix,1,5) = 'INBOX' OR SUBSTR(pPrefix,1,3) = 'ODS' + OR SUBSTR(pPrefix,1,7) = 'ARCHIVE' + THEN + -- MARS-1049: Create format with encoding if specified + IF pDelimiter = '|' THEN + IF pEncoding IS NOT NULL AND LENGTH(TRIM(pEncoding)) > 0 THEN + vFormat := json_object( + 
'delimiter' VALUE '|', + 'skipheaders' VALUE '1', + 'characterset' VALUE pEncoding + ); + ELSE + vFormat := json_object('delimiter' VALUE '|', 'skipheaders' VALUE '1'); + END IF; + ELSE + IF pEncoding IS NOT NULL AND LENGTH(TRIM(pEncoding)) > 0 THEN + vFormat := json_object( + 'type' VALUE 'CSV', + 'skipheaders' VALUE '1', + 'ignoremissingcolumns' VALUE 'true', + 'characterset' VALUE pEncoding + ); + ELSE + vFormat := json_object('type' VALUE 'CSV', 'skipheaders' VALUE '1', 'ignoremissingcolumns' value 'true'); + END IF; + END IF; + + vFileExtension := '.csv'; + + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN_PREFIX, ENV_MANAGER.MSG_UNKNOWN_PREFIX); + END IF; + + -- No filename give: Match all csv files + IF pFileName IS NOT NULL THEN + vFileName := pFileName; + ELSE + vFileName := pBucketUri||vPrefix||'/*'||vFileExtension; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('pTableName', 'DEBUG', pTableName); + ENV_MANAGER.LOG_PROCESS_EVENT('ENV_MANAGER.vpCredentialName', 'DEBUG', ENV_MANAGER.gvCredentialName); + ENV_MANAGER.LOG_PROCESS_EVENT('vFileName', 'DEBUG', vFileName); + ENV_MANAGER.LOG_PROCESS_EVENT('vColumnList', 'DEBUG', vColumnList); + ENV_MANAGER.LOG_PROCESS_EVENT('vFieldList', 'DEBUG', vFieldList); + ENV_MANAGER.LOG_PROCESS_EVENT('vFormat', 'DEBUG', vFormat); + + -- Pre-validation: Check CSV column count for CSV files only + IF SUBSTR(pPrefix,1,5) = 'INBOX' AND pFileName IS NOT NULL THEN + DECLARE + vCsvFirstLine VARCHAR2(4000); + vCsvColCount NUMBER := 0; + vTemplateColCount NUMBER := 0; + vExcessColumns VARCHAR2(2000); + + -- Get template column count + CURSOR c_template_count IS + SELECT COUNT(*) as col_count + FROM ALL_TAB_COLUMNS + WHERE OWNER = UPPER(REPLACE(REGEXP_SUBSTR(pTemplateTableName,'.*\.'),'.','')) + AND TABLE_NAME = UPPER(REGEXP_REPLACE(pTemplateTableName,'^.*\.','')); + + BEGIN + -- Get template column count + FOR rec IN c_template_count LOOP + vTemplateColCount := rec.col_count; + END LOOP; + + -- Read first line of CSV to count 
columns + BEGIN + SELECT UTL_RAW.CAST_TO_VARCHAR2( + DBMS_LOB.SUBSTR( + DBMS_CLOUD.GET_OBJECT( + credential_name => ENV_MANAGER.gvCredentialName, + object_uri => pFileName + ), + 4000, 1 + ) + ) INTO vCsvFirstLine FROM DUAL; + + -- Count commas in header line + 1 for total columns + vCsvColCount := REGEXP_COUNT(REGEXP_SUBSTR(vCsvFirstLine, '[^'||chr(10)||']*'), ',') + 1; + + ENV_MANAGER.LOG_PROCESS_EVENT('CSV Column Count: ' || vCsvColCount || ', Template Column Count: ' || vTemplateColCount, 'INFO', vParameters); + + -- Check for excess columns + IF vCsvColCount > vTemplateColCount THEN + vgMsgTmp := ENV_MANAGER.MSG_EXCESS_COLUMNS_DETECTED + ||cgBL||'EXCESS COLUMNS DETECTED!' + ||cgBL||'CSV file has ' || vCsvColCount || ' columns but template expects only ' || vTemplateColCount + ||cgBL||'Excess columns: ' || (vCsvColCount - vTemplateColCount) + ||cgBL||'CSV header: ' || SUBSTR(REGEXP_SUBSTR(vCsvFirstLine, '[^'||chr(10)||']*'), 1, 200) + ||cgBL||'POSSIBLE SOLUTIONS:' + ||cgBL||' 1. Remove excess columns from CSV file before processing' + ||cgBL||' 2. 
Add excess columns to template table: ' || pTemplateTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXCESS_COLUMNS_DETECTED, vgMsgTmp); + END IF; + + EXCEPTION + WHEN ENV_MANAGER.ERR_EXCESS_COLUMNS_DETECTED THEN + RAISE; -- Re-raise the excess columns error + WHEN ENV_MANAGER.ERR_FILE_VALIDATION_FAILED THEN + RAISE; -- Re-raise the validation error + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Warning: Could not perform pre-validation column count check: ' || SQLERRM, 'WARN', vParameters); + -- Continue with normal processing if pre-validation fails + END; + END; + END IF; + + DBMS_CLOUD.CREATE_EXTERNAL_TABLE( + TABLE_NAME => pTableName, + CREDENTIAL_NAME => ENV_MANAGER.gvCredentialName, + FILE_URI_LIST => vFileName, + COLUMN_LIST => vColumnList, + FIELD_LIST => vFieldList, + FORMAT => vFormat + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_EXCESS_COLUMNS_DETECTED THEN + RAISE; -- Re-raise the excess columns error with specific code -20011 + WHEN ENV_MANAGER.ERR_UNKNOWN_PREFIX THEN + vgMsgTmp := ENV_MANAGER.MSG_UNKNOWN_PREFIX || ': ' || pPrefix; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN_PREFIX, vgMsgTmp); + WHEN ENV_MANAGER.ERR_MISSING_COLUMN_DATE_FORMAT THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MISSING_COLUMN_DATE_FORMAT, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_COLUMN_DATE_FORMAT, ENV_MANAGER.MSG_MISSING_COLUMN_DATE_FORMAT); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END CREATE_EXTERNAL_TABLE; + + 
---------------------------------------------------------------------------------------------------- + + PROCEDURE CREATE_EXTERNAL_TABLE(pSourceFileReceivedKey IN NUMBER) + -- + -- Create external table for a single source file to validate the file structure + -- + IS + vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + vSourceFileReceived tSourceFileReceived; + vTableName VARCHAR2(200); + vFileName VARCHAR2(1000); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey), 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + vSourceFileConfig := GET_SOURCE_FILE_CONFIG(pSourceFileReceivedKey => pSourceFileReceivedKey); + vSourceFileReceived := GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + vTableName := vSourceFileConfig.TEMPLATE_TABLE_NAME; + + vFileName := ENV_MANAGER.gvInboxBucketUri ||vSourceFileReceived.SOURCE_FILE_PREFIX_INBOX||vSourceFileReceived.SOURCE_FILE_NAME; + + CREATE_EXTERNAL_TABLE( + pTableName => vSourceFileReceived.EXTERNAL_TABLE_NAME, + pTemplateTableName => vSourceFileConfig.TEMPLATE_TABLE_NAME, + pPrefix => vSourceFileReceived.SOURCE_FILE_PREFIX_INBOX, + pBucketUri => ENV_MANAGER.gvInboxBucketUri, + pFileName => vFileName, + pDelimiter => ',', + pEncoding => vSourceFileConfig.ENCODING -- MARS-1049: NOWY PARAMETR + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + END CREATE_EXTERNAL_TABLE; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE VALIDATE_SOURCE_FILE_RECEIVED(pSourceFileReceivedKey IN NUMBER) + -- + -- Check the structure of the received file using DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE + -- + IS + vSourceFileReceived tSourceFileReceived; + vOperationId NUMBER := -1; + vBadfileTable USER_LOAD_OPERATIONS.BADFILE_TABLE%TYPE; + vStatus USER_LOAD_OPERATIONS.STATUS%TYPE; + 
vErrors NUMBER := 0; + vNumRows NUMBER := 0; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey), 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + vSourceFileReceived := GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + ENV_MANAGER.LOG_PROCESS_EVENT('vSourceFileReceived.EXTERNAL_TABLE_NAME: '||vSourceFileReceived.EXTERNAL_TABLE_NAME,'DEBUG', vParameters); + BEGIN + DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE(vSourceFileReceived.EXTERNAL_TABLE_NAME, vOperationId); + EXCEPTION + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_FILE_VALIDATION_FAILED, vParameters, 'FILE_MANAGER'); + + -- Call detailed validation error analysis and log the results + DECLARE + vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + vValidationLogTable VARCHAR2(200); + vTemplateSchema VARCHAR2(200); + vTemplateTable VARCHAR2(200); + vCsvFileUri VARCHAR2(2000); + vAnalysisResult VARCHAR2(32000); + vFailedOperationId NUMBER; + BEGIN + -- Get source file configuration + vSourceFileConfig := GET_SOURCE_FILE_CONFIG(pSourceFileReceivedKey => pSourceFileReceivedKey); + + -- Extract template schema and table from template table name + vTemplateSchema := REPLACE(REGEXP_SUBSTR(vSourceFileConfig.TEMPLATE_TABLE_NAME,'.*\.'),'.',''); + vTemplateTable := REPLACE(REGEXP_SUBSTR(vSourceFileConfig.TEMPLATE_TABLE_NAME,'\..*'),'.',''); + + -- Construct CSV file URI + vCsvFileUri := ENV_MANAGER.gvInboxBucketUri || vSourceFileReceived.SOURCE_FILE_PREFIX_INBOX || vSourceFileReceived.SOURCE_FILE_NAME; + + -- Find the failed validation operation ID + SELECT MAX(ID) INTO vFailedOperationId + FROM USER_LOAD_OPERATIONS + WHERE TABLE_NAME = vSourceFileReceived.EXTERNAL_TABLE_NAME + AND TYPE = 'VALIDATE' + AND STATUS != 'COMPLETED'; + + -- Get validation 
log table name + IF vFailedOperationId IS NOT NULL THEN + SELECT LOGFILE_TABLE INTO vValidationLogTable + FROM USER_LOAD_OPERATIONS + WHERE ID = vFailedOperationId; + + -- Call detailed error analysis + vAnalysisResult := ENV_MANAGER.ANALYZE_VALIDATION_ERRORS( + pValidationLogTable => vValidationLogTable, + pTemplateSchema => vTemplateSchema, + pTemplateTable => vTemplateTable, + pCsvFileUri => vCsvFileUri + ); + + -- Log detailed analysis results + ENV_MANAGER.LOG_PROCESS_EVENT('DETAILED VALIDATION ERROR ANALYSIS:', 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(vAnalysisResult, 'ERROR', vParameters); + END IF; + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Error during validation analysis: ' || SQLERRM, 'ERROR', vParameters); + END; + + MOVE_FILE(pSourceFileReceivedKey => pSourceFileReceivedKey, pDestination => 'QUARANTINE'); + -- Ensure the status change is committed before raising exception + COMMIT; + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_VALIDATION_FAILED, ENV_MANAGER.MSG_FILE_VALIDATION_FAILED); + END; + ENV_MANAGER.LOG_PROCESS_EVENT('vOperationId of validation: '||vOperationId,'DEBUG', vParameters); + IF vOperationId = -1 + THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID); + END IF; + + SELECT BADFILE_TABLE, ROWS_LOADED, STATUS + INTO vBadfileTable, vNumRows, vStatus + FROM USER_LOAD_OPERATIONS + WHERE ID = vOperationId; + +-- DBMS_OUTPUT.PUT_LINE(vStatus); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => pSourceFileReceivedKey, pStatus => 'VALIDATED'); + ENV_MANAGER.LOG_PROCESS_EVENT('File status changed to VALIDATED','DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_DIDNT_GET_LOAD_OPERATION_ID THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID, 'ERROR', vParameters); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => 
pSourceFileReceivedKey, pStatus => 'VALIDATION_FAILED'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID); + + WHEN ENV_MANAGER.ERR_FILE_VALIDATION_FAILED THEN + vgMsgTmp := ENV_MANAGER.MSG_FILE_VALIDATION_FAILED; + ENV_MANAGER.LOG_PROCESS_ERROR(vgMsgTmp, vParameters, 'FILE_MANAGER'); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => pSourceFileReceivedKey, pStatus => 'VALIDATION_FAILED'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_VALIDATION_FAILED, vgMsgTmp); + + WHEN OTHERS THEN + IF SQLCODE = -20404 THEN + vgMsgTmp := ENV_MANAGER.MSG_FILE_NOT_FOUND_ON_CLOUD||cgBL||SQLERRM; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_FOUND_ON_CLOUD, vgMsgTmp); + + ELSIF SQLCODE = -20003 THEN + execute immediate 'select LISTAGG(record, '''||cgBL||''') from (select * from '||REGEXP_SUBSTR(SQLERRM, '"([^"]+)"."([^"]+)"')||' order by rownum desc) where rownum <=2' + into vgMsgTmp; + vgMsgTmp := ENV_MANAGER.MSG_FILE_VALIDATION_FAILED||cgBL||SQLERRM||cgBL||vgMsgTmp; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_VALIDATION_FAILED, vgMsgTmp); +-- ELSIF SQLCODE = -20000 THEN + -- TO_DO Add additional info about current config + -- ENV_MANAGER.MSG_FILE_VALIDATION_FAILED := ENV_MANAGER.MSG_FILE_VALIDATION_FAILED||cgBL||SQLERRM||cgBL||FILE_MANAGER.OUTPUT_SOURCE_FILE_CONFIG_INFO( ..config key value.. 
--          );
--          dbms_output.put_line(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));
--          ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_WRONG_DATE_TIMESTAMP_FORMAT, 'ERROR', vParameters);
--          ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
--          RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DATE_TIMESTAMP_FORMAT, ENV_MANAGER.MSG_WRONG_DATE_TIMESTAMP_FORMAT);

         ELSE
            -- Log complete error details including full stack trace and backtrace
            ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_UNKNOWN, vParameters, 'FILE_MANAGER');
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN);
         END IF;
   END VALIDATE_SOURCE_FILE_RECEIVED;

   ----------------------------------------------------------------------------------------------------

   FUNCTION VALIDATE_EXTERNAL_TABLE(pTableName IN VARCHAR2)
   RETURN VARCHAR2
   --
   -- Wrapper for DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE.
   -- Returns 'PASSED' when the load operation completes; when validation throws,
   -- returns a multi-line diagnostic built from the operation's log table instead.
   --
   IS
      vOperationId  NUMBER := -1;
      vBadfileTable USER_LOAD_OPERATIONS.BADFILE_TABLE%TYPE;
      vLogfileTable USER_LOAD_OPERATIONS.LOGFILE_TABLE%TYPE;
      vStatus       USER_LOAD_OPERATIONS.STATUS%TYPE;
      vParameters   CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
      vDetails      clob;
      TYPE TCURSOR is REF CURSOR;
      vCursor       TCURSOR;
      vQuery        VARCHAR2(1000);
      vRecord       VARCHAR2(10000);
   BEGIN
      vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pTableName => '''||nvl(pTableName, 'NULL')||''''));
      ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters);
      DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE(pTableName, vOperationId);
      ENV_MANAGER.LOG_PROCESS_EVENT('vOperationId of validation: '||vOperationId,'DEBUG', vParameters);
      IF vOperationId = -1
      THEN
         RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID);
      END IF;

      SELECT decode(STATUS, 'COMPLETED', 'PASSED', STATUS), LOGFILE_TABLE, BADFILE_TABLE
        INTO vStatus, vLogfileTable, vBadfileTable
        FROM USER_LOAD_OPERATIONS
       WHERE ID = vOperationId;

      RETURN vStatus;
   EXCEPTION
      WHEN ENV_MANAGER.ERR_DIDNT_GET_LOAD_OPERATION_ID THEN
         ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID, 'ERROR', vParameters);
         RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID);

      WHEN OTHERS THEN
         SELECT decode(STATUS, 'COMPLETED', 'PASSED', STATUS), LOGFILE_TABLE, BADFILE_TABLE
           INTO vStatus, vLogfileTable, vBadfileTable
           FROM USER_LOAD_OPERATIONS
          WHERE ID = vOperationId;
         -- BUGFIX: the window alias was written as 'ENV_MANAGER.ERR_row' inside the
         -- dynamic SQL (a qualified alias is invalid Oracle syntax — clearly a
         -- search/replace artifact); renamed to plain err_row so the query parses.
         -- err_row marks the first log record containing 'error'; everything from
         -- there to the end of the log table is included in the details.
         vQuery := 'select record from (
                  select
                     nvl(l.record,''----------------------------------------------------'') as record
                    ,rownum as lp
                    ,max(case when nvl(instr(l.record, ''error'' ),0) > 0 then rownum else 0 end) over (partition by 1) as err_row
                    from '||vLogfileTable||' l
                  )
                  where lp >= err_row
                  order by rownum';

         vDetails := vStatus||cgBL||'----------------------------------------------------'||cgBL;

         OPEN vCursor for vQuery;
         loop
            fetch vCursor into vRecord;
            EXIT WHEN vCursor%NOTFOUND;
            vDetails := vDetails ||vRecord ||cgBL;
         end loop;
         CLOSE vCursor;
         vDetails := vDetails||'More details can be found in below tables:'||cgBL||
                     ' SELECT * FROM USER_LOAD_OPERATIONS WHERE ID = '||vOperationId||';'||cgBL||
                     ' SELECT * FROM '||vLogfileTable||';'||cgBL||
                     ' SELECT * FROM '||vBadfileTable||';'
         ;

         RETURN vDetails;

   END VALIDATE_EXTERNAL_TABLE;

   ----------------------------------------------------------------------------------------------------

   FUNCTION S_VALIDATE_EXTERNAL_TABLE(pTableName IN VARCHAR2)
   RETURN VARCHAR2
   --
   -- Simple validation: the external table is considered readable ('PASSED')
   -- when a COUNT over it succeeds; any error yields 'FAILED'.
   --
   IS
      vCount PLS_INTEGER;
   BEGIN
      execute immediate 'select count(1) from '||pTableName into vCount;
      IF vCount >= 0
      THEN
         RETURN 'PASSED';
      END IF;

      RETURN 'FAILED';
   EXCEPTION
      WHEN OTHERS THEN
         RETURN 'FAILED';
   END
S_VALIDATE_EXTERNAL_TABLE; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE DROP_EXTERNAL_TABLE(pSourceFileReceivedKey IN NUMBER) + -- + -- Drop external table created to validate the file structure + -- + IS + vSourceFileReceived tSourceFileReceived; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => '||nvl(to_char(pSourceFileReceivedKey), 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + vSourceFileReceived := GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + EXECUTE IMMEDIATE 'DROP TABLE '||vSourceFileReceived.EXTERNAL_TABLE_NAME; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + END DROP_EXTERNAL_TABLE; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE COPY_FILE(pSourceFileReceivedKey IN NUMBER, pDestination IN VARCHAR2) + -- + -- Possible pDestination values are: 'ODS' or 'ARCHIVE' + -- + IS + vSourceFileReceivedInfo tSourceFileReceived; + vSourceObject VARCHAR2(2000); + vTargetObject VARCHAR2(2000); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; +-- vStatus VARCHAR2(20); + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => ' ||nvl(to_char(pSourceFileReceivedKey), 'NULL'), + 'pDestination => '''||nvl(pDestination, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + vSourceFileReceivedInfo := 
GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + + IF pDestination = 'ODS' THEN + vSourceObject := ENV_MANAGER.gvInboxBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_INBOX ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vTargetObject := ENV_MANAGER.gvDataBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_ODS ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM, ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM); + END IF; + + DBMS_CLOUD.COPY_OBJECT(source_credential_name => ENV_MANAGER.gvCredentialName, + source_object_uri => vSourceObject, + target_object_uri => vTargetObject, + target_credential_name => ENV_MANAGER.gvCredentialName + ); + ENV_MANAGER.LOG_PROCESS_EVENT('File copied to '||pDestination||' target location','DEBUG', vTargetObject); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END COPY_FILE; + + ---------------------------------------------------------------------------------------------------- + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE MOVE_FILE(pSourceFileReceivedKey IN NUMBER, pDestination IN VARCHAR2) + -- + -- Possible pDestination values are: 'ODS' or 'ARCHIVE' + -- + IS + vSourceFileReceivedInfo tSourceFileReceived; + vSourceObject VARCHAR2(2000); + vTargetObject VARCHAR2(2000); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vStatus VARCHAR2(20); + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedKey => ' ||nvl(to_char(pSourceFileReceivedKey), 'NULL'), + 'pDestination => '''||nvl(pDestination, 'NULL')||'''')); + 
ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + + vSourceFileReceivedInfo := GET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey); + + IF pDestination = 'ODS' THEN + vSourceObject := ENV_MANAGER.gvInboxBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_INBOX ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vTargetObject := ENV_MANAGER.gvDataBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_ODS ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vStatus := 'READY_FOR_INGESTION'; + ELSIF pDestination = 'QUARANTINE' THEN + vSourceObject := ENV_MANAGER.gvInboxBucketUri ||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_INBOX ||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vTargetObject := ENV_MANAGER.gvInboxBucketUri||vSourceFileReceivedInfo.SOURCE_FILE_PREFIX_QUARANTINE||vSourceFileReceivedInfo.SOURCE_FILE_NAME; + vStatus := 'VALIDATION_FAILED'; + ELSE + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM, ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM); + END IF; + + DBMS_CLOUD.MOVE_OBJECT(source_credential_name => ENV_MANAGER.gvCredentialName, + source_object_uri => vSourceObject, + target_object_uri => vTargetObject, + target_credential_name => ENV_MANAGER.gvCredentialName + ); + ENV_MANAGER.LOG_PROCESS_EVENT('File moved to '||pDestination||' target location','DEBUG', vTargetObject); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => pSourceFileReceivedKey, pStatus => vStatus); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_WRONG_DESTINATION_PARAM THEN + ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM := ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM + ||cgBL||' '||'Possible parameters are: ''ODS'' or ''ARCHIVE''' + ||cgBL||' '||'Provided destination parameter: '''||pDestination||''''; + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM, ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM); + WHEN 
ENV_MANAGER.ERR_NO_CONFIG_FOR_RECEIVED_FILE THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END MOVE_FILE; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE DELETE_FOLDER_CONTENTS(pBucketArea IN VARCHAR2, pFolderPrefix IN VARCHAR2) + -- + -- Delete all files from specified folder in cloud storage + -- pBucketArea: 'INBOX', 'DATA', 'ARCHIVE' + -- pFolderPrefix: folder path within bucket (e.g., 'C2D/UC_DISSEM/UC_NMA_DISSEM/') + -- + IS + vBucketUri VARCHAR2(2000); + vFolderUri VARCHAR2(2000); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vFilesDeleted PLS_INTEGER := 0; + vObjectName VARCHAR2(4000); + vFullObjectUri VARCHAR2(4000); + + -- Cursor to list all objects in the folder + CURSOR c_objects IS + SELECT object_name + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => ENV_MANAGER.gvCredentialName, + location_uri => vBucketUri + )) + WHERE object_name IS NOT NULL + AND object_name LIKE pFolderPrefix || '%'; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( + 'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''', + 'pFolderPrefix => '''||nvl(pFolderPrefix, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area + vBucketUri := GET_BUCKET_URI(pBucketArea); + + ENV_MANAGER.LOG_PROCESS_EVENT('Listing objects in bucket with prefix: ' || pFolderPrefix, 'DEBUG', vBucketUri); + + -- List and delete all objects in the 
folder + FOR obj_rec IN c_objects LOOP + vObjectName := obj_rec.object_name; + vFullObjectUri := vBucketUri || vObjectName; + + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Deleting object', 'DEBUG', vFullObjectUri); + + DBMS_CLOUD.DELETE_OBJECT( + credential_name => ENV_MANAGER.gvCredentialName, + object_uri => vFullObjectUri + ); + + vFilesDeleted := vFilesDeleted + 1; + ENV_MANAGER.LOG_PROCESS_EVENT('Object deleted successfully', 'DEBUG', vObjectName); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Error deleting object: ' || vObjectName || ' - ' || SQLERRM, 'ERROR', vParameters); + -- Continue with next file instead of stopping the whole process + END; + END LOOP; + + ENV_MANAGER.LOG_PROCESS_EVENT('Total files deleted: ' || vFilesDeleted, 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO', vParameters); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Error in DELETE_FOLDER_CONTENTS: ' || SQLERRM, 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + END DELETE_FOLDER_CONTENTS; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE PROCESS_SOURCE_FILE(pSourceFileReceivedName IN VARCHAR2) + -- + -- Ubmrella procedure that calls + -- - REGISTER_SOURCE_FILE_RECEIVED + -- - CREATE_EXTERNAL_TABLE + -- - VALIDATE_SOURCE_FILE_RECEIVED + -- - DROP_EXTERNAL_TABLE + -- - MOVE_FILE + IS + vSourceFileId NUMBER; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedName => '||nvl(pSourceFileReceivedName, 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + ---- + vSourceFileId := REGISTER_SOURCE_FILE_RECEIVED(pSourceFileReceivedName); + CREATE_EXTERNAL_TABLE(vSourceFileId); 
+ VALIDATE_SOURCE_FILE_RECEIVED(vSourceFileId); + DROP_EXTERNAL_TABLE(vSourceFileId); +-- COPY_FILE(vSourceFileId, 'ODS'); + MOVE_FILE(vSourceFileId, 'ODS'); + SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => vSourceFileId, pStatus => 'READY_FOR_INGESTION'); + + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + -- -20001 + WHEN ENV_MANAGER.ERR_EMPTY_FILEURI_AND_RECKEY THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EMPTY_FILEURI_AND_RECKEY, ENV_MANAGER.MSG_EMPTY_FILEURI_AND_RECKEY); + -- -20002 + WHEN ENV_MANAGER.ERR_NO_CONFIG_MATCH_FOR_FILEURI THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_MATCH_FOR_FILEURI, ENV_MANAGER.MSG_NO_CONFIG_MATCH_FOR_FILEURI); + -- -20003 + WHEN ENV_MANAGER.ERR_MULTIPLE_MATCH_FOR_SRCFILE THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MULTIPLE_MATCH_FOR_SRCFILE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_MATCH_FOR_SRCFILE, ENV_MANAGER.MSG_MULTIPLE_MATCH_FOR_SRCFILE); + -- -20004 + WHEN ENV_MANAGER.ERR_MISSING_COLUMN_DATE_FORMAT THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MISSING_COLUMN_DATE_FORMAT), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_COLUMN_DATE_FORMAT, ENV_MANAGER.MSG_MISSING_COLUMN_DATE_FORMAT); + -- -20005 + WHEN ENV_MANAGER.ERR_MULTIPLE_COLUMN_DATE_FORMAT THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MULTIPLE_COLUMN_DATE_FORMAT), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_COLUMN_DATE_FORMAT, ENV_MANAGER.MSG_MULTIPLE_COLUMN_DATE_FORMAT); + -- -20006 + WHEN 
ENV_MANAGER.ERR_DIDNT_GET_LOAD_OPERATION_ID THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DIDNT_GET_LOAD_OPERATION_ID, ENV_MANAGER.MSG_DIDNT_GET_LOAD_OPERATION_ID); + -- -20007 + WHEN ENV_MANAGER.ERR_NO_CONFIG_FOR_RECEIVED_FILE THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_NO_CONFIG_FOR_RECEIVED_FILE); + -- -20008 + WHEN ENV_MANAGER.ERR_MULTI_CONFIG_FOR_RECEIVED_FILE THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MULTI_CONFIG_FOR_RECEIVED_FILE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTI_CONFIG_FOR_RECEIVED_FILE, ENV_MANAGER.MSG_MULTI_CONFIG_FOR_RECEIVED_FILE); + -- -20009 + WHEN ENV_MANAGER.ERR_FILE_NOT_FOUND_ON_CLOUD THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_FILE_NOT_FOUND_ON_CLOUD), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_FOUND_ON_CLOUD, ENV_MANAGER.MSG_FILE_NOT_FOUND_ON_CLOUD); + -- -20010 + WHEN ENV_MANAGER.ERR_FILE_VALIDATION_FAILED THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_FILE_VALIDATION_FAILED), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_VALIDATION_FAILED, ENV_MANAGER.MSG_FILE_VALIDATION_FAILED); + -- -20011 + WHEN ENV_MANAGER.ERR_NO_CONFIG_MATCH THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_NO_CONFIG_MATCH), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NO_CONFIG_MATCH, ENV_MANAGER.MSG_NO_CONFIG_MATCH); + -- -20012 + WHEN ENV_MANAGER.ERR_UNKNOWN_PREFIX THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 
'TABLE', pCode=> ENV_MANAGER.CODE_UNKNOWN_PREFIX), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN_PREFIX, ENV_MANAGER.MSG_UNKNOWN_PREFIX); + -- -20013 + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_TABLE_NOT_EXISTS), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + -- -20014 + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_COLUMN_NOT_EXISTS), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + -- -20015 + WHEN ENV_MANAGER.ERR_UNSUPPORTED_DATA_TYPE THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE); + -- -20016 + WHEN ENV_MANAGER.ERR_MISSING_SOURCE_KEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MISSING_SOURCE_KEY), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_SOURCE_KEY, ENV_MANAGER.MSG_MISSING_SOURCE_KEY); + -- -20017 + WHEN ENV_MANAGER.ERR_NULL_SOURCE_FILE_CONFIG_KEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_NULL_SOURCE_FILE_CONFIG_KEY), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_NULL_SOURCE_FILE_CONFIG_KEY, ENV_MANAGER.MSG_NULL_SOURCE_FILE_CONFIG_KEY); + -- -20018 + WHEN ENV_MANAGER.ERR_DUPLICATED_SOURCE_KEY THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_DUPLICATED_SOURCE_KEY), 'ERROR', vParameters); + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DUPLICATED_SOURCE_KEY, ENV_MANAGER.MSG_DUPLICATED_SOURCE_KEY); + -- -20019 + WHEN ENV_MANAGER.ERR_MISSING_CONTAINER_CONFIG THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MISSING_CONTAINER_CONFIG), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_CONTAINER_CONFIG, ENV_MANAGER.MSG_MISSING_CONTAINER_CONFIG); + -- -20020 + WHEN ENV_MANAGER.ERR_MULTIPLE_CONTAINER_ENTRIES THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_MULTIPLE_CONTAINER_ENTRIES), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_CONTAINER_ENTRIES, ENV_MANAGER.MSG_MULTIPLE_CONTAINER_ENTRIES); + -- -20021 + WHEN ENV_MANAGER.ERR_WRONG_DESTINATION_PARAM THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DESTINATION_PARAM, ENV_MANAGER.MSG_WRONG_DESTINATION_PARAM); + -- -20022 + WHEN ENV_MANAGER.ERR_FILE_NOT_EXISTS_ON_CLOUD THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_EXISTS_ON_CLOUD, ENV_MANAGER.MSG_FILE_NOT_EXISTS_ON_CLOUD); + -- -20023 + WHEN ENV_MANAGER.ERR_FILE_ALREADY_REGISTERED THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_FILE_ALREADY_REGISTERED), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_ALREADY_REGISTERED, ENV_MANAGER.MSG_FILE_ALREADY_REGISTERED); + -- -20024 + WHEN ENV_MANAGER.ERR_WRONG_DATE_TIMESTAMP_FORMAT THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_WRONG_DATE_TIMESTAMP_FORMAT), 'ERROR', vParameters); + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_WRONG_DATE_TIMESTAMP_FORMAT, ENV_MANAGER.MSG_WRONG_DATE_TIMESTAMP_FORMAT); + + -- -20011 + WHEN ENV_MANAGER.ERR_EXCESS_COLUMNS_DETECTED THEN + ENV_MANAGER.LOG_PROCESS_ERROR(ENV_MANAGER.MSG_EXCESS_COLUMNS_DETECTED, vParameters, 'FILE_MANAGER'); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_EXCESS_COLUMNS_DETECTED, ENV_MANAGER.MSG_EXCESS_COLUMNS_DETECTED); + + -- -20999 + WHEN ENV_MANAGER.ERR_UNKNOWN THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> ENV_MANAGER.CODE_UNKNOWN), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + + + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN, 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END PROCESS_SOURCE_FILE; + + +---------------------------------------------------------------------------------------------------- + + FUNCTION PROCESS_SOURCE_FILE(pSourceFileReceivedName IN VARCHAR2) + RETURN PLS_INTEGER + IS + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileReceivedName => '||nvl(pSourceFileReceivedName, 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + ---- + PROCESS_SOURCE_FILE(pSourceFileReceivedName => pSourceFileReceivedName); + ---- + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + RETURN SQLCODE; + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RETURN SQLCODE; + END PROCESS_SOURCE_FILE; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION 
GET_DATE_FORMAT( + pTemplateTableName IN VARCHAR2, + pColumnName IN VARCHAR2 + ) + RETURN VARCHAR2 + IS + vDateFormat A_COLUMN_DATE_FORMAT.DATE_FORMAT%TYPE; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vColumnName VARCHAR2(200); + vGetDefault BOOLEAN := FALSE; + BEGIN + vColumnName := trim(pColumnName); + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pColumnName => '''||nvl(vColumnName, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + BEGIN + SELECT DATE_FORMAT + INTO vDateFormat + FROM CT_MRDS.A_COLUMN_DATE_FORMAT F + WHERE F.TEMPLATE_TABLE_NAME = pTemplateTableName + AND F.COLUMN_NAME = vColumnName; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vGetDefault := TRUE; + WHEN TOO_MANY_ROWS THEN + -- Below error should not happened because: + -- Unique constraint added on table A_COLUMN_DATE_FORMAT on columns: (TEMPLATE_TABLE_NAME, COLUMN_NAME) + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_COLUMN_DATE_FORMAT, ENV_MANAGER.MSG_MULTIPLE_COLUMN_DATE_FORMAT); + END; + IF vGetDefault THEN + BEGIN + SELECT DATE_FORMAT + INTO vDateFormat + FROM CT_MRDS.A_COLUMN_DATE_FORMAT F + WHERE F.TEMPLATE_TABLE_NAME = pTemplateTableName + AND F.COLUMN_NAME = 'DEFAULT'; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vDateFormat := ENV_MANAGER.gvDefaultDateFormat; + END; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + RETURN vDateFormat; + END GET_DATE_FORMAT; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE GENERATE_EXTERNAL_TABLE_PARAMS ( + + pTemplateTableName IN VARCHAR2, + pColumnList OUT CLOB, + pFieldList OUT CLOB + ) + IS + vSchemaName VARCHAR2(200); + vTableName VARCHAR2(200); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vMaxColumnNameLength PLS_INTEGER := 0; + vColType varchar2(200); + BEGIN + vParameters := 
ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pFieldList = '''||nvl(pFieldList, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + vSchemaName := REPLACE(REGEXP_SUBSTR(pTemplateTableName,'.*\.'),'.',''); + ENV_MANAGER.LOG_PROCESS_EVENT('vSchemaName','DEBUG', vSchemaName); + vTableName := REPLACE(REGEXP_SUBSTR(pTemplateTableName,'\..*'),'.',''); + ENV_MANAGER.LOG_PROCESS_EVENT('vTableName','DEBUG', vTableName); + FOR rec IN ( + SELECT + '"'||column_name||'"' as quoted_column_name, + column_name, + data_type, + data_length, + char_length, -- MARS-1056: Added for CHAR/BYTE semantics + char_used, -- MARS-1056: Added for CHAR/BYTE semantics + data_precision, + data_scale, + column_id, + max(length(column_name)+1) over (partition by table_name) as max_column_name_length + FROM all_tab_columns + WHERE table_name = UPPER(vTableName) + AND owner = NVL(UPPER(vSchemaName), USER) + ORDER BY column_id + ) LOOP + -- Build the column_list string + rec.quoted_column_name := rpad(rec.quoted_column_name, rec.max_column_name_length+2, ' '); + + vColType := + CASE + -- MARS-1056: Fixed VARCHAR2 definition logic to preserve CHAR/BYTE semantics + WHEN rec.data_type = 'VARCHAR2' THEN + CASE + WHEN rec.char_used = 'C' THEN + rec.quoted_column_name || ' VARCHAR2(' || rec.char_length || ' CHAR)' + WHEN rec.char_used = 'B' THEN + rec.quoted_column_name || ' VARCHAR2(' || rec.data_length || ' BYTE)' + ELSE + -- Fallback for NULL char_used (should not occur but handle gracefully) + rec.quoted_column_name || ' VARCHAR2(' || rec.data_length || ')' + END + -- Other character types (preserve original logic) + WHEN rec.data_type IN ('CHAR', 'NCHAR', 'NVARCHAR2') THEN + rec.quoted_column_name || ' ' || rec.data_type || '(' || rec.data_length || ')' + WHEN rec.data_type = 'NUMBER' THEN + rec.quoted_column_name || ' ' || rec.data_type || + 
CASE + WHEN rec.data_precision IS NOT NULL AND rec.data_scale IS NOT NULL THEN + '(' || rec.data_precision || ',' || rec.data_scale || ')' + WHEN rec.data_precision IS NOT NULL THEN + '(' || rec.data_precision || ')' + ELSE + '' + END + WHEN rec.data_type IN ('RAW') THEN + rec.quoted_column_name || ' ' || rec.data_type || '(' || rec.data_length || ')' + WHEN REGEXP_SUBSTR(rec.data_type, '^[A-Z]+') IN ('DATE', 'TIMESTAMP') THEN + rec.quoted_column_name || ' ' || rec.data_type + ELSE + rec.quoted_column_name || ' ' || rec.data_type + END; + pColumnList := pColumnList ||vColType ||cgBL|| ','; + -- Build the field_list string + -- Note: field_list uses CHAR() for CSV field definitions - this is correct behavior + pFieldList := pFieldList || + CASE + WHEN REGEXP_SUBSTR(rec.data_type, '^[A-Z]+') IN ('DATE', 'TIMESTAMP') THEN + rec.quoted_column_name || ' DATE ' || CHR(39) || GET_DATE_FORMAT(pTemplateTableName => pTemplateTableName, pColumnName => rec.column_name) || CHR(39) + WHEN rec.data_type IN ('CHAR', 'NCHAR', 'VARCHAR2', 'NVARCHAR2') THEN + -- For CSV field definitions, use data_length for CHAR() specification + rec.quoted_column_name || ' CHAR(' || rec.data_length || ')' + ELSE + rec.quoted_column_name + END ||cgBL|| ','; + + + END LOOP; + + -- Remove the trailing comma and space from the strings + pColumnList := ' '||RTRIM(pColumnList, ','); + pFieldList := ' '||RTRIM(pFieldList, ','); + ENV_MANAGER.LOG_PROCESS_EVENT('vColumnList', 'DEBUG', pColumnList); + -- TO_DO !!! + -- Add check if pColumnList/pFieldList is empty or not + -- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ + + + -- Output the generated column_list and field_list + ENV_MANAGER.LOG_PROCESS_EVENT('column_list' ,'DEBUG',pColumnList); + ENV_MANAGER.LOG_PROCESS_EVENT('field_list' ,'DEBUG',pFieldList); + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + + EXCEPTION +-- WHEN ENV_MANAGER.ERR_MISSING_COLUMN_DATE_FORMAT THEN +-- ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MISSING_COLUMN_DATE_FORMAT, 'ERROR', vParameters); +-- RAISE_ERROR(ENV_MANAGER.CODE_MISSING_COLUMN_DATE_FORMAT); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + END GENERATE_EXTERNAL_TABLE_PARAMS; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE ADD_SOURCE ( + pSourceKey IN CT_MRDS.A_SOURCE.A_SOURCE_KEY%TYPE, + pSourceName IN CT_MRDS.A_SOURCE.SOURCE_NAME%TYPE + ) IS + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + INSERT INTO CT_MRDS.A_SOURCE(A_SOURCE_KEY, SOURCE_NAME) VALUES (pSourceKey, pSourceName); + COMMIT; + EXCEPTION + WHEN DUP_VAL_ON_INDEX THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_DUPLICATED_SOURCE_KEY, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_DUPLICATED_SOURCE_KEY, ENV_MANAGER.MSG_DUPLICATED_SOURCE_KEY); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + END ADD_SOURCE; + + ---------------------------------------------------------------------------------------------------- + + 
PROCEDURE DELETE_SOURCE_CASCADE ( + pSourceKey IN CT_MRDS.A_SOURCE.A_SOURCE_KEY%TYPE + ) IS + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vSharedTemplateCount PLS_INTEGER := 0; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceKey => '''||nvl(pSourceKey, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- First pass: Delete container files (those that have CONTAINER_FILE_KEY set) + for rec in (select A_SOURCE_FILE_CONFIG_KEY, TEMPLATE_TABLE_NAME from CT_MRDS.A_SOURCE_FILE_CONFIG + where A_SOURCE_KEY = pSourceKey AND CONTAINER_FILE_KEY IS NOT NULL + ORDER BY A_SOURCE_FILE_CONFIG_KEY) loop + -- Delete processed file records + delete from CT_MRDS.A_SOURCE_FILE_RECEIVED WHERE A_SOURCE_FILE_CONFIG_KEY = rec.A_SOURCE_FILE_CONFIG_KEY; + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_SOURCE_FILE_RECEIVED records for container config key: '||rec.A_SOURCE_FILE_CONFIG_KEY,'DEBUG', vParameters); + + -- Check if TEMPLATE_TABLE_NAME is shared with other source systems before deleting date formats + IF rec.TEMPLATE_TABLE_NAME IS NOT NULL THEN + SELECT COUNT(DISTINCT A_SOURCE_KEY) + INTO vSharedTemplateCount + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE TEMPLATE_TABLE_NAME = rec.TEMPLATE_TABLE_NAME + AND A_SOURCE_KEY <> pSourceKey; -- Exclude current source being deleted + + -- Only delete date formats if template table is not shared with other sources + IF vSharedTemplateCount = 0 THEN + delete from CT_MRDS.A_COLUMN_DATE_FORMAT WHERE TEMPLATE_TABLE_NAME = rec.TEMPLATE_TABLE_NAME; + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_COLUMN_DATE_FORMAT records for template: '||rec.TEMPLATE_TABLE_NAME,'DEBUG', vParameters); + ELSE + ENV_MANAGER.LOG_PROCESS_EVENT('Skipping A_COLUMN_DATE_FORMAT deletion - template table '||rec.TEMPLATE_TABLE_NAME||' is shared with '||vSharedTemplateCount||' other source systems','WARNING', vParameters); + END IF; + END IF; + + -- Delete container file configuration + delete from 
CT_MRDS.A_SOURCE_FILE_CONFIG WHERE A_SOURCE_FILE_CONFIG_KEY = rec.A_SOURCE_FILE_CONFIG_KEY; + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted container A_SOURCE_FILE_CONFIG record for config key: '||rec.A_SOURCE_FILE_CONFIG_KEY,'DEBUG', vParameters); + end loop; + COMMIT; -- Commit container deletions + + -- Second pass: Delete parent files (those that do NOT have CONTAINER_FILE_KEY set) + for rec in (select A_SOURCE_FILE_CONFIG_KEY, TEMPLATE_TABLE_NAME from CT_MRDS.A_SOURCE_FILE_CONFIG + where A_SOURCE_KEY = pSourceKey AND CONTAINER_FILE_KEY IS NULL + ORDER BY A_SOURCE_FILE_CONFIG_KEY) loop + -- Delete processed file records + delete from CT_MRDS.A_SOURCE_FILE_RECEIVED WHERE A_SOURCE_FILE_CONFIG_KEY = rec.A_SOURCE_FILE_CONFIG_KEY; + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_SOURCE_FILE_RECEIVED records for parent config key: '||rec.A_SOURCE_FILE_CONFIG_KEY,'DEBUG', vParameters); + + -- Check if TEMPLATE_TABLE_NAME is shared with other source systems before deleting date formats + IF rec.TEMPLATE_TABLE_NAME IS NOT NULL THEN + SELECT COUNT(DISTINCT A_SOURCE_KEY) + INTO vSharedTemplateCount + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE TEMPLATE_TABLE_NAME = rec.TEMPLATE_TABLE_NAME + AND A_SOURCE_KEY <> pSourceKey; -- Exclude current source being deleted + + -- Only delete date formats if template table is not shared with other sources + IF vSharedTemplateCount = 0 THEN + delete from CT_MRDS.A_COLUMN_DATE_FORMAT WHERE TEMPLATE_TABLE_NAME = rec.TEMPLATE_TABLE_NAME; + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_COLUMN_DATE_FORMAT records for template: '||rec.TEMPLATE_TABLE_NAME,'DEBUG', vParameters); + ELSE + ENV_MANAGER.LOG_PROCESS_EVENT('Skipping A_COLUMN_DATE_FORMAT deletion - template table '||rec.TEMPLATE_TABLE_NAME||' is shared with '||vSharedTemplateCount||' other source systems','WARNING', vParameters); + END IF; + END IF; + + -- Delete parent file configuration + delete from CT_MRDS.A_SOURCE_FILE_CONFIG WHERE A_SOURCE_FILE_CONFIG_KEY = rec.A_SOURCE_FILE_CONFIG_KEY; + 
ENV_MANAGER.LOG_PROCESS_EVENT('Deleted parent A_SOURCE_FILE_CONFIG record for config key: '||rec.A_SOURCE_FILE_CONFIG_KEY,'DEBUG', vParameters); + end loop; + COMMIT; -- Commit parent deletions + + -- Delete source system record + DELETE FROM CT_MRDS.A_SOURCE where A_SOURCE_KEY = pSourceKey; + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted A_SOURCE record for source key: '||pSourceKey,'DEBUG', vParameters); + COMMIT; -- Final commit for source deletion + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO', vParameters); + + EXCEPTION + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN, 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + END DELETE_SOURCE_CASCADE; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_CONTAINER_SOURCE_FILE_CONFIG_KEY ( + pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE + ) RETURN PLS_INTEGER + IS + vSourceFileConfigKey PLS_INTEGER; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileId => '||nvl(to_char(pSourceFileId), 'NULL'))); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + SELECT A_SOURCE_FILE_CONFIG_KEY + INTO vSourceFileConfigKey + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE SOURCE_FILE_ID = pSourceFileId + AND SOURCE_FILE_TYPE = 'CONTAINER'; + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG',vParameters); + RETURN vSourceFileConfigKey; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + ENV_MANAGER.MSG_MISSING_CONTAINER_CONFIG := 'No match in A_SOURCE_FILE_CONFIG where SOURCE_FILE_TYPE=''CONTAINER'' and SOURCE_FILE_ID = '''||pSourceFileId||''''; + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MISSING_CONTAINER_CONFIG, 'WARNING', 
vParameters); + RETURN NULL; + WHEN TOO_MANY_ROWS THEN + ENV_MANAGER.MSG_MULTIPLE_CONTAINER_ENTRIES := 'GET_CONTAINER_SOURCE_FILE_CONFIG_KEY: Multiple SOURCE_FILE_TYPE=''CONTAINER'' matches for SOURCE_FILE_ID: '||pSourceFileId||' in A_SOURCE_FILE_CONFIG'; + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MULTIPLE_CONTAINER_ENTRIES, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_CONTAINER_ENTRIES, ENV_MANAGER.MSG_MULTIPLE_CONTAINER_ENTRIES); + WHEN OTHERS THEN + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN , 'ERROR', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN); + + END GET_CONTAINER_SOURCE_FILE_CONFIG_KEY; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_SOURCE_FILE_CONFIG_KEY ( + pSourceFileType IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE DEFAULT 'INPUT' + ,pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE + ,pTableId IN CT_MRDS.A_SOURCE_FILE_CONFIG.TABLE_ID%TYPE + ) RETURN PLS_INTEGER + IS + vSourceFileConfigKey PLS_INTEGER; + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileType => '''||nvl(pSourceFileType, 'NULL')||'''' + ,'pSourceFileId => '''||nvl(pSourceFileId, 'NULL')||'''' + ,'pTableId => '''||nvl(pTableId, 'NULL')||'''')); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','DEBUG', vParameters); + SELECT A_SOURCE_FILE_CONFIG_KEY + INTO vSourceFileConfigKey + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE SOURCE_FILE_TYPE = pSourceFileType + AND SOURCE_FILE_ID = pSourceFileId + AND TABLE_ID = pTableId; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','DEBUG', vParameters); + RETURN vSourceFileConfigKey; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN NULL; + WHEN TOO_MANY_ROWS THEN + 
-- (continuation of the GET_SOURCE_FILE_CONFIG_KEY exception section; the WHEN
--  clause that selects this handler lies above this chunk and is not visible here)
         -- Build a detailed "multiple matches" diagnostic, log it, and re-raise
         -- under the dedicated error code so callers can distinguish the cause.
         vgMsgTmp := ENV_MANAGER.MSG_MULTIPLE_MATCH_FOR_SRCFILE
                     ||cgBL||' '||'GET_SOURCE_FILE_CONFIG_KEY: Multiple matches in A_SOURCE_FILE_CONFIG table WHERE'
                     ||cgBL||' '||'SOURCE_FILE_TYPE: '||pSourceFileType
                     ||cgBL||' '||'SOURCE_FILE_ID: '||pSourceFileId
                     ||cgBL||' '||'TABLE_ID: '||pTableId;
         ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters);
         RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MULTIPLE_MATCH_FOR_SRCFILE, vgMsgTmp);
      WHEN OTHERS THEN
         -- Unknown failure: log the generic message plus the full error stack,
         -- then re-raise under the generic error code.
         ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN, 'ERROR', vParameters);
         ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode => SQLCODE), 'ERROR', vParameters);
         RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.MSG_UNKNOWN);

   END GET_SOURCE_FILE_CONFIG_KEY;

   ----------------------------------------------------------------------------------------------------

   -- Inserts one configuration row into CT_MRDS.A_SOURCE_FILE_CONFIG and COMMITs.
   -- On ORA-02291 (parent key not found, i.e. pSourceKey missing from A_SOURCE)
   -- it raises CODE_MISSING_SOURCE_KEY with an actionable message; any other
   -- error is logged with the full stack and re-raised as CODE_UNKNOWN.
   -- MARS-1049: pEncoding added (backward-compatible, defaults to NULL).
   PROCEDURE ADD_SOURCE_FILE_CONFIG (
      pSourceKey             IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_KEY%TYPE
     ,pSourceFileType        IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE
     ,pSourceFileId          IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE
     ,pSourceFileDesc        IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_DESC%TYPE
     ,pSourceFileNamePattern IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_NAME_PATTERN%TYPE
     ,pTableId               IN CT_MRDS.A_SOURCE_FILE_CONFIG.TABLE_ID%TYPE            DEFAULT NULL
     ,pTemplateTableName     IN CT_MRDS.A_SOURCE_FILE_CONFIG.TEMPLATE_TABLE_NAME%TYPE DEFAULT NULL
     ,pContainerFileKey      IN CT_MRDS.A_SOURCE_FILE_CONFIG.CONTAINER_FILE_KEY%TYPE  DEFAULT NULL
     ,pEncoding              IN CT_MRDS.A_SOURCE_FILE_CONFIG.ENCODING%TYPE            DEFAULT NULL -- MARS-1049: new parameter
   ) IS
      vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
      -- NOTE(review): the original declared vSourceFileConfigKey and vSourceKeyExists
      -- here but never used them; both removed.
   BEGIN
      vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST(
            'pSourceKey => '''||nvl(to_char(pSourceKey), 'NULL')||''''
           ,'pSourceFileType => '''||nvl(to_char(pSourceFileType), 'NULL')||''''
           ,'pSourceFileId => '''||nvl(to_char(pSourceFileId), 'NULL')||''''
           ,'pSourceFileDesc => '''||nvl(to_char(pSourceFileDesc), 'NULL')||''''
           ,'pSourceFileNamePattern => '''||nvl(to_char(pSourceFileNamePattern), 'NULL')||''''
           ,'pTableId => '''||nvl(to_char(pTableId), 'NULL')||''''
           ,'pTemplateTableName => '''||nvl(to_char(pTemplateTableName), 'NULL')||''''
           ,'pContainerFileKey => '''||nvl(to_char(pContainerFileKey), 'NULL')||''''
           ,'pEncoding => '''||nvl(to_char(pEncoding), 'NULL')||'''' -- MARS-1049
         ));
      ENV_MANAGER.LOG_PROCESS_EVENT('Start', 'INFO', vParameters);

      INSERT INTO CT_MRDS.A_SOURCE_FILE_CONFIG(
             A_SOURCE_KEY, SOURCE_FILE_TYPE, SOURCE_FILE_ID, SOURCE_FILE_DESC,
             SOURCE_FILE_NAME_PATTERN, TABLE_ID, TEMPLATE_TABLE_NAME, CONTAINER_FILE_KEY, ENCODING)
      VALUES (pSourceKey, pSourceFileType, pSourceFileId, pSourceFileDesc,
              pSourceFileNamePattern, pTableId, pTemplateTableName, pContainerFileKey, pEncoding);
      COMMIT;

      ENV_MANAGER.LOG_PROCESS_EVENT('End', 'INFO', vParameters);
   EXCEPTION
      WHEN OTHERS THEN
         IF SQLCODE = -2291 THEN -- ORA-02291: integrity constraint violated - parent key not found
            -- NOTE(review): ENV_MANAGER.MSG_MISSING_SOURCE_KEY is ASSIGNED here, so it
            -- must be a package variable rather than a constant — confirm in ENV_MANAGER.
            ENV_MANAGER.MSG_MISSING_SOURCE_KEY :=
                  'The Source with A_SOURCE_KEY: '''||pSourceKey||''' not found in parent table A_SOURCE.'
               ||cgBL||'First add a record to A_SOURCE:'
               ||cgBL||'call file_manager.add_source(pSourceKey => '''||pSourceKey||''', pSourceName => ''...'')';
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_MISSING_SOURCE_KEY, 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_MISSING_SOURCE_KEY, ENV_MANAGER.MSG_MISSING_SOURCE_KEY);
         ELSE
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN, 'ERROR', vParameters);
            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode => SQLCODE), 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode => SQLCODE));
         END IF;
   END ADD_SOURCE_FILE_CONFIG;

   ----------------------------------------------------------------------------------------------------

   -- Inserts one date-format mapping into CT_MRDS.A_COLUMN_DATE_FORMAT and COMMITs.
   -- Any failure is logged with the full error stack and re-raised as CODE_UNKNOWN.
   PROCEDURE ADD_COLUMN_DATE_FORMAT (
      pTemplateTableName IN CT_MRDS.A_COLUMN_DATE_FORMAT.TEMPLATE_TABLE_NAME%TYPE
     ,pColumnName        IN CT_MRDS.A_COLUMN_DATE_FORMAT.COLUMN_NAME%TYPE
     ,pDateFormat        IN CT_MRDS.A_COLUMN_DATE_FORMAT.DATE_FORMAT%TYPE
   ) IS
      vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
   BEGIN
      vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST(
            'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||''''
           ,'pColumnName => '''||nvl(pColumnName, 'NULL')||''''
           ,'pDateFormat => '''||nvl(pDateFormat, 'NULL')||''''
         ));
      ENV_MANAGER.LOG_PROCESS_EVENT('Start', 'DEBUG', vParameters);

      INSERT INTO CT_MRDS.A_COLUMN_DATE_FORMAT(TEMPLATE_TABLE_NAME, COLUMN_NAME, DATE_FORMAT)
      VALUES (pTemplateTableName, pColumnName, pDateFormat);
      COMMIT;

      ENV_MANAGER.LOG_PROCESS_EVENT('End', 'DEBUG', vParameters);
   EXCEPTION
      WHEN OTHERS THEN
         ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.MSG_UNKNOWN, 'ERROR', vParameters);
         ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode => SQLCODE), 'ERROR', vParameters);
         RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode => SQLCODE));
   END ADD_COLUMN_DATE_FORMAT;

   ----------------------------------------------------------------------------------------------------

   -- Maps a logical bucket-area name to its object-storage base URI held in
   -- ENV_MANAGER. 'ODS' and 'DATA' intentionally resolve to the same bucket.
   -- Raises CODE_INVALID_BUCKET_AREA for any other value.
   FUNCTION GET_BUCKET_URI(pBucketArea VARCHAR2)
      RETURN VARCHAR2
   IS
   BEGIN
      CASE pBucketArea
         WHEN 'INBOX'   THEN RETURN ENV_MANAGER.gvInboxBucketUri;
         WHEN 'ODS'     THEN RETURN ENV_MANAGER.gvDataBucketUri;
         WHEN 'DATA'    THEN RETURN ENV_MANAGER.gvDataBucketUri;
         WHEN 'ARCHIVE' THEN RETURN ENV_MANAGER.gvArchiveBucketUri;
         ELSE
            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_BUCKET_AREA,
               ENV_MANAGER.MSG_INVALID_BUCKET_AREA || ' Provided: ''' || pBucketArea || '''');
      END CASE;
   END;

   ----------------------------------------------------------------------------------------------------

   -- Returns a human-readable, multi-line description of one A_SOURCE_FILE_CONFIG
   -- row, optionally followed by its container config (CONTAINER_FILE_KEY) and
   -- the A_COLUMN_DATE_FORMAT entries of its template table.
   -- Returns a "not found" message instead of raising when the key is unknown.
   FUNCTION GET_DET_SOURCE_FILE_CONFIG_INFO (
      pSourceFileConfigKey     IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
     ,pIncludeContainerInfo    IN PLS_INTEGER DEFAULT 1
     ,pIncludeColumnFormatInfo IN PLS_INTEGER DEFAULT 1
   ) RETURN VARCHAR2
   IS
      vSourceFileConfig    CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE;
      vContainerFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE;
      vCount               PLS_INTEGER := 0;
      vMsgTmp              VARCHAR2(32000) := '';

      CURSOR cColumnFormat(vTableName A_COLUMN_DATE_FORMAT.TEMPLATE_TABLE_NAME%TYPE) IS
         SELECT *
           FROM CT_MRDS.A_COLUMN_DATE_FORMAT
          WHERE TEMPLATE_TABLE_NAME = vTableName;

      -- Formats one config row as an indented key = value listing.
      FUNCTION FORMAT_CONFIG( pSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE,
                              pTypeInfo         VARCHAR2 DEFAULT 'File Configuration',
                              pLevel            VARCHAR2 DEFAULT '')
         RETURN VARCHAR2
      IS
         vMsg VARCHAR2(9999);
      BEGIN
         vMsg := ''
            ||cgBL||pLevel||''||'Details about '||pTypeInfo||':'
            ||cgBL||pLevel||''||'--------------------------------'
            ||cgBL||pLevel||'A_SOURCE_FILE_CONFIG_KEY = '||pSourceFileConfig.A_SOURCE_FILE_CONFIG_KEY
            ||cgBL||pLevel||'A_SOURCE_KEY = '||pSourceFileConfig.A_SOURCE_KEY
            ||cgBL||pLevel||'SOURCE_FILE_TYPE = '||pSourceFileConfig.SOURCE_FILE_TYPE
            ||cgBL||pLevel||'SOURCE_FILE_ID = '||pSourceFileConfig.SOURCE_FILE_ID
            ||cgBL||pLevel||'SOURCE_FILE_DESC = '||pSourceFileConfig.SOURCE_FILE_DESC
            ||cgBL||pLevel||'SOURCE_FILE_NAME_PATTERN = '||pSourceFileConfig.SOURCE_FILE_NAME_PATTERN
            ||cgBL||pLevel||'TABLE_ID = '||pSourceFileConfig.TABLE_ID
            ||cgBL||pLevel||'TEMPLATE_TABLE_NAME = '||pSourceFileConfig.TEMPLATE_TABLE_NAME
            ||cgBL||pLevel||'CONTAINER_FILE_KEY = '||pSourceFileConfig.CONTAINER_FILE_KEY
            ||cgBL||pLevel||'ODS_SCHEMA_NAME = '||pSourceFileConfig.ODS_SCHEMA_NAME
            ||cgBL||pLevel||'DAYS_FOR_ARCHIVE_THRESHOLD = '||pSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD
            ||cgBL||pLevel||'FILES_COUNT_OVER_ARCHIVE_THRESHOLD = '||pSourceFileConfig.FILES_COUNT_OVER_ARCHIVE_THRESHOLD
            ||cgBL||pLevel||'BYTES_SUM_OVER_ARCHIVE_THRESHOLD = '||pSourceFileConfig.BYTES_SUM_OVER_ARCHIVE_THRESHOLD
            ||cgBL||pLevel||'ROWS_COUNT_OVER_ARCHIVE_THRESHOLD = '||pSourceFileConfig.ROWS_COUNT_OVER_ARCHIVE_THRESHOLD
            ||cgBL||pLevel||'HOURS_TO_EXPIRE_STATISTICS = '||pSourceFileConfig.HOURS_TO_EXPIRE_STATISTICS
            ||cgBL||pLevel||''||'--------------------------------'
            ;
         RETURN vMsg;
      END FORMAT_CONFIG;

   BEGIN
      vMsgTmp := '';

      -- Main config: a missing key is reported as text, not as an exception.
      BEGIN
         SELECT *
           INTO vSourceFileConfig
           FROM CT_MRDS.A_SOURCE_FILE_CONFIG
          WHERE A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey;

         vMsgTmp := FORMAT_CONFIG(pSourceFileConfig => vSourceFileConfig, pTypeInfo => 'File Configuration');
      EXCEPTION
         WHEN NO_DATA_FOUND THEN
            vMsgTmp := 'There is no config entry in A_SOURCE_FILE_CONFIG where A_SOURCE_FILE_CONFIG_KEY = '||pSourceFileConfigKey;
            RETURN vMsgTmp;
      END;

      -- Optional: related container configuration.
      IF pIncludeContainerInfo > 0 THEN
         BEGIN
            SELECT *
              INTO vContainerFileConfig
              FROM CT_MRDS.A_SOURCE_FILE_CONFIG
             WHERE A_SOURCE_FILE_CONFIG_KEY = vSourceFileConfig.CONTAINER_FILE_KEY;

            vMsgTmp := vMsgTmp || cgBL ||
                       FORMAT_CONFIG(pSourceFileConfig => vContainerFileConfig, pTypeInfo => 'related Container Config', pLevel => ' ');
         EXCEPTION
            WHEN NO_DATA_FOUND THEN
               vMsgTmp := vMsgTmp || cgBL || 'There is no CONTAINER config entry in A_SOURCE_FILE_CONFIG.';
         END;
      END IF;

      -- Optional: column date-format entries for the template table.
      -- FIX: the original used OPEN/FETCH/CLOSE with no CLOSE in any exception
      -- path, leaking the cursor on error; a cursor FOR loop closes it in all
      -- paths automatically.
      IF pIncludeColumnFormatInfo > 0 THEN
         FOR vColumnDateFormat IN cColumnFormat(vTableName => vSourceFileConfig.TEMPLATE_TABLE_NAME) LOOP
            IF vCount = 0 THEN
               -- Header is emitted once, before the first entry.
               vMsgTmp := vMsgTmp||cgBL|| cgBL || ' Column Date Format config entries:';
               vMsgTmp := vMsgTmp||cgBL||''||' --------------------------------';
            END IF;
            vCount  := vCount + 1;
            vMsgTmp := vMsgTmp
               ||cgBL||' TEMPLATE_TABLE_NAME = '||vColumnDateFormat.TEMPLATE_TABLE_NAME
               ||cgBL||' COLUMN_NAME = '||vColumnDateFormat.COLUMN_NAME
               ||cgBL||' DATE_FORMAT = '||vColumnDateFormat.DATE_FORMAT
               ||cgBL||''||' --------------------------------';
         END LOOP;
         IF vCount = 0 THEN
            vMsgTmp := vMsgTmp || cgBL || 'There is no Column Date Format config entries in A_COLUMN_DATE_FORMAT where TEMPLATE_TABLE_NAME = '||NVL(vSourceFileConfig.TEMPLATE_TABLE_NAME,'NULL');
         END IF;
      END IF;

      RETURN vMsgTmp;
   EXCEPTION
      WHEN OTHERS THEN
         RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode => SQLCODE));
   END GET_DET_SOURCE_FILE_CONFIG_INFO;

   ----------------------------------------------------------------------------------------------------

   -- Returns a human-readable description of one A_SOURCE_FILE_RECEIVED row,
   -- optionally followed by the related config details (delegates to
   -- GET_DET_SOURCE_FILE_CONFIG_INFO). Missing keys yield a message, not an error.
   FUNCTION GET_DET_SOURCE_FILE_RECEIVED_INFO (
      pSourceFileReceivedKey   IN CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE
     ,pIncludeConfigInfo       IN PLS_INTEGER DEFAULT 1
     ,pIncludeContainerInfo    IN PLS_INTEGER DEFAULT 1
     ,pIncludeColumnFormatInfo IN PLS_INTEGER DEFAULT 1
   ) RETURN VARCHAR2
   IS
      vSourceFileReceived CT_MRDS.A_SOURCE_FILE_RECEIVED%ROWTYPE;
      vMsgTmp             VARCHAR2(32000) := '';
   BEGIN
      BEGIN
         SELECT *
           INTO vSourceFileReceived
           FROM CT_MRDS.A_SOURCE_FILE_RECEIVED
          WHERE A_SOURCE_FILE_RECEIVED_KEY = pSourceFileReceivedKey;

         vMsgTmp := ''
            ||cgBL||''||'Details about received file:'
            ||cgBL||''||'--------------------------------'
            ||cgBL||'A_SOURCE_FILE_RECEIVED_KEY = '||vSourceFileReceived.A_SOURCE_FILE_RECEIVED_KEY
            ||cgBL||'A_SOURCE_FILE_CONFIG_KEY = '||vSourceFileReceived.A_SOURCE_FILE_CONFIG_KEY
            ||cgBL||'SOURCE_FILE_NAME = '||vSourceFileReceived.SOURCE_FILE_NAME
            ||cgBL||'CHECKSUM = '||vSourceFileReceived.CHECKSUM
            ||cgBL||'CREATED = '||vSourceFileReceived.CREATED
            ||cgBL||'BYTES = '||vSourceFileReceived.BYTES
            ||cgBL||'RECEPTION_DATE = '||vSourceFileReceived.RECEPTION_DATE
            ||cgBL||'PROCESSING_STATUS = '||vSourceFileReceived.PROCESSING_STATUS
            ||cgBL||'EXTERNAL_TABLE_NAME = '||vSourceFileReceived.EXTERNAL_TABLE_NAME
            ||cgBL||'PARTITION_YEAR = '||vSourceFileReceived.PARTITION_YEAR
            ||cgBL||'PARTITION_MONTH = '||vSourceFileReceived.PARTITION_MONTH
            ||cgBL||''||'--------------------------------'
            ;
      EXCEPTION
         WHEN NO_DATA_FOUND THEN
            vMsgTmp := 'There is no data in A_SOURCE_FILE_RECEIVED where A_SOURCE_FILE_RECEIVED_KEY = '||pSourceFileReceivedKey;
            RETURN vMsgTmp;
      END;

      IF pIncludeConfigInfo > 0 THEN
         vMsgTmp := vMsgTmp || cgBL || CT_MRDS.FILE_MANAGER.GET_DET_SOURCE_FILE_CONFIG_INFO(vSourceFileReceived.A_SOURCE_FILE_CONFIG_KEY, pIncludeContainerInfo, pIncludeColumnFormatInfo);
      END IF;
      RETURN vMsgTmp;
   EXCEPTION
      WHEN OTHERS THEN
         RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode => SQLCODE));
   END GET_DET_SOURCE_FILE_RECEIVED_INFO;

   ----------------------------------------------------------------------------------------------------

   -- Returns a human-readable description of one USER_LOAD_OPERATIONS row
   -- (the Autonomous Database DBMS_CLOUD load-operation view) for the given ID.
   -- Missing IDs yield a message, not an error.
   FUNCTION GET_DET_USER_LOAD_OPERATIONS (
      pOperationId PLS_INTEGER
   ) RETURN VARCHAR2
   IS
      vUserLoadOperations USER_LOAD_OPERATIONS%ROWTYPE;
      vMsgTmp             VARCHAR2(32000) := '';
   BEGIN
      BEGIN
         SELECT *
           INTO vUserLoadOperations
           FROM USER_LOAD_OPERATIONS
          WHERE id = pOperationId;

         vMsgTmp := ''
            ||''||'Details about USER_LOAD_OPERATIONS where ID = '||pOperationId
            ||cgBL||''||'--------------------------------'
            ||cgBL||'ID = '||vUserLoadOperations.ID
            ||cgBL||'TYPE = '||vUserLoadOperations.TYPE
            ||cgBL||'SID = '||vUserLoadOperations.SID
            ||cgBL||'SERIAL# = '||vUserLoadOperations.SERIAL#
            ||cgBL||'START_TIME = '||vUserLoadOperations.START_TIME
            ||cgBL||'UPDATE_TIME = '||vUserLoadOperations.UPDATE_TIME
            ||cgBL||'STATUS = '||vUserLoadOperations.STATUS
            ||cgBL||'OWNER_NAME = '||vUserLoadOperations.OWNER_NAME
            ||cgBL||'TABLE_NAME = '||vUserLoadOperations.TABLE_NAME
            ||cgBL||'PARTITION_NAME = '||vUserLoadOperations.PARTITION_NAME
            ||cgBL||'SUBPARTITION_NAME = '||vUserLoadOperations.SUBPARTITION_NAME
            ||cgBL||'FILE_URI_LIST = '||vUserLoadOperations.FILE_URI_LIST
            ||cgBL||'ROWS_LOADED = '||vUserLoadOperations.ROWS_LOADED
            ||cgBL||'LOGFILE_TABLE = '||vUserLoadOperations.LOGFILE_TABLE
            ||cgBL||'BADFILE_TABLE = '||vUserLoadOperations.BADFILE_TABLE
            ||cgBL||'STATUS_TABLE = '||vUserLoadOperations.STATUS_TABLE
            ||cgBL||'TEMPEXT_TABLE = '||vUserLoadOperations.TEMPEXT_TABLE
            ||cgBL||'CREDENTIAL_NAME = '||vUserLoadOperations.CREDENTIAL_NAME
            ||cgBL||'EXPIRATION_TIME = '||vUserLoadOperations.EXPIRATION_TIME
            ||cgBL||''||'--------------------------------'
            ;
      EXCEPTION
         WHEN NO_DATA_FOUND THEN
            vMsgTmp := 'There is no data in USER_LOAD_OPERATIONS where ID = '||pOperationId;
            RETURN vMsgTmp;
      END;

      RETURN vMsgTmp;
   EXCEPTION
      WHEN OTHERS THEN
         RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode => SQLCODE));
   END GET_DET_USER_LOAD_OPERATIONS;

----------------------------------------------------------------------------------------------------

   -- Convenience wrapper around ENV_MANAGER.ANALYZE_VALIDATION_ERRORS: derives the
   -- validation log table, template schema/table and CSV URI from the received-file
   -- key and returns the analysis text. On any failure it returns an error STRING
   -- instead of raising (callers rely on this contract).
   FUNCTION ANALYZE_VALIDATION_ERRORS(
      pSourceFileReceivedKey IN NUMBER
   ) RETURN VARCHAR2
   IS
      vSourceFileReceived CT_MRDS.A_SOURCE_FILE_RECEIVED%ROWTYPE;
      vSourceFileConfig   CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE;
      vValidationLogTable VARCHAR2(128);
      vTemplateSchema     VARCHAR2(128);
      vTemplateTable      VARCHAR2(128);
      vCsvFileUri         VARCHAR2(4000);
      vParameters         VARCHAR2(4000);
      vResult             VARCHAR2(32000);
   BEGIN
      vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST(
         'pSourceFileReceivedKey => ' || pSourceFileReceivedKey
      ));
      ENV_MANAGER.LOG_PROCESS_EVENT('Start', 'DEBUG', vParameters);

      -- Resolve the received-file row and its configuration.
      BEGIN
         SELECT *
           INTO vSourceFileReceived
           FROM CT_MRDS.A_SOURCE_FILE_RECEIVED sfr
          WHERE sfr.A_SOURCE_FILE_RECEIVED_KEY = pSourceFileReceivedKey;

         SELECT *
           INTO vSourceFileConfig
           FROM CT_MRDS.A_SOURCE_FILE_CONFIG sfc
          WHERE sfc.A_SOURCE_FILE_CONFIG_KEY = vSourceFileReceived.A_SOURCE_FILE_CONFIG_KEY;
      EXCEPTION
         WHEN NO_DATA_FOUND THEN
            ENV_MANAGER.LOG_PROCESS_ERROR('Source file or config not found for key: ' || pSourceFileReceivedKey, vParameters);
            RETURN 'Error: Source file with key ' || pSourceFileReceivedKey || ' not found in A_SOURCE_FILE_RECEIVED table';
      END;

      -- Split TEMPLATE_TABLE_NAME ('SCHEMA.TABLE') into its two parts.
      vTemplateSchema := REGEXP_SUBSTR(vSourceFileConfig.TEMPLATE_TABLE_NAME, '^([^.]+)');
      vTemplateTable  := REGEXP_SUBSTR(vSourceFileConfig.TEMPLATE_TABLE_NAME, '\.(.+)$', 1, 1, NULL, 1);

      -- Build the CSV file URI using the official 3-level INBOX path pattern.
      vCsvFileUri := ENV_MANAGER.gvInboxBucketUri ||
                     'INBOX/' || vSourceFileConfig.A_SOURCE_KEY || '/' ||
                     vSourceFileConfig.SOURCE_FILE_ID || '/' ||
                     vSourceFileConfig.TABLE_ID || '/' ||
                     vSourceFileReceived.SOURCE_FILE_NAME;

      -- Find the most recent DBMS_CLOUD validation log table (VALIDATE$<n>_LOG).
      -- FIX: the original filtered with LIKE 'VALIDATE$%_LOG' (where '_' is itself
      -- a single-character wildcard) and ordered by TO_NUMBER of a possibly-NULL
      -- regexp capture; NULL sorts first under DESC, so a non-numeric VALIDATE$
      -- table could be picked. An anchored REGEXP_LIKE guarantees a numeric id.
      -- NOTE(review): "most recent log table overall" is still only a heuristic —
      -- it is not provably the log of THIS file's validation run; confirm upstream.
      BEGIN
         SELECT table_name
           INTO vValidationLogTable
           FROM (
                  SELECT table_name
                    FROM USER_TABLES
                   WHERE REGEXP_LIKE(table_name, '^VALIDATE\$[0-9]+_LOG$')
                   ORDER BY TO_NUMBER(REGEXP_SUBSTR(table_name, '\$([0-9]+)_', 1, 1, NULL, 1)) DESC
                )
          WHERE ROWNUM = 1;
      EXCEPTION
         WHEN NO_DATA_FOUND THEN
            -- Preserved fallback: a sentinel name the downstream analyzer will report on.
            vValidationLogTable := 'VALIDATE$999_LOG';
      END;

      ENV_MANAGER.LOG_PROCESS_EVENT('Calling ENV_MANAGER.ANALYZE_VALIDATION_ERRORS with parameters: ' ||
         'LogTable=' || vValidationLogTable ||
         ', Schema=' || vTemplateSchema ||
         ', Table=' || vTemplateTable ||
         ', URI=' || SUBSTR(vCsvFileUri, 1, 100) || '...', 'DEBUG', vParameters);

      vResult := ENV_MANAGER.ANALYZE_VALIDATION_ERRORS(
         pValidationLogTable => vValidationLogTable,
         pTemplateSchema     => vTemplateSchema,
         pTemplateTable      => vTemplateTable,
         pCsvFileUri         => vCsvFileUri
      );

      ENV_MANAGER.LOG_PROCESS_EVENT('End', 'DEBUG', vParameters);
      RETURN vResult;

   EXCEPTION
      WHEN OTHERS THEN
         ENV_MANAGER.LOG_PROCESS_ERROR('Error in ANALYZE_VALIDATION_ERRORS: ' || SQLERRM, vParameters);
         RETURN 'Error analyzing validation errors: ' || SQLERRM ||
                cgBL || 'Source File Key: ' || pSourceFileReceivedKey ||
                cgBL || 'Check A_SOURCE_FILE_RECEIVED and A_SOURCE_FILE_CONFIG tables for data integrity.';
   END ANALYZE_VALIDATION_ERRORS;

   ----------------------------------------------------------------------------------------------------
   -- EXTERNAL TABLE BATCH OPERATIONS IMPLEMENTATION (MARS-1057)
   ----------------------------------------------------------------------------------------------------

   -- Creates the set of three external tables (INBOX / ODS / ARCHIVE) for one
   -- configuration row, using the official path patterns. When pRecreate is TRUE
   -- any existing tables are dropped first (drop errors are logged as WARNING
   -- and do not abort the run). Raises -20001 for a missing config, -20002 for
   -- any creation failure.
   PROCEDURE CREATE_EXTERNAL_TABLES_SET (
      pSourceFileConfigKey IN NUMBER,
      pRecreate            IN BOOLEAN DEFAULT FALSE
   )
   IS
      vSourceKey         VARCHAR2(50);
      vSourceFileId      VARCHAR2(100);
      vTableId           VARCHAR2(100);
      vTemplateTableName VARCHAR2(200);
      vEncoding          VARCHAR2(50);
      vDelimiter         VARCHAR2(10);

      vInboxTableName    VARCHAR2(200);
      vOdsTableName      VARCHAR2(200);
      vArchiveTableName  VARCHAR2(200);

      vInboxPrefix       VARCHAR2(500);
      vOdsPrefix         VARCHAR2(500);
      vArchivePrefix     VARCHAR2(500);

      vParameters        VARCHAR2(4000);

      -- Drops ODS.<pTableName> if present; failures are logged and swallowed
      -- deliberately (best-effort cleanup before recreation).
      -- FIX: the existence-count variable is now local instead of a shared
      -- outer-scope variable.
      PROCEDURE DROP_IF_EXISTS(pTableName VARCHAR2) IS
         vTableExists NUMBER;
      BEGIN
         SELECT COUNT(*)
           INTO vTableExists
           FROM ALL_TABLES
          WHERE OWNER = 'ODS' AND TABLE_NAME = pTableName;

         IF vTableExists > 0 THEN
            ENV_MANAGER.LOG_PROCESS_EVENT('Dropping existing table: ' || pTableName, 'INFO');
            EXECUTE IMMEDIATE 'DROP TABLE ODS.' || pTableName;
         END IF;
      EXCEPTION
         WHEN OTHERS THEN
            ENV_MANAGER.LOG_PROCESS_EVENT('Error dropping table ' || pTableName || ': ' || SQLERRM, 'WARNING');
      END DROP_IF_EXISTS;

   BEGIN
      vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST(
         'pSourceFileConfigKey => ' || NVL(TO_CHAR(pSourceFileConfigKey), 'NULL'),
         'pRecreate => ' || CASE WHEN pRecreate THEN 'TRUE' ELSE 'FALSE' END
      ));
      ENV_MANAGER.LOG_PROCESS_EVENT('Start CREATE_EXTERNAL_TABLES_SET', 'INFO', vParameters);

      -- 1. Retrieve configuration from A_SOURCE_FILE_CONFIG.
      BEGIN
         SELECT A_SOURCE_KEY, SOURCE_FILE_ID, TABLE_ID, TEMPLATE_TABLE_NAME,
                NVL(ENCODING, 'UTF8')
           INTO vSourceKey, vSourceFileId, vTableId, vTemplateTableName,
                vEncoding
           FROM CT_MRDS.A_SOURCE_FILE_CONFIG
          WHERE A_SOURCE_FILE_CONFIG_KEY = pSourceFileConfigKey;

         -- Default delimiter: A_SOURCE_FILE_CONFIG has no DELIMITER column.
         vDelimiter := ',';
      EXCEPTION
         WHEN NO_DATA_FOUND THEN
            vgMsgTmp := 'Source file config not found: ' || pSourceFileConfigKey;
            ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters);
            RAISE_APPLICATION_ERROR(-20001, vgMsgTmp);
      END;

      -- 2. Generate table names.
      vInboxTableName   := vTableId || '_INBOX';
      vOdsTableName     := vTableId || '_ODS';
      vArchiveTableName := vTableId || '_ARCHIVE';

      -- 3. Generate paths (official patterns: INBOX 3-level, ODS/ARCHIVE 2-level).
      vInboxPrefix   := 'INBOX/' || vSourceKey || '/' || vSourceFileId || '/' || vTableId;
      vOdsPrefix     := 'ODS/' || vSourceKey || '/' || vTableId;
      vArchivePrefix := 'ARCHIVE/' || vSourceKey || '/' || vTableId;

      ENV_MANAGER.LOG_PROCESS_EVENT(
         'Creating external tables for: ' || vSourceKey || '/' || vSourceFileId || '/' || vTableId,
         'INFO'
      );

      -- 4. Drop existing tables on request.
      IF pRecreate THEN
         DROP_IF_EXISTS(vInboxTableName);
         DROP_IF_EXISTS(vOdsTableName);
         DROP_IF_EXISTS(vArchiveTableName);
      END IF;

      -- 5. INBOX external table.
      ENV_MANAGER.LOG_PROCESS_EVENT('Creating INBOX external table: ' || vInboxTableName, 'INFO');
      ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE(
         pTableName         => vInboxTableName,
         pTemplateTableName => vTemplateTableName,
         pPrefix            => vInboxPrefix,
         pBucketUri         => ENV_MANAGER.gvInboxBucketUri,
         pDelimiter         => vDelimiter,
         pEncoding          => vEncoding
      );

      -- 6. ODS external table.
      ENV_MANAGER.LOG_PROCESS_EVENT('Creating ODS external table: ' || vOdsTableName, 'INFO');
      ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE(
         pTableName         => vOdsTableName,
         pTemplateTableName => vTemplateTableName,
         pPrefix            => vOdsPrefix,
         pBucketUri         => ENV_MANAGER.gvDataBucketUri,
         pDelimiter         => vDelimiter,
         pEncoding          => vEncoding
      );

      -- 7. ARCHIVE external table.
      ENV_MANAGER.LOG_PROCESS_EVENT('Creating ARCHIVE external table: ' || vArchiveTableName, 'INFO');
      ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE(
         pTableName         => vArchiveTableName,
         pTemplateTableName => vTemplateTableName,
         pPrefix            => vArchivePrefix,
         pBucketUri         => ENV_MANAGER.gvArchiveBucketUri,
         pDelimiter         => vDelimiter,
         pEncoding          => vEncoding
      );

      ENV_MANAGER.LOG_PROCESS_EVENT(
         'End CREATE_EXTERNAL_TABLES_SET - Successfully created all 3 external tables for config: ' || pSourceFileConfigKey,
         'INFO',
         vParameters
      );

   EXCEPTION
      WHEN OTHERS THEN
         vgMsgTmp := 'Error creating external tables for config ' || pSourceFileConfigKey || ': ' || SQLERRM;
         ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters);
         RAISE_APPLICATION_ERROR(-20002, vgMsgTmp);
   END CREATE_EXTERNAL_TABLES_SET;

   ----------------------------------------------------------------------------------------------------

   -- Creates external-table sets for every INPUT configuration matching the
   -- optional filters. Per-config failures are logged and counted but do not stop
   -- the batch; if any config failed, -20003 is raised at the end with a summary.
   PROCEDURE CREATE_EXTERNAL_TABLES_BATCH (
      pSourceKey    IN VARCHAR2 DEFAULT NULL,
      pSourceFileId IN VARCHAR2 DEFAULT NULL,
      pTableId      IN VARCHAR2 DEFAULT NULL,
      pRecreate     IN BOOLEAN  DEFAULT FALSE
   )
   IS
      vCount      NUMBER := 0;
      vProcessed  NUMBER := 0;
      vFailed     NUMBER := 0;
      vParameters VARCHAR2(4000);
   BEGIN
      vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST(
         'pSourceKey => ''' || NVL(pSourceKey, 'NULL') || '''',
         'pSourceFileId => ''' || NVL(pSourceFileId, 'NULL') || '''',
         'pTableId => ''' || NVL(pTableId, 'NULL') || '''',
         'pRecreate => ' || CASE WHEN pRecreate THEN 'TRUE' ELSE 'FALSE' END
      ));

      ENV_MANAGER.LOG_PROCESS_EVENT('Start CREATE_EXTERNAL_TABLES_BATCH', 'INFO', vParameters);

      -- Iterate over matching configurations (INPUT files only); a NULL filter
      -- matches everything.
      FOR rec IN (
         SELECT A_SOURCE_FILE_CONFIG_KEY, A_SOURCE_KEY, SOURCE_FILE_ID, TABLE_ID
           FROM CT_MRDS.A_SOURCE_FILE_CONFIG
          WHERE SOURCE_FILE_TYPE = 'INPUT'
            AND (pSourceKey IS NULL OR A_SOURCE_KEY = pSourceKey)
            AND (pSourceFileId IS NULL OR SOURCE_FILE_ID = pSourceFileId)
            AND (pTableId IS NULL OR TABLE_ID = pTableId)
          ORDER BY A_SOURCE_KEY, SOURCE_FILE_ID, TABLE_ID
      ) LOOP
         vCount := vCount + 1;

         BEGIN
            ENV_MANAGER.LOG_PROCESS_EVENT(
               'Creating external tables set for: ' || rec.A_SOURCE_KEY || '/' ||
               rec.SOURCE_FILE_ID || '/' || rec.TABLE_ID,
               'INFO'
            );

            CREATE_EXTERNAL_TABLES_SET(
               pSourceFileConfigKey => rec.A_SOURCE_FILE_CONFIG_KEY,
               pRecreate            => pRecreate
            );

            vProcessed := vProcessed + 1;

         EXCEPTION
            WHEN OTHERS THEN
               -- Record the failure and continue with the remaining configs.
               vFailed := vFailed + 1;
               ENV_MANAGER.LOG_PROCESS_EVENT(
                  'Failed to create tables for config ' || rec.A_SOURCE_FILE_CONFIG_KEY ||
                  ' (' || rec.A_SOURCE_KEY || '/' || rec.SOURCE_FILE_ID || '/' || rec.TABLE_ID || '): ' || SQLERRM,
                  'ERROR'
               );
         END;
      END LOOP;

      ENV_MANAGER.LOG_PROCESS_EVENT(
         'End CREATE_EXTERNAL_TABLES_BATCH - Total: ' || vCount ||
         ', Processed: ' || vProcessed || ', Failed: ' || vFailed,
         'INFO',
         vParameters
      );

      IF vFailed > 0 THEN
         vgMsgTmp := 'Batch completed with errors. Processed: ' || vProcessed || ', Failed: ' || vFailed;
         ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'WARNING', vParameters);
         RAISE_APPLICATION_ERROR(-20003, vgMsgTmp);
      END IF;

   END CREATE_EXTERNAL_TABLES_BATCH;

   ----------------------------------------------------------------------------------------------------
   -- PACKAGE VERSION MANAGEMENT FUNCTIONS IMPLEMENTATION
   ----------------------------------------------------------------------------------------------------

   -- Returns the package version constant declared in the spec.
   FUNCTION GET_VERSION
      RETURN VARCHAR2
   IS
   BEGIN
      RETURN PACKAGE_VERSION;
   END GET_VERSION;

   ----------------------------------------------------------------------------------------------------

   -- Returns the formatted build info via the centralized ENV_MANAGER helper.
   FUNCTION GET_BUILD_INFO
      RETURN VARCHAR2
   IS
   BEGIN
      RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO(
         pPackageName => 'FILE_MANAGER',
         pVersion     => PACKAGE_VERSION,
         pBuildDate   => PACKAGE_BUILD_DATE,
         pAuthor      => PACKAGE_AUTHOR
      );
   END GET_BUILD_INFO;

   ----------------------------------------------------------------------------------------------------

   -- Returns the formatted version history via the centralized ENV_MANAGER helper.
   FUNCTION GET_VERSION_HISTORY
      RETURN VARCHAR2
   IS
   BEGIN
      RETURN ENV_MANAGER.FORMAT_VERSION_HISTORY(
         pPackageName    => 'FILE_MANAGER',
         pVersionHistory => VERSION_HISTORY
      );
   END GET_VERSION_HISTORY;

   ----------------------------------------------------------------------------------------------------

END;

/
-- FIX: the original ended with TWO '/' terminators; the duplicate would make
-- SQL*Plus execute the CREATE statement a second time and has been removed.
diff --git a/MARS_Packages/REL03/MARS-1057/new_version/FILE_MANAGER.pkg b/MARS_Packages/REL03/MARS-1057/new_version/FILE_MANAGER.pkg
new file mode 100644
index 0000000..c7529a4
--- /dev/null
+++ b/MARS_Packages/REL03/MARS-1057/new_version/FILE_MANAGER.pkg
@@ -0,0 +1,696 @@
+create or replace PACKAGE CT_MRDS.FILE_MANAGER
+AUTHID CURRENT_USER
+AS
+  /**
+   * General comment for package: Please put comments for functions and procedures as shown in below example.
+   * It is a standard.
+ * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function + * which returns documentation text for confluence page (to Copy-Paste it). + **/ + + -- Example comment: + /** + * @name EX_PROCEDURE_NAME + * @desc Procedure description + * @example select FILE_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual; + * @ex_rslt Example Result + **/ + + -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.4.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-11-27 14:00:00'; + PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; + + -- Version History (Latest changes first) + VERSION_HISTORY CONSTANT VARCHAR2(4000) := + '3.4.0 (2025-11-27): MARS-1057 - Added CREATE_EXTERNAL_TABLES_SET and CREATE_EXTERNAL_TABLES_BATCH procedures for batch external table creation' || CHR(13)||CHR(10) || + '3.3.0 (2025-11-26): MARS-1056 - Fixed VARCHAR2 definitions in GENERATE_EXTERNAL_TABLE_PARAMS to preserve CHAR/BYTE semantics from template tables' || CHR(13)||CHR(10) || + '3.2.1 (2025-11-24): MARS-1049 - Added pEncoding parameter support for CSV character set specification' || CHR(13)||CHR(10) || + '3.2.0 (2025-10-22): Added package versioning system using centralized ENV_MANAGER functions' || CHR(13)||CHR(10) || + '3.1.0 (2025-10-20): Enhanced PROCESS_SOURCE_FILE with 6-step validation workflow' || CHR(13)||CHR(10) || + '3.0.0 (2025-10-15): Separated export procedures into dedicated DATA_EXPORTER package' || CHR(13)||CHR(10) || + '2.5.0 (2025-10-10): Added DELETE_SOURCE_CASCADE for safe configuration removal' || CHR(13)||CHR(10) || + '2.0.0 (2025-09-25): Added official path patterns support (INBOX 3-level, ODS 2-level, ARCHIVE 2-level)' || CHR(13)||CHR(10) || + '1.0.0 (2025-09-01): Initial release with file processing and validation capabilities'; + + TYPE tSourceFileReceived IS RECORD + ( + A_SOURCE_FILE_RECEIVED_KEY CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE, + 
A_SOURCE_FILE_CONFIG_KEY CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_CONFIG_KEY%TYPE, + SOURCE_FILE_PREFIX_INBOX VARCHAR2(430), + SOURCE_FILE_PREFIX_ODS VARCHAR2(430), + SOURCE_FILE_PREFIX_QUARANTINE VARCHAR2(430), + SOURCE_FILE_PREFIX_ARCHIVE VARCHAR2(430), + SOURCE_FILE_NAME CT_MRDS.A_SOURCE_FILE_RECEIVED.SOURCE_FILE_NAME%TYPE, + RECEPTION_DATE CT_MRDS.A_SOURCE_FILE_RECEIVED.RECEPTION_DATE%TYPE, + PROCESSING_STATUS CT_MRDS.A_SOURCE_FILE_RECEIVED.PROCESSING_STATUS%TYPE, + EXTERNAL_TABLE_NAME CT_MRDS.A_SOURCE_FILE_RECEIVED.EXTERNAL_TABLE_NAME%TYPE + ); + + cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); + vgSourceFileConfigKey PLS_INTEGER; + vgMsgTmp VARCHAR2(32000); + + + + --------------------------------------------------------------------------------------------------------------------------- + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name GET_SOURCE_FILE_CONFIG + * @desc Get source file type by matching the source file name against source file type naming patterns + * or by specifying the id of a received source file. + * @example ... + * @ex_rslt "CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE" + **/ + FUNCTION GET_SOURCE_FILE_CONFIG(pFileUri IN VARCHAR2 DEFAULT NULL + , pSourceFileReceivedKey IN NUMBER DEFAULT NULL + , pSourceFileConfigKey IN NUMBER DEFAULT NULL) + RETURN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; + + + + /** + * @name REGISTER_SOURCE_FILE_RECEIVED + * @desc Register a newly received source file in A_SOURCE_FILE_RECEIVED table. + * This overload automatically determines source file type from the file name. + * It returns the value of A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY column for newly added record. 
+ * @example vSourceFileReceivedKey := FILE_MANAGER.REGISTER_SOURCE_FILE_RECEIVED(pSourceFileReceivedName => 'INBOX/C2D/UC_DISSEM/UC_NMA_DISSEM/UC_NMA_DISSEM-277740.csv'); + * @ex_rslt 3245 + **/ + FUNCTION REGISTER_SOURCE_FILE_RECEIVED ( + pSourceFileReceivedName IN VARCHAR2 + ) + RETURN PLS_INTEGER; + + + + /** + * @name REGISTER_SOURCE_FILE_RECEIVED + * @desc Register a new new source file in A_SOURCE_FILE_RECEIVED table based on pSourceFileReceivedName and pSourceFileConfig. + * Then it returns the value of A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY column for newly added record. + * @example vSourceFileReceivedKey := FILE_MANAGER.REGISTER_SOURCE_FILE_RECEIVED( + * pSourceFileReceivedName => 'INBOX/C2D/UC_DISSEM/UC_NMA_DISSEM/UC_NMA_DISSEM-277740.csv' + * ,pSourceFileConfig => ...A_SOURCE_FILE_CONFIG%ROWTYPE... ); + * @ex_rslt 3245 + **/ + FUNCTION REGISTER_SOURCE_FILE_RECEIVED ( + pSourceFileReceivedName IN VARCHAR2, + pSourceFileConfig IN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE + ) + RETURN PLS_INTEGER; + + + + /** + * @name SET_SOURCE_FILE_RECEIVED_STATUS + * @desc Set status of file in A_SOURCE_FILE_RECEIVED table - PROCESSING_STATUS column + * based on A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY + * and provided value of pStatus parameter + * @example exec FILE_MANAGER.SET_SOURCE_FILE_RECEIVED_STATUS(pSourceFileReceivedKey => 377, pStatus => 'READY_FOR_INGESTION'); + **/ + PROCEDURE SET_SOURCE_FILE_RECEIVED_STATUS( + pSourceFileReceivedKey IN PLS_INTEGER, + pStatus IN VARCHAR2 + ); + + + + /** + * @name GET_EXTERNAL_TABLE_COLUMNS + * @desc Function used to get string with all table columns definitions based on pTargetTableTemplate "TEMPLATE TABLE" name. + * It used for creating "EXTERNAL TABLE" using CREATE_EXTERNAL_TABLE procedure. 
+ * @example select FILE_MANAGER.GET_EXTERNAL_TABLE_COLUMNS(pTargetTableTemplate => 'CT_ET_TEMPLATES.LM_STANDING_FACILITIES_HEADER') from dual; + * @ex_rslt "A_KEY" NUMBER(38,0) NOT NULL ENABLE, + * "A_WORKFLOW_HISTORY_KEY" NUMBER(38,0) NOT NULL ENABLE, + * "REV_NUMBER" NUMBER(28,0), + * "REF_DATE" DATE, + * "FREE_TEXT" VARCHAR2(1000 CHAR), + * "MLF_BS_TOTAL" NUMBER(28,10), + * "DF_BS_TOTAL" NUMBER(28,10), + * "MLF_SF_TOTAL" NUMBER(28,10), + * "DF_SF_TOTAL" NUMBER(28,10) + **/ + FUNCTION GET_EXTERNAL_TABLE_COLUMNS ( + pTargetTableTemplate IN VARCHAR2 + ) + RETURN CLOB; + + + + /** + * @name CREATE_EXTERNAL_TABLE + * @desc A wrapper procedure for DBMS_CLOUD.CREATE_EXTERNAL_TABLE which creates External Table + * MARS-1049: Added pEncoding parameter for CSV character set specification + * @param pEncoding - Character set encoding for CSV files (e.g., 'UTF8', 'WE8MSWIN1252') + * If provided, adds CHARACTERSET clause to external table definition + * @example + * begin + * FILE_MANAGER.CREATE_EXTERNAL_TABLE( + * pTableName => 'STANDING_FACILITIES_HEADER', + * pTemplateTableName => 'CT_ET_TEMPLATES.LM_STANDING_FACILITIES_HEADER', + * pPrefix => 'ODS/LM/STANDING_FACILITIES_HEADER/', + * pBucketUri => 'https://objectstorage.eu-frankfurt-1.oraclecloud.com/n/frcnomajoc7v/b/mrds_data_tst/o/', + * pFileName => NULL, + * pDelimiter => ',', + * pEncoding => 'UTF8' + * ); + * end; + **/ + PROCEDURE CREATE_EXTERNAL_TABLE ( + pTableName IN VARCHAR2, + pTemplateTableName IN VARCHAR2, + pPrefix IN VARCHAR2, + pBucketUri IN VARCHAR2 DEFAULT ENV_MANAGER.gvInboxBucketUri, + pFileName IN VARCHAR2 DEFAULT NULL, + pDelimiter IN VARCHAR2 DEFAULT ',', + pEncoding IN VARCHAR2 DEFAULT NULL -- MARS-1049: NOWY PARAMETR + ); + + + + /** + * @name CREATE_EXTERNAL_TABLE + * @desc Creates External Table for single file provided by + * pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * @example exec FILE_MANAGER.CREATE_EXTERNAL_TABLE(pSourceFileReceivedKey => 
377);; + **/ + PROCEDURE CREATE_EXTERNAL_TABLE ( + pSourceFileReceivedKey IN NUMBER + ); + + + + /** + * @name VALIDATE_SOURCE_FILE_RECEIVED + * @desc A wrapper procedure for DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE + * It validate External table build upon single file + * provided by pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * @example exec FILE_MANAGER.VALIDATE_SOURCE_FILE_RECEIVED(pSourceFileReceivedKey => 377); + **/ + PROCEDURE VALIDATE_SOURCE_FILE_RECEIVED + ( + pSourceFileReceivedKey IN NUMBER + ); + + + /** + * @name VALIDATE_EXTERNAL_TABLE + * @desc A wrapper function for DBMS_CLOUD.VALIDATE_EXTERNAL_TABLE. + * It validates External Table provided by parameter pTableName. + * It returns: PASSED or FAILED. + * @example + * declare + * vStatus VARCHAR2(100); + * begin + * vStatus := FILE_MANAGER.VALIDATE_EXTERNAL_TABLE(pTableName => 'STANDING_FACILITIES_HEADER'); + * DBMS_OUTPUT.PUT_LINE('vStatus = '||vStatus); + * end; + * + * @ex_rslt FAILED + **/ + FUNCTION VALIDATE_EXTERNAL_TABLE(pTableName IN VARCHAR2) + RETURN VARCHAR2; + + + /** + * @name S_VALIDATE_EXTERNAL_TABLE + * @desc A function which checks if SELECT query reterns any rows. + * It trys to selects External Table provided by parameter pTableName. + * It returns: PASSED or FAILED. 
+ * @example + * declare + * vStatus VARCHAR2(100); + * begin + * vStatus := FILE_MANAGER.S_VALIDATE_EXTERNAL_TABLE(pTableName => 'STANDING_FACILITIES_HEADER'); + * DBMS_OUTPUT.PUT_LINE('vStatus = '||vStatus); + * end; + * + * @ex_rslt PASSED + **/ + FUNCTION S_VALIDATE_EXTERNAL_TABLE(pTableName IN VARCHAR2) + RETURN VARCHAR2; + + + + /** + * @name DROP_EXTERNAL_TABLE + * @desc It drops External Table for single file provided by + * pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * @example exec FILE_MANAGER.DROP_EXTERNAL_TABLE(pSourceFileReceivedKey => 377); + **/ + PROCEDURE DROP_EXTERNAL_TABLE ( + pSourceFileReceivedKey IN NUMBER + ); + + + + /** + * @name COPY_FILE + * @desc It copies file provided by + * pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * into destination provided by pDestination parameter. + * pDestination parameter allowed values are: 'ODS' + * @example exec FILE_MANAGER.COPY_FILE(pSourceFileReceivedKey => 377, pDestination => 'ODS'); + **/ + PROCEDURE COPY_FILE( + pSourceFileReceivedKey IN NUMBER, + pDestination IN VARCHAR2 + ); + + + + + /** + * @name MOVE_FILE + * @desc It moves file provided by + * pSourceFileReceivedKey parameter (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY) + * into destination provided by pDestination parameter. + * pDestination parameter allowed values are: 'ODS', 'QUARANTINE' + * @example exec FILE_MANAGER.MOVE_FILE(pSourceFileReceivedKey => 377, pDestination => 'ODS'); + **/ + PROCEDURE MOVE_FILE( + pSourceFileReceivedKey IN NUMBER, + pDestination IN VARCHAR2 + ); + + + + /** + * @name DELETE_FOLDER_CONTENTS + * @desc It deletes all files from specified folder in the cloud storage. + * The procedure lists all objects in the specified folder prefix and deletes them one by one. 
+ pBucketArea parameter specifies which bucket to use: 'INBOX', 'DATA', 'ARCHIVE'
+ pFolderPrefix parameter specifies the folder path within the bucket (e.g., 'C2D/UC_DISSEM/UC_NMA_DISSEM/')
+ * @example exec FILE_MANAGER.DELETE_FOLDER_CONTENTS(pBucketArea => 'INBOX', pFolderPrefix => 'C2D/UC_DISSEM/UC_NMA_DISSEM/');
+ **/
+ PROCEDURE DELETE_FOLDER_CONTENTS(
+ pBucketArea IN VARCHAR2,
+ pFolderPrefix IN VARCHAR2
+ );
+
+
+
+
+ /**
+ * @name PROCESS_SOURCE_FILE
+ * @desc It processes file provided by pSourceFileReceivedName parameter.
+ * Umbrella procedure that calls:
+ * - REGISTER_SOURCE_FILE_RECEIVED;
+ * - CREATE_EXTERNAL_TABLE;
+ * - VALIDATE_SOURCE_FILE_RECEIVED;
+ * - DROP_EXTERNAL_TABLE;
+ * - MOVE_FILE;
+ * @example exec FILE_MANAGER.PROCESS_SOURCE_FILE(pSourceFileReceivedName => 'INBOX/C2D/UC_DISSEM/UC_NMA_DISSEM/UC_NMA_DISSEM-277740.csv');
+ **/
+ PROCEDURE PROCESS_SOURCE_FILE(pSourceFileReceivedName IN VARCHAR2)
+ ;
+
+
+
+ /**
+ * @name PROCESS_SOURCE_FILE
+ * @desc It processes file provided by pSourceFileReceivedName parameter and returns processing result value.
+ * It returns (success/failure) => 0 / -(value).
+ * Umbrella function that calls PROCESS_SOURCE_FILE procedure.
+ * @example
+ * declare
+ * vResult PLS_INTEGER;
+ * begin
+ * vResult := CT_MRDS.FILE_MANAGER.PROCESS_SOURCE_FILE(PSOURCEFILERECEIVEDNAME => 'INBOX/C2D/UC_DISSEM/UC_NMA_DISSEM/UC_NMA_DISSEM-277740.csv');
+ * DBMS_OUTPUT.PUT_LINE('vResult = ' || vResult);
+ * end;
+ * @ex_rslt 0
+ * -20021
+ **/
+ FUNCTION PROCESS_SOURCE_FILE(pSourceFileReceivedName IN VARCHAR2)
+ RETURN PLS_INTEGER;
+
+
+
+ /**
+ * @name GET_DATE_FORMAT
+ * @desc Returns date format for specified template table name and column name.
+ * Date is taken from configuration A_COLUMN_DATE_FORMAT table.
+ * @example select FILE_MANAGER.GET_DATE_FORMAT( + * pTemplateTableName => 'STANDING_FACILITIES_HEADER', + * pColumnName => 'SNAPSHOT_DATE') + * from dual; + * @ex_rslt DD/MM/YYYY HH24:MI:SS + **/ + FUNCTION GET_DATE_FORMAT( + pTemplateTableName IN VARCHAR2, + pColumnName IN VARCHAR2 + ) RETURN VARCHAR2; + + + + /** + * @name GENERATE_EXTERNAL_TABLE_PARAMS + * @desc It builds two strings: pColumnList and pFieldList for specified Template Table name, by parameter: pTemplateTableName. + * @example + * declare + * vColumnList CLOB; + * vFieldList CLOB; + * begin + * FILE_MANAGER.GENERATE_EXTERNAL_TABLE_PARAMS ( + * pTemplateTableName => 'CT_ET_TEMPLATES.LM_STANDING_FACILITIES_HEADER' + * ,pColumnList => vColumnList + * ,pFieldList => vFieldList + * ); + * DBMS_OUTPUT.PUT_LINE('vColumnList = '||vColumnList); + * DBMS_OUTPUT.PUT_LINE('vFieldList = '||vFieldList); + * end; + * / + **/ + PROCEDURE GENERATE_EXTERNAL_TABLE_PARAMS ( + pTemplateTableName IN VARCHAR2, + pColumnList OUT CLOB, + pFieldList OUT CLOB + ); + + + + + + /** + * @name ADD_SOURCE + * @desc Insert a new record to A_SOURCE table. + * pSourceKey is a PRIMARY KEY value. + **/ + PROCEDURE ADD_SOURCE ( + pSourceKey IN CT_MRDS.A_SOURCE.A_SOURCE_KEY%TYPE, + pSourceName IN CT_MRDS.A_SOURCE.SOURCE_NAME%TYPE + ); + + + + /** + * @name DELETE_SOURCE_CASCADE + * @desc Safely deletes a SOURCE specified by pSourceKey parameter from A_SOURCE table and all dependent tables: + * - A_SOURCE_FILE_CONFIG + * - A_SOURCE_FILE_RECEIVED + * - A_COLUMN_DATE_FORMAT (only if template table is not shared with other source systems) + * The procedure checks if template tables are shared before deleting date format configurations. + * If a template table is used by multiple source systems, date formats are preserved. 
+ * @example CALL CT_MRDS.FILE_MANAGER.DELETE_SOURCE_CASCADE(pSourceKey => 'TEST_SYS'); + **/ + PROCEDURE DELETE_SOURCE_CASCADE ( + pSourceKey IN CT_MRDS.A_SOURCE.A_SOURCE_KEY%TYPE + ); + + + + /** + * @name GET_CONTAINER_SOURCE_FILE_CONFIG_KEY + * @desc For specified parameter pSourceFileId (A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID) + * it returns A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY for related CONTAINER record. + * @example select FILE_MANAGER.GET_CONTAINER_SOURCE_FILE_CONFIG_KEY( + * pSourceFileId => 'UC_DISSEM') + * from dual; + * @ex_rslt 126 + **/ + FUNCTION GET_CONTAINER_SOURCE_FILE_CONFIG_KEY ( + pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE + ) RETURN PLS_INTEGER; + + + + /** + * @name GET_SOURCE_FILE_CONFIG_KEY + * @desc For specified input parameters, + * it returns A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY. + * @example select FILE_MANAGER.GET_SOURCE_FILE_CONFIG_KEY ( + * pSourceFileType => 'INPUT' + * ,pSourceFileId => 'UC_DISSEM' + * ,pTableId => 'UC_NMA_DISSEM') + * from dual; + * @ex_rslt 126 + **/ + FUNCTION GET_SOURCE_FILE_CONFIG_KEY ( + pSourceFileType IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE DEFAULT 'INPUT' + ,pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE + ,pTableId IN CT_MRDS.A_SOURCE_FILE_CONFIG.TABLE_ID%TYPE + ) RETURN PLS_INTEGER; + + + + /** + * @name ADD_SOURCE_FILE_CONFIG + * @desc Insert a new record to A_SOURCE_FILE_CONFIG table. + * MARS-1049: Added pEncoding parameter for CSV character set specification. 
+ * @param pEncoding - Character set encoding for CSV files (e.g., 'UTF8', 'WE8MSWIN1252', 'EE8ISO8859P2') + * If NULL, no CHARACTERSET clause is added to external table definitions + * @example CALL CT_MRDS.FILE_MANAGER.ADD_SOURCE_FILE_CONFIG( + * pSourceKey => 'C2D', pSourceFileType => 'INPUT', + * pSourceFileId => 'UC_DISSEM', pTableId => 'METADATA_LOADS', + * pTemplateTableName => 'CT_ET_TEMPLATES.C2D_A_UC_DISSEM_METADATA_LOADS', + * pEncoding => 'UTF8' + * ); + **/ + PROCEDURE ADD_SOURCE_FILE_CONFIG ( + pSourceKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_KEY%TYPE + ,pSourceFileType IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_TYPE%TYPE + ,pSourceFileId IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_ID%TYPE + ,pSourceFileDesc IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_DESC%TYPE + ,pSourceFileNamePattern IN CT_MRDS.A_SOURCE_FILE_CONFIG.SOURCE_FILE_NAME_PATTERN%TYPE + ,pTableId IN CT_MRDS.A_SOURCE_FILE_CONFIG.TABLE_ID%TYPE DEFAULT NULL + ,pTemplateTableName IN CT_MRDS.A_SOURCE_FILE_CONFIG.TEMPLATE_TABLE_NAME%TYPE DEFAULT NULL + ,pContainerFileKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.CONTAINER_FILE_KEY%TYPE DEFAULT NULL + ,pEncoding IN CT_MRDS.A_SOURCE_FILE_CONFIG.ENCODING%TYPE DEFAULT NULL -- MARS-1049: NOWY PARAMETR + ); + + + + /** + * @name ADD_COLUMN_DATE_FORMAT + * @desc Insert a new record to A_COLUMN_DATE_FORMAT table. + **/ + PROCEDURE ADD_COLUMN_DATE_FORMAT ( + pTemplateTableName IN CT_MRDS.A_COLUMN_DATE_FORMAT.TEMPLATE_TABLE_NAME%TYPE + ,pColumnName IN CT_MRDS.A_COLUMN_DATE_FORMAT.COLUMN_NAME%TYPE + ,pDateFormat IN CT_MRDS.A_COLUMN_DATE_FORMAT.DATE_FORMAT%TYPE + ); + + + + /** + * @name GET_BUCKET_URI + * @desc Function used to get string with bucket http url. 
+ * Possible input values for pBucketArea are: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' + * @example select FILE_MANAGER.GET_BUCKET_URI(pBucketArea => 'ODS') from dual; + * @ex_rslt https://objectstorage.eu-frankfurt-1.oraclecloud.com/n/frcnomajoc7v/b/mrds_data_tst/o/ + **/ + FUNCTION GET_BUCKET_URI(pBucketArea VARCHAR2) + RETURN VARCHAR2; + + + + /** + * @name GET_DET_SOURCE_FILE_CONFIG_INFO + * @desc Function returns details about A_SOURCE_FILE_CONFIG record + * for specified pSourceFileConfigKey (A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY). + * If pIncludeContainerInfo is <> 0 it returns additional info about related Container config record (A_SOURCE_FILE_CONFIG) + * If pIncludeColumnFormatInfo is <> 0 it returns additional info about related ColumnFormat config record (A_COLUMN_DATE_FORMAT) + * @example select FILE_MANAGER.GET_DET_SOURCE_FILE_CONFIG_INFO ( + * pSourceFileConfigKey => 128 + * ,pIncludeContainerInfo => 1 + * ,pIncludeColumnFormatInfo => 1 + * ) from dual; + * @ex_rslt + * Details about File Configuration: + * -------------------------------- + * A_SOURCE_FILE_CONFIG_KEY = 128 + * A_SOURCE_KEY = C2D + * ... + * -------------------------------- + * + * Details about related Container Config: + * -------------------------------- + * A_SOURCE_FILE_CONFIG_KEY = 126 + * A_SOURCE_KEY = C2D + * ... + * -------------------------------- + * + * Column Date Format config entries: + * -------------------------------- + * TEMPLATE_TABLE_NAME = CT_ET_TEMPLATES.C2D_UC_MA_DISSEM + * ... 
+ * --------------------------------
+ **/
+ FUNCTION GET_DET_SOURCE_FILE_CONFIG_INFO (
+ pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
+ ,pIncludeContainerInfo IN PLS_INTEGER DEFAULT 1
+ ,pIncludeColumnFormatInfo IN PLS_INTEGER DEFAULT 1
+ ) RETURN VARCHAR2;
+
+
+
+ /**
+ * @name GET_DET_SOURCE_FILE_RECEIVED_INFO
+ * @desc Function returns details about A_SOURCE_FILE_RECEIVED record
+ * for specified pSourceFileReceivedKey (A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY).
+ * If pIncludeConfigInfo is <> 0 it returns additional info about related Config record (A_SOURCE_FILE_CONFIG)
+ * If pIncludeContainerInfo is <> 0 it returns additional info about related Container config record (A_SOURCE_FILE_CONFIG)
+ * If pIncludeColumnFormatInfo is <> 0 it returns additional info about related ColumnFormat config record (A_COLUMN_DATE_FORMAT)
+ * @example select FILE_MANAGER.GET_DET_SOURCE_FILE_RECEIVED_INFO (
+ * pSourceFileReceivedKey => 377
+ * ,pIncludeConfigInfo => 1
+ * ,pIncludeContainerInfo => 1
+ * ,pIncludeColumnFormatInfo => 1
+ * ) from dual;
+ *
+ **/
+ FUNCTION GET_DET_SOURCE_FILE_RECEIVED_INFO (
+ pSourceFileReceivedKey IN CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE
+ ,pIncludeConfigInfo IN PLS_INTEGER DEFAULT 1
+ ,pIncludeContainerInfo IN PLS_INTEGER DEFAULT 1
+ ,pIncludeColumnFormatInfo IN PLS_INTEGER DEFAULT 1
+ ) RETURN VARCHAR2;
+
+
+
+ /**
+ * @name GET_DET_USER_LOAD_OPERATIONS
+ * @desc Function returns details from USER_LOAD_OPERATIONS table
+ * for specified pOperationId.
+ * @example select FILE_MANAGER.GET_DET_USER_LOAD_OPERATIONS (pOperationId => 3608) from dual; + * @ex_rslt + * Details about USER_LOAD_OPERATIONS where ID = 3608 + * -------------------------------- + * ID = 3608 + * TYPE = VALIDATE + * SID = 31260 + * SERIAL# = 52915 + * START_TIME = 2025-05-20 10.08.24.436983 EUROPE/BELGRADE + * UPDATE_TIME = 2025-05-20 10.08.24.458643 EUROPE/BELGRADE + * STATUS = FAILED + * OWNER_NAME = CT_MRDS + * TABLE_NAME = STANDING_FACILITIES_HEADER + * PARTITION_NAME = + * SUBPARTITION_NAME = + * FILE_URI_LIST = + * ROWS_LOADED = + * LOGFILE_TABLE = VALIDATE$3608_LOG + * BADFILE_TABLE = VALIDATE$3608_BAD + * STATUS_TABLE = + * TEMPEXT_TABLE = + * CREDENTIAL_NAME = + * EXPIRATION_TIME = 2025-05-22 10.08.24.436983000 EUROPE/BELGRADE + * -------------------------------- + **/ + FUNCTION GET_DET_USER_LOAD_OPERATIONS ( + pOperationId PLS_INTEGER + ) RETURN VARCHAR2; + + /** + * @name ANALYZE_VALIDATION_ERRORS + * @desc Wrapper function that analyzes validation errors for a source file using its received key. + * Automatically derives template schema, table name, CSV URI and validation log table + * from file metadata and calls ENV_MANAGER.ANALYZE_VALIDATION_ERRORS. + * @example SELECT FILE_MANAGER.ANALYZE_VALIDATION_ERRORS(63) FROM DUAL; + * @ex_rslt Detailed validation analysis report with column mismatches and solutions + **/ + FUNCTION ANALYZE_VALIDATION_ERRORS( + pSourceFileReceivedKey IN NUMBER + ) RETURN VARCHAR2; + + --------------------------------------------------------------------------------------------------------------------------- + -- EXTERNAL TABLE BATCH OPERATIONS (MARS-1057) + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name CREATE_EXTERNAL_TABLES_SET + * @desc Creates a complete set of 3 external tables (INBOX, ODS, ARCHIVE) for a single configuration + * from A_SOURCE_FILE_CONFIG table. 
Automatically generates table names and paths following + * official path patterns. Optionally drops and recreates existing tables. + * @param pSourceFileConfigKey - Primary key from A_SOURCE_FILE_CONFIG table + * @param pRecreate - If TRUE, drops existing tables before creating new ones; if FALSE, fails if tables exist + * @example BEGIN + * FILE_MANAGER.CREATE_EXTERNAL_TABLES_SET( + * pSourceFileConfigKey => 123, + * pRecreate => FALSE + * ); + * END; + * @ex_rslt Creates three external tables in ODS schema: + * - C2D_A_UC_DISSEM_METADATA_LOADS_INBOX + * - C2D_A_UC_DISSEM_METADATA_LOADS_ODS + * - C2D_A_UC_DISSEM_METADATA_LOADS_ARCHIVE + **/ + PROCEDURE CREATE_EXTERNAL_TABLES_SET ( + pSourceFileConfigKey IN NUMBER, + pRecreate IN BOOLEAN DEFAULT FALSE + ); + + /** + * @name CREATE_EXTERNAL_TABLES_BATCH + * @desc Creates external table sets for multiple configurations based on filter criteria. + * Processes only INPUT type files from A_SOURCE_FILE_CONFIG. Creates 3 tables per configuration + * (INBOX, ODS, ARCHIVE). Continues processing even if individual sets fail. 
+ * @param pSourceKey - Filter by A_SOURCE_KEY (NULL = all sources) + * @param pSourceFileId - Filter by SOURCE_FILE_ID (NULL = all file types) + * @param pTableId - Filter by TABLE_ID (NULL = all tables) + * @param pRecreate - If TRUE, drops and recreates existing tables; if FALSE, skips if tables exist + * @example -- Create all external tables for C2D source + * BEGIN + * FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH( + * pSourceKey => 'C2D', + * pRecreate => FALSE + * ); + * END; + * + * -- Recreate all external tables for all sources + * BEGIN + * FILE_MANAGER.CREATE_EXTERNAL_TABLES_BATCH( + * pRecreate => TRUE + * ); + * END; + * @ex_rslt Returns summary: Total: 10, Processed: 9, Failed: 1 + **/ + PROCEDURE CREATE_EXTERNAL_TABLES_BATCH ( + pSourceKey IN VARCHAR2 DEFAULT NULL, + pSourceFileId IN VARCHAR2 DEFAULT NULL, + pTableId IN VARCHAR2 DEFAULT NULL, + pRecreate IN BOOLEAN DEFAULT FALSE + ); + + --------------------------------------------------------------------------------------------------------------------------- + -- PACKAGE VERSION MANAGEMENT FUNCTIONS + --------------------------------------------------------------------------------------------------------------------------- + + /** + * @name GET_VERSION + * @desc Returns the current version number of the FILE_MANAGER package. + * Uses semantic versioning format (MAJOR.MINOR.PATCH). + * @example SELECT FILE_MANAGER.GET_VERSION() FROM DUAL; + * @ex_rslt 3.2.0 + **/ + FUNCTION GET_VERSION RETURN VARCHAR2; + + /** + * @name GET_BUILD_INFO + * @desc Returns comprehensive build information including version, build date, and author. + * Uses centralized ENV_MANAGER.GET_PACKAGE_VERSION_INFO function. 
+ * @example SELECT FILE_MANAGER.GET_BUILD_INFO() FROM DUAL; + * @ex_rslt Package: FILE_MANAGER + * Version: 3.2.0 + * Build Date: 2025-10-22 16:30:00 + * Author: Grzegorz Michalski + **/ + FUNCTION GET_BUILD_INFO RETURN VARCHAR2; + + /** + * @name GET_VERSION_HISTORY + * @desc Returns complete version history with all releases and changes. + * Uses centralized ENV_MANAGER.FORMAT_VERSION_HISTORY function. + * @example SELECT FILE_MANAGER.GET_VERSION_HISTORY() FROM DUAL; + * @ex_rslt FILE_MANAGER Version History: + * 3.2.0 (2025-10-22): Added package versioning system... + **/ + FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2; + +END; + +/ + +/ diff --git a/MARS_Packages/REL03/MARS-1057/rollback_mars1057.sql b/MARS_Packages/REL03/MARS-1057/rollback_mars1057.sql new file mode 100644 index 0000000..1f9704b --- /dev/null +++ b/MARS_Packages/REL03/MARS-1057/rollback_mars1057.sql @@ -0,0 +1,87 @@ +-- =================================================================== +-- MARS-1057 ROLLBACK SCRIPT: Batch External Table Creation +-- =================================================================== +-- Purpose: Rollback FILE_MANAGER package to v3.3.0 (remove batch external table procedures) +-- Author: Grzegorz Michalski +-- Date: 2025-11-27 + +-- Dynamic spool file generation (using SYS_CONTEXT - no DBA privileges required) +-- Log files are automatically created in log/ subdirectory +-- IMPORTANT: Ensure log/ directory exists before SPOOL (use host mkdir) +host mkdir log 2>nul + +var filename VARCHAR2(100) +BEGIN + :filename := 'log/ROLLBACK_MARS_1057_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; +END; +/ +column filename new_value _filename +select :filename filename from dual; +spool &_filename + +SET ECHO OFF +SET TIMING ON +SET SERVEROUTPUT ON SIZE UNLIMITED +SET PAUSE OFF + +PROMPT ========================================================================= +PROMPT MARS-1057: Rollback Batch External Table Creation 
+PROMPT ========================================================================= +PROMPT WARNING: This will reverse all changes from MARS-1057 installation! +PROMPT +PROMPT Changes to be rolled back: +PROMPT - FILE_MANAGER package specification (3.4.0 -> 3.3.0) +PROMPT - FILE_MANAGER package body (3.4.0 -> 3.3.0) +PROMPT - Remove CREATE_EXTERNAL_TABLES_SET procedure +PROMPT - Remove CREATE_EXTERNAL_TABLES_BATCH procedure +PROMPT ========================================================================= + +-- Confirm rollback with user +ACCEPT continue CHAR PROMPT 'Type YES to continue with rollback, or Ctrl+C to abort: ' +WHENEVER SQLERROR EXIT SQL.SQLCODE +BEGIN + IF '&continue' IS NULL OR TRIM('&continue') IS NULL OR UPPER(TRIM('&continue')) != 'YES' THEN + RAISE_APPLICATION_ERROR(-20001, 'Rollback aborted by user'); + END IF; +END; +/ +WHENEVER SQLERROR CONTINUE + +-- Execute rollback scripts in REVERSE order (92, 91, not 91, 92) +PROMPT +PROMPT ========================================================================= +PROMPT Step 2: Rollback FILE_MANAGER Package Specification to v3.3.0 +PROMPT ========================================================================= +@@91_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_SPEC.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 1: Rollback FILE_MANAGER Package Body to v3.3.0 +PROMPT ========================================================================= +@@92_MARS_1057_rollback_CT_MRDS_FILE_MANAGER_BODY.sql + + + +PROMPT +PROMPT ========================================================================= +PROMPT Step 3: Track Rollback Version +PROMPT ========================================================================= +@@track_package_versions.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Step 4: Verify All Tracked Packages +PROMPT ========================================================================= 
+@@verify_packages_version.sql + +PROMPT +PROMPT ========================================================================= +PROMPT MARS-1057 Rollback - COMPLETED +PROMPT ========================================================================= +PROMPT Check the log file for complete rollback details. +PROMPT Log file: log/ROLLBACK_MARS_1057__.log +PROMPT ========================================================================= + +spool off + +quit; diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/track_package_versions.sql b/MARS_Packages/REL03/MARS-1057/track_package_versions.sql similarity index 88% rename from MARS_Packages/REL01_ADDITIONS/MARS-828/track_package_versions.sql rename to MARS_Packages/REL03/MARS-1057/track_package_versions.sql index b497dfa..c0c7bbd 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/track_package_versions.sql +++ b/MARS_Packages/REL03/MARS-1057/track_package_versions.sql @@ -3,7 +3,7 @@ -- =================================================================== -- Purpose: Track specified Oracle package versions -- Author: Grzegorz Michalski --- Date: 2026-01-27 +-- Date: 2025-11-27 -- Version: 3.1.0 - List-Based Edition -- -- USAGE: @@ -29,7 +29,7 @@ DECLARE -- Format: 'SCHEMA.PACKAGE_NAME' -- =================================================================== vPackageList t_string_array := t_string_array( - 'CT_MRDS.FILE_ARCHIVER' + 'CT_MRDS.FILE_MANAGER' ); -- =================================================================== @@ -78,17 +78,16 @@ BEGIN END IF; END LOOP; - DBMS_OUTPUT.PUT_LINE(''); - DBMS_OUTPUT.PUT_LINE('Summary:'); - DBMS_OUTPUT.PUT_LINE('--------'); - DBMS_OUTPUT.PUT_LINE('Packages tracked: ' || vCount || '/' || vPackageList.COUNT); - + -- Display results IF vPackages.COUNT > 0 THEN + DBMS_OUTPUT.PUT_LINE('Packages tracked: ' || vCount || ' of ' || vPackages.COUNT); DBMS_OUTPUT.PUT_LINE(''); - DBMS_OUTPUT.PUT_LINE('Tracked Packages:'); + FOR i IN 1..vPackages.COUNT LOOP - DBMS_OUTPUT.PUT_LINE(' ' || 
vPackages(i).owner || '.' || vPackages(i).package_name || ' v' || vPackages(i).version); + DBMS_OUTPUT.PUT_LINE(vPackages(i).owner || '.' || vPackages(i).package_name || ' = ' || vPackages(i).version); END LOOP; + ELSE + DBMS_OUTPUT.PUT_LINE('No packages found in list'); END IF; DBMS_OUTPUT.PUT_LINE('========================================'); diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-828/verify_packages_version.sql b/MARS_Packages/REL03/MARS-1057/verify_packages_version.sql similarity index 98% rename from MARS_Packages/REL01_ADDITIONS/MARS-828/verify_packages_version.sql rename to MARS_Packages/REL03/MARS-1057/verify_packages_version.sql index f8ec009..97b6afd 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-828/verify_packages_version.sql +++ b/MARS_Packages/REL03/MARS-1057/verify_packages_version.sql @@ -3,7 +3,7 @@ -- =================================================================== -- Purpose: Verify all tracked Oracle packages for code changes -- Author: Grzegorz Michalski --- Date: 2025-12-04 +-- Date: 2025-11-27 -- Version: 1.0.0 -- -- USAGE: diff --git a/MARS_Packages/mrds_elt-dev-database/mrds_elt-dev-database/database/CT_MRDS/SCHEMA/tables/A_PARALLEL_EXPORT_CHUNKS.sql b/MARS_Packages/mrds_elt-dev-database/mrds_elt-dev-database/database/CT_MRDS/SCHEMA/tables/A_PARALLEL_EXPORT_CHUNKS.sql new file mode 100644 index 0000000..5af702f --- /dev/null +++ b/MARS_Packages/mrds_elt-dev-database/mrds_elt-dev-database/database/CT_MRDS/SCHEMA/tables/A_PARALLEL_EXPORT_CHUNKS.sql @@ -0,0 +1,51 @@ +-- ============================================================================ +-- Table: A_PARALLEL_EXPORT_CHUNKS +-- ============================================================================ +-- Purpose: Permanent table for storing partition export chunks for DBMS_PARALLEL_EXECUTE +-- CRITICAL: Must be permanent (not global temporary) because DBMS_PARALLEL_EXECUTE +-- runs callbacks in separate sessions that cannot access GTT data from parent session +-- Schema: 
CT_MRDS +-- Author: Grzegorz Michalski +-- Created: 2025-12-20 +-- Modified: 2025-12-21 - Changed from GTT to permanent table +-- Related: MARS-835-PREHOOK +-- ============================================================================ + +CREATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( + CHUNK_ID NUMBER PRIMARY KEY, + TASK_NAME VARCHAR2(100) NOT NULL, + YEAR_VALUE VARCHAR2(4) NOT NULL, + MONTH_VALUE VARCHAR2(2) NOT NULL, + SCHEMA_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(128) NOT NULL, + KEY_COLUMN_NAME VARCHAR2(128) NOT NULL, + BUCKET_URI VARCHAR2(4000) NOT NULL, + FOLDER_NAME VARCHAR2(1000) NOT NULL, + PROCESSED_COLUMNS VARCHAR2(32767), + MIN_DATE DATE NOT NULL, + MAX_DATE DATE NOT NULL, + CREDENTIAL_NAME VARCHAR2(200) NOT NULL, + FORMAT_TYPE VARCHAR2(20) NOT NULL, + FILE_BASE_NAME VARCHAR2(1000), + CREATED_DATE TIMESTAMP DEFAULT SYSTIMESTAMP NOT NULL +); + +CREATE INDEX IX_PARALLEL_CHUNKS_TASK ON CT_MRDS.A_PARALLEL_EXPORT_CHUNKS(TASK_NAME); + +COMMENT ON TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS IS 'Permanent table for parallel export chunk processing (DBMS_PARALLEL_EXECUTE) - permanent because GTT data not visible in parallel callback sessions'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.CHUNK_ID IS 'Unique chunk identifier (partition number)'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.TASK_NAME IS 'DBMS_PARALLEL_EXECUTE task name for cleanup'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.YEAR_VALUE IS 'Partition year (YYYY)'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.MONTH_VALUE IS 'Partition month (MM)'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.SCHEMA_NAME IS 'Schema owning the source table'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.TABLE_NAME IS 'Source table name for export'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.KEY_COLUMN_NAME IS 'Key column for load history join'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.BUCKET_URI IS 'OCI bucket URI for export destination'; 
+COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FOLDER_NAME IS 'Folder name within bucket'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.PROCESSED_COLUMNS IS 'Comma-separated list of columns to export'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.MIN_DATE IS 'Minimum date filter for partition'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.MAX_DATE IS 'Maximum date filter for partition'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.CREDENTIAL_NAME IS 'OCI credential name for authentication'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FORMAT_TYPE IS 'Export format: PARQUET or CSV'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FILE_BASE_NAME IS 'Base filename for CSV exports (NULL for Parquet)'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.CREATED_DATE IS 'Timestamp when chunk was created'; diff --git a/README.md b/README.md index 3b103cc..1c5f696 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,12 @@ Kolejność wdrażania paczek: -REL01 MARS-1049 MARS-1049-ADHOC MARS-1056 + + REL02 MARS-1046 - --- Obecnie na środowisku DEV -REL01_ADDITIONS - MARS-835-PREHOOK - MARS-835 - MARS-826 - --- AKtualnie pracuje nad: - MARS-828 - --- Poniżej czeka na wdrożenie -REL03 MARS-1057 '3.4.0 (2025-11-27): MARS-1057 - Added CREATE_EXTERNAL_TABLES_SET and CREATE_EXTERNAL_TABLES_BATCH procedures for batch external table creation' || CHR(13)||CHR(10) || diff --git a/confluence/Package_Deployment_Guide.md b/confluence/Package_Deployment_Guide.md index 7ef2362..b1d4b05 100644 --- a/confluence/Package_Deployment_Guide.md +++ b/confluence/Package_Deployment_Guide.md @@ -84,7 +84,7 @@ c:\_git\_local_rep\ │ ├── MARS_Packages\ │ │ └── REL01\ │ │ └── MARS-1057\ # New package folder -│ │ ├── rollback_version\ +│ │ ├── current_version\ │ │ ├── new_version\ │ │ └── *.sql files │ ├── database\ @@ -169,6 +169,157 @@ The deployment system uses these key components: --- +## Universal Tracking and Verification Scripts + +### MANDATORY: Use Standard 
Template Files + +**CRITICAL:** Every MARS package MUST include two universal scripts for tracking and verification. These scripts are reusable across all MARS issues and provide consistent version management. + +#### Required Files Structure + +``` +MARS_Packages/ +└── REL0X/ + └── MARS-XXXX/ + ├── 01_MARS_XXXX_install_*.sql + ├── 02_MARS_XXXX_install_*.sql + ├── track_package_versions.sql # ✅ MANDATORY - Universal tracking + ├── verify_packages_version.sql # ✅ MANDATORY - Universal verification + ├── install_marsXXXX.sql # Master script + └── rollback_marsXXXX.sql +``` + +### track_package_versions.sql + +**Purpose:** Universal script for tracking multiple package versions with editable package list. + +**Key Features:** +- ✅ **Reusable** - Same file for all MARS issues +- ✅ **Configurable** - Edit package list array to specify which packages to track +- ✅ **Automatic** - Calls ENV_MANAGER.TRACK_PACKAGE_VERSION for each package +- ✅ **Summary Output** - Shows tracked packages count and versions + +**Template:** +```sql +-- =================================================================== +-- PACKAGE LIST - Edit this array to specify packages to track +-- =================================================================== +vPackageList t_string_array := t_string_array( + 'CT_MRDS.DATA_EXPORTER', + 'CT_MRDS.FILE_MANAGER', + 'ODS.FILE_MANAGER_ODS' +); +-- =================================================================== +``` + +**Usage in Install Script:** +```sql +PROMPT Step 4: Track Package Versions +PROMPT ========================================================================= +@@track_package_versions.sql +``` + +**Benefits over custom tracking scripts:** +- No hardcoded package names in multiple places +- Single point of configuration +- Consistent error handling +- Standardized output format +- Copy-paste ready for new MARS issues + +### verify_packages_version.sql + +**Purpose:** Universal script for verifying ALL tracked packages for code changes. 
+ +**Key Features:** +- ✅ **Automatic Discovery** - Queries A_PACKAGE_VERSION_TRACKING to find all tracked packages +- ✅ **Hash Validation** - Calls CHECK_PACKAGE_CHANGES for each package +- ✅ **Status Report** - Shows OK or WARNING for each package +- ✅ **Zero Configuration** - No editing needed, works automatically + +**Usage in Install Script:** +```sql +PROMPT Final Step: Verify All Package Versions +PROMPT ========================================================================= +@@verify_packages_version.sql +``` + +**Output Example:** +``` +PACKAGE_OWNER PACKAGE_NAME VERSION STATUS +--------------- ------------------- ---------- ------------------------------- +CT_MRDS DATA_EXPORTER 2.2.0 OK: Package has not changed +CT_MRDS FILE_MANAGER 3.3.0 OK: Package has not changed +ODS FILE_MANAGER_ODS 2.1.0 OK: Package has not changed +``` + +### Master Install Script Structure (MANDATORY) + +Every `install_marsXXXX.sql` MUST follow this exact structure: + +```sql +-- =================================================================== +-- MARS-XXXX INSTALL SCRIPT +-- =================================================================== + +-- Dynamic spool file generation +host mkdir log 2>nul +-- ... spool setup ... 
+ +PROMPT Step 1: Deploy Package Specification +@@01_MARS_XXXX_install_PACKAGE_SPEC.sql + +PROMPT Step 2: Deploy Package Body +@@02_MARS_XXXX_install_PACKAGE_BODY.sql + +PROMPT Step 3: Track Package Versions +@@track_package_versions.sql -- ✅ MANDATORY universal script + +PROMPT Step 4: Verify Package Versions +@@verify_packages_version.sql -- ✅ MANDATORY universal script + +spool off +quit; +``` + +### DO NOT Create Custom Track/Verify Scripts + +**❌ WRONG (Custom tracking per MARS issue):** +```sql +@@04_MARS_XXXX_track_version.sql -- DON'T DO THIS +@@05_MARS_XXXX_verify_installation.sql -- DON'T DO THIS +``` + +**✅ CORRECT (Universal scripts):** +```sql +@@track_package_versions.sql -- Always use this +@@verify_packages_version.sql -- Always use this +``` + +**Why universal scripts are mandatory:** +- **Consistency** - Same behavior across all MARS packages +- **Maintainability** - Bug fixes propagate to all packages +- **Simplicity** - No need to create custom scripts for each issue +- **Compliance** - Ensures all packages follow same standard +- **Code Review** - Easier to review when structure is identical + +### Copying Template Files + +**For each new MARS package, copy from reference implementation:** + +```powershell +# Copy universal scripts from MARS-826-PREHOOK (reference implementation) +Copy-Item "MARS_Packages\REL01_POST_DEACTIVATION\MARS-826-PREHOOK\track_package_versions.sql" ` + "MARS_Packages\RELXX\MARS-YYYY\track_package_versions.sql" + +Copy-Item "MARS_Packages\REL01_POST_DEACTIVATION\MARS-826-PREHOOK\verify_packages_version.sql" ` + "MARS_Packages\RELXX\MARS-YYYY\verify_packages_version.sql" + +# Edit package list in track_package_versions.sql +# No changes needed to verify_packages_version.sql (works automatically) +``` + +--- + ## Standard Deployment Workflow ### Step 1: Check Current Package Status @@ -592,8 +743,6 @@ WHERE OWNER = 'CT_MRDS' -- Replace with target schema MARS packages (e.g., MARS-1049, MARS-1011) are deployment packages 
that bundle database changes for specific features or fixes. They follow a standardized structure with logging, version tracking, and rollback capabilities. -**PREREQUISITE:** Before creating any MARS package, ensure you have completed **[Step 0: Prepare Dedicated Working Directory and Git Branch](#step-0-prepare-dedicated-working-directory-and-git-branch)**. Each MARS issue MUST have its own isolated working directory and feature branch. - **CRITICAL:** All MARS package installations MUST be executed as **ADMIN user** to ensure proper schema creation, privilege management, and cross-schema operations. **Installation User Requirements:** @@ -632,7 +781,7 @@ mock_data/ - track_package_versions.sql and verify_packages_version.sql - README.md - .gitignore (with standardized exclusions) -- rollback_version/ and new_version/ folders (if package modifications) +- current_version/ and new_version/ folders (if package modifications) - Core deployment files only Standard MARS package directory structure: @@ -651,7 +800,7 @@ MARS_Packages/REL01/MARS-XXXX/ ├── verify_packages_version.sql # Package verification script ├── ... 
├── README.md # Package documentation -├── rollback_version/ # Backup of objects BEFORE changes (for rollback) +├── current_version/ # Backup of objects BEFORE changes (for rollback) │ ├── PACKAGE_NAME.pkg # Previous package specification │ └── PACKAGE_NAME.pkb # Previous package body ├── new_version/ # Updated objects AFTER changes (for installation) @@ -677,12 +826,12 @@ MARS_Packages/REL01/MARS-XXXX/ - **Master scripts**: `install_marsXXXX.sql` and `rollback_marsXXXX.sql` **Version Management Folders (for Database Object Modifications):** -- **`rollback_version/`** - Contains backup of database objects BEFORE changes (for rollback) +- **`current_version/`** - Contains backup of database objects BEFORE changes (for rollback) - **`new_version/`** - Contains updated database objects AFTER changes (for installation) **When to use version folders:** - Required when MARS package modifies ANY existing database objects (packages, tables, views, indexes, triggers, etc.) -- Copy original object definitions to `rollback_version/` before making changes +- Copy original object definitions to `current_version/` before making changes - Place modified object definitions in `new_version/` after changes - Examples: FILE_MANAGER.pkg/pkb, A_SOURCE_FILE_CONFIG.sql (table), V_STATUS.sql (view), IDX_CONFIG.sql (index) @@ -767,7 +916,19 @@ PROMPT Step 2: Second Installation Step PROMPT ========================================================================= @@02_MARS_XXXX_install_step2.sql --- ... more steps ... +-- ... more deployment steps ... 
+ +PROMPT +PROMPT ========================================================================= +PROMPT Step N: Track Package Versions +PROMPT ========================================================================= +@@track_package_versions.sql + +PROMPT +PROMPT ========================================================================= +PROMPT Final Step: Verify Package Versions +PROMPT ========================================================================= +@@verify_packages_version.sql PROMPT PROMPT ========================================================================= @@ -791,7 +952,9 @@ quit; 7. **ACCEPT Validation**: User confirmation required before execution (safety feature) 8. **ALTER SESSION SET CURRENT_SCHEMA** (optional): Sets working schema for package operations 9. **@@ Includes**: All sub-scripts executed via `@@` command -10. **quit;**: Exits SQLcl/SQL*Plus after completion (important for automated deployments) +10. **track_package_versions.sql**: MANDATORY universal tracking script (must be included before verify) +11. **verify_packages_version.sql**: MANDATORY universal verification script (must be last step before completion) +12. 
**quit;**: Exits SQLcl/SQL*Plus after completion (important for automated deployments) **SET ECHO OFF Benefits:** - **Clean Output**: PROMPT messages appear only once in console and log files @@ -841,6 +1004,114 @@ quit; --- +### Complete MARS Package Example + +**MARS-835-PREHOOK** - Real-world example following all standards: + +**File Structure:** +``` +MARS_Packages/REL01_POST_DEACTIVATION/MARS-835-PREHOOK/ +├── .gitignore +├── install_mars835_prehook.sql # Master install (uses universal scripts) +├── rollback_mars835_prehook.sql # Master rollback +├── 01_MARS_835_install_DATA_EXPORTER_SPEC.sql +├── 02_MARS_835_install_DATA_EXPORTER_BODY.sql +├── 91_MARS_835_rollback_DATA_EXPORTER_BODY.sql +├── 92_MARS_835_rollback_DATA_EXPORTER_SPEC.sql +├── track_package_versions.sql # ✅ Universal tracking +├── verify_packages_version.sql # ✅ Universal verification +├── README.md +├── current_version/ +│ ├── DATA_EXPORTER.pkg # v2.1.1 (before changes) +│ └── DATA_EXPORTER.pkb +├── new_version/ +│ ├── DATA_EXPORTER.pkg # v2.2.0 (after changes) +│ └── DATA_EXPORTER.pkb +└── log/ # Auto-created by install script +``` + +**track_package_versions.sql** (edited for this MARS issue): +```sql +-- =================================================================== +-- PACKAGE LIST - Edit this array to specify packages to track +-- =================================================================== +vPackageList t_string_array := t_string_array( + 'CT_MRDS.DATA_EXPORTER' -- Only package modified in MARS-835 +); +-- =================================================================== +``` + +**install_mars835_prehook.sql** (master script): +```sql +-- =================================================================== +-- MARS-835-PREHOOK INSTALL SCRIPT +-- =================================================================== + +host mkdir log 2>nul + +var filename VARCHAR2(100) +BEGIN + :filename := 'log/INSTALL_MARS_835_PREHOOK_' || SYS_CONTEXT('USERENV', 'CON_NAME') || '_' || 
TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS') || '.log'; +END; +/ +column filename new_value _filename +select :filename filename from dual; +spool &_filename + +SET ECHO OFF +SET TIMING ON +SET SERVEROUTPUT ON SIZE UNLIMITED + +PROMPT ========================================================================= +PROMPT MARS-835-PREHOOK: DRY Refactoring for DATA_EXPORTER +PROMPT ========================================================================= + +ACCEPT continue CHAR PROMPT 'Type YES to continue: ' +WHENEVER SQLERROR EXIT SQL.SQLCODE +BEGIN + IF UPPER(TRIM('&continue')) != 'YES' THEN + RAISE_APPLICATION_ERROR(-20001, 'Installation aborted'); + END IF; +END; +/ +WHENEVER SQLERROR CONTINUE + +PROMPT +PROMPT Step 1: Deploy DATA_EXPORTER Package Specification (v2.2.0) +@@01_MARS_835_install_DATA_EXPORTER_SPEC.sql + +PROMPT +PROMPT Step 2: Deploy DATA_EXPORTER Package Body (v2.2.0) +@@02_MARS_835_install_DATA_EXPORTER_BODY.sql + +PROMPT +PROMPT Step 3: Track Package Versions +@@track_package_versions.sql + +PROMPT +PROMPT Step 4: Verify Package Versions +@@verify_packages_version.sql + +PROMPT +PROMPT ========================================================================= +PROMPT MARS-835-PREHOOK Installation - COMPLETED +PROMPT ========================================================================= + +spool off +quit; +``` + +**Why this is the correct pattern:** +- ✅ Uses universal `track_package_versions.sql` (no custom 04_MARS_835_track_version.sql) +- ✅ Uses universal `verify_packages_version.sql` (no custom 03_MARS_835_verify_installation.sql) +- ✅ Tracking happens AFTER deployment (Step 3) +- ✅ Verification happens LAST (Step 4) to confirm everything is correct +- ✅ Single configuration point (edit package list in track_package_versions.sql) +- ✅ Consistent with ALL other MARS packages +- ✅ Easy to copy/paste for new MARS issues + +--- + ### SPOOL Logging Benefits **Why SPOOL is mandatory:** @@ -1292,7 +1563,7 @@ Before creating a MARS package, ensure: - [ ] 
README.md documents installation process - [ ] Rollback scripts reverse all changes - [ ] Rollback includes verification step (@@test/verify_packages_version.sql) -- [ ] **rollback_version/ folder**: Contains backup of objects before changes (if modifying packages) +- [ ] **current_version/ folder**: Contains backup of objects before changes (if modifying packages) - [ ] **new_version/ folder**: Contains updated objects after changes (if modifying packages) - [ ] **test/ folder**: All test files and verification scripts organized in subfolder - [ ] **log/ folder**: SPOOL log files automatically created by master scripts @@ -1320,7 +1591,7 @@ MARS_Packages/REL01/MARS-1046/ ├── track_package_versions.sql # Version tracking ├── verify_packages_version.sql # Package verification ├── README.md # Comprehensive documentation -├── rollback_version/ # v3.3.0 backup for rollback +├── current_version/ # v3.3.0 backup for rollback │ ├── FILE_MANAGER.pkg │ └── FILE_MANAGER.pkb ├── new_version/ # v3.3.1 updated packages @@ -1342,7 +1613,7 @@ MARS_Packages/REL01/MARS-1046/ - SPOOL logging: `INSTALL_MARS_1046__.log` - ACCEPT validation: Requires explicit "YES" to proceed (both install & rollback) - quit; command: Clean SQLcl exit after completion -- Version folders: rollback_version/ (v3.3.0) and new_version/ (v3.3.1) +- Version folders: current_version/ (v3.3.0) and new_version/ (v3.3.1) - Test subfolder: All test files organized separately - 4-step workflow: Install packages → Track version → Verify packages → quit - Full rollback: Restore to previous version with verification @@ -1409,7 +1680,7 @@ Move-Item -Path "*.log" -Destination "log" -Force **Step 4: Verify clean structure** ```powershell # Check root directory - should only have deployment scripts and README -Get-ChildItem | Where-Object { $_.Name -notmatch "^(01|02|91|92|install|rollback|README|rollback_version|new_version|test|log)" } +Get-ChildItem | Where-Object { $_.Name -notmatch 
"^(01|02|91|92|install|rollback|README|current_version|new_version|test|log)" } # Expected: No output (all non-deployment files moved) ``` @@ -1425,7 +1696,7 @@ MARS_Packages/REL01/MARS-XXXX/ ├── track_package_versions.sql ├── verify_packages_version.sql ├── README.md # Documentation -├── rollback_version/ # Backup packages +├── current_version/ # Backup packages ├── new_version/ # Updated packages └── test/ # ALL test artifacts ├── test_marsXXXX.sql diff --git a/confluence/additions/Oracle_External_Tables_Column_Order_Issue.md b/confluence/additions/Oracle_External_Tables_Column_Order_Issue.md new file mode 100644 index 0000000..b5af70c --- /dev/null +++ b/confluence/additions/Oracle_External_Tables_Column_Order_Issue.md @@ -0,0 +1,338 @@ +# Oracle External Tables - Column Order Mapping Issue + +## Document Information +- **Date**: 2026-01-08 +- **Issue**: CSV External Tables map columns by position, not by name +- **Impact**: HIGH - NULL values in A_WORKFLOW_HISTORY_KEY column for CSV exports +- **Status**: IDENTIFIED - Solution required + +--- + +## Problem Summary + +Oracle External Tables using **CSV format** map columns **by position** (ignoring header names), while **Parquet format** uses **schema-based mapping** (by column name). This causes critical data loss when exporting to CSV files that are read by external tables with different column ordering than the source table. + +### Affected Operations +- ✅ **Parquet exports to ARCHIVE** - Working correctly (uses column names) +- ❌ **CSV exports to ODS/DATA** - Returns NULL for A_WORKFLOW_HISTORY_KEY (positional mismatch) + +--- + +## Root Cause Analysis + +### Source Table Structure (OU_CSDB.LEGACY_DEBT_DAILY) +``` +Column Position | Column Name | Data Type +----------------|----------------------|---------- +1 | A_KEY | NUMBER +2 | NEWUPDATED | DATE +3 | IDLOADDATE_DIM | DATE +... | ... | ... +72 | A_ETL_LOAD_SET_FK | NUMBER ← Key column +... | ... | ... 
+129 | PLACEHOLDER50 | VARCHAR2 +``` + +### External Table Structure (ODS.CSDB_DEBT_DAILY_ODS) +``` +Column Position | Column Name | Data Type +----------------|----------------------------|---------- +1 | A_KEY | NUMBER +2 | A_WORKFLOW_HISTORY_KEY | NUMBER ← Expected here! +3 | NEWUPDATED | DATE +4 | IDLOADDATE_DIM | DATE +... | ... | ... +129 | PLACEHOLDER50 | VARCHAR2 +``` + +### Export Query Generated +```sql +SELECT + T.A_KEY, -- Position 1 + T.NEWUPDATED, -- Position 2 + T.IDLOADDATE_DIM, -- Position 3 + ..., + T.A_ETL_LOAD_SET_FK AS A_WORKFLOW_HISTORY_KEY, -- Position 72 + ... +FROM OU_CSDB.LEGACY_DEBT_DAILY T +``` + +### CSV File Structure +```csv +A_KEY,NEWUPDATED,IDLOADDATE_DIM,...,A_WORKFLOW_HISTORY_KEY,... +1,2026-01-08,2025-12-01,...,999,... +``` + +### What External Table Reads +``` +Position 1 → A_KEY (NUMBER) ✅ Reads: 1 (correct) +Position 2 → A_WORKFLOW_HISTORY_KEY ❌ Reads: 2026-01-08 (NEWUPDATED as DATE) + Converts DATE → NUMBER → NULL +Position 3 → NEWUPDATED (DATE) ❌ Reads: 2025-12-01 (IDLOADDATE_DIM) +... 
+``` + +--- + +## Experimental Verification + +### Test 1: CSV with Mismatched Column Order + +**CSV File Content:** +```csv +A_KEY,NEWUPDATED,A_WORKFLOW_HISTORY_KEY +1,2026-01-08,999 +``` + +**External Table Definition (Wrong Order):** +```sql +CREATE EXTERNAL TABLE TEST_COLUMN_ORDER_WRONG ( + A_KEY NUMBER, + A_WORKFLOW_HISTORY_KEY NUMBER, -- Position 2 + NEWUPDATED DATE -- Position 3 +) +``` + +**Result:** +``` +❌ ORA-30653: reject limit reached +``` +**Reason**: Oracle tried to convert NEWUPDATED (DATE) to A_WORKFLOW_HISTORY_KEY (NUMBER) → Conversion failed + +--- + +### Test 2: CSV with Matching Column Order + +**CSV File Content:** +```csv +A_KEY,NEWUPDATED,A_WORKFLOW_HISTORY_KEY +1,2026-01-08,999 +``` + +**External Table Definition (Correct Order):** +```sql +CREATE EXTERNAL TABLE TEST_COLUMN_ORDER_MATCHING ( + A_KEY NUMBER, + NEWUPDATED DATE, -- Position 2 + A_WORKFLOW_HISTORY_KEY NUMBER -- Position 3 +) +``` + +**Result:** +``` +✅ SUCCESS +A_KEY=1, NEWUPDATED=2026-01-08, A_WORKFLOW_HISTORY_KEY=999 +``` + +--- + +### Test 3: Parquet with Mismatched Column Order + +**Parquet File Columns:** +``` +A_KEY (NUMBER), NEWUPDATED (DATE), A_WORKFLOW_HISTORY_KEY (NUMBER) +``` + +**External Table Definition (Different Order):** +```sql +CREATE EXTERNAL TABLE TEST_PARQUET_ORDER_WRONG ( + A_KEY NUMBER, + A_WORKFLOW_HISTORY_KEY NUMBER, -- Different position than Parquet! + NEWUPDATED DATE +) ... FORMAT parquet +``` + +**Result:** +``` +✅ SUCCESS +A_KEY=1, A_WORKFLOW_HISTORY_KEY=999, NEWUPDATED=2026-01-08 +``` +**Reason**: Parquet uses **schema-based mapping** - matches by column name, not position + +--- + +## Format Comparison + +| Aspect | CSV | Parquet | +|--------|-----|---------| +| **Column Mapping** | By position (1, 2, 3, ...) 
| By column name (schema) | +| **Header Usage** | Ignored (cosmetic only) | Used for mapping | +| **Order Sensitivity** | ❌ HIGH - Must match exactly | ✅ LOW - Order irrelevant | +| **Type Mismatch** | Reject/NULL conversion | Proper validation | +| **Current Status** | ❌ BROKEN for ODS exports | ✅ WORKING for ARCHIVE | + +--- + +## Current Implementation Issue + +### processColumnList Function (DATA_EXPORTER.pkb) +```sql +FUNCTION processColumnList(...) RETURN VARCHAR2 IS +BEGIN + IF pColumnList IS NULL THEN + -- Build list of all columns from SOURCE table + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pTableName -- Source: LEGACY_DEBT_DAILY + AND owner = pSchemaName; + + -- Columns ordered by SOURCE table structure (1, 2, ..., 72, ...) + vResult := 'T.' || REPLACE(vAllCols, ', ', ', T.'); + + -- Add alias at original position (72) + vResult := REPLACE(vResult, 'T.' || pKeyColumnName, + 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'); + + RETURN vResult; + END IF; + -- ... +END; +``` + +**Problem**: Generates columns in **LEGACY_DEBT_DAILY** order, not **CSDB_DEBT_DAILY_ODS** order! 
+ +--- + +## Real-World Impact Example + +### Export Command +```sql +CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + pSchemaName => 'OU_CSDB', + pTableName => 'LEGACY_DEBT_DAILY', + pKeyColumnName => 'A_ETL_LOAD_SET_FK', + pBucketArea => 'DATA', + pFolderName => 'ODS/CSDB/CSDB_DEBT_DAILY', + pMinDate => DATE '2025-01-01', + pMaxDate => SYSDATE, + pParallelDegree => 8 +); +``` + +### Result +- ✅ **Export completed successfully** - 6 CSV files created +- ✅ **Parquet export to ARCHIVE** - A_WORKFLOW_HISTORY_KEY populated correctly +- ❌ **CSV read via ODS.CSDB_DEBT_DAILY_ODS** - Returns NULL for A_WORKFLOW_HISTORY_KEY + +### Verification Query +```sql +-- Returns 0 non-NULL values +SELECT COUNT(*) +FROM ODS.CSDB_DEBT_DAILY_ODS +WHERE A_WORKFLOW_HISTORY_KEY IS NOT NULL; + +-- Returns: 0 +``` + +--- + +## Solution Options + +### Option 1: Rebuild External Tables (RECOMMENDED for existing system) +**Action**: Update all ODS external table definitions to match source table column order + +**Pros:** +- No code changes in DATA_EXPORTER +- Maintains backward compatibility +- One-time migration effort + +**Cons:** +- Requires access to all external table definitions +- Must coordinate with downstream consumers + +--- + +### Option 2: Modify Export Logic (RECOMMENDED for new exports) +**Action**: Update `processColumnList` to query external table structure and match column order + +**Implementation:** +```sql +FUNCTION processColumnList(..., pExternalTableOwner VARCHAR2, pExternalTableName VARCHAR2) IS +BEGIN + IF pExternalTableName IS NOT NULL THEN + -- Get column order from EXTERNAL table, not source table + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pExternalTableName + AND owner = pExternalTableOwner; + + -- Build SELECT mapping source columns to external table order + -- ... + ELSE + -- Existing logic (source table order) + -- ... 
+ END IF; +END; +``` + +**Pros:** +- Future-proof for new tables +- Explicit mapping control +- Self-documenting exports + +**Cons:** +- Requires API change (new parameters) +- More complex logic +- Performance overhead (additional metadata query) + +--- + +### Option 3: Use Parquet Format for ODS +**Action**: Switch ODS exports from CSV to Parquet + +**Pros:** +- Eliminates column order issue +- Better compression +- Stronger typing + +**Cons:** +- May break downstream Airflow/DBT pipelines expecting CSV +- Larger migration effort + +--- + +## Recommendations + +### Immediate Action (Hot Fix) +1. ✅ Document issue (this document) +2. Identify all affected CSV exports to ODS +3. Choose between Option 1 (rebuild tables) or Option 2 (modify export) + +### Long-term Strategy +1. Migrate ODS exports to **Parquet format** where possible +2. Maintain CSV only for systems requiring it +3. Add validation tests comparing source vs external table column order +4. Update documentation to warn about CSV positional mapping + +--- + +## Testing Checklist + +- [x] Verify CSV positional mapping behavior +- [x] Verify Parquet schema-based mapping behavior +- [x] Confirm NULL values in ODS.CSDB_DEBT_DAILY_ODS +- [x] Confirm correct values in CSDB_DEBT_DAILY_ARCHIVE (Parquet) +- [ ] Test proposed solution +- [ ] Validate all affected external tables +- [ ] Update deployment procedures + +--- + +## Related Issues +- MARS-835-PREHOOK: Parallel processing implementation (exposed this issue) +- A_WORKFLOW_HISTORY_KEY aliasing fix (partial - doesn't solve column order) + +--- + +## References +- Oracle Documentation: External Tables - Data Access Parameters +- DBMS_CLOUD.EXPORT_DATA format options +- Oracle External Tables Column Mapping Behavior (CSV vs Parquet) + +--- + +**Document Status**: DRAFT - Awaiting solution decision +**Last Updated**: 2026-01-08 +**Author**: GitHub Copilot (Analysis Session) diff --git a/confluence/additions/package_version_history.xlsx 
b/confluence/additions/package_version_history.xlsx new file mode 100644 index 0000000..88aa05b Binary files /dev/null and b/confluence/additions/package_version_history.xlsx differ