From c5d39fc01d61f54e23033aae4581cec6753621b5 Mon Sep 17 00:00:00 2001 From: Grzegorz Michalski Date: Fri, 13 Feb 2026 13:56:36 +0100 Subject: [PATCH] Add ENV_MANAGER and DATA_EXPORTER rollback packages (v2.9.0) with comprehensive error handling and logging features - Introduced ENV_MANAGER package for managing environment configurations and error handling. - Implemented detailed error codes and messages for various scenarios, enhancing debugging capabilities. - Added logging procedures to track process events and errors, with configurable logging levels. - Included version management functions to track package versioning and changes. - Established a structure for documenting functions and procedures for better maintainability. - Added rollback versions (v2.9.0) of the DATA_EXPORTER package body and specification alongside ENV_MANAGER. --- .../rollback_version/v2.9.0/DATA_EXPORTER.pkb | 1669 +++++++++++++++++ .../rollback_version/v2.9.0/DATA_EXPORTER.pkg | 239 +++ .../rollback_version/v2.9.0/ENV_MANAGER.pkb | 1171 ++++++++++++ .../rollback_version/v2.9.0/ENV_MANAGER.pkg | 625 ++++++ 4 files changed, 3704 insertions(+) create mode 100644 MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkb create mode 100644 MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkg create mode 100644 MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkb create mode 100644 MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkg diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkb new file mode 100644 index 0000000..7a3c90a --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkb @@ -0,0 +1,1669 @@ +create or replace PACKAGE BODY CT_MRDS.DATA_EXPORTER +AS + + ---------------------------------------------------------------------------------------------------- + -- PRIVATE HELPER FUNCTIONS (USED BY MULTIPLE 
PROCEDURES) + ---------------------------------------------------------------------------------------------------- + + /** + * Sanitizes filename by replacing disallowed characters with underscores + * + * @param pFilename raw file name; every character outside [a-zA-Z0-9._-] is + * replaced with a single '_' (one underscore per offending character) + * @return sanitized file name, safe for embedding in object-storage URIs + **/ + FUNCTION sanitizeFilename(pFilename IN VARCHAR2) RETURN VARCHAR2 IS + vFilename VARCHAR2(1000); + BEGIN + vFilename := REGEXP_REPLACE(pFilename, '[^a-zA-Z0-9._-]', '_'); + RETURN vFilename; + END sanitizeFilename; + + ---------------------------------------------------------------------------------------------------- + + /** + * Deletes export file from OCI bucket if it exists (used for cleanup before retry) + * Silently ignores if file doesn't exist (ORA-20404) + * + * @param pFileUri full object URI of the file to remove + * @param pCredentialName DBMS_CLOUD credential name used for the delete + * @param pParameters caller context string, passed through to the process log + * + * Never raises: all errors are swallowed and logged so the subsequent export + * attempt proceeds regardless of cleanup success. + **/ + PROCEDURE DELETE_FAILED_EXPORT_FILE( + pFileUri IN VARCHAR2, + pCredentialName IN VARCHAR2, + pParameters IN VARCHAR2 + ) IS + BEGIN + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Attempting to delete potentially corrupted file: ' || pFileUri, 'DEBUG', pParameters); + + DBMS_CLOUD.DELETE_OBJECT( + credential_name => pCredentialName, + object_uri => pFileUri + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Deleted existing file (cleanup before retry): ' || pFileUri, 'INFO', pParameters); + EXCEPTION + WHEN OTHERS THEN + -- Object not found is OK (file doesn't exist) + IF SQLCODE = -20404 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('File does not exist (OK): ' || pFileUri, 'DEBUG', pParameters); + ELSE + -- Log but don't fail - export will attempt anyway + ENV_MANAGER.LOG_PROCESS_EVENT('Warning: Could not delete file (will retry export anyway): ' || SQLERRM, 'WARNING', pParameters); + END IF; + END; + END DELETE_FAILED_EXPORT_FILE; + + ---------------------------------------------------------------------------------------------------- + + /** + * Builds query with TO_CHAR for date/timestamp columns using per-column formats + * Retrieves format for each date column from FILE_MANAGER.GET_DATE_FORMAT + **/ + FUNCTION buildQueryWithDateFormats( + pColumnList IN VARCHAR2, + pTableName IN VARCHAR2, + pSchemaName IN VARCHAR2, 
pKeyColumnName IN VARCHAR2, + pTemplateTableName IN VARCHAR2 + ) RETURN VARCHAR2 IS + vResult VARCHAR2(32767); + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + vAllCols VARCHAR2(32767); + vDataType VARCHAR2(30); + vDateFormat VARCHAR2(200); + vTemplateSchema VARCHAR2(128); + vTemplateTable VARCHAR2(128); + vColExists NUMBER; + BEGIN + -- Build column list if not provided + IF pColumnList IS NULL THEN + -- Use template table for column order when provided + -- Template defines which columns to export and in what order + IF pTemplateTableName IS NOT NULL THEN + -- Parse template table name (SCHEMA.TABLE or just TABLE) + IF INSTR(pTemplateTableName, '.') > 0 THEN + vTemplateSchema := SUBSTR(pTemplateTableName, 1, INSTR(pTemplateTableName, '.') - 1); + vTemplateTable := SUBSTR(pTemplateTableName, INSTR(pTemplateTableName, '.') + 1); + ELSE + vTemplateSchema := pSchemaName; + vTemplateTable := pTemplateTableName; + END IF; + + -- Get columns from TEMPLATE table in template column order + -- Template defines target CSV structure (column order and which columns to include) + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = vTemplateTable + AND owner = vTemplateSchema; + ELSE + -- Get columns from source table when no template + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pTableName + AND owner = pSchemaName; + END IF; + ELSE + vAllCols := pColumnList; + END IF; + + -- Process each column + vColumns := UPPER(REPLACE(vAllCols, ' ', '')); + vPos := 1; + vResult := ''; + + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- When using template table, check if column exists in SOURCE table + -- Template 
defines target structure, source provides data + -- Skip template columns that don't exist in source (except A_WORKFLOW_HISTORY_KEY) + IF pTemplateTableName IS NOT NULL THEN + -- Check if template column exists in SOURCE table + SELECT COUNT(*) INTO vColExists + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + + -- Skip columns that don't exist in source table + -- Exception: A_WORKFLOW_HISTORY_KEY is virtual (mapped from pKeyColumnName) + IF vColExists = 0 AND UPPER(vCurrentCol) != 'A_WORKFLOW_HISTORY_KEY' THEN + vPos := vNextPos + 1; + CONTINUE; + END IF; + END IF; + + -- Get column data type from appropriate table (template or source) + IF pTemplateTableName IS NOT NULL THEN + -- Get data type from template table + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = vTemplateTable + AND column_name = vCurrentCol + AND owner = vTemplateSchema; + ELSE + -- Get data type from source table + SELECT data_type INTO vDataType + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + END IF; + + -- Handle key column alias (template table has A_WORKFLOW_HISTORY_KEY, source table has pKeyColumnName) + IF UPPER(vCurrentCol) = 'A_WORKFLOW_HISTORY_KEY' THEN + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'; + + -- Convert DATE/TIMESTAMP columns to CHAR with specific format + ELSIF vDataType IN ('DATE', 'TIMESTAMP', 'TIMESTAMP WITH TIME ZONE', 'TIMESTAMP WITH LOCAL TIME ZONE') THEN + IF pTemplateTableName IS NOT NULL THEN + vDateFormat := CT_MRDS.FILE_MANAGER.GET_DATE_FORMAT( + pTemplateTableName => pTemplateTableName, + pColumnName => vCurrentCol + ); + ELSE + vDateFormat := ENV_MANAGER.gvDefaultDateFormat; + END IF; + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'TO_CHAR(T.' 
|| vCurrentCol || ', ''' || vDateFormat || ''') AS ' || vCurrentCol; + + -- Other columns as-is with T. prefix + ELSE + vResult := vResult || CASE WHEN vResult IS NOT NULL THEN ', ' ELSE '' END || + 'T.' || vCurrentCol; + END IF; + + vPos := vNextPos + 1; + END LOOP; + + RETURN vResult; + END buildQueryWithDateFormats; + + ---------------------------------------------------------------------------------------------------- + + -- Internal shared function to process column list with T. prefix and key column mapping + FUNCTION processColumnList(pColumnList IN VARCHAR2, pTableName IN VARCHAR2, pSchemaName IN VARCHAR2, pKeyColumnName IN VARCHAR2) RETURN VARCHAR2 IS + vResult VARCHAR2(32767); + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + vAllCols VARCHAR2(32767); + BEGIN + IF pColumnList IS NULL THEN + -- Build list of all columns + SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id) + INTO vAllCols + FROM all_tab_columns + WHERE table_name = pTableName + AND owner = pSchemaName; + + -- Add T. prefix to all columns + vResult := 'T.' || REPLACE(vAllCols, ', ', ', T.'); + + -- Replace key column with aliased version (e.g., T.A_ETL_LOAD_SET_KEY_FK AS A_WORKFLOW_HISTORY_KEY) + vResult := REPLACE(vResult, 'T.' || pKeyColumnName, 'T.' || pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'); + + RETURN vResult; + END IF; + + -- Remove extra spaces and convert to uppercase + vColumns := UPPER(REPLACE(pColumnList, ' ', '')); + vPos := 1; + vResult := ''; + + -- Parse comma-separated column list and add T. prefix + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- Check if this is the key column (e.g., A_ETL_LOAD_SET_KEY_FK) and add alias + IF UPPER(vCurrentCol) = UPPER(pKeyColumnName) THEN + vCurrentCol := 'T.' 
|| pKeyColumnName || ' AS A_WORKFLOW_HISTORY_KEY'; + ELSE + -- Add T. prefix if not already present + IF INSTR(vCurrentCol, '.') = 0 THEN + vCurrentCol := 'T.' || vCurrentCol; + END IF; + END IF; + + -- Add to result with comma separator + IF vResult IS NOT NULL THEN + vResult := vResult || ', '; + END IF; + vResult := vResult || vCurrentCol; + + vPos := vNextPos + 1; + END LOOP; + + RETURN vResult; + END processColumnList; + + ---------------------------------------------------------------------------------------------------- + + /** + * Validates table existence, key column existence, and column list + **/ + PROCEDURE VALIDATE_TABLE_AND_COLUMNS ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pColumnList IN VARCHAR2, + pParameters IN VARCHAR2 + ) IS + vCount INTEGER; + vColumns VARCHAR2(32767); + vPos PLS_INTEGER; + vNextPos PLS_INTEGER; + vCurrentCol VARCHAR2(128); + BEGIN + -- Check if table exists + SELECT COUNT(*) INTO vCount + FROM all_tables + WHERE table_name = pTableName + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + END IF; + + -- Check if key column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = pKeyColumnName + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + -- Validate pColumnList - check if all column names exist in the table + IF pColumnList IS NOT NULL THEN + vColumns := UPPER(REPLACE(pColumnList, ' ', '')); + vPos := 1; + + WHILE vPos <= LENGTH(vColumns) LOOP + vNextPos := INSTR(vColumns, ',', vPos); + IF vNextPos = 0 THEN + vNextPos := LENGTH(vColumns) + 1; + END IF; + + vCurrentCol := SUBSTR(vColumns, vPos, vNextPos - vPos); + + -- Remove table alias prefix if present + IF INSTR(vCurrentCol, '.') > 0 THEN + vCurrentCol := 
SUBSTR(vCurrentCol, INSTR(vCurrentCol, '.') + 1); + END IF; + + -- Check if column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = pTableName + AND column_name = vCurrentCol + AND owner = pSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + vPos := vNextPos + 1; + END LOOP; + END IF; + END VALIDATE_TABLE_AND_COLUMNS; + + ---------------------------------------------------------------------------------------------------- + + /** + * Retrieves list of year/month partitions based on date range + **/ + FUNCTION GET_PARTITIONS ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pMinDate IN DATE, + pMaxDate IN DATE, + pParameters IN VARCHAR2 + ) RETURN partition_tab IS + vSql VARCHAR2(32000); + vPartitions partition_tab; + vKeyValuesYear DBMS_SQL.VARCHAR2_TABLE; + vKeyValuesMonth DBMS_SQL.VARCHAR2_TABLE; + vFullTableName VARCHAR2(200); + BEGIN + -- Build fully qualified table name if not already qualified + IF INSTR(pTableName, '.') > 0 THEN + vFullTableName := pTableName; -- Already fully qualified + ELSE + vFullTableName := pSchemaName || '.' || pTableName; + END IF; + + vSql := 'SELECT DISTINCT TO_CHAR(L.LOAD_START,''YYYY'') AS YR, TO_CHAR(L.LOAD_START,''MM'') AS MN + FROM ' || vFullTableName || ' T, CT_ODS.A_LOAD_HISTORY L + WHERE T.' || pKeyColumnName || ' = L.A_ETL_LOAD_SET_KEY + AND L.LOAD_START >= :pMinDate + AND L.LOAD_START < :pMaxDate + ORDER BY YR, MN'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Executing date range query: ' || vSql, 'DEBUG', pParameters); + EXECUTE IMMEDIATE vSql BULK COLLECT INTO vKeyValuesYear, vKeyValuesMonth USING pMinDate, pMaxDate; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vKeyValuesYear.COUNT || ' year/month combinations to export', 'DEBUG', pParameters); + + -- Convert to partition_tab + vPartitions := partition_tab(); + vPartitions.EXTEND(vKeyValuesYear.COUNT); + FOR i IN 1 .. 
vKeyValuesYear.COUNT LOOP + vPartitions(i).year := vKeyValuesYear(i); + vPartitions(i).month := vKeyValuesMonth(i); + END LOOP; + + RETURN vPartitions; + END GET_PARTITIONS; + + ---------------------------------------------------------------------------------------------------- + + /** + * Exports single partition (year/month) to specified format (PARQUET or CSV) + * This is the core worker procedure that will be used for parallel processing in v2.3.0 + **/ + PROCEDURE EXPORT_SINGLE_PARTITION ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pYear IN VARCHAR2, + pMonth IN VARCHAR2, + pBucketUri IN VARCHAR2, + pFolderName IN VARCHAR2, + pProcessedColumns IN VARCHAR2, + pMinDate IN DATE, + pMaxDate IN DATE, + pCredentialName IN VARCHAR2, + pFormat IN VARCHAR2 DEFAULT 'PARQUET', + pFileBaseName IN VARCHAR2 DEFAULT NULL, + pMaxFileSize IN NUMBER DEFAULT 104857600, + pParameters IN VARCHAR2 + ) IS + vQuery VARCHAR2(32767); + vUri VARCHAR2(4000); + vFileName VARCHAR2(1000); + vFullTableName VARCHAR2(200); + BEGIN + -- Build fully qualified table name if not already qualified + IF INSTR(pTableName, '.') > 0 THEN + vFullTableName := pTableName; -- Already fully qualified + ELSE + vFullTableName := pSchemaName || '.' || pTableName; + END IF; + + -- Construct the query to extract data for the current year/month + vQuery := 'SELECT ' || pProcessedColumns || ' + FROM ' || vFullTableName || ' T, CT_ODS.A_LOAD_HISTORY L + WHERE T.' 
|| pKeyColumnName || ' = L.A_ETL_LOAD_SET_KEY + AND TO_CHAR(L.LOAD_START,''YYYY'') = ' || CHR(39) || pYear || CHR(39) || ' + AND TO_CHAR(L.LOAD_START,''MM'') = ' || CHR(39) || pMonth || CHR(39) || ' + AND L.LOAD_START >= TO_DATE(' || CHR(39) || TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS') || CHR(39) || ', ''YYYY-MM-DD HH24:MI:SS'') + AND L.LOAD_START < TO_DATE(' || CHR(39) || TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS') || CHR(39) || ', ''YYYY-MM-DD HH24:MI:SS'')'; + + -- Construct the URI based on format + IF pFormat = 'PARQUET' THEN + -- Parquet: Use Hive-style partitioning + -- Note: maxfilesize is NOT supported for Parquet format (Oracle limitation) + vUri := pBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + 'PARTITION_YEAR=' || sanitizeFilename(pYear) || '/' || + 'PARTITION_MONTH=' || sanitizeFilename(pMonth) || '/' || + sanitizeFilename(pYear) || sanitizeFilename(pMonth) || '.parquet'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parquet export URI: ' || vUri, 'DEBUG', pParameters); + + -- Delete potentially corrupted file from previous failed attempt + -- This prevents Oracle from creating _1 suffixed files on retry + DELETE_FAILED_EXPORT_FILE(vUri, pCredentialName, pParameters); + + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object('type' VALUE 'parquet') + ); + ELSIF pFormat = 'CSV' THEN + -- CSV: Flat file structure with year/month in filename + vFileName := NVL(pFileBaseName, UPPER(pTableName)) || '_' || pYear || pMonth || '.csv'; + vUri := pBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + sanitizeFilename(vFileName); + + ENV_MANAGER.LOG_PROCESS_EVENT('CSV export URI: ' || vUri, 'DEBUG', pParameters); + + -- Delete potentially corrupted file from previous failed attempt + -- This prevents Oracle from creating _1 suffixed files on retry + DELETE_FAILED_EXPORT_FILE(vUri, pCredentialName, pParameters); + + -- 
Use json_object() for CSV export with maxfilesize in bytes (Oracle requirement) + -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB + -- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M' + -- Using 100MB (104857600) to avoid PGA memory issues with large files + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object( + 'type' VALUE 'CSV', + 'header' VALUE true, + 'quote' VALUE CHR(34), + 'delimiter' VALUE ',', + 'escape' VALUE true, + 'recorddelimiter' VALUE CHR(13)||CHR(10), -- CRLF for Windows consumers + 'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes (e.g., 104857600 = 100MB) + ) + ); + ELSE + RAISE_APPLICATION_ERROR(-20001, 'Unsupported format: ' || pFormat || '. Use PARQUET or CSV.'); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Processing Year/Month: ' || pYear || '/' || pMonth || ' (Format: ' || pFormat || ')', 'DEBUG', pParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', pParameters); + END EXPORT_SINGLE_PARTITION; + + ---------------------------------------------------------------------------------------------------- + + /** + * Callback procedure for DBMS_PARALLEL_EXECUTE + * Processes single partition (year/month) chunk in parallel task + * Called by DBMS_PARALLEL_EXECUTE framework for each chunk + * + * @param pStartId first chunk id of the assigned range; used as the CHUNK_ID + * lookup key into CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + * @param pEndId last chunk id of the assigned range; unused in the body — + * presumably chunks are created one id wide so pStartId alone + * identifies the chunk (NOTE(review): confirm at task creation) + * + * Side effects: updates the chunk row STATUS (PROCESSING/COMPLETED/FAILED) + * and COMMITs after each transition so progress is visible across sessions. + **/ + PROCEDURE EXPORT_PARTITION_PARALLEL ( + pStartId IN NUMBER, + pEndId IN NUMBER + ) IS + vYear VARCHAR2(4); + vMonth VARCHAR2(2); + vSchemaName VARCHAR2(128); + vTableName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vBucketUri VARCHAR2(4000); + vFolderName VARCHAR2(1000); + vProcessedColumns VARCHAR2(32767); + vMinDate DATE; + vMaxDate DATE; + vCredentialName VARCHAR2(200); + vFormat VARCHAR2(20); + vFileBaseName VARCHAR2(1000); + vMaxFileSize NUMBER; + vParameters VARCHAR2(4000); + BEGIN + -- Retrieve chunk context from global temporary table + SELECT + YEAR_VALUE, + MONTH_VALUE, + SCHEMA_NAME, 
+ TABLE_NAME, + KEY_COLUMN_NAME, + BUCKET_URI, + FOLDER_NAME, + PROCESSED_COLUMNS, + MIN_DATE, + MAX_DATE, + CREDENTIAL_NAME, + FORMAT_TYPE, + FILE_BASE_NAME, + MAX_FILE_SIZE + INTO + vYear, + vMonth, + vSchemaName, + vTableName, + vKeyColumnName, + vBucketUri, + vFolderName, + vProcessedColumns, + vMinDate, + vMaxDate, + vCredentialName, + vFormat, + vFileBaseName, + vMaxFileSize + FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE CHUNK_ID = pStartId; + + vParameters := 'Parallel task - Year: ' || vYear || ', Month: ' || vMonth || ', ChunkID: ' || pStartId; + ENV_MANAGER.LOG_PROCESS_EVENT('Starting parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + + -- Mark chunk as PROCESSING + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'PROCESSING', + ERROR_MESSAGE = NULL + WHERE CHUNK_ID = pStartId; + COMMIT; + + -- Call the worker procedure + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vYear, + pMonth => vMonth, + pBucketUri => vBucketUri, + pFolderName => vFolderName, + pProcessedColumns => vProcessedColumns, + pMinDate => vMinDate, + pMaxDate => vMaxDate, + pCredentialName => vCredentialName, + pFormat => vFormat, + pFileBaseName => vFileBaseName, + pMaxFileSize => vMaxFileSize, + pParameters => vParameters + ); + + -- Mark chunk as COMPLETED + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'COMPLETED', + EXPORT_TIMESTAMP = SYSTIMESTAMP, + ERROR_MESSAGE = NULL + WHERE CHUNK_ID = pStartId; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Completed parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Capture error details in variable (SQLERRM cannot be used directly in SQL) + vgMsgTmp := 'Parallel task error for partition ' || vYear || '/' || vMonth || ' (ChunkID: ' || pStartId || '): ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + 
ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + + -- Mark chunk as FAILED with error message + -- Use vgMsgTmp variable instead of SQLERRM directly (Oracle limitation in SQL context) + UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + SET STATUS = 'FAILED', + ERROR_MESSAGE = SUBSTR(vgMsgTmp, 1, 4000) + WHERE CHUNK_ID = pStartId; + COMMIT; + + RAISE; + END EXPORT_PARTITION_PARALLEL; + + ---------------------------------------------------------------------------------------------------- + -- MAIN EXPORT PROCEDURES + ---------------------------------------------------------------------------------------------------- + + PROCEDURE EXPORT_TABLE_DATA ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 default NULL, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vCount INTEGER; + vQuery VARCHAR2(32767); + vUri VARCHAR2(4000); + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters VARCHAR2(4000); + vBucketUri VARCHAR2(4000); + vProcessedColumnList VARCHAR2(32767); + vCurrentCol VARCHAR2(128); + + -- Variables for file registration (when pRegisterExport=TRUE) + vConfigKey NUMBER; + vSourceKey VARCHAR2(100); + vTableId VARCHAR2(100); + vSlashPos1 NUMBER; + vSlashPos2 NUMBER; + vSourceFileReceivedKey NUMBER; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 
'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' + ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' + ,'pProcessName => '''||nvl(pProcessName, 'NULL')||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Check if table exists + SELECT COUNT(*) INTO vCount + FROM all_tables + WHERE table_name = vTableName + AND owner = vSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, ENV_MANAGER.MSG_TABLE_NOT_EXISTS); + END IF; + + -- Check if key column exists + SELECT COUNT(*) INTO vCount + FROM all_tab_columns + WHERE table_name = vTableName + AND column_name = vKeyColumnName + AND owner = vSchemaName; + + IF vCount = 0 THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); + END IF; + + -- Validate template table if provided + IF pTemplateTableName IS NOT NULL THEN + DECLARE + vTemplateSchema VARCHAR2(128); + vTemplateTable VARCHAR2(128); + vTemplateCount NUMBER; + BEGIN + -- Parse template table name (SCHEMA.TABLE or just TABLE) + IF INSTR(pTemplateTableName, '.') > 0 THEN + vTemplateSchema := UPPER(SUBSTR(pTemplateTableName, 1, INSTR(pTemplateTableName, '.') - 1)); + vTemplateTable := UPPER(SUBSTR(pTemplateTableName, INSTR(pTemplateTableName, '.') + 1)); + ELSE + vTemplateSchema := vSchemaName; + vTemplateTable := UPPER(pTemplateTableName); + END IF; + + -- Check if template table exists + SELECT COUNT(*) INTO vTemplateCount + FROM all_tables + 
WHERE table_name = vTemplateTable + AND owner = vTemplateSchema; + + IF vTemplateCount = 0 THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS || ': Template table ' || vTemplateSchema || '.' || vTemplateTable; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Template table validated: ' || vTemplateSchema || '.' || vTemplateTable, 'DEBUG', vParameters); + END; + END IF; + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(NULL, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' 
|| DBMS_ASSERT.simple_sql_name(vTableName); + + -- Lookup A_SOURCE_FILE_CONFIG_KEY based on pFolderName parsing if pRegisterExport is enabled + IF pRegisterExport THEN + -- Format: {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID} + -- Example: 'ODS/CSDB/CSDB_DEBT_DAILY' -> SOURCE_KEY='CSDB', TABLE_ID='CSDB_DEBT_DAILY' + + -- Parse pFolderName to extract SOURCE_KEY and TABLE_ID + vSlashPos1 := INSTR(pFolderName, '/', 1, 1); -- First '/' position + vSlashPos2 := INSTR(pFolderName, '/', 1, 2); -- Second '/' position + + IF vSlashPos1 > 0 AND vSlashPos2 > 0 THEN + -- Extract segment 2 (SOURCE_KEY) and segment 3 (TABLE_ID) + vSourceKey := SUBSTR(pFolderName, vSlashPos1 + 1, vSlashPos2 - vSlashPos1 - 1); + vTableId := SUBSTR(pFolderName, vSlashPos2 + 1); + + -- Find configuration based on SOURCE_KEY and TABLE_ID + BEGIN + SELECT A_SOURCE_FILE_CONFIG_KEY + INTO vConfigKey + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE A_SOURCE_KEY = vSourceKey + AND TABLE_ID = vTableId + AND SOURCE_FILE_TYPE = 'INPUT' + AND ROWNUM = 1; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found config key: ' || vConfigKey || ' for SOURCE=' || vSourceKey || ', TABLE=' || vTableId, 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('No config found for SOURCE=' || vSourceKey || ', TABLE=' || vTableId || ' - using default (-1)', 'INFO', vParameters); + END; + ELSE + -- Cannot parse folder name - use default + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('Cannot parse pFolderName: ' || pFolderName || ' - using default (-1)', 'WARNING', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('File registration enabled with config key: ' || vConfigKey, 'INFO', vParameters); + END IF; + + -- Construct single query for entire table (no join with A_LOAD_HISTORY - ensures single file output) + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T'; + + -- Construct the URI for the file in OCI Object Storage + vUri := vBucketUri || + CASE 
WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + NVL(pFileName, UPPER(vTableName) || '.csv'); + + ENV_MANAGER.LOG_PROCESS_EVENT('Exporting to single file: ' || vUri, 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Max file size: ' || pMaxFileSize || ' bytes (' || ROUND(pMaxFileSize/1048576, 2) || ' MB)', 'DEBUG', vParameters); + + -- Use DBMS_CLOUD package to export data to the URI + -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 100MB (104857600) + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object( + 'type' VALUE 'CSV', + 'header' VALUE true, + 'quote' VALUE CHR(34), + 'delimiter' VALUE ',', + 'escape' VALUE true, + 'recorddelimiter' VALUE CHR(13)||CHR(10), -- CRLF dla Windows + 'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes + ) + ); + + -- Register exported file to A_SOURCE_FILE_RECEIVED if requested + IF pRegisterExport THEN + DECLARE + vChecksum VARCHAR2(128); + vCreated TIMESTAMP WITH TIME ZONE; + vBytes NUMBER; + vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix + vSanitizedFileName VARCHAR2(1000); + vFileName VARCHAR2(1000); + vRetryCount NUMBER := 0; + vMaxRetries NUMBER := 1; -- One retry after initial attempt + vRetryDelay NUMBER := 2; -- 2 seconds delay + BEGIN + -- Extract filename from URI (after last '/') + vFileName := SUBSTR(vUri, INSTR(vUri, '/', -1) + 1); + + -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) + -- Example: tablename.csv becomes tablename_1_20260211T102621591769Z.csv + vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i'); + + -- Try to get file metadata with retry logic + <> + LOOP + 
BEGIN + SELECT object_name, checksum, created, bytes + INTO vActualFileName, vChecksum, vCreated, vBytes + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => pCredentialName, + location_uri => vBucketUri + )) + WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' + ORDER BY created DESC, bytes DESC + FETCH FIRST 1 ROW ONLY; + + -- Extract filename only from full path (remove bucket folder prefix) + vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1); + + -- Success - exit retry loop + EXIT metadata_retry_loop; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vRetryCount := vRetryCount + 1; + + IF vRetryCount <= vMaxRetries THEN + -- Log retry attempt + ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); + + -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) + DBMS_SESSION.SLEEP(vRetryDelay); + ELSE + -- Max retries exceeded - re-raise exception + RAISE; + END IF; + END; + END LOOP metadata_retry_loop; + + -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + CHECKSUM, + CREATED, + BYTES, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vActualFileName, -- Use actual filename with Oracle suffix + vChecksum, + vCreated, + vBytes, + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL, -- ARCH_PATH not used for single-file exports + 
pProcessName -- Process name from parameter + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'INFO', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- File not found after retries - log warning and continue without metadata + ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); + + -- Sanitize filename for fallback INSERT (function cannot be used in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Insert without metadata using theoretical filename + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vSanitizedFileName, -- Use pre-calculated sanitized filename + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL, -- ARCH_PATH not used for single-file exports + pProcessName -- Process name from parameter + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file without metadata: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vSanitizedFileName, 'INFO', vParameters); + END; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := 
ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in column list' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE EXPORT_TABLE_DATA_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vProcessedColumnList VARCHAR2(32767); + vBucketUri VARCHAR2(4000); + vCurrentCol VARCHAR2(128); + vPartitions partition_tab; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pColumnList 
=> '''||nvl(pColumnList, 'NULL')||'''' + ,'pMinDate => '''||nvl(TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Validate table, key column, and column list using shared procedure + VALIDATE_TABLE_AND_COLUMNS(vSchemaName, vTableName, vKeyColumnName, pColumnList, vParameters); + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(pColumnList, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + ENV_MANAGER.LOG_PROCESS_EVENT('Input column list: ' || NVL(pColumnList, 'NULL (building dynamic list from table metadata)'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Validate parallel degree parameter + IF pParallelDegree < 1 OR pParallelDegree > 16 THEN + vgMsgTmp := ENV_MANAGER.MSG_INVALID_PARALLEL_DEGREE || ': ' || pParallelDegree || '. 
Valid range: 1-16'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + END IF; + + -- Get partitions using shared function + vPartitions := GET_PARTITIONS(vSchemaName, vTableName, vKeyColumnName, pMinDate, pMaxDate, vParameters); + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vPartitions.COUNT || ' partitions to export with parallel degree ' || pParallelDegree, 'INFO', vParameters); + + -- Sequential processing (parallel degree = 1) + IF pParallelDegree = 1 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Using sequential processing (pParallelDegree = 1)', 'DEBUG', vParameters); + + FOR i IN 1 .. vPartitions.COUNT LOOP + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vPartitions(i).year, + pMonth => vPartitions(i).month, + pBucketUri => vBucketUri, + pFolderName => pFolderName, + pProcessedColumns => vProcessedColumnList, + pMinDate => pMinDate, + pMaxDate => pMaxDate, + pCredentialName => pCredentialName, + pFormat => 'PARQUET', + pFileBaseName => NULL, + pMaxFileSize => 104857600, + pParameters => vParameters + ); + END LOOP; + + -- Parallel processing (parallel degree > 1) + ELSE + -- Skip parallel processing if no partitions found + IF vPartitions.COUNT = 0 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('No partitions to export - skipping parallel processing', 'INFO', vParameters); + ELSE + DECLARE + vTaskName VARCHAR2(128) := 'DATA_EXPORT_TASK_' || TO_CHAR(SYSTIMESTAMP, 'YYYYMMDDHH24MISSFF'); + vChunkId NUMBER; + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); + + -- Clean up old completed chunks (>24 hours) to prevent table bloat + -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) + -- This prevents race conditions when multiple exports run simultaneously + DELETE FROM 
CT_MRDS.A_PARALLEL_EXPORT_CHUNKS
+                        WHERE STATUS = 'COMPLETED'
+                          AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY;
+                        COMMIT;
+
+                        ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters);
+
+                        -- FIX: removed a second, unconditional DELETE of all COMPLETED chunks that
+                        -- used to follow here. It contradicted the session-safety rule documented
+                        -- above: it deleted COMPLETED chunks belonging to OTHER concurrently running
+                        -- export sessions (<24h old), causing their partitions to be re-inserted as
+                        -- PENDING and re-exported. Keeping only the age-filtered cleanup matches the
+                        -- parallel CSV variant of this routine, and preserved COMPLETED chunks are
+                        -- left untouched by the MERGE below (MATCHED keeps non-FAILED status), which
+                        -- is what actually prevents re-exporting successfully completed partitions.
+
+                        -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry)
+                        FOR i IN 1 .. vPartitions.COUNT LOOP
+                            MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t
+                            USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s
+                            ON (t.CHUNK_ID = s.chunk_id)
+                            WHEN NOT MATCHED THEN
+                                INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME,
+                                        BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE,
+                                        CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS)
+                                VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName,
+                                        vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate,
+                                        pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, 'PENDING')
+                            WHEN MATCHED THEN
+                                UPDATE SET TASK_NAME = vTaskName,
+                                           STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END,
+                                           ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END;
+                        END LOOP;
+                        COMMIT;
+
+                        -- Log chunk statistics
+                        DECLARE
+                            vPendingCount NUMBER;
+                            vFailedCount NUMBER;
+                        BEGIN
+                            SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING';
+                            SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED';
+
+                            ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' ||
vFailedCount, 'INFO', vParameters); + END; + + -- Create parallel task + DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); + + -- Define chunks by number range (1 to partition count) + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + task_name => vTaskName, + table_owner => 'CT_MRDS', + table_name => 'A_PARALLEL_EXPORT_CHUNKS', + table_column => 'CHUNK_ID', + chunk_size => 1 -- Each partition is one chunk + ); + + -- Execute task in parallel + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel task: ' || vTaskName, 'DEBUG', vParameters); + + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => vTaskName, + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + language_flag => DBMS_SQL.NATIVE, + parallel_level => pParallelDegree + ); + + -- Check for errors + DECLARE + vErrorCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vErrorCount + FROM USER_PARALLEL_EXECUTE_CHUNKS + WHERE task_name = vTaskName AND status = 'PROCESSED_WITH_ERROR'; + + IF vErrorCount > 0 THEN + vgMsgTmp := 'Parallel execution completed with ' || vErrorCount || ' errors. 
Check USER_PARALLEL_EXECUTE_CHUNKS for details.'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END IF; + END; + + -- Clean up task + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + + -- Clean up chunks for THIS specific task only (session-safe) + -- CRITICAL: Use TASK_NAME filter to avoid deleting chunks from other active sessions + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel execution completed successfully', 'INFO', vParameters); + EXCEPTION + WHEN OTHERS THEN + -- Attempt to drop task on error + BEGIN + DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName); + EXCEPTION + WHEN OTHERS THEN NULL; -- Ignore drop errors + END; + + vgMsgTmp := ENV_MANAGER.MSG_PARALLEL_EXECUTION_FAILED || ': ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + END; + END IF; + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in pColumnList' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_INVALID_PARALLEL_DEGREE THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + WHEN 
ENV_MANAGER.ERR_PARALLEL_EXECUTION_FAILED THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); + WHEN OTHERS THEN + -- Log complete error details including full stack trace and backtrace + ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); + ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE)); + + END EXPORT_TABLE_DATA_BY_DATE; + + ---------------------------------------------------------------------------------------------------- + + /** + * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE + * @desc Exports data to a single CSV file with date filtering. + * Unlike EXPORT_TABLE_DATA_BY_DATE, this procedure creates one CSV file + * instead of multiple Parquet files partitioned by year/month. + * Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY. + * Allows specifying custom column list or uses T.* if pColumnList is NULL. + * Validates that all columns in pColumnList exist in the target table. + * Automatically adds 'T.' prefix to column names in pColumnList. 
+ * @example + * begin + * DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE( + * pSchemaName => 'CT_MRDS', + * pTableName => 'MY_TABLE', + * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', + * pBucketArea => 'DATA', + * pFolderName => 'exports', + * pFileName => 'my_export.csv', + * pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional + * pMinDate => DATE '2024-01-01', + * pMaxDate => SYSDATE + * ); + * end; + **/ + PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE ( + pSchemaName IN VARCHAR2, + pTableName IN VARCHAR2, + pKeyColumnName IN VARCHAR2, + pBucketArea IN VARCHAR2, + pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 DEFAULT NULL, + pColumnList IN VARCHAR2 default NULL, + pMinDate IN DATE default DATE '1900-01-01', + pMaxDate IN DATE default SYSDATE, + pParallelDegree IN NUMBER default 1, + pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, + pRegisterExport IN BOOLEAN default FALSE, + pProcessName IN VARCHAR2 default 'DATA_EXPORTER', + pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName + ) + IS + vTableName VARCHAR2(128); + vSchemaName VARCHAR2(128); + vKeyColumnName VARCHAR2(128); + vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE; + vFileBaseName VARCHAR2(4000); + vFileExtension VARCHAR2(10); + vProcessedColumnList VARCHAR2(32767); + vBucketUri VARCHAR2(4000); + vCurrentCol VARCHAR2(128); + vPartitions partition_tab; + vSourceFileReceivedKey NUMBER; + vFileName VARCHAR2(1000); + vFileUri VARCHAR2(4000); + -- Variables for A_SOURCE_FILE_CONFIG lookup + vSourceKey VARCHAR2(100); + vTableId VARCHAR2(200); + vConfigKey NUMBER := -1; + vSlashPos1 NUMBER; + vSlashPos2 NUMBER; + + BEGIN + vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||'''' + ,'pTableName => '''||nvl(pTableName, 'NULL')||'''' + ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' + ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' + ,'pFolderName => 
'''||nvl(pFolderName, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 'NULL')||'''' + ,'pColumnList => '''||nvl(pColumnList, 'NULL')||'''' + ,'pMinDate => '''||nvl(TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||'''' + ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||'''' + ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' + ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' + ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||'''' + )); + ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); + + -- Get bucket URI based on bucket area using FILE_MANAGER function + vBucketUri := FILE_MANAGER.GET_BUCKET_URI(pBucketArea); + + -- Convert table and column names to uppercase to match data dictionary + vTableName := UPPER(pTableName); + vSchemaName := UPPER(pSchemaName); + vKeyColumnName := UPPER(pKeyColumnName); + + -- Extract base filename and extension or construct default filename + IF pFileName IS NOT NULL THEN + -- Use provided filename + IF INSTR(pFileName, '.') > 0 THEN + vFileBaseName := SUBSTR(pFileName, 1, INSTR(pFileName, '.', -1) - 1); + vFileExtension := SUBSTR(pFileName, INSTR(pFileName, '.', -1)); + ELSE + vFileBaseName := pFileName; + vFileExtension := '.csv'; + END IF; + ELSE + -- Construct default filename: TABLENAME (without extension, will be added by worker) + vFileBaseName := UPPER(pTableName); + vFileExtension := '.csv'; + END IF; + + -- Validate table, key column, and column list using shared procedure + VALIDATE_TABLE_AND_COLUMNS(vSchemaName, vTableName, vKeyColumnName, pColumnList, vParameters); + + -- Build query with TO_CHAR for date columns (per-column format support) + vProcessedColumnList := buildQueryWithDateFormats(pColumnList, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName); + + 
ENV_MANAGER.LOG_PROCESS_EVENT('Input column list: ' || NVL(pColumnList, 'NULL (using dynamic column list)'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters); + + vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName); + + -- Validate parallel degree parameter + IF pParallelDegree < 1 OR pParallelDegree > 16 THEN + vgMsgTmp := ENV_MANAGER.MSG_INVALID_PARALLEL_DEGREE || ': ' || pParallelDegree || '. Valid range: 1-16'; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + END IF; + + -- Get partitions using shared function + vPartitions := GET_PARTITIONS(vSchemaName, vTableName, vKeyColumnName, pMinDate, pMaxDate, vParameters); + + ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vPartitions.COUNT || ' year/month combinations to export', 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Date range: ' || TO_CHAR(pMinDate, 'YYYY-MM-DD HH24:MI:SS') || ' to ' || TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Parallel degree: ' || pParallelDegree, 'INFO', vParameters); + + -- Sequential processing (parallel degree = 1) + IF pParallelDegree = 1 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('Using sequential processing (pParallelDegree = 1)', 'DEBUG', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + EXPORT_SINGLE_PARTITION( + pSchemaName => vSchemaName, + pTableName => vTableName, + pKeyColumnName => vKeyColumnName, + pYear => vPartitions(i).year, + pMonth => vPartitions(i).month, + pBucketUri => vBucketUri, + pFolderName => pFolderName, + pProcessedColumns => vProcessedColumnList, + pMinDate => pMinDate, + pMaxDate => pMaxDate, + pCredentialName => pCredentialName, + pFormat => 'CSV', + pFileBaseName => vFileBaseName, + pMaxFileSize => pMaxFileSize, + pParameters => vParameters + ); + END LOOP; + + -- Parallel processing (parallel degree > 1) + ELSE + -- Skip parallel processing if no partitions found + IF vPartitions.COUNT = 0 THEN + ENV_MANAGER.LOG_PROCESS_EVENT('No partitions to export - skipping parallel CSV processing', 'INFO', vParameters); + ELSE + DECLARE + vTaskName VARCHAR2(128) := 'DATA_CSV_EXPORT_TASK_' || TO_CHAR(SYSTIMESTAMP, 'YYYYMMDDHH24MISSFF'); + vChunkId NUMBER; + BEGIN + ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters); + + -- Clean up old completed chunks (>24 hours) to prevent table bloat + -- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks) + -- This prevents race conditions when multiple CSV exports run simultaneously + DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS + WHERE STATUS = 'COMPLETED' + AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY; + COMMIT; + + ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters); + + -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) + FOR i IN 1 .. 
vPartitions.COUNT LOOP + MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t + USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id) + WHEN NOT MATCHED THEN + INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, + BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, + CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS) + VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName, + vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, + pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, 'PENDING') + WHEN MATCHED THEN + UPDATE SET TASK_NAME = vTaskName, + STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; + END LOOP; + COMMIT; + + -- Log chunk statistics + DECLARE + vPendingCount NUMBER; + vFailedCount NUMBER; + BEGIN + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; + SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + END; + + -- Create parallel task + DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); + + -- Define chunks by number range (1 to partition count) + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + task_name => vTaskName, + table_owner => 'CT_MRDS', + table_name => 'A_PARALLEL_EXPORT_CHUNKS', + table_column => 'CHUNK_ID', + chunk_size => 1 -- Each partition is one chunk + ); + + -- Execute task in parallel + ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel CSV export task: ' || vTaskName, 'DEBUG', vParameters); + + DBMS_PARALLEL_EXECUTE.RUN_TASK( + task_name => 
vTaskName,
+                        sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;',
+                        language_flag => DBMS_SQL.NATIVE,
+                        parallel_level => pParallelDegree
+                    );
+
+                    -- Check for errors
+                    DECLARE
+                        vErrorCount NUMBER;
+                    BEGIN
+                        SELECT COUNT(*) INTO vErrorCount
+                        FROM USER_PARALLEL_EXECUTE_CHUNKS
+                        WHERE task_name = vTaskName AND status = 'PROCESSED_WITH_ERROR';
+
+                        IF vErrorCount > 0 THEN
+                            vgMsgTmp := 'Parallel CSV export completed with ' || vErrorCount || ' errors. Check USER_PARALLEL_EXECUTE_CHUNKS for details.';
+                            ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters);
+                            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp);
+                        END IF;
+                    END;
+
+                    -- Clean up task
+                    DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName);
+
+                    -- Clean up chunks for THIS specific task only (session-safe)
+                    -- CRITICAL: Use TASK_NAME filter to avoid deleting chunks from other active CSV sessions
+                    DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = vTaskName;
+                    COMMIT;
+
+                    ENV_MANAGER.LOG_PROCESS_EVENT('Parallel CSV execution completed successfully', 'INFO', vParameters);
+                EXCEPTION
+                    WHEN OTHERS THEN
+                        -- Attempt to drop task on error
+                        BEGIN
+                            DBMS_PARALLEL_EXECUTE.DROP_TASK(task_name => vTaskName);
+                        EXCEPTION
+                            WHEN OTHERS THEN NULL; -- Ignore drop errors
+                        END;
+
+                        vgMsgTmp := ENV_MANAGER.MSG_PARALLEL_EXECUTION_FAILED || ': ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE;
+                        ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters);
+                        RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp);
+                END;
+            END IF;
+        END IF;
+
+        -- NOTE(review): a comment previously here claimed file registration is handled
+        -- by EXPORT_SINGLE_PARTITION when pRegisterExport=TRUE. That is not what this
+        -- code does: EXPORT_SINGLE_PARTITION is never passed pRegisterExport (see its
+        -- call signature above). Registration is actually performed by the
+        -- IF pRegisterExport block below, once per exported partition file, after the
+        -- sequential/parallel export has completed.
+
+        -- Register exported files to A_SOURCE_FILE_RECEIVED if requested (after successful export)
+        IF pRegisterExport THEN
+            -- Lookup A_SOURCE_FILE_CONFIG_KEY based on
pFolderName parsing + -- Format: {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID} + -- Example: 'ODS/CSDB/CSDB_DEBT_DAILY' -> SOURCE_KEY='CSDB', TABLE_ID='CSDB_DEBT_DAILY' + + -- Parse pFolderName to extract SOURCE_KEY and TABLE_ID + vSlashPos1 := INSTR(pFolderName, '/', 1, 1); -- First '/' position + vSlashPos2 := INSTR(pFolderName, '/', 1, 2); -- Second '/' position + + IF vSlashPos1 > 0 AND vSlashPos2 > 0 THEN + -- Extract segment 2 (SOURCE_KEY) and segment 3 (TABLE_ID) + vSourceKey := SUBSTR(pFolderName, vSlashPos1 + 1, vSlashPos2 - vSlashPos1 - 1); + vTableId := SUBSTR(pFolderName, vSlashPos2 + 1); + + -- Find configuration based on SOURCE_KEY and TABLE_ID + BEGIN + SELECT A_SOURCE_FILE_CONFIG_KEY + INTO vConfigKey + FROM CT_MRDS.A_SOURCE_FILE_CONFIG + WHERE A_SOURCE_KEY = vSourceKey + AND TABLE_ID = vTableId + AND SOURCE_FILE_TYPE = 'INPUT' + AND ROWNUM = 1; + + ENV_MANAGER.LOG_PROCESS_EVENT('Found config key: ' || vConfigKey || ' for SOURCE=' || vSourceKey || ', TABLE=' || vTableId, 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('No config found for SOURCE=' || vSourceKey || ', TABLE=' || vTableId || ' - using default (-1)', 'INFO', vParameters); + END; + ELSE + -- Cannot parse folder name - use default + vConfigKey := -1; + ENV_MANAGER.LOG_PROCESS_EVENT('Cannot parse pFolderName: ' || pFolderName || ' - using default (-1)', 'WARNING', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Registering ' || vPartitions.COUNT || ' exported files to A_SOURCE_FILE_RECEIVED with config key: ' || vConfigKey, 'INFO', vParameters); + + FOR i IN 1 .. 
vPartitions.COUNT LOOP + -- Construct filename and URI for this partition + vFileName := NVL(vFileBaseName, UPPER(REPLACE(vTableName, vSchemaName || '.', ''))) || '_' || vPartitions(i).year || vPartitions(i).month || '.csv'; + vFileUri := vBucketUri || CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || sanitizeFilename(vFileName); + + -- Get file metadata from OCI bucket (CHECKSUM, CREATED, BYTES) with retry logic + DECLARE + vChecksum VARCHAR2(128); + vCreated TIMESTAMP WITH TIME ZONE; + vBytes NUMBER; + vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix + vSanitizedFileName VARCHAR2(1000); + vRetryCount NUMBER := 0; + vMaxRetries NUMBER := 1; -- One retry after initial attempt + vRetryDelay NUMBER := 2; -- 2 seconds delay + BEGIN + -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) + -- Example: LEGACY_DEBT_202508.csv becomes LEGACY_DEBT_202508_1_20260211T102621591769Z.csv + vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i'); + + -- Try to get file metadata with retry logic + <> + LOOP + BEGIN + SELECT object_name, checksum, created, bytes + INTO vActualFileName, vChecksum, vCreated, vBytes + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => pCredentialName, + location_uri => vBucketUri + )) + WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' + ORDER BY created DESC, bytes DESC + FETCH FIRST 1 ROW ONLY; + + -- Extract filename only from full path (remove bucket folder prefix) + -- vActualFileName contains: 'ODS/CSDB/CSDB_DEBT/LEGACY_DEBT_202508_1_20260211T111341375171Z.csv' + -- Extract only: 'LEGACY_DEBT_202508_1_20260211T111341375171Z.csv' + vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1); + + -- Success - exit retry 
loop + EXIT metadata_retry_loop; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vRetryCount := vRetryCount + 1; + + IF vRetryCount <= vMaxRetries THEN + -- Log retry attempt + ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); + + -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) + DBMS_SESSION.SLEEP(vRetryDelay); + ELSE + -- Max retries exceeded - re-raise exception + RAISE; + END IF; + END; + END LOOP metadata_retry_loop; + + -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + CHECKSUM, + CREATED, + BYTES, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup + vActualFileName, -- Use actual filename with Oracle suffix + vChecksum, + vCreated, + vBytes, + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for CSV exports + NULL, -- PARTITION_MONTH not used for CSV exports + NULL, -- ARCH_PATH not used for CSV exports + pProcessName -- Process name from parameter + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- File not found after retries - log warning and continue without metadata + ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); + + -- Sanitize filename for fallback INSERT (function cannot be used in SQL) + vSanitizedFileName := 
sanitizeFilename(vFileName); + + -- Insert without metadata + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_PATH, + PROCESS_NAME + ) VALUES ( + vSourceFileReceivedKey, + vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup + vSanitizedFileName, -- Fallback: use theoretical filename if actual not found + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for CSV exports + NULL, -- PARTITION_MONTH not used for CSV exports + NULL, -- ARCH_PATH not used for CSV exports + pProcessName -- Process name from parameter + ); + END; + END LOOP; + + COMMIT; + ENV_MANAGER.LOG_PROCESS_EVENT('Successfully registered all ' || vPartitions.COUNT || ' files', 'INFO', vParameters); + END IF; + + ENV_MANAGER.LOG_PROCESS_EVENT('Export completed successfully for ' || vPartitions.COUNT || ' files', 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); + + EXCEPTION + WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS ||': '||vTableName; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_COLUMN_NOT_EXISTS THEN + vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in pColumnList' ELSE '' END; + ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); + WHEN ENV_MANAGER.ERR_INVALID_PARALLEL_DEGREE THEN + RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_INVALID_PARALLEL_DEGREE, vgMsgTmp); + WHEN ENV_MANAGER.ERR_PARALLEL_EXECUTION_FAILED THEN + 
RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_PARALLEL_EXECUTION_FAILED, vgMsgTmp); -- NOTE(review): vgMsgTmp is not assigned in this handler (nor in the ERR_INVALID_PARALLEL_DEGREE one) - confirm it is populated before the exception propagates here
+        WHEN OTHERS THEN
+            -- Log complete error details including full stack trace and backtrace
+            ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER');
+            ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> SQLCODE), 'ERROR', vParameters);
+            RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNKNOWN, ENV_MANAGER.GET_ERROR_STACK(pFormat => 'OUTPUT', pCode=> SQLCODE));
+
+    END EXPORT_TABLE_DATA_TO_CSV_BY_DATE;
+
+    ----------------------------------------------------------------------------------------------------
+    -- VERSION MANAGEMENT FUNCTIONS
+    ----------------------------------------------------------------------------------------------------
+
+    /**
+     * Returns the PACKAGE_VERSION constant declared in the package specification.
+     **/
+    FUNCTION GET_VERSION RETURN VARCHAR2 IS
+    BEGIN
+        RETURN PACKAGE_VERSION;
+    END GET_VERSION;
+
+    ----------------------------------------------------------------------------------------------------
+
+    /**
+     * Returns formatted build information (version, build date, author),
+     * delegating the formatting to ENV_MANAGER.GET_PACKAGE_VERSION_INFO.
+     **/
+    FUNCTION GET_BUILD_INFO RETURN VARCHAR2 IS
+    BEGIN
+        RETURN ENV_MANAGER.GET_PACKAGE_VERSION_INFO(
+            pPackageName => 'DATA_EXPORTER',
+            pVersion => PACKAGE_VERSION,
+            pBuildDate => PACKAGE_BUILD_DATE,
+            pAuthor => PACKAGE_AUTHOR
+        );
+    END GET_BUILD_INFO;
+
+    ----------------------------------------------------------------------------------------------------
+
+    /**
+     * Returns the VERSION_HISTORY constant formatted by ENV_MANAGER.FORMAT_VERSION_HISTORY.
+     **/
+    FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2 IS
+    BEGIN
+        RETURN ENV_MANAGER.FORMAT_VERSION_HISTORY(
+            pPackageName => 'DATA_EXPORTER',
+            pVersionHistory => VERSION_HISTORY
+        );
+    END GET_VERSION_HISTORY;
+
+    ----------------------------------------------------------------------------------------------------
+
+END;
+
+/
diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkg
new file mode 100644
index 0000000..6b6a4b8
--- /dev/null
+++ 
b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/DATA_EXPORTER.pkg
@@ -0,0 +1,239 @@
+create or replace PACKAGE CT_MRDS.DATA_EXPORTER
+AUTHID CURRENT_USER
+AS
+    /**
+     * Data Export Package: Provides comprehensive data export capabilities to various formats (CSV, Parquet)
+     * with support for cloud storage integration via Oracle Cloud Infrastructure (OCI).
+     * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function
+     * which returns documentation text for confluence page (to Copy-Paste it).
+     **/
+
+    -- Package Version Information
+    PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.9.0';
+    PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-13 14:00:00';
+    PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski';
+
+    -- Version History (last 3-5 changes)
+    VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
+        'v2.9.0 (2026-02-13): Added pProcessName parameter to EXPORT_TABLE_DATA and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures for process tracking in A_SOURCE_FILE_RECEIVED table.' || CHR(10) ||
+        'v2.8.1 (2026-02-12): FIX query in EXPORT_TABLE_DATA - removed A_LOAD_HISTORY join to ensure single file output (simple SELECT).' || CHR(10) ||
+        'v2.8.0 (2026-02-12): MAJOR REFACTOR - EXPORT_TABLE_DATA now exports to single CSV file instead of partitioning by key values. Added pFileName parameter.' || CHR(10) ||
+        'v2.7.5 (2026-02-11): Added pRegisterExport parameter to EXPORT_TABLE_DATA procedure. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) ||
+        'v2.7.4 (2026-02-11): ACTUAL FILENAME STORAGE - Store real filename with Oracle suffix in SOURCE_FILE_NAME instead of theoretical filename.' || CHR(10);
+
+    -- CR+LF line break used when composing multi-line messages
+    cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10);
+    -- Scratch buffer for building error messages in the package body
+    vgMsgTmp VARCHAR2(32000);
+
+    ---------------------------------------------------------------------------------------------------------------------------
+    -- TYPE DEFINITIONS FOR PARTITION HANDLING
+    ---------------------------------------------------------------------------------------------------------------------------
+
+    /**
+     * Record type for year/month partition information
+     **/
+    TYPE partition_rec IS RECORD (
+        year VARCHAR2(4),
+        month VARCHAR2(2)
+    );
+
+    /**
+     * Table type for collection of partition records
+     **/
+    TYPE partition_tab IS TABLE OF partition_rec;
+
+    ---------------------------------------------------------------------------------------------------------------------------
+    -- INTERNAL PARALLEL PROCESSING CALLBACK
+    ---------------------------------------------------------------------------------------------------------------------------
+
+    /**
+     * @name EXPORT_PARTITION_PARALLEL
+     * @desc Internal callback procedure for DBMS_PARALLEL_EXECUTE.
+     *       Processes single partition (year/month) chunk in parallel task.
+     *       Called by DBMS_PARALLEL_EXECUTE framework for each chunk.
+     *       This procedure is PUBLIC because DBMS_PARALLEL_EXECUTE requires it,
+     *       but should NOT be called directly by external code.
+     * @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table)
+     * @param pEndId - Chunk end ID (same as pStartId for single-row chunks)
+     **/
+    PROCEDURE EXPORT_PARTITION_PARALLEL (
+        pStartId IN NUMBER,
+        pEndId IN NUMBER
+    );
+
+    ---------------------------------------------------------------------------------------------------------------------------
+    -- MAIN EXPORT PROCEDURES
+    ---------------------------------------------------------------------------------------------------------------------------
+
+    /**
+     * @name EXPORT_TABLE_DATA
+     * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
+     *       Exports data into single CSV file on OCI infrastructure.
+     *       pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
+     *       Supports template table for column order and per-column date formatting.
+     *       When pRegisterExport=TRUE, successfully exported file is registered in:
+     *       - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata)
+     * @param pFileName - Optional filename (e.g., 'export.csv'). NULL = auto-generate from table name
+     * @param pTemplateTableName - Optional template table (SCHEMA.TABLE or TABLE) for:
+     *        - Column order control (template defines CSV structure)
+     *        - Per-column date formatting via FILE_MANAGER.GET_DATE_FORMAT
+     *        - NULL = use source table columns in natural order
+     * @param pMaxFileSize - Maximum file size in bytes (default 104857600 = 100MB, min 10MB, max 1GB)
+     * @param pRegisterExport - When TRUE, registers exported CSV file in A_SOURCE_FILE_RECEIVED table
+     * @param pProcessName - Process name stored in PROCESS_NAME column (default 'DATA_EXPORTER')
+     * @example
+     *    begin
+     *        DATA_EXPORTER.EXPORT_TABLE_DATA(
+     *            pSchemaName => 'CT_MRDS',
+     *            pTableName => 'MY_TABLE',
+     *            pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
+     *            pBucketArea => 'DATA',
+     *            pFolderName => 'csv_exports',
+     *            pFileName => 'my_export.csv', -- Optional
+     *            pTemplateTableName => 'CT_ET_TEMPLATES.MY_TEMPLATE', -- Optional
+     *            pMaxFileSize => 104857600, -- Optional, default 100MB
+     *            pRegisterExport => TRUE -- Optional, default FALSE
+     *        );
+     *    end;
+     **/
+    PROCEDURE EXPORT_TABLE_DATA (
+        pSchemaName IN VARCHAR2,
+        pTableName IN VARCHAR2,
+        pKeyColumnName IN VARCHAR2,
+        pBucketArea IN VARCHAR2,
+        pFolderName IN VARCHAR2,
+        pFileName IN VARCHAR2 default NULL,
+        pTemplateTableName IN VARCHAR2 default NULL,
+        pMaxFileSize IN NUMBER default 104857600,
+        pRegisterExport IN BOOLEAN default FALSE,
+        pProcessName IN VARCHAR2 default 'DATA_EXPORTER',
+        pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
+    );
+
+
+
+    /**
+     * @name EXPORT_TABLE_DATA_BY_DATE
+     * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
+     *       Exports data into PARQUET files on OCI infrastructure.
+     *       Each YEAR_MONTH pair goes to separate file (implicit partitioning).
+     *       Allows specifying custom column list or uses T.* if pColumnList is NULL.
+     *       Validates that all columns in pColumnList exist in the target table.
+     *       Automatically adds 'T.' prefix to column names in pColumnList.
+     *       Supports parallel partition processing via pParallelDegree parameter (default 1, range 1-16).
+     *       pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
+     * @example
+     *    begin
+     *        DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE(
+     *            pSchemaName => 'CT_MRDS',
+     *            pTableName => 'MY_TABLE',
+     *            pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
+     *            pBucketArea => 'DATA',
+     *            pFolderName => 'parquet_exports',
+     *            pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional
+     *            pMinDate => DATE '2024-01-01',
+     *            pMaxDate => SYSDATE,
+     *            pParallelDegree => 8 -- Optional, default 1, range 1-16
+     *        );
+     *    end;
+     **/
+    PROCEDURE EXPORT_TABLE_DATA_BY_DATE (
+        pSchemaName IN VARCHAR2,
+        pTableName IN VARCHAR2,
+        pKeyColumnName IN VARCHAR2,
+        pBucketArea IN VARCHAR2,
+        pFolderName IN VARCHAR2,
+        pColumnList IN VARCHAR2 default NULL,
+        pMinDate IN DATE default DATE '1900-01-01',
+        pMaxDate IN DATE default SYSDATE,
+        pParallelDegree IN NUMBER default 1,
+        pTemplateTableName IN VARCHAR2 default NULL,
+        pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
+    );
+
+
+
+    /**
+     * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE
+     * @desc Exports data to separate CSV files partitioned by year and month.
+     *       Creates one CSV file for each year/month combination found in the data.
+     *       Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE,
+     *       but exports to CSV format instead of Parquet.
+     *       Supports parallel partition processing via pParallelDegree parameter (1-16).
+     *       File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL)
+     *       pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
+     *       When pRegisterExport=TRUE, successfully exported files are registered in:
+     *       - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata)
+     * @param pProcessName - Process name stored in PROCESS_NAME column (default 'DATA_EXPORTER')
+     * @example
+     *    begin
+     *        -- With custom filename
+     *        DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
+     *            pSchemaName => 'CT_MRDS',
+     *            pTableName => 'MY_TABLE',
+     *            pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
+     *            pBucketArea => 'DATA',
+     *            pFolderName => 'exports',
+     *            pFileName => 'my_export.csv',
+     *            pMinDate => DATE '2024-01-01',
+     *            pMaxDate => SYSDATE,
+     *            pParallelDegree => 8, -- Optional, default 1, range 1-16
+     *            pRegisterExport => TRUE -- Optional, default FALSE, registers to A_SOURCE_FILE_RECEIVED
+     *        );
+     *
+     *        -- With auto-generated filename (based on table name only)
+     *        DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
+     *            pSchemaName => 'OU_TOP',
+     *            pTableName => 'AGGREGATED_ALLOTMENT',
+     *            pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
+     *            pBucketArea => 'ARCHIVE',
+     *            pFolderName => 'exports',
+     *            pMinDate => DATE '2025-09-01',
+     *            pMaxDate => DATE '2025-09-17',
+     *            pRegisterExport => TRUE -- Registers each export to A_SOURCE_FILE_RECEIVED table
+     *        );
+     *        -- This will create files like: AGGREGATED_ALLOTMENT_202509.csv, etc.
+     *    end;
+     **/
+    PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE (
+        pSchemaName IN VARCHAR2,
+        pTableName IN VARCHAR2,
+        pKeyColumnName IN VARCHAR2,
+        pBucketArea IN VARCHAR2,
+        pFolderName IN VARCHAR2,
+        pFileName IN VARCHAR2 DEFAULT NULL,
+        pColumnList IN VARCHAR2 default NULL,
+        pMinDate IN DATE default DATE '1900-01-01',
+        pMaxDate IN DATE default SYSDATE,
+        pParallelDegree IN NUMBER default 1,
+        pTemplateTableName IN VARCHAR2 default NULL,
+        pMaxFileSize IN NUMBER default 104857600,
+        pRegisterExport IN BOOLEAN default FALSE,
+        pProcessName IN VARCHAR2 default 'DATA_EXPORTER',
+        pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
+    );
+
+    ---------------------------------------------------------------------------------------------------------------------------
+    -- VERSION MANAGEMENT FUNCTIONS
+    ---------------------------------------------------------------------------------------------------------------------------
+
+    /**
+     * Returns the current package version number
+     * return: Version string in format X.Y.Z (e.g., '2.1.0')
+     **/
+    FUNCTION GET_VERSION RETURN VARCHAR2;
+
+    /**
+     * Returns comprehensive build information including version, date, and author
+     * return: Formatted string with complete build details
+     **/
+    FUNCTION GET_BUILD_INFO RETURN VARCHAR2;
+
+    /**
+     * Returns the version history with recent changes
+     * return: Multi-line string with version history
+     **/
+    FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2;
+
+END;
+
+/
diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkb
new file mode 100644
index 0000000..856d449
--- /dev/null
+++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkb
@@ -0,0 +1,1171 @@
+create or replace PACKAGE BODY CT_MRDS.ENV_MANAGER
+AS
+
+    
----------------------------------------------------------------------------------------------------
+
+    /**
+     * Populates the package-level Errors collection, mapping each CODE_* error
+     * number (-20001 .. -20999, declared in the package specification) to its
+     * MSG_* text so that GET_ERROR_MESSAGE can resolve codes at runtime.
+     * Must be kept in sync with the CODE_*/MSG_* constants in the spec.
+     **/
+    PROCEDURE INIT_ERRORS IS
+    BEGIN
+        Errors(CODE_EMPTY_FILEURI_AND_RECKEY) := Error_Record(CODE_EMPTY_FILEURI_AND_RECKEY, MSG_EMPTY_FILEURI_AND_RECKEY); -- -20001
+        Errors(CODE_NO_CONFIG_MATCH_FOR_FILEURI) := Error_Record(CODE_NO_CONFIG_MATCH_FOR_FILEURI, MSG_NO_CONFIG_MATCH_FOR_FILEURI); -- -20002
+        Errors(CODE_MULTIPLE_MATCH_FOR_SRCFILE) := Error_Record(CODE_MULTIPLE_MATCH_FOR_SRCFILE, MSG_MULTIPLE_MATCH_FOR_SRCFILE); -- -20003
+        Errors(CODE_MISSING_COLUMN_DATE_FORMAT) := Error_Record(CODE_MISSING_COLUMN_DATE_FORMAT, MSG_MISSING_COLUMN_DATE_FORMAT); -- -20004
+        Errors(CODE_MULTIPLE_COLUMN_DATE_FORMAT) := Error_Record(CODE_MULTIPLE_COLUMN_DATE_FORMAT, MSG_MULTIPLE_COLUMN_DATE_FORMAT); -- -20005
+        Errors(CODE_DIDNT_GET_LOAD_OPERATION_ID) := Error_Record(CODE_DIDNT_GET_LOAD_OPERATION_ID, MSG_DIDNT_GET_LOAD_OPERATION_ID); -- -20006
+        Errors(CODE_NO_CONFIG_FOR_RECEIVED_FILE) := Error_Record(CODE_NO_CONFIG_FOR_RECEIVED_FILE, MSG_NO_CONFIG_FOR_RECEIVED_FILE); -- -20007
+        Errors(CODE_MULTI_CONFIG_FOR_RECEIVED_FILE) := Error_Record(CODE_MULTI_CONFIG_FOR_RECEIVED_FILE, MSG_MULTI_CONFIG_FOR_RECEIVED_FILE); -- -20008
+        Errors(CODE_FILE_NOT_FOUND_ON_CLOUD) := Error_Record(CODE_FILE_NOT_FOUND_ON_CLOUD, MSG_FILE_NOT_FOUND_ON_CLOUD); -- -20009
+        Errors(CODE_FILE_VALIDATION_FAILED) := Error_Record(CODE_FILE_VALIDATION_FAILED, MSG_FILE_VALIDATION_FAILED); -- -20010
+        Errors(CODE_EXCESS_COLUMNS_DETECTED) := Error_Record(CODE_EXCESS_COLUMNS_DETECTED, MSG_EXCESS_COLUMNS_DETECTED); -- -20011
+        Errors(CODE_NO_CONFIG_MATCH) := Error_Record(CODE_NO_CONFIG_MATCH, MSG_NO_CONFIG_MATCH); -- -20012
+        Errors(CODE_UNKNOWN_PREFIX) := Error_Record(CODE_UNKNOWN_PREFIX, MSG_UNKNOWN_PREFIX); -- -20013
+        Errors(CODE_TABLE_NOT_EXISTS) := Error_Record(CODE_TABLE_NOT_EXISTS, MSG_TABLE_NOT_EXISTS); -- -20014
+        Errors(CODE_COLUMN_NOT_EXISTS) := Error_Record(CODE_COLUMN_NOT_EXISTS, MSG_COLUMN_NOT_EXISTS); -- -20015
+        Errors(CODE_UNSUPPORTED_DATA_TYPE) := Error_Record(CODE_UNSUPPORTED_DATA_TYPE, MSG_UNSUPPORTED_DATA_TYPE); -- -20016
+        Errors(CODE_MISSING_SOURCE_KEY) := Error_Record(CODE_MISSING_SOURCE_KEY, MSG_MISSING_SOURCE_KEY); -- -20017
+        Errors(CODE_NULL_SOURCE_FILE_CONFIG_KEY) := Error_Record(CODE_NULL_SOURCE_FILE_CONFIG_KEY, MSG_NULL_SOURCE_FILE_CONFIG_KEY); -- -20018
+        Errors(CODE_DUPLICATED_SOURCE_KEY) := Error_Record(CODE_DUPLICATED_SOURCE_KEY, MSG_DUPLICATED_SOURCE_KEY); -- -20019
+        Errors(CODE_MISSING_CONTAINER_CONFIG) := Error_Record(CODE_MISSING_CONTAINER_CONFIG, MSG_MISSING_CONTAINER_CONFIG); -- -20020
+        Errors(CODE_MULTIPLE_CONTAINER_ENTRIES) := Error_Record(CODE_MULTIPLE_CONTAINER_ENTRIES, MSG_MULTIPLE_CONTAINER_ENTRIES); -- -20021
+        Errors(CODE_WRONG_DESTINATION_PARAM) := Error_Record(CODE_WRONG_DESTINATION_PARAM, MSG_WRONG_DESTINATION_PARAM); -- -20022
+        Errors(CODE_FILE_NOT_EXISTS_ON_CLOUD) := Error_Record(CODE_FILE_NOT_EXISTS_ON_CLOUD, MSG_FILE_NOT_EXISTS_ON_CLOUD); -- -20023
+        Errors(CODE_FILE_ALREADY_REGISTERED) := Error_Record(CODE_FILE_ALREADY_REGISTERED, MSG_FILE_ALREADY_REGISTERED); -- -20024
+        Errors(CODE_WRONG_DATE_TIMESTAMP_FORMAT) := Error_Record(CODE_WRONG_DATE_TIMESTAMP_FORMAT, MSG_WRONG_DATE_TIMESTAMP_FORMAT); -- -20025
+        Errors(CODE_ENVIRONMENT_NOT_SET) := Error_Record(CODE_ENVIRONMENT_NOT_SET, MSG_ENVIRONMENT_NOT_SET); -- -20026
+        Errors(CODE_CONFIG_VARIABLE_NOT_SET) := Error_Record(CODE_CONFIG_VARIABLE_NOT_SET, MSG_CONFIG_VARIABLE_NOT_SET); -- -20027
+        Errors(CODE_NOT_INPUT_SOURCE_FILE_TYPE) := Error_Record(CODE_NOT_INPUT_SOURCE_FILE_TYPE, MSG_NOT_INPUT_SOURCE_FILE_TYPE); -- -20028
+        Errors(CODE_EXP_DATA_FOR_ARCH_FAILED) := Error_Record(CODE_EXP_DATA_FOR_ARCH_FAILED, MSG_EXP_DATA_FOR_ARCH_FAILED); -- -20029
+        Errors(CODE_RESTORE_FILE_FROM_TRASH) := Error_Record(CODE_RESTORE_FILE_FROM_TRASH, MSG_RESTORE_FILE_FROM_TRASH); -- -20030
+        Errors(CODE_CHANGE_STAT_TO_ARCHIVED_FAILED):= Error_Record(CODE_CHANGE_STAT_TO_ARCHIVED_FAILED, 
MSG_CHANGE_STAT_TO_ARCHIVED_FAILED); -- -20031
+        Errors(CODE_MOVE_FILE_TO_TRASH_FAILED) := Error_Record(CODE_MOVE_FILE_TO_TRASH_FAILED, MSG_MOVE_FILE_TO_TRASH_FAILED); -- -20032
+        Errors(CODE_DROP_EXPORTED_FILES_FAILED) := Error_Record(CODE_DROP_EXPORTED_FILES_FAILED, MSG_DROP_EXPORTED_FILES_FAILED); -- -20033
+        Errors(CODE_INVALID_BUCKET_AREA) := Error_Record(CODE_INVALID_BUCKET_AREA, MSG_INVALID_BUCKET_AREA); -- -20034
+        Errors(CODE_INVALID_PARALLEL_DEGREE) := Error_Record(CODE_INVALID_PARALLEL_DEGREE, MSG_INVALID_PARALLEL_DEGREE); -- -20110
+        Errors(CODE_PARALLEL_EXECUTION_FAILED) := Error_Record(CODE_PARALLEL_EXECUTION_FAILED, MSG_PARALLEL_EXECUTION_FAILED); -- -20111
+
+        Errors(CODE_UNKNOWN) := Error_Record(CODE_UNKNOWN, MSG_UNKNOWN); -- -20999
+
+    END INIT_ERRORS;
+
+    ----------------------------------------------------------------------------------------------------
+
+    /**
+     * Returns the default environment id configured under
+     * environment_id='default' / config_variable='environmentid'
+     * in CT_MRDS.A_FILE_MANAGER_CONFIG, or NULL when no such row exists.
+     **/
+    FUNCTION GET_DEFAULT_ENV
+    RETURN VARCHAR2
+    IS
+        vDefaultEnv CT_MRDS.a_file_manager_config.config_variable_value%TYPE;
+    BEGIN
+        select config_variable_value
+        into vDefaultEnv
+        from CT_MRDS.a_file_manager_config
+        where lower(environment_id)='default'
+        and lower(config_variable)='environmentid';
+        RETURN vDefaultEnv;
+    EXCEPTION
+        WHEN NO_DATA_FOUND THEN
+            RETURN NULL;
+    END;
+
+    ----------------------------------------------------------------------------------------------------
+
+    ----------------------------------------------------------------------------------------------------
+
+    /**
+     * Loads the configuration for environment pEnv from CT_MRDS.A_FILE_MANAGER_CONFIG
+     * (rows pivoted by config_variable) into the package-level gv* variables and
+     * derives the three object-storage bucket URIs from region/namespace/bucket names.
+     * Raises CODE_CONFIG_VARIABLE_NOT_SET when any mandatory OCI variable is missing
+     * or when no configuration row exists for pEnv at all; only logs a WARNING (and
+     * leaves gv* untouched) when merely the logging-related variables are missing.
+     * @param pEnv - environment_id whose configuration should be loaded
+     **/
+    PROCEDURE INIT_VARIABLES(
+        pEnv VARCHAR2
+    ) IS
+        vFound BOOLEAN := FALSE; -- TRUE once at least one configuration row was fetched
+    BEGIN
+        for rec in (
+            select
+                ENVIRONMENT_ID
+                ,REGION
+                ,NAMESPACE
+                ,INBOXBUCKETNAME
+                ,DATABUCKETNAME
+                ,ARCHIVEBUCKETNAME
+                ,CREDENTIALNAME
+                ,LOGGINGENABLED
+                ,MINLOGLEVEL
+                ,DEFAULTDATEFORMAT
+                ,CONSOLELOGGINGENABLED
+            from (
+                select environment_id, config_variable, config_variable_value from CT_MRDS.A_FILE_MANAGER_CONFIG
+                where environment_id=pEnv
+            )
+            pivot (
+                min(config_variable_value)
+                for config_variable in (
+                    'Region' as Region
+                    ,'NameSpace' as NameSpace
+                    ,'InboxBucketName' as InboxBucketName
+                    ,'DataBucketName' as DataBucketName
+                    ,'ArchiveBucketName' as ArchiveBucketName
+                    ,'CredentialName' as CredentialName
+                    ,'LoggingEnabled' as LoggingEnabled
+                    ,'MinLogLevel' as MinLogLevel
+                    ,'DefaultDateFormat' as DefaultDateFormat
+                    ,'ConsoleLoggingEnabled' as ConsoleLoggingEnabled)
+            )
+        ) loop
+            vFound := TRUE;
+            -- Mandatory OCI variables: fail hard when any of them is missing
+            -- (FIX: the original tested rec.NAMESPACE twice; duplicate removed)
+            if (rec.NAMESPACE is NULL
+                or rec.REGION is NULL
+                or rec.INBOXBUCKETNAME is NULL
+                or rec.DATABUCKETNAME is NULL
+                or rec.ARCHIVEBUCKETNAME is NULL
+                or rec.CREDENTIALNAME is NULL
+            ) THEN
+                vgMsgTmp := MSG_CONFIG_VARIABLE_NOT_SET
+                    ||cgBL||' '||'Details about existing Configuration Variables where environment_id='||pEnv||': '
+                    ||cgBL||' '||'-------------------------'
+                    ||cgBL||' '||'Region = '||rec.Region
+                    ||cgBL||' '||'NameSpace = '||rec.NameSpace
+                    ||cgBL||' '||'InboxBucketName = '||rec.InboxBucketName
+                    ||cgBL||' '||'DataBucketName = '||rec.DataBucketName
+                    ||cgBL||' '||'ArchiveBucketName = '||rec.ArchiveBucketName
+                    ||cgBL||' '||'CredentialName = '||rec.CredentialName
+                    ;
+                LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR');
+                RAISE_APPLICATION_ERROR(CODE_CONFIG_VARIABLE_NOT_SET, vgMsgTmp);
+
+            -- Logging-related variables: warn but keep going (defaults apply elsewhere)
+            elsif (rec.LOGGINGENABLED is NULL
+                or rec.MINLOGLEVEL is NULL
+                or rec.DEFAULTDATEFORMAT is NULL
+            ) THEN
+                vgMsgTmp := 'Missing configuration variables'
+                    ||cgBL||' '||'Details about existing Configuration Variables where environment_id='||pEnv||': '
+                    ||cgBL||' '||'-------------------------'
+                    ||cgBL||' '||'LoggingEnabled = '||rec.LoggingEnabled
+                    ||cgBL||' '||'MinLogLevel = '||rec.MinLogLevel
+                    ||cgBL||' '||'DefaultDateFormat = '||rec.DefaultDateFormat
+                    ;
+                LOG_PROCESS_EVENT(vgMsgTmp, 'WARNING');
+
+            else
+                gvNameSpace := rec.NAMESPACE;
+                gvRegion := rec.REGION;
+                gvInboxBucketName := rec.INBOXBUCKETNAME;
+                gvDataBucketName := rec.DATABUCKETNAME;
+                gvArchiveBucketName := rec.ARCHIVEBUCKETNAME;
+                gvCredentialName := rec.CREDENTIALNAME;
+                gvInboxBucketUri := 'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.INBOXBUCKETNAME||'/o/';
+                gvDataBucketUri := 'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.DATABUCKETNAME||'/o/';
+                gvArchiveBucketUri := 'https://objectstorage.'||rec.REGION||'.oraclecloud.com/n/'||rec.NAMESPACE||'/b/'||rec.ARCHIVEBUCKETNAME||'/o/';
+                gvLoggingEnabled := rec.LOGGINGENABLED;
+                gvMinLogLevel := rec.MINLOGLEVEL;
+                gvDefaultDateFormat := rec.DEFAULTDATEFORMAT;
+                -- Console logging defaults to ON when not configured
+                gvConsoleLoggingEnabled := NVL(rec.CONSOLELOGGINGENABLED, 'ON');
+            end if;
+        end loop;
+
+        -- FIX: a cursor FOR loop over an empty result set simply does not iterate -
+        -- it never raises NO_DATA_FOUND - so the "no configuration for pEnv" case
+        -- must be detected explicitly and routed through the handler below, which
+        -- was previously unreachable dead code.
+        IF NOT vFound THEN
+            RAISE NO_DATA_FOUND;
+        END IF;
+    EXCEPTION
+        WHEN NO_DATA_FOUND THEN
+            vgMsgTmp := MSG_CONFIG_VARIABLE_NOT_SET
+                ||cgBL||' '||'No configuration found for environment_id='||pEnv||' in A_FILE_MANAGER_CONFIG table';
+            LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', 'pEnv='||pEnv);
+            RAISE_APPLICATION_ERROR(CODE_CONFIG_VARIABLE_NOT_SET, vgMsgTmp);
+        WHEN OTHERS THEN
+            vgMsgTmp := 'Unexpected error while initializing variables for environment: '||pEnv
+                ||cgBL||' '||'SQLERRM: '||SQLERRM;
+            LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', 'pEnv='||pEnv);
+            RAISE;
+    END INIT_VARIABLES;
+
+    ----------------------------------------------------------------------------------------------------
+
+    FUNCTION GET_ERROR_MESSAGE(
+        pCode PLS_INTEGER
+    ) RETURN VARCHAR2
+    IS
+    BEGIN
+        RETURN Errors(pCode).message;
+    EXCEPTION
+        WHEN NO_DATA_FOUND THEN
+            LOG_PROCESS_EVENT('No error message found for pCode='||pCode , 'WARNING', 'pCode='||pCode);
+            LOG_PROCESS_EVENT('Update ENV_MANAGER package header with new code.' 
, 'WARNING', 'pCode='||pCode);
+            RETURN NULL;
+        WHEN OTHERS THEN
+            LOG_PROCESS_EVENT(MSG_UNKNOWN , 'ERROR', 'pCode='||pCode);
+            RAISE_APPLICATION_ERROR(CODE_UNKNOWN, MSG_UNKNOWN);
+    END GET_ERROR_MESSAGE;
+
+    ----------------------------------------------------------------------------------------------------
+
+    /**
+     * Builds a multi-section diagnostic string from the current exception context:
+     * SQLERRM, DBMS_UTILITY.FORMAT_ERROR_STACK and DBMS_UTILITY.FORMAT_ERROR_BACKTRACE.
+     * Must be called from inside an exception handler for these to be populated.
+     * @param pFormat - 'TABLE' returns the raw sections (for storage in a log table);
+     *                  any other value wraps them in separator lines for console output
+     * @param pCode - error code; NOTE(review): currently only referenced from the
+     *                commented-out REGEXP_REPLACE below, so it is effectively unused
+     * @param pSourceFileReceivedKey - NOTE(review): only referenced from commented-out
+     *                code (GET_DET_SOURCE_FILE_RECEIVED_INFO); effectively unused
+     * return: formatted error report, or NULL if formatting itself fails
+     **/
+    FUNCTION GET_ERROR_STACK(
+        pFormat VARCHAR2
+        ,pCode PLS_INTEGER
+        ,pSourceFileReceivedKey CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE DEFAULT NULL
+    ) RETURN VARCHAR2
+    IS
+        vFullErrorCore VARCHAR2(32000);
+        vFullErrorMsg VARCHAR2(32000);
+    BEGIN
+-- vgErrorMessage := SQLERRM|| cgBL;
+-- vgErrorStack := DBMS_UTILITY.FORMAT_ERROR_STACK;
+-- vgErrorBacktrace := DBMS_UTILITY.FORMAT_ERROR_BACKTRACE;
+        vFullErrorCore :='Error Message:'
+            ||cgBL|| SQLERRM|| cgBL
+            ||'-------------------------------------------------------'
+            ||cgBL||'Error Stack:'
+            ||cgBL|| DBMS_UTILITY.FORMAT_ERROR_STACK
+            ||'-------------------------------------------------------'
+            ||cgBL||'Error Backtrace:'
+            ||cgBL|| DBMS_UTILITY.FORMAT_ERROR_BACKTRACE;
+-- vFullErrorCore := REGEXP_REPLACE (vFullErrorCore, pCode||': ', pCode||': '||GET_ERROR_MESSAGE(pCode) , 1, 1);
+        IF (pFormat = 'TABLE') THEN
+            vFullErrorMsg := vFullErrorCore;
+        ELSE
+            vFullErrorMsg := cgBL||'------------------------------------------------------+'
+                ||cgBL||vFullErrorCore
+                ||'------------------------------------------------------+';
+        END IF;
+-- IF pSourceFileReceivedKey is not null THEN
+-- vFullErrorMsg := vFullErrorMsg ||cgBL||GET_DET_SOURCE_FILE_RECEIVED_INFO(pSourceFileReceivedKey,1,1,1);
+-- END IF;
+
+        RETURN vFullErrorMsg;
+    EXCEPTION
+        WHEN OTHERS THEN
+            -- Never let diagnostics formatting mask the original error
+            LOG_PROCESS_EVENT(MSG_UNKNOWN , 'ERROR', 'pFormat='||pFormat);
+            RETURN NULL;
+    END GET_ERROR_STACK;
+
+    ----------------------------------------------------------------------------------------------------
+
+    /**
+     * Joins a list of pre-rendered 'name => value' parameter strings into one
+     * comma+CRLF separated string for logging; the quoted placeholder 'NULL'
+     * is unquoted for readability. Returns NULL (after a WARNING) on any failure.
+     **/
+    FUNCTION FORMAT_PARAMETERS(
+        pParameterList SYS.ODCIVARCHAR2LIST
+    ) RETURN VARCHAR2 IS
+        vResult VARCHAR2(10000);
+    BEGIN
+        FOR i 
IN 1 .. pParameterList.COUNT LOOP
+-- dbms_output.put_line('pParameterList(i): '||pParameterList(i));
+            -- Separator ' ,' + CRLF between entries; none after the last one
+            if i < pParameterList.COUNT then vResult := vResult || replace(pParameterList(i), '''NULL''', 'NULL') ||' ,'|| cgBL;
+            else vResult := vResult || replace(pParameterList(i), '''NULL''', 'NULL');
+            end if;
+        END LOOP;
+        RETURN vResult;
+    EXCEPTION
+        WHEN OTHERS THEN
+            -- Best-effort formatter: log a warning and return NULL instead of failing the caller
+            -- NOTE(review): 'formating' typo is inside a runtime log string, left as-is here
+            LOG_PROCESS_EVENT('Error while formating parameters.' , 'WARNING');
+            RETURN NULL;
+    END FORMAT_PARAMETERS;
+
+    ----------------------------------------------------------------------------------------------------
+
+
+
+    /**
+     * Writes one event row into CT_MRDS.A_PROCESS_LOG using an autonomous
+     * transaction (so logging never interferes with the caller's transaction).
+     * Suppressed entirely when gvLoggingEnabled='OFF'; events below gvMinLogLevel
+     * are dropped. The calling procedure's name and nesting depth are derived
+     * from DBMS_UTILITY.FORMAT_CALL_STACK; the name is left-padded by depth so
+     * nested calls read as an indented tree in the log. When
+     * gvConsoleLoggingEnabled='ON' the event is also echoed via DBMS_OUTPUT.
+     * @param pLogMessage - text to log
+     * @param pLogLevel - 'DEBUG' | 'INFO' | 'WARNING' | 'ERROR' (default 'ERROR')
+     * @param pParameters - formatted caller parameters stored with the message
+     * @param pProcessName - logical process name (default 'FILE_MANAGER')
+     **/
+    PROCEDURE LOG_PROCESS_EVENT (
+        pLogMessage VARCHAR2
+        ,pLogLevel VARCHAR2 DEFAULT 'ERROR'
+        ,pParameters VARCHAR2 DEFAULT NULL
+        ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER'
+    ) IS
+        PRAGMA AUTONOMOUS_TRANSACTION;
+
+        -- NOTE(review): vLoggingEnabled/vMinLogLevel are declared but never used;
+        -- the checks below read the gv* package variables directly
+        vLoggingEnabled VARCHAR2(10);
+        vMinLogLevel VARCHAR2(10);
+        vCallStack VARCHAR2(10000);
+        vProcedureName VARCHAR2(100);
+        vProcedureLevel PLS_INTEGER;
+        vTotalLines PLS_INTEGER;
+        vCurrentLine PLS_INTEGER;
+
+        -- Map of priority level
+        TYPE logLevelMap IS TABLE OF NUMBER INDEX BY VARCHAR2(10);
+        vLogLevels logLevelMap;
+
+    BEGIN
+        -- Priority logging level (higher -> more important)
+        vLogLevels('DEBUG') := 1;
+        vLogLevels('INFO') := 2;
+        vLogLevels('WARNING') := 3;
+        vLogLevels('ERROR') := 4;
+
+        -- Check if logging is turned off
+        IF gvLoggingEnabled = 'OFF' THEN
+            RETURN;
+        END IF;
+        -- Check logging level
+        -- NOTE(review): an unrecognised pLogLevel or gvMinLogLevel raises
+        -- NO_DATA_FOUND here - confirm only the four known levels are ever passed
+        IF vLogLevels(pLogLevel) < vLogLevels(gvMinLogLevel) THEN
+            RETURN;
+        END IF;
+
+        vCallStack := DBMS_UTILITY.FORMAT_CALL_STACK;
+        -- Second 'package body OWNER.PKG.PROC' entry in the stack = this logger's caller
+        vProcedureName := REGEXP_SUBSTR(vCallStack, 'package body\s+\w+\.(\w+\.\w+)', 1, 2, NULL, 1);
+        vTotalLines := REGEXP_COUNT(vCallStack, CHR(10)) + 1;
+        vCurrentLine := REGEXP_COUNT(SUBSTR(vCallStack, 1, INSTR(vCallStack, vProcedureName) - 1), CHR(10)) + 1;
+        -- Indent by two spaces per call-stack level (minus stack header lines)
+        vProcedureLevel := (vTotalLines - vCurrentLine + 1) - 3;
+        vProcedureName := LPAD(vProcedureName, LENGTH(vProcedureName) + 2*vProcedureLevel, ' ');
+
+        INSERT INTO 
CT_MRDS.A_PROCESS_LOG (guid, username, osuser, machine, module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES (guid, gvUsername, gvOsuser, gvMachine, gvModule, pProcessName, vProcedureName, pParameters, pLogLevel, pLogMessage); + + COMMIT; + + -- Also output to console for immediate visibility (if enabled) + IF gvConsoleLoggingEnabled = 'ON' THEN + DBMS_OUTPUT.PUT_LINE('[' || TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') || '] [' || pLogLevel || '] ' || vProcedureName || ': ' || pLogMessage); + END IF; + + END LOG_PROCESS_EVENT; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE LOG_PROCESS_ERROR( + pLogMessage IN VARCHAR2, + pParameters IN VARCHAR2 DEFAULT NULL, + pProcessName IN VARCHAR2 DEFAULT 'FILE_MANAGER' + ) IS + PRAGMA AUTONOMOUS_TRANSACTION; + + vCallStack VARCHAR2(32767); + vErrorStack VARCHAR2(32767); + vErrorBacktrace VARCHAR2(32767); + vAdjustedBacktrace VARCHAR2(32767); + vErrorContext VARCHAR2(4000); + vProcName VARCHAR2(100); + vProcedureLevel PLS_INTEGER; + vTotalLines PLS_INTEGER; + vCurrentLine PLS_INTEGER; + vFullErrorMessage CLOB; + vTimestamp VARCHAR2(30); + vSessionInfo VARCHAR2(1000); + + BEGIN + -- Check if logging is disabled + IF gvLoggingEnabled = 'OFF' THEN + RETURN; + END IF; + + -- Capture all available error information + vErrorStack := DBMS_UTILITY.FORMAT_ERROR_STACK; + vErrorBacktrace := DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + vCallStack := DBMS_UTILITY.FORMAT_CALL_STACK; + vTimestamp := TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS'); + + -- Capture session information for better context + vSessionInfo := 'Session ID: ' || SYS_CONTEXT('USERENV', 'SID') || + ', User: ' || SYS_CONTEXT('USERENV', 'SESSION_USER') || + ', Module: ' || SYS_CONTEXT('USERENV', 'MODULE') || + ', Client Info: ' || NVL(SYS_CONTEXT('USERENV', 'CLIENT_INFO'), 'N/A') || + ', Action: ' || NVL(SYS_CONTEXT('USERENV', 'ACTION'), 'N/A'); + + -- Build error context 
information + vErrorContext := 'Environment: ' || gvEnv || + ', Process: ' || NVL(pProcessName, 'UNKNOWN') || + ', Timestamp: ' || vTimestamp || + ', SQLCODE: ' || SQLCODE || + ', Transaction Active: ' || CASE WHEN DBMS_TRANSACTION.STEP_ID IS NOT NULL THEN 'YES' ELSE 'NO' END; + + -- Extract procedure name and nesting level from call stack + -- Always extract actual procedure name from call stack for precise error location + vProcName := REGEXP_SUBSTR(vCallStack, 'package body\s+\w+\.(\w+\.\w+)', 1, 2, NULL, 1); + + -- If we couldn't extract procedure name from call stack, use provided process name + IF vProcName IS NULL THEN + vProcName := NVL(pProcessName, 'UNKNOWN'); + END IF; + + vTotalLines := REGEXP_COUNT(vCallStack, CHR(10)) + 1; + vCurrentLine := REGEXP_COUNT(SUBSTR(vCallStack, 1, INSTR(vCallStack, vProcName) - 1), CHR(10)) + 1; + vProcedureLevel := (vTotalLines - vCurrentLine + 1) - 3; + vProcName := LPAD(vProcName, LENGTH(vProcName) + 2*vProcedureLevel, ' '); + + -- Enhance line number display to show direct _BODY.sql file line numbers + -- Since packages are now split into separate _SPEC and _BODY files, line numbers map directly + vAdjustedBacktrace := REGEXP_REPLACE(vErrorBacktrace, + 'at "CT_MRDS\.FILE_MANAGER", line ([0-9]+)', + 'at "CT_MRDS.FILE_MANAGER", line \1 (-> FILE_MANAGER_BODY.sql:line \1)', 1, 0, 'i'); + + vAdjustedBacktrace := REGEXP_REPLACE(vAdjustedBacktrace, + 'at "CT_MRDS\.ENV_MANAGER", line ([0-9]+)', + 'at "CT_MRDS.ENV_MANAGER", line \1 (-> ENV_MANAGER_BODY.sql:line \1)', 1, 0, 'i'); + + -- Build comprehensive error message with professional formatting + vFullErrorMessage := 'ERROR REPORT' || cgBL || + '-------------------------------------------------------' || cgBL || + 'ERROR SUMMARY' || cgBL || + ' Message: ' || pLogMessage || cgBL || + ' Context: ' || vErrorContext || cgBL || + '-------------------------------------------------------' || cgBL || + 'SESSION INFORMATION' || cgBL || + ' ' || vSessionInfo || cgBL || + 
'-------------------------------------------------------' || cgBL || + 'ERROR STACK (Oracle Internal)' || cgBL || + vErrorStack || + '-------------------------------------------------------' || cgBL || + 'BACKTRACE INFORMATION (Oracle Internal)' || cgBL || + vErrorBacktrace || + '-------------------------------------------------------' || cgBL || + 'CALL STACK (Execution Path)' || cgBL || + vCallStack || + '-------------------------------------------------------' || cgBL || + 'QUICK REFERENCE' || cgBL || + ' SQLCODE: ' || SQLCODE || cgBL || + ' SQLERRM: ' || SQLERRM || cgBL || + ' Timestamp: ' || vTimestamp || cgBL || + ' Parameters: ' || NVL(pParameters, 'None provided') || cgBL || + '-------------------------------------------------------'; + + -- Insert comprehensive error record into log table + -- Note: LOG_MESSAGE is VARCHAR2(4000), so we'll truncate if needed but include key info + INSERT INTO CT_MRDS.A_PROCESS_LOG (guid, username, osuser, machine, module, process_name, procedure_name, procedure_parameters, log_level, log_message) + VALUES (guid, gvUsername, gvOsuser, gvMachine, gvModule, NVL(pProcessName, 'FILE_MANAGER'), vProcName, pParameters, 'ERROR', + CASE + WHEN LENGTH(vFullErrorMessage) <= 4000 THEN vFullErrorMessage + ELSE SUBSTR(vFullErrorMessage, 1, 3950) || '... 
[TRUNCATED]'
                END);

        COMMIT;

        -- Echo the error to the console for immediate visibility (controlled by gvConsoleLoggingEnabled)
        IF gvConsoleLoggingEnabled = 'ON' THEN
            DBMS_OUTPUT.PUT_LINE('=======================================================');
            DBMS_OUTPUT.PUT_LINE('ERROR DETECTED AT: ' || vTimestamp);
            DBMS_OUTPUT.PUT_LINE('PROCEDURE: ' || NVL(vProcName, 'UNKNOWN'));
            DBMS_OUTPUT.PUT_LINE('MESSAGE: ' || pLogMessage);
            DBMS_OUTPUT.PUT_LINE('SQLCODE: ' || SQLCODE || ' | ENVIRONMENT: ' || gvEnv);
            -- Extract and show the most relevant file and line number from the backtrace
            IF INSTR(vAdjustedBacktrace, '-> ') > 0 THEN
                DBMS_OUTPUT.PUT_LINE('SOURCE FILE LOCATION: ' || REGEXP_SUBSTR(vAdjustedBacktrace, '-> [^)]+'));
            END IF;
            DBMS_OUTPUT.PUT_LINE('FULL DETAILS: Query A_PROCESS_LOG table for complete diagnostic info');
            DBMS_OUTPUT.PUT_LINE('QUERY (This Error): SELECT * FROM CT_MRDS.A_PROCESS_LOG WHERE GUID = ''' || guid || ''' ORDER BY LOG_TIMESTAMP DESC;');
            DBMS_OUTPUT.PUT_LINE('QUERY (Recent All): SELECT * FROM CT_MRDS.A_PROCESS_LOG WHERE LOG_TIMESTAMP >= SYSDATE - 1/1440 ORDER BY LOG_TIMESTAMP DESC;');
            DBMS_OUTPUT.PUT_LINE('=======================================================');
        END IF;

    END LOG_PROCESS_ERROR;

    ----------------------------------------------------------------------------------------------------

    /**
     * @name ANALYZE_VALIDATION_ERRORS
     * @desc Mines a DBMS_CLOUD validation-log table and compares the CSV structure it describes
     *       against the template table's columns (ALL_TAB_COLUMNS), producing a human-readable
     *       diagnostic report: column-count mismatches, column-order mismatches, the first
     *       data-type error found, suggested fixes, and technical references.
     *       Never raises: on any failure it returns a report describing the failure instead.
     * @param pValidationLogTable  Name of the validation log table to analyze (trusted identifier -
     *                             it is concatenated into dynamic SQL; never pass user input).
     * @param pTemplateSchema      Schema of the template table.
     * @param pTemplateTable       Name of the template table.
     * @param pCsvFileUri          URI of the CSV file (included in the report for reference only).
     **/
    FUNCTION ANALYZE_VALIDATION_ERRORS(
        pValidationLogTable VARCHAR2,
        pTemplateSchema     VARCHAR2,
        pTemplateTable      VARCHAR2,
        pCsvFileUri         VARCHAR2
    ) RETURN VARCHAR2
    IS
        vAnalysisReport   CLOB := '';
        vExpectedOrder    VARCHAR2(4000);
        vCsvOrder         VARCHAR2(4000);
        vErrorDetails     VARCHAR2(32000) := '';
        vColumnMismatch   VARCHAR2(1000);
        vErrorCount       NUMBER := 0;
        vFirstDataError   VARCHAR2(1000);
        vErrorColumn      VARCHAR2(100);
        vErrorValue       VARCHAR2(500);
        vExpectedType     VARCHAR2(100);
        vTemplateColCount NUMBER := 0;
        vCsvColCount      NUMBER := 0;
        vExcessColumns    VARCHAR2(2000);

        -- Template table columns in declared order; owner/table are re-derived from the
        -- concatenated "schema.table" string so a qualified pTemplateTable also works
        CURSOR c_template_columns IS
            SELECT COLUMN_NAME, DATA_TYPE, COLUMN_ID
              FROM ALL_TAB_COLUMNS
             WHERE OWNER = UPPER(REGEXP_SUBSTR(pTemplateSchema || '.' || pTemplateTable, '^([^.]+)'))
               AND TABLE_NAME = UPPER(REGEXP_SUBSTR(pTemplateSchema || '.' || pTemplateTable, '\.(.+)$', 1, 1, NULL, 1))
             ORDER BY COLUMN_ID;

    BEGIN
        -- Expected column order and count from the template table
        FOR rec IN c_template_columns LOOP
            IF vExpectedOrder IS NOT NULL THEN
                vExpectedOrder := vExpectedOrder || ', ';
            END IF;
            vExpectedOrder := vExpectedOrder || rec.COLUMN_NAME;
            vTemplateColCount := vTemplateColCount + 1;
        END LOOP;

        -- Mine the validation log for errors and for the CSV structure it recorded.
        -- NOTE(security): pValidationLogTable is concatenated into dynamic SQL and cannot be
        -- bound; callers must only pass trusted, internally generated table names.
        BEGIN
            EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ' || pValidationLogTable ||
                              ' WHERE record LIKE ''error processing column%'''
                INTO vErrorCount;

            IF vErrorCount > 0 THEN
                EXECUTE IMMEDIATE 'SELECT record FROM ' || pValidationLogTable ||
                                  ' WHERE record LIKE ''error processing column%'' AND ROWNUM = 1'
                    INTO vFirstDataError;

                -- Column name is embedded in the driver's message text
                vErrorColumn := REGEXP_SUBSTR(vFirstDataError, 'error processing column ([A-Z_]+)', 1, 1, NULL, 1);

                -- The rejected value usually appears in an ORA-01722 (invalid number) log line
                BEGIN
                    EXECUTE IMMEDIATE 'SELECT record FROM ' || pValidationLogTable ||
                                      ' WHERE record LIKE ''ORA-01722%'' AND ROWNUM = 1'
                        INTO vFirstDataError;
                    vErrorValue := REGEXP_SUBSTR(vFirstDataError, 'string value containing ''([^'']+)''', 1, 1, NULL, 1);
                EXCEPTION
                    WHEN NO_DATA_FOUND THEN
                        vErrorValue := 'unknown value';
                    WHEN OTHERS THEN
                        vErrorValue := 'parsing error';
                END;
            END IF;

            -- Reconstruct the CSV column order from the field-definition lines the driver logged
            BEGIN
                EXECUTE IMMEDIATE '
                    SELECT LISTAGG(
                               REGEXP_SUBSTR(record, ''^\s+([A-Z_]+)\s+'', 1, 1, NULL, 1),
                               '', ''
                           ) WITHIN GROUP (ORDER BY ROWNUM)
                      FROM ' || pValidationLogTable || '
                     WHERE record LIKE '' %CHAR%''
                       AND record NOT LIKE ''%Fields in Data Source%''
                       AND REGEXP_SUBSTR(record, ''^\s+([A-Z_]+)\s+'') IS NOT NULL'
                    INTO vCsvOrder;

                IF vCsvOrder IS NOT NULL THEN
                    vCsvColCount := REGEXP_COUNT(vCsvOrder, ',') + 1;
                END IF;
            EXCEPTION
                WHEN OTHERS THEN
                    vCsvOrder := 'Unable to determine CSV column order from validation log';
            END;

        EXCEPTION
            WHEN OTHERS THEN
                -- Keep the failure reason; it is appended to the TECHNICAL DETAILS section below
                -- (previously captured here but never surfaced in the report)
                vErrorDetails := 'Error analyzing validation log: ' || SQLERRM;
        END;

        -- Detect column order mismatch (whitespace-insensitive, case-insensitive comparison)
        IF vCsvOrder IS NOT NULL AND vExpectedOrder IS NOT NULL THEN
            IF UPPER(REPLACE(vCsvOrder, ' ', '')) != UPPER(REPLACE(vExpectedOrder, ' ', '')) THEN
                vColumnMismatch := 'YES';
            ELSE
                vColumnMismatch := 'NO';
            END IF;
        END IF;

        -- If the CSV has more columns than the template, try to name the excess ones
        IF vCsvColCount > vTemplateColCount THEN
            IF vCsvOrder IS NOT NULL THEN
                DECLARE
                    vCsvCols      SYS.ODCIVARCHAR2LIST;
                    vTemplateCols SYS.ODCIVARCHAR2LIST;
                    vExcessFound  VARCHAR2(1) := 'N';
                    i             NUMBER;
                BEGIN
                    -- Split both comma-separated lists into collections
                    SELECT TRIM(REGEXP_SUBSTR(vCsvOrder, '[^,]+', 1, LEVEL))
                      BULK COLLECT INTO vCsvCols
                      FROM DUAL
                    CONNECT BY REGEXP_SUBSTR(vCsvOrder, '[^,]+', 1, LEVEL) IS NOT NULL;

                    SELECT TRIM(REGEXP_SUBSTR(vExpectedOrder, '[^,]+', 1, LEVEL))
                      BULK COLLECT INTO vTemplateCols
                      FROM DUAL
                    CONNECT BY REGEXP_SUBSTR(vExpectedOrder, '[^,]+', 1, LEVEL) IS NOT NULL;

                    -- Any CSV column absent from the template list is "excess"
                    FOR i IN 1..vCsvCols.COUNT LOOP
                        DECLARE
                            vFoundInTemplate BOOLEAN := FALSE;
                            j                NUMBER;
                        BEGIN
                            FOR j IN 1..vTemplateCols.COUNT LOOP
                                IF UPPER(TRIM(vCsvCols(i))) = UPPER(TRIM(vTemplateCols(j))) THEN
                                    vFoundInTemplate := TRUE;
                                    EXIT;
                                END IF;
                            END LOOP;

                            IF NOT vFoundInTemplate THEN
                                IF vExcessFound = 'Y' THEN
                                    vExcessColumns := vExcessColumns || ', ';
                                END IF;
                                vExcessColumns := vExcessColumns || vCsvCols(i);
                                vExcessFound := 'Y';
                            END IF;
                        END;
                    END LOOP;
                EXCEPTION
                    WHEN OTHERS THEN
                        vExcessColumns := 'Unable to determine specific excess columns';
                END;
            END IF;
        END IF;

        -- Assemble the report: header, structure analysis, findings, solutions, references
        vAnalysisReport := 'FILE VALIDATION FAILED - DETAILED ANALYSIS' || cgBL ||
                           '=================================================' || cgBL || cgBL;

        vAnalysisReport := vAnalysisReport ||
            'COLUMN STRUCTURE ANALYSIS:' || cgBL ||
            '---------------------------------------------------' || cgBL ||
            'Template Expected Order: ' || vExpectedOrder || cgBL ||
            'Template Column Count: ' || vTemplateColCount || cgBL ||
            'CSV Detected Order: ' || NVL(vCsvOrder, 'Unknown') || cgBL ||
            'CSV Column Count: ' || vCsvColCount || cgBL || cgBL;

        IF vCsvColCount > vTemplateColCount THEN
            vAnalysisReport := vAnalysisReport ||
                'EXCESS COLUMNS DETECTED!' || cgBL ||
                'CSV file has ' || (vCsvColCount - vTemplateColCount) || ' more columns than template allows.' || cgBL;
            IF vExcessColumns IS NOT NULL THEN
                vAnalysisReport := vAnalysisReport ||
                    'Excess columns found: ' || vExcessColumns || cgBL;
            END IF;
            vAnalysisReport := vAnalysisReport || cgBL;
        END IF;

        IF vColumnMismatch = 'YES' THEN
            vAnalysisReport := vAnalysisReport ||
                'COLUMN ORDER MISMATCH DETECTED!' || cgBL ||
                'CSV columns are in different order than template expects.' || cgBL || cgBL;
        END IF;

        IF vErrorCount > 0 THEN
            vAnalysisReport := vAnalysisReport ||
                'SPECIFIC ERRORS FOUND:' || cgBL ||
                '---------------------------------------------------' || cgBL;

            IF vErrorColumn IS NOT NULL THEN
                -- Look up the data type the template expects for the failing column
                FOR rec IN c_template_columns LOOP
                    IF rec.COLUMN_NAME = vErrorColumn THEN
                        vExpectedType := rec.DATA_TYPE;
                        EXIT;
                    END IF;
                END LOOP;

                vAnalysisReport := vAnalysisReport ||
                    '1. Column ' || vErrorColumn || ': Expected ' || vExpectedType ||
                    ', received "' || NVL(vErrorValue, 'unknown value') || '" (TEXT)' || cgBL ||
                    ' → CSV position contains different data type than expected' || cgBL;
            END IF;

            vAnalysisReport := vAnalysisReport ||
                'Total validation errors found: ' || vErrorCount || cgBL || cgBL;
        END IF;

        vAnalysisReport := vAnalysisReport ||
            'SUGGESTED SOLUTIONS:' || cgBL ||
            '---------------------------------------------------' || cgBL;

        IF vCsvColCount > vTemplateColCount THEN
            vAnalysisReport := vAnalysisReport ||
                'FOR EXCESS COLUMNS:' || cgBL ||
                '• Remove extra columns from CSV file' || cgBL ||
                '• Keep only these columns in this order: ' || vExpectedOrder || cgBL;
            IF vExcessColumns IS NOT NULL THEN
                vAnalysisReport := vAnalysisReport ||
                    '• Specifically remove: ' || vExcessColumns || cgBL;
            END IF;
            vAnalysisReport := vAnalysisReport || cgBL;
        END IF;

        IF vColumnMismatch = 'YES' THEN
            vAnalysisReport := vAnalysisReport ||
                'FOR COLUMN ORDER:' || cgBL ||
                '• Reorder CSV columns to match template: ' || vExpectedOrder || cgBL ||
                '• Or update template table column order to match CSV file' || cgBL || cgBL;
        END IF;

        vAnalysisReport := vAnalysisReport ||
            'GENERAL RECOMMENDATIONS:' || cgBL ||
            '• Ensure CSV has exactly ' || vTemplateColCount || ' columns' || cgBL ||
            '• Verify column names match template table exactly' || cgBL ||
            '• Check data types in each column match expectations' || cgBL || cgBL;

        vAnalysisReport := vAnalysisReport ||
            'TECHNICAL DETAILS:' || cgBL ||
            '---------------------------------------------------' || cgBL ||
            'Validation Log Table: ' || pValidationLogTable || cgBL ||
            'Template Table: ' || pTemplateSchema || '.' || pTemplateTable || cgBL ||
            'CSV File: ' || pCsvFileUri || cgBL ||
            'Query validation details: SELECT * FROM ' || pValidationLogTable || ';' || cgBL;

        -- Surface any failure that occurred while mining the validation log (fix: was dropped)
        IF vErrorDetails IS NOT NULL THEN
            vAnalysisReport := vAnalysisReport ||
                'Log Analysis Warning: ' || vErrorDetails || cgBL;
        END IF;

        RETURN vAnalysisReport;

    EXCEPTION
        WHEN OTHERS THEN
            -- Diagnostic function must never raise; fall back to a minimal pointer report
            RETURN 'Error generating validation analysis: ' || SQLERRM || cgBL ||
                   'Validation Log Table: ' || pValidationLogTable || cgBL ||
                   'Check table manually: SELECT * FROM ' || pValidationLogTable || ';';
    END ANALYZE_VALIDATION_ERRORS;

    ----------------------------------------------------------------------------------------------------
    -- PACKAGE VERSION MANAGEMENT FUNCTIONS IMPLEMENTATION
    ----------------------------------------------------------------------------------------------------

    /**
     * @name GET_VERSION
     * @desc Returns the PACKAGE_VERSION constant declared in the package specification.
     **/
    FUNCTION GET_VERSION
        RETURN VARCHAR2
    IS
    BEGIN
        RETURN PACKAGE_VERSION;
    END GET_VERSION;

    ----------------------------------------------------------------------------------------------------

    /**
     * @name GET_BUILD_INFO
     * @desc Returns a formatted build-information string for this package (name, version,
     *       build date, author), delegating formatting to GET_PACKAGE_VERSION_INFO.
     **/
    FUNCTION GET_BUILD_INFO
        RETURN VARCHAR2
    IS
    BEGIN
        RETURN GET_PACKAGE_VERSION_INFO(
            pPackageName => 'ENV_MANAGER',
            pVersion     => PACKAGE_VERSION,
            pBuildDate   => PACKAGE_BUILD_DATE,
            pAuthor      => PACKAGE_AUTHOR
        );
    END GET_BUILD_INFO;

    ----------------------------------------------------------------------------------------------------

    /**
     * @name GET_VERSION_HISTORY
     * @desc Returns the VERSION_HISTORY constant formatted via FORMAT_VERSION_HISTORY.
     **/
    FUNCTION GET_VERSION_HISTORY
        RETURN VARCHAR2
    IS
    BEGIN
        RETURN FORMAT_VERSION_HISTORY(
            pPackageName    => 'ENV_MANAGER',
            pVersionHistory => VERSION_HISTORY
        );
    END GET_VERSION_HISTORY;

---------------------------------------------------------------------------------------------------- + + FUNCTION GET_PACKAGE_VERSION_INFO( + pPackageName VARCHAR2, + pVersion VARCHAR2, + pBuildDate VARCHAR2, + pAuthor VARCHAR2 + ) RETURN VARCHAR2 + IS + BEGIN + RETURN 'Package: ' || pPackageName || cgBL || + 'Version: ' || pVersion || cgBL || + 'Build Date: ' || pBuildDate || cgBL || + 'Author: ' || pAuthor; + END GET_PACKAGE_VERSION_INFO; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION FORMAT_VERSION_HISTORY( + pPackageName VARCHAR2, + pVersionHistory VARCHAR2 + ) RETURN VARCHAR2 + IS + BEGIN + RETURN pPackageName || ' Version History:' || cgBL || pVersionHistory; + END FORMAT_VERSION_HISTORY; + + ---------------------------------------------------------------------------------------------------- + -- PACKAGE HASH + CHANGE DETECTION FUNCTIONS IMPLEMENTATION + ---------------------------------------------------------------------------------------------------- + + FUNCTION CALCULATE_PACKAGE_HASH( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageType VARCHAR2 + ) RETURN VARCHAR2 + IS + vSourceCode CLOB; + vHash VARCHAR2(64); + vRawHash RAW(32); + BEGIN + -- Build complete source code from ALL_SOURCE using XMLAGG (no 4000 char limit) + -- CRITICAL: Cannot use LISTAGG due to VARCHAR2 limit + SELECT XMLAGG(XMLELEMENT(E, TEXT) ORDER BY LINE).GETCLOBVAL() + INTO vSourceCode + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = UPPER(pPackageType); + + -- If empty, return NULL + IF vSourceCode IS NULL OR DBMS_LOB.GETLENGTH(vSourceCode) = 0 THEN + RETURN NULL; + END IF; + + -- Calculate SHA256 hash directly from CLOB + -- DBMS_CRYPTO.HASH has overload for CLOB in Oracle 19c+ + vRawHash := DBMS_CRYPTO.HASH( + src => vSourceCode, + typ => DBMS_CRYPTO.HASH_SH256 + ); + + -- Convert to hex string + vHash := LOWER(RAWTOHEX(vRawHash)); + + RETURN 
vHash; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN NULL; + WHEN OTHERS THEN + LOG_PROCESS_ERROR('Error calculating package hash: ' || SQLERRM, + 'pPackageOwner=' || pPackageOwner || ', pPackageName=' || pPackageName); + RETURN NULL; + END CALCULATE_PACKAGE_HASH; + + ---------------------------------------------------------------------------------------------------- + + PROCEDURE TRACK_PACKAGE_VERSION( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2, + pPackageVersion VARCHAR2, + pPackageBuildDate VARCHAR2, + pPackageAuthor VARCHAR2 + ) + IS + vHashSpec VARCHAR2(64); + vHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLineCountSpec NUMBER; + vLineCountBody NUMBER; + vChangeDetected CHAR(1) := 'N'; + vChangeMessage VARCHAR2(4000); + vParameters VARCHAR2(4000); + BEGIN + vParameters := FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( + 'pPackageOwner => ''' || pPackageOwner || '''', + 'pPackageName => ''' || pPackageName || '''', + 'pPackageVersion => ''' || pPackageVersion || '''' + )); + + LOG_PROCESS_EVENT('Start TRACK_PACKAGE_VERSION', 'INFO', vParameters); + + -- Calculate current hashes + vHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get line counts + BEGIN + SELECT COUNT(*) + INTO vLineCountSpec + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = 'PACKAGE'; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vLineCountSpec := 0; + END; + + BEGIN + SELECT COUNT(*) + INTO vLineCountBody + FROM ALL_SOURCE + WHERE OWNER = UPPER(pPackageOwner) + AND NAME = UPPER(pPackageName) + AND TYPE = 'PACKAGE BODY'; + EXCEPTION + WHEN NO_DATA_FOUND THEN + vLineCountBody := 0; + END; + + -- Get last tracked version and hashes + BEGIN + SELECT PACKAGE_VERSION, SOURCE_CODE_HASH_SPEC, SOURCE_CODE_HASH_BODY + INTO vLastVersion, vLastHashSpec, vLastHashBody + FROM 
CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + + -- Check if hash changed but version didn't + IF (vHashSpec != vLastHashSpec OR NVL(vHashBody,'X') != NVL(vLastHashBody,'X')) + AND pPackageVersion = vLastVersion THEN + + vChangeDetected := 'Y'; + vChangeMessage := 'WARNING: Source code changed without version update!' || cgBL || + 'Last Version: ' || vLastVersion || cgBL || + 'Current Version: ' || pPackageVersion || cgBL; + + IF vHashSpec != vLastHashSpec THEN + vChangeMessage := vChangeMessage || + 'SPEC Changed - Hash: ' || SUBSTR(vHashSpec, 1, 16) || '... (was: ' || + SUBSTR(vLastHashSpec, 1, 16) || '...)' || cgBL; + END IF; + + IF NVL(vHashBody,'X') != NVL(vLastHashBody,'X') THEN + vChangeMessage := vChangeMessage || + 'BODY Changed - Hash: ' || SUBSTR(vHashBody, 1, 16) || '... (was: ' || + SUBSTR(NVL(vLastHashBody,'NULL'), 1, 16) || '...)' || cgBL; + END IF; + + vChangeMessage := vChangeMessage || + 'RECOMMENDATION: Update PACKAGE_VERSION constant and PACKAGE_BUILD_DATE'; + + LOG_PROCESS_EVENT(vChangeMessage, 'WARNING', vParameters); + END IF; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- First time tracking this package + vChangeDetected := 'N'; + vChangeMessage := 'First tracking record for this package'; + LOG_PROCESS_EVENT(vChangeMessage, 'INFO', vParameters); + END; + + -- Insert tracking record + INSERT INTO CT_MRDS.A_PACKAGE_VERSION_TRACKING ( + PACKAGE_OWNER, + PACKAGE_NAME, + PACKAGE_TYPE, + PACKAGE_VERSION, + PACKAGE_BUILD_DATE, + PACKAGE_AUTHOR, + SOURCE_CODE_HASH_SPEC, + SOURCE_CODE_HASH_BODY, + LINE_COUNT_SPEC, + LINE_COUNT_BODY, + DETECTED_CHANGE_WITHOUT_VERSION, + CHANGE_DETECTION_MESSAGE + ) VALUES ( + UPPER(pPackageOwner), + UPPER(pPackageName), + 'BOTH', + pPackageVersion, + pPackageBuildDate, + pPackageAuthor, + vHashSpec, + vHashBody, + vLineCountSpec, + vLineCountBody, + vChangeDetected, + vChangeMessage + ); 
+ + COMMIT; + + LOG_PROCESS_EVENT('End TRACK_PACKAGE_VERSION - Record inserted', 'INFO', vParameters); + + EXCEPTION + WHEN OTHERS THEN + LOG_PROCESS_ERROR('Error in TRACK_PACKAGE_VERSION: ' || SQLERRM, vParameters); + RAISE; + END TRACK_PACKAGE_VERSION; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION CHECK_PACKAGE_CHANGES( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2 + IS + vCurrentHashSpec VARCHAR2(64); + vCurrentHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLastTrackingDate TIMESTAMP; + vChangeReport VARCHAR2(4000); + vSpecChanged BOOLEAN := FALSE; + vBodyChanged BOOLEAN := FALSE; + BEGIN + -- Get current hashes + vCurrentHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vCurrentHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get last tracked hashes + BEGIN + SELECT PACKAGE_VERSION, SOURCE_CODE_HASH_SPEC, SOURCE_CODE_HASH_BODY, TRACKING_DATE + INTO vLastVersion, vLastHashSpec, vLastHashBody, vLastTrackingDate + FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN 'Package ' || pPackageOwner || '.' || pPackageName || ' has never been tracked.' || cgBL || + 'Run TRACK_PACKAGE_VERSION to establish baseline.'; + END; + + -- Check for changes + IF vCurrentHashSpec != vLastHashSpec THEN + vSpecChanged := TRUE; + END IF; + + IF NVL(vCurrentHashBody, 'X') != NVL(vLastHashBody, 'X') THEN + vBodyChanged := TRUE; + END IF; + + -- Build report + IF vSpecChanged OR vBodyChanged THEN + vChangeReport := 'WARNING: Package ' || pPackageOwner || '.' || pPackageName || ' has changed!' 
|| cgBL || + '========================================' || cgBL || + 'Last Tracked Version: ' || vLastVersion || cgBL || + 'Last Tracked Date: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + cgBL; + + IF vSpecChanged THEN + vChangeReport := vChangeReport || + 'SPECIFICATION Changed:' || cgBL || + ' Current Hash: ' || SUBSTR(vCurrentHashSpec, 1, 16) || '...' || cgBL || + ' Last Hash: ' || SUBSTR(vLastHashSpec, 1, 16) || '...' || cgBL || + cgBL; + END IF; + + IF vBodyChanged THEN + vChangeReport := vChangeReport || + 'BODY Changed:' || cgBL || + ' Current Hash: ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 16) || '...' || cgBL || + ' Last Hash: ' || SUBSTR(NVL(vLastHashBody, 'NULL'), 1, 16) || '...' || cgBL || + cgBL; + END IF; + + vChangeReport := vChangeReport || + 'RECOMMENDATION:' || cgBL || + '1. Update PACKAGE_VERSION constant' || cgBL || + '2. Update PACKAGE_BUILD_DATE constant' || cgBL || + '3. Add entry to VERSION_HISTORY' || cgBL || + '4. Call TRACK_PACKAGE_VERSION to update tracking'; + ELSE + vChangeReport := 'OK: Package ' || pPackageOwner || '.' || pPackageName || ' has not changed.' 
|| cgBL || + 'Last Tracked: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + 'Version: ' || vLastVersion; + END IF; + + RETURN vChangeReport; + + EXCEPTION + WHEN OTHERS THEN + RETURN 'Error checking package changes: ' || SQLERRM; + END CHECK_PACKAGE_CHANGES; + + ---------------------------------------------------------------------------------------------------- + + FUNCTION GET_PACKAGE_HASH_INFO( + pPackageOwner VARCHAR2, + pPackageName VARCHAR2 + ) RETURN VARCHAR2 + IS + vCurrentHashSpec VARCHAR2(64); + vCurrentHashBody VARCHAR2(64); + vLastHashSpec VARCHAR2(64); + vLastHashBody VARCHAR2(64); + vLastVersion VARCHAR2(10); + vLastTrackingDate TIMESTAMP; + vLastChangeDetected CHAR(1); + vInfo VARCHAR2(4000); + BEGIN + -- Get current hashes + vCurrentHashSpec := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE'); + vCurrentHashBody := CALCULATE_PACKAGE_HASH(pPackageOwner, pPackageName, 'PACKAGE BODY'); + + -- Get last tracking info + BEGIN + SELECT PACKAGE_VERSION, + SOURCE_CODE_HASH_SPEC, + SOURCE_CODE_HASH_BODY, + TRACKING_DATE, + DETECTED_CHANGE_WITHOUT_VERSION + INTO vLastVersion, vLastHashSpec, vLastHashBody, vLastTrackingDate, vLastChangeDetected + FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING + WHERE PACKAGE_OWNER = UPPER(pPackageOwner) + AND PACKAGE_NAME = UPPER(pPackageName) + ORDER BY TRACKING_DATE DESC + FETCH FIRST 1 ROW ONLY; + EXCEPTION + WHEN NO_DATA_FOUND THEN + RETURN 'Package: ' || pPackageOwner || '.' || pPackageName || cgBL || + 'Status: Never tracked' || cgBL || + 'Current Hash (SPEC): ' || SUBSTR(vCurrentHashSpec, 1, 16) || '...' || cgBL || + 'Current Hash (BODY): ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 16) || '...'; + END; + + -- Build info report + vInfo := 'Package: ' || pPackageOwner || '.' 
|| pPackageName || cgBL || + 'Current Version: ' || vLastVersion || cgBL || + 'Last Tracked: ' || TO_CHAR(vLastTrackingDate, 'YYYY-MM-DD HH24:MI:SS') || cgBL || + cgBL || + 'Current Hash (SPEC): ' || SUBSTR(vCurrentHashSpec, 1, 32) || '...' || cgBL || + 'Last Hash (SPEC): ' || SUBSTR(vLastHashSpec, 1, 32) || '...' || cgBL; + + IF vCurrentHashBody IS NOT NULL OR vLastHashBody IS NOT NULL THEN + vInfo := vInfo || + 'Current Hash (BODY): ' || SUBSTR(NVL(vCurrentHashBody, 'NULL'), 1, 32) || '...' || cgBL || + 'Last Hash (BODY): ' || SUBSTR(NVL(vLastHashBody, 'NULL'), 1, 32) || '...' || cgBL; + END IF; + + vInfo := vInfo || cgBL; + + -- Status + IF vCurrentHashSpec = vLastHashSpec AND NVL(vCurrentHashBody, 'X') = NVL(vLastHashBody, 'X') THEN + vInfo := vInfo || 'Status: OK - No changes detected'; + ELSE + vInfo := vInfo || 'Status: CHANGED - Source code modified since last tracking'; + END IF; + + IF vLastChangeDetected = 'Y' THEN + vInfo := vInfo || cgBL || 'Last Tracking Warning: Change detected without version update'; + END IF; + + RETURN vInfo; + + EXCEPTION + WHEN OTHERS THEN + RETURN 'Error getting package hash info: ' || SQLERRM; + END GET_PACKAGE_HASH_INFO; + + ---------------------------------------------------------------------------------------------------- + +BEGIN + INIT_ERRORS; + guid := sys_guid(); + gvUsername := SYS_CONTEXT('USERENV', 'SESSION_USER'); + gvOsuser := SYS_CONTEXT('USERENV', 'OS_USER'); + gvMachine := SYS_CONTEXT('USERENV', 'HOST'); + gvModule := SYS_CONTEXT('USERENV', 'MODULE'); + + -- Get info about EnvironmentID. Without it package cannot proceed further. + -- Information about environment is needed to get proper configuration values + -- It can be set up in two different ways : + -- 1. Set it on session level: execute DBMS_SESSION.SET_IDENTIFIER (client_id => 'dev'); + -- 2. 
Set it on configuration level: Insert into CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID,CONFIG_VARIABLE,CONFIG_VARIABLE_VALUE) values ('default','environment_id','dev'); + -- Session level setup (1.) takes precedence over configuration level one (2.) + + gvEnv := nvl(SYS_CONTEXT ('USERENV', 'CLIENT_IDENTIFIER'), GET_DEFAULT_ENV()); + if gvEnv is null then + dbms_output.put_line(MSG_ENVIRONMENT_NOT_SET); + LOG_PROCESS_EVENT(MSG_ENVIRONMENT_NOT_SET, 'ERROR'); + RAISE_APPLICATION_ERROR(CODE_ENVIRONMENT_NOT_SET, MSG_ENVIRONMENT_NOT_SET); + else + dbms_output.put_line('EnvironmentID set to: '||gvEnv); + end if; + + INIT_VARIABLES(pEnv => gvEnv); +END ENV_MANAGER; + +/ + +/ diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkg new file mode 100644 index 0000000..fded944 --- /dev/null +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/rollback_version/v2.9.0/ENV_MANAGER.pkg @@ -0,0 +1,625 @@ +create or replace PACKAGE CT_MRDS.ENV_MANAGER +AUTHID CURRENT_USER +AS + /** + * General comment for package: Please put comments for functions and procedures as shown in below example. + * It is a standard. + * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function + * which returns documentation text for confluence page (to Copy-Paste it). 
+ **/ + + -- Example comment: + /** + * @name EX_PROCEDURE_NAME + * @desc Procedure description + * @example select ENV_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual; + * @ex_rslt Example Result + **/ + + -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.2.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-12-20 10:00:00'; + PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; + + -- Version History (Latest changes first) + VERSION_HISTORY CONSTANT VARCHAR2(4000) := + '3.2.0 (2025-12-20): Added error codes for parallel execution support (CODE_INVALID_PARALLEL_DEGREE -20110, CODE_PARALLEL_EXECUTION_FAILED -20111)' || CHR(13)||CHR(10) || + '3.1.0 (2025-10-22): Added package hash tracking and automatic change detection system (SHA256 hashing)' || CHR(13)||CHR(10) || + '3.0.0 (2025-10-22): Added package versioning system with centralized version management functions' || CHR(13)||CHR(10) || + '2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function for comprehensive CSV validation analysis' || CHR(13)||CHR(10) || + '2.0.0 (2025-10-01): Added LOG_PROCESS_ERROR procedure with enhanced error diagnostics and stack traces' || CHR(13)||CHR(10) || + '1.5.0 (2025-09-20): Added console logging support with gvConsoleLoggingEnabled configuration' || CHR(13)||CHR(10) || + '1.0.0 (2025-09-01): Initial release with error management and configuration system'; + + TYPE Error_Record IS RECORD ( + code PLS_INTEGER, + message VARCHAR2(4000) + ); + + TYPE tErrorList IS TABLE OF Error_Record INDEX BY PLS_INTEGER; + + Errors tErrorList; + + + guid VARCHAR2(32); + gvEnv VARCHAR2(200); + gvUsername VARCHAR2(128); + gvOsuser VARCHAR2(128); + gvMachine VARCHAR2(64); + gvModule VARCHAR2(64); + + gvNameSpace VARCHAR2(200); + gvRegion VARCHAR2(200); + gvDataBucketName VARCHAR2(200); + gvInboxBucketName VARCHAR2(200); + gvArchiveBucketName VARCHAR2(200); + gvDataBucketUri VARCHAR2(200); + 
gvInboxBucketUri VARCHAR2(200); + gvArchiveBucketUri VARCHAR2(200); + gvCredentialName VARCHAR2(200); + + -- Overwritten by variable "LoggingEnabled" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvLoggingEnabled VARCHAR2(3) := 'ON'; -- 'ON' or 'OFF' + + -- Overwritten by variable "MinLogLevel" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + -- Possible values: DEBUG ,INFO ,WARNING ,ERROR + gvMinLogLevel VARCHAR2(10) := 'DEBUG'; + + -- Overwritten by variable "DefaultDateFormat" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvDefaultDateFormat VARCHAR2(200) := 'DD/MM/YYYY HH24:MI:SS'; + + -- Overwritten by variable "ConsoleLoggingEnabled" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table + gvConsoleLoggingEnabled VARCHAR2(3) := 'ON'; -- 'ON' or 'OFF' + + cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); + + vgSourceFileConfigKey PLS_INTEGER; + + vgMsgTmp VARCHAR2(32000); + --Exceptions + ERR_EMPTY_FILEURI_AND_RECKEY EXCEPTION; + CODE_EMPTY_FILEURI_AND_RECKEY CONSTANT PLS_INTEGER := -20001; + MSG_EMPTY_FILEURI_AND_RECKEY VARCHAR2(4000) := 'Either pFileUri or pSourceFileReceivedKey must be not null'; + PRAGMA EXCEPTION_INIT( ERR_EMPTY_FILEURI_AND_RECKEY + ,CODE_EMPTY_FILEURI_AND_RECKEY); + + + ERR_NO_CONFIG_MATCH_FOR_FILEURI EXCEPTION; + CODE_NO_CONFIG_MATCH_FOR_FILEURI CONSTANT PLS_INTEGER := -20002; + MSG_NO_CONFIG_MATCH_FOR_FILEURI VARCHAR2(4000) := 'No match for source file in A_SOURCE_FILE_CONFIG table' + ||cgBL||' The file provided in parameter: pFileUri does not have ' + ||cgBL||' coresponding configuration in A_SOURCE_FILE_CONFIG table'; + PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_MATCH_FOR_FILEURI + ,CODE_NO_CONFIG_MATCH_FOR_FILEURI); + + ERR_MULTIPLE_MATCH_FOR_SRCFILE EXCEPTION; + CODE_MULTIPLE_MATCH_FOR_SRCFILE CONSTANT PLS_INTEGER := -20003; + MSG_MULTIPLE_MATCH_FOR_SRCFILE VARCHAR2(4000) := 'Multiple match for source file in A_SOURCE_FILE_CONFIG table'; + PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_MATCH_FOR_SRCFILE + ,CODE_MULTIPLE_MATCH_FOR_SRCFILE); + + 
ERR_MISSING_COLUMN_DATE_FORMAT EXCEPTION; + CODE_MISSING_COLUMN_DATE_FORMAT CONSTANT PLS_INTEGER := -20004; + MSG_MISSING_COLUMN_DATE_FORMAT VARCHAR2(4000) := 'Missing entry in config table: A_COLUMN_DATE_FORMAT primary key(TEMPLATE_TABLE_NAME, COLUMN_NAME)' + ||cgBL||' Remember: each column which data_type IN (''DATE'', ''TIMESTAMP'')' + ||cgBL||' should have DateFormat specified in A_COLUMN_DATE_FORMAT table ' + ||cgBL||' for example: ''YYYY-MM-DD'''; + PRAGMA EXCEPTION_INIT( ERR_MISSING_COLUMN_DATE_FORMAT + ,CODE_MISSING_COLUMN_DATE_FORMAT); + + ERR_MULTIPLE_COLUMN_DATE_FORMAT EXCEPTION; + CODE_MULTIPLE_COLUMN_DATE_FORMAT CONSTANT PLS_INTEGER := -20005; + MSG_MULTIPLE_COLUMN_DATE_FORMAT VARCHAR2(4000) := 'Multiple records for date format in A_COLUMN_DATE_FORMAT table' + ||cgBL||' There should be only one format specified for each DAT/TIMESTAMP column'; + PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_COLUMN_DATE_FORMAT + ,CODE_MULTIPLE_COLUMN_DATE_FORMAT); + + + ERR_DIDNT_GET_LOAD_OPERATION_ID EXCEPTION; + CODE_DIDNT_GET_LOAD_OPERATION_ID CONSTANT PLS_INTEGER := -20006; + MSG_DIDNT_GET_LOAD_OPERATION_ID VARCHAR2(4000) := 'Didnt get load operation id from external table validation'; + PRAGMA EXCEPTION_INIT( ERR_DIDNT_GET_LOAD_OPERATION_ID + ,CODE_DIDNT_GET_LOAD_OPERATION_ID); + + ERR_NO_CONFIG_FOR_RECEIVED_FILE EXCEPTION; + CODE_NO_CONFIG_FOR_RECEIVED_FILE CONSTANT PLS_INTEGER := -20007; + MSG_NO_CONFIG_FOR_RECEIVED_FILE VARCHAR2(4000) := 'No match for received source file in A_SOURCE_FILE_CONFIG ' + ||cgBL||' or missing data in A_SOURCE_FILE_RECEIVED table for provided pSourceFileReceivedKey parameter'; + PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_FOR_RECEIVED_FILE + ,CODE_NO_CONFIG_FOR_RECEIVED_FILE); + + ERR_MULTI_CONFIG_FOR_RECEIVED_FILE EXCEPTION; + CODE_MULTI_CONFIG_FOR_RECEIVED_FILE CONSTANT PLS_INTEGER := -20008; + MSG_MULTI_CONFIG_FOR_RECEIVED_FILE VARCHAR2(4000) := 'Multiple matchs for received source file in A_SOURCE_FILE_CONFIG'; + PRAGMA EXCEPTION_INIT( 
ERR_MULTI_CONFIG_FOR_RECEIVED_FILE
                        ,CODE_MULTI_CONFIG_FOR_RECEIVED_FILE);

  -- NOTE(review): -20009 is a near-duplicate of -20023 (MSG_FILE_NOT_EXISTS_ON_CLOUD);
  -- consider consolidating into a single error code in a future release.
  ERR_FILE_NOT_FOUND_ON_CLOUD EXCEPTION;
  CODE_FILE_NOT_FOUND_ON_CLOUD CONSTANT PLS_INTEGER := -20009;
  MSG_FILE_NOT_FOUND_ON_CLOUD VARCHAR2(4000) := 'File not found on the cloud';
  PRAGMA EXCEPTION_INIT( ERR_FILE_NOT_FOUND_ON_CLOUD
                        ,CODE_FILE_NOT_FOUND_ON_CLOUD);

  ERR_FILE_VALIDATION_FAILED EXCEPTION;
  CODE_FILE_VALIDATION_FAILED CONSTANT PLS_INTEGER := -20010;
  MSG_FILE_VALIDATION_FAILED VARCHAR2(4000) := 'File validation failed';
  PRAGMA EXCEPTION_INIT( ERR_FILE_VALIDATION_FAILED
                        ,CODE_FILE_VALIDATION_FAILED);

  ERR_EXCESS_COLUMNS_DETECTED EXCEPTION;
  CODE_EXCESS_COLUMNS_DETECTED CONSTANT PLS_INTEGER := -20011;
  MSG_EXCESS_COLUMNS_DETECTED VARCHAR2(4000) := 'CSV file contains more columns than template allows';
  PRAGMA EXCEPTION_INIT( ERR_EXCESS_COLUMNS_DETECTED
                        ,CODE_EXCESS_COLUMNS_DETECTED);

  ERR_NO_CONFIG_MATCH EXCEPTION;
  CODE_NO_CONFIG_MATCH CONSTANT PLS_INTEGER := -20012;
  MSG_NO_CONFIG_MATCH VARCHAR2(4000) := 'No match for specified parameters in A_SOURCE_FILE_CONFIG table';
  PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_MATCH
                        ,CODE_NO_CONFIG_MATCH);

  ERR_UNKNOWN_PREFIX EXCEPTION;
  CODE_UNKNOWN_PREFIX CONSTANT PLS_INTEGER := -20013;
  MSG_UNKNOWN_PREFIX VARCHAR2(4000) := 'Unknown prefix';
  PRAGMA EXCEPTION_INIT( ERR_UNKNOWN_PREFIX
                        ,CODE_UNKNOWN_PREFIX);

  ERR_TABLE_NOT_EXISTS EXCEPTION;
  CODE_TABLE_NOT_EXISTS CONSTANT PLS_INTEGER := -20014;
  MSG_TABLE_NOT_EXISTS VARCHAR2(4000) := 'Table does not exist';
  PRAGMA EXCEPTION_INIT( ERR_TABLE_NOT_EXISTS
                        ,CODE_TABLE_NOT_EXISTS);

  ERR_COLUMN_NOT_EXISTS EXCEPTION;
  CODE_COLUMN_NOT_EXISTS CONSTANT PLS_INTEGER := -20015;
  MSG_COLUMN_NOT_EXISTS VARCHAR2(4000) := 'Column does not exist in table';
  PRAGMA EXCEPTION_INIT( ERR_COLUMN_NOT_EXISTS
                        ,CODE_COLUMN_NOT_EXISTS);

  ERR_UNSUPPORTED_DATA_TYPE EXCEPTION;
  CODE_UNSUPPORTED_DATA_TYPE CONSTANT PLS_INTEGER := -20016;
  MSG_UNSUPPORTED_DATA_TYPE VARCHAR2(4000) := 'Unsupported data type';
  PRAGMA EXCEPTION_INIT( ERR_UNSUPPORTED_DATA_TYPE
                        ,CODE_UNSUPPORTED_DATA_TYPE);

  ERR_MISSING_SOURCE_KEY EXCEPTION;
  CODE_MISSING_SOURCE_KEY CONSTANT PLS_INTEGER := -20017;
  MSG_MISSING_SOURCE_KEY VARCHAR2(4000) := 'The Source was not found in parent table A_SOURCE';
  PRAGMA EXCEPTION_INIT( ERR_MISSING_SOURCE_KEY
                        ,CODE_MISSING_SOURCE_KEY);

  ERR_NULL_SOURCE_FILE_CONFIG_KEY EXCEPTION;
  CODE_NULL_SOURCE_FILE_CONFIG_KEY CONSTANT PLS_INTEGER := -20018;
  MSG_NULL_SOURCE_FILE_CONFIG_KEY VARCHAR2(4000) := 'No entry in A_SOURCE_FILE_CONFIG table for specified A_SOURCE_FILE_CONFIG_KEY';
  PRAGMA EXCEPTION_INIT( ERR_NULL_SOURCE_FILE_CONFIG_KEY
                        ,CODE_NULL_SOURCE_FILE_CONFIG_KEY);

  ERR_DUPLICATED_SOURCE_KEY EXCEPTION;
  CODE_DUPLICATED_SOURCE_KEY CONSTANT PLS_INTEGER := -20019;
  MSG_DUPLICATED_SOURCE_KEY VARCHAR2(4000) := 'The Source already exists in the A_SOURCE table';
  PRAGMA EXCEPTION_INIT( ERR_DUPLICATED_SOURCE_KEY
                        ,CODE_DUPLICATED_SOURCE_KEY);

  ERR_MISSING_CONTAINER_CONFIG EXCEPTION;
  CODE_MISSING_CONTAINER_CONFIG CONSTANT PLS_INTEGER := -20020;
  MSG_MISSING_CONTAINER_CONFIG VARCHAR2(4000) := 'No match in A_SOURCE_FILE_CONFIG table where SOURCE_FILE_TYPE=''CONTAINER'' and specified SOURCE_FILE_ID';
  PRAGMA EXCEPTION_INIT( ERR_MISSING_CONTAINER_CONFIG
                        ,CODE_MISSING_CONTAINER_CONFIG);

  ERR_MULTIPLE_CONTAINER_ENTRIES EXCEPTION;
  CODE_MULTIPLE_CONTAINER_ENTRIES CONSTANT PLS_INTEGER := -20021;
  MSG_MULTIPLE_CONTAINER_ENTRIES VARCHAR2(4000) := 'Multiple matches in A_SOURCE_FILE_CONFIG table where SOURCE_FILE_TYPE=''CONTAINER'' and specified SOURCE_FILE_ID';
  PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_CONTAINER_ENTRIES
                        ,CODE_MULTIPLE_CONTAINER_ENTRIES);

  ERR_WRONG_DESTINATION_PARAM EXCEPTION;
  CODE_WRONG_DESTINATION_PARAM CONSTANT PLS_INTEGER := -20022;
  MSG_WRONG_DESTINATION_PARAM VARCHAR2(4000) := 'Wrong destination parameter provided.';
  PRAGMA EXCEPTION_INIT( ERR_WRONG_DESTINATION_PARAM
                        ,CODE_WRONG_DESTINATION_PARAM);

  -- NOTE(review): overlaps semantically with -20009 (MSG_FILE_NOT_FOUND_ON_CLOUD).
  ERR_FILE_NOT_EXISTS_ON_CLOUD EXCEPTION;
  CODE_FILE_NOT_EXISTS_ON_CLOUD CONSTANT PLS_INTEGER := -20023;
  -- Fixed grammar of user-facing message (was: 'File not exists on cloud.')
  MSG_FILE_NOT_EXISTS_ON_CLOUD VARCHAR2(4000) := 'File does not exist on cloud.';
  PRAGMA EXCEPTION_INIT( ERR_FILE_NOT_EXISTS_ON_CLOUD
                        ,CODE_FILE_NOT_EXISTS_ON_CLOUD);

  ERR_FILE_ALREADY_REGISTERED EXCEPTION;
  CODE_FILE_ALREADY_REGISTERED CONSTANT PLS_INTEGER := -20024;
  MSG_FILE_ALREADY_REGISTERED VARCHAR2(4000) := 'File already registered in A_SOURCE_FILE_RECEIVED table.';
  PRAGMA EXCEPTION_INIT( ERR_FILE_ALREADY_REGISTERED
                        ,CODE_FILE_ALREADY_REGISTERED);

  ERR_WRONG_DATE_TIMESTAMP_FORMAT EXCEPTION;
  CODE_WRONG_DATE_TIMESTAMP_FORMAT CONSTANT PLS_INTEGER := -20025;
  MSG_WRONG_DATE_TIMESTAMP_FORMAT VARCHAR2(4000) := 'Provided DATE or TIMESTAMP format has errors (possible duplicated codes, ex: ''DD'').';
  PRAGMA EXCEPTION_INIT( ERR_WRONG_DATE_TIMESTAMP_FORMAT
                        ,CODE_WRONG_DATE_TIMESTAMP_FORMAT);

  -- Multi-line guidance message; cgBL is assumed to be a package-level line-break
  -- constant declared earlier in this spec -- TODO confirm its declaration.
  ERR_ENVIRONMENT_NOT_SET EXCEPTION;
  CODE_ENVIRONMENT_NOT_SET CONSTANT PLS_INTEGER := -20026;
  MSG_ENVIRONMENT_NOT_SET VARCHAR2(4000) := 'EnvironmentID not set'
    ||cgBL||' Information about environment is needed to get proper configuration values.'
    ||cgBL||' It can be set up in two different ways:'
    ||cgBL||' 1. Set it on session level: execute DBMS_SESSION.SET_IDENTIFIER (client_id => ''dev'')'
    ||cgBL||' 2. Set it on configuration level: Insert into CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID,CONFIG_VARIABLE,CONFIG_VARIABLE_VALUE) values (''default'',''environment_id'',''dev'')'
    ||cgBL||' Session level setup (1.) takes precedence over configuration level one (2.)'
    ;
  PRAGMA EXCEPTION_INIT( ERR_ENVIRONMENT_NOT_SET
                        ,CODE_ENVIRONMENT_NOT_SET);

  ERR_CONFIG_VARIABLE_NOT_SET EXCEPTION;
  CODE_CONFIG_VARIABLE_NOT_SET CONSTANT PLS_INTEGER := -20027;
  MSG_CONFIG_VARIABLE_NOT_SET VARCHAR2(4000) := 'Missing configuration value in A_FILE_MANAGER_CONFIG';
  PRAGMA EXCEPTION_INIT( ERR_CONFIG_VARIABLE_NOT_SET
                        ,CODE_CONFIG_VARIABLE_NOT_SET);

  ERR_NOT_INPUT_SOURCE_FILE_TYPE EXCEPTION;
  CODE_NOT_INPUT_SOURCE_FILE_TYPE CONSTANT PLS_INTEGER := -20028;
  MSG_NOT_INPUT_SOURCE_FILE_TYPE VARCHAR2(4000) := 'Archival can be executed only for A_SOURCE_FILE_CONFIG_KEY where SOURCE_FILE_TYPE=''INPUT''';
  PRAGMA EXCEPTION_INIT( ERR_NOT_INPUT_SOURCE_FILE_TYPE
                        ,CODE_NOT_INPUT_SOURCE_FILE_TYPE);

  ERR_EXP_DATA_FOR_ARCH_FAILED EXCEPTION;
  CODE_EXP_DATA_FOR_ARCH_FAILED CONSTANT PLS_INTEGER := -20029;
  MSG_EXP_DATA_FOR_ARCH_FAILED VARCHAR2(4000) := 'Export data for archival failed.';
  PRAGMA EXCEPTION_INIT( ERR_EXP_DATA_FOR_ARCH_FAILED
                        ,CODE_EXP_DATA_FOR_ARCH_FAILED);

  ERR_RESTORE_FILE_FROM_TRASH EXCEPTION;
  CODE_RESTORE_FILE_FROM_TRASH CONSTANT PLS_INTEGER := -20030;
  -- Fixed spelling/grammar (was: 'Unexpected issues occured while archival process.')
  MSG_RESTORE_FILE_FROM_TRASH VARCHAR2(4000) := 'Unexpected issues occurred during archival process. Restoration of exported files failed.';
  PRAGMA EXCEPTION_INIT( ERR_RESTORE_FILE_FROM_TRASH
                        ,CODE_RESTORE_FILE_FROM_TRASH);

  ERR_CHANGE_STAT_TO_ARCHIVED_FAILED EXCEPTION;
  CODE_CHANGE_STAT_TO_ARCHIVED_FAILED CONSTANT PLS_INTEGER := -20031;
  MSG_CHANGE_STAT_TO_ARCHIVED_FAILED VARCHAR2(4000) := 'Failed to change file status to: ARCHIVED in A_SOURCE_FILE_RECEIVED table.';
  PRAGMA EXCEPTION_INIT( ERR_CHANGE_STAT_TO_ARCHIVED_FAILED
                        ,CODE_CHANGE_STAT_TO_ARCHIVED_FAILED);

  ERR_MOVE_FILE_TO_TRASH_FAILED EXCEPTION;
  CODE_MOVE_FILE_TO_TRASH_FAILED CONSTANT PLS_INTEGER := -20032;
  MSG_MOVE_FILE_TO_TRASH_FAILED VARCHAR2(4000) := 'FAILED to move file to TRASH before DROPPING it.';
  PRAGMA EXCEPTION_INIT( ERR_MOVE_FILE_TO_TRASH_FAILED
                        ,CODE_MOVE_FILE_TO_TRASH_FAILED);

  ERR_DROP_EXPORTED_FILES_FAILED EXCEPTION;
  CODE_DROP_EXPORTED_FILES_FAILED CONSTANT PLS_INTEGER := -20033;
  -- BUGFIX: message was a copy-paste duplicate of MSG_MOVE_FILE_TO_TRASH_FAILED (-20032);
  -- -20033 is raised when dropping the exported files themselves fails.
  MSG_DROP_EXPORTED_FILES_FAILED VARCHAR2(4000) := 'FAILED to DROP exported files.';
  PRAGMA EXCEPTION_INIT( ERR_DROP_EXPORTED_FILES_FAILED
                        ,CODE_DROP_EXPORTED_FILES_FAILED);

  ERR_INVALID_BUCKET_AREA EXCEPTION;
  CODE_INVALID_BUCKET_AREA CONSTANT PLS_INTEGER := -20034;
  MSG_INVALID_BUCKET_AREA VARCHAR2(4000) := 'Invalid bucket area specified. Valid values: INBOX, ODS, DATA, ARCHIVE';
  PRAGMA EXCEPTION_INIT( ERR_INVALID_BUCKET_AREA
                        ,CODE_INVALID_BUCKET_AREA);

  -- Parallel-execution error codes live in a separate -201xx range.
  ERR_INVALID_PARALLEL_DEGREE EXCEPTION;
  CODE_INVALID_PARALLEL_DEGREE CONSTANT PLS_INTEGER := -20110;
  MSG_INVALID_PARALLEL_DEGREE VARCHAR2(4000) := 'Invalid parallel degree parameter. Must be between 1 and 16';
  PRAGMA EXCEPTION_INIT( ERR_INVALID_PARALLEL_DEGREE
                        ,CODE_INVALID_PARALLEL_DEGREE);

  ERR_PARALLEL_EXECUTION_FAILED EXCEPTION;
  CODE_PARALLEL_EXECUTION_FAILED CONSTANT PLS_INTEGER := -20111;
  MSG_PARALLEL_EXECUTION_FAILED VARCHAR2(4000) := 'Parallel execution failed';
  PRAGMA EXCEPTION_INIT( ERR_PARALLEL_EXECUTION_FAILED
                        ,CODE_PARALLEL_EXECUTION_FAILED);

  -- Catch-all; keep as the highest code so new specific errors slot in below it.
  ERR_UNKNOWN EXCEPTION;
  CODE_UNKNOWN CONSTANT PLS_INTEGER := -20999;
  -- Fixed spelling (was: 'Unknown Error Occured')
  MSG_UNKNOWN VARCHAR2(4000) := 'Unknown Error Occurred';
  PRAGMA EXCEPTION_INIT( ERR_UNKNOWN
                        ,CODE_UNKNOWN);

  -- NOTE(review): the MSG_* variables above are plain VARCHAR2, so any caller can
  -- reassign them at runtime. Declaring them CONSTANT would be safer, but would be
  -- an interface change -- confirm no code assigns to them before tightening.

  ---------------------------------------------------------------------------------------------------------------------------
  ---------------------------------------------------------------------------------------------------------------------------

  /**
   * @name    LOG_PROCESS_EVENT
   * @desc    Insert a new log record into A_PROCESS_LOG table.
   *          Also outputs to console if gvConsoleLoggingEnabled = 'ON'.
   *          Respects logging level configuration (gvMinLogLevel).
   * @example ENV_MANAGER.LOG_PROCESS_EVENT('Process completed successfully', 'INFO', 'pParam1=value1');
   * @ex_rslt Record inserted into A_PROCESS_LOG table and optionally displayed in console output
   **/
  PROCEDURE LOG_PROCESS_EVENT (
     pLogMessage  VARCHAR2
    ,pLogLevel    VARCHAR2 DEFAULT 'ERROR'
    ,pParameters  VARCHAR2 DEFAULT NULL
    ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER'
  );

  /**
   * @name    LOG_PROCESS_ERROR
   * @desc    Insert a detailed error record into A_PROCESS_LOG table with full stack trace, backtrace, and call stack.
   *          This procedure captures comprehensive error information for debugging purposes while
   *          allowing clean user-facing error messages to be raised separately.
   * @param   pLogMessage - Base error message description
   * @param   pParameters - Procedure parameters for context
   * @param   pProcessName - Name of the calling process/package
   * @ex_rslt Record inserted into A_PROCESS_LOG table with complete error stack information
   */
  PROCEDURE LOG_PROCESS_ERROR (
     pLogMessage  VARCHAR2
    ,pParameters  VARCHAR2 DEFAULT NULL
    ,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER'
  );

  /**
   * @name    INIT_ERRORS
   * @desc    Loads data into Errors array.
   *          Errors array is a list of Record(Error_Code, Error_Message) indexed by Error_Code.
   *          Called automatically during package initialization.
   * @example Called automatically when package is first referenced
   * @ex_rslt Errors array populated with all error codes and messages
   **/
  PROCEDURE INIT_ERRORS;

  /**
   * @name    GET_DEFAULT_ENV
   * @desc    Returns a string with the name of the default environment.
   *          Return string is an A_FILE_MANAGER_CONFIG.ENVIRONMENT_ID value.
   * @example select ENV_MANAGER.GET_DEFAULT_ENV() from dual;
   * @ex_rslt dev
   **/
  FUNCTION GET_DEFAULT_ENV
  RETURN VARCHAR2;

  /**
   * @name    INIT_VARIABLES
   * @desc    For specified pEnv parameter (A_FILE_MANAGER_CONFIG.ENVIRONMENT_ID)
   *          assigns values to the following global package variables:
   *          - gvNameSpace
   *          - gvRegion
   *          - gvCredentialName
   *          - gvInboxBucketName
   *          - gvDataBucketName
   *          - gvArchiveBucketName
   *          - gvInboxBucketUri
   *          - gvDataBucketUri
   *          - gvArchiveBucketUri
   *          - gvLoggingEnabled
   *          - gvMinLogLevel
   *          - gvDefaultDateFormat
   *          - gvConsoleLoggingEnabled
   **/
  PROCEDURE INIT_VARIABLES(
    pEnv VARCHAR2
  );

  /**
   * @name    GET_ERROR_MESSAGE
   * @desc    Returns a string with the error message for the specified pCode (Error_Code).
   *          Error message is taken from the Errors array loaded by INIT_ERRORS procedure.
   * @example select ENV_MANAGER.GET_ERROR_MESSAGE(pCode => -20009) from dual;
   * @ex_rslt File not found on the cloud
   **/
  FUNCTION GET_ERROR_MESSAGE(
    pCode PLS_INTEGER
  ) RETURN VARCHAR2;

  /**
   * @name    GET_ERROR_STACK
   * @desc    Returns a string with all possible error stack info.
   *          Error message is taken from the Errors array loaded by INIT_ERRORS procedure.
   * @example
   *   select ENV_MANAGER.GET_ERROR_STACK(
   *            pFormat                => 'OUTPUT'
   *           ,pCode                  => -20009
   *           ,pSourceFileReceivedKey => NULL)
   *   from dual
   * @ex_rslt
   *   ------------------------------------------------------+
   *   Error Message:
   *   ORA-0000: normal, successful completion
   *   -------------------------------------------------------
   *   Error Stack:
   *   -------------------------------------------------------
   *   Error Backtrace:
   *   ------------------------------------------------------+
   **/
  FUNCTION GET_ERROR_STACK(
     pFormat                VARCHAR2
    ,pCode                  PLS_INTEGER
    ,pSourceFileReceivedKey CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE DEFAULT NULL
  ) RETURN VARCHAR2;

  /**
   * @name    FORMAT_PARAMETERS
   * @desc    Formats parameter list for logging purposes.
   *          Converts SYS.ODCIVARCHAR2LIST to formatted string with proper NULL handling.
   * @example select ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('param1=value1', 'param2=NULL')) from dual;
   * @ex_rslt param1=value1 ,
   *          param2=NULL
   **/
  FUNCTION FORMAT_PARAMETERS(
    pParameterList SYS.ODCIVARCHAR2LIST
  ) RETURN VARCHAR2;

  /**
   * @name    ANALYZE_VALIDATION_ERRORS
   * @desc    Analyzes CSV validation errors and generates detailed diagnostic report.
   *          Compares CSV structure with template table and provides specific error analysis.
   *          Includes suggested solutions for common validation issues.
   * @param   pValidationLogTable - Name of validation log table (e.g., VALIDATE$242_LOG)
   * @param   pTemplateSchema - Schema of template table (e.g., CT_ET_TEMPLATES)
   * @param   pTemplateTable - Name of template table (e.g., MOCK_PROC_TABLE)
   * @param   pCsvFileUri - URI of CSV file being validated
   * @example SELECT ENV_MANAGER.ANALYZE_VALIDATION_ERRORS('VALIDATE$242_LOG', 'CT_ET_TEMPLATES', 'MOCK_PROC_TABLE', 'https://...') FROM DUAL;
   * @ex_rslt Detailed validation analysis report with column mismatches and solutions
   **/
  FUNCTION ANALYZE_VALIDATION_ERRORS(
    pValidationLogTable VARCHAR2,
    pTemplateSchema     VARCHAR2,
    pTemplateTable      VARCHAR2,
    pCsvFileUri         VARCHAR2
  ) RETURN VARCHAR2;

  ---------------------------------------------------------------------------------------------------------------------------
  -- PACKAGE VERSION MANAGEMENT FUNCTIONS
  ---------------------------------------------------------------------------------------------------------------------------

  /**
   * @name    GET_VERSION
   * @desc    Returns the current version number of the ENV_MANAGER package.
   *          Uses semantic versioning format (MAJOR.MINOR.PATCH).
   * @example SELECT ENV_MANAGER.GET_VERSION() FROM DUAL;
   * @ex_rslt 3.0.0
   **/
  FUNCTION GET_VERSION RETURN VARCHAR2;

  /**
   * @name    GET_BUILD_INFO
   * @desc    Returns comprehensive build information including version, build date, and author.
   *          Formatted for display in logs or monitoring systems.
   * @example SELECT ENV_MANAGER.GET_BUILD_INFO() FROM DUAL;
   * @ex_rslt Package: ENV_MANAGER
   *          Version: 3.0.0
   *          Build Date: 2025-10-22 16:00:00
   *          Author: Grzegorz Michalski
   **/
  FUNCTION GET_BUILD_INFO RETURN VARCHAR2;

  /**
   * @name    GET_VERSION_HISTORY
   * @desc    Returns complete version history with all releases and changes.
   *          Shows evolution of package features over time.
   * @example SELECT ENV_MANAGER.GET_VERSION_HISTORY() FROM DUAL;
   * @ex_rslt ENV_MANAGER Version History:
   *          3.0.0 (2025-10-22): Added package versioning system...
   *          2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function...
   **/
  FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2;

  /**
   * @name    GET_PACKAGE_VERSION_INFO
   * @desc    Universal function to get formatted version information for any package.
   *          This centralized function is used by all packages in the system.
   * @param   pPackageName - Name of the package
   * @param   pVersion - Version string (MAJOR.MINOR.PATCH format)
   * @param   pBuildDate - Build date timestamp
   * @param   pAuthor - Package author name
   * @example SELECT ENV_MANAGER.GET_PACKAGE_VERSION_INFO('FILE_MANAGER', '2.1.0', '2025-10-22 15:00:00', 'Grzegorz Michalski') FROM DUAL;
   * @ex_rslt Package: FILE_MANAGER
   *          Version: 2.1.0
   *          Build Date: 2025-10-22 15:00:00
   *          Author: Grzegorz Michalski
   **/
  FUNCTION GET_PACKAGE_VERSION_INFO(
    pPackageName VARCHAR2,
    pVersion     VARCHAR2,
    pBuildDate   VARCHAR2,
    pAuthor      VARCHAR2
  ) RETURN VARCHAR2;

  /**
   * @name    FORMAT_VERSION_HISTORY
   * @desc    Universal function to format version history for any package.
   *          Adds package name header and proper formatting.
   * @param   pPackageName - Name of the package
   * @param   pVersionHistory - Complete version history text
   * @example SELECT ENV_MANAGER.FORMAT_VERSION_HISTORY('FILE_MANAGER', '2.1.0 (2025-10-22): Export procedures...') FROM DUAL;
   * @ex_rslt FILE_MANAGER Version History:
   *          2.1.0 (2025-10-22): Export procedures...
   **/
  FUNCTION FORMAT_VERSION_HISTORY(
    pPackageName    VARCHAR2,
    pVersionHistory VARCHAR2
  ) RETURN VARCHAR2;

  ---------------------------------------------------------------------------------------------------------------------------
  -- PACKAGE HASH + CHANGE DETECTION FUNCTIONS
  ---------------------------------------------------------------------------------------------------------------------------

  /**
   * @name    CALCULATE_PACKAGE_HASH
   * @desc    Calculates SHA256 hash of package source code from ALL_SOURCE.
   *          Returns hash for both SPEC and BODY (if exists).
   *          Used for automatic change detection.
   * @param   pPackageOwner - Schema owner of the package
   * @param   pPackageName - Name of the package
   * @param   pPackageType - Type of package code ('PACKAGE' for SPEC, 'PACKAGE BODY' for BODY)
   * @example SELECT ENV_MANAGER.CALCULATE_PACKAGE_HASH('CT_MRDS', 'FILE_MANAGER', 'PACKAGE') FROM DUAL;
   * @ex_rslt A7B3C5D9E8F1234567890ABCDEF... (64-character SHA256 hash)
   **/
  FUNCTION CALCULATE_PACKAGE_HASH(
    pPackageOwner VARCHAR2,
    pPackageName  VARCHAR2,
    pPackageType  VARCHAR2 -- 'PACKAGE' or 'PACKAGE BODY'
  ) RETURN VARCHAR2;

  /**
   * @name    TRACK_PACKAGE_VERSION
   * @desc    Records package version and source code hash in A_PACKAGE_VERSION_TRACKING table.
   *          Automatically detects if source code changed without version update.
   *          Should be called after every package deployment.
   * @param   pPackageOwner - Schema owner of the package
   * @param   pPackageName - Name of the package
   * @param   pPackageVersion - Current version from PACKAGE_VERSION constant
   * @param   pPackageBuildDate - Build date from PACKAGE_BUILD_DATE constant
   * @param   pPackageAuthor - Author from PACKAGE_AUTHOR constant
   * @example EXEC ENV_MANAGER.TRACK_PACKAGE_VERSION('CT_MRDS', 'FILE_MANAGER', '3.2.0', '2025-10-22 16:30:00', 'Grzegorz Michalski');
   * @ex_rslt Record inserted into A_PACKAGE_VERSION_TRACKING with change detection status
   **/
  PROCEDURE TRACK_PACKAGE_VERSION(
    pPackageOwner     VARCHAR2,
    pPackageName      VARCHAR2,
    pPackageVersion   VARCHAR2,
    pPackageBuildDate VARCHAR2,
    pPackageAuthor    VARCHAR2
  );

  /**
   * @name    CHECK_PACKAGE_CHANGES
   * @desc    Checks if package source code has changed since last tracking.
   *          Compares current hash with last recorded hash in A_PACKAGE_VERSION_TRACKING.
   *          Returns detailed change detection report.
   * @param   pPackageOwner - Schema owner of the package
   * @param   pPackageName - Name of the package
   * @example SELECT ENV_MANAGER.CHECK_PACKAGE_CHANGES('CT_MRDS', 'FILE_MANAGER') FROM DUAL;
   * @ex_rslt WARNING: Package changed without version update!
   *          Last Version: 3.2.0
   *          Current Hash (SPEC): A7B3C5D9...
   *          Last Hash (SPEC): B8C4D6E0...
   *          RECOMMENDATION: Update PACKAGE_VERSION and PACKAGE_BUILD_DATE
   **/
  FUNCTION CHECK_PACKAGE_CHANGES(
    pPackageOwner VARCHAR2,
    pPackageName  VARCHAR2
  ) RETURN VARCHAR2;

  /**
   * @name    GET_PACKAGE_HASH_INFO
   * @desc    Returns formatted information about package hash and tracking history.
   *          Includes current hash, last tracked hash, and change detection status.
   * @param   pPackageOwner - Schema owner of the package
   * @param   pPackageName - Name of the package
   * @example SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('CT_MRDS', 'FILE_MANAGER') FROM DUAL;
   * @ex_rslt Package: CT_MRDS.FILE_MANAGER
   *          Current Version: 3.2.0
   *          Current Hash (SPEC): A7B3C5D9...
   *          Last Tracked: 2025-10-22 16:30:00
   *          Status: OK - No changes detected
   **/
  FUNCTION GET_PACKAGE_HASH_INFO(
    pPackageOwner VARCHAR2,
    pPackageName  VARCHAR2
  ) RETURN VARCHAR2;

END ENV_MANAGER;
/