From 6039d6bce4bafefe4b7592bda7ecf379a5f44d2c Mon Sep 17 00:00:00 2001 From: Grzegorz Michalski Date: Thu, 12 Feb 2026 09:56:57 +0100 Subject: [PATCH] Update DATA_EXPORTER package to version 2.8.0, refactor EXPORT_TABLE_DATA to export to a single CSV file, and add pFileName and pMaxFileSize parameters. --- .../new_version/DATA_EXPORTER.pkb | 347 ++++++++---------- .../new_version/DATA_EXPORTER.pkg | 23 +- 2 files changed, 167 insertions(+), 203 deletions(-) diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb index 20bc5fa..4eb1396 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb @@ -602,20 +602,16 @@ AS pKeyColumnName IN VARCHAR2, pBucketArea IN VARCHAR2, pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 default NULL, pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, pRegisterExport IN BOOLEAN default FALSE, pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName ) IS - -- Type definition for key values - TYPE key_value_tab IS TABLE OF VARCHAR2(4000); - vKeyValues key_value_tab; vCount INTEGER; - vSql VARCHAR2(4000); - vKeyValue VARCHAR2(4000); vQuery VARCHAR2(32767); vUri VARCHAR2(4000); - vDataType VARCHAR2(30); vTableName VARCHAR2(128); vSchemaName VARCHAR2(128); vKeyColumnName VARCHAR2(128); @@ -638,7 +634,9 @@ AS ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||'''' ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||'''' ,'pFolderName => '''||nvl(pFolderName, 'NULL')||'''' + ,'pFileName => '''||nvl(pFileName, 'NULL')||'''' ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||'''' + ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||'''' ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||'''' ,'pCredentialName => 
'''||nvl(pCredentialName, 'NULL')||'''' )); @@ -671,16 +669,8 @@ AS IF vCount = 0 THEN RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, ENV_MANAGER.MSG_COLUMN_NOT_EXISTS); - END IF; - -- Get the data type of the key column - SELECT data_type INTO vDataType - FROM all_tab_columns - WHERE table_name = vTableName - AND column_name = vKeyColumnName - AND owner = vSchemaName; - -- Validate template table if provided IF pTemplateTableName IS NOT NULL THEN DECLARE @@ -760,183 +750,158 @@ AS ENV_MANAGER.LOG_PROCESS_EVENT('File registration enabled with config key: ' || vConfigKey, 'INFO', vParameters); END IF; - -- Fetch unique key values from A_LOAD_HISTORY - vSql := 'SELECT DISTINCT L.A_ETL_LOAD_SET_KEY' || - ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || - ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY'; + -- Construct single query for entire table (no key value partitioning) + vQuery := 'SELECT ' || vProcessedColumnList || + ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || + ' WHERE T.' 
|| DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY'; + + -- Construct the URI for the file in OCI Object Storage + vUri := vBucketUri || + CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || + NVL(pFileName, UPPER(vTableName) || '.csv'); + + ENV_MANAGER.LOG_PROCESS_EVENT('Exporting to single file: ' || vUri, 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Max file size: ' || pMaxFileSize || ' bytes (' || ROUND(pMaxFileSize/1048576, 2) || ' MB)', 'DEBUG', vParameters); + + -- Use DBMS_CLOUD package to export data to the URI + -- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 100MB (104857600) + DBMS_CLOUD.EXPORT_DATA( + credential_name => pCredentialName, + file_uri_list => vUri, + query => vQuery, + format => json_object( + 'type' VALUE 'CSV', + 'header' VALUE true, + 'quote' VALUE CHR(34), + 'delimiter' VALUE ',', + 'escape' VALUE true, + 'recorddelimiter' VALUE CHR(13)||CHR(10), -- CRLF for Windows-style line endings + 'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes + ) + ); - ENV_MANAGER.LOG_PROCESS_EVENT('Executing key values query: ' || vSql, 'DEBUG', vParameters); - EXECUTE IMMEDIATE vSql BULK COLLECT INTO vKeyValues; - ENV_MANAGER.LOG_PROCESS_EVENT('Found ' || vKeyValues.COUNT || ' unique key values to process', 'DEBUG', vParameters); - - -- Loop over each unique key value - FOR i IN 1 .. vKeyValues.COUNT LOOP - vKeyValue := vKeyValues(i); - - -- Construct the query to extract data for the current key value with A_WORKFLOW_HISTORY_KEY mapping - IF vDataType IN ('VARCHAR2', 'CHAR', 'NCHAR', 'NVARCHAR2') THEN - vQuery := 'SELECT ' || vProcessedColumnList || - ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || - ' WHERE T.' 
|| DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || - ' AND L.A_ETL_LOAD_SET_KEY = ' || CHR(39) || vKeyValue || CHR(39); - ELSIF vDataType IN ('NUMBER', 'FLOAT', 'BINARY_FLOAT', 'BINARY_DOUBLE') THEN - vQuery := 'SELECT ' || vProcessedColumnList || - ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || - ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || - ' AND L.A_ETL_LOAD_SET_KEY = ' || vKeyValue; - ELSIF vDataType LIKE 'TIMESTAMP%' OR vDataType = 'DATE' THEN - vQuery := 'SELECT ' || vProcessedColumnList || - ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' || - ' WHERE T.' || DBMS_ASSERT.simple_sql_name(vKeyColumnName) || ' = L.A_ETL_LOAD_SET_KEY' || - ' AND L.A_ETL_LOAD_SET_KEY = TO_TIMESTAMP(' || CHR(39) || vKeyValue || CHR(39) ||', ''YYYY-MM-DD HH24:MI:SS.FF'')'; - ELSE - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE); - END IF; - - -- Construct the URI for the file in OCI Object Storage - vUri := vBucketUri || - CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || - sanitizeFilename(vKeyValue) || '.csv'; - - ENV_MANAGER.LOG_PROCESS_EVENT('Processing key value: ' || vKeyValue || ' (' || (i) || '/' || vKeyValues.COUNT || ')', 'DEBUG', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT('Export query: ' || vQuery, 'DEBUG', vParameters); - ENV_MANAGER.LOG_PROCESS_EVENT('Export URI: ' || vUri, 'DEBUG', vParameters); - - -- Use DBMS_CLOUD package to export data to the URI - DBMS_CLOUD.EXPORT_DATA( - credential_name => pCredentialName, - file_uri_list => vUri, - query => vQuery, - format => json_object('type' VALUE 'CSV', 'header' VALUE true) - ); - - -- Register exported file to A_SOURCE_FILE_RECEIVED if requested - IF pRegisterExport THEN - DECLARE - vChecksum VARCHAR2(128); - vCreated TIMESTAMP WITH TIME ZONE; - vBytes NUMBER; - vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix - 
vSanitizedFileName VARCHAR2(1000); - vFileName VARCHAR2(1000); - vRetryCount NUMBER := 0; - vMaxRetries NUMBER := 1; -- One retry after initial attempt - vRetryDelay NUMBER := 2; -- 2 seconds delay - BEGIN - -- Extract filename from URI (after last '/') - vFileName := SUBSTR(vUri, INSTR(vUri, '/', -1) + 1); - - -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) - vSanitizedFileName := sanitizeFilename(vFileName); - - -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) - -- Example: keyvalue.csv becomes keyvalue_1_20260211T102621591769Z.csv - vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i'); - - -- Try to get file metadata with retry logic - <> - LOOP - BEGIN - SELECT object_name, checksum, created, bytes - INTO vActualFileName, vChecksum, vCreated, vBytes - FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( - credential_name => pCredentialName, - location_uri => vBucketUri - )) - WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' - ORDER BY created DESC, bytes DESC - FETCH FIRST 1 ROW ONLY; - - -- Extract filename only from full path (remove bucket folder prefix) - vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1); - - -- Success - exit retry loop - EXIT metadata_retry_loop; - - EXCEPTION - WHEN NO_DATA_FOUND THEN - vRetryCount := vRetryCount + 1; - - IF vRetryCount <= vMaxRetries THEN - -- Log retry attempt - ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); - - -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) - DBMS_SESSION.SLEEP(vRetryDelay); - ELSE - -- Max retries exceeded - re-raise exception - RAISE; - END IF; - END; - END LOOP metadata_retry_loop; - - -- Create A_SOURCE_FILE_RECEIVED record for this export 
with metadata - vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; - INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( - A_SOURCE_FILE_RECEIVED_KEY, - A_SOURCE_FILE_CONFIG_KEY, - SOURCE_FILE_NAME, - CHECKSUM, - CREATED, - BYTES, - RECEPTION_DATE, - PROCESSING_STATUS, - PARTITION_YEAR, - PARTITION_MONTH, - ARCH_FILE_NAME - ) VALUES ( - vSourceFileReceivedKey, - NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 - vActualFileName, -- Use actual filename with Oracle suffix - vChecksum, - vCreated, - vBytes, - SYSDATE, - 'INGESTED', - NULL, -- PARTITION_YEAR not used for single-file exports - NULL, -- PARTITION_MONTH not used for single-file exports - NULL -- ARCH_FILE_NAME not used for single-file exports - ); - - ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters); - EXCEPTION - WHEN NO_DATA_FOUND THEN - -- File not found after retries - log warning and continue without metadata - ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); - - -- Sanitize filename for fallback INSERT (function cannot be used in SQL) - vSanitizedFileName := sanitizeFilename(vFileName); - - -- Insert without metadata using theoretical filename - vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; - INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( - A_SOURCE_FILE_RECEIVED_KEY, - A_SOURCE_FILE_CONFIG_KEY, - SOURCE_FILE_NAME, - RECEPTION_DATE, - PROCESSING_STATUS, - PARTITION_YEAR, - PARTITION_MONTH, - ARCH_FILE_NAME - ) VALUES ( - vSourceFileReceivedKey, - NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 - vSanitizedFileName, -- Use pre-calculated sanitized filename - SYSDATE, - 'INGESTED', - NULL, -- PARTITION_YEAR not used for single-file exports - NULL, -- PARTITION_MONTH not used for single-file exports - 
NULL -- ARCH_FILE_NAME not used for single-file exports - ); - - ENV_MANAGER.LOG_PROCESS_EVENT('Registered file without metadata: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vSanitizedFileName, 'DEBUG', vParameters); - END; - END IF; - END LOOP; - - -- Log summary of file registration if enabled + -- Register exported file to A_SOURCE_FILE_RECEIVED if requested IF pRegisterExport THEN - ENV_MANAGER.LOG_PROCESS_EVENT('Registered ' || vKeyValues.COUNT || ' exported files to A_SOURCE_FILE_RECEIVED with config key: ' || vConfigKey, 'INFO', vParameters); + DECLARE + vChecksum VARCHAR2(128); + vCreated TIMESTAMP WITH TIME ZONE; + vBytes NUMBER; + vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix + vSanitizedFileName VARCHAR2(1000); + vFileName VARCHAR2(1000); + vRetryCount NUMBER := 0; + vMaxRetries NUMBER := 1; -- One retry after initial attempt + vRetryDelay NUMBER := 2; -- 2 seconds delay + BEGIN + -- Extract filename from URI (after last '/') + vFileName := SUBSTR(vUri, INSTR(vUri, '/', -1) + 1); + + -- Sanitize filename first (PL/SQL function cannot be used directly in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv) + -- Example: tablename.csv becomes tablename_1_20260211T102621591769Z.csv + vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i'); + + -- Try to get file metadata with retry logic + <> + LOOP + BEGIN + SELECT object_name, checksum, created, bytes + INTO vActualFileName, vChecksum, vCreated, vBytes + FROM TABLE(DBMS_CLOUD.LIST_OBJECTS( + credential_name => pCredentialName, + location_uri => vBucketUri + )) + WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%' + ORDER BY created DESC, bytes DESC + FETCH FIRST 1 ROW ONLY; + + -- Extract filename only from full path (remove bucket folder prefix) + vActualFileName := 
SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1); + + -- Success - exit retry loop + EXIT metadata_retry_loop; + + EXCEPTION + WHEN NO_DATA_FOUND THEN + vRetryCount := vRetryCount + 1; + + IF vRetryCount <= vMaxRetries THEN + -- Log retry attempt + ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters); + + -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK) + DBMS_SESSION.SLEEP(vRetryDelay); + ELSE + -- Max retries exceeded - re-raise exception + RAISE; + END IF; + END; + END LOOP metadata_retry_loop; + + -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + CHECKSUM, + CREATED, + BYTES, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_FILE_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vActualFileName, -- Use actual filename with Oracle suffix + vChecksum, + vCreated, + vBytes, + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL -- ARCH_FILE_NAME not used for single-file exports + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'INFO', vParameters); + EXCEPTION + WHEN NO_DATA_FOUND THEN + -- File not found after retries - log warning and continue without metadata + ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters); + + -- Sanitize filename for fallback INSERT 
(function cannot be used in SQL) + vSanitizedFileName := sanitizeFilename(vFileName); + + -- Insert without metadata using theoretical filename + vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL; + INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED ( + A_SOURCE_FILE_RECEIVED_KEY, + A_SOURCE_FILE_CONFIG_KEY, + SOURCE_FILE_NAME, + RECEPTION_DATE, + PROCESSING_STATUS, + PARTITION_YEAR, + PARTITION_MONTH, + ARCH_FILE_NAME + ) VALUES ( + vSourceFileReceivedKey, + NVL(vConfigKey, -1), -- Use config key if found, otherwise -1 + vSanitizedFileName, -- Use pre-calculated sanitized filename + SYSDATE, + 'INGESTED', + NULL, -- PARTITION_YEAR not used for single-file exports + NULL, -- PARTITION_MONTH not used for single-file exports + NULL -- ARCH_FILE_NAME not used for single-file exports + ); + + ENV_MANAGER.LOG_PROCESS_EVENT('Registered file without metadata: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vSanitizedFileName, 'INFO', vParameters); + END; END IF; ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters); @@ -949,10 +914,6 @@ AS vgMsgTmp := ENV_MANAGER.MSG_COLUMN_NOT_EXISTS || ' (TableName.ColumnName): ' || vTableName||'.'||vKeyColumnName||CASE WHEN vCurrentCol IS NOT NULL THEN '.'||vCurrentCol||' in column list' ELSE '' END; ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_COLUMN_NOT_EXISTS, vgMsgTmp); - WHEN ENV_MANAGER.ERR_UNSUPPORTED_DATA_TYPE THEN - vgMsgTmp := ENV_MANAGER.MSG_UNSUPPORTED_DATA_TYPE || ' vDataType: '||vDataType; - ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); - RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_UNSUPPORTED_DATA_TYPE, vgMsgTmp); WHEN OTHERS THEN -- Log complete error details including full stack trace and backtrace ENV_MANAGER.LOG_PROCESS_ERROR('Export failed: ' || SQLERRM, vParameters, 'DATA_EXPORTER'); diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg 
b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg index dcb7e51..2af9688 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg @@ -9,21 +9,18 @@ AS **/ -- Package Version Information - PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.7.5'; - PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-11 12:15:00'; + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.8.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-12 09:00:00'; PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; -- Version History (last 3-5 changes) VERSION_HISTORY CONSTANT VARCHAR2(4000) := + 'v2.8.0 (2026-02-12): MAJOR REFACTOR - EXPORT_TABLE_DATA now exports to single CSV file instead of partitioning by key values. Added pFileName parameter.' || CHR(10) || 'v2.7.5 (2026-02-11): Added pRegisterExport parameter to EXPORT_TABLE_DATA procedure. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) || 'v2.7.4 (2026-02-11): ACTUAL FILENAME STORAGE - Store real filename with Oracle suffix in SOURCE_FILE_NAME instead of theoretical filename.' || CHR(10) || 'v2.7.3 (2026-02-11): FIX LIKE pattern for DBMS_CLOUD.LIST_OBJECTS - Removed .csv extension from filename before pattern matching.' || CHR(10) || 'v2.7.2 (2026-02-11): FIX pRegisterExport in EXPORT_TABLE_DATA_TO_CSV_BY_DATE - Added missing pRegisterExport parameter to EXPORT_SINGLE_PARTITION call.' || CHR(10) || - 'v2.7.1 (2026-02-11): AUTO-LOOKUP A_SOURCE_FILE_CONFIG_KEY - Parse pFolderName to automatically find config key from A_SOURCE_FILE_CONFIG.' || CHR(10) || - 'v2.7.0 (2026-02-10): Added pRegisterExport parameter to EXPORT_TABLE_DATA_TO_CSV_BY_DATE. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) || - 'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. 
SQLERRM properly assigned to vgMsgTmp variable.' || CHR(10) || - 'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Session-safe cleanup with TASK_NAME filtering.' || CHR(10) || - 'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry.' || CHR(10); + 'v2.7.1 (2026-02-11): AUTO-LOOKUP A_SOURCE_FILE_CONFIG_KEY - Parse pFolderName to automatically find config key from A_SOURCE_FILE_CONFIG.' || CHR(10); cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); vgMsgTmp VARCHAR2(32000); @@ -71,16 +68,18 @@ AS /** * @name EXPORT_TABLE_DATA * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA. - * Exports data into CSV file on OCI infrustructure. + * Exports data into single CSV file on OCI infrastructure. * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' * Supports template table for column order and per-column date formatting. - * When pRegisterExport=TRUE, successfully exported files are registered in: + * When pRegisterExport=TRUE, successfully exported file is registered in: * - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata) + * @param pFileName - Optional filename (e.g., 'export.csv'). 
NULL = auto-generate from table name * @param pTemplateTableName - Optional template table (SCHEMA.TABLE or TABLE) for: * - Column order control (template defines CSV structure) * - Per-column date formatting via FILE_MANAGER.GET_DATE_FORMAT * - NULL = use source table columns in natural order - * @param pRegisterExport - When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED table + * @param pMaxFileSize - Maximum file size in bytes (default 104857600 = 100MB, min 10MB, max 1GB) + * @param pRegisterExport - When TRUE, registers exported CSV file in A_SOURCE_FILE_RECEIVED table * @example * begin * DATA_EXPORTER.EXPORT_TABLE_DATA( @@ -89,7 +88,9 @@ AS * pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK', * pBucketArea => 'DATA', * pFolderName => 'csv_exports', + * pFileName => 'my_export.csv', -- Optional * pTemplateTableName => 'CT_ET_TEMPLATES.MY_TEMPLATE', -- Optional + * pMaxFileSize => 104857600, -- Optional, default 100MB * pRegisterExport => TRUE -- Optional, default FALSE * ); * end; @@ -100,7 +101,9 @@ AS pKeyColumnName IN VARCHAR2, pBucketArea IN VARCHAR2, pFolderName IN VARCHAR2, + pFileName IN VARCHAR2 default NULL, pTemplateTableName IN VARCHAR2 default NULL, + pMaxFileSize IN NUMBER default 104857600, pRegisterExport IN BOOLEAN default FALSE, pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName );