Merge branch 'develop'
@@ -597,12 +597,14 @@ AS

   PROCEDURE EXPORT_TABLE_DATA (
     pSchemaName IN VARCHAR2,
     pTableName IN VARCHAR2,
     pKeyColumnName IN VARCHAR2,
     pBucketArea IN VARCHAR2,
     pFolderName IN VARCHAR2,
-    pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
+    pTemplateTableName IN VARCHAR2 default NULL,
+    pRegisterExport IN BOOLEAN default FALSE,
+    pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
   )
   IS
     -- Type definition for key values
@@ -621,7 +623,14 @@ AS
     vBucketUri VARCHAR2(4000);
     vProcessedColumnList VARCHAR2(32767);
     vCurrentCol VARCHAR2(128);
-    vAllColumnsList VARCHAR2(32767);
+    -- Variables for file registration (when pRegisterExport=TRUE)
+    vConfigKey NUMBER;
+    vSourceKey VARCHAR2(100);
+    vTableId VARCHAR2(100);
+    vSlashPos1 NUMBER;
+    vSlashPos2 NUMBER;
+    vSourceFileReceivedKey NUMBER;
+
   BEGIN
     vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||''''
@@ -629,6 +638,8 @@ AS
       ,'pKeyColumnName => '''||nvl(pKeyColumnName, 'NULL')||''''
       ,'pBucketArea => '''||nvl(pBucketArea, 'NULL')||''''
       ,'pFolderName => '''||nvl(pFolderName, 'NULL')||''''
+      ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||''''
+      ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||''''
       ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||''''
     ));
     ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
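
The pRegisterExport flag is a BOOLEAN, so it cannot be quoted with the nvl(...) pattern used for the VARCHAR2 parameters above; the CASE expression maps it to the literal text 'TRUE'/'FALSE' before concatenation. A minimal standalone sketch of that pattern (not part of the commit):

DECLARE
  pRegisterExport BOOLEAN := FALSE;
  vText VARCHAR2(100);
BEGIN
  -- BOOLEAN has no NVL/quoting shortcut in this string-building style, so map it explicitly
  vText := 'pRegisterExport => ''' ||
           CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END || '''';
  DBMS_OUTPUT.PUT_LINE(vText);  -- prints: pRegisterExport => 'FALSE'
END;
/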
@@ -670,20 +681,85 @@ AS
       AND column_name = vKeyColumnName
       AND owner = vSchemaName;

-    -- Build list of all columns for the table (including key column for aliasing)
-    SELECT LISTAGG(column_name, ', ') WITHIN GROUP (ORDER BY column_id)
-      INTO vAllColumnsList
-      FROM all_tab_columns
-     WHERE table_name = vTableName
-       AND owner = vSchemaName;
-
-    -- Process column list to add T. prefix and alias key column as A_WORKFLOW_HISTORY_KEY
-    vProcessedColumnList := processColumnList(vAllColumnsList, vTableName, vSchemaName, vKeyColumnName);
-
-    ENV_MANAGER.LOG_PROCESS_EVENT('Dynamic column list built: ' || vAllColumnsList, 'DEBUG', vParameters);
-    ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with T. prefix: ' || vProcessedColumnList, 'DEBUG', vParameters);
+    -- Validate template table if provided
+    IF pTemplateTableName IS NOT NULL THEN
+      DECLARE
+        vTemplateSchema VARCHAR2(128);
+        vTemplateTable VARCHAR2(128);
+        vTemplateCount NUMBER;
+      BEGIN
+        -- Parse template table name (SCHEMA.TABLE or just TABLE)
+        IF INSTR(pTemplateTableName, '.') > 0 THEN
+          vTemplateSchema := UPPER(SUBSTR(pTemplateTableName, 1, INSTR(pTemplateTableName, '.') - 1));
+          vTemplateTable := UPPER(SUBSTR(pTemplateTableName, INSTR(pTemplateTableName, '.') + 1));
+        ELSE
+          vTemplateSchema := vSchemaName;
+          vTemplateTable := UPPER(pTemplateTableName);
+        END IF;
+
+        -- Check if template table exists
+        SELECT COUNT(*) INTO vTemplateCount
+          FROM all_tables
+         WHERE table_name = vTemplateTable
+           AND owner = vTemplateSchema;
+
+        IF vTemplateCount = 0 THEN
+          vgMsgTmp := ENV_MANAGER.MSG_TABLE_NOT_EXISTS || ': Template table ' || vTemplateSchema || '.' || vTemplateTable;
+          ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters);
+          RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_TABLE_NOT_EXISTS, vgMsgTmp);
+        END IF;
+
+        ENV_MANAGER.LOG_PROCESS_EVENT('Template table validated: ' || vTemplateSchema || '.' || vTemplateTable, 'DEBUG', vParameters);
+      END;
+    END IF;
+
+    -- Build query with TO_CHAR for date columns (per-column format support)
+    vProcessedColumnList := buildQueryWithDateFormats(NULL, vTableName, vSchemaName, vKeyColumnName, pTemplateTableName);
+
+    ENV_MANAGER.LOG_PROCESS_EVENT('Processed column list with TO_CHAR for date columns: ' || vProcessedColumnList, 'DEBUG', vParameters);
+    ENV_MANAGER.LOG_PROCESS_EVENT('Template table: ' || NVL(pTemplateTableName, 'NULL - using global default for all dates'), 'INFO', vParameters);

     vTableName := DBMS_ASSERT.SCHEMA_NAME(vSchemaName) || '.' || DBMS_ASSERT.simple_sql_name(vTableName);
+
+    -- Lookup A_SOURCE_FILE_CONFIG_KEY based on pFolderName parsing if pRegisterExport is enabled
+    IF pRegisterExport THEN
+      -- Format: {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID}
+      -- Example: 'ODS/CSDB/CSDB_DEBT_DAILY' -> SOURCE_KEY='CSDB', TABLE_ID='CSDB_DEBT_DAILY'
+
+      -- Parse pFolderName to extract SOURCE_KEY and TABLE_ID
+      vSlashPos1 := INSTR(pFolderName, '/', 1, 1); -- First '/' position
+      vSlashPos2 := INSTR(pFolderName, '/', 1, 2); -- Second '/' position
+
+      IF vSlashPos1 > 0 AND vSlashPos2 > 0 THEN
+        -- Extract segment 2 (SOURCE_KEY) and segment 3 (TABLE_ID)
+        vSourceKey := SUBSTR(pFolderName, vSlashPos1 + 1, vSlashPos2 - vSlashPos1 - 1);
+        vTableId := SUBSTR(pFolderName, vSlashPos2 + 1);
+
+        -- Find configuration based on SOURCE_KEY and TABLE_ID
+        BEGIN
+          SELECT A_SOURCE_FILE_CONFIG_KEY
+            INTO vConfigKey
+            FROM CT_MRDS.A_SOURCE_FILE_CONFIG
+           WHERE A_SOURCE_KEY = vSourceKey
+             AND TABLE_ID = vTableId
+             AND SOURCE_FILE_TYPE = 'INPUT'
+             AND ROWNUM = 1;
+
+          ENV_MANAGER.LOG_PROCESS_EVENT('Found config key: ' || vConfigKey || ' for SOURCE=' || vSourceKey || ', TABLE=' || vTableId, 'DEBUG', vParameters);
+        EXCEPTION
+          WHEN NO_DATA_FOUND THEN
+            vConfigKey := -1;
+            ENV_MANAGER.LOG_PROCESS_EVENT('No config found for SOURCE=' || vSourceKey || ', TABLE=' || vTableId || ' - using default (-1)', 'INFO', vParameters);
+        END;
+      ELSE
+        -- Cannot parse folder name - use default
+        vConfigKey := -1;
+        ENV_MANAGER.LOG_PROCESS_EVENT('Cannot parse pFolderName: ' || pFolderName || ' - using default (-1)', 'WARNING', vParameters);
+      END IF;
+
+      ENV_MANAGER.LOG_PROCESS_EVENT('File registration enabled with config key: ' || vConfigKey, 'INFO', vParameters);
+    END IF;
+
     -- Fetch unique key values from A_LOAD_HISTORY
     vSql := 'SELECT DISTINCT L.A_ETL_LOAD_SET_KEY' ||
            ' FROM ' || vTableName || ' T, CT_ODS.A_LOAD_HISTORY L' ||
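
For reference, a self-contained sketch of the {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID} parsing convention introduced above, using only the INSTR/SUBSTR calls from the diff; the folder value is the example quoted in the code comments (not part of the commit):

DECLARE
  vFolder    VARCHAR2(200) := 'ODS/CSDB/CSDB_DEBT_DAILY';
  vPos1      NUMBER := INSTR(vFolder, '/', 1, 1);  -- first '/'
  vPos2      NUMBER := INSTR(vFolder, '/', 1, 2);  -- second '/'
  vSourceKey VARCHAR2(100);
  vTableId   VARCHAR2(100);
BEGIN
  IF vPos1 > 0 AND vPos2 > 0 THEN
    vSourceKey := SUBSTR(vFolder, vPos1 + 1, vPos2 - vPos1 - 1);  -- 'CSDB'
    vTableId   := SUBSTR(vFolder, vPos2 + 1);                     -- 'CSDB_DEBT_DAILY'
  END IF;
  DBMS_OUTPUT.PUT_LINE('SOURCE_KEY=' || vSourceKey || ', TABLE_ID=' || vTableId);
END;
/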
@@ -733,7 +809,136 @@ AS
         query => vQuery,
         format => json_object('type' VALUE 'CSV', 'header' VALUE true)
       );
+
+      -- Register exported file to A_SOURCE_FILE_RECEIVED if requested
+      IF pRegisterExport THEN
+        DECLARE
+          vChecksum VARCHAR2(128);
+          vCreated TIMESTAMP WITH TIME ZONE;
+          vBytes NUMBER;
+          vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix
+          vSanitizedFileName VARCHAR2(1000);
+          vFileName VARCHAR2(1000);
+          vRetryCount NUMBER := 0;
+          vMaxRetries NUMBER := 1; -- One retry after initial attempt
+          vRetryDelay NUMBER := 2; -- 2 seconds delay
+        BEGIN
+          -- Extract filename from URI (after last '/')
+          vFileName := SUBSTR(vUri, INSTR(vUri, '/', -1) + 1);
+
+          -- Sanitize filename first (PL/SQL function cannot be used directly in SQL)
+          vSanitizedFileName := sanitizeFilename(vFileName);
+
+          -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv)
+          -- Example: keyvalue.csv becomes keyvalue_1_20260211T102621591769Z.csv
+          vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i');
+
+          -- Try to get file metadata with retry logic
+          <<metadata_retry_loop>>
+          LOOP
+            BEGIN
+              SELECT object_name, checksum, created, bytes
+                INTO vActualFileName, vChecksum, vCreated, vBytes
+                FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
+                       credential_name => pCredentialName,
+                       location_uri => vBucketUri
+                     ))
+               WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%'
+               ORDER BY created DESC, bytes DESC
+               FETCH FIRST 1 ROW ONLY;
+
+              -- Extract filename only from full path (remove bucket folder prefix)
+              vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1);
+
+              -- Success - exit retry loop
+              EXIT metadata_retry_loop;
+
+            EXCEPTION
+              WHEN NO_DATA_FOUND THEN
+                vRetryCount := vRetryCount + 1;
+
+                IF vRetryCount <= vMaxRetries THEN
+                  -- Log retry attempt
+                  ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters);
+
+                  -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK)
+                  DBMS_SESSION.SLEEP(vRetryDelay);
+                ELSE
+                  -- Max retries exceeded - re-raise exception
+                  RAISE;
+                END IF;
+            END;
+          END LOOP metadata_retry_loop;
+
+          -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata
+          vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL;
+          INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED (
+            A_SOURCE_FILE_RECEIVED_KEY,
+            A_SOURCE_FILE_CONFIG_KEY,
+            SOURCE_FILE_NAME,
+            CHECKSUM,
+            CREATED,
+            BYTES,
+            RECEPTION_DATE,
+            PROCESSING_STATUS,
+            PARTITION_YEAR,
+            PARTITION_MONTH,
+            ARCH_FILE_NAME
+          ) VALUES (
+            vSourceFileReceivedKey,
+            NVL(vConfigKey, -1), -- Use config key if found, otherwise -1
+            vActualFileName, -- Use actual filename with Oracle suffix
+            vChecksum,
+            vCreated,
+            vBytes,
+            SYSDATE,
+            'INGESTED',
+            NULL, -- PARTITION_YEAR not used for single-file exports
+            NULL, -- PARTITION_MONTH not used for single-file exports
+            NULL  -- ARCH_FILE_NAME not used for single-file exports
+          );
+
+          ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters);
+        EXCEPTION
+          WHEN NO_DATA_FOUND THEN
+            -- File not found after retries - log warning and continue without metadata
+            ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters);
+
+            -- Sanitize filename for fallback INSERT (function cannot be used in SQL)
+            vSanitizedFileName := sanitizeFilename(vFileName);
+
+            -- Insert without metadata using theoretical filename
+            vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL;
+            INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED (
+              A_SOURCE_FILE_RECEIVED_KEY,
+              A_SOURCE_FILE_CONFIG_KEY,
+              SOURCE_FILE_NAME,
+              RECEPTION_DATE,
+              PROCESSING_STATUS,
+              PARTITION_YEAR,
+              PARTITION_MONTH,
+              ARCH_FILE_NAME
+            ) VALUES (
+              vSourceFileReceivedKey,
+              NVL(vConfigKey, -1), -- Use config key if found, otherwise -1
+              vSanitizedFileName, -- Use pre-calculated sanitized filename
+              SYSDATE,
+              'INGESTED',
+              NULL, -- PARTITION_YEAR not used for single-file exports
+              NULL, -- PARTITION_MONTH not used for single-file exports
+              NULL  -- ARCH_FILE_NAME not used for single-file exports
+            );
+
+            ENV_MANAGER.LOG_PROCESS_EVENT('Registered file without metadata: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vSanitizedFileName, 'DEBUG', vParameters);
+        END;
+      END IF;
     END LOOP;
+
+    -- Log summary of file registration if enabled
+    IF pRegisterExport THEN
+      ENV_MANAGER.LOG_PROCESS_EVENT('Registered ' || vKeyValues.COUNT || ' exported files to A_SOURCE_FILE_RECEIVED with config key: ' || vConfigKey, 'INFO', vParameters);
+    END IF;
+
     ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters);
   EXCEPTION
     WHEN ENV_MANAGER.ERR_TABLE_NOT_EXISTS THEN
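
A hedged usage sketch of the new flag on EXPORT_TABLE_DATA, followed by a query against the registration table named in the diff; the schema, table, and folder values are illustrative placeholders, not part of the commit:

begin
  DATA_EXPORTER.EXPORT_TABLE_DATA(
    pSchemaName     => 'CT_MRDS',
    pTableName      => 'MY_TABLE',
    pKeyColumnName  => 'A_ETL_LOAD_SET_KEY_FK',
    pBucketArea     => 'ODS',
    pFolderName     => 'ODS/CSDB/CSDB_DEBT_DAILY',  -- {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID}
    pRegisterExport => TRUE                          -- register each exported CSV file
  );
end;
/

-- Verify what was registered by the run above:
SELECT A_SOURCE_FILE_RECEIVED_KEY, A_SOURCE_FILE_CONFIG_KEY,
       SOURCE_FILE_NAME, BYTES, PROCESSING_STATUS, RECEPTION_DATE
  FROM CT_MRDS.A_SOURCE_FILE_RECEIVED
 WHERE RECEPTION_DATE >= TRUNC(SYSDATE)
 ORDER BY A_SOURCE_FILE_RECEIVED_KEY DESC;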
@@ -1032,6 +1237,7 @@ AS
     pParallelDegree IN NUMBER default 1,
     pTemplateTableName IN VARCHAR2 default NULL,
     pMaxFileSize IN NUMBER default 104857600,
+    pRegisterExport IN BOOLEAN default FALSE,
     pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
   )
   IS
@@ -1045,6 +1251,15 @@ AS
     vBucketUri VARCHAR2(4000);
     vCurrentCol VARCHAR2(128);
     vPartitions partition_tab;
+    vSourceFileReceivedKey NUMBER;
+    vFileName VARCHAR2(1000);
+    vFileUri VARCHAR2(4000);
+    -- Variables for A_SOURCE_FILE_CONFIG lookup
+    vSourceKey VARCHAR2(100);
+    vTableId VARCHAR2(200);
+    vConfigKey NUMBER := -1;
+    vSlashPos1 NUMBER;
+    vSlashPos2 NUMBER;

   BEGIN
     vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST( 'pSchemaName => '''||nvl(pSchemaName, 'NULL')||''''
@@ -1059,6 +1274,7 @@ AS
       ,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||''''
      ,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||''''
      ,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||''''
+     ,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||''''
      ,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||''''
     ));
     ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
@@ -1251,6 +1467,175 @@ AS
       END IF;
     END IF;
+
+    -- Note: File registration handled by EXPORT_SINGLE_PARTITION when pRegisterExport=TRUE
+    -- Each partition calls pRegisterExport logic independently during serial/parallel execution
+
+    -- Register exported files to A_SOURCE_FILE_RECEIVED if requested (after successful export)
+    IF pRegisterExport THEN
+      -- Lookup A_SOURCE_FILE_CONFIG_KEY based on pFolderName parsing
+      -- Format: {BUCKET_AREA}/{SOURCE_KEY}/{TABLE_ID}
+      -- Example: 'ODS/CSDB/CSDB_DEBT_DAILY' -> SOURCE_KEY='CSDB', TABLE_ID='CSDB_DEBT_DAILY'
+
+      -- Parse pFolderName to extract SOURCE_KEY and TABLE_ID
+      vSlashPos1 := INSTR(pFolderName, '/', 1, 1); -- First '/' position
+      vSlashPos2 := INSTR(pFolderName, '/', 1, 2); -- Second '/' position
+
+      IF vSlashPos1 > 0 AND vSlashPos2 > 0 THEN
+        -- Extract segment 2 (SOURCE_KEY) and segment 3 (TABLE_ID)
+        vSourceKey := SUBSTR(pFolderName, vSlashPos1 + 1, vSlashPos2 - vSlashPos1 - 1);
+        vTableId := SUBSTR(pFolderName, vSlashPos2 + 1);
+
+        -- Find configuration based on SOURCE_KEY and TABLE_ID
+        BEGIN
+          SELECT A_SOURCE_FILE_CONFIG_KEY
+            INTO vConfigKey
+            FROM CT_MRDS.A_SOURCE_FILE_CONFIG
+           WHERE A_SOURCE_KEY = vSourceKey
+             AND TABLE_ID = vTableId
+             AND SOURCE_FILE_TYPE = 'INPUT'
+             AND ROWNUM = 1;
+
+          ENV_MANAGER.LOG_PROCESS_EVENT('Found config key: ' || vConfigKey || ' for SOURCE=' || vSourceKey || ', TABLE=' || vTableId, 'DEBUG', vParameters);
+        EXCEPTION
+          WHEN NO_DATA_FOUND THEN
+            vConfigKey := -1;
+            ENV_MANAGER.LOG_PROCESS_EVENT('No config found for SOURCE=' || vSourceKey || ', TABLE=' || vTableId || ' - using default (-1)', 'INFO', vParameters);
+        END;
+      ELSE
+        -- Cannot parse folder name - use default
+        vConfigKey := -1;
+        ENV_MANAGER.LOG_PROCESS_EVENT('Cannot parse pFolderName: ' || pFolderName || ' - using default (-1)', 'WARNING', vParameters);
+      END IF;
+
+      ENV_MANAGER.LOG_PROCESS_EVENT('Registering ' || vPartitions.COUNT || ' exported files to A_SOURCE_FILE_RECEIVED with config key: ' || vConfigKey, 'INFO', vParameters);
+
+      FOR i IN 1 .. vPartitions.COUNT LOOP
+        -- Construct filename and URI for this partition
+        vFileName := NVL(vFileBaseName, UPPER(REPLACE(vTableName, vSchemaName || '.', ''))) || '_' || vPartitions(i).year || vPartitions(i).month || '.csv';
+        vFileUri := vBucketUri || CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || sanitizeFilename(vFileName);
+
+        -- Get file metadata from OCI bucket (CHECKSUM, CREATED, BYTES) with retry logic
+        DECLARE
+          vChecksum VARCHAR2(128);
+          vCreated TIMESTAMP WITH TIME ZONE;
+          vBytes NUMBER;
+          vActualFileName VARCHAR2(1000); -- Actual filename with Oracle suffix
+          vSanitizedFileName VARCHAR2(1000);
+          vRetryCount NUMBER := 0;
+          vMaxRetries NUMBER := 1; -- One retry after initial attempt
+          vRetryDelay NUMBER := 2; -- 2 seconds delay
+        BEGIN
+          -- Sanitize filename first (PL/SQL function cannot be used directly in SQL)
+          vSanitizedFileName := sanitizeFilename(vFileName);
+
+          -- Remove .csv extension for LIKE pattern matching (Oracle adds suffixes BEFORE .csv)
+          -- Example: LEGACY_DEBT_202508.csv becomes LEGACY_DEBT_202508_1_20260211T102621591769Z.csv
+          vSanitizedFileName := REGEXP_REPLACE(vSanitizedFileName, '\.csv$', '', 1, 0, 'i');
+
+          -- Try to get file metadata with retry logic
+          <<metadata_retry_loop>>
+          LOOP
+            BEGIN
+              SELECT object_name, checksum, created, bytes
+                INTO vActualFileName, vChecksum, vCreated, vBytes
+                FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
+                       credential_name => pCredentialName,
+                       location_uri => vBucketUri
+                     ))
+               WHERE object_name LIKE CASE WHEN pFolderName IS NOT NULL THEN pFolderName || '/' ELSE '' END || vSanitizedFileName || '%'
+               ORDER BY created DESC, bytes DESC
+               FETCH FIRST 1 ROW ONLY;
+
+              -- Extract filename only from full path (remove bucket folder prefix)
+              -- vActualFileName contains: 'ODS/CSDB/CSDB_DEBT/LEGACY_DEBT_202508_1_20260211T111341375171Z.csv'
+              -- Extract only: 'LEGACY_DEBT_202508_1_20260211T111341375171Z.csv'
+              vActualFileName := SUBSTR(vActualFileName, INSTR(vActualFileName, '/', -1) + 1);
+
+              -- Success - exit retry loop
+              EXIT metadata_retry_loop;
+
+            EXCEPTION
+              WHEN NO_DATA_FOUND THEN
+                vRetryCount := vRetryCount + 1;
+
+                IF vRetryCount <= vMaxRetries THEN
+                  -- Log retry attempt
+                  ENV_MANAGER.LOG_PROCESS_EVENT('File not found in bucket (attempt ' || vRetryCount || '/' || (vMaxRetries + 1) || '), retrying after ' || vRetryDelay || ' seconds: ' || vFileName, 'DEBUG', vParameters);
+
+                  -- Wait before retry using DBMS_SESSION.SLEEP (alternative to DBMS_LOCK)
+                  DBMS_SESSION.SLEEP(vRetryDelay);
+                ELSE
+                  -- Max retries exceeded - re-raise exception
+                  RAISE;
+                END IF;
+            END;
+          END LOOP metadata_retry_loop;
+
+          -- Create A_SOURCE_FILE_RECEIVED record for this export with metadata
+          vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL;
+          INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED (
+            A_SOURCE_FILE_RECEIVED_KEY,
+            A_SOURCE_FILE_CONFIG_KEY,
+            SOURCE_FILE_NAME,
+            CHECKSUM,
+            CREATED,
+            BYTES,
+            RECEPTION_DATE,
+            PROCESSING_STATUS,
+            PARTITION_YEAR,
+            PARTITION_MONTH,
+            ARCH_FILE_NAME
+          ) VALUES (
+            vSourceFileReceivedKey,
+            vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup
+            vActualFileName, -- Use actual filename with Oracle suffix
+            vChecksum,
+            vCreated,
+            vBytes,
+            SYSDATE,
+            'INGESTED',
+            NULL, -- PARTITION_YEAR not used for CSV exports
+            NULL, -- PARTITION_MONTH not used for CSV exports
+            NULL  -- ARCH_FILE_NAME not used for CSV exports
+          );
+
+          ENV_MANAGER.LOG_PROCESS_EVENT('Registered file: FileReceivedKey=' || vSourceFileReceivedKey || ', File=' || vActualFileName || ', Size=' || vBytes || ' bytes', 'DEBUG', vParameters);
+        EXCEPTION
+          WHEN NO_DATA_FOUND THEN
+            -- File not found after retries - log warning and continue without metadata
+            ENV_MANAGER.LOG_PROCESS_EVENT('WARNING: File not found in bucket after ' || (vMaxRetries + 1) || ' attempts: ' || vFileName, 'WARNING', vParameters);
+
+            -- Sanitize filename for fallback INSERT (function cannot be used in SQL)
+            vSanitizedFileName := sanitizeFilename(vFileName);
+
+            -- Insert without metadata
+            vSourceFileReceivedKey := CT_MRDS.A_SOURCE_FILE_RECEIVED_KEY_SEQ.NEXTVAL;
+            INSERT INTO CT_MRDS.A_SOURCE_FILE_RECEIVED (
+              A_SOURCE_FILE_RECEIVED_KEY,
+              A_SOURCE_FILE_CONFIG_KEY,
+              SOURCE_FILE_NAME,
+              RECEPTION_DATE,
+              PROCESSING_STATUS,
+              PARTITION_YEAR,
+              PARTITION_MONTH,
+              ARCH_FILE_NAME
+            ) VALUES (
+              vSourceFileReceivedKey,
+              vConfigKey, -- Config key from A_SOURCE_FILE_CONFIG lookup
+              vSanitizedFileName, -- Fallback: use theoretical filename if actual not found
+              SYSDATE,
+              'INGESTED',
+              NULL, -- PARTITION_YEAR not used for CSV exports
+              NULL, -- PARTITION_MONTH not used for CSV exports
+              NULL  -- ARCH_FILE_NAME not used for CSV exports
+            );
+        END;
+      END LOOP;
+
+      COMMIT;
+      ENV_MANAGER.LOG_PROCESS_EVENT('Successfully registered all ' || vPartitions.COUNT || ' files', 'INFO', vParameters);
+    END IF;
+
     ENV_MANAGER.LOG_PROCESS_EVENT('Export completed successfully for ' || vPartitions.COUNT || ' files', 'INFO', vParameters);
     ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters);
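
To make the LIKE matching above concrete: DBMS_CLOUD.EXPORT_DATA appends its own suffix before the '.csv' extension, so the theoretical filename is only usable as a prefix pattern. A small sketch using the example filenames quoted in the diff (not part of the commit):

DECLARE
  vTheoretical VARCHAR2(1000) := 'LEGACY_DEBT_202508.csv';
  vPattern     VARCHAR2(1000);
BEGIN
  -- 'LEGACY_DEBT_202508.csv' -> 'LEGACY_DEBT_202508', then append '%' for the LIKE match
  vPattern := REGEXP_REPLACE(vTheoretical, '\.csv$', '', 1, 0, 'i') || '%';
  DBMS_OUTPUT.PUT_LINE('LIKE pattern: ' || vPattern);
  -- This matches the object Oracle actually creates, e.g.
  -- 'LEGACY_DEBT_202508_1_20260211T111341375171Z.csv'
END;
/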
@@ -9,23 +9,21 @@ AS
   **/

   -- Package Version Information
-  PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.6.3';
-  PACKAGE_BUILD_DATE CONSTANT VARCHAR2(19) := '2026-01-28 19:30:00';
-  PACKAGE_AUTHOR CONSTANT VARCHAR2(50) := 'MRDS Development Team';
+  PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.7.5';
+  PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-11 12:15:00';
+  PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski';

   -- Version History (last 3-5 changes)
   VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
-    'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. SQLERRM and DBMS_UTILITY.FORMAT_ERROR_BACKTRACE cannot be used directly in SQL UPDATE statements. Now properly assigned to vgMsgTmp variable before UPDATE.' || CHR(10) ||
-    'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Changed DELETE to filter by age (>24h) instead of deleting all COMPLETED chunks. Prevents concurrent sessions from deleting each other chunks. Session-safe cleanup with TASK_NAME filtering. Enables true parallel execution of multiple export jobs.' || CHR(10) ||
-    'v2.6.1 (2026-01-28): Added DELETE_FAILED_EXPORT_FILE procedure to clean up partial/corrupted files before retry. When partition fails mid-export, partial file is deleted before retry to prevent Oracle from creating _1 suffixed duplicates. Ensures clean retry without orphaned files in OCI bucket.' || CHR(10) ||
-    'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry. System now restarts ONLY failed partitions instead of re-exporting all data. Added ERROR_MESSAGE and EXPORT_TIMESTAMP columns for better error handling and monitoring. Prevents duplicate file creation when parallel tasks fail (e.g., 22 partitions with 16 threads, 3 failures no longer duplicates 19 successful exports).' || CHR(10) ||
-    'v2.5.0 (2026-01-26): Added recorddelimiter parameter with CRLF (CHR(13)||CHR(10)) for CSV exports to ensure Windows-compatible line endings. Improves cross-platform compatibility when CSV files are opened in Windows applications (Notepad, Excel).' || CHR(10) ||
-    'v2.4.0 (2026-01-11): Added pTemplateTableName parameter for per-column date format configuration. Implements dynamic query building with TO_CHAR for each date/timestamp column using FILE_MANAGER.GET_DATE_FORMAT. Supports 3-tier hierarchy: column-specific, template DEFAULT, global fallback. Eliminates single dateformat limitation of DBMS_CLOUD.EXPORT_DATA.' || CHR(10) ||
-    'v2.3.0 (2025-12-20): Added parallel partition processing using DBMS_PARALLEL_EXECUTE. New pParallelDegree parameter (1-16, default 1) for EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures. Each year/month partition processed in separate thread for improved performance.' || CHR(10) ||
-    'v2.2.0 (2025-12-19): DRY refactoring - extracted shared helper functions (sanitizeFilename, VALIDATE_TABLE_AND_COLUMNS, GET_PARTITIONS, EXPORT_SINGLE_PARTITION worker procedure). Reduced code duplication by ~400 lines. Prepared architecture for v2.3.0 parallel processing.' || CHR(10) ||
-    'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY, added consistent column mapping and dynamic column list to EXPORT_TABLE_DATA procedure, enhanced DEBUG logging for all export operations' || CHR(10) ||
-    'v2.1.0 (2025-10-22): Added version tracking and PARTITION_YEAR/PARTITION_MONTH support' || CHR(10) ||
-    'v2.0.0 (2025-10-01): Separated export functionality from FILE_MANAGER package' || CHR(10);
+    'v2.7.5 (2026-02-11): Added pRegisterExport parameter to EXPORT_TABLE_DATA procedure. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) ||
+    'v2.7.4 (2026-02-11): ACTUAL FILENAME STORAGE - Store real filename with Oracle suffix in SOURCE_FILE_NAME instead of theoretical filename.' || CHR(10) ||
+    'v2.7.3 (2026-02-11): FIX LIKE pattern for DBMS_CLOUD.LIST_OBJECTS - Removed .csv extension from filename before pattern matching.' || CHR(10) ||
+    'v2.7.2 (2026-02-11): FIX pRegisterExport in EXPORT_TABLE_DATA_TO_CSV_BY_DATE - Added missing pRegisterExport parameter to EXPORT_SINGLE_PARTITION call.' || CHR(10) ||
+    'v2.7.1 (2026-02-11): AUTO-LOOKUP A_SOURCE_FILE_CONFIG_KEY - Parse pFolderName to automatically find config key from A_SOURCE_FILE_CONFIG.' || CHR(10) ||
+    'v2.7.0 (2026-02-10): Added pRegisterExport parameter to EXPORT_TABLE_DATA_TO_CSV_BY_DATE. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10) ||
+    'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. SQLERRM properly assigned to vgMsgTmp variable.' || CHR(10) ||
+    'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Session-safe cleanup with TASK_NAME filtering.' || CHR(10) ||
+    'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry.' || CHR(10);

   cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10);
   vgMsgTmp VARCHAR2(32000);
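
A quick way to confirm which build is deployed after applying this change, using the GET_VERSION and GET_BUILD_INFO functions declared in the package specification further below; '2.7.5' is the version constant set in this hunk, which GET_VERSION is documented to return:

SELECT CT_MRDS.DATA_EXPORTER.GET_VERSION    AS version,
       CT_MRDS.DATA_EXPORTER.GET_BUILD_INFO AS build_info
  FROM dual;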
@@ -75,24 +73,36 @@ AS
    * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
    *       Exports data into CSV file on OCI infrustructure.
    *       pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
+   *       Supports template table for column order and per-column date formatting.
+   *       When pRegisterExport=TRUE, successfully exported files are registered in:
+   *       - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata)
+   * @param pTemplateTableName - Optional template table (SCHEMA.TABLE or TABLE) for:
+   *        - Column order control (template defines CSV structure)
+   *        - Per-column date formatting via FILE_MANAGER.GET_DATE_FORMAT
+   *        - NULL = use source table columns in natural order
+   * @param pRegisterExport - When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED table
    * @example
    * begin
    *   DATA_EXPORTER.EXPORT_TABLE_DATA(
    *     pSchemaName => 'CT_MRDS',
    *     pTableName => 'MY_TABLE',
    *     pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
    *     pBucketArea => 'DATA',
-   *     pFolderName => 'csv_exports'
+   *     pFolderName => 'csv_exports',
+   *     pTemplateTableName => 'CT_ET_TEMPLATES.MY_TEMPLATE', -- Optional
+   *     pRegisterExport => TRUE -- Optional, default FALSE
    *   );
    * end;
   **/
   PROCEDURE EXPORT_TABLE_DATA (
     pSchemaName IN VARCHAR2,
     pTableName IN VARCHAR2,
     pKeyColumnName IN VARCHAR2,
     pBucketArea IN VARCHAR2,
     pFolderName IN VARCHAR2,
-    pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
+    pTemplateTableName IN VARCHAR2 default NULL,
+    pRegisterExport IN BOOLEAN default FALSE,
+    pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
   );

@@ -146,6 +156,8 @@ AS
    *       but exports to CSV format instead of Parquet.
    *       Supports parallel partition processing via pParallelDegree parameter (1-16).
    *       File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL)
+   *       When pRegisterExport=TRUE, successfully exported files are registered in:
+   *       - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata)
    * @example
    * begin
    *   -- With custom filename
@@ -158,7 +170,8 @@ AS
    *     pFileName => 'my_export.csv',
    *     pMinDate => DATE '2024-01-01',
    *     pMaxDate => SYSDATE,
-   *     pParallelDegree => 8 -- Optional, default 1, range 1-16
+   *     pParallelDegree => 8, -- Optional, default 1, range 1-16
+   *     pRegisterExport => TRUE -- Optional, default FALSE, registers to A_SOURCE_FILE_RECEIVED
    *   );
    *
    *   -- With auto-generated filename (based on table name only)
@@ -169,7 +182,8 @@ AS
    *     pBucketArea => 'ARCHIVE',
    *     pFolderName => 'exports',
    *     pMinDate => DATE '2025-09-01',
-   *     pMaxDate => DATE '2025-09-17'
+   *     pMaxDate => DATE '2025-09-17',
+   *     pRegisterExport => TRUE -- Registers each export to A_SOURCE_FILE_RECEIVED table
    *   );
    *   -- This will create files like: AGGREGATED_ALLOTMENT_202509.csv, etc.
    *   pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
@@ -188,6 +202,7 @@ AS
     pParallelDegree IN NUMBER default 1,
     pTemplateTableName IN VARCHAR2 default NULL,
     pMaxFileSize IN NUMBER default 104857600,
+    pRegisterExport IN BOOLEAN default FALSE,
     pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
   );

@@ -0,0 +1,218 @@
+create or replace PACKAGE CT_MRDS.DATA_EXPORTER
+AUTHID CURRENT_USER
+AS
+/**
+  * Data Export Package: Provides comprehensive data export capabilities to various formats (CSV, Parquet)
+  * with support for cloud storage integration via Oracle Cloud Infrastructure (OCI).
+  * The structure of comment is used by GET_PACKAGE_DOCUMENTATION function
+  * which returns documentation text for confluence page (to Copy-Paste it).
+**/
+
+  -- Package Version Information
+  PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.6.3';
+  PACKAGE_BUILD_DATE CONSTANT VARCHAR2(19) := '2026-01-28 19:30:00';
+  PACKAGE_AUTHOR CONSTANT VARCHAR2(50) := 'MRDS Development Team';
+
+  -- Version History (last 3-5 changes)
+  VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
+    'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. SQLERRM and DBMS_UTILITY.FORMAT_ERROR_BACKTRACE cannot be used directly in SQL UPDATE statements. Now properly assigned to vgMsgTmp variable before UPDATE.' || CHR(10) ||
+    'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Changed DELETE to filter by age (>24h) instead of deleting all COMPLETED chunks. Prevents concurrent sessions from deleting each other chunks. Session-safe cleanup with TASK_NAME filtering. Enables true parallel execution of multiple export jobs.' || CHR(10) ||
+    'v2.6.1 (2026-01-28): Added DELETE_FAILED_EXPORT_FILE procedure to clean up partial/corrupted files before retry. When partition fails mid-export, partial file is deleted before retry to prevent Oracle from creating _1 suffixed duplicates. Ensures clean retry without orphaned files in OCI bucket.' || CHR(10) ||
+    'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry. System now restarts ONLY failed partitions instead of re-exporting all data. Added ERROR_MESSAGE and EXPORT_TIMESTAMP columns for better error handling and monitoring. Prevents duplicate file creation when parallel tasks fail (e.g., 22 partitions with 16 threads, 3 failures no longer duplicates 19 successful exports).' || CHR(10) ||
+    'v2.5.0 (2026-01-26): Added recorddelimiter parameter with CRLF (CHR(13)||CHR(10)) for CSV exports to ensure Windows-compatible line endings. Improves cross-platform compatibility when CSV files are opened in Windows applications (Notepad, Excel).' || CHR(10) ||
+    'v2.4.0 (2026-01-11): Added pTemplateTableName parameter for per-column date format configuration. Implements dynamic query building with TO_CHAR for each date/timestamp column using FILE_MANAGER.GET_DATE_FORMAT. Supports 3-tier hierarchy: column-specific, template DEFAULT, global fallback. Eliminates single dateformat limitation of DBMS_CLOUD.EXPORT_DATA.' || CHR(10) ||
+    'v2.3.0 (2025-12-20): Added parallel partition processing using DBMS_PARALLEL_EXECUTE. New pParallelDegree parameter (1-16, default 1) for EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures. Each year/month partition processed in separate thread for improved performance.' || CHR(10) ||
+    'v2.2.0 (2025-12-19): DRY refactoring - extracted shared helper functions (sanitizeFilename, VALIDATE_TABLE_AND_COLUMNS, GET_PARTITIONS, EXPORT_SINGLE_PARTITION worker procedure). Reduced code duplication by ~400 lines. Prepared architecture for v2.3.0 parallel processing.' || CHR(10) ||
+    'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY, added consistent column mapping and dynamic column list to EXPORT_TABLE_DATA procedure, enhanced DEBUG logging for all export operations' || CHR(10) ||
+    'v2.1.0 (2025-10-22): Added version tracking and PARTITION_YEAR/PARTITION_MONTH support' || CHR(10) ||
+    'v2.0.0 (2025-10-01): Separated export functionality from FILE_MANAGER package' || CHR(10);
+
+  cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10);
+  vgMsgTmp VARCHAR2(32000);
+
+  ---------------------------------------------------------------------------------------------------------------------------
+  -- TYPE DEFINITIONS FOR PARTITION HANDLING
+  ---------------------------------------------------------------------------------------------------------------------------
+
+  /**
+    * Record type for year/month partition information
+  **/
+  TYPE partition_rec IS RECORD (
+    year VARCHAR2(4),
+    month VARCHAR2(2)
+  );
+
+  /**
+    * Table type for collection of partition records
+  **/
+  TYPE partition_tab IS TABLE OF partition_rec;
+
+  ---------------------------------------------------------------------------------------------------------------------------
+  -- INTERNAL PARALLEL PROCESSING CALLBACK
+  ---------------------------------------------------------------------------------------------------------------------------
+
+  /**
+    * @name EXPORT_PARTITION_PARALLEL
+    * @desc Internal callback procedure for DBMS_PARALLEL_EXECUTE.
+    *       Processes single partition (year/month) chunk in parallel task.
+    *       Called by DBMS_PARALLEL_EXECUTE framework for each chunk.
+    *       This procedure is PUBLIC because DBMS_PARALLEL_EXECUTE requires it,
+    *       but should NOT be called directly by external code.
+    * @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table)
+    * @param pEndId - Chunk end ID (same as pStartId for single-row chunks)
+  **/
+  PROCEDURE EXPORT_PARTITION_PARALLEL (
+    pStartId IN NUMBER,
+    pEndId IN NUMBER
+  );
+
+  ---------------------------------------------------------------------------------------------------------------------------
+  -- MAIN EXPORT PROCEDURES
+  ---------------------------------------------------------------------------------------------------------------------------
+
+  /**
+    * @name EXPORT_TABLE_DATA
+    * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
+    *       Exports data into CSV file on OCI infrustructure.
+    *       pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
+    * @example
+    * begin
+    *   DATA_EXPORTER.EXPORT_TABLE_DATA(
+    *     pSchemaName => 'CT_MRDS',
+    *     pTableName => 'MY_TABLE',
+    *     pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
+    *     pBucketArea => 'DATA',
+    *     pFolderName => 'csv_exports'
+    *   );
+    * end;
+  **/
+  PROCEDURE EXPORT_TABLE_DATA (
+    pSchemaName IN VARCHAR2,
+    pTableName IN VARCHAR2,
+    pKeyColumnName IN VARCHAR2,
+    pBucketArea IN VARCHAR2,
+    pFolderName IN VARCHAR2,
+    pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
+  );
+
+
+  /**
+    * @name EXPORT_TABLE_DATA_BY_DATE
+    * @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
+    *       Exports data into PARQUET files on OCI infrustructure.
+    *       Each YEAR_MONTH pair goes to seperate file (implicit partitioning).
+    *       Allows specifying custom column list or uses T.* if pColumnList is NULL.
+    *       Validates that all columns in pColumnList exist in the target table.
+    *       Automatically adds 'T.' prefix to column names in pColumnList.
+    *       Supports parallel partition processing via pParallelDegree parameter (default 1, range 1-16).
+    *       pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
+    * @example
+    * begin
+    *   DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE(
+    *     pSchemaName => 'CT_MRDS',
+    *     pTableName => 'MY_TABLE',
+    *     pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
+    *     pBucketArea => 'DATA',
+    *     pFolderName => 'parquet_exports',
+    *     pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional
+    *     pMinDate => DATE '2024-01-01',
+    *     pMaxDate => SYSDATE,
+    *     pParallelDegree => 8 -- Optional, default 1, range 1-16
+    *   );
+    * end;
+  **/
+  PROCEDURE EXPORT_TABLE_DATA_BY_DATE (
+    pSchemaName IN VARCHAR2,
+    pTableName IN VARCHAR2,
+    pKeyColumnName IN VARCHAR2,
+    pBucketArea IN VARCHAR2,
+    pFolderName IN VARCHAR2,
+    pColumnList IN VARCHAR2 default NULL,
+    pMinDate IN DATE default DATE '1900-01-01',
+    pMaxDate IN DATE default SYSDATE,
+    pParallelDegree IN NUMBER default 1,
+    pTemplateTableName IN VARCHAR2 default NULL,
+    pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
+  );
+
+
+  /**
+    * @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE
+    * @desc Exports data to separate CSV files partitioned by year and month.
+    *       Creates one CSV file for each year/month combination found in the data.
+    *       Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE,
+    *       but exports to CSV format instead of Parquet.
+    *       Supports parallel partition processing via pParallelDegree parameter (1-16).
+    *       File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL)
+    * @example
+    * begin
+    *   -- With custom filename
+    *   DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
+    *     pSchemaName => 'CT_MRDS',
+    *     pTableName => 'MY_TABLE',
+    *     pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
+    *     pBucketArea => 'DATA',
+    *     pFolderName => 'exports',
+    *     pFileName => 'my_export.csv',
+    *     pMinDate => DATE '2024-01-01',
+    *     pMaxDate => SYSDATE,
+    *     pParallelDegree => 8 -- Optional, default 1, range 1-16
+    *   );
+    *
+    *   -- With auto-generated filename (based on table name only)
+    *   DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
+    *     pSchemaName => 'OU_TOP',
+    *     pTableName => 'AGGREGATED_ALLOTMENT',
+    *     pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
+    *     pBucketArea => 'ARCHIVE',
+    *     pFolderName => 'exports',
+    *     pMinDate => DATE '2025-09-01',
+    *     pMaxDate => DATE '2025-09-17'
+    *   );
+    *   -- This will create files like: AGGREGATED_ALLOTMENT_202509.csv, etc.
+    *   pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
+    * end;
+  **/
+  PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE (
+    pSchemaName IN VARCHAR2,
+    pTableName IN VARCHAR2,
+    pKeyColumnName IN VARCHAR2,
+    pBucketArea IN VARCHAR2,
+    pFolderName IN VARCHAR2,
+    pFileName IN VARCHAR2 DEFAULT NULL,
+    pColumnList IN VARCHAR2 default NULL,
+    pMinDate IN DATE default DATE '1900-01-01',
+    pMaxDate IN DATE default SYSDATE,
+    pParallelDegree IN NUMBER default 1,
+    pTemplateTableName IN VARCHAR2 default NULL,
+    pMaxFileSize IN NUMBER default 104857600,
+    pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
+  );
+
+  ---------------------------------------------------------------------------------------------------------------------------
+  -- VERSION MANAGEMENT FUNCTIONS
+  ---------------------------------------------------------------------------------------------------------------------------
+
+  /**
+    * Returns the current package version number
+    * return: Version string in format X.Y.Z (e.g., '2.1.0')
+  **/
+  FUNCTION GET_VERSION RETURN VARCHAR2;
+
+  /**
+    * Returns comprehensive build information including version, date, and author
+    * return: Formatted string with complete build details
+  **/
+  FUNCTION GET_BUILD_INFO RETURN VARCHAR2;
+
+  /**
+    * Returns the version history with recent changes
+    * return: Multi-line string with version history
+  **/
+  FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2;
+
+END;
+
+/
@@ -0,0 +1,227 @@
|
|||||||
|
create or replace PACKAGE CT_MRDS.DATA_EXPORTER
|
||||||
|
AUTHID CURRENT_USER
|
||||||
|
AS
|
||||||
|
/**
|
||||||
|
* Data Export Package: Provides comprehensive data export capabilities to various formats (CSV, Parquet)
|
||||||
|
* with support for cloud storage integration via Oracle Cloud Infrastructure (OCI).
|
||||||
|
* The structure of comment is used by GET_PACKAGE_DOCUMENTATION function
|
||||||
|
* which returns documentation text for confluence page (to Copy-Paste it).
|
||||||
|
**/
|
||||||
|
|
||||||
|
-- Package Version Information
|
||||||
|
PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.7.4';
|
||||||
|
PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-11 12:10:00';
|
||||||
|
PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski';
|
||||||
|
|
||||||
|
-- Version History (last 3-5 changes)
|
||||||
|
VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
|
||||||
|
'v2.7.4 (2026-02-11): ACTUAL FILENAME STORAGE - Store real filename with Oracle suffix in SOURCE_FILE_NAME instead of theoretical filename. Changes LIST_OBJECTS query to SELECT object_name and stores actual filename like LEGACY_DEBT_202508_1_20260211T111341375171Z.csv instead of LEGACY_DEBT_202508.csv. Enables accurate file tracking.' || CHR(10) ||
|
||||||
|
'v2.7.3 (2026-02-11): FIX LIKE pattern for DBMS_CLOUD.LIST_OBJECTS - Removed .csv extension from filename before pattern matching. Oracle EXPORT_DATA creates files with suffixes BEFORE .csv so LIKE pattern should be filename% not filename.csv%. Enables proper metadata retrieval (CHECKSUM, CREATED, BYTES).' || CHR(10) ||
|
||||||
|
'v2.7.2 (2026-02-11): FIX pRegisterExport in EXPORT_TABLE_DATA_TO_CSV_BY_DATE - Added missing pRegisterExport parameter to EXPORT_SINGLE_PARTITION call. Previously files were not registered because parameter was not passed through.' || CHR(10) ||
|
||||||
|
'v2.7.1 (2026-02-11): AUTO-LOOKUP A_SOURCE_FILE_CONFIG_KEY - Parse pFolderName to automatically find config key from A_SOURCE_FILE_CONFIG. Example: ODS/CSDB/CSDB_DEBT_DAILY extracts SOURCE_KEY=CSDB, TABLE_ID=CSDB_DEBT_DAILY.' || CHR(10) ||
|
||||||
|
'v2.7.0 (2026-02-10): Added pRegisterExport parameter to EXPORT_TABLE_DATA_TO_CSV_BY_DATE. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED with metadata from DBMS_CLOUD.LIST_OBJECTS. Enables file tracking and integrity verification.' || CHR(10);
|
||||||
|
'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. SQLERRM and DBMS_UTILITY.FORMAT_ERROR_BACKTRACE cannot be used directly in SQL UPDATE statements. Now properly assigned to vgMsgTmp variable before UPDATE.' || CHR(10) ||
|
||||||
|
'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Changed DELETE to filter by age (>24h) instead of deleting all COMPLETED chunks. Prevents concurrent sessions from deleting each other chunks. Session-safe cleanup with TASK_NAME filtering. Enables true parallel execution of multiple export jobs.' || CHR(10) ||
|
||||||
|
'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry. System now restarts ONLY failed partitions instead of re-exporting all data. Added ERROR_MESSAGE and EXPORT_TIMESTAMP columns for better error handling and monitoring. Prevents duplicate file creation when parallel tasks fail (e.g., 22 partitions with 16 threads, 3 failures no longer duplicates 19 successful exports).' || CHR(10) ||
|
||||||
|
'v2.5.0 (2026-01-26): Added recorddelimiter parameter with CRLF (CHR(13)||CHR(10)) for CSV exports to ensure Windows-compatible line endings. Improves cross-platform compatibility when CSV files are opened in Windows applications (Notepad, Excel).' || CHR(10) ||
|
||||||
|
'v2.4.0 (2026-01-11): Added pTemplateTableName parameter for per-column date format configuration. Implements dynamic query building with TO_CHAR for each date/timestamp column using FILE_MANAGER.GET_DATE_FORMAT. Supports 3-tier hierarchy: column-specific, template DEFAULT, global fallback. Eliminates single dateformat limitation of DBMS_CLOUD.EXPORT_DATA.' || CHR(10) ||
|
||||||
|
'v2.3.0 (2025-12-20): Added parallel partition processing using DBMS_PARALLEL_EXECUTE. New pParallelDegree parameter (1-16, default 1) for EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures. Each year/month partition processed in separate thread for improved performance.' || CHR(10) ||
|
||||||
|
'v2.2.0 (2025-12-19): DRY refactoring - extracted shared helper functions (sanitizeFilename, VALIDATE_TABLE_AND_COLUMNS, GET_PARTITIONS, EXPORT_SINGLE_PARTITION worker procedure). Reduced code duplication by ~400 lines. Prepared architecture for v2.3.0 parallel processing.' || CHR(10) ||
|
||||||
|
'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY, added consistent column mapping and dynamic column list to EXPORT_TABLE_DATA procedure, enhanced DEBUG logging for all export operations' || CHR(10) ||
|
||||||
|
'v2.1.0 (2025-10-22): Added version tracking and PARTITION_YEAR/PARTITION_MONTH support' || CHR(10) ||
|
||||||
|
'v2.0.0 (2025-10-01): Separated export functionality from FILE_MANAGER package' || CHR(10);
|
||||||
|
|
||||||
|
cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10);
|
||||||
|
vgMsgTmp VARCHAR2(32000);
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- TYPE DEFINITIONS FOR PARTITION HANDLING
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Record type for year/month partition information
|
||||||
|
**/
|
||||||
|
TYPE partition_rec IS RECORD (
|
||||||
|
year VARCHAR2(4),
|
||||||
|
month VARCHAR2(2)
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Table type for collection of partition records
|
||||||
|
**/
|
||||||
|
TYPE partition_tab IS TABLE OF partition_rec;
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- INTERNAL PARALLEL PROCESSING CALLBACK
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name EXPORT_PARTITION_PARALLEL
|
||||||
|
* @desc Internal callback procedure for DBMS_PARALLEL_EXECUTE.
|
||||||
|
* Processes single partition (year/month) chunk in parallel task.
|
||||||
|
* Called by DBMS_PARALLEL_EXECUTE framework for each chunk.
|
||||||
|
* This procedure is PUBLIC because DBMS_PARALLEL_EXECUTE requires it,
|
||||||
|
* but should NOT be called directly by external code.
|
||||||
|
* @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table)
|
||||||
|
* @param pEndId - Chunk end ID (same as pStartId for single-row chunks)
|
||||||
|
**/
|
||||||
|
PROCEDURE EXPORT_PARTITION_PARALLEL (
|
||||||
|
pStartId IN NUMBER,
|
||||||
|
pEndId IN NUMBER
|
||||||
|
);
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- MAIN EXPORT PROCEDURES
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name EXPORT_TABLE_DATA
|
||||||
|
* @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
|
||||||
|
* Exports data into a CSV file on OCI infrastructure.
|
||||||
|
* pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
|
||||||
|
* @example
|
||||||
|
* begin
|
||||||
|
* DATA_EXPORTER.EXPORT_TABLE_DATA(
|
||||||
|
* pSchemaName => 'CT_MRDS',
|
||||||
|
* pTableName => 'MY_TABLE',
|
||||||
|
* pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
|
||||||
|
* pBucketArea => 'DATA',
|
||||||
|
* pFolderName => 'csv_exports'
|
||||||
|
* );
|
||||||
|
* end;
|
||||||
|
**/
|
||||||
|
PROCEDURE EXPORT_TABLE_DATA (
|
||||||
|
pSchemaName IN VARCHAR2,
|
||||||
|
pTableName IN VARCHAR2,
|
||||||
|
pKeyColumnName IN VARCHAR2,
|
||||||
|
pBucketArea IN VARCHAR2,
|
||||||
|
pFolderName IN VARCHAR2,
|
||||||
|
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name EXPORT_TABLE_DATA_BY_DATE
|
||||||
|
* @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
|
||||||
|
* Exports data into PARQUET files on OCI infrastructure.
|
||||||
|
* Each YEAR_MONTH pair goes to a separate file (implicit partitioning).
|
||||||
|
* Allows specifying custom column list or uses T.* if pColumnList is NULL.
|
||||||
|
* Validates that all columns in pColumnList exist in the target table.
|
||||||
|
* Automatically adds 'T.' prefix to column names in pColumnList.
|
||||||
|
* Supports parallel partition processing via pParallelDegree parameter (default 1, range 1-16).
|
||||||
|
* pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
|
||||||
|
* @example
|
||||||
|
* begin
|
||||||
|
* DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE(
|
||||||
|
* pSchemaName => 'CT_MRDS',
|
||||||
|
* pTableName => 'MY_TABLE',
|
||||||
|
* pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
|
||||||
|
* pBucketArea => 'DATA',
|
||||||
|
* pFolderName => 'parquet_exports',
|
||||||
|
* pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional
|
||||||
|
* pMinDate => DATE '2024-01-01',
|
||||||
|
* pMaxDate => SYSDATE,
|
||||||
|
* pParallelDegree => 8 -- Optional, default 1, range 1-16
|
||||||
|
* );
|
||||||
|
* end;
|
||||||
|
**/
|
||||||
|
PROCEDURE EXPORT_TABLE_DATA_BY_DATE (
|
||||||
|
pSchemaName IN VARCHAR2,
|
||||||
|
pTableName IN VARCHAR2,
|
||||||
|
pKeyColumnName IN VARCHAR2,
|
||||||
|
pBucketArea IN VARCHAR2,
|
||||||
|
pFolderName IN VARCHAR2,
|
||||||
|
pColumnList IN VARCHAR2 default NULL,
|
||||||
|
pMinDate IN DATE default DATE '1900-01-01',
|
||||||
|
pMaxDate IN DATE default SYSDATE,
|
||||||
|
pParallelDegree IN NUMBER default 1,
|
||||||
|
pTemplateTableName IN VARCHAR2 default NULL,
|
||||||
|
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE
|
||||||
|
* @desc Exports data to separate CSV files partitioned by year and month.
|
||||||
|
* Creates one CSV file for each year/month combination found in the data.
|
||||||
|
* Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE,
|
||||||
|
* but exports to CSV format instead of Parquet.
|
||||||
|
* Supports parallel partition processing via pParallelDegree parameter (1-16).
|
||||||
|
* File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL)
|
||||||
|
* When pRegisterExport=TRUE, successfully exported files are registered in:
|
||||||
|
* - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location, size, checksum, and metadata)
|
||||||
|
* @example
|
||||||
|
* begin
|
||||||
|
* -- With custom filename
|
||||||
|
* DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
|
||||||
|
* pSchemaName => 'CT_MRDS',
|
||||||
|
* pTableName => 'MY_TABLE',
|
||||||
|
* pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
|
||||||
|
* pBucketArea => 'DATA',
|
||||||
|
* pFolderName => 'exports',
|
||||||
|
* pFileName => 'my_export.csv',
|
||||||
|
* pMinDate => DATE '2024-01-01',
|
||||||
|
* pMaxDate => SYSDATE,
|
||||||
|
* pParallelDegree => 8, -- Optional, default 1, range 1-16
|
||||||
|
* pRegisterExport => TRUE -- Optional, default FALSE, registers to A_SOURCE_FILE_RECEIVED
|
||||||
|
* );
|
||||||
|
*
|
||||||
|
* -- With auto-generated filename (based on table name only)
|
||||||
|
* DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
|
||||||
|
* pSchemaName => 'OU_TOP',
|
||||||
|
* pTableName => 'AGGREGATED_ALLOTMENT',
|
||||||
|
* pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
|
||||||
|
* pBucketArea => 'ARCHIVE',
|
||||||
|
* pFolderName => 'exports',
|
||||||
|
* pMinDate => DATE '2025-09-01',
|
||||||
|
* pMaxDate => DATE '2025-09-17',
|
||||||
|
* pRegisterExport => TRUE -- Registers each export to A_SOURCE_FILE_RECEIVED table
|
||||||
|
* );
|
||||||
|
* -- This will create files like: AGGREGATED_ALLOTMENT_202509.csv, etc.
|
||||||
|
* pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
|
||||||
|
* end;
|
||||||
|
**/
|
||||||
|
PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE (
|
||||||
|
pSchemaName IN VARCHAR2,
|
||||||
|
pTableName IN VARCHAR2,
|
||||||
|
pKeyColumnName IN VARCHAR2,
|
||||||
|
pBucketArea IN VARCHAR2,
|
||||||
|
pFolderName IN VARCHAR2,
|
||||||
|
pFileName IN VARCHAR2 DEFAULT NULL,
|
||||||
|
pColumnList IN VARCHAR2 default NULL,
|
||||||
|
pMinDate IN DATE default DATE '1900-01-01',
|
||||||
|
pMaxDate IN DATE default SYSDATE,
|
||||||
|
pParallelDegree IN NUMBER default 1,
|
||||||
|
pTemplateTableName IN VARCHAR2 default NULL,
|
||||||
|
pMaxFileSize IN NUMBER default 104857600,
|
||||||
|
pRegisterExport IN BOOLEAN default FALSE,
|
||||||
|
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
|
||||||
|
);
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- VERSION MANAGEMENT FUNCTIONS
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the current package version number
|
||||||
|
* return: Version string in format X.Y.Z (e.g., '2.1.0')
|
||||||
|
**/
|
||||||
|
FUNCTION GET_VERSION RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns comprehensive build information including version, date, and author
|
||||||
|
* return: Formatted string with complete build details
|
||||||
|
**/
|
||||||
|
FUNCTION GET_BUILD_INFO RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the version history with recent changes
|
||||||
|
* return: Multi-line string with version history
|
||||||
|
**/
|
||||||
|
FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2;
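/**
* Usage sketch for the three version functions above (illustration only):
* @example
* SELECT DATA_EXPORTER.GET_VERSION()         FROM DUAL;
* SELECT DATA_EXPORTER.GET_BUILD_INFO()      FROM DUAL;
* SELECT DATA_EXPORTER.GET_VERSION_HISTORY() FROM DUAL;
**/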
|
||||||
|
|
||||||
|
END;
|
||||||
|
|
||||||
|
/
|
||||||
@@ -0,0 +1,625 @@
|
|||||||
|
create or replace PACKAGE CT_MRDS.ENV_MANAGER
|
||||||
|
AUTHID CURRENT_USER
|
||||||
|
AS
|
||||||
|
/**
|
||||||
|
* General comment for package: Please put comments for functions and procedures as shown in the example below.
|
||||||
|
* It is a standard.
|
||||||
|
* The structure of comment is used by GET_PACKAGE_DOCUMENTATION function
|
||||||
|
* which returns documentation text for confluence page (to Copy-Paste it).
|
||||||
|
**/
|
||||||
|
|
||||||
|
-- Example comment:
|
||||||
|
/**
|
||||||
|
* @name EX_PROCEDURE_NAME
|
||||||
|
* @desc Procedure description
|
||||||
|
* @example select ENV_MANAGER.EX_PROCEDURE_NAME(pParameter => 129) from dual;
|
||||||
|
* @ex_rslt Example Result
|
||||||
|
**/
|
||||||
|
|
||||||
|
-- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH)
|
||||||
|
PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.2.0';
|
||||||
|
PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-12-20 10:00:00';
|
||||||
|
PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski';
|
||||||
|
|
||||||
|
-- Version History (Latest changes first)
|
||||||
|
VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
|
||||||
|
'3.2.0 (2025-12-20): Added error codes for parallel execution support (CODE_INVALID_PARALLEL_DEGREE -20110, CODE_PARALLEL_EXECUTION_FAILED -20111)' || CHR(13)||CHR(10) ||
|
||||||
|
'3.1.0 (2025-10-22): Added package hash tracking and automatic change detection system (SHA256 hashing)' || CHR(13)||CHR(10) ||
|
||||||
|
'3.0.0 (2025-10-22): Added package versioning system with centralized version management functions' || CHR(13)||CHR(10) ||
|
||||||
|
'2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function for comprehensive CSV validation analysis' || CHR(13)||CHR(10) ||
|
||||||
|
'2.0.0 (2025-10-01): Added LOG_PROCESS_ERROR procedure with enhanced error diagnostics and stack traces' || CHR(13)||CHR(10) ||
|
||||||
|
'1.5.0 (2025-09-20): Added console logging support with gvConsoleLoggingEnabled configuration' || CHR(13)||CHR(10) ||
|
||||||
|
'1.0.0 (2025-09-01): Initial release with error management and configuration system';
|
||||||
|
|
||||||
|
TYPE Error_Record IS RECORD (
|
||||||
|
code PLS_INTEGER,
|
||||||
|
message VARCHAR2(4000)
|
||||||
|
);
|
||||||
|
|
||||||
|
TYPE tErrorList IS TABLE OF Error_Record INDEX BY PLS_INTEGER;
|
||||||
|
|
||||||
|
Errors tErrorList;
|
||||||
|
|
||||||
|
|
||||||
|
guid VARCHAR2(32);
|
||||||
|
gvEnv VARCHAR2(200);
|
||||||
|
gvUsername VARCHAR2(128);
|
||||||
|
gvOsuser VARCHAR2(128);
|
||||||
|
gvMachine VARCHAR2(64);
|
||||||
|
gvModule VARCHAR2(64);
|
||||||
|
|
||||||
|
gvNameSpace VARCHAR2(200);
|
||||||
|
gvRegion VARCHAR2(200);
|
||||||
|
gvDataBucketName VARCHAR2(200);
|
||||||
|
gvInboxBucketName VARCHAR2(200);
|
||||||
|
gvArchiveBucketName VARCHAR2(200);
|
||||||
|
gvDataBucketUri VARCHAR2(200);
|
||||||
|
gvInboxBucketUri VARCHAR2(200);
|
||||||
|
gvArchiveBucketUri VARCHAR2(200);
|
||||||
|
gvCredentialName VARCHAR2(200);
|
||||||
|
|
||||||
|
-- Overwritten by variable "LoggingEnabled" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table
|
||||||
|
gvLoggingEnabled VARCHAR2(3) := 'ON'; -- 'ON' or 'OFF'
|
||||||
|
|
||||||
|
-- Overwritten by variable "MinLogLevel" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table
|
||||||
|
-- Possible values: DEBUG ,INFO ,WARNING ,ERROR
|
||||||
|
gvMinLogLevel VARCHAR2(10) := 'DEBUG';
|
||||||
|
|
||||||
|
-- Overwritten by variable "DefaultDateFormat" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table
|
||||||
|
gvDefaultDateFormat VARCHAR2(200) := 'DD/MM/YYYY HH24:MI:SS';
|
||||||
|
|
||||||
|
-- Overwritten by variable "ConsoleLoggingEnabled" in A_FILE_MANAGER_CONFIG.CONFIG_VARIABLE table
|
||||||
|
gvConsoleLoggingEnabled VARCHAR2(3) := 'ON'; -- 'ON' or 'OFF'
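/**
* Configuration sketch (illustration only): how the defaults above can be overridden in
* A_FILE_MANAGER_CONFIG. The 'dev' environment id and the values are examples; the
* CONFIG_VARIABLE names follow the comments above.
*
*   INSERT INTO CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID, CONFIG_VARIABLE, CONFIG_VARIABLE_VALUE)
*   VALUES ('dev', 'MinLogLevel', 'INFO');
*   INSERT INTO CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID, CONFIG_VARIABLE, CONFIG_VARIABLE_VALUE)
*   VALUES ('dev', 'ConsoleLoggingEnabled', 'OFF');
**/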
|
||||||
|
|
||||||
|
cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10);
|
||||||
|
|
||||||
|
vgSourceFileConfigKey PLS_INTEGER;
|
||||||
|
|
||||||
|
vgMsgTmp VARCHAR2(32000);
|
||||||
|
--Exceptions
|
||||||
|
ERR_EMPTY_FILEURI_AND_RECKEY EXCEPTION;
|
||||||
|
CODE_EMPTY_FILEURI_AND_RECKEY CONSTANT PLS_INTEGER := -20001;
|
||||||
|
MSG_EMPTY_FILEURI_AND_RECKEY VARCHAR2(4000) := 'Either pFileUri or pSourceFileReceivedKey must be not null';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_EMPTY_FILEURI_AND_RECKEY
|
||||||
|
,CODE_EMPTY_FILEURI_AND_RECKEY);
|
||||||
|
|
||||||
|
|
||||||
|
ERR_NO_CONFIG_MATCH_FOR_FILEURI EXCEPTION;
|
||||||
|
CODE_NO_CONFIG_MATCH_FOR_FILEURI CONSTANT PLS_INTEGER := -20002;
|
||||||
|
MSG_NO_CONFIG_MATCH_FOR_FILEURI VARCHAR2(4000) := 'No match for source file in A_SOURCE_FILE_CONFIG table'
|
||||||
|
||cgBL||' The file provided in parameter: pFileUri does not have '
|
||||||
|
||cgBL||' corresponding configuration in A_SOURCE_FILE_CONFIG table';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_MATCH_FOR_FILEURI
|
||||||
|
,CODE_NO_CONFIG_MATCH_FOR_FILEURI);
|
||||||
|
|
||||||
|
ERR_MULTIPLE_MATCH_FOR_SRCFILE EXCEPTION;
|
||||||
|
CODE_MULTIPLE_MATCH_FOR_SRCFILE CONSTANT PLS_INTEGER := -20003;
|
||||||
|
MSG_MULTIPLE_MATCH_FOR_SRCFILE VARCHAR2(4000) := 'Multiple match for source file in A_SOURCE_FILE_CONFIG table';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_MATCH_FOR_SRCFILE
|
||||||
|
,CODE_MULTIPLE_MATCH_FOR_SRCFILE);
|
||||||
|
|
||||||
|
ERR_MISSING_COLUMN_DATE_FORMAT EXCEPTION;
|
||||||
|
CODE_MISSING_COLUMN_DATE_FORMAT CONSTANT PLS_INTEGER := -20004;
|
||||||
|
MSG_MISSING_COLUMN_DATE_FORMAT VARCHAR2(4000) := 'Missing entry in config table: A_COLUMN_DATE_FORMAT primary key(TEMPLATE_TABLE_NAME, COLUMN_NAME)'
|
||||||
|
||cgBL||' Remember: each column which data_type IN (''DATE'', ''TIMESTAMP'')'
|
||||||
|
||cgBL||' should have DateFormat specified in A_COLUMN_DATE_FORMAT table '
|
||||||
|
||cgBL||' for example: ''YYYY-MM-DD''';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_MISSING_COLUMN_DATE_FORMAT
|
||||||
|
,CODE_MISSING_COLUMN_DATE_FORMAT);
|
||||||
|
|
||||||
|
ERR_MULTIPLE_COLUMN_DATE_FORMAT EXCEPTION;
|
||||||
|
CODE_MULTIPLE_COLUMN_DATE_FORMAT CONSTANT PLS_INTEGER := -20005;
|
||||||
|
MSG_MULTIPLE_COLUMN_DATE_FORMAT VARCHAR2(4000) := 'Multiple records for date format in A_COLUMN_DATE_FORMAT table'
|
||||||
|
||cgBL||' There should be only one format specified for each DATE/TIMESTAMP column';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_COLUMN_DATE_FORMAT
|
||||||
|
,CODE_MULTIPLE_COLUMN_DATE_FORMAT);
|
||||||
|
|
||||||
|
|
||||||
|
ERR_DIDNT_GET_LOAD_OPERATION_ID EXCEPTION;
|
||||||
|
CODE_DIDNT_GET_LOAD_OPERATION_ID CONSTANT PLS_INTEGER := -20006;
|
||||||
|
MSG_DIDNT_GET_LOAD_OPERATION_ID VARCHAR2(4000) := 'Did not get load operation id from external table validation';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_DIDNT_GET_LOAD_OPERATION_ID
|
||||||
|
,CODE_DIDNT_GET_LOAD_OPERATION_ID);
|
||||||
|
|
||||||
|
ERR_NO_CONFIG_FOR_RECEIVED_FILE EXCEPTION;
|
||||||
|
CODE_NO_CONFIG_FOR_RECEIVED_FILE CONSTANT PLS_INTEGER := -20007;
|
||||||
|
MSG_NO_CONFIG_FOR_RECEIVED_FILE VARCHAR2(4000) := 'No match for received source file in A_SOURCE_FILE_CONFIG '
|
||||||
|
||cgBL||' or missing data in A_SOURCE_FILE_RECEIVED table for provided pSourceFileReceivedKey parameter';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_FOR_RECEIVED_FILE
|
||||||
|
,CODE_NO_CONFIG_FOR_RECEIVED_FILE);
|
||||||
|
|
||||||
|
ERR_MULTI_CONFIG_FOR_RECEIVED_FILE EXCEPTION;
|
||||||
|
CODE_MULTI_CONFIG_FOR_RECEIVED_FILE CONSTANT PLS_INTEGER := -20008;
|
||||||
|
MSG_MULTI_CONFIG_FOR_RECEIVED_FILE VARCHAR2(4000) := 'Multiple matches for received source file in A_SOURCE_FILE_CONFIG';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_MULTI_CONFIG_FOR_RECEIVED_FILE
|
||||||
|
,CODE_MULTI_CONFIG_FOR_RECEIVED_FILE);
|
||||||
|
|
||||||
|
ERR_FILE_NOT_FOUND_ON_CLOUD EXCEPTION;
|
||||||
|
CODE_FILE_NOT_FOUND_ON_CLOUD CONSTANT PLS_INTEGER := -20009;
|
||||||
|
MSG_FILE_NOT_FOUND_ON_CLOUD VARCHAR2(4000) := 'File not found on the cloud';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_FILE_NOT_FOUND_ON_CLOUD
|
||||||
|
,CODE_FILE_NOT_FOUND_ON_CLOUD);
|
||||||
|
|
||||||
|
ERR_FILE_VALIDATION_FAILED EXCEPTION;
|
||||||
|
CODE_FILE_VALIDATION_FAILED CONSTANT PLS_INTEGER := -20010;
|
||||||
|
MSG_FILE_VALIDATION_FAILED VARCHAR2(4000) := 'File validation failed';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_FILE_VALIDATION_FAILED
|
||||||
|
,CODE_FILE_VALIDATION_FAILED);
|
||||||
|
|
||||||
|
ERR_EXCESS_COLUMNS_DETECTED EXCEPTION;
|
||||||
|
CODE_EXCESS_COLUMNS_DETECTED CONSTANT PLS_INTEGER := -20011;
|
||||||
|
MSG_EXCESS_COLUMNS_DETECTED VARCHAR2(4000) := 'CSV file contains more columns than template allows';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_EXCESS_COLUMNS_DETECTED
|
||||||
|
,CODE_EXCESS_COLUMNS_DETECTED);
|
||||||
|
|
||||||
|
ERR_NO_CONFIG_MATCH EXCEPTION;
|
||||||
|
CODE_NO_CONFIG_MATCH CONSTANT PLS_INTEGER := -20012;
|
||||||
|
MSG_NO_CONFIG_MATCH VARCHAR2(4000) := 'No match for specified parameters in A_SOURCE_FILE_CONFIG table';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_NO_CONFIG_MATCH
|
||||||
|
,CODE_NO_CONFIG_MATCH);
|
||||||
|
|
||||||
|
ERR_UNKNOWN_PREFIX EXCEPTION;
|
||||||
|
CODE_UNKNOWN_PREFIX CONSTANT PLS_INTEGER := -20013;
|
||||||
|
MSG_UNKNOWN_PREFIX VARCHAR2(4000) := 'Unknown prefix';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_UNKNOWN_PREFIX
|
||||||
|
,CODE_UNKNOWN_PREFIX);
|
||||||
|
|
||||||
|
ERR_TABLE_NOT_EXISTS EXCEPTION;
|
||||||
|
CODE_TABLE_NOT_EXISTS CONSTANT PLS_INTEGER := -20014;
|
||||||
|
MSG_TABLE_NOT_EXISTS VARCHAR2(4000) := 'Table does not exist';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_TABLE_NOT_EXISTS
|
||||||
|
,CODE_TABLE_NOT_EXISTS);
|
||||||
|
|
||||||
|
ERR_COLUMN_NOT_EXISTS EXCEPTION;
|
||||||
|
CODE_COLUMN_NOT_EXISTS CONSTANT PLS_INTEGER := -20015;
|
||||||
|
MSG_COLUMN_NOT_EXISTS VARCHAR2(4000) := 'Column does not exist in table';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_COLUMN_NOT_EXISTS
|
||||||
|
,CODE_COLUMN_NOT_EXISTS);
|
||||||
|
|
||||||
|
ERR_UNSUPPORTED_DATA_TYPE EXCEPTION;
|
||||||
|
CODE_UNSUPPORTED_DATA_TYPE CONSTANT PLS_INTEGER := -20016;
|
||||||
|
MSG_UNSUPPORTED_DATA_TYPE VARCHAR2(4000) := 'Unsupported data type';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_UNSUPPORTED_DATA_TYPE
|
||||||
|
,CODE_UNSUPPORTED_DATA_TYPE);
|
||||||
|
|
||||||
|
ERR_MISSING_SOURCE_KEY EXCEPTION;
|
||||||
|
CODE_MISSING_SOURCE_KEY CONSTANT PLS_INTEGER := -20017;
|
||||||
|
MSG_MISSING_SOURCE_KEY VARCHAR2(4000) := 'The Source was not found in parent table A_SOURCE';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_MISSING_SOURCE_KEY
|
||||||
|
,CODE_MISSING_SOURCE_KEY);
|
||||||
|
|
||||||
|
ERR_NULL_SOURCE_FILE_CONFIG_KEY EXCEPTION;
|
||||||
|
CODE_NULL_SOURCE_FILE_CONFIG_KEY CONSTANT PLS_INTEGER := -20018;
|
||||||
|
MSG_NULL_SOURCE_FILE_CONFIG_KEY VARCHAR2(4000) := 'No entry in A_SOURCE_FILE_CONFIG table for specified A_SOURCE_FILE_CONFIG_KEY';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_NULL_SOURCE_FILE_CONFIG_KEY
|
||||||
|
,CODE_NULL_SOURCE_FILE_CONFIG_KEY);
|
||||||
|
|
||||||
|
ERR_DUPLICATED_SOURCE_KEY EXCEPTION;
|
||||||
|
CODE_DUPLICATED_SOURCE_KEY CONSTANT PLS_INTEGER := -20019;
|
||||||
|
MSG_DUPLICATED_SOURCE_KEY VARCHAR2(4000) := 'The Source already exists in the A_SOURCE table';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_DUPLICATED_SOURCE_KEY
|
||||||
|
,CODE_DUPLICATED_SOURCE_KEY);
|
||||||
|
|
||||||
|
ERR_MISSING_CONTAINER_CONFIG EXCEPTION;
|
||||||
|
CODE_MISSING_CONTAINER_CONFIG CONSTANT PLS_INTEGER := -20020;
|
||||||
|
MSG_MISSING_CONTAINER_CONFIG VARCHAR2(4000) := 'No match in A_SOURCE_FILE_CONFIG table where SOURCE_FILE_TYPE=''CONTAINER'' and specified SOURCE_FILE_ID';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_MISSING_CONTAINER_CONFIG
|
||||||
|
,CODE_MISSING_CONTAINER_CONFIG);
|
||||||
|
|
||||||
|
ERR_MULTIPLE_CONTAINER_ENTRIES EXCEPTION;
|
||||||
|
CODE_MULTIPLE_CONTAINER_ENTRIES CONSTANT PLS_INTEGER := -20021;
|
||||||
|
MSG_MULTIPLE_CONTAINER_ENTRIES VARCHAR2(4000) := 'Multiple matches in A_SOURCE_FILE_CONFIG table where SOURCE_FILE_TYPE=''CONTAINER'' and specified SOURCE_FILE_ID';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_MULTIPLE_CONTAINER_ENTRIES
|
||||||
|
,CODE_MULTIPLE_CONTAINER_ENTRIES);
|
||||||
|
|
||||||
|
ERR_WRONG_DESTINATION_PARAM EXCEPTION;
|
||||||
|
CODE_WRONG_DESTINATION_PARAM CONSTANT PLS_INTEGER := -20022;
|
||||||
|
MSG_WRONG_DESTINATION_PARAM VARCHAR2(4000) := 'Wrong destination parameter provided.';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_WRONG_DESTINATION_PARAM
|
||||||
|
,CODE_WRONG_DESTINATION_PARAM);
|
||||||
|
|
||||||
|
ERR_FILE_NOT_EXISTS_ON_CLOUD EXCEPTION;
|
||||||
|
CODE_FILE_NOT_EXISTS_ON_CLOUD CONSTANT PLS_INTEGER := -20023;
|
||||||
|
MSG_FILE_NOT_EXISTS_ON_CLOUD VARCHAR2(4000) := 'File does not exist on the cloud.';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_FILE_NOT_EXISTS_ON_CLOUD
|
||||||
|
,CODE_FILE_NOT_EXISTS_ON_CLOUD);
|
||||||
|
|
||||||
|
ERR_FILE_ALREADY_REGISTERED EXCEPTION;
|
||||||
|
CODE_FILE_ALREADY_REGISTERED CONSTANT PLS_INTEGER := -20024;
|
||||||
|
MSG_FILE_ALREADY_REGISTERED VARCHAR2(4000) := 'File already registered in A_SOURCE_FILE_RECEIVED table.';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_FILE_ALREADY_REGISTERED
|
||||||
|
,CODE_FILE_ALREADY_REGISTERED);
|
||||||
|
|
||||||
|
ERR_WRONG_DATE_TIMESTAMP_FORMAT EXCEPTION;
|
||||||
|
CODE_WRONG_DATE_TIMESTAMP_FORMAT CONSTANT PLS_INTEGER := -20025;
|
||||||
|
MSG_WRONG_DATE_TIMESTAMP_FORMAT VARCHAR2(4000) := 'Provided DATE or TIMESTAMP format has errors (possible duplicated codes, ex: ''DD'').';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_WRONG_DATE_TIMESTAMP_FORMAT
|
||||||
|
,CODE_WRONG_DATE_TIMESTAMP_FORMAT);
|
||||||
|
|
||||||
|
ERR_ENVIRONMENT_NOT_SET EXCEPTION;
|
||||||
|
CODE_ENVIRONMENT_NOT_SET CONSTANT PLS_INTEGER := -20026;
|
||||||
|
MSG_ENVIRONMENT_NOT_SET VARCHAR2(4000) := 'EnvironmentID not set'
|
||||||
|
||cgBL||' Information about environment is needed to get proper configuration values.'
|
||||||
|
||cgBL||' It can be set up in two different ways:'
|
||||||
|
||cgBL||' 1. Set it on session level: execute DBMS_SESSION.SET_IDENTIFIER (client_id => ''dev'')'
|
||||||
|
||cgBL||' 2. Set it on configuration level: Insert into CT_MRDS.A_FILE_MANAGER_CONFIG (ENVIRONMENT_ID,CONFIG_VARIABLE,CONFIG_VARIABLE_VALUE) values (''default'',''environment_id'',''dev'')'
|
||||||
|
||cgBL||' Session level setup (1.) takes precedence over configuration level one (2.)'
|
||||||
|
;
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_ENVIRONMENT_NOT_SET
|
||||||
|
,CODE_ENVIRONMENT_NOT_SET);
|
||||||
|
|
||||||
|
|
||||||
|
ERR_CONFIG_VARIABLE_NOT_SET EXCEPTION;
|
||||||
|
CODE_CONFIG_VARIABLE_NOT_SET CONSTANT PLS_INTEGER := -20027;
|
||||||
|
MSG_CONFIG_VARIABLE_NOT_SET VARCHAR2(4000) := 'Missing configuration value in A_FILE_MANAGER_CONFIG';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_CONFIG_VARIABLE_NOT_SET
|
||||||
|
,CODE_CONFIG_VARIABLE_NOT_SET);
|
||||||
|
|
||||||
|
ERR_NOT_INPUT_SOURCE_FILE_TYPE EXCEPTION;
|
||||||
|
CODE_NOT_INPUT_SOURCE_FILE_TYPE CONSTANT PLS_INTEGER := -20028;
|
||||||
|
MSG_NOT_INPUT_SOURCE_FILE_TYPE VARCHAR2(4000) := 'Archival can be executed only for A_SOURCE_FILE_CONFIG_KEY where SOURCE_FILE_TYPE=''INPUT''';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_NOT_INPUT_SOURCE_FILE_TYPE
|
||||||
|
,CODE_NOT_INPUT_SOURCE_FILE_TYPE);
|
||||||
|
|
||||||
|
ERR_EXP_DATA_FOR_ARCH_FAILED EXCEPTION;
|
||||||
|
CODE_EXP_DATA_FOR_ARCH_FAILED CONSTANT PLS_INTEGER := -20029;
|
||||||
|
MSG_EXP_DATA_FOR_ARCH_FAILED VARCHAR2(4000) := 'Export data for archival failed.';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_EXP_DATA_FOR_ARCH_FAILED
|
||||||
|
,CODE_EXP_DATA_FOR_ARCH_FAILED);
|
||||||
|
|
||||||
|
ERR_RESTORE_FILE_FROM_TRASH EXCEPTION;
|
||||||
|
CODE_RESTORE_FILE_FROM_TRASH CONSTANT PLS_INTEGER := -20030;
|
||||||
|
MSG_RESTORE_FILE_FROM_TRASH VARCHAR2(4000) := 'Unexpected issues occurred during the archival process. Restoration of exported files failed.';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_RESTORE_FILE_FROM_TRASH
|
||||||
|
,CODE_RESTORE_FILE_FROM_TRASH);
|
||||||
|
|
||||||
|
ERR_CHANGE_STAT_TO_ARCHIVED_FAILED EXCEPTION;
|
||||||
|
CODE_CHANGE_STAT_TO_ARCHIVED_FAILED CONSTANT PLS_INTEGER := -20031;
|
||||||
|
MSG_CHANGE_STAT_TO_ARCHIVED_FAILED VARCHAR2(4000) := 'Failed to change file status to: ARCHIVED in A_SOURCE_FILE_RECEIVED table.';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_CHANGE_STAT_TO_ARCHIVED_FAILED
|
||||||
|
,CODE_CHANGE_STAT_TO_ARCHIVED_FAILED);
|
||||||
|
|
||||||
|
ERR_MOVE_FILE_TO_TRASH_FAILED EXCEPTION;
|
||||||
|
CODE_MOVE_FILE_TO_TRASH_FAILED CONSTANT PLS_INTEGER := -20032;
|
||||||
|
MSG_MOVE_FILE_TO_TRASH_FAILED VARCHAR2(4000) := 'FAILED to move file to TRASH before DROPPING it.';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_MOVE_FILE_TO_TRASH_FAILED
|
||||||
|
,CODE_MOVE_FILE_TO_TRASH_FAILED);
|
||||||
|
|
||||||
|
ERR_DROP_EXPORTED_FILES_FAILED EXCEPTION;
|
||||||
|
CODE_DROP_EXPORTED_FILES_FAILED CONSTANT PLS_INTEGER := -20033;
|
||||||
|
MSG_DROP_EXPORTED_FILES_FAILED VARCHAR2(4000) := 'FAILED to DROP exported files.';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_DROP_EXPORTED_FILES_FAILED
|
||||||
|
,CODE_DROP_EXPORTED_FILES_FAILED);
|
||||||
|
|
||||||
|
ERR_INVALID_BUCKET_AREA EXCEPTION;
|
||||||
|
CODE_INVALID_BUCKET_AREA CONSTANT PLS_INTEGER := -20034;
|
||||||
|
MSG_INVALID_BUCKET_AREA VARCHAR2(4000) := 'Invalid bucket area specified. Valid values: INBOX, ODS, DATA, ARCHIVE';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_INVALID_BUCKET_AREA
|
||||||
|
,CODE_INVALID_BUCKET_AREA);
|
||||||
|
|
||||||
|
ERR_INVALID_PARALLEL_DEGREE EXCEPTION;
|
||||||
|
CODE_INVALID_PARALLEL_DEGREE CONSTANT PLS_INTEGER := -20110;
|
||||||
|
MSG_INVALID_PARALLEL_DEGREE VARCHAR2(4000) := 'Invalid parallel degree parameter. Must be between 1 and 16';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_INVALID_PARALLEL_DEGREE
|
||||||
|
,CODE_INVALID_PARALLEL_DEGREE);
|
||||||
|
|
||||||
|
ERR_PARALLEL_EXECUTION_FAILED EXCEPTION;
|
||||||
|
CODE_PARALLEL_EXECUTION_FAILED CONSTANT PLS_INTEGER := -20111;
|
||||||
|
MSG_PARALLEL_EXECUTION_FAILED VARCHAR2(4000) := 'Parallel execution failed';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_PARALLEL_EXECUTION_FAILED
|
||||||
|
,CODE_PARALLEL_EXECUTION_FAILED);
|
||||||
|
|
||||||
|
ERR_UNKNOWN EXCEPTION;
|
||||||
|
CODE_UNKNOWN CONSTANT PLS_INTEGER := -20999;
|
||||||
|
MSG_UNKNOWN VARCHAR2(4000) := 'Unknown Error Occurred';
|
||||||
|
PRAGMA EXCEPTION_INIT( ERR_UNKNOWN
|
||||||
|
,CODE_UNKNOWN);
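/**
* Usage sketch (illustration only): the CODE_* / MSG_* pairs above are wired to their
* named exceptions via PRAGMA EXCEPTION_INIT, so an error raised by code can be caught by name.
*
*   BEGIN
*     RAISE_APPLICATION_ERROR(ENV_MANAGER.CODE_FILE_NOT_FOUND_ON_CLOUD,
*                             ENV_MANAGER.MSG_FILE_NOT_FOUND_ON_CLOUD);
*   EXCEPTION
*     WHEN ENV_MANAGER.ERR_FILE_NOT_FOUND_ON_CLOUD THEN
*       DBMS_OUTPUT.PUT_LINE('Handled: ' || SQLERRM);
*   END;
**/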
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name LOG_PROCESS_EVENT
|
||||||
|
* @desc Insert a new log record into A_PROCESS_LOG table.
|
||||||
|
* Also outputs to console if gvConsoleLoggingEnabled = 'ON'.
|
||||||
|
* Respects logging level configuration (gvMinLogLevel).
|
||||||
|
* @example ENV_MANAGER.LOG_PROCESS_EVENT('Process completed successfully', 'INFO', 'pParam1=value1');
|
||||||
|
* @ex_rslt Record inserted into A_PROCESS_LOG table and optionally displayed in console output
|
||||||
|
**/
|
||||||
|
PROCEDURE LOG_PROCESS_EVENT (
|
||||||
|
pLogMessage VARCHAR2
|
||||||
|
,pLogLevel VARCHAR2 DEFAULT 'ERROR'
|
||||||
|
,pParameters VARCHAR2 DEFAULT NULL
|
||||||
|
,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER'
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name LOG_PROCESS_ERROR
|
||||||
|
* @desc Insert a detailed error record into A_PROCESS_LOG table with full stack trace, backtrace, and call stack.
|
||||||
|
* This procedure captures comprehensive error information for debugging purposes while
|
||||||
|
* allowing clean user-facing error messages to be raised separately.
|
||||||
|
* @param pLogMessage - Base error message description
|
||||||
|
* @param pParameters - Procedure parameters for context
|
||||||
|
* @param pProcessName - Name of the calling process/package
|
||||||
|
* @ex_rslt Record inserted into A_PROCESS_LOG table with complete error stack information
|
||||||
|
*/
|
||||||
|
PROCEDURE LOG_PROCESS_ERROR (
|
||||||
|
pLogMessage VARCHAR2
|
||||||
|
,pParameters VARCHAR2 DEFAULT NULL
|
||||||
|
,pProcessName VARCHAR2 DEFAULT 'FILE_MANAGER'
|
||||||
|
);
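/**
* Usage sketch for LOG_PROCESS_ERROR (illustration only): typically called from a
* WHEN OTHERS handler before re-raising, so the full stack lands in A_PROCESS_LOG.
* @example
* begin
*   null; -- some work that may fail
* exception
*   when others then
*     ENV_MANAGER.LOG_PROCESS_ERROR(
*       pLogMessage  => 'Export step failed',
*       pParameters  => 'pTableName=MY_TABLE',
*       pProcessName => 'DATA_EXPORTER');
*     raise;
* end;
**/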
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name INIT_ERRORS
|
||||||
|
* @desc Loads data into Errors array.
|
||||||
|
* Errors array is a list of Record(Error_Code, Error_Message) indexed by Error_Code.
|
||||||
|
* Called automatically during package initialization.
|
||||||
|
* @example Called automatically when package is first referenced
|
||||||
|
* @ex_rslt Errors array populated with all error codes and messages
|
||||||
|
**/
|
||||||
|
PROCEDURE INIT_ERRORS;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name GET_DEFAULT_ENV
|
||||||
|
* @desc Returns a string with the name of the default environment.
|
||||||
|
* The returned string is the A_FILE_MANAGER_CONFIG.ENVIRONMENT_ID value.
|
||||||
|
* @example select ENV_MANAGER.GET_DEFAULT_ENV() from dual;
|
||||||
|
* @ex_rslt dev
|
||||||
|
**/
|
||||||
|
FUNCTION GET_DEFAULT_ENV
|
||||||
|
RETURN VARCHAR2;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name INIT_VARIABLES
|
||||||
|
* @desc For specified pEnv parameter (A_FILE_MANAGER_CONFIG.ENVIRONMENT_ID)
|
||||||
|
* Assigns values to the following global package variables:
|
||||||
|
* - gvNameSpace
|
||||||
|
* - gvRegion
|
||||||
|
* - gvCredentialName
|
||||||
|
* - gvInboxBucketName
|
||||||
|
* - gvDataBucketName
|
||||||
|
* - gvArchiveBucketName
|
||||||
|
* - gvInboxBucketUri
|
||||||
|
* - gvDataBucketUri
|
||||||
|
* - gvArchiveBucketUri
|
||||||
|
* - gvLoggingEnabled
|
||||||
|
* - gvMinLogLevel
|
||||||
|
* - gvDefaultDateFormat
|
||||||
|
* - gvConsoleLoggingEnabled
|
||||||
|
**/
|
||||||
|
PROCEDURE INIT_VARIABLES(
|
||||||
|
pEnv VARCHAR2
|
||||||
|
);
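/**
* Usage sketch (illustration only): re-initialise the globals for the default environment.
* @example
* begin
*   ENV_MANAGER.INIT_VARIABLES(pEnv => ENV_MANAGER.GET_DEFAULT_ENV());
* end;
**/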
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name GET_ERROR_MESSAGE
|
||||||
|
* @desc Returns a string with the error message for the specified pCode (Error_Code).
|
||||||
|
* The error message is taken from the Errors array loaded by the INIT_ERRORS procedure
|
||||||
|
* @example select ENV_MANAGER.GET_ERROR_MESSAGE(pCode => -20009) from dual;
|
||||||
|
* @ex_rslt File not found on the cloud
|
||||||
|
**/
|
||||||
|
FUNCTION GET_ERROR_MESSAGE(
|
||||||
|
pCode PLS_INTEGER
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name GET_ERROR_STACK
|
||||||
|
* @desc Returns a string with all available error stack information.
|
||||||
|
* The error message is taken from the Errors array loaded by the INIT_ERRORS procedure
|
||||||
|
* @example
|
||||||
|
* select ENV_MANAGER.GET_ERROR_STACK(
|
||||||
|
* pFormat => 'OUTPUT'
|
||||||
|
* ,pCode => -20009
|
||||||
|
* ,pSourceFileReceivedKey => NULL)
|
||||||
|
* from dual
|
||||||
|
* @ex_rslt
|
||||||
|
* ------------------------------------------------------+
|
||||||
|
* Error Message:
|
||||||
|
* ORA-0000: normal, successful completion
|
||||||
|
* -------------------------------------------------------
|
||||||
|
* Error Stack:
|
||||||
|
* -------------------------------------------------------
|
||||||
|
* Error Backtrace:
|
||||||
|
* ------------------------------------------------------+
|
||||||
|
**/
|
||||||
|
FUNCTION GET_ERROR_STACK(
|
||||||
|
pFormat VARCHAR2
|
||||||
|
,pCode PLS_INTEGER
|
||||||
|
,pSourceFileReceivedKey CT_MRDS.A_SOURCE_FILE_RECEIVED.A_SOURCE_FILE_RECEIVED_KEY%TYPE DEFAULT NULL
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name FORMAT_PARAMETERS
|
||||||
|
* @desc Formats parameter list for logging purposes.
|
||||||
|
* Converts SYS.ODCIVARCHAR2LIST to formatted string with proper NULL handling.
|
||||||
|
* @example select ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('param1=value1', 'param2=NULL')) from dual;
|
||||||
|
* @ex_rslt param1=value1 ,
|
||||||
|
* param2=NULL
|
||||||
|
**/
|
||||||
|
FUNCTION FORMAT_PARAMETERS(
|
||||||
|
pParameterList SYS.ODCIVARCHAR2LIST
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name ANALYZE_VALIDATION_ERRORS
|
||||||
|
* @desc Analyzes CSV validation errors and generates detailed diagnostic report.
|
||||||
|
* Compares CSV structure with template table and provides specific error analysis.
|
||||||
|
* Includes suggested solutions for common validation issues.
|
||||||
|
* @param pValidationLogTable - Name of validation log table (e.g., VALIDATE$242_LOG)
|
||||||
|
* @param pTemplateSchema - Schema of template table (e.g., CT_ET_TEMPLATES)
|
||||||
|
* @param pTemplateTable - Name of template table (e.g., MOCK_PROC_TABLE)
|
||||||
|
* @param pCsvFileUri - URI of CSV file being validated
|
||||||
|
* @example SELECT ENV_MANAGER.ANALYZE_VALIDATION_ERRORS('VALIDATE$242_LOG', 'CT_ET_TEMPLATES', 'MOCK_PROC_TABLE', 'https://...') FROM DUAL;
|
||||||
|
* @ex_rslt Detailed validation analysis report with column mismatches and solutions
|
||||||
|
**/
|
||||||
|
FUNCTION ANALYZE_VALIDATION_ERRORS(
|
||||||
|
pValidationLogTable VARCHAR2,
|
||||||
|
pTemplateSchema VARCHAR2,
|
||||||
|
pTemplateTable VARCHAR2,
|
||||||
|
pCsvFileUri VARCHAR2
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- PACKAGE VERSION MANAGEMENT FUNCTIONS
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name GET_VERSION
|
||||||
|
* @desc Returns the current version number of the ENV_MANAGER package.
|
||||||
|
* Uses semantic versioning format (MAJOR.MINOR.PATCH).
|
||||||
|
* @example SELECT ENV_MANAGER.GET_VERSION() FROM DUAL;
|
||||||
|
* @ex_rslt 3.0.0
|
||||||
|
**/
|
||||||
|
FUNCTION GET_VERSION RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name GET_BUILD_INFO
|
||||||
|
* @desc Returns comprehensive build information including version, build date, and author.
|
||||||
|
* Formatted for display in logs or monitoring systems.
|
||||||
|
* @example SELECT ENV_MANAGER.GET_BUILD_INFO() FROM DUAL;
|
||||||
|
* @ex_rslt Package: ENV_MANAGER
|
||||||
|
* Version: 3.0.0
|
||||||
|
* Build Date: 2025-10-22 16:00:00
|
||||||
|
* Author: Grzegorz Michalski
|
||||||
|
**/
|
||||||
|
FUNCTION GET_BUILD_INFO RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name GET_VERSION_HISTORY
|
||||||
|
* @desc Returns complete version history with all releases and changes.
|
||||||
|
* Shows evolution of package features over time.
|
||||||
|
* @example SELECT ENV_MANAGER.GET_VERSION_HISTORY() FROM DUAL;
|
||||||
|
* @ex_rslt ENV_MANAGER Version History:
|
||||||
|
* 3.0.0 (2025-10-22): Added package versioning system...
|
||||||
|
* 2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function...
|
||||||
|
**/
|
||||||
|
FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name GET_PACKAGE_VERSION_INFO
|
||||||
|
* @desc Universal function to get formatted version information for any package.
|
||||||
|
* This centralized function is used by all packages in the system.
|
||||||
|
* @param pPackageName - Name of the package
|
||||||
|
* @param pVersion - Version string (MAJOR.MINOR.PATCH format)
|
||||||
|
* @param pBuildDate - Build date timestamp
|
||||||
|
* @param pAuthor - Package author name
|
||||||
|
* @example SELECT ENV_MANAGER.GET_PACKAGE_VERSION_INFO('FILE_MANAGER', '2.1.0', '2025-10-22 15:00:00', 'Grzegorz Michalski') FROM DUAL;
|
||||||
|
* @ex_rslt Package: FILE_MANAGER
|
||||||
|
* Version: 2.1.0
|
||||||
|
* Build Date: 2025-10-22 15:00:00
|
||||||
|
* Author: Grzegorz Michalski
|
||||||
|
**/
|
||||||
|
FUNCTION GET_PACKAGE_VERSION_INFO(
|
||||||
|
pPackageName VARCHAR2,
|
||||||
|
pVersion VARCHAR2,
|
||||||
|
pBuildDate VARCHAR2,
|
||||||
|
pAuthor VARCHAR2
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name FORMAT_VERSION_HISTORY
|
||||||
|
* @desc Universal function to format version history for any package.
|
||||||
|
* Adds package name header and proper formatting.
|
||||||
|
* @param pPackageName - Name of the package
|
||||||
|
* @param pVersionHistory - Complete version history text
|
||||||
|
* @example SELECT ENV_MANAGER.FORMAT_VERSION_HISTORY('FILE_MANAGER', '2.1.0 (2025-10-22): Export procedures...') FROM DUAL;
|
||||||
|
* @ex_rslt FILE_MANAGER Version History:
|
||||||
|
* 2.1.0 (2025-10-22): Export procedures...
|
||||||
|
**/
|
||||||
|
FUNCTION FORMAT_VERSION_HISTORY(
|
||||||
|
pPackageName VARCHAR2,
|
||||||
|
pVersionHistory VARCHAR2
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- PACKAGE HASH + CHANGE DETECTION FUNCTIONS
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name CALCULATE_PACKAGE_HASH
|
||||||
|
* @desc Calculates SHA256 hash of package source code from ALL_SOURCE.
|
||||||
|
* Returns hash for both SPEC and BODY (if exists).
|
||||||
|
* Used for automatic change detection.
|
||||||
|
* @param pPackageOwner - Schema owner of the package
|
||||||
|
* @param pPackageName - Name of the package
|
||||||
|
* @param pPackageType - Type of package code ('PACKAGE' for SPEC, 'PACKAGE BODY' for BODY)
|
||||||
|
* @example SELECT ENV_MANAGER.CALCULATE_PACKAGE_HASH('CT_MRDS', 'FILE_MANAGER', 'PACKAGE') FROM DUAL;
|
||||||
|
* @ex_rslt A7B3C5D9E8F1234567890ABCDEF... (64-character SHA256 hash)
|
||||||
|
**/
|
||||||
|
FUNCTION CALCULATE_PACKAGE_HASH(
|
||||||
|
pPackageOwner VARCHAR2,
|
||||||
|
pPackageName VARCHAR2,
|
||||||
|
pPackageType VARCHAR2 -- 'PACKAGE' or 'PACKAGE BODY'
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name TRACK_PACKAGE_VERSION
|
||||||
|
* @desc Records package version and source code hash in A_PACKAGE_VERSION_TRACKING table.
|
||||||
|
* Automatically detects if source code changed without version update.
|
||||||
|
* Should be called after every package deployment.
|
||||||
|
* @param pPackageOwner - Schema owner of the package
|
||||||
|
* @param pPackageName - Name of the package
|
||||||
|
* @param pPackageVersion - Current version from PACKAGE_VERSION constant
|
||||||
|
* @param pPackageBuildDate - Build date from PACKAGE_BUILD_DATE constant
|
||||||
|
* @param pPackageAuthor - Author from PACKAGE_AUTHOR constant
|
||||||
|
* @example EXEC ENV_MANAGER.TRACK_PACKAGE_VERSION('CT_MRDS', 'FILE_MANAGER', '3.2.0', '2025-10-22 16:30:00', 'Grzegorz Michalski');
|
||||||
|
* @ex_rslt Record inserted into A_PACKAGE_VERSION_TRACKING with change detection status
|
||||||
|
**/
|
||||||
|
PROCEDURE TRACK_PACKAGE_VERSION(
|
||||||
|
pPackageOwner VARCHAR2,
|
||||||
|
pPackageName VARCHAR2,
|
||||||
|
pPackageVersion VARCHAR2,
|
||||||
|
pPackageBuildDate VARCHAR2,
|
||||||
|
pPackageAuthor VARCHAR2
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name CHECK_PACKAGE_CHANGES
|
||||||
|
* @desc Checks if package source code has changed since last tracking.
|
||||||
|
* Compares current hash with last recorded hash in A_PACKAGE_VERSION_TRACKING.
|
||||||
|
* Returns detailed change detection report.
|
||||||
|
* @param pPackageOwner - Schema owner of the package
|
||||||
|
* @param pPackageName - Name of the package
|
||||||
|
* @example SELECT ENV_MANAGER.CHECK_PACKAGE_CHANGES('CT_MRDS', 'FILE_MANAGER') FROM DUAL;
|
||||||
|
* @ex_rslt WARNING: Package changed without version update!
|
||||||
|
* Last Version: 3.2.0
|
||||||
|
* Current Hash (SPEC): A7B3C5D9...
|
||||||
|
* Last Hash (SPEC): B8C4D6E0...
|
||||||
|
* RECOMMENDATION: Update PACKAGE_VERSION and PACKAGE_BUILD_DATE
|
||||||
|
**/
|
||||||
|
FUNCTION CHECK_PACKAGE_CHANGES(
|
||||||
|
pPackageOwner VARCHAR2,
|
||||||
|
pPackageName VARCHAR2
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name GET_PACKAGE_HASH_INFO
|
||||||
|
* @desc Returns formatted information about package hash and tracking history.
|
||||||
|
* Includes current hash, last tracked hash, and change detection status.
|
||||||
|
* @param pPackageOwner - Schema owner of the package
|
||||||
|
* @param pPackageName - Name of the package
|
||||||
|
* @example SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('CT_MRDS', 'FILE_MANAGER') FROM DUAL;
|
||||||
|
* @ex_rslt Package: CT_MRDS.FILE_MANAGER
|
||||||
|
* Current Version: 3.2.0
|
||||||
|
* Current Hash (SPEC): A7B3C5D9...
|
||||||
|
* Last Tracked: 2025-10-22 16:30:00
|
||||||
|
* Status: OK - No changes detected
|
||||||
|
**/
|
||||||
|
FUNCTION GET_PACKAGE_HASH_INFO(
|
||||||
|
pPackageOwner VARCHAR2,
|
||||||
|
pPackageName VARCHAR2
|
||||||
|
) RETURN VARCHAR2;
|
||||||
|
|
||||||
|
END ENV_MANAGER;
|
||||||
|
/
|
||||||
@@ -0,0 +1,227 @@
|
|||||||
|
create or replace PACKAGE CT_MRDS.DATA_EXPORTER
|
||||||
|
AUTHID CURRENT_USER
|
||||||
|
AS
|
||||||
|
/**
|
||||||
|
* Data Export Package: Provides comprehensive data export capabilities to various formats (CSV, Parquet)
|
||||||
|
* with support for cloud storage integration via Oracle Cloud Infrastructure (OCI).
|
||||||
|
* The structure of comment is used by GET_PACKAGE_DOCUMENTATION function
|
||||||
|
* which returns documentation text for confluence page (to Copy-Paste it).
|
||||||
|
**/
|
||||||
|
|
||||||
|
-- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH)
|
||||||
|
PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.8.0';
|
||||||
|
PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-10 11:00:00';
|
||||||
|
PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski';
|
||||||
|
|
||||||
|
cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10);
|
||||||
|
|
||||||
|
-- Version History (Latest changes first)
|
||||||
|
VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
|
||||||
|
'v2.8.0 (2026-02-10): CRITICAL FIX - Removed duplicate post-export registration code that conflicted with per-partition registration. Post-export registration (SERVICE_NAME=DATA_EXPORTER, WORKFLOW_START=SYSTIMESTAMP) removed. Per-partition registration (SERVICE_NAME=CSV_EXPORT, WORKFLOW_START=partition_date) now executes exclusively. Prevents duplicate workflow records and ensures CSV files contain constant workflow keys instead of A_ETL_LOAD_SET_FK aliases.' || cgBL ||
|
||||||
|
'v2.7.0 (2026-02-09): NEW FEATURE - Added pRegisterExport parameter to EXPORT_TABLE_DATA_TO_CSV_BY_DATE. When TRUE, successfully exported files are registered in A_WORKFLOW_HISTORY (one record per YEAR/MONTH) and A_SOURCE_FILE_RECEIVED tables for tracking and audit purposes.' || cgBL ||
|
||||||
|
'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. SQLERRM and DBMS_UTILITY.FORMAT_ERROR_BACKTRACE cannot be used directly in SQL UPDATE statements. Now properly assigned to vgMsgTmp variable before UPDATE.' || cgBL ||
|
||||||
|
'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Changed DELETE to filter by age (>24h) instead of deleting all COMPLETED chunks. Prevents concurrent sessions from deleting each other chunks. Session-safe cleanup with TASK_NAME filtering. Enables true parallel execution of multiple export jobs.' || cgBL ||
|
||||||
|
'v2.6.1 (2026-01-28): Added DELETE_FAILED_EXPORT_FILE procedure to clean up partial/corrupted files before retry. When partition fails mid-export, partial file is deleted before retry to prevent Oracle from creating _1 suffixed duplicates. Ensures clean retry without orphaned files in OCI bucket.' || cgBL ||
|
||||||
|
'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry. System now restarts ONLY failed partitions instead of re-exporting all data. Added ERROR_MESSAGE and EXPORT_TIMESTAMP columns for better error handling and monitoring. Prevents duplicate file creation when parallel tasks fail (e.g., 22 partitions with 16 threads, 3 failures no longer duplicates 19 successful exports).' || cgBL ||
|
||||||
|
'v2.5.0 (2026-01-26): Added recorddelimiter parameter with CRLF (CHR(13)||CHR(10)) for CSV exports to ensure Windows-compatible line endings. Improves cross-platform compatibility when CSV files are opened in Windows applications (Notepad, Excel).' || cgBL ||
|
||||||
|
'v2.4.0 (2026-01-11): Added pTemplateTableName parameter for per-column date format configuration. Implements dynamic query building with TO_CHAR for each date/timestamp column using FILE_MANAGER.GET_DATE_FORMAT. Supports 3-tier hierarchy: column-specific, template DEFAULT, global fallback. Eliminates single dateformat limitation of DBMS_CLOUD.EXPORT_DATA.' || cgBL ||
|
||||||
|
'v2.3.0 (2025-12-20): Added parallel partition processing using DBMS_PARALLEL_EXECUTE. New pParallelDegree parameter (1-16, default 1) for EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures. Each year/month partition processed in separate thread for improved performance.' || cgBL ||
|
||||||
|
'v2.2.0 (2025-12-19): DRY refactoring - extracted shared helper functions (sanitizeFilename, VALIDATE_TABLE_AND_COLUMNS, GET_PARTITIONS, EXPORT_SINGLE_PARTITION worker procedure). Reduced code duplication by ~400 lines. Prepared architecture for v2.3.0 parallel processing.' || cgBL ||
|
||||||
|
'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY, added consistent column mapping and dynamic column list to EXPORT_TABLE_DATA procedure, enhanced DEBUG logging for all export operations' || cgBL ||
|
||||||
|
'v2.1.0 (2025-10-22): Added version tracking and PARTITION_YEAR/PARTITION_MONTH support' || cgBL ||
|
||||||
|
'v2.0.0 (2025-10-01): Separated export functionality from FILE_MANAGER package';
|
||||||
|
|
||||||
|
vgMsgTmp VARCHAR2(32000);
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- TYPE DEFINITIONS FOR PARTITION HANDLING
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Record type for year/month partition information
|
||||||
|
**/
|
||||||
|
TYPE partition_rec IS RECORD (
|
||||||
|
year VARCHAR2(4),
|
||||||
|
month VARCHAR2(2)
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Table type for collection of partition records
|
||||||
|
**/
|
||||||
|
TYPE partition_tab IS TABLE OF partition_rec;
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- INTERNAL PARALLEL PROCESSING CALLBACK
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name EXPORT_PARTITION_PARALLEL
|
||||||
|
* @desc Internal callback procedure for DBMS_PARALLEL_EXECUTE.
|
||||||
|
* Processes single partition (year/month) chunk in parallel task.
|
||||||
|
* Called by DBMS_PARALLEL_EXECUTE framework for each chunk.
|
||||||
|
* This procedure is PUBLIC because DBMS_PARALLEL_EXECUTE requires it,
|
||||||
|
* but should NOT be called directly by external code.
|
||||||
|
* @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table)
|
||||||
|
* @param pEndId - Chunk end ID (same as pStartId for single-row chunks)
|
||||||
|
**/
|
||||||
|
PROCEDURE EXPORT_PARTITION_PARALLEL (
|
||||||
|
pStartId IN NUMBER,
|
||||||
|
pEndId IN NUMBER
|
||||||
|
);
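/**
* Monitoring sketch (illustration only): after a parallel run, failed partitions can be
* inspected in A_PARALLEL_EXPORT_CHUNKS via the STATUS / ERROR_MESSAGE / EXPORT_TIMESTAMP
* columns described in the v2.6.x notes above; the literal status value is an assumption.
*
*   SELECT CHUNK_ID, STATUS, ERROR_MESSAGE, EXPORT_TIMESTAMP
*     FROM A_PARALLEL_EXPORT_CHUNKS
*    WHERE STATUS <> 'COMPLETED'
*    ORDER BY CHUNK_ID;
**/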
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- MAIN EXPORT PROCEDURES
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name EXPORT_TABLE_DATA
|
||||||
|
* @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
|
||||||
|
* Exports data into a CSV file on OCI infrastructure.
|
||||||
|
* pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
|
||||||
|
* @example
|
||||||
|
* begin
|
||||||
|
* DATA_EXPORTER.EXPORT_TABLE_DATA(
|
||||||
|
* pSchemaName => 'CT_MRDS',
|
||||||
|
* pTableName => 'MY_TABLE',
|
||||||
|
* pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
|
||||||
|
* pBucketArea => 'DATA',
|
||||||
|
* pFolderName => 'csv_exports'
|
||||||
|
* );
|
||||||
|
* end;
|
||||||
|
**/
|
||||||
|
PROCEDURE EXPORT_TABLE_DATA (
|
||||||
|
pSchemaName IN VARCHAR2,
|
||||||
|
pTableName IN VARCHAR2,
|
||||||
|
pKeyColumnName IN VARCHAR2,
|
||||||
|
pBucketArea IN VARCHAR2,
|
||||||
|
pFolderName IN VARCHAR2,
|
||||||
|
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name EXPORT_TABLE_DATA_BY_DATE
|
||||||
|
* @desc Wrapper procedure for DBMS_CLOUD.EXPORT_DATA.
|
||||||
|
* Exports data into PARQUET files on OCI infrastructure.
|
||||||
|
* Each YEAR_MONTH pair goes to a separate file (implicit partitioning).
|
||||||
|
* Allows specifying custom column list or uses T.* if pColumnList is NULL.
|
||||||
|
* Validates that all columns in pColumnList exist in the target table.
|
||||||
|
* Automatically adds 'T.' prefix to column names in pColumnList.
|
||||||
|
* Supports parallel partition processing via pParallelDegree parameter (default 1, range 1-16).
|
||||||
|
* pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
|
||||||
|
* @example
|
||||||
|
* begin
|
||||||
|
* DATA_EXPORTER.EXPORT_TABLE_DATA_BY_DATE(
|
||||||
|
* pSchemaName => 'CT_MRDS',
|
||||||
|
* pTableName => 'MY_TABLE',
|
||||||
|
* pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
|
||||||
|
* pBucketArea => 'DATA',
|
||||||
|
* pFolderName => 'parquet_exports',
|
||||||
|
* pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional
|
||||||
|
* pMinDate => DATE '2024-01-01',
|
||||||
|
* pMaxDate => SYSDATE,
|
||||||
|
* pParallelDegree => 8 -- Optional, default 1, range 1-16
|
||||||
|
* );
|
||||||
|
* end;
|
||||||
|
**/
|
||||||
|
PROCEDURE EXPORT_TABLE_DATA_BY_DATE (
|
||||||
|
pSchemaName IN VARCHAR2,
|
||||||
|
pTableName IN VARCHAR2,
|
||||||
|
pKeyColumnName IN VARCHAR2,
|
||||||
|
pBucketArea IN VARCHAR2,
|
||||||
|
pFolderName IN VARCHAR2,
|
||||||
|
pColumnList IN VARCHAR2 default NULL,
|
||||||
|
pMinDate IN DATE default DATE '1900-01-01',
|
||||||
|
pMaxDate IN DATE default SYSDATE,
|
||||||
|
pParallelDegree IN NUMBER default 1,
|
||||||
|
pTemplateTableName IN VARCHAR2 default NULL,
|
||||||
|
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @name EXPORT_TABLE_DATA_TO_CSV_BY_DATE
|
||||||
|
* @desc Exports data to separate CSV files partitioned by year and month.
|
||||||
|
* Creates one CSV file for each year/month combination found in the data.
|
||||||
|
* Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE,
|
||||||
|
* but exports to CSV format instead of Parquet.
|
||||||
|
* Supports parallel partition processing via pParallelDegree parameter (1-16).
|
||||||
|
* File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL)
|
||||||
|
* When pRegisterExport=TRUE, successfully exported files are registered in:
|
||||||
|
* - CT_MRDS.A_WORKFLOW_HISTORY (one record per YEAR/MONTH with export timestamp)
|
||||||
|
* - CT_MRDS.A_SOURCE_FILE_RECEIVED (tracks file location and partition info)
|
||||||
|
* @example
|
||||||
|
* begin
|
||||||
|
* -- With custom filename
|
||||||
|
* DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
|
||||||
|
* pSchemaName => 'CT_MRDS',
|
||||||
|
* pTableName => 'MY_TABLE',
|
||||||
|
* pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
|
||||||
|
* pBucketArea => 'DATA',
|
||||||
|
* pFolderName => 'exports',
|
||||||
|
* pFileName => 'my_export.csv',
|
||||||
|
* pMinDate => DATE '2024-01-01',
|
||||||
|
* pMaxDate => SYSDATE,
|
||||||
|
* pParallelDegree => 8, -- Optional, default 1, range 1-16
|
||||||
|
* pRegisterExport => TRUE -- Optional, default FALSE, registers to A_WORKFLOW_HISTORY and A_SOURCE_FILE_RECEIVED
|
||||||
|
* );
|
||||||
|
*
|
||||||
|
* -- With auto-generated filename (based on table name only)
|
||||||
|
* DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
|
||||||
|
* pSchemaName => 'OU_TOP',
|
||||||
|
* pTableName => 'AGGREGATED_ALLOTMENT',
|
||||||
|
* pKeyColumnName => 'A_ETL_LOAD_SET_KEY_FK',
|
||||||
|
* pBucketArea => 'ARCHIVE',
|
||||||
|
* pFolderName => 'exports',
|
||||||
|
* pMinDate => DATE '2025-09-01',
|
||||||
|
* pMaxDate => DATE '2025-09-17',
|
||||||
|
* pRegisterExport => TRUE -- Registers each export to tracking tables
|
||||||
|
* );
|
||||||
|
* -- This will create files like: AGGREGATED_ALLOTMENT_202509.csv, etc.
|
||||||
|
* pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
|
||||||
|
* end;
|
||||||
|
**/
|
||||||
|
PROCEDURE EXPORT_TABLE_DATA_TO_CSV_BY_DATE (
|
||||||
|
pSchemaName IN VARCHAR2,
|
||||||
|
pTableName IN VARCHAR2,
|
||||||
|
pKeyColumnName IN VARCHAR2,
|
||||||
|
pBucketArea IN VARCHAR2,
|
||||||
|
pFolderName IN VARCHAR2,
|
||||||
|
pFileName IN VARCHAR2 DEFAULT NULL,
|
||||||
|
pColumnList IN VARCHAR2 default NULL,
|
||||||
|
pMinDate IN DATE default DATE '1900-01-01',
|
||||||
|
pMaxDate IN DATE default SYSDATE,
|
||||||
|
pParallelDegree IN NUMBER default 1,
|
||||||
|
pTemplateTableName IN VARCHAR2 default NULL,
|
||||||
|
pMaxFileSize IN NUMBER default 104857600,
|
||||||
|
pRegisterExport IN BOOLEAN default FALSE,
|
||||||
|
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
|
||||||
|
);
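/**
* Additional usage sketch (illustration only) combining template-driven date formats, the
* file-size cap and export registration; the schema, table and template names are examples:
* @example
* begin
*   DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
*     pSchemaName        => 'CT_MRDS',
*     pTableName         => 'MY_TABLE',
*     pKeyColumnName     => 'A_ETL_LOAD_SET_KEY_FK',
*     pBucketArea        => 'DATA',
*     pFolderName        => 'exports',
*     pParallelDegree    => 16,
*     pTemplateTableName => 'CT_ET_TEMPLATES.MY_TABLE',
*     pMaxFileSize       => 104857600, -- 100MB
*     pRegisterExport    => TRUE
*   );
* end;
**/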
|
||||||
|
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
-- VERSION MANAGEMENT FUNCTIONS
|
||||||
|
---------------------------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the current package version number
|
||||||
|
* return: Version string in format X.Y.Z (e.g., '2.1.0')
|
||||||
|
**/
|
||||||
|
FUNCTION GET_VERSION RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns comprehensive build information including version, date, and author
|
||||||
|
* return: Formatted string with complete build details
|
||||||
|
**/
|
||||||
|
FUNCTION GET_BUILD_INFO RETURN VARCHAR2;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the version history with recent changes
|
||||||
|
* return: Multi-line string with version history
|
||||||
|
**/
|
||||||
|
FUNCTION GET_VERSION_HISTORY RETURN VARCHAR2;
|
||||||
|
|
||||||
|
END;
|
||||||
|
|
||||||
|
/
|
||||||
@@ -106,7 +106,8 @@ BEGIN
|
|||||||
pMaxDate => SYSDATE,
|
pMaxDate => SYSDATE,
|
||||||
pParallelDegree => 16,
|
pParallelDegree => 16,
|
||||||
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT',
|
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT',
|
||||||
pMaxFileSize => 104857600 -- 100MB in bytes (safe for parallel execution, avoids ORA-04036)
|
pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036)
|
||||||
|
pRegisterExport => TRUE -- Register exported files in A_SOURCE_FILE_RECEIVED with metadata (CHECKSUM, CREATED, BYTES)
|
||||||
);
|
);
|
||||||
|
|
||||||
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT exported to DATA bucket with template column order');
|
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT exported to DATA bucket with template column order');
|
||||||
@@ -225,7 +226,8 @@ BEGIN
|
|||||||
pMaxDate => SYSDATE,
|
pMaxDate => SYSDATE,
|
||||||
pParallelDegree => 16,
|
pParallelDegree => 16,
|
||||||
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY',
|
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY',
|
||||||
pMaxFileSize => 104857600 -- 100MB in bytes (safe for parallel execution, avoids ORA-04036)
|
pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036)
|
||||||
|
pRegisterExport => TRUE -- Register exported files in A_SOURCE_FILE_RECEIVED with metadata (CHECKSUM, CREATED, BYTES)
|
||||||
);
|
);
|
||||||
|
|
||||||
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT_DAILY exported to DATA bucket with template column order');
|
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT_DAILY exported to DATA bucket with template column order');
|
||||||
|
|||||||
@@ -1,26 +0,0 @@
|
|||||||
GRANT SELECT, INSERT, UPDATE, DELETE ON ct_ods.a_load_history TO ct_mrds;
|
|
||||||
|
|
||||||
create or replace TRIGGER ct_mrds.a_workflow_history
|
|
||||||
AFTER INSERT OR UPDATE OF workflow_successful ON ct_mrds.a_workflow_history
|
|
||||||
REFERENCING NEW AS new OLD AS old
|
|
||||||
FOR EACH ROW
|
|
||||||
DECLARE
|
|
||||||
v_workflow_name VARCHAR2(128);
|
|
||||||
BEGIN
|
|
||||||
IF :new.workflow_name IN ('w_ODS_LM_STANDING_FACILITIES', 'w_ODS_CSDB_DEBT', 'w_ODS_CSDB_DEBT_DAILY', 'w_ODS_CSDB_RATINGS_FULL') AND :new.service_name = 'ODS' THEN
|
|
||||||
IF :new.workflow_successful <> :old.workflow_successful AND :new.workflow_successful = 'Y' THEN
|
|
||||||
IF :new.workflow_name = 'w_ODS_LM_STANDING_FACILITIES' THEN
|
|
||||||
v_workflow_name := 'w_ODS_LM_STANDING_FACILITY';
|
|
||||||
ELSE
|
|
||||||
v_workflow_name := :new.workflow_name;
|
|
||||||
END IF;
|
|
||||||
INSERT INTO ct_ods.a_load_history (
|
|
||||||
a_etl_load_set_key, workflow_name, infa_run_id, load_start, load_end, exdi_appl_req_id, exdi_correlation_id, load_successful, wla_run_id, dq_flag
|
|
||||||
) VALUES (
|
|
||||||
:new.a_workflow_history_key, v_workflow_name, NULL, :new.workflow_start, :new.workflow_end, NULL, NULL, :new.workflow_successful, NULL, NULL
|
|
||||||
);
|
|
||||||
END IF;
|
|
||||||
END IF;
|
|
||||||
END
|
|
||||||
;
|
|
||||||
/
|
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
--DROP TRIGGER ct_mrds.a_workflow_history;
|
|
||||||
REVOKE SELECT, INSERT, UPDATE, DELETE ON ct_ods.a_load_history FROM ct_mrds;
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
WHENEVER SQLERROR EXIT FAILURE
|
|
||||||
SET SERVEROUTPUT ON
|
|
||||||
SET TIMING ON
|
|
||||||
SET ECHO ON
|
|
||||||
SET HEADING OFF
|
|
||||||
SET FEEDBACK ON
|
|
||||||
SET VERIFY OFF
|
|
||||||
|
|
||||||
var filename VARCHAR2(100)
|
|
||||||
BEGIN
|
|
||||||
SELECT 'INSTALL_MARS_851_' || SUBSTR(PDB_NAME, (INSTR(PDB_NAME,'_',1)+1), (LENGTH(PDB_NAME)-INSTR(PDB_NAME,'_',1))) || '_' ||TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS')||'.log' INTO :filename from DBA_PDBS;
|
|
||||||
END;
|
|
||||||
/
|
|
||||||
column filename new_value _filename
|
|
||||||
select :filename filename from dual;
|
|
||||||
spool &_filename
|
|
||||||
|
|
||||||
prompt ##### started at time #####
|
|
||||||
select systimestamp from dual;
|
|
||||||
prompt ##### database name #####
|
|
||||||
SELECT SUBSTR(PDB_NAME, (INSTR(PDB_NAME,'_',1)+1), (LENGTH(PDB_NAME)-INSTR(PDB_NAME,'_',1))) AS PDB_NAME FROM DBA_PDBS;
|
|
||||||
|
|
||||||
|
|
||||||
@@01_MARS_851_install_CT_MRDS_A_LOAD_HISTORY_TRIGGER.sql
|
|
||||||
|
|
||||||
|
|
||||||
SET ECHO OFF
|
|
||||||
|
|
||||||
prompt ##### completed at time #####
|
|
||||||
select systimestamp from dual;
|
|
||||||
|
|
||||||
SPOOL OFF
|
|
||||||
EXIT
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
WHENEVER SQLERROR EXIT FAILURE
|
|
||||||
SET SERVEROUTPUT ON
|
|
||||||
SET TIMING ON
|
|
||||||
SET ECHO ON
|
|
||||||
SET HEADING OFF
|
|
||||||
SET FEEDBACK ON
|
|
||||||
SET VERIFY OFF
|
|
||||||
|
|
||||||
var filename VARCHAR2(100)
|
|
||||||
BEGIN
|
|
||||||
SELECT 'ROLLBACK_MARS_851_' || SUBSTR(PDB_NAME, (INSTR(PDB_NAME,'_',1)+1), (LENGTH(PDB_NAME)-INSTR(PDB_NAME,'_',1))) || '_' ||TO_CHAR(SYSDATE,'YYYYMMDD_HH24MISS')||'.log' INTO :filename from DBA_PDBS;
|
|
||||||
END;
|
|
||||||
/
|
|
||||||
column filename new_value _filename
|
|
||||||
select :filename filename from dual;
|
|
||||||
spool &_filename
|
|
||||||
|
|
||||||
prompt ##### started at time #####
|
|
||||||
select systimestamp from dual;
|
|
||||||
prompt ##### database name #####
|
|
||||||
SELECT SUBSTR(PDB_NAME, (INSTR(PDB_NAME,'_',1)+1), (LENGTH(PDB_NAME)-INSTR(PDB_NAME,'_',1))) AS PDB_NAME FROM DBA_PDBS;
|
|
||||||
|
|
||||||
|
|
||||||
@@91_MARS_851_rollback_CT_MRDS_A_LOAD_HISTORY_TRIGGER.sql
|
|
||||||
|
|
||||||
|
|
||||||
SET ECHO OFF
|
|
||||||
|
|
||||||
prompt ##### completed at time #####
|
|
||||||
select systimestamp from dual;
|
|
||||||
|
|
||||||
SPOOL OFF
|
|
||||||
EXIT
|
|
||||||
@@ -0,0 +1,156 @@
|
|||||||
|
-- ===================================================================
|
||||||
|
-- MARS-956: Export Historical C2D MPEC Data to DATA Bucket
|
||||||
|
-- ===================================================================
|
||||||
|
-- Purpose: One-time export of historical C2D MPEC delta data from
|
||||||
|
-- OU_C2D operational database to DATA bucket as CSV files
|
||||||
|
-- Method: Using DATA_EXPORTER.EXPORT_TABLE_DATA procedure
|
||||||
|
-- Target: DATA bucket with folder structure DATA/C2D/{TABLE_NAME}
|
||||||
|
-- Format: CSV files for complete historical data access
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT MARS-956: Starting C2D MPEC Historical Data Export
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT Export Strategy:
|
||||||
|
PROMPT - Source: OU_C2D schema tables (operational database)
|
||||||
|
PROMPT - Target: DATA bucket as CSV files
|
||||||
|
PROMPT - Method: DATA_EXPORTER.EXPORT_TABLE_DATA
|
||||||
|
PROMPT - Structure: Must match ODS template tables
|
||||||
|
PROMPT - Registration: Files registered in A_SOURCE_FILE_RECEIVED
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Log export start
|
||||||
|
INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE, PROCEDURE_PARAMETERS)
|
||||||
|
VALUES ('MARS-956', 'EXPORT_C2D_MPEC_DATA', 'INFO', 'Starting historical C2D MPEC data export',
|
||||||
|
'Tables: MPEC_ADMIN, MPEC_CONTENT, MPEC_CONTENT_CRITERION');
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
-- ===================================================================
|
||||||
|
-- TABLE 1: OU_C2D.MPEC_ADMIN -> DATA/C2D/C2D_MPEC_ADMIN
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
PROMPT Exporting Table 1/3: OU_C2D.MPEC_ADMIN
|
||||||
|
PROMPT Target: mrds_data_dev/DATA/C2D/C2D_MPEC_ADMIN
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA(
|
||||||
|
pSchemaName => 'OU_C2D',
|
||||||
|
pTableName => 'MPEC_ADMIN',
|
||||||
|
pKeyColumnName => 'A_ETL_LOAD_SET_FK', -- ETL key for data lookup
|
||||||
|
pBucketArea => 'DATA',
|
||||||
|
pFolderName => 'DATA/C2D/C2D_MPEC_ADMIN',
|
||||||
|
pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_ADMIN', -- Template for column order
|
||||||
|
pRegisterExport => TRUE, -- Register files in A_SOURCE_FILE_RECEIVED
|
||||||
|
pCredentialName => 'DEF_CRED_ARN'
|
||||||
|
);
|
||||||
|
|
||||||
|
DBMS_OUTPUT.PUT_LINE('✓ MPEC_ADMIN export completed successfully');
|
||||||
|
EXCEPTION
|
||||||
|
WHEN OTHERS THEN
|
||||||
|
DBMS_OUTPUT.PUT_LINE('✗ MPEC_ADMIN export failed: ' || SQLERRM);
|
||||||
|
-- Log error but continue with other tables
|
||||||
|
INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE)
|
||||||
|
VALUES ('MARS-956', 'EXPORT_MPEC_ADMIN', 'ERROR', 'Export failed: ' || SQLERRM);
|
||||||
|
COMMIT;
|
||||||
|
RAISE;
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
-- ===================================================================
|
||||||
|
-- TABLE 2: OU_C2D.MPEC_CONTENT -> DATA/C2D/C2D_MPEC_CONTENT
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
PROMPT Exporting Table 2/3: OU_C2D.MPEC_CONTENT
|
||||||
|
PROMPT Target: mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA(
|
||||||
|
pSchemaName => 'OU_C2D',
|
||||||
|
pTableName => 'MPEC_CONTENT',
|
||||||
|
pKeyColumnName => 'A_ETL_LOAD_SET_FK', -- ETL key for data lookup
|
||||||
|
pBucketArea => 'DATA',
|
||||||
|
pFolderName => 'DATA/C2D/C2D_MPEC_CONTENT',
|
||||||
|
pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_CONTENT', -- Template for column order
|
||||||
|
pRegisterExport => TRUE, -- Register files in A_SOURCE_FILE_RECEIVED
|
||||||
|
pCredentialName => 'DEF_CRED_ARN'
|
||||||
|
);
|
||||||
|
|
||||||
|
DBMS_OUTPUT.PUT_LINE('✓ MPEC_CONTENT export completed successfully');
|
||||||
|
EXCEPTION
|
||||||
|
WHEN OTHERS THEN
|
||||||
|
DBMS_OUTPUT.PUT_LINE('✗ MPEC_CONTENT export failed: ' || SQLERRM);
|
||||||
|
-- Log error but continue with other tables
|
||||||
|
INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE)
|
||||||
|
VALUES ('MARS-956', 'EXPORT_MPEC_CONTENT', 'ERROR', 'Export failed: ' || SQLERRM);
|
||||||
|
COMMIT;
|
||||||
|
RAISE;
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
-- ===================================================================
|
||||||
|
-- TABLE 3: OU_C2D.MPEC_CONTENT_CRITERION -> DATA/C2D/C2D_MPEC_CONTENT_CRITERION
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
PROMPT Exporting Table 3/3: OU_C2D.MPEC_CONTENT_CRITERION
|
||||||
|
PROMPT Target: mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT_CRITERION
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA(
|
||||||
|
pSchemaName => 'OU_C2D',
|
||||||
|
pTableName => 'MPEC_CONTENT_CRITERION',
|
||||||
|
pKeyColumnName => 'A_ETL_LOAD_SET_FK', -- ETL key for data lookup
|
||||||
|
pBucketArea => 'DATA',
|
||||||
|
pFolderName => 'DATA/C2D/C2D_MPEC_CONTENT_CRITERION',
|
||||||
|
pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_CONTENT_CRITERION', -- Template for column order
|
||||||
|
pRegisterExport => TRUE, -- Register files in A_SOURCE_FILE_RECEIVED
|
||||||
|
pCredentialName => 'DEF_CRED_ARN'
|
||||||
|
);
|
||||||
|
|
||||||
|
DBMS_OUTPUT.PUT_LINE('✓ MPEC_CONTENT_CRITERION export completed successfully');
|
||||||
|
EXCEPTION
|
||||||
|
WHEN OTHERS THEN
|
||||||
|
DBMS_OUTPUT.PUT_LINE('✗ MPEC_CONTENT_CRITERION export failed: ' || SQLERRM);
|
||||||
|
-- Log error
|
||||||
|
INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE)
|
||||||
|
VALUES ('MARS-956', 'EXPORT_MPEC_CONTENT_CRITERION', 'ERROR', 'Export failed: ' || SQLERRM);
|
||||||
|
COMMIT;
|
||||||
|
RAISE;
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
-- ===================================================================
|
||||||
|
-- Export Summary and Verification
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT Export Summary - Checking Results
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Log completion
|
||||||
|
INSERT INTO CT_MRDS.A_PROCESS_LOG (PACKAGE_NAME, PROCEDURE_NAME, EVENT_TYPE, EVENT_MESSAGE)
|
||||||
|
VALUES ('MARS-956', 'EXPORT_C2D_MPEC_DATA', 'INFO', 'All C2D MPEC historical exports completed successfully');
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
-- Display recent export activity
|
||||||
|
PROMPT Recent Export Activity (last 30 minutes):
|
||||||
|
SELECT TO_CHAR(EVENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS EXPORT_TIME,
|
||||||
|
PACKAGE_NAME,
|
||||||
|
PROCEDURE_NAME,
|
||||||
|
EVENT_TYPE,
|
||||||
|
EVENT_MESSAGE
|
||||||
|
FROM CT_MRDS.A_PROCESS_LOG
|
||||||
|
WHERE (PACKAGE_NAME = 'MARS-956'
       OR PROCEDURE_NAME LIKE '%DATA_EXPORTER%')
  AND EVENT_TIMESTAMP >= SYSTIMESTAMP - INTERVAL '30' MINUTE
|
||||||
|
ORDER BY EVENT_TIMESTAMP DESC
|
||||||
|
FETCH FIRST 20 ROWS ONLY;
|
||||||
|
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT MARS-956 Export Completed Successfully!
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT Next Steps:
|
||||||
|
PROMPT 1. Verify CSV files created in DATA bucket
|
||||||
|
PROMPT 2. Check file structure matches template tables
|
||||||
|
PROMPT 3. Validate row counts match source tables
|
||||||
|
PROMPT 4. Confirm data available for delta queries
|
||||||
|
PROMPT =========================================================================
|
||||||
68
MARS_Packages/REL02_POST/MARS-956/README.md
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
# MARS-956: Exporting Historical data for ODS: C2D MPEC (delta)
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
**Purpose**: One-time export of historical C2D MPEC delta data from operational database (OU_C2D) to DATA bucket as CSV files.
|
||||||
|
|
||||||
|
**Approach**: Use DATA_EXPORTER export functionality EXPORT_TABLE_DATA for bulk data movement with file registration.
|
||||||
|
|
||||||
|
**Input**: Old tables in OU_C2D operational database
|
||||||
|
**Output**: CSV files in DATA bucket
|
||||||
|
**Mapping**: Structure must match new ODS template tables
|
||||||
|
|
||||||
|
## Tables to Export
|
||||||
|
|
||||||
|
| Source Table (OU_C2D) | Target Location (DATA) | Export Type | Time Dependency |
|
||||||
|
|------------------------|-------------------------|-------------|------------------|
|
||||||
|
| `MPEC_ADMIN` | `mrds_data_dev/DATA/C2D/C2D_MPEC_ADMIN` | CSV to DATA | Sync with REL_02 |
|
||||||
|
| `MPEC_CONTENT` | `mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT` | CSV to DATA | Sync with REL_02 |
|
||||||
|
| `MPEC_CONTENT_CRITERION` | `mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT_CRITERION` | CSV to DATA | Sync with REL_02 |
|
||||||
|
|
||||||
|
## Export Strategy
|
||||||
|
|
||||||
|
- **Format**: CSV files in DATA bucket
|
||||||
|
- **Reason**: Complete history of delta records needed for all queries
|
||||||
|
- **Method**: `DATA_EXPORTER.EXPORT_TABLE_DATA` procedure
|
||||||
|
- **Bucket Area**: `'DATA'`
|
||||||
|
- **Folder Structure**: `'DATA/C2D/{TABLE_NAME}'`
|
||||||
|
- **File Registration**: Files registered in A_SOURCE_FILE_RECEIVED table
|
||||||
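
A representative call, taken from `01_MARS_956_export_c2d_mpec_data.sql` (the MPEC_ADMIN table; the other two tables differ only in table, folder and template names):

```sql
BEGIN
    CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA(
        pSchemaName        => 'OU_C2D',
        pTableName         => 'MPEC_ADMIN',
        pKeyColumnName     => 'A_ETL_LOAD_SET_FK',              -- ETL key for data lookup
        pBucketArea        => 'DATA',
        pFolderName        => 'DATA/C2D/C2D_MPEC_ADMIN',
        pTemplateTableName => 'CT_ET_TEMPLATES.C2D_MPEC_ADMIN', -- template for column order
        pRegisterExport    => TRUE,                             -- register files in A_SOURCE_FILE_RECEIVED
        pCredentialName    => 'DEF_CRED_ARN'
    );
END;
/
```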
|
|
||||||
|
## Installation Steps
|
||||||
|
|
||||||
|
1. Run master install script: `@install_mars956.sql`
|
||||||
|
2. Verify exports completed successfully
|
||||||
|
3. Confirm CSV files created in DATA bucket with expected structure
|
||||||
|
|
||||||
|
## Files Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
MARS-956/
|
||||||
|
├── README.md # This file
|
||||||
|
├── install_mars956.sql # Master installation script
|
||||||
|
├── 01_MARS_956_export_c2d_mpec_data.sql # Export procedures execution
|
||||||
|
├── track_package_versions.sql # Universal version tracking
|
||||||
|
├── verify_packages_version.sql # Universal version verification
|
||||||
|
└── rollback_mars956.sql # Rollback script (if needed)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- OU_C2D schema access for source tables
|
||||||
|
- DATA_EXPORTER package v2.7.5+ deployed (with pRegisterExport support); see the version check below
|
||||||
|
- DEF_CRED_ARN credentials configured
|
||||||
|
- DATA bucket accessible
|
||||||
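
The deployed DATA_EXPORTER version can be confirmed up front with the same query the install script uses:

```sql
SELECT 'DATA_EXPORTER v' || CT_MRDS.DATA_EXPORTER.PACKAGE_VERSION ||
       ' (Build: ' || CT_MRDS.DATA_EXPORTER.PACKAGE_BUILD_DATE || ')' AS PACKAGE_INFO
FROM DUAL;
```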
|
|
||||||
|
## Post-Installation Verification
|
||||||
|
|
||||||
|
1. Check export completion in A_PROCESS_LOG
|
||||||
|
2. Verify CSV files created in DATA bucket
|
||||||
|
3. Validate file structure matches template tables
|
||||||
|
4. Confirm row counts match source tables
|
||||||
|
5. Check file registration in A_SOURCE_FILE_RECEIVED table
|
||||||
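
A quick check of the registered files (a sketch; columns follow the queries in `validate_export.sql`):

```sql
SELECT SOURCE_FILE_NAME,
       ROUND(BYTES/1024, 2) AS SIZE_KB,
       PROCESSING_STATUS,
       TO_CHAR(RECEPTION_DATE, 'YYYY-MM-DD HH24:MI:SS') AS REGISTERED_TIME
  FROM CT_MRDS.A_SOURCE_FILE_RECEIVED
 WHERE RECEPTION_DATE >= SYSDATE - 1
   AND SOURCE_FILE_NAME LIKE '%MPEC_%'
 ORDER BY RECEPTION_DATE DESC;
```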
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- This is a **one-time** data migration
|
||||||
|
- No package modifications required (uses existing DATA_EXPORTER)
|
||||||
|
- Export timing critical - must sync with REL_02 deployment
|
||||||
|
- Complete history required for delta queries
|
||||||
128
MARS_Packages/REL02_POST/MARS-956/install_mars956.sql
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
-- ===================================================================
|
||||||
|
-- MARS-956 MASTER INSTALLATION SCRIPT
|
||||||
|
-- ===================================================================
|
||||||
|
-- Purpose: Export Historical C2D MPEC data from OU_C2D to DATA bucket
|
||||||
|
-- Author: Grzegorz Michalski
|
||||||
|
-- Date: 2026-02-11
|
||||||
|
--
|
||||||
|
-- Requirements:
|
||||||
|
-- - ADMIN user access for MARS installation
|
||||||
|
-- - OU_C2D schema access for source tables
|
||||||
|
-- - DATA_EXPORTER package v2.7.5+ deployed (with pRegisterExport support)
|
||||||
|
-- - DEF_CRED_ARN credentials configured
|
||||||
|
-- - DATA bucket accessible
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
-- Dynamic spool file generation
|
||||||
|
host mkdir log 2>nul
|
||||||
|
define spoolfile = 'log\install_mars956_'
|
||||||
|
define timestamp = ''
|
||||||
|
|
||||||
|
-- Get current timestamp for unique log filename
|
||||||
|
column current_time new_value timestamp
|
||||||
|
SELECT TO_CHAR(SYSDATE, 'YYYYMMDD_HH24MISS') AS current_time FROM dual;
|
||||||
|
|
||||||
|
-- Start logging
|
||||||
|
spool &spoolfile.&timestamp..log
|
||||||
|
|
||||||
|
-- Display environment information
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT MARS-956 INSTALLATION - Export Historical C2D MPEC Data
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT Installation Start:
|
||||||
|
SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS INSTALL_START FROM DUAL;
|
||||||
|
|
||||||
|
PROMPT Current User:
|
||||||
|
SELECT USER AS CURRENT_USER FROM DUAL;
|
||||||
|
|
||||||
|
PROMPT Database Info:
|
||||||
|
SELECT INSTANCE_NAME, VERSION, STATUS FROM V$INSTANCE;
|
||||||
|
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT Installation Details:
|
||||||
|
PROMPT - Purpose: One-time export of historical C2D MPEC delta data
|
||||||
|
PROMPT - Source: OU_C2D schema tables (operational database)
|
||||||
|
PROMPT - Target: DATA bucket as CSV files
|
||||||
|
PROMPT - Tables: MPEC_ADMIN, MPEC_CONTENT, MPEC_CONTENT_CRITERION
|
||||||
|
PROMPT - Method: DATA_EXPORTER.EXPORT_TABLE_DATA
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
SET SERVEROUTPUT ON SIZE 1000000
|
||||||
|
SET LINESIZE 200
|
||||||
|
SET PAGESIZE 1000
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Step 1: Verify Prerequisites
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Verify DATA_EXPORTER package is available
|
||||||
|
PROMPT Checking DATA_EXPORTER package availability...
|
||||||
|
SELECT 'DATA_EXPORTER v' || CT_MRDS.DATA_EXPORTER.PACKAGE_VERSION ||
|
||||||
|
' (Build: ' || CT_MRDS.DATA_EXPORTER.PACKAGE_BUILD_DATE || ')' AS PACKAGE_INFO
|
||||||
|
FROM DUAL;
|
||||||
|
|
||||||
|
-- Verify source tables exist in OU_C2D
|
||||||
|
PROMPT Checking source tables in OU_C2D schema...
|
||||||
|
SELECT table_name, num_rows
|
||||||
|
FROM all_tables
|
||||||
|
WHERE owner = 'OU_C2D'
|
||||||
|
AND table_name IN ('MPEC_ADMIN', 'MPEC_CONTENT', 'MPEC_CONTENT_CRITERION')
|
||||||
|
ORDER BY table_name;
|
||||||
|
|
||||||
|
-- Verify template tables exist in CT_ET_TEMPLATES
|
||||||
|
PROMPT Checking template tables in CT_ET_TEMPLATES schema...
|
||||||
|
SELECT table_name
|
||||||
|
FROM all_tables
|
||||||
|
WHERE owner = 'CT_ET_TEMPLATES'
|
||||||
|
AND table_name IN ('C2D_MPEC_ADMIN', 'C2D_MPEC_CONTENT', 'C2D_MPEC_CONTENT_CRITERION')
|
||||||
|
ORDER BY table_name;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Step 2: Execute Historical Data Export
|
||||||
|
PROMPT =========================================================================
|
||||||
|
@@01_MARS_956_export_c2d_mpec_data.sql
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Step 3: Track Package Versions
|
||||||
|
PROMPT =========================================================================
|
||||||
|
@@track_package_versions.sql
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Step 4: Verify Package Versions
|
||||||
|
PROMPT =========================================================================
|
||||||
|
@@verify_packages_version.sql
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT MARS-956 INSTALLATION SUMMARY
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Display final summary
|
||||||
|
PROMPT Installation Completed:
|
||||||
|
SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS INSTALL_END FROM DUAL;
|
||||||
|
|
||||||
|
PROMPT Export Results Summary:
|
||||||
|
SELECT COUNT(*) AS EXPORT_LOG_ENTRIES,
|
||||||
|
MIN(EVENT_TIMESTAMP) AS FIRST_EXPORT,
|
||||||
|
MAX(EVENT_TIMESTAMP) AS LAST_EXPORT
|
||||||
|
FROM CT_MRDS.A_PROCESS_LOG
|
||||||
|
WHERE PACKAGE_NAME = 'MARS-956'
|
||||||
|
AND EVENT_TIMESTAMP >= SYSDATE - 1; -- Last 24 hours
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT POST-INSTALLATION TASKS
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT 1. Verify CSV files created in DATA bucket:
|
||||||
|
PROMPT    - mrds_data_dev/DATA/C2D/C2D_MPEC_ADMIN/*.csv
PROMPT    - mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT/*.csv
PROMPT    - mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT_CRITERION/*.csv
|
||||||
|
PROMPT
|
||||||
|
PROMPT 2. Check file structure matches template tables
|
||||||
|
PROMPT 3. Validate row counts match source tables
|
||||||
|
PROMPT 4. Confirm data available for delta queries
|
||||||
|
PROMPT 5. Sync deployment timing with REL_02 deployment
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
spool off
|
||||||
|
quit;
|
||||||
85
MARS_Packages/REL02_POST/MARS-956/rollback_mars956.sql
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
-- ===================================================================
|
||||||
|
-- MARS-956 ROLLBACK SCRIPT
|
||||||
|
-- ===================================================================
|
||||||
|
-- Purpose: Rollback/cleanup for MARS-956 C2D MPEC historical data export
|
||||||
|
-- Author: Grzegorz Michalski
|
||||||
|
-- Date: 2026-02-11
|
||||||
|
--
|
||||||
|
-- NOTE: This is primarily for cleanup of log entries and tracking data.
|
||||||
|
-- The exported CSV files would need to be manually removed from
|
||||||
|
-- the DATA bucket if rollback is required.
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
-- Start logging
|
||||||
|
spool rollback_mars956.log
|
||||||
|
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT MARS-956 ROLLBACK - Cleanup Historical C2D MPEC Export
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT Rollback Start:
|
||||||
|
SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS ROLLBACK_START FROM DUAL;
|
||||||
|
|
||||||
|
SET SERVEROUTPUT ON SIZE 1000000
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Step 1: Review Export Activity
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Show what was exported
|
||||||
|
PROMPT Recent MARS-956 export activity:
|
||||||
|
SELECT TO_CHAR(EVENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS EXPORT_TIME,
|
||||||
|
PROCEDURE_NAME,
|
||||||
|
EVENT_TYPE,
|
||||||
|
EVENT_MESSAGE
|
||||||
|
FROM CT_MRDS.A_PROCESS_LOG
|
||||||
|
WHERE PACKAGE_NAME = 'MARS-956'
|
||||||
|
OR PROCEDURE_NAME LIKE '%MARS_956%'
|
||||||
|
ORDER BY EVENT_TIMESTAMP DESC;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Step 2: Cleanup Log Entries (Optional)
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Optionally remove MARS-956 log entries (uncomment if needed)
|
||||||
|
/*
|
||||||
|
DELETE FROM CT_MRDS.A_PROCESS_LOG
|
||||||
|
WHERE PACKAGE_NAME = 'MARS-956'
|
||||||
|
OR PROCEDURE_NAME LIKE '%MARS_956%';
|
||||||
|
|
||||||
|
PROMPT Deleted log entries:
|
||||||
|
SELECT SQL%ROWCOUNT AS DELETED_ROWS FROM DUAL;
|
||||||
|
|
||||||
|
COMMIT;
|
||||||
|
*/
|
||||||
|
|
||||||
|
PROMPT Log cleanup skipped (uncomment DELETE statement if cleanup needed)
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Step 3: Manual Steps Required
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
PROMPT ⚠️ MANUAL CLEANUP REQUIRED:
|
||||||
|
PROMPT
|
||||||
|
PROMPT If complete rollback is needed, manually remove CSV files from DATA bucket:
|
||||||
|
PROMPT - mrds_data_dev/DATA/C2D/C2D_MPEC_ADMIN/*.csv
PROMPT - mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT/*.csv
PROMPT - mrds_data_dev/DATA/C2D/C2D_MPEC_CONTENT_CRITERION/*.csv
|
||||||
|
PROMPT
|
||||||
|
PROMPT Use OCI CLI or console to remove files:
|
||||||
|
PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC"
|
||||||
|
PROMPT oci os object delete --bucket-name mrds_data_dev --name "path/to/file.csv"
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT MARS-956 ROLLBACK SUMMARY
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
PROMPT Rollback Completed:
|
||||||
|
SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS ROLLBACK_END FROM DUAL;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Note: This rollback script primarily cleans up log entries.
|
||||||
|
PROMPT Exported CSV files require manual removal from DATA bucket.
|
||||||
|
|
||||||
|
spool off
|
||||||
|
quit;
|
||||||
96
MARS_Packages/REL02_POST/MARS-956/track_package_versions.sql
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
-- ===================================================================
|
||||||
|
-- Simple Package Version Tracking Script
|
||||||
|
-- ===================================================================
|
||||||
|
-- Purpose: Track specified Oracle package versions for MARS-956
|
||||||
|
-- Author: Grzegorz Michalski
|
||||||
|
-- Date: 2026-02-11
|
||||||
|
-- Version: 3.1.0 - List-Based Edition
|
||||||
|
--
|
||||||
|
-- USAGE:
|
||||||
|
-- 1. Edit package list below (add/remove packages as needed)
|
||||||
|
-- 2. Include in your install/rollback script: @@track_package_versions.sql
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
SET SERVEROUTPUT ON;
|
||||||
|
|
||||||
|
DECLARE
|
||||||
|
TYPE t_package_rec IS RECORD (
|
||||||
|
owner VARCHAR2(50),
|
||||||
|
package_name VARCHAR2(50),
|
||||||
|
version VARCHAR2(50)
|
||||||
|
);
|
||||||
|
TYPE t_packages IS TABLE OF t_package_rec;
|
||||||
|
TYPE t_string_array IS TABLE OF VARCHAR2(100);
|
||||||
|
|
||||||
|
-- ===================================================================
|
||||||
|
-- PACKAGE LIST - Edit this array to specify packages to track
|
||||||
|
-- ===================================================================
|
||||||
|
-- MARS-956: Historical C2D MPEC data export - using existing packages
|
||||||
|
-- No new packages created, tracking existing DATA_EXPORTER usage
|
||||||
|
-- ===================================================================
|
||||||
|
vPackageList t_string_array := t_string_array(
|
||||||
|
'CT_MRDS.DATA_EXPORTER'
|
||||||
|
);
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
vPackages t_packages := t_packages();
|
||||||
|
vVersion VARCHAR2(50);
|
||||||
|
vCount NUMBER := 0;
|
||||||
|
vOwner VARCHAR2(50);
|
||||||
|
vPackageName VARCHAR2(50);
|
||||||
|
vDotPos NUMBER;
|
||||||
|
BEGIN
|
||||||
|
DBMS_OUTPUT.PUT_LINE('========================================');
|
||||||
|
DBMS_OUTPUT.PUT_LINE('MARS-956: Package Version Tracking');
|
||||||
|
DBMS_OUTPUT.PUT_LINE('========================================');
|
||||||
|
|
||||||
|
-- Process each package in the list
|
||||||
|
FOR i IN 1..vPackageList.COUNT LOOP
|
||||||
|
vDotPos := INSTR(vPackageList(i), '.');
|
||||||
|
IF vDotPos > 0 THEN
|
||||||
|
vOwner := SUBSTR(vPackageList(i), 1, vDotPos - 1);
|
||||||
|
vPackageName := SUBSTR(vPackageList(i), vDotPos + 1);
|
||||||
|
|
||||||
|
-- Get package version
|
||||||
|
BEGIN
|
||||||
|
EXECUTE IMMEDIATE 'SELECT ' || vOwner || '.' || vPackageName || '.GET_VERSION() FROM DUAL' INTO vVersion;
|
||||||
|
vPackages.EXTEND;
|
||||||
|
vPackages(vPackages.COUNT).owner := vOwner;
|
||||||
|
vPackages(vPackages.COUNT).package_name := vPackageName;
|
||||||
|
vPackages(vPackages.COUNT).version := vVersion;
|
||||||
|
|
||||||
|
-- Track in ENV_MANAGER
|
||||||
|
BEGIN
|
||||||
|
CT_MRDS.ENV_MANAGER.TRACK_PACKAGE_VERSION(
|
||||||
|
pPackageOwner => vOwner,
|
||||||
|
pPackageName => vPackageName,
|
||||||
|
pPackageVersion => vVersion,
|
||||||
|
pPackageBuildDate => TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS'),
|
||||||
|
pPackageAuthor => 'Grzegorz Michalski'
|
||||||
|
);
|
||||||
|
vCount := vCount + 1;
|
||||||
|
EXCEPTION
|
||||||
|
WHEN OTHERS THEN NULL; -- Continue even if tracking fails
|
||||||
|
END;
|
||||||
|
EXCEPTION
|
||||||
|
WHEN OTHERS THEN NULL; -- Skip packages that fail
|
||||||
|
END;
|
||||||
|
END IF;
|
||||||
|
END LOOP;
|
||||||
|
|
||||||
|
DBMS_OUTPUT.PUT_LINE('');
|
||||||
|
DBMS_OUTPUT.PUT_LINE('Summary:');
|
||||||
|
DBMS_OUTPUT.PUT_LINE('--------');
|
||||||
|
DBMS_OUTPUT.PUT_LINE('Packages tracked: ' || vCount || '/' || vPackageList.COUNT);
|
||||||
|
|
||||||
|
IF vPackages.COUNT > 0 THEN
|
||||||
|
DBMS_OUTPUT.PUT_LINE('');
|
||||||
|
DBMS_OUTPUT.PUT_LINE('Tracked Packages:');
|
||||||
|
FOR i IN 1..vPackages.COUNT LOOP
|
||||||
|
DBMS_OUTPUT.PUT_LINE(' ' || vPackages(i).owner || '.' || vPackages(i).package_name || ' v' || vPackages(i).version);
|
||||||
|
END LOOP;
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
DBMS_OUTPUT.PUT_LINE('========================================');
|
||||||
|
END;
|
||||||
|
/
|
||||||
182
MARS_Packages/REL02_POST/MARS-956/validate_export.sql
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
-- ===================================================================
|
||||||
|
-- MARS-956 POST-EXPORT VALIDATION SCRIPT
|
||||||
|
-- ===================================================================
|
||||||
|
-- Purpose: Validate C2D MPEC historical data export results
|
||||||
|
-- Author: Grzegorz Michalski
|
||||||
|
-- Date: 2026-02-11
|
||||||
|
--
|
||||||
|
-- Run after MARS-956 installation to verify export success
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
SET LINESIZE 200
|
||||||
|
SET PAGESIZE 1000
|
||||||
|
SET SERVEROUTPUT ON SIZE 1000000
|
||||||
|
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT MARS-956 POST-EXPORT VALIDATION
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT Validation Start:
|
||||||
|
SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS VALIDATION_START FROM DUAL;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT 1. Export Process Log Review
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Check export completion status
|
||||||
|
PROMPT Recent MARS-956 export activity:
|
||||||
|
SELECT TO_CHAR(EVENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS EVENT_TIME,
|
||||||
|
PROCEDURE_NAME,
|
||||||
|
EVENT_TYPE,
|
||||||
|
SUBSTR(EVENT_MESSAGE, 1, 80) AS MESSAGE_PREVIEW
|
||||||
|
FROM CT_MRDS.A_PROCESS_LOG
|
||||||
|
WHERE PACKAGE_NAME = 'MARS-956'
|
||||||
|
OR PROCEDURE_NAME LIKE '%MARS_956%'
|
||||||
|
OR PROCEDURE_NAME LIKE '%DATA_EXPORTER%'
|
||||||
|
ORDER BY EVENT_TIMESTAMP DESC
|
||||||
|
FETCH FIRST 20 ROWS ONLY;
|
||||||
|
|
||||||
|
-- Check for any errors
|
||||||
|
PROMPT Export errors (if any):
|
||||||
|
SELECT TO_CHAR(EVENT_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS ERROR_TIME,
|
||||||
|
PROCEDURE_NAME,
|
||||||
|
EVENT_MESSAGE
|
||||||
|
FROM CT_MRDS.A_PROCESS_LOG
|
||||||
|
WHERE (PACKAGE_NAME = 'MARS-956' OR PROCEDURE_NAME LIKE '%MARS_956%')
|
||||||
|
AND EVENT_TYPE = 'ERROR'
|
||||||
|
AND EVENT_TIMESTAMP >= SYSDATE - 1; -- Last 24 hours
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT 2. Source Table Row Counts
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Get source table counts for comparison
|
||||||
|
PROMPT Source table row counts (OU_C2D):
|
||||||
|
SELECT 'OU_C2D' AS SCHEMA_NAME,
|
||||||
|
table_name,
|
||||||
|
num_rows,
|
||||||
|
TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS') AS STATS_DATE
|
||||||
|
FROM all_tables
|
||||||
|
WHERE owner = 'OU_C2D'
|
||||||
|
AND table_name IN ('MPEC_ADMIN', 'MPEC_CONTENT', 'MPEC_CONTENT_CRITERION')
|
||||||
|
ORDER BY table_name;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT 3. Template Table Structure Verification
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Verify template tables exist and have proper structure
|
||||||
|
PROMPT Template tables in CT_ET_TEMPLATES:
|
||||||
|
SELECT table_name,
|
||||||
|
num_rows,
|
||||||
|
TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS') AS STATS_DATE
|
||||||
|
FROM all_tables
|
||||||
|
WHERE owner = 'CT_ET_TEMPLATES'
|
||||||
|
AND table_name IN ('C2D_MPEC_ADMIN', 'C2D_MPEC_CONTENT', 'C2D_MPEC_CONTENT_CRITERION')
|
||||||
|
ORDER BY table_name;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Template table column counts:
|
||||||
|
SELECT owner, table_name, COUNT(*) AS COLUMN_COUNT
|
||||||
|
FROM all_tab_columns
|
||||||
|
WHERE owner IN ('OU_C2D', 'CT_ET_TEMPLATES')
|
||||||
|
AND ((owner = 'OU_C2D' AND table_name IN ('MPEC_ADMIN', 'MPEC_CONTENT', 'MPEC_CONTENT_CRITERION'))
|
||||||
|
OR (owner = 'CT_ET_TEMPLATES' AND table_name IN ('C2D_MPEC_ADMIN', 'C2D_MPEC_CONTENT', 'C2D_MPEC_CONTENT_CRITERION')))
|
||||||
|
GROUP BY owner, table_name
|
||||||
|
ORDER BY table_name, owner;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT 4. File Registration Validation
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
-- Check if exported files were registered in A_SOURCE_FILE_RECEIVED
|
||||||
|
PROMPT Registered export files (last 24 hours):
|
||||||
|
SELECT A_SOURCE_FILE_RECEIVED_KEY,
|
||||||
|
A_SOURCE_FILE_CONFIG_KEY,
|
||||||
|
SOURCE_FILE_NAME,
|
||||||
|
ROUND(BYTES/1024, 2) AS SIZE_KB,
|
||||||
|
PROCESSING_STATUS,
|
||||||
|
TO_CHAR(RECEPTION_DATE, 'YYYY-MM-DD HH24:MI:SS') AS REGISTERED_TIME
|
||||||
|
FROM CT_MRDS.A_SOURCE_FILE_RECEIVED
|
||||||
|
WHERE RECEPTION_DATE >= SYSDATE - 1 -- Last 24 hours
|
||||||
|
AND (SOURCE_FILE_NAME LIKE '%MPEC_%' OR A_SOURCE_FILE_CONFIG_KEY IN (
|
||||||
|
SELECT A_SOURCE_FILE_CONFIG_KEY
|
||||||
|
FROM CT_MRDS.A_SOURCE_FILE_CONFIG
|
||||||
|
WHERE A_SOURCE_KEY = 'C2D' AND TABLE_ID LIKE '%MPEC%'
|
||||||
|
))
|
||||||
|
ORDER BY RECEPTION_DATE DESC;
|
||||||
|
|
||||||
|
-- Count registered files per config key
|
||||||
|
PROMPT File registration summary:
|
||||||
|
SELECT
|
||||||
|
CASE WHEN A_SOURCE_FILE_CONFIG_KEY = -1 THEN 'Default (no config)'
|
||||||
|
ELSE 'Config Key: ' || A_SOURCE_FILE_CONFIG_KEY
|
||||||
|
END AS CONFIG_INFO,
|
||||||
|
COUNT(*) AS REGISTERED_FILES
|
||||||
|
FROM CT_MRDS.A_SOURCE_FILE_RECEIVED
|
||||||
|
WHERE RECEPTION_DATE >= SYSDATE - 1 -- Last 24 hours
|
||||||
|
AND (SOURCE_FILE_NAME LIKE '%MPEC_%' OR A_SOURCE_FILE_CONFIG_KEY IN (
|
||||||
|
SELECT A_SOURCE_FILE_CONFIG_KEY
|
||||||
|
FROM CT_MRDS.A_SOURCE_FILE_CONFIG
|
||||||
|
WHERE A_SOURCE_KEY = 'C2D' AND TABLE_ID LIKE '%MPEC%'
|
||||||
|
))
|
||||||
|
GROUP BY A_SOURCE_FILE_CONFIG_KEY
|
||||||
|
ORDER BY A_SOURCE_FILE_CONFIG_KEY;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT 5. Export File Validation Commands
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
PROMPT To validate exported CSV files, use these OCI CLI commands:
|
||||||
|
PROMPT
|
||||||
|
PROMPT # List exported files
|
||||||
|
PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC"
|
||||||
|
PROMPT
|
||||||
|
PROMPT # Check file sizes
|
||||||
|
PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC_ADMIN"
|
||||||
|
PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC_CONTENT"
|
||||||
|
PROMPT oci os object list --bucket-name mrds_data_dev --prefix "DATA/C2D/C2D_MPEC_CONTENT_CRITERION"
|
||||||
|
PROMPT
|
||||||
|
PROMPT # Download sample file for validation
|
||||||
|
PROMPT oci os object get --bucket-name mrds_data_dev --name "DATA/C2D/C2D_MPEC_ADMIN/filename.csv" --file sample.csv
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT 6. Data Quality Checks (Manual)
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
PROMPT Manual verification steps:
|
||||||
|
PROMPT 1. Download sample CSV files from each folder
|
||||||
|
PROMPT 2. Verify CSV header matches template table columns (see the sample query below)
|
||||||
|
PROMPT 3. Check data formats (especially dates) match expectations
|
||||||
|
PROMPT 4. Confirm row counts approximately match source tables
|
||||||
|
PROMPT 5. Validate no empty files were created
|
||||||
|
PROMPT 6. Test loading sample data into external tables
|
||||||
|
PROMPT 7. Verify file registration entries in A_SOURCE_FILE_RECEIVED
|
||||||
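
-- Sketch: the header/column comparison from step 2 above can be automated per table pair
-- (template and source names are the ones used by the MARS-956 export calls).
PROMPT Columns in CT_ET_TEMPLATES.C2D_MPEC_ADMIN missing from OU_C2D.MPEC_ADMIN (expect no rows):
SELECT column_name
  FROM all_tab_columns
 WHERE owner = 'CT_ET_TEMPLATES'
   AND table_name = 'C2D_MPEC_ADMIN'
MINUS
SELECT column_name
  FROM all_tab_columns
 WHERE owner = 'OU_C2D'
   AND table_name = 'MPEC_ADMIN';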
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT 7. Next Steps for ODS Integration
|
||||||
|
PROMPT =========================================================================
|
||||||
|
|
||||||
|
PROMPT After validation success:
|
||||||
|
PROMPT 1. Configure external tables pointing to CSV files
|
||||||
|
PROMPT 2. Test external table queries
|
||||||
|
PROMPT 3. Setup scheduled data refresh processes (if needed)
|
||||||
|
PROMPT 4. Document file locations and access patterns
|
||||||
|
PROMPT 5. Coordinate with REL_02 deployment timing
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT VALIDATION COMPLETED
|
||||||
|
PROMPT =========================================================================
|
||||||
|
PROMPT Validation End:
|
||||||
|
SELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS') AS VALIDATION_END FROM DUAL;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT Review the output above to confirm:
|
||||||
|
PROMPT ✓ Export processes completed without errors
|
||||||
|
PROMPT ✓ Source table row counts are reasonable
|
||||||
|
PROMPT ✓ Template tables exist and have matching structure
|
||||||
|
PROMPT ✓ Exported files registered in A_SOURCE_FILE_RECEIVED table
|
||||||
|
PROMPT ✓ Manual file validation steps are understood
|
||||||
|
PROMPT
|
||||||
|
PROMPT If any issues found, check export logs and re-run specific exports if needed.
|
||||||
|
PROMPT =========================================================================
|
||||||
@@ -0,0 +1,62 @@
|
|||||||
|
-- ===================================================================
|
||||||
|
-- Universal Package Version Verification Script
|
||||||
|
-- ===================================================================
|
||||||
|
-- Purpose: Verify all tracked Oracle packages for code changes (MARS-956)
|
||||||
|
-- Author: Grzegorz Michalski
|
||||||
|
-- Date: 2026-02-11
|
||||||
|
-- Version: 1.0.0
|
||||||
|
--
|
||||||
|
-- USAGE:
|
||||||
|
-- Include at the end of install/rollback scripts: @@verify_packages_version.sql
|
||||||
|
--
|
||||||
|
-- OUTPUT:
|
||||||
|
-- - List of all tracked packages with their current status
|
||||||
|
-- - OK: Package has not changed since last tracking
|
||||||
|
-- - WARNING: Package code changed without version update
|
||||||
|
-- ===================================================================
|
||||||
|
|
||||||
|
SET LINESIZE 200
|
||||||
|
SET PAGESIZE 1000
|
||||||
|
SET FEEDBACK OFF
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT ========================================
|
||||||
|
PROMPT MARS-956: Package Version Verification
|
||||||
|
PROMPT ========================================
|
||||||
|
PROMPT
|
||||||
|
|
||||||
|
COLUMN PACKAGE_OWNER FORMAT A15
|
||||||
|
COLUMN PACKAGE_NAME FORMAT A20
|
||||||
|
COLUMN VERSION FORMAT A10
|
||||||
|
COLUMN STATUS FORMAT A80
|
||||||
|
|
||||||
|
SELECT
|
||||||
|
PACKAGE_OWNER,
|
||||||
|
PACKAGE_NAME,
|
||||||
|
PACKAGE_VERSION AS VERSION,
|
||||||
|
CT_MRDS.ENV_MANAGER.CHECK_PACKAGE_CHANGES(PACKAGE_OWNER, PACKAGE_NAME) AS STATUS
|
||||||
|
FROM (
|
||||||
|
SELECT
|
||||||
|
PACKAGE_OWNER,
|
||||||
|
PACKAGE_NAME,
|
||||||
|
PACKAGE_VERSION,
|
||||||
|
ROW_NUMBER() OVER (PARTITION BY PACKAGE_OWNER, PACKAGE_NAME ORDER BY TRACKING_DATE DESC) AS RN
|
||||||
|
FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING
|
||||||
|
)
|
||||||
|
WHERE RN = 1
|
||||||
|
ORDER BY PACKAGE_OWNER, PACKAGE_NAME;
|
||||||
|
|
||||||
|
PROMPT
|
||||||
|
PROMPT ========================================
|
||||||
|
PROMPT MARS-956: Verification Complete
|
||||||
|
PROMPT ========================================
|
||||||
|
PROMPT
|
||||||
|
PROMPT Legend:
|
||||||
|
PROMPT OK - Package has not changed since last tracking
|
||||||
|
PROMPT WARNING - Package code changed without version update
|
||||||
|
PROMPT
|
||||||
|
PROMPT For detailed hash information, use:
|
||||||
|
PROMPT SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('OWNER', 'PACKAGE') FROM DUAL;
|
||||||
|
PROMPT ========================================
|
||||||
|
|
||||||
|
SET FEEDBACK ON
|
||||||
205
confluence/DATA_EXPORTER_pMaxFileSize_Analysis.md
Normal file
@@ -0,0 +1,205 @@
|
|||||||
|
# Analiza parametru pMaxFileSize dla DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE
|
||||||
|
|
||||||
|
## Pytanie
|
||||||
|
Jaki maksymalnie można ustawić parametr `pMaxFileSize` przy `pParallelDegree => 1` w procedurze `CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE`?
|
||||||
|
|
||||||
|
## Odpowiedź
|
||||||
|
|
||||||
|
### Limity Oracle DBMS_CLOUD.EXPORT_DATA
|
||||||
|
|
||||||
|
**UWAGA**: Komentarz w kodzie źródłowym (`DATA_EXPORTER.pkb`, linia 444-449) zawiera **BŁĘDNĄ** informację o maksymalnym limicie 1GB.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB -- NIEPRAWIDŁOWE!
|
||||||
|
-- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M'
|
||||||
|
-- Using 100MB (104857600) to avoid PGA memory issues with large files
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rzeczywiste limity Oracle** (zweryfikowane testami 2026-02-05):
|
||||||
|
- **Minimum**: 10 MB (10,485,760 bajtów)
|
||||||
|
- **Maksimum**: **4,294,967,295 bajtów** (4 GB - 1 bajt) = **2^32 - 1** (unsigned 32-bit integer max)
|
||||||
|
- **Wartość = 4GB** (4,294,967,296): ORA-01426 numeric overflow
|
||||||
|
|
||||||
|
### Test Environment
|
||||||
|
|
||||||
|
- **Oracle Database**: Oracle AI Database 26ai Enterprise Edition Release 23.26.1.1.0
|
||||||
|
- **PGA Configuration**:
|
||||||
|
- pga_aggregate_target: 3000M (3GB)
|
||||||
|
- pga_aggregate_limit: 6000M (6GB) - hard limit
|
||||||
|
- **Test Dataset**: OU_CSDB.LEGACY_DEBT table (1,014,078 rows)
|
||||||
|
- **OCI Object Storage**: eu-frankfurt-1 region
|
||||||
|
|
||||||
|
### Wartości graniczne
|
||||||
|
|
||||||
|
| Parametr | Wartość (bajty) | Wartość (MB/GB) | Status |
|
||||||
|
|----------|----------------|-----------------|---------|
|
||||||
|
| **Minimum** | 10,485,760 | 10 MB | Oracle minimum |
|
||||||
|
| **Aktualny standard** | 104,857,600 | 100 MB | Używane w kodzie |
|
||||||
|
| **Zalecane bezpieczne (parallel)** | 524,288,000 | 500 MB | Dla pParallelDegree > 1 |
|
||||||
|
| **Zalecane bezpieczne (sequential)** | 1,073,741,824 | 1 GB | Dla pParallelDegree = 1 |
|
||||||
|
| **Maksimum Oracle** | **4,294,967,295** | **~4 GB** | **2^32 - 1 (unsigned 32-bit max)** |
|
||||||
|
| **Błąd numeric overflow** | 4,294,967,296 | 4 GB | ORA-01426 (dokładnie 2^32) |
|
||||||
|
|
||||||
|
### Odpowiedź na pytanie
|
||||||
|
|
||||||
|
**Zalecana maksymalna wartość pMaxFileSize przy pParallelDegree => 1** (techniczne maksimum Oracle to 4,294,967,295 bajtów - patrz wyżej):
|
||||||
|
```sql
|
||||||
|
pMaxFileSize => 1073741824 -- 1 GB (1,073,741,824 bajtów)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Przykład użycia z maksymalną wartością
|
||||||
|
|
||||||
|
```sql
|
||||||
|
BEGIN
|
||||||
|
CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
|
||||||
|
pSchemaName => 'OU_CSDB',
|
||||||
|
pTableName => 'LEGACY_DEBT',
|
||||||
|
pKeyColumnName => 'A_ETL_LOAD_SET_FK',
|
||||||
|
pBucketArea => 'DATA',
|
||||||
|
pFolderName => 'ODS/CSDB/CSDB_DEBT',
|
||||||
|
pMinDate => DATE '2024-01-01',
|
||||||
|
pMaxDate => SYSDATE,
|
||||||
|
pParallelDegree => 1, -- Sequential processing
|
||||||
|
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT',
|
||||||
|
pMaxFileSize => 1073741824 -- 1GB - zalecane maksimum praktyczne (techniczny limit Oracle: 4294967295)
|
||||||
|
);
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rekomendacje
|
||||||
|
|
||||||
|
### Dla pParallelDegree => 1 (przetwarzanie sekwencyjne)
|
||||||
|
|
||||||
|
1. **Maksimum praktyczne**: 1 GB (1,073,741,824 bajtów)
   - Techniczny limit Oracle DBMS_CLOUD.EXPORT_DATA to 4,294,967,295 bajtów (2^32 - 1), patrz sekcja "Rzeczywiste limity Oracle"
   - Wartości powyżej 1GB są akceptowane, ale zwiększają ryzyko przekroczenia limitu PGA (ORA-04036)
|
||||||
|
|
||||||
|
2. **Zalecane wartości w zależności od scenariusza**:
|
||||||
|
- **Małe tabele** (< 1M rekordów): 100 MB (104,857,600) - aktualny standard
|
||||||
|
- **Średnie tabele** (1-10M rekordów): 250 MB (262,144,000)
|
||||||
|
- **Duże tabele** (> 10M rekordów): 500 MB (524,288,000)
|
||||||
|
- **Bardzo duże tabele** z dużą ilością RAM: 1 GB (1,073,741,824)
|
||||||
|
|
||||||
|
3. **Kwestie pamięciowe (PGA)**:
|
||||||
|
- W komentarzu kodu widoczne jest ostrzeżenie: "avoid PGA memory issues"
|
||||||
|
- Przy `pParallelDegree => 1` (brak równoległości), ryzyko błędu ORA-04036 jest niższe
|
||||||
|
- Można bezpiecznie używać wyższych wartości niż przy przetwarzaniu równoległym
|
||||||
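
Przed doborem wartości warto sprawdzić aktualne limity PGA (przykładowe zapytanie; wymaga dostępu do V$PARAMETER):

```sql
SELECT name, value
  FROM v$parameter
 WHERE name IN ('pga_aggregate_target', 'pga_aggregate_limit');
```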
|
|
||||||
|
### Dla pParallelDegree > 1 (przetwarzanie równoległe)
|
||||||
|
|
||||||
|
- **Maksimum techniczne**: nadal 1 GB
|
||||||
|
- **Zalecane**: 100-200 MB z powodu pamięci PGA
|
||||||
|
- **Aktualny standard**: 100 MB (bezpieczna wartość)
|
||||||
|
|
||||||
|
## Różnice względem przetwarzania równoległego
|
||||||
|
|
||||||
|
| Aspekt | pParallelDegree = 1 | pParallelDegree > 1 |
|
||||||
|
|--------|---------------------|---------------------|
|
||||||
|
| **Maksimum Oracle (techniczne)** | 4,294,967,295 bajtów (~4 GB) | 4,294,967,295 bajtów (~4 GB) |
|
||||||
|
| **Zalecane max** | 500 MB - 1 GB | 100-200 MB |
|
||||||
|
| **Ryzyko ORA-04036** | Niskie | Wysokie (wiele procesów) |
|
||||||
|
| **Użycie PGA** | Pojedynczy proces | Wielokrotne procesy |
|
||||||
|
|
||||||
|
## Skrypty testowe
|
||||||
|
|
||||||
|
Przygotowano dwa skrypty testowe:
|
||||||
|
|
||||||
|
### 1. Pełny test (7 scenariuszy)
|
||||||
|
**Plik**: `MARS_Packages/tests/test_maxfilesize_parameter.sql`
|
||||||
|
- Test z wartościami: 10MB, 100MB, 250MB, 500MB, 750MB, 1GB, 1.5GB
|
||||||
|
- Automatyczne czyszczenie testowych plików
|
||||||
|
- Szczegółowe logowanie wyników
|
||||||
|
|
||||||
|
### 2. Szybki test (maksimum)
|
||||||
|
**Plik**: `MARS_Packages/tests/test_maxfilesize_quick.sql`
|
||||||
|
- Jednorazowy test z maksymalną wartością 1GB
|
||||||
|
- Szybka weryfikacja poprawności konfiguracji
|
||||||
|
|
||||||
|
### Uruchomienie testów
|
||||||
|
|
||||||
|
```powershell
|
||||||
|
# Pełny test wszystkich wartości
|
||||||
|
Get-Content "MARS_Packages\tests\test_maxfilesize_parameter.sql" | sql "CT_MRDS/Cloudpass#34@ggmichalski_high"
|
||||||
|
|
||||||
|
# Szybki test maksymalnej wartości
|
||||||
|
Get-Content "MARS_Packages\tests\test_maxfilesize_quick.sql" | sql "CT_MRDS/Cloudpass#34@ggmichalski_high"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Wnioski (zaktualizowane 2026-02-05)
|
||||||
|
|
||||||
|
1. **Maksymalna wartość Oracle** dla `pMaxFileSize` wynosi **4,294,967,295 bajtów** (4 GB - 1 bajt)
|
||||||
|
- To jest **2^32 - 1** = maksymalna wartość unsigned 32-bit integer
|
||||||
|
- Wartość >= 4,294,967,296 (dokładnie 4GB) powoduje ORA-01426 numeric overflow
|
||||||
|
- **Komentarz w kodzie źródłowym o limicie 1GB jest NIEPRAWIDŁOWY**
|
||||||
|
|
||||||
|
2. **Zalecana wartość dla pParallelDegree = 1** (sequential): **1-2 GB**
   - Niskie ryzyko ORA-04036 przy przetwarzaniu sekwencyjnym
   - Teoretycznie można używać wartości do ~4GB dla bardzo dużych tabel
|
||||||
|
|
||||||
|
3. **Zalecana wartość dla pParallelDegree > 1** (parallel): **100-500 MB**
|
||||||
|
- Wysokie ryzyko ORA-04036 przy wielu wątkach i dużych wartościach
|
||||||
|
- Aktualny standard 100MB jest bezpieczny ale konserwatywny
|
||||||
|
|
||||||
|
4. **Weryfikacja testowa** (2026-02-05):
|
||||||
|
- 1GB (1,073,741,824): ✅ DZIAŁA
|
||||||
|
- 2GB (2,147,483,648): ✅ DZIAŁA
|
||||||
|
- 3GB (3,221,225,472): ✅ DZIAŁA
|
||||||
|
- 3.5GB (3,758,096,384): ✅ DZIAŁA
|
||||||
|
- 3.9GB (4,187,593,113): ✅ DZIAŁA
|
||||||
|
- 3.99GB (4,284,481,126): ✅ DZIAŁA
|
||||||
|
- **4GB - 1 bajt (4,294,967,295)**: ✅ DZIAŁA - **MAKSIMUM ORACLE** (2^32 - 1)
|
||||||
|
- **4GB (4,294,967,296)**: ❌ ORA-01426 numeric overflow (2^32)
|
||||||
|
- 5GB+: ❌ ORA-01426 numeric overflow
|
||||||
|
|
||||||
|
5. **Test z pojedynczym wątkiem** (2026-02-05):
|
||||||
|
- pMaxFileSize = 4,294,967,295, pParallelDegree = 1, 60 partycji
|
||||||
|
- Result: ❌ ORA-04036 (PGA memory exceeded - limit 6GB)
|
||||||
|
- Wniosek: Parametr jest akceptowany, ale sekwencyjne przetwarzanie wielu partycji przekracza limit PGA
|
||||||
|
- **Dla 60+ partycji używaj pParallelDegree=4 zamiast 1**
|
||||||
|
|
||||||
|
## Kod źródłowy - fragment kluczowy
|
||||||
|
|
||||||
|
Z pliku `DATA_EXPORTER.pkb`, procedura `EXPORT_SINGLE_PARTITION`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Use json_object() for CSV export with maxfilesize in bytes (Oracle requirement)
|
||||||
|
-- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB
|
||||||
|
-- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M'
|
||||||
|
-- Using 100MB (104857600) to avoid PGA memory issues with large files
|
||||||
|
DBMS_CLOUD.EXPORT_DATA(
|
||||||
|
credential_name => pCredentialName,
|
||||||
|
file_uri_list => vUri,
|
||||||
|
query => vQuery,
|
||||||
|
format => json_object(
|
||||||
|
'type' VALUE 'CSV',
|
||||||
|
'header' VALUE true,
|
||||||
|
'quote' VALUE CHR(34),
|
||||||
|
'delimiter' VALUE ',',
|
||||||
|
'escape' VALUE true,
|
||||||
|
'recorddelimiter' VALUE CHR(13)||CHR(10),
|
||||||
|
'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes
|
||||||
|
)
|
||||||
|
);
|
||||||
|
```
|
||||||
|
## Historia zmian

### 2026-02-05 - Weryfikacja testowa (aktualizacja)
- Test 1GB (1,073,741,824): ✅ Sukces
- Test 2GB (2,147,483,648): ✅ Sukces - komentarz w kodzie BŁĘDNY
- Test 3GB (3,221,225,472): ✅ Sukces
- Test 3.5GB (3,758,096,384): ✅ Sukces
- Test 3.75GB (4,026,531,840): ✅ Sukces
- Test 3.9GB (4,187,593,113): ✅ Sukces
- Test 3.99GB (4,284,481,126): ✅ Sukces
- Test **4GB - 1 bajt (4,294,967,295)**: ✅ Sukces - **MAKSIMUM = 2^32 - 1**
- Test 4GB (4,294,967,296): ❌ ORA-01426 numeric overflow (dokładnie 2^32)
- Test 5GB+: ❌ ORA-01426 numeric overflow
- Test **4GB-1 + parallel=1 + 60 partycji**: ❌ ORA-04036 PGA memory exceeded

**Konkluzja**: Komentarz w `DATA_EXPORTER.pkb` o maksymalnym limicie 1GB był nieprawidłowy. Rzeczywiste maksimum Oracle DBMS_CLOUD.EXPORT_DATA to **4,294,967,295 bajtów** (4 GB - 1 bajt), co odpowiada maksymalnej wartości **unsigned 32-bit integer (2^32 - 1)**. Jednakże, **w środowisku Autonomous Database z limitami PGA** (pga_aggregate_limit=6GB), sekwencyjne przetwarzanie dużej liczby partycji (60+) może przekroczyć limit pamięci nawet z poprawną wartością parametru.

### 2026-02-05 - Weryfikacja wstępna (wynik skorygowany powyżej)
- Test 3GB: ✅ Sukces (wówczas uznane za rzeczywiste maksimum Oracle)
- Test 4GB: ❌ ORA-01426 numeric overflow
- Test 5GB+: ❌ ORA-01426 numeric overflow

**Konkluzja (nieaktualna)**: Wstępnie przyjęto, że rzeczywiste maksimum Oracle DBMS_CLOUD.EXPORT_DATA to **3 GB**; kolejne testy wykazały maksimum 4,294,967,295 bajtów (patrz wyżej).
|
||||||
|
|
||||||
|
### 2026-02-04 - Analiza początkowa
|
||||||
|
Pierwsza wersja dokumentacji oparta na komentarzach w kodzie źródłowym.
|
||||||
205
confluence/additions/DATA_EXPORTER_pMaxFileSize_Analysis.md
Normal file
@@ -0,0 +1,205 @@
|
|||||||
|
# Analiza parametru pMaxFileSize dla DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE
|
||||||
|
|
||||||
|
## Pytanie
|
||||||
|
Jaki maksymalnie można ustawić parametr `pMaxFileSize` przy `pParallelDegree => 1` w procedurze `CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE`?
|
||||||
|
|
||||||
|
## Odpowiedź
|
||||||
|
|
||||||
|
### Limity Oracle DBMS_CLOUD.EXPORT_DATA
|
||||||
|
|
||||||
|
**UWAGA**: Komentarz w kodzie źródłowym (`DATA_EXPORTER.pkb`, linia 444-449) zawiera **BŁĘDNĄ** informację o maksymalnym limicie 1GB.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB -- NIEPRAWIDŁOWE!
|
||||||
|
-- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M'
|
||||||
|
-- Using 100MB (104857600) to avoid PGA memory issues with large files
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rzeczywiste limity Oracle** (zweryfikowane testami 2026-02-05):
|
||||||
|
- **Minimum**: 10 MB (10,485,760 bajtów)
|
||||||
|
- **Maksimum**: **4,294,967,295 bajtów** (4 GB - 1 bajt) = **2^32 - 1** (unsigned 32-bit integer max)
|
||||||
|
- **Wartość = 4GB** (4,294,967,296): ORA-01426 numeric overflow
|
||||||
|
|
||||||
|
### Test Environment
|
||||||
|
|
||||||
|
- **Oracle Database**: Oracle AI Database 26ai Enterprise Edition Release 23.26.1.1.0
|
||||||
|
- **PGA Configuration**:
|
||||||
|
- pga_aggregate_target: 3000M (3GB)
|
||||||
|
- pga_aggregate_limit: 6000M (6GB) - hard limit
|
||||||
|
- **Test Dataset**: OU_CSDB.LEGACY_DEBT table (1,014,078 rows)
|
||||||
|
- **OCI Object Storage**: eu-frankfurt-1 region
|
||||||
|
|
||||||
|
### Wartości graniczne
|
||||||
|
|
||||||
|
| Parametr | Wartość (bajty) | Wartość (MB/GB) | Status |
|
||||||
|
|----------|----------------|-----------------|---------|
|
||||||
|
| **Minimum** | 10,485,760 | 10 MB | Oracle minimum |
|
||||||
|
| **Aktualny standard** | 104,857,600 | 100 MB | Używane w kodzie |
|
||||||
|
| **Zalecane bezpieczne (parallel)** | 524,288,000 | 500 MB | Dla pParallelDegree > 1 |
|
||||||
|
| **Zalecane bezpieczne (sequential)** | 1,073,741,824 | 1 GB | Dla pParallelDegree = 1 |
|
||||||
|
| **Maksimum Oracle** | **4,294,967,295** | **~4 GB** | **2^32 - 1 (unsigned 32-bit max)** |
|
||||||
|
| **Błąd numeric overflow** | 4,294,967,296 | 4 GB | ORA-01426 (dokładnie 2^32) |
|
||||||
|
|
||||||
|
### Odpowiedź na pytanie
|
||||||
|
|
||||||
|
**Zalecana maksymalna wartość pMaxFileSize przy pParallelDegree => 1** (techniczne maksimum Oracle to 4,294,967,295 bajtów - patrz wyżej):
|
||||||
|
```sql
|
||||||
|
pMaxFileSize => 1073741824 -- 1 GB (1,073,741,824 bajtów)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Przykład użycia z maksymalną wartością
|
||||||
|
|
||||||
|
```sql
|
||||||
|
BEGIN
|
||||||
|
CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
|
||||||
|
pSchemaName => 'OU_CSDB',
|
||||||
|
pTableName => 'LEGACY_DEBT',
|
||||||
|
pKeyColumnName => 'A_ETL_LOAD_SET_FK',
|
||||||
|
pBucketArea => 'DATA',
|
||||||
|
pFolderName => 'ODS/CSDB/CSDB_DEBT',
|
||||||
|
pMinDate => DATE '2024-01-01',
|
||||||
|
pMaxDate => SYSDATE,
|
||||||
|
pParallelDegree => 1, -- Sequential processing
|
||||||
|
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT',
|
||||||
|
pMaxFileSize => 1073741824 -- 1GB - zalecane maksimum praktyczne (techniczny limit Oracle: 4294967295)
|
||||||
|
);
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rekomendacje
|
||||||
|
|
||||||
|
### Dla pParallelDegree => 1 (przetwarzanie sekwencyjne)
|
||||||
|
|
||||||
|
1. **Maksimum praktyczne**: 1 GB (1,073,741,824 bajtów)
   - Techniczny limit Oracle DBMS_CLOUD.EXPORT_DATA to 4,294,967,295 bajtów (2^32 - 1), patrz sekcja "Rzeczywiste limity Oracle"
   - Wartości powyżej 1GB są akceptowane, ale zwiększają ryzyko przekroczenia limitu PGA (ORA-04036)
|
||||||
|
|
||||||
|
2. **Zalecane wartości w zależności od scenariusza**:
|
||||||
|
- **Małe tabele** (< 1M rekordów): 100 MB (104,857,600) - aktualny standard
|
||||||
|
- **Średnie tabele** (1-10M rekordów): 250 MB (262,144,000)
|
||||||
|
- **Duże tabele** (> 10M rekordów): 500 MB (524,288,000)
|
||||||
|
- **Bardzo duże tabele** z dużą ilością RAM: 1 GB (1,073,741,824)
|
||||||
|
|
||||||
|
3. **Kwestie pamięciowe (PGA)**:
|
||||||
|
- W komentarzu kodu widoczne jest ostrzeżenie: "avoid PGA memory issues"
|
||||||
|
- Przy `pParallelDegree => 1` (brak równoległości), ryzyko błędu ORA-04036 jest niższe
|
||||||
|
- Można bezpiecznie używać wyższych wartości niż przy przetwarzaniu równoległym
|
||||||
|
|
||||||
|
### Dla pParallelDegree > 1 (przetwarzanie równoległe)
|
||||||
|
|
||||||
|
- **Maksimum techniczne**: nadal 1 GB
|
||||||
|
- **Zalecane**: 100-200 MB z powodu pamięci PGA
|
||||||
|
- **Aktualny standard**: 100 MB (bezpieczna wartość)
|
||||||
|
|
||||||
|
## Różnice względem przetwarzania równoległego
|
||||||
|
|
||||||
|
| Aspekt | pParallelDegree = 1 | pParallelDegree > 1 |
|
||||||
|
|--------|---------------------|---------------------|
|
||||||
|
| **Maksimum Oracle (techniczne)** | 4,294,967,295 bajtów (~4 GB) | 4,294,967,295 bajtów (~4 GB) |
|
||||||
|
| **Zalecane max** | 500 MB - 1 GB | 100-200 MB |
|
||||||
|
| **Ryzyko ORA-04036** | Niskie | Wysokie (wiele procesów) |
|
||||||
|
| **Użycie PGA** | Pojedynczy proces | Wielokrotne procesy |
|
||||||
|
|
||||||
|
## Test scripts

Two test scripts were prepared:

### 1. Full test (7 scenarios)
**File**: `MARS_Packages/tests/test_maxfilesize_parameter.sql`
- Tests with the values: 10 MB, 100 MB, 250 MB, 500 MB, 750 MB, 1 GB, 1.5 GB
- Automatic cleanup of the test files
- Detailed logging of the results

### 2. Quick test (maximum)
**File**: `MARS_Packages/tests/test_maxfilesize_quick.sql`
- A single test with the maximum value of 1 GB
- Quick verification that the configuration is correct

### Running the tests

```powershell
# Full test of all values
Get-Content "MARS_Packages\tests\test_maxfilesize_parameter.sql" | sql "CT_MRDS/Cloudpass#34@ggmichalski_high"

# Quick test of the maximum value
Get-Content "MARS_Packages\tests\test_maxfilesize_quick.sql" | sql "CT_MRDS/Cloudpass#34@ggmichalski_high"
```

## Conclusions (updated 2026-02-05)

1. **The Oracle maximum** for `pMaxFileSize` is **4,294,967,295 bytes** (4 GB - 1 byte)
   - This is **2^32 - 1**, the maximum value of an unsigned 32-bit integer
   - A value >= 4,294,967,296 (exactly 4 GB) raises ORA-01426 numeric overflow
   - **The comment in the source code about a 1 GB limit is INCORRECT**

2. **Recommended value for pParallelDegree = 1** (sequential): **1-2 GB**
   - Low risk of ORA-04036 with sequential processing
   - ~4 GB can be used for very large tables (in theory)

3. **Recommended value for pParallelDegree > 1** (parallel): **100-500 MB**
   - High risk of ORA-04036 with many threads and large values
   - The current 100 MB standard is safe but conservative

4. **Test verification** (2026-02-05):
   - 1 GB (1,073,741,824): ✅ WORKS
   - 2 GB (2,147,483,648): ✅ WORKS
   - 3 GB (3,221,225,472): ✅ WORKS
   - 3.5 GB (3,758,096,384): ✅ WORKS
   - 3.9 GB (4,187,593,113): ✅ WORKS
   - 3.99 GB (4,284,481,126): ✅ WORKS
   - **4 GB - 1 byte (4,294,967,295)**: ✅ WORKS - **ORACLE MAXIMUM** (2^32 - 1)
   - **4 GB (4,294,967,296)**: ❌ ORA-01426 numeric overflow (exactly 2^32)
   - 5 GB+: ❌ ORA-01426 numeric overflow

5. **Single-thread test** (2026-02-05):
   - pMaxFileSize = 4,294,967,295, pParallelDegree = 1, 60 partitions
   - Result: ❌ ORA-04036 (PGA memory exceeded - 6 GB limit)
   - Conclusion: the parameter itself is accepted, but sequential processing of that many partitions exceeds the PGA limit (a query for checking these limits is sketched after this list)
   - **For 60+ partitions use pParallelDegree=4 instead of 1**

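A minimal sketch of a query for checking the PGA limits behind ORA-04036, assuming the user can read `V$PARAMETER` (on Autonomous Database these values are managed by the service):

```sql
-- Show the soft and hard PGA limits that drive ORA-04036
SELECT name, display_value
FROM   v$parameter
WHERE  name IN ('pga_aggregate_target', 'pga_aggregate_limit');
```
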
## Source code - key fragment

From `DATA_EXPORTER.pkb`, procedure `EXPORT_SINGLE_PARTITION`:

```sql
-- Use json_object() for CSV export with maxfilesize in bytes (Oracle requirement)
-- Oracle maxfilesize: min 10MB (10485760), max 1GB (1073741824), default 10MB
-- NOTE: maxfilesize must be NUMBER (bytes), not string like '1000M'
-- Using 100MB (104857600) to avoid PGA memory issues with large files
DBMS_CLOUD.EXPORT_DATA(
    credential_name => pCredentialName,
    file_uri_list   => vUri,
    query           => vQuery,
    format          => json_object(
                           'type' VALUE 'CSV',
                           'header' VALUE true,
                           'quote' VALUE CHR(34),
                           'delimiter' VALUE ',',
                           'escape' VALUE true,
                           'recorddelimiter' VALUE CHR(13)||CHR(10),
                           'maxfilesize' VALUE pMaxFileSize -- Dynamic maxfilesize in bytes
                       )
);
```

### 2026-02-05 - Test verification

- Test 1GB (1,073,741,824): ✅ Success
- Test 2GB (2,147,483,648): ✅ Success - the comment in the code is WRONG
- Test 3GB (3,221,225,472): ✅ Success
- Test 3.5GB (3,758,096,384): ✅ Success
- Test 3.75GB (4,026,531,840): ✅ Success
- Test 3.9GB (4,187,593,113): ✅ Success
- Test 3.99GB (4,284,481,126): ✅ Success
- Test **4GB - 1 byte (4,294,967,295)**: ✅ Success - **MAXIMUM = 2^32 - 1**
- Test 4GB (4,294,967,296): ❌ ORA-01426 numeric overflow (exactly 2^32)
- Test 5GB+: ❌ ORA-01426 numeric overflow
- Test **4GB-1 + parallel=1 + 60 partitions**: ❌ ORA-04036 PGA memory exceeded

**Conclusion**: The comment in `DATA_EXPORTER.pkb` about a 1 GB maximum limit was incorrect. The actual maximum for Oracle DBMS_CLOUD.EXPORT_DATA is **4,294,967,295 bytes** (4 GB - 1 byte), which corresponds to the maximum **unsigned 32-bit integer (2^32 - 1)**. However, **in an Autonomous Database environment with PGA limits** (pga_aggregate_limit=6GB), sequential processing of a large number of partitions (60+) can exceed the memory limit even with a valid parameter value.

### 2026-02-04 - Initial analysis

The first version of the documentation, based on the comments in the source code.

135 confluence/additions/pMaxFileSize_Final_Summary.md Normal file
@@ -0,0 +1,135 @@

# pMaxFileSize Testing - Final Summary

## Objective
Find the maximum practical value for the `pMaxFileSize` parameter in the DATA_EXPORTER package.

## Test Results

### Successful Tests

#### Test 1: pMaxFileSize=400MB with pParallelDegree=12
- **Data Volume**: 240x multiplication (~6.24M rows, 4.05GB)
- **Duration**: 7m 51s
- **Files Generated**: 12 files (11×400MB + 1×394.46MB = 4.68GB total)
- **Status**: ✅ SUCCESS
- **Key Finding**: File splitting works perfectly at the 400MB limit

#### Test 2: pMaxFileSize=1GB with pParallelDegree=10
- **Data Volume**: 300x multiplication (~7.8M rows, 5.07GB)
- **Duration**: 9m 47s
- **Files Generated**: 6 files (5×1GB + 1×873.11MB = 5.85GB total)
- **Status**: ✅ SUCCESS
- **Key Finding**: 1GB files work with moderate parallelism

### Failed Tests

#### Test 3: pMaxFileSize=400MB with pParallelDegree=16
- **Error**: ORA-04036 PGA exceeded
- **Conclusion**: Too many concurrent threads (16×78 partitions)

#### Test 4: pMaxFileSize=1GB with pParallelDegree=1
- **Data Attempts**: 320x, 300x, 240x, 200x
- **Error**: ORA-65114 space usage in container too high
- **Conclusion**: Large data volumes exceed Autonomous DB storage limits for temp tables (see the space-usage query sketched after this section)

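When ORA-65114 appears, a quick look at tablespace usage helps confirm that the container really is the bottleneck. A minimal sketch, assuming the user can query `DBA_TABLESPACE_USAGE_METRICS`:

```sql
-- Check tablespace fill level when ORA-65114 (space usage too high) is raised
SELECT tablespace_name,
       ROUND(used_percent, 1) AS used_pct
FROM   dba_tablespace_usage_metrics
ORDER  BY used_percent DESC;
```
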
## Key Discoveries

### 1. pMaxFileSize ≠ PGA Buffer Size
**CRITICAL**: The file size limit does NOT create proportional memory buffers.
- ✅ `pParallelDegree=10 × pMaxFileSize=1GB` = SUCCESS
- ❌ `pParallelDegree=16 × pMaxFileSize=400MB` = FAILURE

**Conclusion**: The number of concurrent threads (parallelism) impacts PGA more than individual file size.

### 2. Parallelism is Primary PGA Driver
- Higher parallelism = more PGA consumption
- Lower parallelism allows larger file sizes
- Sweet spot: pParallelDegree=10-12 for large exports

### 3. Storage Container Limits
Autonomous Database has strict storage limits for temporary tables:
- 240x multiplication succeeded with pParallelDegree=12
- The same 240x multiplication FAILED when tested alone (container cleanup needed)
- 300x+ multiplication consistently hits the ORA-65114 storage limit

### 4. File Splitting Mechanism Works Perfectly
- Files split at the exact pMaxFileSize limit (within ±0.01MB)
- Example: 11 files @ 400.00MB + 1 file @ 394.46MB
- Hive-style partitioning is maintained correctly (file sizes can be verified with the query sketched below)

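One way to verify the split file sizes directly in Object Storage is `DBMS_CLOUD.LIST_OBJECTS`. A minimal sketch; the credential name and bucket URI below are placeholders, not the project's actual values:

```sql
-- List exported files and their sizes in the target bucket folder
SELECT object_name,
       ROUND(bytes / 1024 / 1024, 2) AS size_mb
FROM   DBMS_CLOUD.LIST_OBJECTS(
           credential_name => 'MY_CREDENTIAL',  -- placeholder credential
           location_uri    => 'https://objectstorage.eu-frankfurt-1.oraclecloud.com/...'  -- bucket folder URI (placeholder)
       )
ORDER BY object_name;
```
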
## Theoretical Maximum

### From Oracle Documentation
- **Maximum file size**: 2^32-1 bytes (~4GB) - limited by VARCHAR2 file naming
- **Practical limit**: Depends on PGA configuration and data volume

### From Testing
| pParallelDegree | pMaxFileSize | Status | Notes |
|-----------------|--------------|--------|-------|
| 16 | 400MB | ❌ | PGA exceeded |
| 12 | 400MB | ✅ | Perfect |
| 10 | 1GB | ✅ | Perfect |
| 1 | 1GB | ❌ | Data volume too large for temp table creation |

**Tested Maximum**: **1GB with pParallelDegree=10**

**Untested Range**: 1GB-4GB (blocked by storage limits for test data creation)

## Production Recommendations

### Conservative (Safest)
```sql
pParallelDegree => 8
pMaxFileSize => 209715200 -- 200MB
```
- Minimal PGA risk
- Fast enough for most use cases
- Works with high partition counts

### Balanced (Recommended)
```sql
pParallelDegree => 10
pMaxFileSize => 419430400 -- 400MB
```
- Good balance of speed and safety
- Proven successful in testing (a full call with these settings is sketched at the end of this section)
- Handles large datasets well

### Performance (Maximum Tested)
```sql
pParallelDegree => 10
pMaxFileSize => 1073741824 -- 1GB
```
- Maximum tested configuration
- Best for very large single-partition exports
- Requires monitoring of PGA usage

### NOT Recommended
```sql
pParallelDegree > 12 -- Risks PGA exceeded errors
pMaxFileSize > 1GB -- Untested, may hit limits
```

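For reference, a sketch of the balanced configuration plugged into an export call. The procedure and parameter names follow the usage examples documented elsewhere in this repository; the schema, table, and folder values are illustrative only:

```sql
BEGIN
    CT_MRDS.DATA_EXPORTER.EXPORT_TABLE_DATA_TO_CSV_BY_DATE(
        pSchemaName     => 'OU_CSDB',            -- illustrative schema
        pTableName      => 'LEGACY_DEBT',        -- illustrative table
        pKeyColumnName  => 'A_ETL_LOAD_SET_FK',
        pBucketArea     => 'DATA',
        pFolderName     => 'ODS/CSDB/CSDB_DEBT',
        pMinDate        => DATE '2024-01-01',
        pMaxDate        => SYSDATE,
        pParallelDegree => 10,                   -- balanced parallelism
        pMaxFileSize    => 419430400             -- 400 MB per file
    );
END;
/
```
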
## Technical Constraints

### PGA Configuration (ggmichalski database)
- `pga_aggregate_target`: 3GB (soft limit)
- `pga_aggregate_limit`: 6GB (hard limit - triggers ORA-04036); current consumption can be watched with the query sketched below

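A minimal sketch for watching PGA consumption while an export runs, assuming the user has access to `V$PGASTAT`:

```sql
-- Current and peak PGA allocation (V$PGASTAT reports these values in bytes)
SELECT name,
       ROUND(value / 1024 / 1024) AS mb
FROM   v$pgastat
WHERE  name IN ('total PGA allocated', 'total PGA inuse', 'maximum PGA allocated');
```
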
### Query Processing PGA Usage
- Large datasets with DATE transformations consume significant PGA
- Query processing memory usage is independent of output file size
- Example: 8.3M rows with TO_CHAR() operations exceeded 6GB PGA

### Storage Limits
- ORA-65114: Space usage in container too high
- Affects temp table creation with large UNION ALL chains
- Cleanup required between large test data creates

## Conclusion

**Maximum Practical pMaxFileSize**: **1GB** (with pParallelDegree=10)

**Optimal Production Configuration**: **400MB** (with pParallelDegree=10-12)

**Key Insight**: **Parallelism matters more than file size** for PGA management. Lower parallelism allows larger files without hitting memory limits.