aktualizacja definicji

This commit is contained in:
Grzegorz Michalski
2026-02-04 14:41:30 +01:00
parent 569c647b6c
commit 19925603fa
6 changed files with 1119 additions and 342 deletions

View File

@@ -9,22 +9,65 @@ AS
**/ **/
-- Package Version Information -- Package Version Information
PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.1.1'; PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.6.3';
PACKAGE_BUILD_DATE CONSTANT VARCHAR2(19) := '2025-12-04 13:10:00'; PACKAGE_BUILD_DATE CONSTANT VARCHAR2(19) := '2026-01-28 19:30:00';
PACKAGE_AUTHOR CONSTANT VARCHAR2(50) := 'MRDS Development Team'; PACKAGE_AUTHOR CONSTANT VARCHAR2(50) := 'MRDS Development Team';
-- Version History (last 3-5 changes) -- Version History (last 3-5 changes)
VERSION_HISTORY CONSTANT VARCHAR2(4000) := VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
'v2.6.3 (2026-01-28): COMPILATION FIX - Resolved ORA-00904 error in EXPORT_PARTITION_PARALLEL. SQLERRM and DBMS_UTILITY.FORMAT_ERROR_BACKTRACE cannot be used directly in SQL UPDATE statements. Now properly assigned to vgMsgTmp variable before UPDATE.' || CHR(10) ||
'v2.6.2 (2026-01-28): CRITICAL FIX - Race condition when multiple exports run simultaneously. Changed DELETE to filter by age (>24h) instead of deleting all COMPLETED chunks. Prevents concurrent sessions from deleting each other chunks. Session-safe cleanup with TASK_NAME filtering. Enables true parallel execution of multiple export jobs.' || CHR(10) ||
'v2.6.1 (2026-01-28): Added DELETE_FAILED_EXPORT_FILE procedure to clean up partial/corrupted files before retry. When partition fails mid-export, partial file is deleted before retry to prevent Oracle from creating _1 suffixed duplicates. Ensures clean retry without orphaned files in OCI bucket.' || CHR(10) ||
'v2.6.0 (2026-01-28): CRITICAL FIX - Added STATUS tracking to A_PARALLEL_EXPORT_CHUNKS table to prevent data duplication on retry. System now restarts ONLY failed partitions instead of re-exporting all data. Added ERROR_MESSAGE and EXPORT_TIMESTAMP columns for better error handling and monitoring. Prevents duplicate file creation when parallel tasks fail (e.g., 22 partitions with 16 threads, 3 failures no longer duplicates 19 successful exports).' || CHR(10) ||
'v2.5.0 (2026-01-26): Added recorddelimiter parameter with CRLF (CHR(13)||CHR(10)) for CSV exports to ensure Windows-compatible line endings. Improves cross-platform compatibility when CSV files are opened in Windows applications (Notepad, Excel).' || CHR(10) ||
'v2.4.0 (2026-01-11): Added pTemplateTableName parameter for per-column date format configuration. Implements dynamic query building with TO_CHAR for each date/timestamp column using FILE_MANAGER.GET_DATE_FORMAT. Supports 3-tier hierarchy: column-specific, template DEFAULT, global fallback. Eliminates single dateformat limitation of DBMS_CLOUD.EXPORT_DATA.' || CHR(10) ||
'v2.3.0 (2025-12-20): Added parallel partition processing using DBMS_PARALLEL_EXECUTE. New pParallelDegree parameter (1-16, default 1) for EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures. Each year/month partition processed in separate thread for improved performance.' || CHR(10) ||
'v2.2.0 (2025-12-19): DRY refactoring - extracted shared helper functions (sanitizeFilename, VALIDATE_TABLE_AND_COLUMNS, GET_PARTITIONS, EXPORT_SINGLE_PARTITION worker procedure). Reduced code duplication by ~400 lines. Prepared architecture for v2.3.0 parallel processing.' || CHR(10) ||
'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY, added consistent column mapping and dynamic column list to EXPORT_TABLE_DATA procedure, enhanced DEBUG logging for all export operations' || CHR(10) || 'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY, added consistent column mapping and dynamic column list to EXPORT_TABLE_DATA procedure, enhanced DEBUG logging for all export operations' || CHR(10) ||
'v2.1.1 (2025-12-04): Fixed JOIN column reference A_WORKFLOW_HISTORY_KEY -> A_ETL_LOAD_SET_KEY' || CHR(10) ||
'v2.1.0 (2025-10-22): Added version tracking and PARTITION_YEAR/PARTITION_MONTH support' || CHR(10) || 'v2.1.0 (2025-10-22): Added version tracking and PARTITION_YEAR/PARTITION_MONTH support' || CHR(10) ||
'v2.0.0 (2025-10-01): Separated export functionality from FILE_MANAGER package' || CHR(10) || 'v2.0.0 (2025-10-01): Separated export functionality from FILE_MANAGER package' || CHR(10);
'v1.0.0 (2025-09-15): Initial implementation within FILE_MANAGER package' || CHR(10);
cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10);
vgMsgTmp VARCHAR2(32000); vgMsgTmp VARCHAR2(32000);
--------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------
-- TYPE DEFINITIONS FOR PARTITION HANDLING
---------------------------------------------------------------------------------------------------------------------------
/**
* Record type for year/month partition information
**/
TYPE partition_rec IS RECORD (
year VARCHAR2(4),  -- year component of the partition (presumably 'YYYY'; confirm against GET_PARTITIONS)
month VARCHAR2(2)  -- month component of the partition (presumably zero-padded 'MM'; confirm against GET_PARTITIONS)
);
/**
* Table type for collection of partition records.
* Nested table: one entry per year/month combination to export.
**/
TYPE partition_tab IS TABLE OF partition_rec;
---------------------------------------------------------------------------------------------------------------------------
-- INTERNAL PARALLEL PROCESSING CALLBACK
---------------------------------------------------------------------------------------------------------------------------
/**
* @name EXPORT_PARTITION_PARALLEL
* @desc Internal callback procedure for DBMS_PARALLEL_EXECUTE.
* Processes single partition (year/month) chunk in parallel task.
* Called by DBMS_PARALLEL_EXECUTE framework for each chunk.
* This procedure is PUBLIC because DBMS_PARALLEL_EXECUTE requires it,
* but should NOT be called directly by external code.
* @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table)
* @param pEndId - Chunk end ID (same as pStartId for single-row chunks)
**/
PROCEDURE EXPORT_PARTITION_PARALLEL (
pStartId IN NUMBER,  -- CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS marking the start of the chunk
pEndId IN NUMBER     -- CHUNK_ID marking the end of the chunk (equals pStartId for single-row chunks)
);
---------------------------------------------------------------------------------------------------------------------------
-- MAIN EXPORT PROCEDURES
--------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------
/** /**
@@ -62,6 +105,7 @@ AS
* Allows specifying custom column list or uses T.* if pColumnList is NULL. * Allows specifying custom column list or uses T.* if pColumnList is NULL.
* Validates that all columns in pColumnList exist in the target table. * Validates that all columns in pColumnList exist in the target table.
* Automatically adds 'T.' prefix to column names in pColumnList. * Automatically adds 'T.' prefix to column names in pColumnList.
* Supports parallel partition processing via pParallelDegree parameter (default 1, range 1-16).
* pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE' * pBucketArea parameter accepts: 'INBOX', 'ODS', 'DATA', 'ARCHIVE'
* @example * @example
* begin * begin
@@ -73,7 +117,8 @@ AS
* pFolderName => 'parquet_exports', * pFolderName => 'parquet_exports',
* pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional * pColumnList => 'COLUMN1, COLUMN2, COLUMN3', -- Optional
* pMinDate => DATE '2024-01-01', * pMinDate => DATE '2024-01-01',
* pMaxDate => SYSDATE * pMaxDate => SYSDATE,
* pParallelDegree => 8 -- Optional, default 1, range 1-16
* ); * );
* end; * end;
**/ **/
@@ -86,6 +131,8 @@ AS
pColumnList IN VARCHAR2 default NULL, pColumnList IN VARCHAR2 default NULL,
pMinDate IN DATE default DATE '1900-01-01', pMinDate IN DATE default DATE '1900-01-01',
pMaxDate IN DATE default SYSDATE, pMaxDate IN DATE default SYSDATE,
pParallelDegree IN NUMBER default 1,
pTemplateTableName IN VARCHAR2 default NULL,
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
); );
@@ -97,6 +144,7 @@ AS
* Creates one CSV file for each year/month combination found in the data. * Creates one CSV file for each year/month combination found in the data.
* Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE, * Uses the same date filtering mechanism with CT_ODS.A_LOAD_HISTORY as EXPORT_TABLE_DATA_BY_DATE,
* but exports to CSV format instead of Parquet. * but exports to CSV format instead of Parquet.
* Supports parallel partition processing via pParallelDegree parameter (1-16).
* File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL) * File naming pattern: {pFileName}_YYYYMM.csv or {TABLENAME}_YYYYMM.csv (if pFileName is NULL)
* @example * @example
* begin * begin
@@ -109,7 +157,8 @@ AS
* pFolderName => 'exports', * pFolderName => 'exports',
* pFileName => 'my_export.csv', * pFileName => 'my_export.csv',
* pMinDate => DATE '2024-01-01', * pMinDate => DATE '2024-01-01',
* pMaxDate => SYSDATE * pMaxDate => SYSDATE,
* pParallelDegree => 8 -- Optional, default 1, range 1-16
* ); * );
* *
* -- With auto-generated filename (based on table name only) * -- With auto-generated filename (based on table name only)
@@ -136,6 +185,9 @@ AS
pColumnList IN VARCHAR2 default NULL, pColumnList IN VARCHAR2 default NULL,
pMinDate IN DATE default DATE '1900-01-01', pMinDate IN DATE default DATE '1900-01-01',
pMaxDate IN DATE default SYSDATE, pMaxDate IN DATE default SYSDATE,
pParallelDegree IN NUMBER default 1,
pTemplateTableName IN VARCHAR2 default NULL,
pMaxFileSize IN NUMBER default 104857600,
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
); );

View File

@@ -39,6 +39,8 @@ AS
Errors(CODE_MOVE_FILE_TO_TRASH_FAILED) := Error_Record(CODE_MOVE_FILE_TO_TRASH_FAILED, MSG_MOVE_FILE_TO_TRASH_FAILED); -- -20032 Errors(CODE_MOVE_FILE_TO_TRASH_FAILED) := Error_Record(CODE_MOVE_FILE_TO_TRASH_FAILED, MSG_MOVE_FILE_TO_TRASH_FAILED); -- -20032
Errors(CODE_DROP_EXPORTED_FILES_FAILED) := Error_Record(CODE_DROP_EXPORTED_FILES_FAILED, MSG_DROP_EXPORTED_FILES_FAILED); -- -20033 Errors(CODE_DROP_EXPORTED_FILES_FAILED) := Error_Record(CODE_DROP_EXPORTED_FILES_FAILED, MSG_DROP_EXPORTED_FILES_FAILED); -- -20033
Errors(CODE_INVALID_BUCKET_AREA) := Error_Record(CODE_INVALID_BUCKET_AREA, MSG_INVALID_BUCKET_AREA); -- -20034 Errors(CODE_INVALID_BUCKET_AREA) := Error_Record(CODE_INVALID_BUCKET_AREA, MSG_INVALID_BUCKET_AREA); -- -20034
Errors(CODE_INVALID_PARALLEL_DEGREE) := Error_Record(CODE_INVALID_PARALLEL_DEGREE, MSG_INVALID_PARALLEL_DEGREE); -- -20110
Errors(CODE_PARALLEL_EXECUTION_FAILED) := Error_Record(CODE_PARALLEL_EXECUTION_FAILED, MSG_PARALLEL_EXECUTION_FAILED); -- -20111
Errors(CODE_UNKNOWN) := Error_Record(CODE_UNKNOWN, MSG_UNKNOWN); -- -20999 Errors(CODE_UNKNOWN) := Error_Record(CODE_UNKNOWN, MSG_UNKNOWN); -- -20999
@@ -1163,3 +1165,7 @@ BEGIN
INIT_VARIABLES(pEnv => gvEnv); INIT_VARIABLES(pEnv => gvEnv);
END ENV_MANAGER; END ENV_MANAGER;
/
/

View File

@@ -17,12 +17,13 @@ AS
**/ **/
-- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH)
PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.1.0'; PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.2.0';
PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-10-22 20:57:00'; PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-12-20 10:00:00';
PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski';
-- Version History (Latest changes first) -- Version History (Latest changes first)
VERSION_HISTORY CONSTANT VARCHAR2(4000) := VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
'3.2.0 (2025-12-20): Added error codes for parallel execution support (CODE_INVALID_PARALLEL_DEGREE -20110, CODE_PARALLEL_EXECUTION_FAILED -20111)' || CHR(13)||CHR(10) ||
'3.1.0 (2025-10-22): Added package hash tracking and automatic change detection system (SHA256 hashing)' || CHR(13)||CHR(10) || '3.1.0 (2025-10-22): Added package hash tracking and automatic change detection system (SHA256 hashing)' || CHR(13)||CHR(10) ||
'3.0.0 (2025-10-22): Added package versioning system with centralized version management functions' || CHR(13)||CHR(10) || '3.0.0 (2025-10-22): Added package versioning system with centralized version management functions' || CHR(13)||CHR(10) ||
'2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function for comprehensive CSV validation analysis' || CHR(13)||CHR(10) || '2.1.0 (2025-10-15): Added ANALYZE_VALIDATION_ERRORS function for comprehensive CSV validation analysis' || CHR(13)||CHR(10) ||
@@ -296,6 +297,18 @@ AS
PRAGMA EXCEPTION_INIT( ERR_INVALID_BUCKET_AREA PRAGMA EXCEPTION_INIT( ERR_INVALID_BUCKET_AREA
,CODE_INVALID_BUCKET_AREA); ,CODE_INVALID_BUCKET_AREA);
ERR_INVALID_PARALLEL_DEGREE EXCEPTION;
CODE_INVALID_PARALLEL_DEGREE CONSTANT PLS_INTEGER := -20110;
MSG_INVALID_PARALLEL_DEGREE VARCHAR2(4000) := 'Invalid parallel degree parameter. Must be between 1 and 16';
PRAGMA EXCEPTION_INIT( ERR_INVALID_PARALLEL_DEGREE
,CODE_INVALID_PARALLEL_DEGREE);
ERR_PARALLEL_EXECUTION_FAILED EXCEPTION;
CODE_PARALLEL_EXECUTION_FAILED CONSTANT PLS_INTEGER := -20111;
MSG_PARALLEL_EXECUTION_FAILED VARCHAR2(4000) := 'Parallel execution failed';
PRAGMA EXCEPTION_INIT( ERR_PARALLEL_EXECUTION_FAILED
,CODE_PARALLEL_EXECUTION_FAILED);
ERR_UNKNOWN EXCEPTION; ERR_UNKNOWN EXCEPTION;
CODE_UNKNOWN CONSTANT PLS_INTEGER := -20999; CODE_UNKNOWN CONSTANT PLS_INTEGER := -20999;
MSG_UNKNOWN VARCHAR2(4000) := 'Unknown Error Occured'; MSG_UNKNOWN VARCHAR2(4000) := 'Unknown Error Occured';
@@ -609,3 +622,4 @@ AS
) RETURN VARCHAR2; ) RETURN VARCHAR2;
END ENV_MANAGER; END ENV_MANAGER;
/

View File

@@ -1,6 +1,54 @@
create or replace PACKAGE BODY CT_MRDS.FILE_ARCHIVER create or replace PACKAGE BODY CT_MRDS.FILE_ARCHIVER
AS AS
----------------------------------------------------------------------------------------------------
-- PRIVATE FUNCTION: GET_ARCHIVAL_WHERE_CLAUSE
----------------------------------------------------------------------------------------------------
/**
* @name GET_ARCHIVAL_WHERE_CLAUSE
* @desc Private function that generates WHERE clause based on ARCHIVAL_STRATEGY configuration.
* Supports four strategies: THRESHOLD_BASED, CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID.
* @param pSourceFileConfig - Source file configuration record with ARCHIVAL_STRATEGY
* @return VARCHAR2 - WHERE clause for filtering archival candidates
**/
FUNCTION GET_ARCHIVAL_WHERE_CLAUSE(
    pSourceFileConfig IN CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE
) RETURN VARCHAR2
IS
    -- Assembled predicate fragment; callers splice it after 'where ' in dynamic SQL.
    vWhereClause VARCHAR2(4000);
BEGIN
    -- Simple CASE on the configured strategy; a NULL strategy matches no
    -- WHEN branch and falls through to the ELSE error, which is intended.
    CASE pSourceFileConfig.ARCHIVAL_STRATEGY
        -- Legacy threshold-based strategy (backward compatible):
        -- archive rows older than DAYS_FOR_ARCHIVE_THRESHOLD days.
        -- NOTE(review): a NULL DAYS_FOR_ARCHIVE_THRESHOLD would concatenate to
        -- 'extract(...) > ' and produce invalid SQL at execution time — confirm
        -- the column is NOT NULL or validated upstream.
        WHEN 'THRESHOLD_BASED' THEN
            vWhereClause := 'extract(day from (systimestamp - workflow_start)) > ' || pSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD;
        -- Archive all data except the current calendar month.
        WHEN 'CURRENT_MONTH_ONLY' THEN
            vWhereClause := 'TRUNC(workflow_start, ''MM'') < TRUNC(SYSDATE, ''MM'')';
        -- Archive only data older than MINIMUM_AGE_MONTHS whole months.
        WHEN 'MINIMUM_AGE_MONTHS' THEN
            IF pSourceFileConfig.MINIMUM_AGE_MONTHS IS NULL THEN
                RAISE_APPLICATION_ERROR(-20001, 'MINIMUM_AGE_MONTHS must be configured for MINIMUM_AGE_MONTHS strategy');
            END IF;
            vWhereClause := 'workflow_start < ADD_MONTHS(TRUNC(SYSDATE, ''MM''), -' || pSourceFileConfig.MINIMUM_AGE_MONTHS || ')';
        -- Hybrid: current-month exclusion AND minimum-age requirement.
        WHEN 'HYBRID' THEN
            IF pSourceFileConfig.MINIMUM_AGE_MONTHS IS NULL THEN
                RAISE_APPLICATION_ERROR(-20001, 'MINIMUM_AGE_MONTHS must be configured for HYBRID strategy');
            END IF;
            vWhereClause := 'TRUNC(workflow_start, ''MM'') < TRUNC(SYSDATE, ''MM'') ' ||
                            'AND workflow_start < ADD_MONTHS(TRUNC(SYSDATE, ''MM''), -' || pSourceFileConfig.MINIMUM_AGE_MONTHS || ')';
        ELSE
            RAISE_APPLICATION_ERROR(-20002, 'Invalid ARCHIVAL_STRATEGY: ' || pSourceFileConfig.ARCHIVAL_STRATEGY);
    END CASE;
    RETURN vWhereClause;
END GET_ARCHIVAL_WHERE_CLAUSE;
---------------------------------------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------
FUNCTION GET_TABLE_STAT(pSourceFileConfigKey IN NUMBER) FUNCTION GET_TABLE_STAT(pSourceFileConfigKey IN NUMBER)
@@ -85,6 +133,8 @@ AS
if LENGTH(vArchivalTriggeredBy)>0 THEN if LENGTH(vArchivalTriggeredBy)>0 THEN
ENV_MANAGER.LOG_PROCESS_EVENT('Archival Triggered By: '||vArchivalTriggeredBy,'INFO'); ENV_MANAGER.LOG_PROCESS_EVENT('Archival Triggered By: '||vArchivalTriggeredBy,'INFO');
vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS'; vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS';
-- Use strategy-based WHERE clause (MARS-828)
vQuery := ' vQuery := '
select t_filename( select t_filename(
file$name file$name
@@ -96,7 +146,7 @@ AS
from '||vTableName||' s from '||vTableName||' s
join CT_MRDS.a_workflow_history h join CT_MRDS.a_workflow_history h
on s.a_workflow_history_key = h.a_workflow_history_key on s.a_workflow_history_key = h.a_workflow_history_key
where extract(day from (systimestamp - workflow_start)) > '||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD where ' || GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig)
; ;
-- Get all files that will be archived into "vfiles" collection ("regular data files") -- Get all files that will be archived into "vfiles" collection ("regular data files")
@@ -337,6 +387,7 @@ AS
vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE; vSourceFileConfig CT_MRDS.A_SOURCE_FILE_CONFIG%ROWTYPE;
vTableName VARCHAR2(200); vTableName VARCHAR2(200);
vQuery VARCHAR2(32000); vQuery VARCHAR2(32000);
vWhereClause VARCHAR2(4000);
BEGIN BEGIN
vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL'))); vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL')));
ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters); ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
@@ -344,6 +395,12 @@ AS
vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS'; vTableName := DBMS_ASSERT.SCHEMA_NAME(vSourceFileConfig.ODS_SCHEMA_NAME) || '.'||vSourceFileConfig.A_SOURCE_KEY||'_'||DBMS_ASSERT.simple_sql_name(vSourceFileConfig.TABLE_ID)||'_ODS';
ENV_MANAGER.LOG_PROCESS_EVENT('vTableName','DEBUG',vTableName); ENV_MANAGER.LOG_PROCESS_EVENT('vTableName','DEBUG',vTableName);
-- Get WHERE clause based on archival strategy (MARS-828)
vWhereClause := GET_ARCHIVAL_WHERE_CLAUSE(vSourceFileConfig);
ENV_MANAGER.LOG_PROCESS_EVENT('vWhereClause','DEBUG',vWhereClause);
-- Use strategy-based WHERE clause for statistics (MARS-828)
vQuery := vQuery :=
'with tmp as ( 'with tmp as (
select select
@@ -367,11 +424,11 @@ AS
,'||pSourceFileConfigKey||' as A_SOURCE_FILE_CONFIG_KEY ,'||pSourceFileConfigKey||' as A_SOURCE_FILE_CONFIG_KEY
,'''||vTableName||''' as TABLE_NAME ,'''||vTableName||''' as TABLE_NAME
,count(*) as FILE_COUNT ,count(*) as FILE_COUNT
,sum(case when extract(day from (systimestamp - workflow_start)) > '||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' then 1 else 0 end) as OLD_FILE_COUNT ,sum(case when ' || vWhereClause || ' then 1 else 0 end) as OLD_FILE_COUNT
,sum (row_count_per_file) as ROW_COUNT ,sum (row_count_per_file) as ROW_COUNT
,sum(case when extract(day from (systimestamp - workflow_start)) > '||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' then row_count_per_file else 0 end) as OLD_ROW_COUNT ,sum(case when ' || vWhereClause || ' then row_count_per_file else 0 end) as OLD_ROW_COUNT
,sum(r.bytes) as BYTES ,sum(r.bytes) as BYTES
,sum(case when extract(day from (systimestamp - workflow_start)) > '||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' then r.bytes else 0 end) as OLD_BYTES ,sum(case when ' || vWhereClause || ' then r.bytes else 0 end) as OLD_BYTES
,'||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' as DAYS_FOR_ARCHIVE_THRESHOLD ,'||vSourceFileConfig.DAYS_FOR_ARCHIVE_THRESHOLD||' as DAYS_FOR_ARCHIVE_THRESHOLD
,systimestamp as CREATED ,systimestamp as CREATED
from tmp_gr t from tmp_gr t
@@ -438,6 +495,48 @@ AS
---------------------------------------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------
/**
* @name ARCHIVE_TABLE_DATA (function overload)
* @desc Wraps the ARCHIVE_TABLE_DATA procedure and returns a status code
*       for Python library integration: 0 on success, the Oracle error
*       code (negative) on failure. Never propagates exceptions.
* @param pSourceFileConfigKey - Source file configuration key
* @return PLS_INTEGER - 0 (success) or error code
**/
FUNCTION ARCHIVE_TABLE_DATA (
    pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
) RETURN PLS_INTEGER
IS
    vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
    vErrorCode  PLS_INTEGER;
BEGIN
    vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL')));
    ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
    ----
    ARCHIVE_TABLE_DATA(pSourceFileConfigKey => pSourceFileConfigKey);
    ----
    ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters);
    -- Return 0 explicitly: outside a handler SQLCODE reflects the most recent
    -- raised exception anywhere in the call (even one handled and recovered
    -- inside the invoked procedure), so 'RETURN SQLCODE' here could report a
    -- spurious error on a successful run.
    RETURN 0;
EXCEPTION
    WHEN OTHERS THEN
        -- Capture the code first: GET_ERROR_STACK / LOG_PROCESS_EVENT may
        -- raise and handle exceptions internally, which would change SQLCODE
        -- before the final RETURN.
        vErrorCode := SQLCODE;
        ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> vErrorCode), 'ERROR', vParameters);
        RETURN vErrorCode;
END ARCHIVE_TABLE_DATA;
----------------------------------------------------------------------------------------------------
/**
* @name GATHER_TABLE_STAT (function overload)
* @desc Wraps the GATHER_TABLE_STAT procedure and returns a status code
*       for Python library integration: 0 on success, the Oracle error
*       code (negative) on failure. Never propagates exceptions.
* @param pSourceFileConfigKey - Source file configuration key
* @return PLS_INTEGER - 0 (success) or error code
**/
FUNCTION GATHER_TABLE_STAT (
    pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
) RETURN PLS_INTEGER
IS
    vParameters CT_MRDS.A_PROCESS_LOG.PROCEDURE_PARAMETERS%TYPE;
    vErrorCode  PLS_INTEGER;
BEGIN
    vParameters := ENV_MANAGER.FORMAT_PARAMETERS(SYS.ODCIVARCHAR2LIST('pSourceFileConfigKey => '||nvl(to_char(pSourceFileConfigKey), 'NULL')));
    ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
    ----
    GATHER_TABLE_STAT(pSourceFileConfigKey => pSourceFileConfigKey);
    ----
    ENV_MANAGER.LOG_PROCESS_EVENT('End','INFO',vParameters);
    -- Return 0 explicitly: outside a handler SQLCODE reflects the most recent
    -- raised exception anywhere in the call (even one handled and recovered
    -- inside the invoked procedure), so 'RETURN SQLCODE' here could report a
    -- spurious error on a successful run.
    RETURN 0;
EXCEPTION
    WHEN OTHERS THEN
        -- Capture the code first: GET_ERROR_STACK / LOG_PROCESS_EVENT may
        -- raise and handle exceptions internally, which would change SQLCODE
        -- before the final RETURN.
        vErrorCode := SQLCODE;
        ENV_MANAGER.LOG_PROCESS_EVENT(ENV_MANAGER.GET_ERROR_STACK(pFormat => 'TABLE', pCode=> vErrorCode), 'ERROR', vParameters);
        RETURN vErrorCode;
END GATHER_TABLE_STAT;
----------------------------------------------------------------------------------------------------
END; END;
/ /

View File

@@ -17,12 +17,14 @@ AS
**/ **/
-- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH) -- Package Version Information (Semantic Versioning: MAJOR.MINOR.PATCH)
PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.0.0'; PACKAGE_VERSION CONSTANT VARCHAR2(10) := '3.1.0';
PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2025-10-22 16:45:00'; PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-01-29 21:00:00';
PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski';
-- Version History (Latest changes first) -- Version History (Latest changes first)
VERSION_HISTORY CONSTANT VARCHAR2(4000) := VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
'3.1.0 (2026-01-29): Added function overloads for ARCHIVE_TABLE_DATA and GATHER_TABLE_STAT returning SQLCODE for Python library integration' || CHR(13)||CHR(10) ||
'3.0.0 (2026-01-27): MARS-828 - Added flexible archival strategies (CURRENT_MONTH_ONLY, MINIMUM_AGE_MONTHS, HYBRID) via ARCHIVAL_STRATEGY configuration' || CHR(13)||CHR(10) ||
'2.0.0 (2025-10-22): Added package versioning system using centralized ENV_MANAGER functions' || CHR(13)||CHR(10) || '2.0.0 (2025-10-22): Added package versioning system using centralized ENV_MANAGER functions' || CHR(13)||CHR(10) ||
'1.5.0 (2025-10-18): Enhanced ARCHIVE_TABLE_DATA with Hive-style partitioning support' || CHR(13)||CHR(10) || '1.5.0 (2025-10-18): Enhanced ARCHIVE_TABLE_DATA with Hive-style partitioning support' || CHR(13)||CHR(10) ||
'1.0.0 (2025-09-15): Initial release with table archival and statistics gathering'; '1.0.0 (2025-09-15): Initial release with table archival and statistics gathering';
@@ -39,6 +41,18 @@ AS
pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
); );
/**
* @name ARCHIVE_TABLE_DATA
* @desc Function overload for ARCHIVE_TABLE_DATA procedure.
* Returns SQLCODE for Python library integration.
* Calls the main ARCHIVE_TABLE_DATA procedure and captures execution result.
* @example SELECT FILE_ARCHIVER.ARCHIVE_TABLE_DATA(pSourceFileConfigKey => 123) FROM DUAL;
* @ex_rslt 0 (success) or error code
**/
FUNCTION ARCHIVE_TABLE_DATA (
pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
) RETURN PLS_INTEGER;
/** /**
@@ -50,6 +64,18 @@ AS
pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
); );
/**
* @name GATHER_TABLE_STAT
* @desc Function overload for GATHER_TABLE_STAT procedure.
* Returns SQLCODE for Python library integration.
* Calls the main GATHER_TABLE_STAT procedure and captures execution result.
* @example SELECT FILE_ARCHIVER.GATHER_TABLE_STAT(pSourceFileConfigKey => 123) FROM DUAL;
* @ex_rslt 0 (success) or error code
**/
FUNCTION GATHER_TABLE_STAT (
pSourceFileConfigKey IN CT_MRDS.A_SOURCE_FILE_CONFIG.A_SOURCE_FILE_CONFIG_KEY%TYPE
) RETURN PLS_INTEGER;
--------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------
-- PACKAGE VERSION MANAGEMENT FUNCTIONS -- PACKAGE VERSION MANAGEMENT FUNCTIONS
--------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------