Compare commits

...

7 Commits

16 changed files with 99 additions and 910 deletions

View File

@@ -4,7 +4,7 @@
-- Purpose:
-- 1. Rename column ARCH_FILE_NAME to ARCH_PATH
-- 2. Add new column PROCESS_NAME VARCHAR2(200)
-- Author: System Generated
-- Author: Grzegorz Michalski
-- Date: 2026-02-13
-- ====================================================================

View File

@@ -4,7 +4,7 @@
-- Purpose:
-- 1. Rename column ARCH_PATH back to ARCH_FILE_NAME
-- 2. Remove column PROCESS_NAME
-- Author: System Generated
-- Author: Grzegorz Michalski
-- Date: 2026-02-13
-- ====================================================================

View File

@@ -43,6 +43,7 @@ CREATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS (
FILE_BASE_NAME VARCHAR2(1000),
TEMPLATE_TABLE_NAME VARCHAR2(200),
MAX_FILE_SIZE NUMBER DEFAULT 104857600 NOT NULL,
JOB_CLASS VARCHAR2(128),
STATUS VARCHAR2(30) DEFAULT 'PENDING' NOT NULL,
ERROR_MESSAGE VARCHAR2(4000),
EXPORT_TIMESTAMP TIMESTAMP,
@@ -69,6 +70,7 @@ COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FORMAT_TYPE IS 'Export format
COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.FILE_BASE_NAME IS 'Base filename for CSV exports (NULL for Parquet)';
COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.TEMPLATE_TABLE_NAME IS 'Template table name for per-column date format configuration (e.g., CT_ET_TEMPLATES.TABLE_NAME)';
COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.MAX_FILE_SIZE IS 'Maximum file size in bytes for CSV exports only (e.g., 104857600 = 100MB, 1073741824 = 1GB) - default 100MB (104857600). NOTE: Not applicable for PARQUET format (Oracle limitation)';
COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.JOB_CLASS IS 'Oracle Scheduler job class name for resource management (e.g., ''high'', ''DEFAULT_JOB_CLASS'') - NULL uses default scheduler priority';
COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.STATUS IS 'Chunk processing status: PENDING (not started), PROCESSING (in progress), COMPLETED (success), FAILED (error) - allows retry of failed partitions only';
COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.ERROR_MESSAGE IS 'Error message if chunk processing failed (STATUS = FAILED)';
COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.EXPORT_TIMESTAMP IS 'Timestamp when chunk export was completed (STATUS = COMPLETED)';

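The new JOB_CLASS column only stores the name of an Oracle Scheduler job class; the class itself must already exist when chunks are executed. A minimal sketch of how such a class could be set up (the class name 'high' matches the export scripts below, but the consumer group name and comment are assumptions, not part of this change):

```sql
-- Hypothetical one-time setup: create the 'high' job class referenced by JOB_CLASS
-- (requires the MANAGE SCHEDULER privilege; the consumer group name is an assumption)
BEGIN
  DBMS_SCHEDULER.CREATE_JOB_CLASS(
    job_class_name          => 'high',
    resource_consumer_group => 'HIGH_PRIORITY_GROUP',
    comments                => 'High-priority job class for parallel export chunks'
  );
END;
/
```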
View File

@@ -501,6 +501,7 @@ AS
vFormat VARCHAR2(20);
vFileBaseName VARCHAR2(1000);
vMaxFileSize NUMBER;
vJobClass VARCHAR2(128);
vParameters VARCHAR2(4000);
BEGIN
-- Retrieve chunk context from global temporary table
@@ -518,7 +519,8 @@ AS
CREDENTIAL_NAME,
FORMAT_TYPE,
FILE_BASE_NAME,
MAX_FILE_SIZE
MAX_FILE_SIZE,
JOB_CLASS
INTO
vYear,
vMonth,
@@ -533,7 +535,8 @@ AS
vCredentialName,
vFormat,
vFileBaseName,
vMaxFileSize
vMaxFileSize,
vJobClass
FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS
WHERE CHUNK_ID = pStartId;
@@ -953,6 +956,7 @@ AS
pMaxDate IN DATE default SYSDATE,
pParallelDegree IN NUMBER default 1,
pTemplateTableName IN VARCHAR2 default NULL,
pJobClass IN VARCHAR2 default NULL,
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
)
IS
@@ -976,6 +980,7 @@ AS
,'pMaxDate => '''||nvl(TO_CHAR(pMaxDate, 'YYYY-MM-DD HH24:MI:SS'), 'NULL')||''''
,'pParallelDegree => '''||nvl(TO_CHAR(pParallelDegree), 'NULL')||''''
,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||''''
,'pJobClass => '''||nvl(pJobClass, 'NULL')||''''
,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||''''
));
ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
@@ -1047,22 +1052,7 @@ AS
vChunkId NUMBER;
BEGIN
ENV_MANAGER.LOG_PROCESS_EVENT('Using parallel processing with ' || pParallelDegree || ' threads', 'INFO', vParameters);
-- Clean up old completed chunks (>24 hours) to prevent table bloat
-- CRITICAL: Do NOT delete chunks from other active sessions (same-day tasks)
-- This prevents race conditions when multiple exports run simultaneously
DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS
WHERE STATUS = 'COMPLETED'
AND CREATED_DATE < SYSTIMESTAMP - INTERVAL '1' DAY;
COMMIT;
ENV_MANAGER.LOG_PROCESS_EVENT('Cleared old COMPLETED chunks (>24h). Active session chunks preserved.', 'DEBUG', vParameters);
-- This prevents re-exporting successfully completed partitions
DELETE FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'COMPLETED';
COMMIT;
ENV_MANAGER.LOG_PROCESS_EVENT('Cleared COMPLETED chunks. FAILED chunks retained for retry.', 'DEBUG', vParameters);
-- Populate chunks table (insert new chunks, preserve FAILED chunks for retry)
FOR i IN 1 .. vPartitions.COUNT LOOP
MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t
@@ -1071,10 +1061,10 @@ AS
WHEN NOT MATCHED THEN
INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME,
BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE,
CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS)
CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, JOB_CLASS, STATUS)
VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName,
vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate,
pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, 'PENDING')
pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, pJobClass, 'PENDING')
WHEN MATCHED THEN
UPDATE SET TASK_NAME = vTaskName,
STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END,
@@ -1106,14 +1096,24 @@ AS
);
-- Execute task in parallel
ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel task: ' || vTaskName, 'DEBUG', vParameters);
ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel task: ' || vTaskName || CASE WHEN pJobClass IS NOT NULL THEN ' with job class: ' || pJobClass ELSE '' END, 'DEBUG', vParameters);
DBMS_PARALLEL_EXECUTE.RUN_TASK(
task_name => vTaskName,
sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;',
language_flag => DBMS_SQL.NATIVE,
parallel_level => pParallelDegree
);
IF pJobClass IS NOT NULL THEN
DBMS_PARALLEL_EXECUTE.RUN_TASK(
task_name => vTaskName,
sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;',
language_flag => DBMS_SQL.NATIVE,
parallel_level => pParallelDegree,
job_class => pJobClass
);
ELSE
DBMS_PARALLEL_EXECUTE.RUN_TASK(
task_name => vTaskName,
sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;',
language_flag => DBMS_SQL.NATIVE,
parallel_level => pParallelDegree
);
END IF;
-- Check for errors
DECLARE
@@ -1218,6 +1218,7 @@ AS
pMaxFileSize IN NUMBER default 104857600,
pRegisterExport IN BOOLEAN default FALSE,
pProcessName IN VARCHAR2 default 'DATA_EXPORTER',
pJobClass IN VARCHAR2 default NULL,
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
)
IS
@@ -1255,6 +1256,7 @@ AS
,'pTemplateTableName => '''||nvl(pTemplateTableName, 'NULL')||''''
,'pMaxFileSize => '''||nvl(TO_CHAR(pMaxFileSize), 'NULL')||''''
,'pRegisterExport => '''||CASE WHEN pRegisterExport THEN 'TRUE' ELSE 'FALSE' END||''''
,'pJobClass => '''||nvl(pJobClass, 'NULL')||''''
,'pCredentialName => '''||nvl(pCredentialName, 'NULL')||''''
));
ENV_MANAGER.LOG_PROCESS_EVENT('Start','INFO', vParameters);
@@ -1363,10 +1365,10 @@ AS
WHEN NOT MATCHED THEN
INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME,
BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE,
CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, STATUS)
CREDENTIAL_NAME, FORMAT_TYPE, FILE_BASE_NAME, TEMPLATE_TABLE_NAME, MAX_FILE_SIZE, JOB_CLASS, STATUS)
VALUES (i, vTaskName, vPartitions(i).year, vPartitions(i).month, vSchemaName, vTableName, vKeyColumnName,
vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate,
pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, 'PENDING')
pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, pJobClass, 'PENDING')
WHEN MATCHED THEN
UPDATE SET TASK_NAME = vTaskName,
STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END,
@@ -1398,14 +1400,24 @@ AS
);
-- Execute task in parallel
ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel CSV export task: ' || vTaskName, 'DEBUG', vParameters);
ENV_MANAGER.LOG_PROCESS_EVENT('Executing parallel CSV export task: ' || vTaskName || CASE WHEN pJobClass IS NOT NULL THEN ' with job class: ' || pJobClass ELSE '' END, 'DEBUG', vParameters);
DBMS_PARALLEL_EXECUTE.RUN_TASK(
task_name => vTaskName,
sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;',
language_flag => DBMS_SQL.NATIVE,
parallel_level => pParallelDegree
);
IF pJobClass IS NOT NULL THEN
DBMS_PARALLEL_EXECUTE.RUN_TASK(
task_name => vTaskName,
sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;',
language_flag => DBMS_SQL.NATIVE,
parallel_level => pParallelDegree,
job_class => pJobClass
);
ELSE
DBMS_PARALLEL_EXECUTE.RUN_TASK(
task_name => vTaskName,
sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;',
language_flag => DBMS_SQL.NATIVE,
parallel_level => pParallelDegree
);
END IF;
-- Check for errors
DECLARE

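Functionally the change only decides whether the RUN_TASK call includes the job_class parameter, so the quickest way to confirm that a run picked up the new setting is to look at the chunk table itself. A small check using only the table and columns defined in the DDL above:

```sql
-- Summarise chunk outcomes per job class after (or during) a parallel export
SELECT JOB_CLASS,
       STATUS,
       COUNT(*)           AS CHUNKS,
       MIN(ERROR_MESSAGE) AS SAMPLE_ERROR
  FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS
 GROUP BY JOB_CLASS, STATUS
 ORDER BY JOB_CLASS, STATUS;
```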
View File

@@ -9,17 +9,17 @@ AS
**/
-- Package Version Information
PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.10.0';
PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-13 16:30:00';
PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.11.0';
PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-18 10:00:00';
PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski';
-- Version History (last 3-5 changes)
VERSION_HISTORY CONSTANT VARCHAR2(4000) :=
'v2.11.0 (2026-02-18): Added pJobClass parameter to EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE for Oracle Scheduler job class support (resource/priority management).' || CHR(10) ||
'v2.10.1 (2026-02-17): CRITICAL FIX - Remove redundant COMPLETED chunks deletion before parallel export that caused ORA-01403 errors (phantom chunks created by CREATE_CHUNKS_BY_NUMBER_COL).' || CHR(10) ||
'v2.10.0 (2026-02-13): CRITICAL FIX - Register ALL files created by DBMS_CLOUD.EXPORT_DATA (multi-file support due to Oracle parallel processing on large instances). Prevents orphaned files in rollback.' || CHR(10) ||
'v2.9.0 (2026-02-13): Added pProcessName parameter to EXPORT_TABLE_DATA and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures for process tracking in A_SOURCE_FILE_RECEIVED table.' || CHR(10) ||
'v2.8.1 (2026-02-12): FIX query in EXPORT_TABLE_DATA - removed A_LOAD_HISTORY join to ensure single file output (simple SELECT).' || CHR(10) ||
'v2.8.0 (2026-02-12): MAJOR REFACTOR - EXPORT_TABLE_DATA now exports to single CSV file instead of partitioning by key values. Added pFileName parameter.' || CHR(10) ||
'v2.7.5 (2026-02-11): Added pRegisterExport parameter to EXPORT_TABLE_DATA procedure. When TRUE, registers each exported CSV file in A_SOURCE_FILE_RECEIVED.' || CHR(10);
'v2.8.1 (2026-02-12): FIX query in EXPORT_TABLE_DATA - removed A_LOAD_HISTORY join to ensure single file output (simple SELECT).' || CHR(10);
cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10);
vgMsgTmp VARCHAR2(32000);
@@ -147,6 +147,7 @@ AS
pMaxDate IN DATE default SYSDATE,
pParallelDegree IN NUMBER default 1,
pTemplateTableName IN VARCHAR2 default NULL,
pJobClass IN VARCHAR2 default NULL,
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
);
@@ -209,6 +210,7 @@ AS
pMaxFileSize IN NUMBER default 104857600,
pRegisterExport IN BOOLEAN default FALSE,
pProcessName IN VARCHAR2 default 'DATA_EXPORTER',
pJobClass IN VARCHAR2 default NULL,
pCredentialName IN VARCHAR2 default ENV_MANAGER.gvCredentialName
);

View File

@@ -103,12 +103,13 @@ BEGIN
pBucketArea => 'DATA',
pFolderName => 'ODS/CSDB/CSDB_DEBT',
pMinDate => &cutoff_date,
pMaxDate => SYSDATE,
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 16,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT',
pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036)
pRegisterExport => TRUE, -- Register exported files in A_SOURCE_FILE_RECEIVED with metadata (CHECKSUM, CREATED, BYTES)
pProcessName => 'MARS-835' -- Process identifier for tracking
pProcessName => 'MARS-835', -- Process identifier for tracking
pJobClass => 'high' -- Oracle Scheduler job class for resource management
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT exported to DATA bucket with template column order');
@@ -129,7 +130,8 @@ BEGIN
pFolderName => 'ARCHIVE/CSDB/CSDB_DEBT',
pMaxDate => &cutoff_date,
pParallelDegree => 16,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT'
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT',
pJobClass => 'high' -- Oracle Scheduler job class for resource management
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT exported to HIST bucket with template column order');
@@ -224,12 +226,13 @@ BEGIN
pBucketArea => 'DATA',
pFolderName => 'ODS/CSDB/CSDB_DEBT_DAILY',
pMinDate => &cutoff_date,
pMaxDate => SYSDATE,
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 16,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY',
pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036)
pRegisterExport => TRUE, -- Register exported files in A_SOURCE_FILE_RECEIVED with metadata (CHECKSUM, CREATED, BYTES)
pProcessName => 'MARS-835' -- Process identifier for tracking
pProcessName => 'MARS-835', -- Process identifier for tracking
pJobClass => 'high' -- Oracle Scheduler job class for resource management
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT_DAILY exported to DATA bucket with template column order');
@@ -250,7 +253,8 @@ BEGIN
pFolderName => 'ARCHIVE/CSDB/CSDB_DEBT_DAILY',
pMaxDate => &cutoff_date,
pParallelDegree => 16,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY'
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY',
pJobClass => 'high' -- Oracle Scheduler job class for resource management
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_DEBT_DAILY exported to HIST bucket with template column order');

View File

@@ -33,9 +33,11 @@ BEGIN
pKeyColumnName => 'A_ETL_LOAD_SET_FK',
pBucketArea => 'ARCHIVE',
pFolderName => 'ARCHIVE/CSDB/CSDB_INSTR_RAT_FULL',
pMaxDate => SYSDATE,
pMinDate => DATE '1900-01-01', -- Explicit start date for clarity
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 8,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL'
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL',
pJobClass => 'high' -- Oracle Scheduler job class for resource management
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_INSTR_RAT_FULL exported to HIST bucket with template column order');
@@ -60,9 +62,11 @@ BEGIN
pKeyColumnName => 'A_ETL_LOAD_SET_FK',
pBucketArea => 'ARCHIVE',
pFolderName => 'ARCHIVE/CSDB/CSDB_INSTR_DESC_FULL',
pMaxDate => SYSDATE,
pMinDate => DATE '1900-01-01', -- Explicit start date for clarity
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 8,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL'
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL',
pJobClass => 'high' -- Oracle Scheduler job class for resource management
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_INSTR_DESC_FULL exported to HIST bucket with template column order');
@@ -87,9 +91,11 @@ BEGIN
pKeyColumnName => 'A_ETL_LOAD_SET_FK',
pBucketArea => 'ARCHIVE',
pFolderName => 'ARCHIVE/CSDB/CSDB_ISSUER_RAT_FULL',
pMaxDate => SYSDATE,
pMinDate => DATE '1900-01-01', -- Explicit start date for clarity
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 8,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL'
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL',
pJobClass => 'high' -- Oracle Scheduler job class for resource management
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ISSUER_RAT_FULL exported to HIST bucket with template column order');
@@ -114,9 +120,11 @@ BEGIN
pKeyColumnName => 'A_ETL_LOAD_SET_FK',
pBucketArea => 'ARCHIVE',
pFolderName => 'ARCHIVE/CSDB/CSDB_ISSUER_DESC_FULL',
pMaxDate => SYSDATE,
pMinDate => DATE '1900-01-01', -- Explicit start date for clarity
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 8,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL'
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL',
pJobClass => 'high' -- Oracle Scheduler job class for resource management
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: LEGACY_ISSUER_DESC_FULL exported to HIST bucket with template column order');

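All of the export calls above now pass pJobClass => 'high'. The name is most likely validated only when DBMS_PARALLEL_EXECUTE creates the scheduler jobs, so it is worth confirming the job class exists and is visible to the executing user before launching a high-degree parallel export. A simple check against the standard data-dictionary view:

```sql
-- Verify the 'high' job class is visible before launching the exports
SELECT JOB_CLASS_NAME, RESOURCE_CONSUMER_GROUP, COMMENTS
  FROM ALL_SCHEDULER_JOB_CLASSES
 WHERE UPPER(JOB_CLASS_NAME) = 'HIGH';
```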
View File

@@ -1,165 +0,0 @@
# MARS-835: One-Time CSDB Data Export from Operational Database to External Tables
## Overview
This package performs a one-time bulk export of CSDB data from operational database tables (OU_CSDB schema) to new external tables in OCI buckets. The export uses DATA_EXPORTER v2.4.0 with per-column date format handling to move historical data to either DATA bucket (CSV format) or HIST bucket (Parquet format with Hive-style partitioning).
**Migration Strategy:**
- **Split Export (2 tables)**: DEBT, DEBT_DAILY - Last 6 months → DATA (CSV), Older data → HIST (Parquet)
- **HIST Only (4 tables)**: INSTR_RAT_FULL, INSTR_DESC_FULL, ISSUER_RAT_FULL, ISSUER_DESC_FULL - All data → HIST (Parquet)
**Key Transformations:**
- Column rename: `A_ETL_LOAD_SET_FK` → `A_WORKFLOW_HISTORY_KEY` (all tables)
- Column removal: DEBT (2 columns), DEBT_DAILY (6 columns) not required in new structure
## Contents
- `install_mars835.sql` - Master installation script with SPOOL logging
- `rollback_mars835.sql` - Master rollback script
- `01_MARS_835_*.sql` - Individual installation scripts
- `91_MARS_835_*.sql` - Individual rollback scripts
- `track_package_versions.sql` - Package version tracking
- `verify_packages_version.sql` - Package verification
## Prerequisites
- Oracle Database 23ai
- ADMIN user access (required for all MARS installations)
- ENV_MANAGER v3.1.0+
- Required schema privileges
## Installation
### Option 1: Master Script (Recommended)
```powershell
# IMPORTANT: Execute as ADMIN user for proper privilege management
Get-Content "MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/install_mars835.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high"
# Log file created: log/INSTALL_MARS_835_<PDB>_<timestamp>.log
```
### Option 2: Individual Scripts
```powershell
# IMPORTANT: Execute as ADMIN user
Get-Content "01_MARS_835_*.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high"
Get-Content "02_MARS_835_*.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high"
# ... etc
```
## Verification
```sql
-- Verify package versions
SELECT PACKAGE_NAME.GET_VERSION() FROM DUAL;
-- Check for errors (ADMIN user checks specific schema)
SELECT * FROM ALL_ERRORS
WHERE OWNER = 'CT_MRDS' -- Replace with target schema
AND NAME = 'PACKAGE_NAME';
-- Verify no untracked changes
SELECT ENV_MANAGER.CHECK_PACKAGE_CHANGES('CT_MRDS', 'PACKAGE_NAME') FROM DUAL;
```
## Rollback
```powershell
# IMPORTANT: Execute as ADMIN user
Get-Content "MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/rollback_mars835.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high"
```

**NOTE**: Rollback for data exports is **NOT RECOMMENDED** as it would delete exported files from OCI buckets. Only use rollback if the export failed and needs to be restarted.
## Expected Changes
### Data Export Summary
**6 CSDB tables exported from OU_CSDB schema:**
**Group 1: Split DATA + HIST (Time Critical)**
1. **DEBT** - Last 6 months → DATA, Older → HIST
2. **DEBT_DAILY** - Last 6 months → DATA, Older → HIST
**Group 2: HIST Only (Weekend Bulk)**
3. **INSTR_RAT_FULL** - All data → HIST
4. **INSTR_DESC_FULL** - All data → HIST
5. **ISSUER_RAT_FULL** - All data → HIST
6. **ISSUER_DESC_FULL** - All data → HIST
### Bucket Destinations (DEV environment)
- **DATA Bucket**: `mrds_data_dev/ODS/CSDB/` (CSV format)
- **HIST Bucket**: `mrds_hist_dev/ARCHIVE/CSDB/` (Parquet with partitioning)
### Column Mappings
- **All tables**: `A_ETL_LOAD_SET_FK` renamed to `A_WORKFLOW_HISTORY_KEY`
- **DEBT**: Removed columns: `IDIRDEPOSITORY`, `VA_BONDDURATION`
- **DEBT_DAILY**: Removed columns: `STEPID`, `PROGRAMNAME`, `PROGRAMCEILING`, `PROGRAMSTATUS`, `ISSUERNACE21SECTOR`, `INSTRUMENTQUOTATIONBASIS`
## Testing
### Post-Export Verification
1. **Verify CSV files in DATA bucket** (DEBT, DEBT_DAILY - last 6 months):
```sql
-- Check exported files
SELECT object_name, bytes
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => 'DEF_CRED_ARN',
location_uri => 'https://objectstorage.region.oraclecloud.com/n/namespace/b/mrds_data_dev/o/ODS/CSDB/'
)) WHERE object_name LIKE '%CSDB_DEBT%';
```
2. **Verify Parquet files in HIST bucket** (all 6 tables):
```sql
-- Check archived files with Hive partitioning
SELECT object_name, bytes
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => 'DEF_CRED_ARN',
location_uri => 'https://objectstorage.region.oraclecloud.com/n/namespace/b/mrds_hist_dev/o/ARCHIVE/CSDB/'
)) WHERE object_name LIKE '%PARTITION_YEAR=%';
```
3. **Validate row counts match source tables**:
```sql
-- Compare counts between source and exported data
SELECT COUNT(*) FROM OU_CSDB.DEBT;
SELECT COUNT(*) FROM ODS.CSDB_DEBT_ODS; -- External table pointing to DATA
SELECT COUNT(*) FROM ODS.CSDB_DEBT_ARCHIVE; -- External table pointing to HIST
```
4. **Verify column mappings**:
```sql
-- Check A_WORKFLOW_HISTORY_KEY exists in exported data
SELECT A_WORKFLOW_HISTORY_KEY, COUNT(*)
FROM ODS.CSDB_DEBT_ARCHIVE
GROUP BY A_WORKFLOW_HISTORY_KEY;
```
## Known Issues
### Timing Constraints
- **DATA exports (DEBT, DEBT_DAILY)**: Must be executed during the parallel old+new loads phase after the Production deployment
- **HIST exports (all 6 tables)**: Can run at any time; weekend bulk execution is recommended to avoid interference
### Environment-Specific Configuration
- Bucket names must be adjusted for each environment:
- DEV: `mrds_data_dev`, `mrds_hist_dev`
- TEST: `mrds_data_test`, `mrds_hist_test`
- PROD: `mrds_data_prod`, `mrds_hist_prod`
### Data Cutoff Date
- Export scripts use 6-month cutoff date calculated as `ADD_MONTHS(SYSDATE, -6)`
- Verify cutoff aligns with business requirements before execution
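A one-line check of what the cutoff resolves to on the day of execution (purely illustrative):
```sql
-- Show the effective 6-month cutoff date before running the exports
SELECT TO_CHAR(ADD_MONTHS(SYSDATE, -6), 'YYYY-MM-DD') AS CUTOFF_DATE FROM DUAL;
```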
### One-Time Execution
- This is a **ONE-TIME data migration** package
- After successful execution, package should be **deactivated** (moved to REL01_POST_DEACTIVATION)
- Do not re-run unless explicitly required for data refresh
## Related
- **JIRA**: MARS-835 - CSDB Data Export to External Tables
- **Confluence**: FILE_MANAGER package - MRDS - Technical Team
- **Confluence**: Table Setup Guide for FILE PROCESSOR System
- **Source Schema**: OU_CSDB (Operational Database)
- **Target Schema**: ODS (External Tables)
- **Migration Type**: One-time bulk export (deactivated post-execution)
---
**Author:** Grzegorz Michalski
**Date:** 2025-12-04
**Version:** 1.0.0

View File

@@ -1,207 +0,0 @@
# MARS-835: Required External Tables for Smart Column Mapping
## Overview
This document lists all external tables required for MARS-835 data exports using DATA_EXPORTER v2.4.0 with Smart Column Mapping feature.
**Purpose**: Smart Column Mapping ensures CSV files are generated with columns in the EXACT order expected by external tables, preventing NULL values due to Oracle's positional CSV mapping.
---
## Required External Tables
### Group 1: DATA Bucket (CSV Format) - **CRITICAL**
#### 1. ODS.CSDB_DEBT_DATA_ODS
- **Source Table**: OU_CSDB.LEGACY_DEBT
- **Format**: CSV
- **Bucket**: DATA (mrds_data_dev/ODS/CSDB/CSDB_DEBT/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY (position 2 recommended)
- **Critical**: Must use Smart Column Mapping to avoid NULL values in A_WORKFLOW_HISTORY_KEY
#### 2. ODS.CSDB_DEBT_DAILY_DATA_ODS
- **Source Table**: OU_CSDB.LEGACY_DEBT_DAILY
- **Format**: CSV
- **Bucket**: DATA (mrds_data_dev/ODS/CSDB/CSDB_DEBT_DAILY/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY (position 2 recommended)
- **Critical**: Must use Smart Column Mapping to avoid NULL values in A_WORKFLOW_HISTORY_KEY
---
### Group 2: ARCHIVE Bucket (Parquet Format) - **RECOMMENDED**
#### 3. ODS.CSDB_DEBT_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_DEBT
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_DEBT/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
- **Note**: Parquet uses schema-based mapping (column order less critical but Smart Column Mapping ensures consistency)
#### 4. ODS.CSDB_DEBT_DAILY_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_DEBT_DAILY
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_DEBT_DAILY/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
#### 5. ODS.CSDB_INSTR_RAT_FULL_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_INSTR_RAT_FULL
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_INSTR_RAT_FULL/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
#### 6. ODS.CSDB_INSTR_DESC_FULL_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_INSTR_DESC_FULL
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_INSTR_DESC_FULL/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
#### 7. ODS.CSDB_ISSUER_RAT_FULL_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_ISSUER_RAT_FULL
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_ISSUER_RAT_FULL/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
#### 8. ODS.CSDB_ISSUER_DESC_FULL_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_ISSUER_DESC_FULL
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_ISSUER_DESC_FULL/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
---
## External Table Column Order Requirements
### **CRITICAL for CSV Tables** (DATA bucket):
All CSV external tables MUST have **A_WORKFLOW_HISTORY_KEY at position 2**:
```
Position 1: A_KEY (NUMBER)
Position 2: A_WORKFLOW_HISTORY_KEY (NUMBER) ← MUST BE HERE!
Position 3+: Other columns in any order
```
**Reason**: Oracle External Tables with CSV format use **positional mapping** (the header row is ignored). If the source table has A_ETL_LOAD_SET_FK at position 72, the CSV also places that value at position 72, while the external table expects A_WORKFLOW_HISTORY_KEY at position 2. The external table then reads whatever the CSV has at position 2 (which might be a DATE column) as NUMBER, the conversion fails, and the column is loaded as NULL.
**Solution**: Smart Column Mapping (v2.4.0) generates CSV columns in EXTERNAL TABLE order, ensuring position 2 has the correct NUMBER value.
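The idea can be illustrated with the data dictionary alone. A hypothetical sketch of how a column list in external-table order could be derived (the template owner and name are taken from the examples in this document; the LISTAGG query is an illustration of the concept, not the package's actual implementation):
```sql
-- Build a SELECT list in the exact column order of the external-table template
SELECT LISTAGG(COLUMN_NAME, ', ') WITHIN GROUP (ORDER BY COLUMN_ID) AS EXPORT_COLUMN_LIST
  FROM ALL_TAB_COLUMNS
 WHERE OWNER = 'CT_ET_TEMPLATES'
   AND TABLE_NAME = 'CSDB_DEBT_TEMPLATE';
```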
### **OPTIONAL for Parquet Tables** (ARCHIVE bucket):
Parquet format uses **schema-based mapping** (column names). Column order doesn't matter, but Smart Column Mapping provides consistency.
---
## Creation Script Example
### CSV External Table (CRITICAL - Correct Column Order)
```sql
-- Example: ODS.CSDB_DEBT_DATA_ODS
-- IMPORTANT: A_WORKFLOW_HISTORY_KEY must be at position 2!
BEGIN
ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE(
pTableName => 'CSDB_DEBT_DATA_ODS',
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE',
pPrefix => 'ODS/CSDB/CSDB_DEBT',
pBucketUri => CT_MRDS.ENV_MANAGER.gvDataBucketUri,
pFormat => 'CSV' -- Uses positional mapping!
);
END;
/
-- Verify column order (A_WORKFLOW_HISTORY_KEY should be position 2)
SELECT column_id, column_name, data_type
FROM all_tab_columns
WHERE table_name = 'CSDB_DEBT_DATA_ODS'
AND owner = 'ODS'
ORDER BY column_id;
```
### Parquet External Table (Optional Column Order)
```sql
-- Example: ODS.CSDB_DEBT_ARCHIVE
-- Column order flexible (schema-based mapping)
BEGIN
ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE(
pTableName => 'CSDB_DEBT_ARCHIVE',
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE',
pPrefix => 'ARCHIVE/CSDB/CSDB_DEBT',
pBucketUri => CT_MRDS.ENV_MANAGER.gvArchiveBucketUri,
pFormat => 'PARQUET' -- Uses schema-based mapping
);
END;
/
```
---
## Template Tables Required
All external tables require corresponding template tables in CT_ET_TEMPLATES schema:
- `CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_DEBT_DAILY_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL_TEMPLATE`
**Note**: Template tables must be created by ADMIN or CT_ET_TEMPLATES user (MRDS_LOADER cannot create them).
---
## Verification Checklist
Before running MARS-835 exports:
- [ ] All 8 external tables exist in ODS schema
- [ ] CSV tables (DATA bucket) have A_WORKFLOW_HISTORY_KEY at position 2
- [ ] Template tables exist in CT_ET_TEMPLATES schema
- [ ] MRDS_LOADER has EXECUTE privilege on ODS.FILE_MANAGER_ODS
- [ ] ODS schema has access to CT_MRDS.ENV_MANAGER for logging
- [ ] DATA_EXPORTER v2.4.0 deployed with Smart Column Mapping feature
---
## Testing Verification
After export, verify A_WORKFLOW_HISTORY_KEY is not NULL:
```sql
-- CSV tables (should be 100% populated)
SELECT 'CSDB_DEBT_DATA_ODS' AS TABLE_NAME,
COUNT(*) AS TOTAL_ROWS,
COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT,
ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT
FROM ODS.CSDB_DEBT_DATA_ODS;
SELECT 'CSDB_DEBT_DAILY_DATA_ODS' AS TABLE_NAME,
COUNT(*) AS TOTAL_ROWS,
COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT,
ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT
FROM ODS.CSDB_DEBT_DAILY_DATA_ODS;
-- Parquet tables (should also be 100% populated)
SELECT 'CSDB_DEBT_ARCHIVE' AS TABLE_NAME,
COUNT(*) AS TOTAL_ROWS,
COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT,
ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT
FROM ODS.CSDB_DEBT_ARCHIVE;
```
**Expected Result**: SUCCESS_RATE_PCT = 100.00 for all tables
---
## Related Documentation
- [DATA_EXPORTER v2.4.0 Smart Column Mapping Examples](../MARS-835-PREHOOK/current_version/v2.3.0/DATA_EXPORTER_v2.4.0_Smart_Column_Mapping_Examples.sql)
- [Oracle External Tables Column Order Issue](../../confluence/additions/Oracle_External_Tables_Column_Order_Issue.md)
- [MARS-835 README](README.md)
---
**Last Updated**: 2026-01-09
**Author**: GitHub Copilot (MARS-835 Update)

View File

@@ -1,80 +0,0 @@
SET SERVEROUTPUT ON SIZE UNLIMITED
SET DEFINE OFF
DECLARE
vCredential VARCHAR2(100) := 'OCI$RESOURCE_PRINCIPAL';
vDataBucket VARCHAR2(200) := 'https://objectstorage.eu-frankfurt-1.oraclecloud.com/n/frtgjxu7zl7c/b/data/o/';
vArchiveBucket VARCHAR2(200) := 'https://objectstorage.eu-frankfurt-1.oraclecloud.com/n/frtgjxu7zl7c/b/history/o/';
vCount NUMBER := 0;
BEGIN
DBMS_OUTPUT.PUT_LINE('=== Checking CSV files in DATA bucket ===');
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredential,
location_uri => vDataBucket || 'ODS/CSDB/CSDB_DEBT/'
))
WHERE object_name LIKE 'LEGACY_DEBT%'
) LOOP
vCount := vCount + 1;
DBMS_OUTPUT.PUT_LINE(' [' || vCount || '] ' || rec.object_name);
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total CSV files DEBT: ' || vCount);
vCount := 0;
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredential,
location_uri => vDataBucket || 'ODS/CSDB/CSDB_DEBT_DAILY/'
))
WHERE object_name LIKE 'LEGACY_DEBT_DAILY%'
) LOOP
vCount := vCount + 1;
DBMS_OUTPUT.PUT_LINE(' [' || vCount || '] ' || rec.object_name);
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total CSV files DEBT_DAILY: ' || vCount);
DBMS_OUTPUT.PUT_LINE(CHR(10) || '=== Checking Parquet files in ARCHIVE bucket ===');
vCount := 0;
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredential,
location_uri => vArchiveBucket || 'ARCHIVE/CSDB/CSDB_DEBT/'
))
WHERE ROWNUM <= 5
) LOOP
vCount := vCount + 1;
DBMS_OUTPUT.PUT_LINE(' [' || vCount || '] ' || rec.object_name);
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total Parquet files DEBT (first 5): ' || vCount);
vCount := 0;
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredential,
location_uri => vArchiveBucket || 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/'
))
) LOOP
vCount := vCount + 1;
DBMS_OUTPUT.PUT_LINE(' [' || vCount || '] ' || rec.object_name);
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total Parquet files DEBT_DAILY: ' || vCount);
DBMS_OUTPUT.PUT_LINE(CHR(10) || '=== Now testing DELETE_OBJECT ===');
DBMS_OUTPUT.PUT_LINE('Testing delete for: ODS/CSDB/CSDB_DEBT/LEGACY_DEBT_202510_1_20260213T092239041072Z.csv');
BEGIN
DBMS_CLOUD.DELETE_OBJECT(
credential_name => vCredential,
object_uri => vDataBucket || 'ODS/CSDB/CSDB_DEBT/LEGACY_DEBT_202510_1_20260213T092239041072Z.csv'
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: File deleted');
EXCEPTION
WHEN OTHERS THEN
DBMS_OUTPUT.PUT_LINE('ERROR: ' || SQLERRM);
END;
END;
/

View File

@@ -1,126 +0,0 @@
--=============================================================================================================================
-- MARS-835 Manual Cleanup - Delete remaining files after rollback
--=============================================================================================================================
SET SERVEROUTPUT ON SIZE UNLIMITED
SET DEFINE OFF
DECLARE
vDataBucketUri VARCHAR2(500);
vArchiveBucketUri VARCHAR2(500);
vCredentialName VARCHAR2(100);
vFileCount NUMBER := 0;
BEGIN
-- Get bucket URIs and credential
vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA');
vArchiveBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('ARCHIVE');
vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName;
DBMS_OUTPUT.PUT_LINE('========================================================================');
DBMS_OUTPUT.PUT_LINE('MARS-835 Manual Cleanup');
DBMS_OUTPUT.PUT_LINE('========================================================================');
-- Delete DEBT CSV files from DATA bucket
DBMS_OUTPUT.PUT_LINE(CHR(10) || '1. Deleting DEBT CSV files from DATA bucket...');
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredentialName,
location_uri => vDataBucketUri || 'ODS/CSDB/CSDB_DEBT/'
))
WHERE object_name LIKE 'LEGACY_DEBT%'
) LOOP
BEGIN
DBMS_CLOUD.DELETE_OBJECT(
credential_name => vCredentialName,
object_uri => vDataBucketUri || 'ODS/CSDB/CSDB_DEBT/' || rec.object_name
);
DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name);
vFileCount := vFileCount + 1;
EXCEPTION
WHEN OTHERS THEN
DBMS_OUTPUT.PUT_LINE(' ERROR: ' || rec.object_name || ' - ' || SQLERRM);
END;
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total deleted: ' || vFileCount);
-- Delete DEBT_DAILY CSV files from DATA bucket
DBMS_OUTPUT.PUT_LINE(CHR(10) || '2. Deleting DEBT_DAILY CSV files from DATA bucket...');
vFileCount := 0;
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredentialName,
location_uri => vDataBucketUri || 'ODS/CSDB/CSDB_DEBT_DAILY/'
))
WHERE object_name LIKE 'LEGACY_DEBT_DAILY%'
) LOOP
BEGIN
DBMS_CLOUD.DELETE_OBJECT(
credential_name => vCredentialName,
object_uri => vDataBucketUri || 'ODS/CSDB/CSDB_DEBT_DAILY/' || rec.object_name
);
DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name);
vFileCount := vFileCount + 1;
EXCEPTION
WHEN OTHERS THEN
DBMS_OUTPUT.PUT_LINE(' ERROR: ' || rec.object_name || ' - ' || SQLERRM);
END;
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total deleted: ' || vFileCount);
-- Delete DEBT Parquet files from ARCHIVE bucket
DBMS_OUTPUT.PUT_LINE(CHR(10) || '3. Deleting DEBT Parquet files from ARCHIVE bucket...');
vFileCount := 0;
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredentialName,
location_uri => vArchiveBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT/'
))
WHERE object_name NOT LIKE '%/'
) LOOP
BEGIN
DBMS_CLOUD.DELETE_OBJECT(
credential_name => vCredentialName,
object_uri => vArchiveBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT/' || rec.object_name
);
DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name);
vFileCount := vFileCount + 1;
EXCEPTION
WHEN OTHERS THEN
DBMS_OUTPUT.PUT_LINE(' ERROR: ' || rec.object_name || ' - ' || SQLERRM);
END;
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total deleted: ' || vFileCount);
-- Delete DEBT_DAILY Parquet files from ARCHIVE bucket
DBMS_OUTPUT.PUT_LINE(CHR(10) || '4. Deleting DEBT_DAILY Parquet files from ARCHIVE bucket...');
vFileCount := 0;
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredentialName,
location_uri => vArchiveBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/'
))
WHERE object_name NOT LIKE '%/'
) LOOP
BEGIN
DBMS_CLOUD.DELETE_OBJECT(
credential_name => vCredentialName,
object_uri => vArchiveBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/' || rec.object_name
);
DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name);
vFileCount := vFileCount + 1;
EXCEPTION
WHEN OTHERS THEN
DBMS_OUTPUT.PUT_LINE(' ERROR: ' || rec.object_name || ' - ' || SQLERRM);
END;
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total deleted: ' || vFileCount);
DBMS_OUTPUT.PUT_LINE(CHR(10) || '========================================================================');
DBMS_OUTPUT.PUT_LINE('Manual cleanup completed');
DBMS_OUTPUT.PUT_LINE('========================================================================');
END;
/

View File

@@ -1,93 +0,0 @@
-- MARS-835: Manual cleanup of Parquet files only (after bugfix)
-- Description: Removes orphaned Parquet files from ARCHIVE bucket
-- Usage: Execute as CT_MRDS user
SET SERVEROUTPUT ON SIZE UNLIMITED
DECLARE
vCredentialName VARCHAR2(100) := CT_MRDS.ENV_MANAGER.gvCredentialName;
vHistBucketUri VARCHAR2(200) := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('ARCHIVE');
vFileCount NUMBER := 0;
vStartTime TIMESTAMP := SYSTIMESTAMP;
vElapsedTime INTERVAL DAY TO SECOND;
BEGIN
DBMS_OUTPUT.PUT_LINE('==========================================================');
DBMS_OUTPUT.PUT_LINE('MANUAL CLEANUP: Parquet files only');
DBMS_OUTPUT.PUT_LINE('==========================================================');
DBMS_OUTPUT.PUT_LINE('Start Time: ' || TO_CHAR(vStartTime, 'YYYY-MM-DD HH24:MI:SS'));
DBMS_OUTPUT.PUT_LINE('Credential: ' || vCredentialName);
DBMS_OUTPUT.PUT_LINE('Archive Bucket: ' || vHistBucketUri);
DBMS_OUTPUT.PUT_LINE('----------------------------------------------------------');
-- Delete CSDB_DEBT Parquet files
DBMS_OUTPUT.PUT_LINE(chr(10) || 'Deleting CSDB_DEBT Parquet files...');
vFileCount := 0;
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredentialName,
location_uri => vHistBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT/'
))
WHERE object_name NOT LIKE '%/' -- Exclude directories
) LOOP
BEGIN
DBMS_CLOUD.DELETE_OBJECT(
credential_name => vCredentialName,
object_uri => vHistBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT/' || rec.object_name
);
DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name);
vFileCount := vFileCount + 1;
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -20404 THEN
DBMS_OUTPUT.PUT_LINE(' Skipped (not found): ' || rec.object_name);
ELSE
DBMS_OUTPUT.PUT_LINE(' ERROR: ' || SQLERRM || ' - ' || rec.object_name);
END IF;
END;
END LOOP;
DBMS_OUTPUT.PUT_LINE('CSDB_DEBT Parquet files deleted: ' || vFileCount);
-- Delete CSDB_DEBT_DAILY Parquet files
DBMS_OUTPUT.PUT_LINE(chr(10) || 'Deleting CSDB_DEBT_DAILY Parquet files...');
vFileCount := 0;
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredentialName,
location_uri => vHistBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/'
))
WHERE object_name NOT LIKE '%/' -- Exclude directories
) LOOP
BEGIN
DBMS_CLOUD.DELETE_OBJECT(
credential_name => vCredentialName,
object_uri => vHistBucketUri || 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/' || rec.object_name
);
DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name);
vFileCount := vFileCount + 1;
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -20404 THEN
DBMS_OUTPUT.PUT_LINE(' Skipped (not found): ' || rec.object_name);
ELSE
DBMS_OUTPUT.PUT_LINE(' ERROR: ' || SQLERRM || ' - ' || rec.object_name);
END IF;
END;
END LOOP;
DBMS_OUTPUT.PUT_LINE('CSDB_DEBT_DAILY Parquet files deleted: ' || vFileCount);
-- Final summary
vElapsedTime := SYSTIMESTAMP - vStartTime;
DBMS_OUTPUT.PUT_LINE('----------------------------------------------------------');
DBMS_OUTPUT.PUT_LINE('End Time: ' || TO_CHAR(SYSTIMESTAMP, 'YYYY-MM-DD HH24:MI:SS'));
DBMS_OUTPUT.PUT_LINE('Elapsed Time: ' || vElapsedTime);
DBMS_OUTPUT.PUT_LINE('==========================================================');
DBMS_OUTPUT.PUT_LINE('MANUAL CLEANUP COMPLETED');
DBMS_OUTPUT.PUT_LINE('==========================================================');
END;
/

View File

@@ -1,92 +0,0 @@
-- ===================================================================
-- Simple Package Version Tracking Script
-- ===================================================================
-- Purpose: Track specified Oracle package versions
-- Author: Grzegorz Michalski
-- Date: 2025-12-04
-- Version: 3.1.0 - List-Based Edition
--
-- USAGE:
-- 1. Edit package list below (add/remove packages as needed)
-- 2. Include in your install/rollback script: @@track_package_versions.sql
-- ===================================================================
SET SERVEROUTPUT ON;
DECLARE
TYPE t_package_rec IS RECORD (
owner VARCHAR2(50),
name VARCHAR2(50),
version VARCHAR2(50)
);
TYPE t_packages IS TABLE OF t_package_rec;
TYPE t_string_array IS TABLE OF VARCHAR2(100);
-- ===================================================================
-- PACKAGE LIST - Edit this array to specify packages to track
-- ===================================================================
-- Add or remove entries as needed for your MARS issue
-- Format: 'SCHEMA.PACKAGE_NAME'
-- ===================================================================
vPackageList t_string_array := t_string_array(
'CT_MRDS.FILE_MANAGER',
'ODS.FILE_MANAGER_ODS'
);
-- ===================================================================
vPackages t_packages := t_packages();
vVersion VARCHAR2(50);
vCount NUMBER := 0;
vOwner VARCHAR2(50);
vPackageName VARCHAR2(50);
vDotPos NUMBER;
BEGIN
DBMS_OUTPUT.PUT_LINE('========================================');
DBMS_OUTPUT.PUT_LINE('Package Version Tracking');
DBMS_OUTPUT.PUT_LINE('========================================');
-- Process each package in the list
FOR i IN 1..vPackageList.COUNT LOOP
vDotPos := INSTR(vPackageList(i), '.');
IF vDotPos > 0 THEN
vOwner := SUBSTR(vPackageList(i), 1, vDotPos - 1);
vPackageName := SUBSTR(vPackageList(i), vDotPos + 1);
BEGIN
EXECUTE IMMEDIATE 'SELECT ' || vPackageList(i) || '.GET_VERSION() FROM DUAL'
INTO vVersion;
vPackages.EXTEND;
vPackages(vPackages.COUNT).owner := vOwner;
vPackages(vPackages.COUNT).name := vPackageName;
vPackages(vPackages.COUNT).version := vVersion;
CT_MRDS.ENV_MANAGER.TRACK_PACKAGE_VERSION(
pPackageOwner => vOwner,
pPackageName => vPackageName,
pPackageVersion => vVersion,
pPackageBuildDate => TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS'),
pPackageAuthor => 'Grzegorz Michalski'
);
vCount := vCount + 1;
EXCEPTION
WHEN OTHERS THEN
DBMS_OUTPUT.PUT_LINE('Error tracking ' || vPackageList(i) || ': ' || SQLERRM);
END;
END IF;
END LOOP;
-- Display results
IF vPackages.COUNT > 0 THEN
DBMS_OUTPUT.PUT_LINE('Packages tracked: ' || vCount || ' of ' || vPackages.COUNT);
FOR i IN 1..vPackages.COUNT LOOP
DBMS_OUTPUT.PUT_LINE(' ' || vPackages(i).owner || '.' || vPackages(i).name ||
' (v' || vPackages(i).version || ')');
END LOOP;
ELSE
DBMS_OUTPUT.PUT_LINE('No packages found in list');
END IF;
DBMS_OUTPUT.PUT_LINE('========================================');
END;
/

View File

@@ -1,62 +0,0 @@
-- ===================================================================
-- Universal Package Version Verification Script
-- ===================================================================
-- Purpose: Verify all tracked Oracle packages for code changes
-- Author: Grzegorz Michalski
-- Date: 2025-12-04
-- Version: 1.0.0
--
-- USAGE:
-- Include at the end of install/rollback scripts: @@verify_packages_version.sql
--
-- OUTPUT:
-- - List of all tracked packages with their current status
-- - OK: Package has not changed since last tracking
-- - WARNING: Package code changed without version update
-- ===================================================================
SET LINESIZE 200
SET PAGESIZE 1000
SET FEEDBACK OFF
PROMPT
PROMPT ========================================
PROMPT Package Version Verification
PROMPT ========================================
PROMPT
COLUMN PACKAGE_OWNER FORMAT A15
COLUMN PACKAGE_NAME FORMAT A20
COLUMN VERSION FORMAT A10
COLUMN STATUS FORMAT A80
SELECT
PACKAGE_OWNER,
PACKAGE_NAME,
PACKAGE_VERSION AS VERSION,
CT_MRDS.ENV_MANAGER.CHECK_PACKAGE_CHANGES(PACKAGE_OWNER, PACKAGE_NAME) AS STATUS
FROM (
SELECT
PACKAGE_OWNER,
PACKAGE_NAME,
PACKAGE_VERSION,
ROW_NUMBER() OVER (PARTITION BY PACKAGE_OWNER, PACKAGE_NAME ORDER BY TRACKING_DATE DESC) AS RN
FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING
)
WHERE RN = 1
ORDER BY PACKAGE_OWNER, PACKAGE_NAME;
PROMPT
PROMPT ========================================
PROMPT Verification Complete
PROMPT ========================================
PROMPT
PROMPT Legend:
PROMPT OK - Package has not changed since last tracking
PROMPT WARNING - Package code changed without version update
PROMPT
PROMPT For detailed hash information, use:
PROMPT SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('OWNER', 'PACKAGE') FROM DUAL;
PROMPT ========================================
SET FEEDBACK ON

View File

@@ -436,20 +436,6 @@ PROMPT =========================================================================
INSERT INTO CT_MRDS.A_PROCESS_LOG (PROCESS_NAME, PROCEDURE_NAME, LOG_LEVEL, LOG_MESSAGE)
VALUES ('MARS-956', 'EXPORT_C2D_MPEC_DATA', 'INFO', 'All C2D MPEC historical exports completed successfully');
-- Display recent export activity
PROMPT Recent Export Activity (last 30 minutes):
SELECT TO_CHAR(LOG_TIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') AS EXPORT_TIME,
PROCESS_NAME,
PROCEDURE_NAME,
LOG_LEVEL,
LOG_MESSAGE
FROM CT_MRDS.A_PROCESS_LOG
WHERE PROCESS_NAME = 'MARS-956'
OR PROCEDURE_NAME LIKE '%DATA_EXPORTER%'
AND LOG_TIMESTAMP >= SYSTIMESTAMP - INTERVAL '30' MINUTE
ORDER BY LOG_TIMESTAMP DESC
FETCH FIRST 20 ROWS ONLY;
PROMPT
PROMPT =====================================================================================
PROMPT MARS-956 C2D MPEC Export Completed Successfully!

View File

@@ -42,38 +42,38 @@ sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1056.sql"
New-Item -ItemType Directory -Force -Path "log" | Out-Null; Move-Item -Path "*.log" -Destination "log" -Force
cd .\MARS_Packages\REL02\MARS-1046
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars1046.sql"
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1046.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars1046.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1046.sql"
7z a -pMojeSuperHaslo#123 -mhe=on M1046_arch.7z MARS-1046/
cd .\MARS_Packages\REL01_ADDITIONS\MARS-826-PREHOOK
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars826_prehook.sql"
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars826_prehook.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars826_prehook.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars826_prehook.sql"
7z a -pMojeSuperHaslo#123 -mhe=on M826PH_arch.7z MARS-826-PREHOOK
cd .\MARS_Packages\REL01_ADDITIONS\MARS-826
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars826.sql"
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars826.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars826.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars826.sql"
7z a -pMojeSuperHaslo#123 -mhe=on M826_arch.7z MARS-826\
cd .\MARS_Packages\REL01_ADDITIONS\MARS-835
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835.sql"
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835.sql"
7z a -pMojeSuperHaslo#123 -mhe=on M835_arch.7z MARS-835
cd .\MARS_Packages\REL01_ADDITIONS\MARS-835-PREHOOK
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835_prehook.sql"
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835_prehook.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars835_prehook.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars835_prehook.sql"
7z a -pMojeSuperHaslo#123 -mhe=on M835PH_arch.7z MARS-835-PREHOOK
cd .\MARS_Packages\REL03\MARS-1057
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars1057.sql"
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1057.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars1057.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars1057.sql"
7z a -pMojeSuperHaslo#123 -mhe=on M1057_arch.7z MARS-1057
cd .\MARS_Packages\REL02_POST\MARS-956
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars956.sql"
sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars956.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@install_mars956.sql"
echo 'yes' | sql "ADMIN/Cloudpass#34@ggmichalski_high" "@rollback_mars956.sql"
7z a -pMojeSuperHaslo#123 -mhe=on M956_arch.7z MARS-956