Refactor MARS-835 installation scripts to include explicit date handling and remove obsolete files

This commit is contained in:
Grzegorz Michalski
2026-02-17 11:12:03 +01:00
parent 912b7a6466
commit 020dacb571
9 changed files with 10 additions and 831 deletions

View File

@@ -103,7 +103,7 @@ BEGIN
pBucketArea => 'DATA', pBucketArea => 'DATA',
pFolderName => 'ODS/CSDB/CSDB_DEBT', pFolderName => 'ODS/CSDB/CSDB_DEBT',
pMinDate => &cutoff_date, pMinDate => &cutoff_date,
pMaxDate => SYSDATE, pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 16, pParallelDegree => 16,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT', pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT',
pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036) pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036)
@@ -224,7 +224,7 @@ BEGIN
pBucketArea => 'DATA', pBucketArea => 'DATA',
pFolderName => 'ODS/CSDB/CSDB_DEBT_DAILY', pFolderName => 'ODS/CSDB/CSDB_DEBT_DAILY',
pMinDate => &cutoff_date, pMinDate => &cutoff_date,
pMaxDate => SYSDATE, pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 16, pParallelDegree => 16,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY', pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_DAILY',
pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036) pMaxFileSize => 104857600, -- 100MB in bytes (safe for parallel execution, avoids ORA-04036)

View File

@@ -33,7 +33,8 @@ BEGIN
pKeyColumnName => 'A_ETL_LOAD_SET_FK', pKeyColumnName => 'A_ETL_LOAD_SET_FK',
pBucketArea => 'ARCHIVE', pBucketArea => 'ARCHIVE',
pFolderName => 'ARCHIVE/CSDB/CSDB_INSTR_RAT_FULL', pFolderName => 'ARCHIVE/CSDB/CSDB_INSTR_RAT_FULL',
pMaxDate => SYSDATE, pMinDate => DATE '1900-01-01', -- Explicit start date for clarity
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 8, pParallelDegree => 8,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL' pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL'
); );
@@ -60,7 +61,8 @@ BEGIN
pKeyColumnName => 'A_ETL_LOAD_SET_FK', pKeyColumnName => 'A_ETL_LOAD_SET_FK',
pBucketArea => 'ARCHIVE', pBucketArea => 'ARCHIVE',
pFolderName => 'ARCHIVE/CSDB/CSDB_INSTR_DESC_FULL', pFolderName => 'ARCHIVE/CSDB/CSDB_INSTR_DESC_FULL',
pMaxDate => SYSDATE, pMinDate => DATE '1900-01-01', -- Explicit start date for clarity
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 8, pParallelDegree => 8,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL' pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL'
); );
@@ -87,7 +89,8 @@ BEGIN
pKeyColumnName => 'A_ETL_LOAD_SET_FK', pKeyColumnName => 'A_ETL_LOAD_SET_FK',
pBucketArea => 'ARCHIVE', pBucketArea => 'ARCHIVE',
pFolderName => 'ARCHIVE/CSDB/CSDB_ISSUER_RAT_FULL', pFolderName => 'ARCHIVE/CSDB/CSDB_ISSUER_RAT_FULL',
pMaxDate => SYSDATE, pMinDate => DATE '1900-01-01', -- Explicit start date for clarity
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 8, pParallelDegree => 8,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL' pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL'
); );
@@ -114,7 +117,8 @@ BEGIN
pKeyColumnName => 'A_ETL_LOAD_SET_FK', pKeyColumnName => 'A_ETL_LOAD_SET_FK',
pBucketArea => 'ARCHIVE', pBucketArea => 'ARCHIVE',
pFolderName => 'ARCHIVE/CSDB/CSDB_ISSUER_DESC_FULL', pFolderName => 'ARCHIVE/CSDB/CSDB_ISSUER_DESC_FULL',
pMaxDate => SYSDATE, pMinDate => DATE '1900-01-01', -- Explicit start date for clarity
pMaxDate => DATE '9999-12-31', -- Include future dates (MAX_LOAD_START can be beyond SYSDATE)
pParallelDegree => 8, pParallelDegree => 8,
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL' pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL'
); );

View File

@@ -1,165 +0,0 @@
# MARS-835: One-Time CSDB Data Export from Operational Database to External Tables
## Overview
This package performs a one-time bulk export of CSDB data from operational database tables (OU_CSDB schema) to new external tables in OCI buckets. The export uses DATA_EXPORTER v2.4.0 with per-column date format handling to move historical data to either DATA bucket (CSV format) or HIST bucket (Parquet format with Hive-style partitioning).
**Migration Strategy:**
- **Split Export (2 tables)**: DEBT, DEBT_DAILY - Last 6 months → DATA (CSV), Older data → HIST (Parquet)
- **HIST Only (4 tables)**: INSTR_RAT_FULL, INSTR_DESC_FULL, ISSUER_RAT_FULL, ISSUER_DESC_FULL - All data → HIST (Parquet)
**Key Transformations:**
- Column rename: `A_ETL_LOAD_SET_FK` → `A_WORKFLOW_HISTORY_KEY` (all tables)
- Column removal: DEBT (2 columns), DEBT_DAILY (6 columns) not required in new structure
## Contents
- `install_mars835.sql` - Master installation script with SPOOL logging
- `rollback_mars835.sql` - Master rollback script
- `01_MARS_835_*.sql` - Individual installation scripts
- `91_MARS_835_*.sql` - Individual rollback scripts
- `track_package_versions.sql` - Package version tracking
- `verify_packages_version.sql` - Package verification
## Prerequisites
- Oracle Database 23ai
- ADMIN user access (required for all MARS installations)
- ENV_MANAGER v3.1.0+
- Required schema privileges
## Installation
### Option 1: Master Script (Recommended)
```powershell
# IMPORTANT: Execute as ADMIN user for proper privilege management
Get-Content "MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/install_mars835.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high"
# Log file created: log/INSTALL_MARS_835_<PDB>_<timestamp>.log
```
### Option 2: Individual Scripts
```powershell
# IMPORTANT: Execute as ADMIN user
Get-Content "01_MARS_835_*.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high"
Get-Content "02_MARS_835_*.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high"
# ... etc
```
## Verification
```sql
-- Verify package versions
SELECT PACKAGE_NAME.GET_VERSION() FROM DUAL;
-- Check for errors (ADMIN user checks specific schema)
SELECT * FROM ALL_ERRORS
WHERE OWNER = 'CT_MRDS' -- Replace with target schema
AND NAME = 'PACKAGE_NAME';
-- Verify no untracked changes
SELECT ENV_MANAGER.CHECK_PACKAGE_CHANGES('CT_MRDS', 'PACKAGE_NAME') FROM DUAL;
```
## Rollback
```powershell
# IMPORTANT: Execute as ADMIN user
Get-Content "MARS_Packages/REL01_POST_DEACTIVATION/MARS-835/rollback_mars835.sql" | sql "ADMIN/Cloudpass#34@ggmichalski_high"
```
**NOTE**: Rollback for data exports is **NOT RECOMMENDED** as it would delete exported files from OCI buckets. Only use rollback if export failed and needs to be restarted.
## Expected Changes
### Data Export Summary
**6 CSDB tables exported from OU_CSDB schema:**
**Group 1: Split DATA + HIST (Time Critical)**
1. **DEBT** - Last 6 months → DATA, Older → HIST
2. **DEBT_DAILY** - Last 6 months → DATA, Older → HIST
**Group 2: HIST Only (Weekend Bulk)**
3. **INSTR_RAT_FULL** - All data → HIST
4. **INSTR_DESC_FULL** - All data → HIST
5. **ISSUER_RAT_FULL** - All data → HIST
6. **ISSUER_DESC_FULL** - All data → HIST
### Bucket Destinations (DEV environment)
- **DATA Bucket**: `mrds_data_dev/ODS/CSDB/` (CSV format)
- **HIST Bucket**: `mrds_hist_dev/ARCHIVE/CSDB/` (Parquet with partitioning)
### Column Mappings
- **All tables**: `A_ETL_LOAD_SET_FK` renamed to `A_WORKFLOW_HISTORY_KEY`
- **DEBT**: Removed columns: `IDIRDEPOSITORY`, `VA_BONDDURATION`
- **DEBT_DAILY**: Removed columns: `STEPID`, `PROGRAMNAME`, `PROGRAMCEILING`, `PROGRAMSTATUS`, `ISSUERNACE21SECTOR`, `INSTRUMENTQUOTATIONBASIS`
## Testing
### Post-Export Verification
1. **Verify CSV files in DATA bucket** (DEBT, DEBT_DAILY - last 6 months):
```sql
-- Check exported files
SELECT object_name, bytes
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => 'DEF_CRED_ARN',
location_uri => 'https://objectstorage.region.oraclecloud.com/n/namespace/b/mrds_data_dev/o/ODS/CSDB/'
)) WHERE object_name LIKE '%CSDB_DEBT%';
```
2. **Verify Parquet files in HIST bucket** (all 6 tables):
```sql
-- Check archived files with Hive partitioning
SELECT object_name, bytes
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => 'DEF_CRED_ARN',
location_uri => 'https://objectstorage.region.oraclecloud.com/n/namespace/b/mrds_hist_dev/o/ARCHIVE/CSDB/'
)) WHERE object_name LIKE '%PARTITION_YEAR=%';
```
3. **Validate row counts match source tables**:
```sql
-- Compare counts between source and exported data
SELECT COUNT(*) FROM OU_CSDB.DEBT;
SELECT COUNT(*) FROM ODS.CSDB_DEBT_ODS; -- External table pointing to DATA
SELECT COUNT(*) FROM ODS.CSDB_DEBT_ARCHIVE; -- External table pointing to HIST
```
4. **Verify column mappings**:
```sql
-- Check A_WORKFLOW_HISTORY_KEY exists in exported data
SELECT A_WORKFLOW_HISTORY_KEY, COUNT(*)
FROM ODS.CSDB_DEBT_ARCHIVE
GROUP BY A_WORKFLOW_HISTORY_KEY;
```
## Known Issues
### Timing Constraints
- **DATA exports (DEBT, DEBT_DAILY)**: Must execute during parallel old+new loads phase after Production deployment
- **HIST exports (all 6 tables)**: Can run anytime, recommended for weekend bulk execution to avoid interference
### Environment-Specific Configuration
- Bucket names must be adjusted for each environment:
- DEV: `mrds_data_dev`, `mrds_hist_dev`
- TEST: `mrds_data_test`, `mrds_hist_test`
- PROD: `mrds_data_prod`, `mrds_hist_prod`
### Data Cutoff Date
- Export scripts use 6-month cutoff date calculated as `ADD_MONTHS(SYSDATE, -6)`
- Verify cutoff aligns with business requirements before execution
### One-Time Execution
- This is a **ONE-TIME data migration** package
- After successful execution, package should be **deactivated** (moved to REL01_POST_DEACTIVATION)
- Do not re-run unless explicitly required for data refresh
## Related
- **JIRA**: MARS-835 - CSDB Data Export to External Tables
- **Confluence**: FILE_MANAGER package - MRDS - Technical Team
- **Confluence**: Table Setup Guide for FILE PROCESSOR System
- **Source Schema**: OU_CSDB (Operational Database)
- **Target Schema**: ODS (External Tables)
- **Migration Type**: One-time bulk export (deactivated post-execution)
---
**Author:** Grzegorz Michalski
**Date:** 2025-12-04
**Version:** 1.0.0

View File

@@ -1,207 +0,0 @@
# MARS-835: Required External Tables for Smart Column Mapping
## Overview
This document lists all external tables required for MARS-835 data exports using DATA_EXPORTER v2.4.0 with Smart Column Mapping feature.
**Purpose**: Smart Column Mapping ensures CSV files are generated with columns in the EXACT order expected by external tables, preventing NULL values due to Oracle's positional CSV mapping.
---
## Required External Tables
### Group 1: DATA Bucket (CSV Format) - **CRITICAL**
#### 1. ODS.CSDB_DEBT_DATA_ODS
- **Source Table**: OU_CSDB.LEGACY_DEBT
- **Format**: CSV
- **Bucket**: DATA (mrds_data_dev/ODS/CSDB/CSDB_DEBT/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY (position 2 recommended)
- **Critical**: Must use Smart Column Mapping to avoid NULL values in A_WORKFLOW_HISTORY_KEY
#### 2. ODS.CSDB_DEBT_DAILY_DATA_ODS
- **Source Table**: OU_CSDB.LEGACY_DEBT_DAILY
- **Format**: CSV
- **Bucket**: DATA (mrds_data_dev/ODS/CSDB/CSDB_DEBT_DAILY/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY (position 2 recommended)
- **Critical**: Must use Smart Column Mapping to avoid NULL values in A_WORKFLOW_HISTORY_KEY
---
### Group 2: ARCHIVE Bucket (Parquet Format) - **RECOMMENDED**
#### 3. ODS.CSDB_DEBT_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_DEBT
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_DEBT/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
- **Note**: Parquet uses schema-based mapping (column order less critical but Smart Column Mapping ensures consistency)
#### 4. ODS.CSDB_DEBT_DAILY_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_DEBT_DAILY
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_DEBT_DAILY/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
#### 5. ODS.CSDB_INSTR_RAT_FULL_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_INSTR_RAT_FULL
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_INSTR_RAT_FULL/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
#### 6. ODS.CSDB_INSTR_DESC_FULL_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_INSTR_DESC_FULL
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_INSTR_DESC_FULL/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
#### 7. ODS.CSDB_ISSUER_RAT_FULL_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_ISSUER_RAT_FULL
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_ISSUER_RAT_FULL/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
#### 8. ODS.CSDB_ISSUER_DESC_FULL_ARCHIVE
- **Source Table**: OU_CSDB.LEGACY_ISSUER_DESC_FULL
- **Format**: Parquet with Hive partitioning
- **Bucket**: ARCHIVE (mrds_hist_dev/ARCHIVE/CSDB/CSDB_ISSUER_DESC_FULL/)
- **Key Column Mapping**: A_ETL_LOAD_SET_FK → A_WORKFLOW_HISTORY_KEY
---
## External Table Column Order Requirements
### **CRITICAL for CSV Tables** (DATA bucket):
All CSV external tables MUST have **A_WORKFLOW_HISTORY_KEY at position 2**:
```
Position 1: A_KEY (NUMBER)
Position 2: A_WORKFLOW_HISTORY_KEY (NUMBER) ← MUST BE HERE!
Position 3+: Other columns in any order
```
**Reason**: Oracle External Tables with CSV format use **positional mapping** (the header row is ignored). If the source table has A_ETL_LOAD_SET_FK at position 72, a naive export also writes it at position 72 in the CSV, while the external table expects A_WORKFLOW_HISTORY_KEY at position 2. The external table then reads whatever the CSV has at position 2 (which might be a DATE column) as NUMBER → conversion fails → NULL value.
**Solution**: Smart Column Mapping (v2.4.0) generates CSV columns in EXTERNAL TABLE order, ensuring position 2 has the correct NUMBER value.
### **OPTIONAL for Parquet Tables** (ARCHIVE bucket):
Parquet format uses **schema-based mapping** (column names). Column order doesn't matter, but Smart Column Mapping provides consistency.
---
## Creation Script Example
### CSV External Table (CRITICAL - Correct Column Order)
```sql
-- Example: ODS.CSDB_DEBT_DATA_ODS
-- IMPORTANT: A_WORKFLOW_HISTORY_KEY must be at position 2!
BEGIN
ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE(
pTableName => 'CSDB_DEBT_DATA_ODS',
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE',
pPrefix => 'ODS/CSDB/CSDB_DEBT',
pBucketUri => CT_MRDS.ENV_MANAGER.gvDataBucketUri,
pFormat => 'CSV' -- Uses positional mapping!
);
END;
/
-- Verify column order (A_WORKFLOW_HISTORY_KEY should be position 2)
SELECT column_id, column_name, data_type
FROM all_tab_columns
WHERE table_name = 'CSDB_DEBT_DATA_ODS'
AND owner = 'ODS'
ORDER BY column_id;
```
### Parquet External Table (Optional Column Order)
```sql
-- Example: ODS.CSDB_DEBT_ARCHIVE
-- Column order flexible (schema-based mapping)
BEGIN
ODS.FILE_MANAGER_ODS.CREATE_EXTERNAL_TABLE(
pTableName => 'CSDB_DEBT_ARCHIVE',
pTemplateTableName => 'CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE',
pPrefix => 'ARCHIVE/CSDB/CSDB_DEBT',
pBucketUri => CT_MRDS.ENV_MANAGER.gvArchiveBucketUri,
pFormat => 'PARQUET' -- Uses schema-based mapping
);
END;
/
```
---
## Template Tables Required
All external tables require corresponding template tables in CT_ET_TEMPLATES schema:
- `CT_ET_TEMPLATES.CSDB_DEBT_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_DEBT_DAILY_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_INSTR_RAT_FULL_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_INSTR_DESC_FULL_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_ISSUER_RAT_FULL_TEMPLATE`
- `CT_ET_TEMPLATES.CSDB_ISSUER_DESC_FULL_TEMPLATE`
**Note**: Template tables must be created by ADMIN or CT_ET_TEMPLATES user (MRDS_LOADER cannot create them).
---
## Verification Checklist
Before running MARS-835 exports:
- [ ] All 8 external tables exist in ODS schema
- [ ] CSV tables (DATA bucket) have A_WORKFLOW_HISTORY_KEY at position 2
- [ ] Template tables exist in CT_ET_TEMPLATES schema
- [ ] MRDS_LOADER has EXECUTE privilege on ODS.FILE_MANAGER_ODS
- [ ] ODS schema has access to CT_MRDS.ENV_MANAGER for logging
- [ ] DATA_EXPORTER v2.4.0 deployed with Smart Column Mapping feature
---
## Testing Verification
After export, verify A_WORKFLOW_HISTORY_KEY is not NULL:
```sql
-- CSV tables (should be 100% populated)
SELECT 'CSDB_DEBT_DATA_ODS' AS TABLE_NAME,
COUNT(*) AS TOTAL_ROWS,
COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT,
ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT
FROM ODS.CSDB_DEBT_DATA_ODS;
SELECT 'CSDB_DEBT_DAILY_DATA_ODS' AS TABLE_NAME,
COUNT(*) AS TOTAL_ROWS,
COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT,
ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT
FROM ODS.CSDB_DEBT_DAILY_DATA_ODS;
-- Parquet tables (should also be 100% populated)
SELECT 'CSDB_DEBT_ARCHIVE' AS TABLE_NAME,
COUNT(*) AS TOTAL_ROWS,
COUNT(A_WORKFLOW_HISTORY_KEY) AS NON_NULL_COUNT,
ROUND(COUNT(A_WORKFLOW_HISTORY_KEY) * 100.0 / NULLIF(COUNT(*), 0), 2) AS SUCCESS_RATE_PCT
FROM ODS.CSDB_DEBT_ARCHIVE;
```
**Expected Result**: SUCCESS_RATE_PCT = 100.00 for all tables
---
## Related Documentation
- [DATA_EXPORTER v2.4.0 Smart Column Mapping Examples](../MARS-835-PREHOOK/current_version/v2.3.0/DATA_EXPORTER_v2.4.0_Smart_Column_Mapping_Examples.sql)
- [Oracle External Tables Column Order Issue](../../confluence/additions/Oracle_External_Tables_Column_Order_Issue.md)
- [MARS-835 README](README.md)
---
**Last Updated**: 2026-01-09
**Author**: GitHub Copilot (MARS-835 Update)

View File

@@ -1,80 +0,0 @@
-- MARS-835 diagnostic script: list exported CSDB files in the OCI DATA and
-- ARCHIVE buckets, then test DBMS_CLOUD.DELETE_OBJECT on one known object.
-- NOTE(review): bucket URLs, OCI namespace and the test file name are
-- hard-coded for one specific DEV environment — adjust before reuse.
-- WARNING: the final section DELETES a real object from the DATA bucket.
SET SERVEROUTPUT ON SIZE UNLIMITED
SET DEFINE OFF
DECLARE
-- OCI credential used for every DBMS_CLOUD call (resource principal, no stored secret)
vCredential VARCHAR2(100) := 'OCI$RESOURCE_PRINCIPAL';
-- Base URIs of the DATA (CSV) and ARCHIVE/history (Parquet) buckets
vDataBucket VARCHAR2(200) := 'https://objectstorage.eu-frankfurt-1.oraclecloud.com/n/frtgjxu7zl7c/b/data/o/';
vArchiveBucket VARCHAR2(200) := 'https://objectstorage.eu-frankfurt-1.oraclecloud.com/n/frtgjxu7zl7c/b/history/o/';
-- Running file counter, reset before each section
vCount NUMBER := 0;
BEGIN
-- Section 1: CSV files for DEBT in the DATA bucket
DBMS_OUTPUT.PUT_LINE('=== Checking CSV files in DATA bucket ===');
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredential,
location_uri => vDataBucket || 'ODS/CSDB/CSDB_DEBT/'
))
-- Exported CSV files are named after the legacy source table
WHERE object_name LIKE 'LEGACY_DEBT%'
) LOOP
vCount := vCount + 1;
DBMS_OUTPUT.PUT_LINE(' [' || vCount || '] ' || rec.object_name);
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total CSV files DEBT: ' || vCount);
vCount := 0;
-- Section 2: CSV files for DEBT_DAILY in the DATA bucket
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredential,
location_uri => vDataBucket || 'ODS/CSDB/CSDB_DEBT_DAILY/'
))
WHERE object_name LIKE 'LEGACY_DEBT_DAILY%'
) LOOP
vCount := vCount + 1;
DBMS_OUTPUT.PUT_LINE(' [' || vCount || '] ' || rec.object_name);
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total CSV files DEBT_DAILY: ' || vCount);
DBMS_OUTPUT.PUT_LINE(CHR(10) || '=== Checking Parquet files in ARCHIVE bucket ===');
vCount := 0;
-- Section 3: Parquet files for DEBT — only a 5-object sample (ROWNUM cap)
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredential,
location_uri => vArchiveBucket || 'ARCHIVE/CSDB/CSDB_DEBT/'
))
WHERE ROWNUM <= 5
) LOOP
vCount := vCount + 1;
DBMS_OUTPUT.PUT_LINE(' [' || vCount || '] ' || rec.object_name);
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total Parquet files DEBT (first 5): ' || vCount);
vCount := 0;
-- Section 4: Parquet files for DEBT_DAILY — full listing, no filter,
-- so directory placeholder entries (if any) are counted as well
FOR rec IN (
SELECT object_name
FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
credential_name => vCredential,
location_uri => vArchiveBucket || 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/'
))
) LOOP
vCount := vCount + 1;
DBMS_OUTPUT.PUT_LINE(' [' || vCount || '] ' || rec.object_name);
END LOOP;
DBMS_OUTPUT.PUT_LINE('Total Parquet files DEBT_DAILY: ' || vCount);
-- Section 5: destructive test — deletes ONE hard-coded CSV object.
-- Failure is reported but not raised (best-effort diagnostic).
DBMS_OUTPUT.PUT_LINE(CHR(10) || '=== Now testing DELETE_OBJECT ===');
DBMS_OUTPUT.PUT_LINE('Testing delete for: ODS/CSDB/CSDB_DEBT/LEGACY_DEBT_202510_1_20260213T092239041072Z.csv');
BEGIN
DBMS_CLOUD.DELETE_OBJECT(
credential_name => vCredential,
object_uri => vDataBucket || 'ODS/CSDB/CSDB_DEBT/LEGACY_DEBT_202510_1_20260213T092239041072Z.csv'
);
DBMS_OUTPUT.PUT_LINE('SUCCESS: File deleted');
EXCEPTION
WHEN OTHERS THEN
DBMS_OUTPUT.PUT_LINE('ERROR: ' || SQLERRM);
END;
END;
/

View File

@@ -1,126 +0,0 @@
--=============================================================================================================================
-- MARS-835 Manual Cleanup - Delete remaining files after rollback
--
-- Deletes exported CSDB files left behind in OCI buckets:
--   1. DEBT CSV files         (DATA bucket,    ODS/CSDB/CSDB_DEBT/)
--   2. DEBT_DAILY CSV files   (DATA bucket,    ODS/CSDB/CSDB_DEBT_DAILY/)
--   3. DEBT Parquet files     (ARCHIVE bucket, ARCHIVE/CSDB/CSDB_DEBT/)
--   4. DEBT_DAILY Parquet     (ARCHIVE bucket, ARCHIVE/CSDB/CSDB_DEBT_DAILY/)
--
-- WARNING: destructive, one-time cleanup. Deleted objects cannot be recovered.
--=============================================================================================================================
SET SERVEROUTPUT ON SIZE UNLIMITED
SET DEFINE OFF
DECLARE
    vDataBucketUri    VARCHAR2(500);
    vArchiveBucketUri VARCHAR2(500);
    vCredentialName   VARCHAR2(100);

    -- Lists all objects under pBucketUri || pFolder and deletes each one.
    --   pStepLabel   : section header printed before processing
    --   pNamePattern : optional LIKE pattern restricting which objects are
    --                  deleted (NULL = delete everything in the folder)
    -- Directory placeholder entries (names ending in '/') are always skipped:
    -- DBMS_CLOUD.DELETE_OBJECT cannot delete them. The Parquet loops of the
    -- original script already excluded them; the CSV loops now do too.
    PROCEDURE delete_folder(
        pStepLabel   IN VARCHAR2,
        pBucketUri   IN VARCHAR2,
        pFolder      IN VARCHAR2,
        pNamePattern IN VARCHAR2 DEFAULT NULL
    ) IS
        vFileCount NUMBER := 0;
    BEGIN
        DBMS_OUTPUT.PUT_LINE(CHR(10) || pStepLabel);
        FOR rec IN (
            SELECT object_name
            FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
                credential_name => vCredentialName,
                location_uri => pBucketUri || pFolder
            ))
            WHERE object_name NOT LIKE '%/' -- skip directory placeholders
              AND (pNamePattern IS NULL OR object_name LIKE pNamePattern)
        ) LOOP
            BEGIN
                DBMS_CLOUD.DELETE_OBJECT(
                    credential_name => vCredentialName,
                    object_uri => pBucketUri || pFolder || rec.object_name
                );
                DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name);
                vFileCount := vFileCount + 1;
            EXCEPTION
                WHEN OTHERS THEN
                    -- Best-effort cleanup: report the failure and continue
                    DBMS_OUTPUT.PUT_LINE(' ERROR: ' || rec.object_name || ' - ' || SQLERRM);
            END;
        END LOOP;
        DBMS_OUTPUT.PUT_LINE('Total deleted: ' || vFileCount);
    END delete_folder;
BEGIN
    -- Resolve environment-specific bucket URIs and the OCI credential
    vDataBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('DATA');
    vArchiveBucketUri := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('ARCHIVE');
    vCredentialName := CT_MRDS.ENV_MANAGER.gvCredentialName;
    DBMS_OUTPUT.PUT_LINE('========================================================================');
    DBMS_OUTPUT.PUT_LINE('MARS-835 Manual Cleanup');
    DBMS_OUTPUT.PUT_LINE('========================================================================');
    -- CSV exports are named after the legacy source tables, hence the filters
    delete_folder('1. Deleting DEBT CSV files from DATA bucket...',
                  vDataBucketUri, 'ODS/CSDB/CSDB_DEBT/', 'LEGACY_DEBT%');
    delete_folder('2. Deleting DEBT_DAILY CSV files from DATA bucket...',
                  vDataBucketUri, 'ODS/CSDB/CSDB_DEBT_DAILY/', 'LEGACY_DEBT_DAILY%');
    -- Parquet archives: delete every file in the folder (no name filter)
    delete_folder('3. Deleting DEBT Parquet files from ARCHIVE bucket...',
                  vArchiveBucketUri, 'ARCHIVE/CSDB/CSDB_DEBT/');
    delete_folder('4. Deleting DEBT_DAILY Parquet files from ARCHIVE bucket...',
                  vArchiveBucketUri, 'ARCHIVE/CSDB/CSDB_DEBT_DAILY/');
    DBMS_OUTPUT.PUT_LINE(CHR(10) || '========================================================================');
    DBMS_OUTPUT.PUT_LINE('Manual cleanup completed');
    DBMS_OUTPUT.PUT_LINE('========================================================================');
END;
/

View File

@@ -1,93 +0,0 @@
-- MARS-835: Manual cleanup of Parquet files only (after bugfix)
-- Description: Removes orphaned Parquet files from ARCHIVE bucket
-- Usage: Execute as CT_MRDS user
SET SERVEROUTPUT ON SIZE UNLIMITED
DECLARE
    vCredentialName VARCHAR2(100) := CT_MRDS.ENV_MANAGER.gvCredentialName;
    vHistBucketUri  VARCHAR2(200) := CT_MRDS.FILE_MANAGER.GET_BUCKET_URI('ARCHIVE');
    vStartTime      TIMESTAMP     := SYSTIMESTAMP;
    vElapsedTime    INTERVAL DAY TO SECOND;

    -- Deletes every non-directory object under ARCHIVE/CSDB/<pTableName>/,
    -- reporting each deletion and a per-table total. Objects already gone
    -- (ORA-20404) are skipped silently; other errors are logged and the
    -- cleanup continues with the next object.
    PROCEDURE purge_parquet(pTableName IN VARCHAR2) IS
        vDeleted NUMBER := 0;
        vPrefix  VARCHAR2(300) := 'ARCHIVE/CSDB/' || pTableName || '/';
    BEGIN
        DBMS_OUTPUT.PUT_LINE(chr(10) || 'Deleting ' || pTableName || ' Parquet files...');
        FOR rec IN (
            SELECT object_name
            FROM TABLE(DBMS_CLOUD.LIST_OBJECTS(
                credential_name => vCredentialName,
                location_uri => vHistBucketUri || vPrefix
            ))
            WHERE object_name NOT LIKE '%/' -- Exclude directories
        ) LOOP
            BEGIN
                DBMS_CLOUD.DELETE_OBJECT(
                    credential_name => vCredentialName,
                    object_uri => vHistBucketUri || vPrefix || rec.object_name
                );
                DBMS_OUTPUT.PUT_LINE(' Deleted: ' || rec.object_name);
                vDeleted := vDeleted + 1;
            EXCEPTION
                WHEN OTHERS THEN
                    IF SQLCODE = -20404 THEN
                        -- Object vanished between LIST and DELETE: fine for cleanup
                        DBMS_OUTPUT.PUT_LINE(' Skipped (not found): ' || rec.object_name);
                    ELSE
                        DBMS_OUTPUT.PUT_LINE(' ERROR: ' || SQLERRM || ' - ' || rec.object_name);
                    END IF;
            END;
        END LOOP;
        DBMS_OUTPUT.PUT_LINE(pTableName || ' Parquet files deleted: ' || vDeleted);
    END purge_parquet;
BEGIN
    DBMS_OUTPUT.PUT_LINE('==========================================================');
    DBMS_OUTPUT.PUT_LINE('MANUAL CLEANUP: Parquet files only');
    DBMS_OUTPUT.PUT_LINE('==========================================================');
    DBMS_OUTPUT.PUT_LINE('Start Time: ' || TO_CHAR(vStartTime, 'YYYY-MM-DD HH24:MI:SS'));
    DBMS_OUTPUT.PUT_LINE('Credential: ' || vCredentialName);
    DBMS_OUTPUT.PUT_LINE('Archive Bucket: ' || vHistBucketUri);
    DBMS_OUTPUT.PUT_LINE('----------------------------------------------------------');
    -- The two affected tables share identical cleanup logic
    purge_parquet('CSDB_DEBT');
    purge_parquet('CSDB_DEBT_DAILY');
    -- Final summary
    vElapsedTime := SYSTIMESTAMP - vStartTime;
    DBMS_OUTPUT.PUT_LINE('----------------------------------------------------------');
    DBMS_OUTPUT.PUT_LINE('End Time: ' || TO_CHAR(SYSTIMESTAMP, 'YYYY-MM-DD HH24:MI:SS'));
    DBMS_OUTPUT.PUT_LINE('Elapsed Time: ' || vElapsedTime);
    DBMS_OUTPUT.PUT_LINE('==========================================================');
    DBMS_OUTPUT.PUT_LINE('MANUAL CLEANUP COMPLETED');
    DBMS_OUTPUT.PUT_LINE('==========================================================');
END;
/

View File

@@ -1,92 +0,0 @@
-- ===================================================================
-- Simple Package Version Tracking Script
-- ===================================================================
-- Purpose: Track specified Oracle package versions
-- Author: Grzegorz Michalski
-- Date: 2025-12-04
-- Version: 3.1.0 - List-Based Edition
--
-- USAGE:
-- 1. Edit package list below (add/remove packages as needed)
-- 2. Include in your install/rollback script: @@track_package_versions.sql
-- ===================================================================
SET SERVEROUTPUT ON;
DECLARE
-- Record holding one tracked package (schema, name, reported version)
TYPE t_package_rec IS RECORD (
owner VARCHAR2(50),
name VARCHAR2(50),
version VARCHAR2(50)
);
TYPE t_packages IS TABLE OF t_package_rec;
TYPE t_string_array IS TABLE OF VARCHAR2(100);
-- ===================================================================
-- PACKAGE LIST - Edit this array to specify packages to track
-- ===================================================================
-- Add or remove entries as needed for your MARS issue
-- Format: 'SCHEMA.PACKAGE_NAME'
-- ===================================================================
vPackageList t_string_array := t_string_array(
'CT_MRDS.FILE_MANAGER',
'ODS.FILE_MANAGER_ODS'
);
-- ===================================================================
vPackages t_packages := t_packages(); -- successfully queried packages
vVersion VARCHAR2(50); -- version string returned by GET_VERSION()
vCount NUMBER := 0; -- number of packages tracked without error
vOwner VARCHAR2(50);
vPackageName VARCHAR2(50);
vDotPos NUMBER; -- position of the '.' separating schema from package name
BEGIN
DBMS_OUTPUT.PUT_LINE('========================================');
DBMS_OUTPUT.PUT_LINE('Package Version Tracking');
DBMS_OUTPUT.PUT_LINE('========================================');
-- Process each package in the list
FOR i IN 1..vPackageList.COUNT LOOP
vDotPos := INSTR(vPackageList(i), '.');
-- Entries without a '.' are silently ignored (malformed list entry)
IF vDotPos > 0 THEN
vOwner := SUBSTR(vPackageList(i), 1, vDotPos - 1);
vPackageName := SUBSTR(vPackageList(i), vDotPos + 1);
BEGIN
-- Dynamic call: each listed package is expected to expose a
-- GET_VERSION() function returning its version as a string.
-- The identifier comes from the hard-coded list above, not from
-- user input, so no injection risk here.
EXECUTE IMMEDIATE 'SELECT ' || vPackageList(i) || '.GET_VERSION() FROM DUAL'
INTO vVersion;
vPackages.EXTEND;
vPackages(vPackages.COUNT).owner := vOwner;
vPackages(vPackages.COUNT).name := vPackageName;
vPackages(vPackages.COUNT).version := vVersion;
-- Persist the version in the central tracking table via ENV_MANAGER
CT_MRDS.ENV_MANAGER.TRACK_PACKAGE_VERSION(
pPackageOwner => vOwner,
pPackageName => vPackageName,
pPackageVersion => vVersion,
pPackageBuildDate => TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS'),
pPackageAuthor => 'Grzegorz Michalski'
);
vCount := vCount + 1;
EXCEPTION
WHEN OTHERS THEN
-- Best-effort: a missing package or failed tracking call must not
-- abort the install script that includes this file
DBMS_OUTPUT.PUT_LINE('Error tracking ' || vPackageList(i) || ': ' || SQLERRM);
END;
END IF;
END LOOP;
-- Display results
IF vPackages.COUNT > 0 THEN
DBMS_OUTPUT.PUT_LINE('Packages tracked: ' || vCount || ' of ' || vPackages.COUNT);
FOR i IN 1..vPackages.COUNT LOOP
DBMS_OUTPUT.PUT_LINE(' ' || vPackages(i).owner || '.' || vPackages(i).name ||
' (v' || vPackages(i).version || ')');
END LOOP;
ELSE
DBMS_OUTPUT.PUT_LINE('No packages found in list');
END IF;
DBMS_OUTPUT.PUT_LINE('========================================');
END;
/

View File

@@ -1,62 +0,0 @@
-- ===================================================================
-- Universal Package Version Verification Script
-- ===================================================================
-- Purpose: Verify all tracked Oracle packages for code changes
-- Author: Grzegorz Michalski
-- Date: 2025-12-04
-- Version: 1.0.0
--
-- USAGE:
-- Include at the end of install/rollback scripts: @@verify_packages_version.sql
--
-- OUTPUT:
-- - List of all tracked packages with their current status
-- - OK: Package has not changed since last tracking
-- - WARNING: Package code changed without version update
-- ===================================================================
-- SQL*Plus display settings: wide lines, long pages, no row-count feedback
SET LINESIZE 200
SET PAGESIZE 1000
SET FEEDBACK OFF
PROMPT
PROMPT ========================================
PROMPT Package Version Verification
PROMPT ========================================
PROMPT
-- Column widths for the report below
COLUMN PACKAGE_OWNER FORMAT A15
COLUMN PACKAGE_NAME FORMAT A20
COLUMN VERSION FORMAT A10
COLUMN STATUS FORMAT A80
-- Take the most recent tracking row per package (RN = 1), then ask
-- ENV_MANAGER whether the currently installed code still matches what
-- was recorded at tracking time.
SELECT
PACKAGE_OWNER,
PACKAGE_NAME,
PACKAGE_VERSION AS VERSION,
CT_MRDS.ENV_MANAGER.CHECK_PACKAGE_CHANGES(PACKAGE_OWNER, PACKAGE_NAME) AS STATUS
FROM (
SELECT
PACKAGE_OWNER,
PACKAGE_NAME,
PACKAGE_VERSION,
-- Latest tracking entry first within each (owner, package) group
ROW_NUMBER() OVER (PARTITION BY PACKAGE_OWNER, PACKAGE_NAME ORDER BY TRACKING_DATE DESC) AS RN
FROM CT_MRDS.A_PACKAGE_VERSION_TRACKING
)
WHERE RN = 1
ORDER BY PACKAGE_OWNER, PACKAGE_NAME;
PROMPT
PROMPT ========================================
PROMPT Verification Complete
PROMPT ========================================
PROMPT
PROMPT Legend:
PROMPT OK - Package has not changed since last tracking
PROMPT WARNING - Package code changed without version update
PROMPT
PROMPT For detailed hash information, use:
PROMPT SELECT ENV_MANAGER.GET_PACKAGE_HASH_INFO('OWNER', 'PACKAGE') FROM DUAL;
PROMPT ========================================
-- Restore default feedback for any scripts that run after this one
SET FEEDBACK ON