diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql index bd37230..298ac97 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/A_PARALLEL_EXPORT_CHUNKS.sql @@ -26,7 +26,7 @@ END; / CREATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( - CHUNK_ID NUMBER PRIMARY KEY, + CHUNK_ID NUMBER NOT NULL, TASK_NAME VARCHAR2(100) NOT NULL, YEAR_VALUE VARCHAR2(4) NOT NULL, MONTH_VALUE VARCHAR2(2) NOT NULL, @@ -47,14 +47,16 @@ CREATE TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS ( STATUS VARCHAR2(30) DEFAULT 'PENDING' NOT NULL, ERROR_MESSAGE VARCHAR2(4000), EXPORT_TIMESTAMP TIMESTAMP, - CREATED_DATE TIMESTAMP DEFAULT SYSTIMESTAMP NOT NULL + CREATED_DATE TIMESTAMP DEFAULT SYSTIMESTAMP NOT NULL, + CONSTRAINT PK_PARALLEL_EXPORT_CHUNKS PRIMARY KEY (TASK_NAME, CHUNK_ID) ); -CREATE INDEX IX_PARALLEL_CHUNKS_TASK ON CT_MRDS.A_PARALLEL_EXPORT_CHUNKS(TASK_NAME); +-- Index for status-based queries (e.g., WHERE STATUS = 'FAILED' AND TASK_NAME = ?) +CREATE INDEX IX_PARALLEL_CHUNKS_STATUS_TASK ON CT_MRDS.A_PARALLEL_EXPORT_CHUNKS(STATUS, TASK_NAME); -COMMENT ON TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS IS 'Permanent table for parallel export chunk processing (DBMS_PARALLEL_EXECUTE) - permanent because GTT data not visible in parallel callback sessions'; -COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.CHUNK_ID IS 'Unique chunk identifier (partition number)'; -COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.TASK_NAME IS 'DBMS_PARALLEL_EXECUTE task name for cleanup'; +COMMENT ON TABLE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS IS 'Permanent table for parallel export chunk processing (DBMS_PARALLEL_EXECUTE) - permanent because GTT data not visible in parallel callback sessions. 
PK: (TASK_NAME, CHUNK_ID) ensures session isolation for concurrent exports.'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.CHUNK_ID IS 'Chunk identifier within task (partition number) - unique per TASK_NAME, not globally'; +COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.TASK_NAME IS 'DBMS_PARALLEL_EXECUTE task name - session isolation key, part of composite PK with CHUNK_ID'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.YEAR_VALUE IS 'Partition year (YYYY)'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.MONTH_VALUE IS 'Partition month (MM)'; COMMENT ON COLUMN CT_MRDS.A_PARALLEL_EXPORT_CHUNKS.SCHEMA_NAME IS 'Schema owning the source table'; diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb index f24cc3a..13771b1 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkb @@ -558,7 +558,8 @@ AS **/ PROCEDURE EXPORT_PARTITION_PARALLEL ( pStartId IN NUMBER, - pEndId IN NUMBER + pEndId IN NUMBER, + pTaskName IN VARCHAR2 DEFAULT NULL ) IS vYear VARCHAR2(4); vMonth VARCHAR2(2); @@ -575,9 +576,12 @@ AS vFileBaseName VARCHAR2(1000); vMaxFileSize NUMBER; vJobClass VARCHAR2(128); + vTaskName VARCHAR2(128); vParameters VARCHAR2(4000); BEGIN - -- Retrieve chunk context from global temporary table + -- Retrieve chunk context from A_PARALLEL_EXPORT_CHUNKS table + -- CRITICAL: Filter by CHUNK_ID and TASK_NAME for precise session isolation + -- pTaskName parameter passed from RUN_TASK ensures deterministic single-row retrieval SELECT YEAR_VALUE, MONTH_VALUE, @@ -593,7 +597,8 @@ AS FORMAT_TYPE, FILE_BASE_NAME, MAX_FILE_SIZE, - JOB_CLASS + JOB_CLASS, + TASK_NAME INTO vYear, vMonth, @@ -609,18 +614,22 @@ AS vFormat, vFileBaseName, vMaxFileSize, - vJobClass + vJobClass, + vTaskName FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS - WHERE CHUNK_ID 
= pStartId; + WHERE CHUNK_ID = pStartId + AND TASK_NAME = pTaskName; - vParameters := 'Parallel task - Year: ' || vYear || ', Month: ' || vMonth || ', ChunkID: ' || pStartId; + vParameters := 'Parallel task - Year: ' || vYear || ', Month: ' || vMonth || ', ChunkID: ' || pStartId || ', TaskName: ' || vTaskName; ENV_MANAGER.LOG_PROCESS_EVENT('Starting parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); -- Mark chunk as PROCESSING + -- CRITICAL: Use both CHUNK_ID AND TASK_NAME for session isolation UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS SET STATUS = 'PROCESSING', ERROR_MESSAGE = NULL - WHERE CHUNK_ID = pStartId; + WHERE CHUNK_ID = pStartId + AND TASK_NAME = vTaskName; COMMIT; -- Call the worker procedure @@ -643,26 +652,30 @@ AS ); -- Mark chunk as COMPLETED + -- CRITICAL: Use both CHUNK_ID AND TASK_NAME for session isolation UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS SET STATUS = 'COMPLETED', EXPORT_TIMESTAMP = SYSTIMESTAMP, ERROR_MESSAGE = NULL - WHERE CHUNK_ID = pStartId; + WHERE CHUNK_ID = pStartId + AND TASK_NAME = vTaskName; COMMIT; ENV_MANAGER.LOG_PROCESS_EVENT('Completed parallel export for partition ' || vYear || '/' || vMonth, 'DEBUG', vParameters); EXCEPTION WHEN OTHERS THEN -- Capture error details in variable (SQLERRM cannot be used directly in SQL) - vgMsgTmp := 'Parallel task error for partition ' || vYear || '/' || vMonth || ' (ChunkID: ' || pStartId || '): ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; + vgMsgTmp := 'Parallel task error for partition ' || vYear || '/' || vMonth || ' (ChunkID: ' || pStartId || ', TaskName: ' || vTaskName || '): ' || SQLERRM || cgBL || DBMS_UTILITY.FORMAT_ERROR_BACKTRACE; ENV_MANAGER.LOG_PROCESS_EVENT(vgMsgTmp, 'ERROR', vParameters); -- Mark chunk as FAILED with error message + -- CRITICAL: Use both CHUNK_ID AND TASK_NAME for session isolation -- Use vgMsgTmp variable instead of SQLERRM directly (Oracle limitation in SQL context) UPDATE CT_MRDS.A_PARALLEL_EXPORT_CHUNKS SET 
STATUS = 'FAILED', ERROR_MESSAGE = SUBSTR(vgMsgTmp, 1, 4000) - WHERE CHUNK_ID = pStartId; + WHERE CHUNK_ID = pStartId + AND TASK_NAME = vTaskName; COMMIT; RAISE; @@ -1129,8 +1142,8 @@ AS -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) FOR i IN 1 .. vPartitions.COUNT LOOP MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t - USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s - ON (t.CHUNK_ID = s.chunk_id) + USING (SELECT i AS chunk_id, vTaskName AS task_name, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id AND t.TASK_NAME = s.task_name) WHEN NOT MATCHED THEN INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, @@ -1139,33 +1152,34 @@ AS vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, pCredentialName, 'PARQUET', NULL, pTemplateTableName, 104857600, pJobClass, 'PENDING') WHEN MATCHED THEN - UPDATE SET TASK_NAME = vTaskName, - STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + -- Match found: chunk exists for SAME task (composite PK: TASK_NAME, CHUNK_ID) + -- This handles retry scenario: reset FAILED chunks to PENDING for re-processing + UPDATE SET STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; END LOOP; COMMIT; - -- Log chunk statistics + -- Log chunk statistics (session-safe: only count chunks for THIS task) DECLARE vPendingCount NUMBER; vFailedCount NUMBER; BEGIN - SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; - SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING' AND TASK_NAME = vTaskName; + SELECT 
COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED' AND TASK_NAME = vTaskName; - ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics for task ' || vTaskName || ': PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); END; -- Create parallel task DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); - -- Define chunks by number range (1 to partition count) - DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + -- Define chunks using SQL query to ensure TASK_NAME isolation + -- CRITICAL: Filter by TASK_NAME to avoid selecting chunks from other concurrent sessions + -- CRITICAL: Use START_ID and END_ID aliases to avoid ORA-00960 ambiguous column naming + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_SQL( task_name => vTaskName, - table_owner => 'CT_MRDS', - table_name => 'A_PARALLEL_EXPORT_CHUNKS', - table_column => 'CHUNK_ID', - chunk_size => 1 -- Each partition is one chunk + sql_stmt => 'SELECT CHUNK_ID AS START_ID, CHUNK_ID AS END_ID FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = ''' || vTaskName || ''' ORDER BY CHUNK_ID', + by_rowid => FALSE ); -- Execute task in parallel @@ -1174,7 +1188,7 @@ AS IF pJobClass IS NOT NULL THEN DBMS_PARALLEL_EXECUTE.RUN_TASK( task_name => vTaskName, - sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id, ''' || vTaskName || '''); END;', language_flag => DBMS_SQL.NATIVE, parallel_level => pParallelDegree, job_class => pJobClass @@ -1182,7 +1196,7 @@ AS ELSE DBMS_PARALLEL_EXECUTE.RUN_TASK( task_name => vTaskName, - sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id, 
''' || vTaskName || '''); END;', language_flag => DBMS_SQL.NATIVE, parallel_level => pParallelDegree ); @@ -1433,8 +1447,8 @@ AS -- Populate chunks table (insert new chunks, preserve FAILED chunks for retry) FOR i IN 1 .. vPartitions.COUNT LOOP MERGE INTO CT_MRDS.A_PARALLEL_EXPORT_CHUNKS t - USING (SELECT i AS chunk_id, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s - ON (t.CHUNK_ID = s.chunk_id) + USING (SELECT i AS chunk_id, vTaskName AS task_name, vPartitions(i).year AS yr, vPartitions(i).month AS mn FROM DUAL) s + ON (t.CHUNK_ID = s.chunk_id AND t.TASK_NAME = s.task_name) WHEN NOT MATCHED THEN INSERT (CHUNK_ID, TASK_NAME, YEAR_VALUE, MONTH_VALUE, SCHEMA_NAME, TABLE_NAME, KEY_COLUMN_NAME, BUCKET_URI, FOLDER_NAME, PROCESSED_COLUMNS, MIN_DATE, MAX_DATE, @@ -1443,33 +1457,34 @@ AS vBucketUri, pFolderName, vProcessedColumnList, pMinDate, pMaxDate, pCredentialName, 'CSV', vFileBaseName, pTemplateTableName, pMaxFileSize, pJobClass, 'PENDING') WHEN MATCHED THEN - UPDATE SET TASK_NAME = vTaskName, - STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, + -- Match found: chunk exists for SAME task (composite PK: TASK_NAME, CHUNK_ID) + -- This handles retry scenario: reset FAILED chunks to PENDING for re-processing + UPDATE SET STATUS = CASE WHEN t.STATUS = 'FAILED' THEN 'PENDING' ELSE t.STATUS END, ERROR_MESSAGE = CASE WHEN t.STATUS = 'FAILED' THEN NULL ELSE t.ERROR_MESSAGE END; END LOOP; COMMIT; - -- Log chunk statistics + -- Log chunk statistics (session-safe: only count chunks for THIS task) DECLARE vPendingCount NUMBER; vFailedCount NUMBER; BEGIN - SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING'; - SELECT COUNT(*) INTO vFailedCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED'; + SELECT COUNT(*) INTO vPendingCount FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'PENDING' AND TASK_NAME = vTaskName; + SELECT COUNT(*) INTO vFailedCount FROM 
CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE STATUS = 'FAILED' AND TASK_NAME = vTaskName; - ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics: PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); + ENV_MANAGER.LOG_PROCESS_EVENT('Chunk statistics for task ' || vTaskName || ': PENDING=' || vPendingCount || ', FAILED (retry)=' || vFailedCount, 'INFO', vParameters); END; -- Create parallel task DBMS_PARALLEL_EXECUTE.CREATE_TASK(task_name => vTaskName); - -- Define chunks by number range (1 to partition count) - DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_NUMBER_COL( + -- Define chunks using SQL query to ensure TASK_NAME isolation + -- CRITICAL: Filter by TASK_NAME to avoid selecting chunks from other concurrent sessions + -- CRITICAL: Use START_ID and END_ID aliases to avoid ORA-00960 ambiguous column naming + DBMS_PARALLEL_EXECUTE.CREATE_CHUNKS_BY_SQL( task_name => vTaskName, - table_owner => 'CT_MRDS', - table_name => 'A_PARALLEL_EXPORT_CHUNKS', - table_column => 'CHUNK_ID', - chunk_size => 1 -- Each partition is one chunk + sql_stmt => 'SELECT CHUNK_ID AS START_ID, CHUNK_ID AS END_ID FROM CT_MRDS.A_PARALLEL_EXPORT_CHUNKS WHERE TASK_NAME = ''' || vTaskName || ''' ORDER BY CHUNK_ID', + by_rowid => FALSE ); -- Execute task in parallel @@ -1478,7 +1493,7 @@ AS IF pJobClass IS NOT NULL THEN DBMS_PARALLEL_EXECUTE.RUN_TASK( task_name => vTaskName, - sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id, ''' || vTaskName || '''); END;', language_flag => DBMS_SQL.NATIVE, parallel_level => pParallelDegree, job_class => pJobClass @@ -1486,7 +1501,7 @@ AS ELSE DBMS_PARALLEL_EXECUTE.RUN_TASK( task_name => vTaskName, - sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id); END;', + sql_stmt => 'BEGIN CT_MRDS.DATA_EXPORTER.EXPORT_PARTITION_PARALLEL(:start_id, :end_id, ''' || vTaskName || '''); 
END;', language_flag => DBMS_SQL.NATIVE, parallel_level => pParallelDegree ); diff --git a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg index 768759a..e8498b4 100644 --- a/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg +++ b/MARS_Packages/REL01_ADDITIONS/MARS-835-PREHOOK/new_version/DATA_EXPORTER.pkg @@ -9,17 +9,17 @@ AS **/ -- Package Version Information - PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.12.0'; - PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-24 14:30:00'; + PACKAGE_VERSION CONSTANT VARCHAR2(10) := '2.14.0'; + PACKAGE_BUILD_DATE CONSTANT VARCHAR2(20) := '2026-02-25 09:00:00'; PACKAGE_AUTHOR CONSTANT VARCHAR2(100) := 'Grzegorz Michalski'; -- Version History (last 3-5 changes) VERSION_HISTORY CONSTANT VARCHAR2(4000) := + 'v2.14.0 (2026-02-25): OPTIMIZATION - Added pTaskName parameter to EXPORT_PARTITION_PARALLEL for deterministic filtering. Replaced FETCH FIRST 1 ROW ONLY safeguard with precise WHERE CHUNK_ID AND TASK_NAME filter. Eliminates ORDER BY overhead and provides cleaner session isolation.' || CHR(10) || + 'v2.13.1 (2026-02-25): CRITICAL FIX - Added START_ID and END_ID aliases in CREATE_CHUNKS_BY_SQL to avoid ORA-00960 ambiguous column naming error.' || CHR(10) || + 'v2.13.0 (2026-02-25): CRITICAL SESSION ISOLATION FIX - Changed CREATE_CHUNKS_BY_NUMBER_COL to CREATE_CHUNKS_BY_SQL with TASK_NAME filter (fixes ORA-01422 in concurrent sessions). Added ORDER BY CREATED_DATE DESC FETCH FIRST 1 ROW safeguard to EXPORT_PARTITION_PARALLEL SELECT. Composite PK (TASK_NAME, CHUNK_ID) now fully functional.' || CHR(10) || 'v2.12.0 (2026-02-24): CRITICAL FIX - Rewritten DELETE_FAILED_EXPORT_FILE to use file-specific pattern matching (prevents deleting parallel CSV chunks in shared folder). Added vQuery logging before DBMS_CLOUD calls. Added CSV maxfilesize logging.' 
|| CHR(10) || - 'v2.11.0 (2026-02-18): Added pJobClass parameter to EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE for Oracle Scheduler job class support (resource/priority management).' || CHR(10) || - 'v2.10.1 (2026-02-17): CRITICAL FIX - Remove redundant COMPLETED chunks deletion before parallel export that caused ORA-01403 errors (phantom chunks created by CREATE_CHUNKS_BY_NUMBER_COL).' || CHR(10) || - 'v2.10.0 (2026-02-13): CRITICAL FIX - Register ALL files created by DBMS_CLOUD.EXPORT_DATA (multi-file support due to Oracle parallel processing on large instances). Prevents orphaned files in rollback.' || CHR(10) || - 'v2.9.0 (2026-02-13): Added pProcessName parameter to EXPORT_TABLE_DATA and EXPORT_TABLE_DATA_TO_CSV_BY_DATE procedures for process tracking in A_SOURCE_FILE_RECEIVED table.' || CHR(10); + 'v2.11.0 (2026-02-18): Added pJobClass parameter to EXPORT_TABLE_DATA_BY_DATE and EXPORT_TABLE_DATA_TO_CSV_BY_DATE for Oracle Scheduler job class support (resource/priority management).' || CHR(10); cgBL CONSTANT VARCHAR2(2) := CHR(13)||CHR(10); vgMsgTmp VARCHAR2(32000); @@ -54,10 +54,12 @@ AS * but should NOT be called directly by external code. * @param pStartId - Chunk start ID (CHUNK_ID from A_PARALLEL_EXPORT_CHUNKS table) * @param pEndId - Chunk end ID (same as pStartId for single-row chunks) + * @param pTaskName - Task name for session isolation (optional, DEFAULT NULL for backward compatibility) **/ PROCEDURE EXPORT_PARTITION_PARALLEL ( pStartId IN NUMBER, - pEndId IN NUMBER + pEndId IN NUMBER, + pTaskName IN VARCHAR2 DEFAULT NULL ); ---------------------------------------------------------------------------------------------------------------------------