move the lock deletion to handle_output, just after destination processing

Author:    ali
Date:      2025-10-14 15:11:15 +05:30
Committer: ali
Parent:    b8c8aa94af
Commit:    922d22e06f

2 changed files with 18 additions and 31 deletions


@@ -700,6 +700,24 @@ class WorkerDestinationConnector:
         except Exception as e:
             self._handle_destination_error(exec_ctx, file_ctx, e)
             raise
+        finally:
+            # Release lock after destination processing completes
+            # Critical section (stage set + data extraction + destination write) is done
+            # File history and stage updates don't need the lock (protected by stage checks)
+            try:
+                tracker = FileExecutionStatusTracker()
+                lock_key = tracker.get_destination_lock_key(
+                    exec_ctx.execution_id, exec_ctx.file_execution_id
+                )
+                tracker.redis_client.delete(lock_key)
+                logger.info(
+                    f"Released destination lock for '{file_ctx.file_name}' "
+                    f"after destination processing (lock_key={lock_key})"
+                )
+            except Exception as lock_error:
+                logger.warning(
+                    f"Failed to release destination lock for '{file_ctx.file_name}': {lock_error}"
+                )
         # Log success
         self._log_processing_success(exec_ctx, file_ctx, result.has_hitl)
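For readers outside the codebase, the lifecycle this hunk settles on is acquire-then-finally-release. Below is a minimal sketch, assuming a redis-py client and SET NX EX acquisition; the key format, TTL, and every helper except `get_destination_lock_key()` and `redis_client.delete()` are hypothetical, not taken from the diff.

```python
# Minimal sketch of the lock lifecycle, under the assumptions stated above.
import redis

r = redis.Redis()


def run_destination_processing(execution_id: str, file_execution_id: str) -> None:
    """Placeholder for the real destination write (the critical section)."""


def process_with_destination_lock(execution_id: str, file_execution_id: str) -> None:
    # Hypothetical key format; the real one comes from get_destination_lock_key().
    lock_key = f"destination_lock:{execution_id}:{file_execution_id}"

    # NX admits exactly one worker into the critical section; EX bounds the
    # hold time so a crashed worker cannot leave the key behind forever.
    if not r.set(lock_key, "locked", nx=True, ex=300):
        return  # duplicate: another worker holds the lock

    try:
        run_destination_processing(execution_id, file_execution_id)
    finally:
        # Release unconditionally once destination processing completes --
        # the same guarantee the new `finally` block above provides.
        r.delete(lock_key)
```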


@@ -273,37 +273,6 @@ class WorkerWorkflowExecutionService:
             logger.info(
                 f"TIMING: Destination processing END for {file_name} at {destination_end_time:.6f} (took {destination_end_time - destination_start_time:.3f}s)"
             )
-            # Only clean up lock if we actually processed (not a duplicate skip)
-            # CRITICAL: Do not delete lock if we skipped due to duplicate - another worker might hold it!
-            if (
-                workflow_file_execution_id
-                and destination_result
-                and getattr(
-                    destination_result, "processed", True
-                )  # Only cleanup if we actually processed
-            ):
-                try:
-                    tracker = FileExecutionStatusTracker()
-                    lock_key = tracker.get_destination_lock_key(
-                        execution_id, workflow_file_execution_id
-                    )
-                    tracker.redis_client.delete(lock_key)
-                    logger.debug(
-                        f"Cleaned up destination lock for '{file_name}' (lock_key={lock_key})"
-                    )
-                except Exception as lock_cleanup_error:
-                    logger.warning(
-                        f"Failed to clean up destination lock: {lock_cleanup_error}"
-                    )
-            elif (
-                workflow_file_execution_id
-                and destination_result
-                and not getattr(destination_result, "processed", True)
-            ):
-                logger.info(
-                    f"Skipping lock cleanup for '{file_name}' - duplicate detected, lock held by another worker"
-                )
             # Step 4: Build Final Result
             final_time = time.time()
             execution_time = final_time - start_time
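The guard removed here existed because a worker that skipped as a duplicate must not delete a lock another worker still holds. With the release moved into the lock holder's own `finally` (first hunk), skipping workers never touch the key, so the guard becomes dead weight. A hypothetical illustration of the hazard, with an assumed key format and a redis-py client:

```python
# Illustration of the race the removed guard protected against: a worker that
# skipped as a duplicate must not delete the lock, or it frees the key out
# from under the worker actually writing to the destination.
import redis

r = redis.Redis()
lock_key = "destination_lock:exec-1:file-1"  # assumed key format

# Worker A wins the lock and starts the destination write.
assert r.set(lock_key, "worker-A", nx=True, ex=300)

# Worker B loses the race and skips as a duplicate (processed=False)...
assert not r.set(lock_key, "worker-B", nx=True, ex=300)

# ...and must NOT run r.delete(lock_key) here: a third worker could then
# acquire the key while A is still mid-write. Hence the old `processed`
# check -- and hence why the release now lives only in the holder's finally.
```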