@@ -220,7 +220,6 @@ async def _cleanup_expired_file_batches_if_needed(self) -> None:
         current_time = int(time.time())
         cleanup_interval = 24 * 60 * 60  # 1 day in seconds
 
-        # Check if enough time has passed since last cleanup
         if current_time - self._last_cleanup_time >= cleanup_interval:
             logger.info("Running throttled cleanup of expired file batches")
             await self._cleanup_expired_file_batches()
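The deleted comment merely restated the guard below it. As a standalone illustration of the throttling pattern (hypothetical helper names, not part of this class), a minimal sketch:

    import time

    CLEANUP_INTERVAL = 24 * 60 * 60  # 1 day in seconds
    _last_cleanup_time = 0.0

    def maybe_cleanup(do_cleanup) -> bool:
        """Run do_cleanup() at most once per CLEANUP_INTERVAL; report whether it ran."""
        global _last_cleanup_time
        now = time.time()
        if now - _last_cleanup_time < CLEANUP_INTERVAL:
            return False  # throttled: ran too recently
        _last_cleanup_time = now
        do_cleanup()
        return True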
@@ -238,9 +237,7 @@ async def initialize_openai_vector_stores(self) -> None:
         """Load existing OpenAI vector stores and file batches into the in-memory cache."""
         self.openai_vector_stores = await self._load_openai_vector_stores()
         self.openai_file_batches = await self._load_openai_vector_store_file_batches()
-        # Resume any incomplete file batches
         await self._resume_incomplete_batches()
-        # Initialize last cleanup time
         self._last_cleanup_time = 0
 
     @abstractmethod
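Note that initializing `self._last_cleanup_time = 0` makes the first throttle check after startup always pass: for any current Unix timestamp, `current_time - 0 >= 24 * 60 * 60` holds, so the first file-batch creation triggers a cleanup pass (assuming `_last_cleanup_time` is advanced inside the cleanup path, which this diff does not show).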
@@ -947,7 +944,7 @@ async def openai_create_vector_store_file_batch(
         # Start background processing of files
         asyncio.create_task(self._process_file_batch_async(batch_id, batch_info))
 
-        # Run cleanup if needed (throttled to once every 7 days)
+        # Run cleanup if needed (throttled to once every 1 day)
         asyncio.create_task(self._cleanup_expired_file_batches_if_needed())
 
         return batch_object
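Both calls here are fire-and-forget. One caveat worth noting (general asyncio behavior, not something this PR changes): the event loop keeps only a weak reference to tasks, so a task created without retaining the returned handle can be garbage-collected before it finishes. A hedged sketch of the usual mitigation, with illustrative names:

    import asyncio

    _background_tasks: set[asyncio.Task] = set()

    def fire_and_forget(coro) -> asyncio.Task:
        task = asyncio.create_task(coro)
        _background_tasks.add(task)  # strong reference keeps the task alive
        task.add_done_callback(_background_tasks.discard)  # release when done
        return task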
@@ -994,7 +991,6 @@ async def _process_file_batch_async(
         else:
             batch_info["status"] = "completed"  # Partial success counts as completed
 
-        # Save final batch status to persistent storage (keep completed batches like vector stores)
         await self._save_openai_vector_store_file_batch(batch_id, batch_info)
 
         logger.info(f"File batch {batch_id} processing completed with status: {batch_info['status']}")
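The branch conditions feeding this `else` sit outside the hunk. Purely as an assumption about the shape of the logic (a hypothetical function, not the repository's code), the final status likely derives from per-file outcomes roughly like:

    def final_status(completed: int, failed: int, cancelled: bool) -> str:
        # Hypothetical: mirrors the visible rule that partial success
        # still counts as "completed".
        if cancelled:
            return "cancelled"
        if completed == 0 and failed > 0:
            return "failed"
        return "completed"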
@@ -1064,7 +1060,6 @@ async def openai_retrieve_vector_store_file_batch(
     ) -> VectorStoreFileBatchObject:
         """Retrieve a vector store file batch."""
         batch_info = self._get_and_validate_batch(batch_id, vector_store_id)
-        # Convert dict back to Pydantic model for API response
         return VectorStoreFileBatchObject(**batch_info)
 
     async def openai_list_files_in_vector_store_file_batch(
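`VectorStoreFileBatchObject(**batch_info)` rehydrates the persisted dict into the Pydantic response model, validating fields along the way. The round-trip pattern in isolation (illustrative model, assuming Pydantic v2):

    from pydantic import BaseModel

    class Batch(BaseModel):
        id: str
        status: str

    info = {"id": "batch_123", "status": "in_progress"}
    batch = Batch(**info)               # dict -> model, with validation
    assert batch.model_dump() == info   # model -> dict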
@@ -1120,17 +1115,13 @@ async def openai_cancel_vector_store_file_batch(
         """Cancel a vector store file batch."""
         batch_info = self._get_and_validate_batch(batch_id, vector_store_id)
 
-        # Only allow cancellation if batch is in progress
         if batch_info["status"] not in ["in_progress"]:
            raise ValueError(f"Cannot cancel batch {batch_id} with status {batch_info['status']}")
 
-        # Update batch with cancelled status
         batch_info["status"] = "cancelled"
 
-        # Save cancelled batch status to persistent storage (keep cancelled batches like vector stores)
         await self._save_openai_vector_store_file_batch(batch_id, batch_info)
 
-        # Create updated batch object for API response
         updated_batch = VectorStoreFileBatchObject(**batch_info)
 
         return updated_batch
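From the caller's side, the guard means cancellation is only accepted while the batch is `in_progress`; terminal states raise. A usage sketch (argument names assumed from the code above, not a confirmed signature):

    try:
        updated = await provider.openai_cancel_vector_store_file_batch(
            batch_id="batch_123", vector_store_id="vs_456"
        )
        print(updated.status)  # "cancelled"
    except ValueError as exc:
        print(f"cancel rejected: {exc}")  # batch already in a terminal state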