Skip to content

Commit ac1aa70

Browse files
committed
remove unwanted comments
1 parent 2bb1932 commit ac1aa70

File tree

1 file changed

+1
-10
lines changed

1 file changed

+1
-10
lines changed

llama_stack/providers/utils/memory/openai_vector_store_mixin.py

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,6 @@ async def _cleanup_expired_file_batches_if_needed(self) -> None:
220220
current_time = int(time.time())
221221
cleanup_interval = 24 * 60 * 60 # 1 day in seconds
222222

223-
# Check if enough time has passed since last cleanup
224223
if current_time - self._last_cleanup_time >= cleanup_interval:
225224
logger.info("Running throttled cleanup of expired file batches")
226225
await self._cleanup_expired_file_batches()
@@ -238,9 +237,7 @@ async def initialize_openai_vector_stores(self) -> None:
238237
"""Load existing OpenAI vector stores and file batches into the in-memory cache."""
239238
self.openai_vector_stores = await self._load_openai_vector_stores()
240239
self.openai_file_batches = await self._load_openai_vector_store_file_batches()
241-
# Resume any incomplete file batches
242240
await self._resume_incomplete_batches()
243-
# Initialize last cleanup time
244241
self._last_cleanup_time = 0
245242

246243
@abstractmethod
@@ -947,7 +944,7 @@ async def openai_create_vector_store_file_batch(
947944
# Start background processing of files
948945
asyncio.create_task(self._process_file_batch_async(batch_id, batch_info))
949946

950-
# Run cleanup if needed (throttled to once every 7 days)
947+
# Run cleanup if needed (throttled to once every 1 day)
951948
asyncio.create_task(self._cleanup_expired_file_batches_if_needed())
952949

953950
return batch_object
@@ -994,7 +991,6 @@ async def _process_file_batch_async(
994991
else:
995992
batch_info["status"] = "completed" # Partial success counts as completed
996993

997-
# Save final batch status to persistent storage (keep completed batches like vector stores)
998994
await self._save_openai_vector_store_file_batch(batch_id, batch_info)
999995

1000996
logger.info(f"File batch {batch_id} processing completed with status: {batch_info['status']}")
@@ -1064,7 +1060,6 @@ async def openai_retrieve_vector_store_file_batch(
10641060
) -> VectorStoreFileBatchObject:
10651061
"""Retrieve a vector store file batch."""
10661062
batch_info = self._get_and_validate_batch(batch_id, vector_store_id)
1067-
# Convert dict back to Pydantic model for API response
10681063
return VectorStoreFileBatchObject(**batch_info)
10691064

10701065
async def openai_list_files_in_vector_store_file_batch(
@@ -1120,17 +1115,13 @@ async def openai_cancel_vector_store_file_batch(
11201115
"""Cancel a vector store file batch."""
11211116
batch_info = self._get_and_validate_batch(batch_id, vector_store_id)
11221117

1123-
# Only allow cancellation if batch is in progress
11241118
if batch_info["status"] not in ["in_progress"]:
11251119
raise ValueError(f"Cannot cancel batch {batch_id} with status {batch_info['status']}")
11261120

1127-
# Update batch with cancelled status
11281121
batch_info["status"] = "cancelled"
11291122

1130-
# Save cancelled batch status to persistent storage (keep cancelled batches like vector stores)
11311123
await self._save_openai_vector_store_file_batch(batch_id, batch_info)
11321124

1133-
# Create updated batch object for API response
11341125
updated_batch = VectorStoreFileBatchObject(**batch_info)
11351126

11361127
return updated_batch

0 commit comments

Comments (0)