Skip to content

Commit c9be8c1

Browse files
committed
add __init__ to the mixin
1 parent e58bf82 commit c9be8c1

File tree

8 files changed

+17
-54
lines changed

8 files changed

+17
-54
lines changed

llama_stack/providers/inline/vector_io/faiss/faiss.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -200,15 +200,10 @@ async def query_hybrid(
200200

201201
class FaissVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
202202
def __init__(self, config: FaissVectorIOConfig, inference_api: Inference, files_api: Files | None) -> None:
203+
super().__init__(files_api=files_api, kvstore=None)
203204
self.config = config
204205
self.inference_api = inference_api
205-
self.files_api = files_api
206206
self.cache: dict[str, VectorDBWithIndex] = {}
207-
self.kvstore: KVStore | None = None
208-
self.openai_vector_stores: dict[str, dict[str, Any]] = {}
209-
self.openai_file_batches: dict[str, dict[str, Any]] = {}
210-
self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
211-
self._last_file_batch_cleanup_time = 0
212207

213208
async def initialize(self) -> None:
214209
self.kvstore = await kvstore_impl(self.config.kvstore)

llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -410,15 +410,10 @@ class SQLiteVecVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtoc
410410
"""
411411

412412
def __init__(self, config, inference_api: Inference, files_api: Files | None) -> None:
413+
super().__init__(files_api=files_api, kvstore=None)
413414
self.config = config
414415
self.inference_api = inference_api
415-
self.files_api = files_api
416416
self.cache: dict[str, VectorDBWithIndex] = {}
417-
self.openai_vector_stores: dict[str, dict[str, Any]] = {}
418-
self.openai_file_batches: dict[str, dict[str, Any]] = {}
419-
self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
420-
self._last_file_batch_cleanup_time = 0
421-
self.kvstore: KVStore | None = None
422417

423418
async def initialize(self) -> None:
424419
self.kvstore = await kvstore_impl(self.config.kvstore)

llama_stack/providers/remote/vector_io/chroma/chroma.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -140,14 +140,13 @@ def __init__(
140140
inference_api: Api.inference,
141141
files_api: Files | None,
142142
) -> None:
143+
super().__init__(files_api=files_api, kvstore=None)
143144
log.info(f"Initializing ChromaVectorIOAdapter with url: {config}")
144145
self.config = config
145146
self.inference_api = inference_api
146147
self.client = None
147148
self.cache = {}
148-
self.kvstore: KVStore | None = None
149149
self.vector_db_store = None
150-
self.files_api = files_api
151150

152151
async def initialize(self) -> None:
153152
self.kvstore = await kvstore_impl(self.config.kvstore)
@@ -166,9 +165,6 @@ async def initialize(self) -> None:
166165
log.info(f"Connecting to Chroma local db at: {self.config.db_path}")
167166
self.client = chromadb.PersistentClient(path=self.config.db_path)
168167
self.openai_vector_stores = await self._load_openai_vector_stores()
169-
self.openai_file_batches: dict[str, dict[str, Any]] = {}
170-
self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
171-
self._last_file_batch_cleanup_time = 0
172168

173169
async def shutdown(self) -> None:
174170
pass

llama_stack/providers/remote/vector_io/milvus/milvus.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -309,17 +309,12 @@ def __init__(
309309
inference_api: Inference,
310310
files_api: Files | None,
311311
) -> None:
312+
super().__init__(files_api=files_api, kvstore=None)
312313
self.config = config
313314
self.cache = {}
314315
self.client = None
315316
self.inference_api = inference_api
316-
self.files_api = files_api
317-
self.kvstore: KVStore | None = None
318317
self.vector_db_store = None
319-
self.openai_vector_stores: dict[str, dict[str, Any]] = {}
320-
self.openai_file_batches: dict[str, dict[str, Any]] = {}
321-
self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
322-
self._last_file_batch_cleanup_time = 0
323318
self.metadata_collection_name = "openai_vector_stores_metadata"
324319

325320
async def initialize(self) -> None:

llama_stack/providers/remote/vector_io/pgvector/pgvector.py

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
# This source code is licensed under the terms described in the LICENSE file in
55
# the root directory of this source tree.
66

7-
import asyncio
87
import heapq
98
from typing import Any
109

@@ -346,17 +345,12 @@ def __init__(
346345
inference_api: Api.inference,
347346
files_api: Files | None = None,
348347
) -> None:
348+
super().__init__(files_api=files_api, kvstore=None)
349349
self.config = config
350350
self.inference_api = inference_api
351351
self.conn = None
352352
self.cache = {}
353-
self.files_api = files_api
354-
self.kvstore: KVStore | None = None
355353
self.vector_db_store = None
356-
self.openai_vector_stores: dict[str, dict[str, Any]] = {}
357-
self.openai_file_batches: dict[str, dict[str, Any]] = {}
358-
self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
359-
self._last_file_batch_cleanup_time = 0
360354
self.metadata_collection_name = "openai_vector_stores_metadata"
361355

362356
async def initialize(self) -> None:

llama_stack/providers/remote/vector_io/qdrant/qdrant.py

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
from llama_stack.log import get_logger
2828
from llama_stack.providers.datatypes import Api, VectorDBsProtocolPrivate
2929
from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig
30-
from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
30+
from llama_stack.providers.utils.kvstore import kvstore_impl
3131
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
3232
from llama_stack.providers.utils.memory.vector_store import (
3333
ChunkForDeletion,
@@ -162,17 +162,12 @@ def __init__(
162162
inference_api: Api.inference,
163163
files_api: Files | None = None,
164164
) -> None:
165+
super().__init__(files_api=files_api, kvstore=None)
165166
self.config = config
166167
self.client: AsyncQdrantClient = None
167168
self.cache = {}
168169
self.inference_api = inference_api
169-
self.files_api = files_api
170170
self.vector_db_store = None
171-
self.kvstore: KVStore | None = None
172-
self.openai_vector_stores: dict[str, dict[str, Any]] = {}
173-
self.openai_file_batches: dict[str, dict[str, Any]] = {}
174-
self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
175-
self._last_file_batch_cleanup_time = 0
176171
self._qdrant_lock = asyncio.Lock()
177172

178173
async def initialize(self) -> None:

llama_stack/providers/remote/vector_io/weaviate/weaviate.py

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
#
44
# This source code is licensed under the terms described in the LICENSE file in
55
# the root directory of this source tree.
6-
import asyncio
76
import json
87
from typing import Any
98

@@ -285,17 +284,12 @@ def __init__(
285284
inference_api: Api.inference,
286285
files_api: Files | None,
287286
) -> None:
287+
super().__init__(files_api=files_api, kvstore=None)
288288
self.config = config
289289
self.inference_api = inference_api
290290
self.client_cache = {}
291291
self.cache = {}
292-
self.files_api = files_api
293-
self.kvstore: KVStore | None = None
294292
self.vector_db_store = None
295-
self.openai_vector_stores: dict[str, dict[str, Any]] = {}
296-
self.openai_file_batches: dict[str, dict[str, Any]] = {}
297-
self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
298-
self._last_file_batch_cleanup_time = 0
299293
self.metadata_collection_name = "openai_vector_stores_metadata"
300294

301295
def _get_client(self) -> weaviate.WeaviateClient:

llama_stack/providers/utils/memory/openai_vector_store_mixin.py

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -71,16 +71,15 @@ class OpenAIVectorStoreMixin(ABC):
7171
an openai_vector_stores in-memory cache.
7272
"""
7373

74-
# These should be provided by the implementing class
75-
openai_vector_stores: dict[str, dict[str, Any]]
76-
openai_file_batches: dict[str, dict[str, Any]]
77-
files_api: Files | None
78-
# KV store for persisting OpenAI vector store metadata
79-
kvstore: KVStore | None
80-
# Track last cleanup time to throttle cleanup operations
81-
_last_file_batch_cleanup_time: int
82-
# Track running file batch processing tasks
83-
_file_batch_tasks: dict[str, asyncio.Task[None]]
74+
# Implementing classes should call super().__init__() in their __init__ method
75+
# to properly initialize the mixin attributes.
76+
def __init__(self, files_api: Files | None = None, kvstore: KVStore | None = None):
77+
self.openai_vector_stores: dict[str, dict[str, Any]] = {}
78+
self.openai_file_batches: dict[str, dict[str, Any]] = {}
79+
self.files_api = files_api
80+
self.kvstore = kvstore
81+
self._last_file_batch_cleanup_time = 0
82+
self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
8483

8584
async def _save_openai_vector_store(self, store_id: str, store_info: dict[str, Any]) -> None:
8685
"""Save vector store metadata to persistent storage."""

0 commit comments

Comments (0)