Commit 39ff06d

feat: implement graceful model discovery for vLLM provider
- Attempt model discovery first for backward compatibility
- If discovery fails and refresh_models=false, continue without error
- If discovery fails and refresh_models=true, fail hard with ValueError
- Supports dynamic token authentication scenarios

Fixes OAuth authentication issues when vLLM service requires dynamic tokens
1 parent 0a41c4e commit 39ff06d
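As a rough, self-contained sketch of the behavior described in the commit message (an illustration only, not the committed change itself, which is shown in the diff below), the fallback logic amounts to roughly the following. The list_models callable and refresh_models flag here are stand-ins for the vLLM client and the provider config field:

import logging
from typing import Callable, Iterable

log = logging.getLogger(__name__)


def verify_model(model_id: str, list_models: Callable[[], Iterable[str]], refresh_models: bool) -> None:
    """Illustrative stand-in for the new verification logic in register_model.

    Any failure during the live check (connection error, auth error, or the
    model missing from the listing) is fatal only when refresh_models is True;
    otherwise it is logged and registration proceeds, which keeps deployments
    working when vLLM requires a dynamic (e.g. OAuth) token at request time.
    """
    try:
        available_models = list(list_models())
        if model_id not in available_models:
            raise ValueError(
                f"Model {model_id} is not being served by vLLM. "
                f"Available models: {', '.join(available_models)}"
            )
    except Exception as e:
        if refresh_models:
            raise ValueError(f"Model verification failed: {e}") from e
        # refresh_models is false: gracefully continue without verification
        log.warning(f"Model verification failed for model {model_id} with error {e}")
        log.warning("Continuing without live check (refresh_models=false).")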

File tree

1 file changed (+14 -11 lines):
  • llama_stack/providers/remote/inference/vllm

llama_stack/providers/remote/inference/vllm/vllm.py

Lines changed: 14 additions & 11 deletions
@@ -9,7 +9,7 @@
 from urllib.parse import urljoin
 
 import httpx
-from openai import APIConnectionError, AsyncOpenAI
+from openai import AsyncOpenAI
 from openai.types.chat.chat_completion_chunk import (
     ChatCompletionChunk as OpenAIChatCompletionChunk,
 )
@@ -430,16 +430,19 @@ async def register_model(self, model: Model) -> Model:
             pass  # Ignore statically unknown model, will check live listing
         try:
             res = self.client.models.list()
-        except APIConnectionError as e:
-            raise ValueError(
-                f"Failed to connect to vLLM at {self.config.url}. Please check if vLLM is running and accessible at that URL."
-            ) from e
-        available_models = [m.id async for m in res]
-        if model.provider_resource_id not in available_models:
-            raise ValueError(
-                f"Model {model.provider_resource_id} is not being served by vLLM. "
-                f"Available models: {', '.join(available_models)}"
-            )
+            available_models = [m.id async for m in res]
+            if model.provider_resource_id not in available_models:
+                raise ValueError(
+                    f"Model {model.provider_resource_id} is not being served by vLLM. "
+                    f"Available models: {', '.join(available_models)}"
+                )
+        except Exception as e:
+            if self.config.refresh_models:
+                raise ValueError(f"Model verification failed: {e}") from e
+            # if refresh_models is false, gracefully continue without verification
+            log.warning(f"Model verification failed for model {model.model_id} with error {e}")
+            log.warning("Continuing without live check (refresh_models=false).")
+
         return model
 
     async def _get_params(self, request: ChatCompletionRequest) -> dict:
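For a quick check of both paths, the sketch above can be exercised with a discovery call that always fails, e.g. because the endpoint expects a fresh OAuth token; the model id below is just an example:

def failing_list_models() -> list[str]:
    # Stand-in for client.models.list() when vLLM rejects the request,
    # e.g. because a dynamic OAuth token has not been injected yet.
    raise ConnectionError("401 Unauthorized: token expired")


logging.basicConfig(level=logging.WARNING)

# refresh_models=False: the failure is only logged and registration proceeds.
verify_model("meta-llama/Llama-3.1-8B-Instruct", failing_list_models, refresh_models=False)

# refresh_models=True: the same failure is fatal.
try:
    verify_model("meta-llama/Llama-3.1-8B-Instruct", failing_list_models, refresh_models=True)
except ValueError as e:
    print(f"registration rejected: {e}")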
