# [Feature] UI - Blog Dropdown in Navbar #21859
Changes from all commits:
**New file** (its name isn't shown in this capture; the fetch URL in the module below suggests it is the repo-root `blog_posts.json`):

```json
{
  "posts": [
    {
      "title": "LiteLLM: Unified Interface for 100+ LLMs",
      "description": "Learn how LiteLLM provides a single interface to call any LLM with OpenAI-compatible syntax.",
      "date": "2026-02-01",
      "url": "https://www.litellm.ai/blog/litellm"
    },
    {
      "title": "Using the LiteLLM Proxy for Load Balancing",
      "description": "Set up the LiteLLM proxy server to load balance across multiple LLM providers and deployments.",
      "date": "2026-01-15",
      "url": "https://www.litellm.ai/blog/proxy-load-balancing"
    }
  ]
}
```
**New file**, with contents identical to the file above (most likely the bundled `litellm/blog_posts_backup.json` that `load_local_blog_posts()` below reads as the local fallback).
**New module** defining the blog-post fetcher (its filename isn't shown in this capture):

```python
"""
Pulls the latest LiteLLM blog posts from GitHub.

Falls back to the bundled local backup on any failure.
GitHub JSON can be overridden via LITELLM_BLOG_POSTS_URL env var.

Disable remote fetching entirely:
    export LITELLM_LOCAL_BLOG_POSTS=True
"""

import json
import os
import time
from importlib.resources import files
from typing import Any, Dict, List, Optional

import httpx
from pydantic import BaseModel

from litellm import verbose_logger

BLOG_POSTS_GITHUB_URL: str = os.getenv(
    "LITELLM_BLOG_POSTS_URL",
    "https://raw.githubusercontent.com/BerriAI/litellm/main/blog_posts.json",
)

BLOG_POSTS_TTL_SECONDS: int = 3600  # 1 hour


class BlogPost(BaseModel):
    title: str
    description: str
    date: str
    url: str


class BlogPostsResponse(BaseModel):
    posts: List[BlogPost]


class GetBlogPosts:
    """
    Fetches, validates, and caches LiteLLM blog posts.

    Mirrors the structure of GetModelCostMap:
    - Fetches from GitHub with a 5-second timeout
    - Validates the response has a non-empty ``posts`` list
    - Caches the result in-process for BLOG_POSTS_TTL_SECONDS (1 hour)
    - Falls back to the bundled local backup on any failure
    """

    _cached_posts: Optional[List[Dict[str, str]]] = None
    _last_fetch_time: float = 0.0
```
**Contributor** commented on lines +52 to +53 (`_cached_posts` / `_last_fetch_time`):

> Class-level mutable cache is not thread-safe.
>
> ```python
> import threading
>
>
> class GetBlogPosts:
>     _cached_posts: Optional[List[Dict[str, str]]] = None
>     _last_fetch_time: float = 0.0
>     _lock: threading.Lock = threading.Lock()
> ```
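To show what the suggested lock buys, here is a minimal, self-contained sketch of the double-checked-locking pattern the comment points toward (illustrative only; `ThreadSafeTTLCache` is not part of this PR):

```python
import threading
import time
from typing import Callable, List, Optional


class ThreadSafeTTLCache:
    """Double-checked locking around a TTL-cached fetch."""

    def __init__(self, fetch: Callable[[], List[dict]], ttl_seconds: float) -> None:
        self._fetch = fetch
        self._ttl = ttl_seconds
        self._value: Optional[List[dict]] = None
        self._last_fetch_time = 0.0
        self._lock = threading.Lock()

    def get(self) -> List[dict]:
        # Fast path: a fresh value is returned without taking the lock.
        if self._value is not None and (time.time() - self._last_fetch_time) < self._ttl:
            return self._value
        with self._lock:
            # Re-check under the lock: another thread may have refreshed
            # the cache while this one was waiting.
            if self._value is not None and (time.time() - self._last_fetch_time) < self._ttl:
                return self._value
            self._value = self._fetch()
            self._last_fetch_time = time.time()
            return self._value
```

Without the lock, several threads that miss the TTL check at the same moment would each issue their own `httpx.get`, and concurrent writes to the two class attributes could briefly disagree with each other.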
The module continues:

```python
    @staticmethod
    def load_local_blog_posts() -> List[Dict[str, str]]:
        """Load the bundled local backup blog posts."""
        content = json.loads(
            files("litellm")
            .joinpath("blog_posts_backup.json")
            .read_text(encoding="utf-8")
        )
        return content.get("posts", [])

    @staticmethod
    def fetch_remote_blog_posts(url: str, timeout: int = 5) -> dict:
        """
        Fetch blog posts JSON from a remote URL.

        Returns the parsed response. Raises on network/parse errors.
        """
        response = httpx.get(url, timeout=timeout)
        response.raise_for_status()
        return response.json()

    @staticmethod
    def validate_blog_posts(data: Any) -> bool:
        """Return True if data is a dict with a non-empty ``posts`` list."""
        if not isinstance(data, dict):
            verbose_logger.warning(
                "LiteLLM: Blog posts response is not a dict (type=%s). "
                "Falling back to local backup.",
                type(data).__name__,
            )
            return False
        posts = data.get("posts")
        if not isinstance(posts, list) or len(posts) == 0:
            verbose_logger.warning(
                "LiteLLM: Blog posts response has no valid 'posts' list. "
                "Falling back to local backup.",
            )
            return False
        return True

    @classmethod
    def get_blog_posts(cls, url: str = BLOG_POSTS_GITHUB_URL) -> List[Dict[str, str]]:
        """
        Return the blog posts list.

        Uses the in-process cache if within BLOG_POSTS_TTL_SECONDS.
        Fetches from ``url`` otherwise, falling back to local backup on failure.
        """
        if os.getenv("LITELLM_LOCAL_BLOG_POSTS", "").lower() == "true":
            return cls.load_local_blog_posts()

        now = time.time()
        if (
            cls._cached_posts is not None
            and (now - cls._last_fetch_time) < BLOG_POSTS_TTL_SECONDS
        ):
            return cls._cached_posts

        try:
            data = cls.fetch_remote_blog_posts(url)
        except Exception as e:
            verbose_logger.warning(
                "LiteLLM: Failed to fetch blog posts from %s: %s. "
                "Falling back to local backup.",
                url,
                str(e),
            )
            return cls.load_local_blog_posts()

        if not cls.validate_blog_posts(data):
            return cls.load_local_blog_posts()

        cls._cached_posts = data["posts"]
        cls._last_fetch_time = now
        return cls._cached_posts


def get_blog_posts(url: str = BLOG_POSTS_GITHUB_URL) -> List[Dict[str, str]]:
    """Public entry point — returns the blog posts list."""
    return GetBlogPosts.get_blog_posts(url=url)
```
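For orientation, here is how the module above would typically be used. The import path is an assumption, since the new file's name is not visible in this capture:

```python
import os

# Assumed import path; the diff does not show the module's filename.
from litellm.get_blog_posts import get_blog_posts

# Default behaviour: fetch from GitHub, cache in-process for an hour.
for post in get_blog_posts():
    print(post["date"], post["title"], post["url"])

# Override the remote JSON source. Note: this must be set before the module
# is first imported (see the review comment at the end of this page).
os.environ["LITELLM_BLOG_POSTS_URL"] = "https://example.com/blog_posts.json"

# Skip remote fetching entirely and serve the bundled backup file.
os.environ["LITELLM_LOCAL_BLOG_POSTS"] = "True"
```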
**New test file** (its path isn't shown in this capture):

```python
"""Tests for the /public/litellm_blog_posts endpoint."""
from unittest.mock import patch

import pytest
from fastapi.testclient import TestClient

SAMPLE_POSTS = [
    {
        "title": "Test Post",
        "description": "A test post.",
        "date": "2026-01-01",
        "url": "https://www.litellm.ai/blog/test",
    }
]


@pytest.fixture
def client():
    """Create a TestClient with just the public_endpoints router."""
    from fastapi import FastAPI

    from litellm.proxy.public_endpoints.public_endpoints import router

    app = FastAPI()
    app.include_router(router)
    return TestClient(app)


def test_get_blog_posts_returns_response_shape(client):
    with patch(
        "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
        return_value=SAMPLE_POSTS,
    ):
        response = client.get("/public/litellm_blog_posts")

    assert response.status_code == 200
    data = response.json()
    assert "posts" in data
    assert len(data["posts"]) == 1
    post = data["posts"][0]
    assert post["title"] == "Test Post"
    assert post["description"] == "A test post."
    assert post["date"] == "2026-01-01"
    assert post["url"] == "https://www.litellm.ai/blog/test"


def test_get_blog_posts_limits_to_five(client):
    """Endpoint returns at most 5 posts."""
    many_posts = [
        {
            "title": f"Post {i}",
            "description": "desc",
            "date": "2026-01-01",
            "url": f"https://www.litellm.ai/blog/{i}",
        }
        for i in range(10)
    ]

    with patch(
        "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
        return_value=many_posts,
    ):
        response = client.get("/public/litellm_blog_posts")

    assert response.status_code == 200
    assert len(response.json()["posts"]) == 5


def test_get_blog_posts_returns_local_backup_on_failure(client):
    """Endpoint returns local backup (non-empty list) when fetcher fails."""
    with patch(
        "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
        side_effect=Exception("fetch failed"),
    ):
        response = client.get("/public/litellm_blog_posts")

    # Should not 500 — returns local backup
    assert response.status_code == 200
    assert "posts" in response.json()
    assert len(response.json()["posts"]) > 0
```
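The `/public/litellm_blog_posts` handler itself is not part of this excerpt. A minimal sketch consistent with these tests (returns at most five posts, falls back to the bundled backup rather than returning a 500) could look like the following; the import paths are assumptions:

```python
from fastapi import APIRouter

# Assumed imports; the actual public_endpoints.py is not shown in this diff.
from litellm.get_blog_posts import GetBlogPosts, get_blog_posts

router = APIRouter()


@router.get("/public/litellm_blog_posts")
async def litellm_blog_posts() -> dict:
    try:
        posts = get_blog_posts()
    except Exception:
        # The tests require a 200 with the bundled backup, never a 500.
        posts = GetBlogPosts.load_local_blog_posts()
    # The tests also pin the response to at most five posts.
    return {"posts": posts[:5]}
```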
**New test file** covering the `disable_show_blog` UI setting:

```python
def test_ui_settings_has_disable_show_blog_field():
    """UISettings model must include disable_show_blog."""
    from litellm.proxy.ui_crud_endpoints.proxy_setting_endpoints import UISettings

    settings = UISettings()
    assert hasattr(settings, "disable_show_blog")
    assert settings.disable_show_blog is False  # default


def test_allowed_ui_settings_fields_contains_disable_show_blog():
    from litellm.proxy.ui_crud_endpoints.proxy_setting_endpoints import ALLOWED_UI_SETTINGS_FIELDS

    assert "disable_show_blog" in ALLOWED_UI_SETTINGS_FIELDS
```
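These two tests pin down the shape of the settings change without showing it. Roughly, the model and allow-list in `proxy_setting_endpoints.py` would gain the following (a sketch, not the actual diff):

```python
from pydantic import BaseModel


class UISettings(BaseModel):
    # ...existing UI settings fields...
    disable_show_blog: bool = False  # when True, the navbar blog dropdown is hidden


ALLOWED_UI_SETTINGS_FIELDS = {
    # ...existing allowed fields...
    "disable_show_blog",
}
```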
**Reviewer** commented:

> **Env var is resolved at import time, not at call time**
>
> `BLOG_POSTS_GITHUB_URL` is computed once at module import via `os.getenv()`. It then becomes the default parameter value for both `GetBlogPosts.get_blog_posts()` and the module-level `get_blog_posts()` wrapper. If a user sets or changes the `LITELLM_BLOG_POSTS_URL` environment variable after the module has been imported (e.g., through the proxy config), the change won't take effect until the process is restarted. Consider reading the env var inside `get_blog_posts()` instead:
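The reviewer's suggested replacement was truncated in this capture; a sketch of what it likely proposes, resolving the env var on each call:

```python
import os
from typing import Dict, List, Optional

DEFAULT_BLOG_POSTS_URL = (
    "https://raw.githubusercontent.com/BerriAI/litellm/main/blog_posts.json"
)


def get_blog_posts(url: Optional[str] = None) -> List[Dict[str, str]]:
    """Public entry point; reads LITELLM_BLOG_POSTS_URL at call time."""
    if url is None:
        url = os.getenv("LITELLM_BLOG_POSTS_URL", DEFAULT_BLOG_POSTS_URL)
    # GetBlogPosts is the class defined in the module diff above.
    return GetBlogPosts.get_blog_posts(url=url)
```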