16 changes: 16 additions & 0 deletions blog_posts.json
@@ -0,0 +1,16 @@
{
"posts": [
{
"title": "LiteLLM: Unified Interface for 100+ LLMs",
"description": "Learn how LiteLLM provides a single interface to call any LLM with OpenAI-compatible syntax.",
"date": "2026-02-01",
"url": "https://www.litellm.ai/blog/litellm"
},
{
"title": "Using the LiteLLM Proxy for Load Balancing",
"description": "Set up the LiteLLM proxy server to load balance across multiple LLM providers and deployments.",
"date": "2026-01-15",
"url": "https://www.litellm.ai/blog/proxy-load-balancing"
}
]
}
16 changes: 16 additions & 0 deletions litellm/blog_posts_backup.json
@@ -0,0 +1,16 @@
{
"posts": [
{
"title": "LiteLLM: Unified Interface for 100+ LLMs",
"description": "Learn how LiteLLM provides a single interface to call any LLM with OpenAI-compatible syntax.",
"date": "2026-02-01",
"url": "https://www.litellm.ai/blog/litellm"
},
{
"title": "Using the LiteLLM Proxy for Load Balancing",
"description": "Set up the LiteLLM proxy server to load balance across multiple LLM providers and deployments.",
"date": "2026-01-15",
"url": "https://www.litellm.ai/blog/proxy-load-balancing"
}
]
}
134 changes: 134 additions & 0 deletions litellm/litellm_core_utils/get_blog_posts.py
@@ -0,0 +1,134 @@
"""
Pulls the latest LiteLLM blog posts from GitHub.

Falls back to the bundled local backup on any failure.
The GitHub JSON URL can be overridden via the LITELLM_BLOG_POSTS_URL env var.

To disable remote fetching entirely:
export LITELLM_LOCAL_BLOG_POSTS=True
"""

import json
import os
import time
from importlib.resources import files
from typing import Any, Dict, List, Optional

import httpx
from pydantic import BaseModel

from litellm._logging import verbose_logger

BLOG_POSTS_GITHUB_URL: str = os.getenv(
"LITELLM_BLOG_POSTS_URL",
"https://raw.githubusercontent.com/BerriAI/litellm/main/blog_posts.json",
)
Comment on lines +22 to +25
Contributor

Env var is resolved at import time, not at call time

BLOG_POSTS_GITHUB_URL is computed once at module import via os.getenv(). It then becomes the default parameter value for GetBlogPosts.get_blog_posts() and the module-level get_blog_posts(). If a user sets/changes the LITELLM_BLOG_POSTS_URL environment variable after the module has been imported (e.g., through the proxy config), the change won't take effect until the process is restarted. Consider reading the env var inside get_blog_posts() instead:

BLOG_POSTS_GITHUB_URL_DEFAULT = "https://raw.githubusercontent.com/BerriAI/litellm/main/blog_posts.json"

# Then inside get_blog_posts():
url = os.getenv("LITELLM_BLOG_POSTS_URL", BLOG_POSTS_GITHUB_URL_DEFAULT)
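
A fuller sketch of the call-time resolution (hypothetical restructuring, not part of this PR; caching and validation unchanged):

@classmethod
def get_blog_posts(cls, url: Optional[str] = None) -> List[Dict[str, str]]:
    # Resolve the env var on every call so a changed LITELLM_BLOG_POSTS_URL
    # takes effect without a process restart; None means "use the default chain".
    if url is None:
        url = os.getenv("LITELLM_BLOG_POSTS_URL", BLOG_POSTS_GITHUB_URL_DEFAULT)
    ...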


BLOG_POSTS_TTL_SECONDS: int = 3600 # 1 hour


class BlogPost(BaseModel):
title: str
description: str
date: str
url: str


class BlogPostsResponse(BaseModel):
posts: List[BlogPost]


class GetBlogPosts:
"""
Fetches, validates, and caches LiteLLM blog posts.

Mirrors the structure of GetModelCostMap:
- Fetches from GitHub with a 5-second timeout
- Validates the response has a non-empty ``posts`` list
- Caches the result in-process for BLOG_POSTS_TTL_SECONDS (1 hour)
- Falls back to the bundled local backup on any failure
"""

_cached_posts: Optional[List[Dict[str, str]]] = None
_last_fetch_time: float = 0.0
Comment on lines +52 to +53
Contributor

Class-level mutable cache is not thread-safe

_cached_posts and _last_fetch_time are class-level mutable variables shared across all threads. In a multi-threaded FastAPI deployment (or when using asyncio.to_thread), concurrent requests can race in get_blog_posts(): one thread reads _cached_posts while another updates it after the TTL check. The impact is low for a blog post list (worst case: an extra remote fetch), but consider protecting these with a threading.Lock for correctness. Note that the GetModelCostMap class this is modeled after has the same issue.

import threading

class GetBlogPosts:
    _cached_posts: Optional[List[Dict[str, str]]] = None
    _last_fetch_time: float = 0.0
    _lock: threading.Lock = threading.Lock()
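
And a sketch of how the lock could guard the TTL check and the cache update (validation and local-backup fallback elided for brevity):

@classmethod
def get_blog_posts(cls, url: str = BLOG_POSTS_GITHUB_URL) -> List[Dict[str, str]]:
    with cls._lock:
        now = time.time()
        if (
            cls._cached_posts is not None
            and (now - cls._last_fetch_time) < BLOG_POSTS_TTL_SECONDS
        ):
            return cls._cached_posts
        # Holding the lock across the fetch serializes refreshes, so at most
        # one thread hits the network per TTL window (at the cost of blocking
        # other readers for up to the 5-second timeout).
        data = cls.fetch_remote_blog_posts(url)
        cls._cached_posts = data["posts"]
        cls._last_fetch_time = now
        return cls._cached_posts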


@staticmethod
def load_local_blog_posts() -> List[Dict[str, str]]:
"""Load the bundled local backup blog posts."""
content = json.loads(
files("litellm")
.joinpath("blog_posts_backup.json")
.read_text(encoding="utf-8")
)
return content.get("posts", [])

@staticmethod
def fetch_remote_blog_posts(url: str, timeout: int = 5) -> dict:
"""
Fetch blog posts JSON from a remote URL.

Returns the parsed response. Raises on network/parse errors.
"""
response = httpx.get(url, timeout=timeout)
response.raise_for_status()
return response.json()

@staticmethod
def validate_blog_posts(data: Any) -> bool:
"""Return True if data is a dict with a non-empty ``posts`` list."""
if not isinstance(data, dict):
verbose_logger.warning(
"LiteLLM: Blog posts response is not a dict (type=%s). "
"Falling back to local backup.",
type(data).__name__,
)
return False
posts = data.get("posts")
if not isinstance(posts, list) or len(posts) == 0:
verbose_logger.warning(
"LiteLLM: Blog posts response has no valid 'posts' list. "
"Falling back to local backup.",
)
return False
return True

@classmethod
def get_blog_posts(cls, url: str = BLOG_POSTS_GITHUB_URL) -> List[Dict[str, str]]:
"""
Return the blog posts list.

Uses the in-process cache if within BLOG_POSTS_TTL_SECONDS.
Fetches from ``url`` otherwise, falling back to local backup on failure.
"""
if os.getenv("LITELLM_LOCAL_BLOG_POSTS", "").lower() == "true":
return cls.load_local_blog_posts()

now = time.time()
if (
cls._cached_posts is not None
and (now - cls._last_fetch_time) < BLOG_POSTS_TTL_SECONDS
):
return cls._cached_posts

try:
data = cls.fetch_remote_blog_posts(url)
except Exception as e:
verbose_logger.warning(
"LiteLLM: Failed to fetch blog posts from %s: %s. "
"Falling back to local backup.",
url,
str(e),
)
return cls.load_local_blog_posts()

if not cls.validate_blog_posts(data):
return cls.load_local_blog_posts()

cls._cached_posts = data["posts"]
cls._last_fetch_time = now
return cls._cached_posts


def get_blog_posts(url: str = BLOG_POSTS_GITHUB_URL) -> List[Dict[str, str]]:
"""Public entry point — returns the blog posts list."""
return GetBlogPosts.get_blog_posts(url=url)
31 changes: 31 additions & 0 deletions litellm/proxy/public_endpoints/public_endpoints.py
@@ -4,6 +4,13 @@

from fastapi import APIRouter, Depends, HTTPException

from litellm._logging import verbose_logger
from litellm.litellm_core_utils.get_blog_posts import (
BlogPost,
BlogPostsResponse,
GetBlogPosts,
get_blog_posts,
)
from litellm.proxy._types import CommonProxyErrors
from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
from litellm.types.agents import AgentCard
@@ -193,6 +200,30 @@ async def get_litellm_model_cost_map():
)


@router.get(
"/public/litellm_blog_posts",
tags=["public"],
response_model=BlogPostsResponse,
)
async def get_litellm_blog_posts():
"""
Public endpoint to get the latest LiteLLM blog posts.

Fetches from GitHub with a 1-hour in-process cache.
Falls back to the bundled local backup on any failure.
"""
try:
posts_data = get_blog_posts()
except Exception as e:
verbose_logger.warning(
"LiteLLM: get_litellm_blog_posts endpoint fallback triggered: %s", str(e)
)
posts_data = GetBlogPosts.load_local_blog_posts()

posts = [BlogPost(**p) for p in posts_data[:5]]
return BlogPostsResponse(posts=posts)
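
# A hedged usage sketch (illustrative, not part of this diff): the route has
# no auth dependency, so any HTTP client can read it, e.g. with httpx against
# the proxy's default port 4000:
#
#   import httpx
#   resp = httpx.get("http://localhost:4000/public/litellm_blog_posts")
#   for post in resp.json()["posts"]:
#       print(post["date"], post["title"], post["url"])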


@router.get(
"/public/agents/fields",
tags=["public", "[beta] Agents"],
6 changes: 6 additions & 0 deletions litellm/proxy/ui_crud_endpoints/proxy_setting_endpoints.py
@@ -93,6 +93,11 @@ class UISettings(BaseModel):
description="If enabled, forwards client headers (e.g. Authorization) to the LLM API. Required for Claude Code with Max subscription.",
)

disable_show_blog: bool = Field(
default=False,
description="If true, hides the Blog dropdown from the UI navbar.",
)


class UISettingsResponse(SettingsResponse):
"""Response model for UI settings"""
@@ -107,6 +112,7 @@ class UISettingsResponse(SettingsResponse):
"enabled_ui_pages_internal_users",
"require_auth_for_public_ai_hub",
"forward_client_headers_to_llm_api",
"disable_show_blog",
}


80 changes: 80 additions & 0 deletions tests/proxy_unit_tests/test_blog_posts_endpoint.py
@@ -0,0 +1,80 @@
"""Tests for the /public/litellm_blog_posts endpoint."""
from unittest.mock import patch

import pytest
from fastapi.testclient import TestClient

SAMPLE_POSTS = [
{
"title": "Test Post",
"description": "A test post.",
"date": "2026-01-01",
"url": "https://www.litellm.ai/blog/test",
}
]


@pytest.fixture
def client():
"""Create a TestClient with just the public_endpoints router."""
from fastapi import FastAPI

from litellm.proxy.public_endpoints.public_endpoints import router

app = FastAPI()
app.include_router(router)
return TestClient(app)


def test_get_blog_posts_returns_response_shape(client):
with patch(
"litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
return_value=SAMPLE_POSTS,
):
response = client.get("/public/litellm_blog_posts")

assert response.status_code == 200
data = response.json()
assert "posts" in data
assert len(data["posts"]) == 1
post = data["posts"][0]
assert post["title"] == "Test Post"
assert post["description"] == "A test post."
assert post["date"] == "2026-01-01"
assert post["url"] == "https://www.litellm.ai/blog/test"


def test_get_blog_posts_limits_to_five(client):
"""Endpoint returns at most 5 posts."""
many_posts = [
{
"title": f"Post {i}",
"description": "desc",
"date": "2026-01-01",
"url": f"https://www.litellm.ai/blog/{i}",
}
for i in range(10)
]

with patch(
"litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
return_value=many_posts,
):
response = client.get("/public/litellm_blog_posts")

assert response.status_code == 200
assert len(response.json()["posts"]) == 5


def test_get_blog_posts_returns_local_backup_on_failure(client):
"""Endpoint returns local backup (non-empty list) when fetcher fails."""
with patch(
"litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
side_effect=Exception("fetch failed"),
):
response = client.get("/public/litellm_blog_posts")

# Should not 500 — returns local backup
assert response.status_code == 200
assert "posts" in response.json()
assert len(response.json()["posts"]) > 0
13 changes: 13 additions & 0 deletions tests/proxy_unit_tests/test_proxy_setting_endpoints.py
@@ -0,0 +1,13 @@
def test_ui_settings_has_disable_show_blog_field():
"""UISettings model must include disable_show_blog."""
from litellm.proxy.ui_crud_endpoints.proxy_setting_endpoints import UISettings

settings = UISettings()
assert hasattr(settings, "disable_show_blog")
assert settings.disable_show_blog is False # default


def test_allowed_ui_settings_fields_contains_disable_show_blog():
from litellm.proxy.ui_crud_endpoints.proxy_setting_endpoints import ALLOWED_UI_SETTINGS_FIELDS

assert "disable_show_blog" in ALLOWED_UI_SETTINGS_FIELDS