     ContentPolicyViolationError,
     ContextWindowExceededError,
     ImageFetchError,
+    MidStreamFallbackError,
+    RateLimitError,
 )


@@ -210,6 +212,46 @@ def test_bad_request_error_attributes(self):
         assert error.num_retries == 1
         assert error.status_code == 400

+    def test_midstream_fallback_error_status_code_propagation(self):
+        """
+        MidStreamFallbackError should preserve the original status code and keep
+        message/request/response fields consistent after super().__init__().
+        """
+        original_req = httpx.Request("POST", "https://api.openai.com/v1/chat/completions")
+        original_resp = httpx.Response(status_code=429, request=original_req)
+
+        rate_limit_error = RateLimitError(
+            message="Rate limit exceeded",
+            llm_provider="openai",
+            model="gpt-4o-mini",
+            response=original_resp,
+        )
+
+        midstream_error = MidStreamFallbackError(
+            message="stream broke",
+            model="gpt-4o-mini",
+            llm_provider="openai",
+            original_exception=rate_limit_error,
+        )
+
+        assert midstream_error.status_code == 429
+        assert midstream_error.response.status_code == 429
+        assert str(midstream_error.response.request.url) == "https://openai.com/v1/"
+        assert midstream_error.message == "litellm.MidStreamFallbackError: stream broke"
+        assert midstream_error.args == ("litellm.MidStreamFallbackError: stream broke",)
+
+        # With no original exception, should default to 503.
+        midstream_fallback = MidStreamFallbackError(
+            message="stream broke without original",
+            model="gpt-4o-mini",
+            llm_provider="openai",
+            original_exception=None,
+        )
+
+        assert midstream_fallback.status_code == 503
+        assert midstream_fallback.response.status_code == 503
+        assert str(midstream_fallback.response.request.url) == "https://openai.com/v1/"
+

 class TestProxyHeaderExtraction:
     """Test that proxy correctly extracts headers from exceptions."""
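For context, the following is a minimal sketch of the constructor behavior the new test assumes: prefix the message, fall back to the wrapped exception's status code, and otherwise default to 503 with a synthetic httpx response. This is not litellm's actual MidStreamFallbackError implementation; the class name MidStreamFallbackErrorSketch and its attribute handling are illustrative only, reconstructed from the assertions above.

# Hypothetical sketch, not litellm's real exception class.
import httpx


class MidStreamFallbackErrorSketch(Exception):
    """Illustrative stand-in: wraps an error raised after streaming has started."""

    def __init__(self, message, model, llm_provider, original_exception=None):
        # Prefix the message the way the test's assertions expect.
        self.message = f"litellm.MidStreamFallbackError: {message}"
        self.model = model
        self.llm_provider = llm_provider
        self.original_exception = original_exception
        # Propagate the wrapped exception's status code; default to 503 when absent.
        self.status_code = getattr(original_exception, "status_code", None) or 503
        # Build a synthetic response so .response and .response.request always exist.
        request = httpx.Request("POST", "https://openai.com/v1/")
        self.response = httpx.Response(status_code=self.status_code, request=request)
        # super().__init__ makes args == (self.message,), matching the test.
        super().__init__(self.message)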