From 8088d3b77531b48582367f5358c04e4db489ed8b Mon Sep 17 00:00:00 2001 From: raj Date: Thu, 28 Aug 2025 01:38:56 +0530 Subject: [PATCH 1/7] lmstudio wrapper setup --- rig-core/src/providers/groq.rs | 3 +- rig-core/src/providers/lmstudio.rs | 101 ++++++++++++++++++ rig-core/src/providers/mod.rs | 1 + .../src/providers/openai/completion/mod.rs | 8 ++ .../providers/openai/completion/streaming.rs | 3 +- 5 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 rig-core/src/providers/lmstudio.rs diff --git a/rig-core/src/providers/groq.rs b/rig-core/src/providers/groq.rs index 282642bd2..8f72764ab 100644 --- a/rig-core/src/providers/groq.rs +++ b/rig-core/src/providers/groq.rs @@ -628,7 +628,8 @@ pub async fn send_compatible_streaming_request( let mut final_usage = Usage { prompt_tokens: 0, - total_tokens: 0 + total_tokens: 0, + extra: serde_json::Map::new(), }; let mut partial_data = None; diff --git a/rig-core/src/providers/lmstudio.rs b/rig-core/src/providers/lmstudio.rs new file mode 100644 index 000000000..f7e899f37 --- /dev/null +++ b/rig-core/src/providers/lmstudio.rs @@ -0,0 +1,101 @@ +//! A wrapper around the OpenAI provider to support LM Studio. +//! +//! This provider uses the `LMSTUDIO_API_KEY` and `LMSTUDIO_API_BASE` environment variables +//! to configure the underlying OpenAI client. + +use crate::client::{CompletionClient, EmbeddingsClient, ProviderClient, TranscriptionClient}; +use crate::providers::openai; + +// Re-export models +pub use openai::completion::*; + +const LMSTUDIO_API_BASE_URL: &str = "http://localhost:1234/v1"; + +/// A client for the LM Studio API. +/// +/// This is a wrapper around the OpenAI client, but configured for LM Studio. +#[derive(Clone, Debug)] +pub struct Client { + inner: openai::Client, +} + +impl ProviderClient for Client { + /// Create a new LM Studio client from environment variables. + /// + /// It uses `LMSTUDIO_API_BASE` for the base URL (defaulting to `http://localhost:1234/v1`) + /// and `LMSTUDIO_API_KEY` for the API key (defaulting to a placeholder). 
+    fn from_env() -> Self {
+        let base_url = std::env::var("LMSTUDIO_API_BASE")
+            .unwrap_or_else(|_| LMSTUDIO_API_BASE_URL.to_string());
+        let api_key = std::env::var("LMSTUDIO_API_KEY").unwrap_or_else(|_| "lmstudio".to_string());
+
+        let inner = openai::Client::builder(&api_key)
+            .base_url(&base_url)
+            .build()
+            .expect("Failed to build LM Studio client");
+
+        Self { inner }
+    }
+
+    fn from_val(input: crate::client::ProviderValue) -> Self {
+        let crate::client::ProviderValue::Simple(api_key) = input else {
+            panic!("Incorrect provider value type")
+        };
+        let base_url = std::env::var("LMSTUDIO_API_BASE")
+            .unwrap_or_else(|_| LMSTUDIO_API_BASE_URL.to_string());
+
+        let inner = openai::Client::builder(&api_key)
+            .base_url(&base_url)
+            .build()
+            .expect("Failed to build LM Studio client");
+
+        Self { inner }
+    }
+}
+
+impl CompletionClient for Client {
+    type CompletionModel = openai::responses_api::ResponsesCompletionModel;
+
+    fn completion_model(&self, model: &str) -> Self::CompletionModel {
+        self.inner.completion_model(model)
+    }
+}
+
+impl EmbeddingsClient for Client {
+    type EmbeddingModel = openai::embedding::EmbeddingModel;
+
+    fn embedding_model(&self, model: &str) -> Self::EmbeddingModel {
+        self.inner.embedding_model(model)
+    }
+
+    fn embedding_model_with_ndims(&self, model: &str, ndims: usize) -> Self::EmbeddingModel {
+        self.inner.embedding_model_with_ndims(model, ndims)
+    }
+}
+
+impl TranscriptionClient for Client {
+    type TranscriptionModel = openai::transcription::TranscriptionModel;
+
+    fn transcription_model(&self, model: &str) -> Self::TranscriptionModel {
+        self.inner.transcription_model(model)
+    }
+}
+
+// If image/audio features are enabled, delegate those clients as well.
+#[cfg(feature = "image")]
+impl crate::client::ImageGenerationClient for Client {
+    type ImageGenerationModel = openai::image_generation::ImageGenerationModel;
+
+    fn image_generation_model(&self, model: &str) -> Self::ImageGenerationModel {
+        self.inner.image_generation_model(model)
+    }
+}
+
+#[cfg(feature = "audio")]
+impl crate::client::AudioGenerationClient for Client {
+    type AudioGenerationModel = openai::audio_generation::AudioGenerationModel;
+
+    fn audio_generation_model(&self, model: &str) -> Self::AudioGenerationModel {
+        self.inner.audio_generation_model(model)
+    }
+}
diff --git a/rig-core/src/providers/mod.rs b/rig-core/src/providers/mod.rs
index 53ca700f5..45d1835c7 100644
--- a/rig-core/src/providers/mod.rs
+++ b/rig-core/src/providers/mod.rs
@@ -64,3 +64,4 @@ pub mod perplexity;
 pub mod together;
 pub mod voyageai;
 pub mod xai;
+pub mod lmstudio;
diff --git a/rig-core/src/providers/openai/completion/mod.rs b/rig-core/src/providers/openai/completion/mod.rs
index c51b02d79..4e5a176e5 100644
--- a/rig-core/src/providers/openai/completion/mod.rs
+++ b/rig-core/src/providers/openai/completion/mod.rs
@@ -129,6 +129,8 @@ pub enum Message {
             skip_serializing_if = "Vec::is_empty"
         )]
         tool_calls: Vec<ToolCall>,
+        #[serde(flatten)]
+        extra: serde_json::Map<String, serde_json::Value>,
     },
     #[serde(rename = "tool")]
     ToolResult {
@@ -382,6 +384,7 @@ impl TryFrom<message::Message> for Vec<Message> {
                 .into_iter()
                 .map(|tool_call| tool_call.into())
                 .collect::<Vec<_>>(),
+            extra: serde_json::Map::new(),
         }])
     }
 }
@@ -556,6 +559,8 @@ pub struct CompletionResponse {
     pub system_fingerprint: Option<String>,
     pub choices: Vec<Choice>,
     pub usage: Option<Usage>,
+    #[serde(flatten)]
+    pub extra: serde_json::Map<String, serde_json::Value>,
 }
 
 impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
@@ -642,6 +647,8 @@ pub struct Choice {
 pub struct Usage {
     pub prompt_tokens: usize,
     pub total_tokens: usize,
+    #[serde(flatten)]
+    pub extra: serde_json::Map<String, serde_json::Value>,
 }
 
 impl fmt::Display for Usage {
@@ -649,6 +656,7 @@ impl fmt::Display for Usage {
         let Usage {
             prompt_tokens,
             total_tokens,
+            ..
         } = self;
         write!(
             f,
diff --git a/rig-core/src/providers/openai/completion/streaming.rs b/rig-core/src/providers/openai/completion/streaming.rs
index 6ebb6559a..174e9c7cc 100644
--- a/rig-core/src/providers/openai/completion/streaming.rs
+++ b/rig-core/src/providers/openai/completion/streaming.rs
@@ -100,7 +100,8 @@ pub async fn send_compatible_streaming_request(
     let mut final_usage = Usage {
         prompt_tokens: 0,
-        total_tokens: 0
+        total_tokens: 0,
+        extra: serde_json::Map::new(),
     };
 
     let mut partial_data = None;

From e4324e4252cd1efe3d1d71da4fe311009e4826be Mon Sep 17 00:00:00 2001
From: raj
Date: Thu, 28 Aug 2025 02:24:05 +0530
Subject: [PATCH 2/7] feat: add lmstudio model provider

---
 rig-core/examples/lmstudio_streaming.rs | 29 ++++++++++++++++++
 rig-core/src/providers/lmstudio.rs      | 39 ++++++---------------
 rig-core/src/providers/mod.rs           |  2 +-
 3 files changed, 35 insertions(+), 35 deletions(-)
 create mode 100644 rig-core/examples/lmstudio_streaming.rs

diff --git a/rig-core/examples/lmstudio_streaming.rs b/rig-core/examples/lmstudio_streaming.rs
new file mode 100644
index 000000000..39148c88d
--- /dev/null
+++ b/rig-core/examples/lmstudio_streaming.rs
@@ -0,0 +1,29 @@
+use rig::agent::stream_to_stdout;
+use rig::prelude::*;
+use rig::providers::lmstudio;
+use rig::streaming::StreamingPrompt;
+
+#[tokio::main]
+async fn main() -> Result<(), anyhow::Error> {
+    // Enable tracing for debugging
+    tracing_subscriber::fmt().init();
+
+    // Create streaming agent with a single context prompt
+    let agent = lmstudio::Client::from_env()
+        .agent(openai::GPT_4O)
+        .preamble("Be precise and concise.")
+        .temperature(0.5)
+        .build();
+
+    // Stream the response and print chunks as they arrive
+    let mut stream = agent
+        .stream_prompt("When and where and what type is the next solar eclipse?")
+        .await;
+
+    let res = stream_to_stdout(&mut stream).await?;
+
+    println!("Token usage response: {usage:?}", usage = res.usage());
+    println!("Final text response: {message:?}", message = res.response());
+
+    Ok(())
+}
diff --git a/rig-core/src/providers/lmstudio.rs b/rig-core/src/providers/lmstudio.rs
index f7e899f37..dc1862186 100644
--- a/rig-core/src/providers/lmstudio.rs
+++ b/rig-core/src/providers/lmstudio.rs
@@ -1,29 +1,19 @@
-//! A wrapper around the OpenAI provider to support LM Studio.
-//!
-//! This provider uses the `LMSTUDIO_API_KEY` and `LMSTUDIO_API_BASE` environment variables
-//! to configure the underlying OpenAI client.
-
-use crate::client::{CompletionClient, EmbeddingsClient, ProviderClient, TranscriptionClient};
+use crate::prelude::CompletionClient;
+use crate::prelude::EmbeddingsClient;
+use crate::prelude::ProviderClient;
+use crate::prelude::TranscriptionClient;
 use crate::providers::openai;
-
-// Re-export models
 pub use openai::completion::*;
 
-const LMSTUDIO_API_BASE_URL: &str = "http://localhost:1234/v1";
+const LMSTUDIO_API_BASE_URL: &str = "http://localhost:8080/v1";
 
 /// A client for the LM Studio API.
-///
-/// This is a wrapper around the OpenAI client, but configured for LM Studio.
 #[derive(Clone, Debug)]
 pub struct Client {
     inner: openai::Client,
 }
 
 impl ProviderClient for Client {
-    /// Create a new LM Studio client from environment variables.
-    ///
-    /// It uses `LMSTUDIO_API_BASE` for the base URL (defaulting to `http://localhost:1234/v1`)
-    /// and `LMSTUDIO_API_KEY` for the API key (defaulting to a placeholder).
fn from_env() -> Self { let base_url = std::env::var("LMSTUDIO_API_BASE") .unwrap_or_else(|_| LMSTUDIO_API_BASE_URL.to_string()); @@ -80,22 +70,3 @@ impl TranscriptionClient for Client { self.inner.transcription_model(model) } } - -// If image/audio features are enabled, delegate those clients as well. -#[cfg(feature = "image")] -impl crate::client::ImageGenerationClient for Client { - type ImageGenerationModel = openai::image_generation::ImageGenerationModel; - - fn image_generation_model(&self, model: &str) -> Self::ImageGenerationModel { - self.inner.image_generation_model(model) - } -} - -#[cfg(feature = "audio")] -impl crate::client::AudioGenerationClient for Client { - type AudioGenerationModel = openai::audio_generation::AudioGenerationModel; - - fn audio_generation_model(&self, model: &str) -> Self::AudioGenerationModel { - self.inner.audio_generation_model(model) - } -} diff --git a/rig-core/src/providers/mod.rs b/rig-core/src/providers/mod.rs index 45d1835c7..068266cc0 100644 --- a/rig-core/src/providers/mod.rs +++ b/rig-core/src/providers/mod.rs @@ -54,6 +54,7 @@ pub mod gemini; pub mod groq; pub mod huggingface; pub mod hyperbolic; +pub mod lmstudio; pub mod mira; pub mod mistral; pub mod moonshot; @@ -64,4 +65,3 @@ pub mod perplexity; pub mod together; pub mod voyageai; pub mod xai; -pub mod lmstudio; From bfdae91d9b97ac0362d803ae9af7daa483c42882 Mon Sep 17 00:00:00 2001 From: raj Date: Thu, 28 Aug 2025 02:31:00 +0530 Subject: [PATCH 3/7] fix: added image and audio features --- rig-core/src/providers/lmstudio.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/rig-core/src/providers/lmstudio.rs b/rig-core/src/providers/lmstudio.rs index dc1862186..8f37a12cb 100644 --- a/rig-core/src/providers/lmstudio.rs +++ b/rig-core/src/providers/lmstudio.rs @@ -70,3 +70,21 @@ impl TranscriptionClient for Client { self.inner.transcription_model(model) } } + +#[cfg(feature = "image")] +impl crate::client::ImageGenerationClient for Client { + type ImageGenerationModel = openai::image_generation::ImageGenerationModel; + + fn image_generation_model(&self, model: &str) -> Self::ImageGenerationModel { + self.inner.image_generation_model(model) + } +} + +#[cfg(feature = "audio")] +impl crate::client::AudioGenerationClient for Client { + type AudioGenerationModel = openai::audio_generation::AudioGenerationModel; + + fn audio_generation_model(&self, model: &str) -> Self::AudioGenerationModel { + self.inner.audio_generation_model(model) + } +} From 55e0735aeeaf9895c07ef4e27c583e1f2990a534 Mon Sep 17 00:00:00 2001 From: raj Date: Thu, 28 Aug 2025 02:37:02 +0530 Subject: [PATCH 4/7] fix: clippy --- rig-core/src/providers/openai/client.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/rig-core/src/providers/openai/client.rs b/rig-core/src/providers/openai/client.rs index 820b4d3cd..a855e0803 100644 --- a/rig-core/src/providers/openai/client.rs +++ b/rig-core/src/providers/openai/client.rs @@ -523,6 +523,7 @@ mod tests { audio: None, name: None, tool_calls: vec![], + extra: serde_json::map::new(), }; let converted_user_message: message::Message = user_message.clone().try_into().unwrap(); From 791cd05dea4d59ae342e129af33dec4ad08587a6 Mon Sep 17 00:00:00 2001 From: raj Date: Thu, 28 Aug 2025 02:47:08 +0530 Subject: [PATCH 5/7] fix: clippy fixes --- rig-core/src/providers/openai/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rig-core/src/providers/openai/client.rs b/rig-core/src/providers/openai/client.rs index a855e0803..b1ee24080 100644 --- 
a/rig-core/src/providers/openai/client.rs +++ b/rig-core/src/providers/openai/client.rs @@ -523,7 +523,7 @@ mod tests { audio: None, name: None, tool_calls: vec![], - extra: serde_json::map::new(), + extra: serde_json::Map::new(), }; let converted_user_message: message::Message = user_message.clone().try_into().unwrap(); From d4a255cac7aba4fea2f91618c0ff9cf02a8f02ff Mon Sep 17 00:00:00 2001 From: raj Date: Thu, 28 Aug 2025 02:57:01 +0530 Subject: [PATCH 6/7] fix: clippy --- rig-core/examples/lmstudio_streaming.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rig-core/examples/lmstudio_streaming.rs b/rig-core/examples/lmstudio_streaming.rs index 39148c88d..81cef7f9d 100644 --- a/rig-core/examples/lmstudio_streaming.rs +++ b/rig-core/examples/lmstudio_streaming.rs @@ -1,6 +1,6 @@ use rig::agent::stream_to_stdout; use rig::prelude::*; -use rig::providers::lmstudio; +use rig::providers::{openai,lmstudio}; use rig::streaming::StreamingPrompt; #[tokio::main] From f3c7be665f68b1be258ee026ad864dcf13700483 Mon Sep 17 00:00:00 2001 From: raj Date: Thu, 28 Aug 2025 03:00:37 +0530 Subject: [PATCH 7/7] fix: clippy --- rig-core/examples/lmstudio_streaming.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rig-core/examples/lmstudio_streaming.rs b/rig-core/examples/lmstudio_streaming.rs index 81cef7f9d..457fcc5b6 100644 --- a/rig-core/examples/lmstudio_streaming.rs +++ b/rig-core/examples/lmstudio_streaming.rs @@ -1,6 +1,6 @@ use rig::agent::stream_to_stdout; use rig::prelude::*; -use rig::providers::{openai,lmstudio}; +use rig::providers::{lmstudio, openai}; use rig::streaming::StreamingPrompt; #[tokio::main]
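
For reference, a minimal usage sketch of the provider added by this series. This sketch is not part of the patches: the model id "qwen/qwen3-4b" is a hypothetical local model assumed to be loaded in LM Studio, and it assumes a server reachable at the provider's default base URL.

    use rig::agent::stream_to_stdout;
    use rig::prelude::*;
    use rig::providers::lmstudio;
    use rig::streaming::StreamingPrompt;

    #[tokio::main]
    async fn main() -> Result<(), anyhow::Error> {
        // LMSTUDIO_API_BASE and LMSTUDIO_API_KEY are read from the environment,
        // falling back to "http://localhost:8080/v1" and "lmstudio" respectively.
        let agent = lmstudio::Client::from_env()
            .agent("qwen/qwen3-4b") // hypothetical model id loaded in LM Studio
            .preamble("Be precise and concise.")
            .build();

        // Stream the completion to stdout, mirroring the bundled example.
        let mut stream = agent.stream_prompt("Say hello.").await;
        stream_to_stdout(&mut stream).await?;

        Ok(())
    }

Because the client only swaps the base URL and API key before delegating to the OpenAI client, any model exposed through LM Studio's OpenAI-compatible endpoint should work with the existing agent, embedding, and transcription plumbing unchanged.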