From b904d5598c303740ac16f89afd24e0deb18fd7da Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Fri, 15 Aug 2025 07:41:06 +0900
Subject: [PATCH 01/32] Fix model struct

---
 examples/openrouter_models.rs | 22 ++++++++++++++++
 src/v1/model.rs               | 47 +++++++++++++++++++++++++++++++----
 2 files changed, 64 insertions(+), 5 deletions(-)
 create mode 100644 examples/openrouter_models.rs

diff --git a/examples/openrouter_models.rs b/examples/openrouter_models.rs
new file mode 100644
index 00000000..4223b2b6
--- /dev/null
+++ b/examples/openrouter_models.rs
@@ -0,0 +1,22 @@
+use openai_api_rs::v1::api::OpenAIClient;
+use std::env;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let api_key = env::var("OPENROUTER_API_KEY").unwrap().to_string();
+    let mut client = OpenAIClient::builder()
+        .with_endpoint("https://openrouter.ai/api/v1")
+        .with_api_key(api_key)
+        .build()?;
+
+    let result = client.list_models().await?;
+    let models = result.data;
+
+    for model in models {
+        println!("Model id: {:?}", model.id);
+    }
+
+    Ok(())
+}
+
+// OPENROUTER_API_KEY=xxxx cargo run --package openai-api-rs --example openrouter_models
diff --git a/src/v1/model.rs b/src/v1/model.rs
index 2b0a044d..5aa2bc61 100644
--- a/src/v1/model.rs
+++ b/src/v1/model.rs
@@ -2,14 +2,51 @@ use serde::{Deserialize, Serialize};
 
 #[derive(Debug, Deserialize, Serialize)]
 pub struct ModelsResponse {
-    pub object: String,
+    pub object: Option<String>,
     pub data: Vec<ModelResponse>,
 }
 
 #[derive(Debug, Deserialize, Serialize)]
 pub struct ModelResponse {
-    pub id: String,
-    pub object: String,
-    pub created: i64,
-    pub owned_by: String,
+    pub id: Option<String>,
+    pub name: Option<String>,
+    pub created: Option<i64>,
+    pub description: Option<String>,
+    pub architecture: Option<Architecture>,
+    pub top_provider: Option<TopProvider>,
+    pub pricing: Option<Pricing>,
+    pub canonical_slug: Option<String>,
+    pub context_length: Option<i64>,
+    pub hugging_face_id: Option<String>,
+    pub per_request_limits: Option<serde_json::Value>,
+    pub supported_parameters: Option<Vec<String>>,
+    pub object: Option<String>,
+    pub owned_by: Option<String>,
+}
+
+#[derive(Debug, Deserialize, Serialize)]
+pub struct Architecture {
+    pub input_modalities: Option<Vec<String>>,
+    pub output_modalities: Option<Vec<String>>,
+    pub tokenizer: Option<String>,
+    pub instruct_type: Option<String>,
+}
+
+#[derive(Debug, Deserialize, Serialize)]
+pub struct TopProvider {
+    pub is_moderated: Option<bool>,
+    pub context_length: Option<i64>,
+    pub max_completion_tokens: Option<i64>,
+}
+
+#[derive(Debug, Deserialize, Serialize)]
+pub struct Pricing {
+    pub prompt: Option<String>,
+    pub completion: Option<String>,
+    pub image: Option<String>,
+    pub request: Option<String>,
+    pub web_search: Option<String>,
+    pub internal_reasoning: Option<String>,
+    pub input_cache_read: Option<String>,
+    pub input_cache_write: Option<String>,
 }

From abc8ca20c2edba1b76097c9d0605e95158f64159 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Fri, 15 Aug 2025 07:43:46 +0900
Subject: [PATCH 02/32] v6.0.9

---
 Cargo.toml | 2 +-
 README.md  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index f57c2f63..11f37e96 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-api-rs"
-version = "6.0.8"
+version = "6.0.9"
 edition = "2021"
 authors = ["Dongri Jin"]
 license = "MIT"

diff --git a/README.md b/README.md
index 3caee96c..ab2d5fd5 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ Check out the [docs.rs](https://docs.rs/openai-api-rs/).
 Cargo.toml
 ```toml
 [dependencies]
-openai-api-rs = "6.0.8"
+openai-api-rs = "6.0.9"
 ```
 
 ## Usage

From 94d2151bf2afc558f9bacff4471402ddf1d27671 Mon Sep 17 00:00:00 2001
From: "richardanaya2_2048b.Q6_K_M.gguf"
Date: Wed, 27 Aug 2025 19:01:19 -0700
Subject: [PATCH 03/32] This is causing an error in llama-server because it's
 not skipped

---
 src/v1/embedding.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/v1/embedding.rs b/src/v1/embedding.rs
index 3f68054c..67df9c0f 100644
--- a/src/v1/embedding.rs
+++ b/src/v1/embedding.rs
@@ -21,6 +21,7 @@ pub enum EncodingFormat {
 pub struct EmbeddingRequest {
     pub model: String,
     pub input: Vec<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub encoding_format: Option<EncodingFormat>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub dimensions: Option<i32>,

From ea03ac93d13173db7400371c36e0e97f8544d5b6 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Thu, 28 Aug 2025 11:13:02 +0900
Subject: [PATCH 04/32] 6.0.10

---
 Cargo.toml | 2 +-
 README.md  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 11f37e96..ee826e09 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-api-rs"
-version = "6.0.9"
+version = "6.0.10"
 edition = "2021"
 authors = ["Dongri Jin"]
 license = "MIT"

diff --git a/README.md b/README.md
index ab2d5fd5..11f92505 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ Check out the [docs.rs](https://docs.rs/openai-api-rs/).
 Cargo.toml
 ```toml
 [dependencies]
-openai-api-rs = "6.0.9"
+openai-api-rs = "6.0.10"
 ```
 
 ## Usage

From 65cbe60d4c7af5d501b1d095465c1cb1b40be4b9 Mon Sep 17 00:00:00 2001
From: Morgan Ewing
Date: Wed, 3 Sep 2025 11:06:03 +1000
Subject: [PATCH 05/32] =?UTF-8?q?=E2=9C=A8=20feat:=20add=20transforms=20fi?=
 =?UTF-8?q?eld=20to=20ChatCompletionRequest?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add optional transforms field to enable request preprocessing before API
calls. Includes comprehensive test coverage for serialization,
deserialization, and builder method functionality.

---
 src/v1/chat_completion.rs | 60 ++++++++++++++++++++++++++++++++-
 1 file changed, 59 insertions(+), 1 deletion(-)

diff --git a/src/v1/chat_completion.rs b/src/v1/chat_completion.rs
index 35b891b8..09fdc0f0 100644
--- a/src/v1/chat_completion.rs
+++ b/src/v1/chat_completion.rs
@@ -77,6 +77,13 @@ pub struct ChatCompletionRequest {
     pub tool_choice: Option<ToolChoiceType>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub reasoning: Option<Reasoning>,
+    /// Optional list of transforms to apply to the chat completion request.
+    ///
+    /// Transforms allow modifying the request before it's sent to the API,
+    /// enabling features like prompt rewriting, content filtering, or other
+    /// preprocessing steps. When None, no transforms are applied.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub transforms: Option<Vec<String>>,
 }
 
 impl ChatCompletionRequest {
@@ -100,6 +107,7 @@ impl ChatCompletionRequest {
             parallel_tool_calls: None,
             tool_choice: None,
             reasoning: None,
+            transforms: None,
         }
     }
 }
@@ -121,7 +129,8 @@ impl_builder_methods!(
     tools: Vec<Tool>,
     parallel_tool_calls: bool,
     tool_choice: ToolChoiceType,
-    reasoning: Reasoning
+    reasoning: Reasoning,
+    transforms: Vec<String>
 );
@@ -418,4 +427,53 @@ mod tests {
         let serialized = serde_json::to_value(&req).unwrap();
         assert_eq!(serialized["reasoning"]["effort"], "low");
     }
+
+    #[test]
+    fn test_transforms_none_serialization() {
+        let req = ChatCompletionRequest::new("gpt-4".to_string(), vec![]);
+        let serialised = serde_json::to_value(&req).unwrap();
+        // Verify that the transforms field is completely omitted from JSON output
+        assert!(!serialised.as_object().unwrap().contains_key("transforms"));
+    }
+
+    #[test]
+    fn test_transforms_some_serialization() {
+        let mut req = ChatCompletionRequest::new("gpt-4".to_string(), vec![]);
+        req.transforms = Some(vec!["transform1".to_string(), "transform2".to_string()]);
+        let serialised = serde_json::to_value(&req).unwrap();
+        // Verify that the transforms field is included as a proper JSON array
+        assert_eq!(
+            serialised["transforms"],
+            serde_json::json!(["transform1", "transform2"])
+        );
+    }
+
+    #[test]
+    fn test_transforms_some_deserialization() {
+        let json_str =
+            r#"{"model": "gpt-4", "messages": [], "transforms": ["transform1", "transform2"]}"#;
+        let req: ChatCompletionRequest = serde_json::from_str(json_str).unwrap();
+        // Verify that the transforms field is properly populated with Some(vec)
+        assert_eq!(
+            req.transforms,
+            Some(vec!["transform1".to_string(), "transform2".to_string()])
+        );
+    }
+
+    #[test]
+    fn test_transforms_none_deserialization() {
+        let json_str = r#"{"model": "gpt-4", "messages": []}"#;
+        let req: ChatCompletionRequest = serde_json::from_str(json_str).unwrap();
+        // Verify that the transforms field is properly set to None when absent
+        assert_eq!(req.transforms, None);
+    }
+
+    #[test]
+    fn test_transforms_builder_method() {
+        let transforms = vec!["transform1".to_string(), "transform2".to_string()];
+        let req =
+            ChatCompletionRequest::new("gpt-4".to_string(), vec![]).transforms(transforms.clone());
+        // Verify that the transforms field is properly set through the builder method
+        assert_eq!(req.transforms, Some(transforms));
+    }
 }

From f472d462b9dd1b7c548972132df32a8c60780ea5 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Wed, 3 Sep 2025 13:05:21 +0900
Subject: [PATCH 06/32] v6.0.11

---
 Cargo.toml | 2 +-
 README.md  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index ee826e09..4c671b92 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-api-rs"
-version = "6.0.10"
+version = "6.0.11"
 edition = "2021"
 authors = ["Dongri Jin"]
 license = "MIT"

diff --git a/README.md b/README.md
index 11f92505..6781f7b2 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ Check out the [docs.rs](https://docs.rs/openai-api-rs/).
Cargo.toml ```toml [dependencies] -openai-api-rs = "6.0.10" +openai-api-rs = "6.0.11" ``` ## Usage From e6dd68eb0243458dbf3961396f4043cdd157863f Mon Sep 17 00:00:00 2001 From: Dongri Jin Date: Tue, 30 Sep 2025 18:29:42 +0900 Subject: [PATCH 07/32] Fix models --- src/v1/common.rs | 146 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 115 insertions(+), 31 deletions(-) diff --git a/src/v1/common.rs b/src/v1/common.rs index ab16946f..934df91c 100644 --- a/src/v1/common.rs +++ b/src/v1/common.rs @@ -31,60 +31,144 @@ macro_rules! impl_builder_methods { #[derive(Debug, Serialize, Deserialize)] pub struct EmptyRequestBody {} -// https://platform.openai.com/docs/models/o3 +// O-series models +pub const O1: &str = "o1"; +pub const O1_2024_12_17: &str = "o1-2024-12-17"; +pub const O1_MINI: &str = "o1-mini"; +pub const O1_MINI_2024_09_12: &str = "o1-mini-2024-09-12"; +pub const O1_PREVIEW: &str = "o1-preview"; +pub const O1_PREVIEW_2024_09_12: &str = "o1-preview-2024-09-12"; +pub const O1_PRO: &str = "o1-pro"; +pub const O1_PRO_2025_03_19: &str = "o1-pro-2025-03-19"; + pub const O3: &str = "o3"; pub const O3_2025_04_16: &str = "o3-2025-04-16"; pub const O3_MINI: &str = "o3-mini"; pub const O3_MINI_2025_01_31: &str = "o3-mini-2025-01-31"; -// https://platform.openai.com/docs/models#gpt-4-5 -pub const GPT4_5_PREVIEW: &str = "gpt-4.5-preview"; -pub const GPT4_5_PREVIEW_2025_02_27: &str = "gpt-4.5-preview-2025-02-27"; +pub const O4_MINI: &str = "o4-mini"; +pub const O4_MINI_2025_04_16: &str = "o4-mini-2025-04-16"; +pub const O4_MINI_DEEP_RESEARCH: &str = "o4-mini-deep-research"; +pub const O4_MINI_DEEP_RESEARCH_2025_06_26: &str = "o4-mini-deep-research-2025-06-26"; -// https://platform.openai.com/docs/models/o1 -pub const O1_PREVIEW: &str = "o1-preview"; -pub const O1_PREVIEW_2024_09_12: &str = "o1-preview-2024-09-12"; -pub const O1_MINI: &str = "o1-mini"; -pub const O1_MINI_2024_09_12: &str = "o1-mini-2024-09-12"; +// GPT-5 models +pub const GPT5: &str = "gpt-5"; +pub const GPT5_2025_08_07: &str = "gpt-5-2025-08-07"; +pub const GPT5_CHAT_LATEST: &str = "gpt-5-chat-latest"; +pub const GPT5_CODEX: &str = "gpt-5-codex"; +pub const GPT5_MINI: &str = "gpt-5-mini"; +pub const GPT5_MINI_2025_08_07: &str = "gpt-5-mini-2025-08-07"; +pub const GPT5_NANO: &str = "gpt-5-nano"; +pub const GPT5_NANO_2025_08_07: &str = "gpt-5-nano-2025-08-07"; -// https://platform.openai.com/docs/models/gpt-4o-mini -pub const GPT4_O_MINI: &str = "gpt-4o-mini"; -pub const GPT4_O_MINI_2024_07_18: &str = "gpt-4o-mini-2024-07-18"; +// GPT-4.1 models +pub const GPT4_1: &str = "gpt-4.1"; +pub const GPT4_1_2025_04_14: &str = "gpt-4.1-2025-04-14"; +pub const GPT4_1_MINI: &str = "gpt-4.1-mini"; +pub const GPT4_1_MINI_2025_04_14: &str = "gpt-4.1-mini-2025-04-14"; +pub const GPT4_1_NANO: &str = "gpt-4.1-nano"; +pub const GPT4_1_NANO_2025_04_14: &str = "gpt-4.1-nano-2025-04-14"; -// https://platform.openai.com/docs/models/gpt-4o +// GPT-4o models pub const GPT4_O: &str = "gpt-4o"; pub const GPT4_O_2024_05_13: &str = "gpt-4o-2024-05-13"; pub const GPT4_O_2024_08_06: &str = "gpt-4o-2024-08-06"; +pub const GPT4_O_2024_11_20: &str = "gpt-4o-2024-11-20"; pub const GPT4_O_LATEST: &str = "chatgpt-4o-latest"; -// https://platform.openai.com/docs/models/gpt-3-5 -pub const GPT3_5_TURBO_1106: &str = "gpt-3.5-turbo-1106"; -pub const GPT3_5_TURBO: &str = "gpt-3.5-turbo"; -pub const GPT3_5_TURBO_16K: &str = "gpt-3.5-turbo-16k"; -pub const GPT3_5_TURBO_INSTRUCT: &str = "gpt-3.5-turbo-instruct"; -// - legacy -pub const GPT3_5_TURBO_0613: &str = 
"gpt-3.5-turbo-0613"; -pub const GPT3_5_TURBO_16K_0613: &str = "gpt-3.5-turbo-16k-0613"; -pub const GPT3_5_TURBO_0301: &str = "gpt-3.5-turbo-0301"; +pub const GPT4_O_MINI: &str = "gpt-4o-mini"; +pub const GPT4_O_MINI_2024_07_18: &str = "gpt-4o-mini-2024-07-18"; -// https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo -pub const GPT4_0125_PREVIEW: &str = "gpt-4-0125-preview"; -pub const GPT4_TURBO_PREVIEW: &str = "gpt-4-turbo-preview"; -pub const GPT4_1106_PREVIEW: &str = "gpt-4-1106-preview"; -pub const GPT4_VISION_PREVIEW: &str = "gpt-4-vision-preview"; +// GPT-4o search models +pub const GPT4_O_SEARCH_PREVIEW: &str = "gpt-4o-search-preview"; +pub const GPT4_O_SEARCH_PREVIEW_2025_03_11: &str = "gpt-4o-search-preview-2025-03-11"; +pub const GPT4_O_MINI_SEARCH_PREVIEW: &str = "gpt-4o-mini-search-preview"; +pub const GPT4_O_MINI_SEARCH_PREVIEW_2025_03_11: &str = "gpt-4o-mini-search-preview-2025-03-11"; + +// GPT-4o realtime models +pub const GPT4_O_REALTIME_PREVIEW: &str = "gpt-4o-realtime-preview"; +pub const GPT4_O_REALTIME_PREVIEW_2024_10_01: &str = "gpt-4o-realtime-preview-2024-10-01"; +pub const GPT4_O_REALTIME_PREVIEW_2024_12_17: &str = "gpt-4o-realtime-preview-2024-12-17"; +pub const GPT4_O_REALTIME_PREVIEW_2025_06_03: &str = "gpt-4o-realtime-preview-2025-06-03"; +pub const GPT4_O_MINI_REALTIME_PREVIEW: &str = "gpt-4o-mini-realtime-preview"; +pub const GPT4_O_MINI_REALTIME_PREVIEW_2024_12_17: &str = "gpt-4o-mini-realtime-preview-2024-12-17"; + +// GPT-4o audio models +pub const GPT4_O_AUDIO_PREVIEW: &str = "gpt-4o-audio-preview"; +pub const GPT4_O_AUDIO_PREVIEW_2024_10_01: &str = "gpt-4o-audio-preview-2024-10-01"; +pub const GPT4_O_AUDIO_PREVIEW_2024_12_17: &str = "gpt-4o-audio-preview-2024-12-17"; +pub const GPT4_O_AUDIO_PREVIEW_2025_06_03: &str = "gpt-4o-audio-preview-2025-06-03"; +pub const GPT4_O_MINI_AUDIO_PREVIEW: &str = "gpt-4o-mini-audio-preview"; +pub const GPT4_O_MINI_AUDIO_PREVIEW_2024_12_17: &str = "gpt-4o-mini-audio-preview-2024-12-17"; + +// GPT-4o transcription models +pub const GPT4_O_TRANSCRIBE: &str = "gpt-4o-transcribe"; +pub const GPT4_O_MINI_TRANSCRIBE: &str = "gpt-4o-mini-transcribe"; + +// GPT-4 and GPT-4 Turbo models pub const GPT4: &str = "gpt-4"; -pub const GPT4_32K: &str = "gpt-4-32k"; pub const GPT4_0613: &str = "gpt-4-0613"; +pub const GPT4_32K: &str = "gpt-4-32k"; pub const GPT4_32K_0613: &str = "gpt-4-32k-0613"; -// - legacy pub const GPT4_0314: &str = "gpt-4-0314"; pub const GPT4_32K_0314: &str = "gpt-4-32k-0314"; -// https://platform.openai.com/docs/api-reference/images/object +pub const GPT4_TURBO: &str = "gpt-4-turbo"; +pub const GPT4_TURBO_2024_04_09: &str = "gpt-4-turbo-2024-04-09"; +pub const GPT4_TURBO_PREVIEW: &str = "gpt-4-turbo-preview"; +pub const GPT4_0125_PREVIEW: &str = "gpt-4-0125-preview"; +pub const GPT4_1106_PREVIEW: &str = "gpt-4-1106-preview"; +pub const GPT4_VISION_PREVIEW: &str = "gpt-4-vision-preview"; + +// GPT-3.5 Turbo models +pub const GPT3_5_TURBO: &str = "gpt-3.5-turbo"; +pub const GPT3_5_TURBO_0125: &str = "gpt-3.5-turbo-0125"; +pub const GPT3_5_TURBO_1106: &str = "gpt-3.5-turbo-1106"; +pub const GPT3_5_TURBO_16K: &str = "gpt-3.5-turbo-16k"; +pub const GPT3_5_TURBO_0613: &str = "gpt-3.5-turbo-0613"; +pub const GPT3_5_TURBO_16K_0613: &str = "gpt-3.5-turbo-16k-0613"; +pub const GPT3_5_TURBO_0301: &str = "gpt-3.5-turbo-0301"; + +pub const GPT3_5_TURBO_INSTRUCT: &str = "gpt-3.5-turbo-instruct"; +pub const GPT3_5_TURBO_INSTRUCT_0914: &str = "gpt-3.5-turbo-instruct-0914"; + +// Audio models +pub const GPT_AUDIO: &str = 
"gpt-audio"; +pub const GPT_AUDIO_2025_08_28: &str = "gpt-audio-2025-08-28"; +pub const GPT_REALTIME: &str = "gpt-realtime"; +pub const GPT_REALTIME_2025_08_28: &str = "gpt-realtime-2025-08-28"; + +// Text-to-Speech models +pub const TTS_1: &str = "tts-1"; +pub const TTS_1_HD: &str = "tts-1-hd"; +pub const TTS_1_1106: &str = "tts-1-1106"; +pub const TTS_1_HD_1106: &str = "tts-1-hd-1106"; +pub const GPT4_O_MINI_TTS: &str = "gpt-4o-mini-tts"; + +// Speech-to-Text models +pub const WHISPER_1: &str = "whisper-1"; + +// Image generation models pub const DALL_E_2: &str = "dall-e-2"; pub const DALL_E_3: &str = "dall-e-3"; +pub const GPT_IMAGE_1: &str = "gpt-image-1"; -// https://platform.openai.com/docs/guides/embeddings/embedding-models +// Embedding models pub const TEXT_EMBEDDING_3_SMALL: &str = "text-embedding-3-small"; pub const TEXT_EMBEDDING_3_LARGE: &str = "text-embedding-3-large"; pub const TEXT_EMBEDDING_ADA_002: &str = "text-embedding-ada-002"; + +// Moderation models +pub const OMNI_MODERATION_LATEST: &str = "omni-moderation-latest"; +pub const OMNI_MODERATION_2024_09_26: &str = "omni-moderation-2024-09-26"; + +// Legacy models +pub const DAVINCI_002: &str = "davinci-002"; +pub const BABBAGE_002: &str = "babbage-002"; + +// Code models +pub const CODEX_MINI_LATEST: &str = "codex-mini-latest"; + +// Preview models (GPT-4.5) +pub const GPT4_5_PREVIEW: &str = "gpt-4.5-preview"; +pub const GPT4_5_PREVIEW_2025_02_27: &str = "gpt-4.5-preview-2025-02-27"; From c56ed0ae037db99787ca91b6f9ad9cbcb0e0746e Mon Sep 17 00:00:00 2001 From: Dongri Jin Date: Tue, 30 Sep 2025 18:34:58 +0900 Subject: [PATCH 08/32] Fix yaml --- .github/workflows/rust-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust-check.yml b/.github/workflows/rust-check.yml index 97e1029c..ac223b1a 100644 --- a/.github/workflows/rust-check.yml +++ b/.github/workflows/rust-check.yml @@ -19,7 +19,7 @@ jobs: profile: minimal toolchain: stable override: true - components: rustfmt + components: rustfmt clippy - name: Check formatting run: cargo fmt -- --check From aef88eac3913bf124d95fe7b93c2b90d3a5d6524 Mon Sep 17 00:00:00 2001 From: Dongri Jin Date: Tue, 30 Sep 2025 18:37:21 +0900 Subject: [PATCH 09/32] Fix yaml --- .github/workflows/rust-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust-check.yml b/.github/workflows/rust-check.yml index ac223b1a..d9fc54d8 100644 --- a/.github/workflows/rust-check.yml +++ b/.github/workflows/rust-check.yml @@ -19,7 +19,7 @@ jobs: profile: minimal toolchain: stable override: true - components: rustfmt clippy + components: rustfmt, clippy - name: Check formatting run: cargo fmt -- --check From c1edf2f2f59b430a9cfcca82d7a0bbd7e689b9a0 Mon Sep 17 00:00:00 2001 From: Dongri Jin Date: Tue, 30 Sep 2025 18:56:30 +0900 Subject: [PATCH 10/32] Add TimestampGranularity --- src/v1/audio.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/v1/audio.rs b/src/v1/audio.rs index 4ab93f4e..3ea2b3bd 100644 --- a/src/v1/audio.rs +++ b/src/v1/audio.rs @@ -5,6 +5,13 @@ use crate::impl_builder_methods; pub const WHISPER_1: &str = "whisper-1"; +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum TimestampGranularity { + Word, + Segment, +} + #[derive(Debug, Serialize, Clone)] pub struct AudioTranscriptionRequest { pub model: String, @@ -19,6 +26,8 @@ pub struct AudioTranscriptionRequest { pub temperature: Option, #[serde(skip_serializing_if = 
"Option::is_none")] pub language: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub timestamp_granularities: Option>, } impl AudioTranscriptionRequest { @@ -31,6 +40,7 @@ impl AudioTranscriptionRequest { response_format: None, temperature: None, language: None, + timestamp_granularities: None, } } @@ -43,6 +53,7 @@ impl AudioTranscriptionRequest { response_format: None, temperature: None, language: None, + timestamp_granularities: None, } } } @@ -52,7 +63,8 @@ impl_builder_methods!( prompt: String, response_format: String, temperature: f32, - language: String + language: String, + timestamp_granularities: Vec ); #[derive(Debug, Deserialize, Serialize)] From 8213cb9ac7fcd95ab6156af0744b9b63c2c591c9 Mon Sep 17 00:00:00 2001 From: Dongri Jin Date: Tue, 30 Sep 2025 18:59:35 +0900 Subject: [PATCH 11/32] v6.0.12 --- Cargo.toml | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4c671b92..070729ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openai-api-rs" -version = "6.0.11" +version = "6.0.12" edition = "2021" authors = ["Dongri Jin "] license = "MIT" diff --git a/README.md b/README.md index 6781f7b2..0bd023fe 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Check out the [docs.rs](https://docs.rs/openai-api-rs/). Cargo.toml ```toml [dependencies] -openai-api-rs = "6.0.11" +openai-api-rs = "6.0.12" ``` ## Usage From 21175580e2030487f81889736e41d700c2d7ae9a Mon Sep 17 00:00:00 2001 From: Baptiste Parmantier Date: Thu, 9 Oct 2025 17:29:47 +0200 Subject: [PATCH 12/32] feat: upgrade tokio-tungstenite version from 0.24 to 0.28 --- Cargo.toml | 2 +- src/realtime/client_event.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 070729ef..e3161c3b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ version = "1" version = "1.7.1" [dependencies.tokio-tungstenite] -version = "0.24.0" +version = "0.28.0" features = ["connect"] [dependencies.futures-util] diff --git a/src/realtime/client_event.rs b/src/realtime/client_event.rs index 53805381..1c43fd09 100644 --- a/src/realtime/client_event.rs +++ b/src/realtime/client_event.rs @@ -92,7 +92,7 @@ pub enum ClientEvent { impl From for Message { fn from(value: ClientEvent) -> Self { - Message::Text(String::from(&value)) + Message::Text(String::from(&value).into()) } } From 6a62547aae0506ed0910bdbe9dc39f37188f8e30 Mon Sep 17 00:00:00 2001 From: Dongri Jin Date: Fri, 10 Oct 2025 10:16:08 +0900 Subject: [PATCH 13/32] v6.0.13 --- Cargo.toml | 2 +- README.md | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e3161c3b..1ac1a62e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openai-api-rs" -version = "6.0.12" +version = "6.0.13" edition = "2021" authors = ["Dongri Jin "] license = "MIT" diff --git a/README.md b/README.md index 0bd023fe..58996582 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,24 @@ # OpenAI API client library for Rust (unofficial) + The OpenAI API client Rust library provides convenient access to the OpenAI API from Rust applications. Check out the [docs.rs](https://docs.rs/openai-api-rs/). ## Installation: + Cargo.toml + ```toml [dependencies] -openai-api-rs = "6.0.12" +openai-api-rs = "6.0.13" ``` ## Usage + The library needs to be configured with your account's secret key, which is available on the [website](https://platform.openai.com/account/api-keys). 
We recommend setting it as an environment variable. Here's an example of initializing the library with the API key loaded from an environment variable and creating a completion: ### Set OPENAI_API_KEY or OPENROUTER_API_KEY to environment variable + ```bash $ export OPENAI_API_KEY=sk-xxxxxxx or @@ -21,12 +26,14 @@ $ export OPENROUTER_API_KEY=sk-xxxxxxx ``` ### Create OpenAI client + ```rust let api_key = env::var("OPENAI_API_KEY").unwrap().to_string(); let mut client = OpenAIClient::builder().with_api_key(api_key).build()?; ``` ### Create OpenRouter client + ```rust let api_key = env::var("OPENROUTER_API_KEY").unwrap().to_string(); let mut client = OpenAIClient::builder() @@ -36,6 +43,7 @@ let mut client = OpenAIClient::builder() ``` ### Create request + ```rust let req = ChatCompletionRequest::new( GPT4_O.to_string(), @@ -50,6 +58,7 @@ let req = ChatCompletionRequest::new( ``` ### Send request + ```rust let result = client.chat_completion(req)?; println!("Content: {:?}", result.choices[0].message.content); @@ -60,11 +69,13 @@ for (key, value) in client.headers.unwrap().iter() { ``` ### Set OPENAI_API_BASE to environment variable (optional) + ```bash $ export OPENAI_API_BASE=https://api.openai.com/v1 ``` ## Example of chat completion + ```rust use openai_api_rs::v1::api::OpenAIClient; use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest}; @@ -99,6 +110,7 @@ async fn main() -> Result<(), Box> { ``` ## Example for OpenRouter + ```rust use openai_api_rs::v1::api::OpenAIClient; use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest}; @@ -126,7 +138,7 @@ async fn main() -> Result<(), Box> { let result = client.chat_completion(req).await?; println!("Content: {:?}", result.choices[0].message.content); - + for (key, value) in client.headers.unwrap().iter() { println!("{}: {:?}", key, value); } @@ -140,6 +152,7 @@ More Examples: [examples](https://github.com/dongri/openai-api-rs/tree/main/exam Check out the [full API documentation](https://platform.openai.com/docs/api-reference/completions) for examples of all the available functions. ## Supported APIs + - [x] [Completions](https://platform.openai.com/docs/api-reference/completions) - [x] [Chat](https://platform.openai.com/docs/api-reference/chat) - [x] [Edits](https://platform.openai.com/docs/api-reference/edits) @@ -155,4 +168,5 @@ Check out the [full API documentation](https://platform.openai.com/docs/api-refe - [x] [Realtime](https://platform.openai.com/docs/api-reference/realtime) ## License + This project is licensed under [MIT license](https://github.com/dongri/openai-api-rs/blob/main/LICENSE). 
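Patch 10 above adds the `timestamp_granularities` field and builder method to `AudioTranscriptionRequest`, but the series ships no example for it. Below is a minimal sketch of how it composes with the existing client API; the audio file name is a placeholder, and the `new(file, model)` argument order and the `verbose_json` requirement for word-level timestamps are assumptions, not taken from these patches:

```rust
use openai_api_rs::v1::api::OpenAIClient;
use openai_api_rs::v1::audio::{AudioTranscriptionRequest, TimestampGranularity, WHISPER_1};
use std::env;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = env::var("OPENAI_API_KEY")?;
    let mut client = OpenAIClient::builder().with_api_key(api_key).build()?;

    // Ask for word-level timestamps; the upstream API only honors
    // timestamp_granularities with the verbose_json response format
    // (assumption based on the OpenAI API docs, not on these patches).
    let req = AudioTranscriptionRequest::new("audio.mp3".to_string(), WHISPER_1.to_string())
        .response_format("verbose_json".to_string())
        .timestamp_granularities(vec![TimestampGranularity::Word]);

    let result = client.audio_transcription(req).await?;
    println!("{:?}", result);

    Ok(())
}
```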
From f5b50742e6057a2394ae09f0487277d69ceec2ff Mon Sep 17 00:00:00 2001
From: Baptiste Parmantier
Date: Mon, 13 Oct 2025 10:59:10 +0200
Subject: [PATCH 14/32] refactor: move chat_completion related files to a
 module

---
 examples/chat_completion.rs                   |   3 +-
 examples/function_call.rs                     |  29 +-
 examples/function_call_role.rs                |   3 +-
 examples/openrouter.rs                        |   3 +-
 examples/openrouter_reasoning.rs              |   5 +-
 examples/vision.rs                            |   3 +-
 src/v1/api.rs                                 |   2 +-
 .../{ => chat_completion}/chat_completion.rs  | 266 +-----------------
 src/v1/chat_completion/mod.rs                 | 254 +++++++++++++++++
 9 files changed, 292 insertions(+), 276 deletions(-)
 rename src/v1/{ => chat_completion}/chat_completion.rs (51%)
 create mode 100644 src/v1/chat_completion/mod.rs

diff --git a/examples/chat_completion.rs b/examples/chat_completion.rs
index 635add58..3556de08 100644
--- a/examples/chat_completion.rs
+++ b/examples/chat_completion.rs
@@ -1,5 +1,6 @@
 use openai_api_rs::v1::api::OpenAIClient;
-use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest};
+use openai_api_rs::v1::chat_completion::chat_completion::ChatCompletionRequest;
+use openai_api_rs::v1::chat_completion::{self};
 use openai_api_rs::v1::common::GPT4_O_MINI;
 use std::env;

diff --git a/examples/function_call.rs b/examples/function_call.rs
index 3935599e..b18769f6 100644
--- a/examples/function_call.rs
+++ b/examples/function_call.rs
@@ -1,5 +1,10 @@
 use openai_api_rs::v1::api::OpenAIClient;
-use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest};
+use openai_api_rs::v1::chat_completion::{
+    chat_completion::ChatCompletionRequest, ChatCompletionMessage,
+};
+use openai_api_rs::v1::chat_completion::{
+    Content, FinishReason, MessageRole, Tool, ToolChoiceType, ToolType,
+};
 use openai_api_rs::v1::common::GPT4_O;
 use openai_api_rs::v1::types;
 use serde::{Deserialize, Serialize};
@@ -32,16 +37,16 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     let req = ChatCompletionRequest::new(
         GPT4_O.to_string(),
-        vec![chat_completion::ChatCompletionMessage {
-            role: chat_completion::MessageRole::user,
-            content: chat_completion::Content::Text(String::from("What is the price of Ethereum?")),
+        vec![ChatCompletionMessage {
+            role: MessageRole::user,
+            content: Content::Text(String::from("What is the price of Ethereum?")),
             name: None,
             tool_calls: None,
             tool_call_id: None,
         }],
     )
-    .tools(vec![chat_completion::Tool {
-        r#type: chat_completion::ToolType::Function,
+    .tools(vec![Tool {
+        r#type: ToolType::Function,
         function: types::Function {
             name: String::from("get_coin_price"),
             description: Some(String::from("Get the price of a cryptocurrency")),
@@ -52,7 +57,7 @@
             },
         },
     }])
-    .tool_choice(chat_completion::ToolChoiceType::Auto);
+    .tool_choice(ToolChoiceType::Auto);
 
     // debug request json
     // let serialized = serde_json::to_string(&req).unwrap();
@@ -65,14 +70,14 @@
             println!("No finish_reason");
             println!("{:?}", result.choices[0].message.content);
         }
-        Some(chat_completion::FinishReason::stop) => {
+        Some(FinishReason::stop) => {
             println!("Stop");
             println!("{:?}", result.choices[0].message.content);
         }
-        Some(chat_completion::FinishReason::length) => {
+        Some(FinishReason::length) => {
            println!("Length");
         }
-        Some(chat_completion::FinishReason::tool_calls) => {
+        Some(FinishReason::tool_calls) => {
            println!("ToolCalls");
            #[derive(Deserialize, Serialize)]
            struct Currency {
                coin: String,
            }
@@ -90,10 +95,10 @@
                 }
             }
         }
-        Some(chat_completion::FinishReason::content_filter) => {
+
Some(FinishReason::content_filter) => { println!("ContentFilter"); } - Some(chat_completion::FinishReason::null) => { + Some(FinishReason::null) => { println!("Null"); } } diff --git a/examples/function_call_role.rs b/examples/function_call_role.rs index 901d5d7f..a18d355c 100644 --- a/examples/function_call_role.rs +++ b/examples/function_call_role.rs @@ -1,5 +1,6 @@ use openai_api_rs::v1::api::OpenAIClient; -use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest}; +use openai_api_rs::v1::chat_completion::chat_completion::ChatCompletionRequest; +use openai_api_rs::v1::chat_completion::{self}; use openai_api_rs::v1::common::GPT4_O; use openai_api_rs::v1::types; use serde::{Deserialize, Serialize}; diff --git a/examples/openrouter.rs b/examples/openrouter.rs index 5295bf41..79b8ec01 100644 --- a/examples/openrouter.rs +++ b/examples/openrouter.rs @@ -1,5 +1,6 @@ use openai_api_rs::v1::api::OpenAIClient; -use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest}; +use openai_api_rs::v1::chat_completion::chat_completion::ChatCompletionRequest; +use openai_api_rs::v1::chat_completion::{self}; use openai_api_rs::v1::common::GPT4_O_MINI; use std::env; diff --git a/examples/openrouter_reasoning.rs b/examples/openrouter_reasoning.rs index 9bfac3dd..9dd3c125 100644 --- a/examples/openrouter_reasoning.rs +++ b/examples/openrouter_reasoning.rs @@ -1,7 +1,6 @@ use openai_api_rs::v1::api::OpenAIClient; -use openai_api_rs::v1::chat_completion::{ - self, ChatCompletionRequest, Reasoning, ReasoningEffort, ReasoningMode, -}; +use openai_api_rs::v1::chat_completion::chat_completion::ChatCompletionRequest; +use openai_api_rs::v1::chat_completion::{self, Reasoning, ReasoningEffort, ReasoningMode}; use std::env; #[tokio::main] diff --git a/examples/vision.rs b/examples/vision.rs index 7bad362b..67c9af5d 100644 --- a/examples/vision.rs +++ b/examples/vision.rs @@ -1,5 +1,6 @@ use openai_api_rs::v1::api::OpenAIClient; -use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest}; +use openai_api_rs::v1::chat_completion::chat_completion::ChatCompletionRequest; +use openai_api_rs::v1::chat_completion::{self}; use openai_api_rs::v1::common::GPT4_O; use std::env; diff --git a/src/v1/api.rs b/src/v1/api.rs index ce0b1c4e..8cb49809 100644 --- a/src/v1/api.rs +++ b/src/v1/api.rs @@ -7,7 +7,7 @@ use crate::v1::audio::{ AudioTranslationRequest, AudioTranslationResponse, }; use crate::v1::batch::{BatchResponse, CreateBatchRequest, ListBatchResponse}; -use crate::v1::chat_completion::{ChatCompletionRequest, ChatCompletionResponse}; +use crate::v1::chat_completion::chat_completion::{ChatCompletionRequest, ChatCompletionResponse}; use crate::v1::common; use crate::v1::completion::{CompletionRequest, CompletionResponse}; use crate::v1::edit::{EditRequest, EditResponse}; diff --git a/src/v1/chat_completion.rs b/src/v1/chat_completion/chat_completion.rs similarity index 51% rename from src/v1/chat_completion.rs rename to src/v1/chat_completion/chat_completion.rs index 09fdc0f0..2c56287d 100644 --- a/src/v1/chat_completion.rs +++ b/src/v1/chat_completion/chat_completion.rs @@ -1,44 +1,13 @@ -use super::{common, types}; -use crate::impl_builder_methods; - -use serde::de::{self, MapAccess, SeqAccess, Visitor}; -use serde::ser::SerializeMap; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use crate::v1::chat_completion::{ChatCompletionChoice, Reasoning, Tool, ToolChoiceType}; +use crate::v1::common; +use crate::{ + impl_builder_methods, + 
v1::chat_completion::{serialize_tool_choice, ChatCompletionMessage}, +}; + +use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; -use std::fmt; -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub enum ToolChoiceType { - None, - Auto, - Required, - ToolChoice { tool: Tool }, -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -#[serde(rename_all = "lowercase")] -pub enum ReasoningEffort { - Low, - Medium, - High, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(untagged)] -pub enum ReasoningMode { - Effort { effort: ReasoningEffort }, - MaxTokens { max_tokens: i64 }, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct Reasoning { - #[serde(flatten)] - pub mode: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub exclude: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub enabled: Option, -} #[derive(Debug, Serialize, Deserialize, Clone)] pub struct ChatCompletionRequest { @@ -53,8 +22,6 @@ pub struct ChatCompletionRequest { #[serde(skip_serializing_if = "Option::is_none")] pub response_format: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub stream: Option, - #[serde(skip_serializing_if = "Option::is_none")] pub stop: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub max_tokens: Option, @@ -93,7 +60,6 @@ impl ChatCompletionRequest { messages, temperature: None, top_p: None, - stream: None, n: None, response_format: None, stop: None, @@ -118,7 +84,6 @@ impl_builder_methods!( top_p: f64, n: i64, response_format: Value, - stream: bool, stop: Vec, max_tokens: i64, presence_penalty: f64, @@ -133,154 +98,6 @@ impl_builder_methods!( transforms: Vec ); -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] -#[allow(non_camel_case_types)] -pub enum MessageRole { - user, - system, - assistant, - function, - tool, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Content { - Text(String), - ImageUrl(Vec), -} - -impl serde::Serialize for Content { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - match *self { - Content::Text(ref text) => { - if text.is_empty() { - serializer.serialize_none() - } else { - serializer.serialize_str(text) - } - } - Content::ImageUrl(ref image_url) => image_url.serialize(serializer), - } - } -} - -impl<'de> Deserialize<'de> for Content { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct ContentVisitor; - - impl<'de> Visitor<'de> for ContentVisitor { - type Value = Content; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a valid content type") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - Ok(Content::Text(value.to_string())) - } - - fn visit_seq(self, seq: A) -> Result - where - A: SeqAccess<'de>, - { - let image_urls: Vec = - Deserialize::deserialize(de::value::SeqAccessDeserializer::new(seq))?; - Ok(Content::ImageUrl(image_urls)) - } - - fn visit_map(self, map: M) -> Result - where - M: MapAccess<'de>, - { - let image_urls: Vec = - Deserialize::deserialize(de::value::MapAccessDeserializer::new(map))?; - Ok(Content::ImageUrl(image_urls)) - } - - fn visit_none(self) -> Result - where - E: de::Error, - { - Ok(Content::Text(String::new())) - } - - fn visit_unit(self) -> Result - where - E: de::Error, - { - Ok(Content::Text(String::new())) - } - } - - deserializer.deserialize_any(ContentVisitor) - } -} - -#[derive(Debug, Deserialize, Serialize, Clone, 
PartialEq, Eq)] -#[allow(non_camel_case_types)] -pub enum ContentType { - text, - image_url, -} - -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] -#[allow(non_camel_case_types)] -pub struct ImageUrlType { - pub url: String, -} - -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] -#[allow(non_camel_case_types)] -pub struct ImageUrl { - pub r#type: ContentType, - #[serde(skip_serializing_if = "Option::is_none")] - pub text: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub image_url: Option, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct ChatCompletionMessage { - pub role: MessageRole, - pub content: Content, - #[serde(skip_serializing_if = "Option::is_none")] - pub name: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub tool_calls: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - pub tool_call_id: Option, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct ChatCompletionMessageForResponse { - pub role: MessageRole, - #[serde(skip_serializing_if = "Option::is_none")] - pub content: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub reasoning_content: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub name: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub tool_calls: Option>, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct ChatCompletionChoice { - pub index: i64, - pub message: ChatCompletionMessageForResponse, - pub finish_reason: Option, - pub finish_details: Option, -} - #[derive(Debug, Deserialize, Serialize)] pub struct ChatCompletionResponse { pub id: Option, @@ -292,73 +109,10 @@ pub struct ChatCompletionResponse { pub system_fingerprint: Option, } -#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] -#[allow(non_camel_case_types)] -pub enum FinishReason { - stop, - length, - content_filter, - tool_calls, - null, -} - -#[derive(Debug, Deserialize, Serialize)] -#[allow(non_camel_case_types)] -pub struct FinishDetails { - pub r#type: FinishReason, - pub stop: String, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct ToolCall { - pub id: String, - pub r#type: String, - pub function: ToolCallFunction, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct ToolCallFunction { - #[serde(skip_serializing_if = "Option::is_none")] - pub name: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub arguments: Option, -} - -fn serialize_tool_choice( - value: &Option, - serializer: S, -) -> Result -where - S: Serializer, -{ - match value { - Some(ToolChoiceType::None) => serializer.serialize_str("none"), - Some(ToolChoiceType::Auto) => serializer.serialize_str("auto"), - Some(ToolChoiceType::Required) => serializer.serialize_str("required"), - Some(ToolChoiceType::ToolChoice { tool }) => { - let mut map = serializer.serialize_map(Some(2))?; - map.serialize_entry("type", &tool.r#type)?; - map.serialize_entry("function", &tool.function)?; - map.end() - } - None => serializer.serialize_none(), - } -} - -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] -pub struct Tool { - pub r#type: ToolType, - pub function: types::Function, -} - -#[derive(Debug, Deserialize, Serialize, Copy, Clone, PartialEq, Eq)] -#[serde(rename_all = "snake_case")] -pub enum ToolType { - Function, -} - #[cfg(test)] mod tests { + use crate::v1::chat_completion::{ReasoningEffort, ReasoningMode}; + use super::*; use serde_json::json; diff --git a/src/v1/chat_completion/mod.rs b/src/v1/chat_completion/mod.rs new file 
mode 100644 index 00000000..d327e5db --- /dev/null +++ b/src/v1/chat_completion/mod.rs @@ -0,0 +1,254 @@ +use crate::v1::types; +use serde::de::{self, MapAccess, SeqAccess, Visitor}; +use serde::ser::SerializeMap; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use std::fmt; + +pub mod chat_completion; +pub mod chat_completion_stream; + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub enum ToolChoiceType { + None, + Auto, + Required, + ToolChoice { tool: Tool }, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] +pub enum ReasoningEffort { + Low, + Medium, + High, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ReasoningMode { + Effort { effort: ReasoningEffort }, + MaxTokens { max_tokens: i64 }, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct Reasoning { + #[serde(flatten)] + pub mode: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub exclude: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub enabled: Option, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] +#[allow(non_camel_case_types)] +pub enum MessageRole { + user, + system, + assistant, + function, + tool, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Content { + Text(String), + ImageUrl(Vec), +} + +impl serde::Serialize for Content { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match *self { + Content::Text(ref text) => { + if text.is_empty() { + serializer.serialize_none() + } else { + serializer.serialize_str(text) + } + } + Content::ImageUrl(ref image_url) => image_url.serialize(serializer), + } + } +} + +impl<'de> Deserialize<'de> for Content { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct ContentVisitor; + + impl<'de> Visitor<'de> for ContentVisitor { + type Value = Content; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a valid content type") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + Ok(Content::Text(value.to_string())) + } + + fn visit_seq(self, seq: A) -> Result + where + A: SeqAccess<'de>, + { + let image_urls: Vec = + Deserialize::deserialize(de::value::SeqAccessDeserializer::new(seq))?; + Ok(Content::ImageUrl(image_urls)) + } + + fn visit_map(self, map: M) -> Result + where + M: MapAccess<'de>, + { + let image_urls: Vec = + Deserialize::deserialize(de::value::MapAccessDeserializer::new(map))?; + Ok(Content::ImageUrl(image_urls)) + } + + fn visit_none(self) -> Result + where + E: de::Error, + { + Ok(Content::Text(String::new())) + } + + fn visit_unit(self) -> Result + where + E: de::Error, + { + Ok(Content::Text(String::new())) + } + } + + deserializer.deserialize_any(ContentVisitor) + } +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] +#[allow(non_camel_case_types)] +pub enum ContentType { + text, + image_url, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] +#[allow(non_camel_case_types)] +pub struct ImageUrlType { + pub url: String, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] +#[allow(non_camel_case_types)] +pub struct ImageUrl { + pub r#type: ContentType, + #[serde(skip_serializing_if = "Option::is_none")] + pub text: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub image_url: Option, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct 
ChatCompletionMessage { + pub role: MessageRole, + pub content: Content, + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub tool_calls: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub tool_call_id: Option, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct ChatCompletionMessageForResponse { + pub role: MessageRole, + #[serde(skip_serializing_if = "Option::is_none")] + pub content: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub reasoning_content: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub tool_calls: Option>, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct ChatCompletionChoice { + pub index: i64, + pub message: ChatCompletionMessageForResponse, + pub finish_reason: Option, + pub finish_details: Option, +} + +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] +#[allow(non_camel_case_types)] +pub enum FinishReason { + stop, + length, + content_filter, + tool_calls, + null, +} + +#[derive(Debug, Deserialize, Serialize)] +#[allow(non_camel_case_types)] +pub struct FinishDetails { + pub r#type: FinishReason, + pub stop: String, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct ToolCall { + pub id: String, + pub r#type: String, + pub function: ToolCallFunction, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct ToolCallFunction { + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub arguments: Option, +} + +pub fn serialize_tool_choice( + value: &Option, + serializer: S, +) -> Result +where + S: Serializer, +{ + match value { + Some(ToolChoiceType::None) => serializer.serialize_str("none"), + Some(ToolChoiceType::Auto) => serializer.serialize_str("auto"), + Some(ToolChoiceType::Required) => serializer.serialize_str("required"), + Some(ToolChoiceType::ToolChoice { tool }) => { + let mut map = serializer.serialize_map(Some(2))?; + map.serialize_entry("type", &tool.r#type)?; + map.serialize_entry("function", &tool.function)?; + map.end() + } + None => serializer.serialize_none(), + } +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] +pub struct Tool { + pub r#type: ToolType, + pub function: types::Function, +} + +#[derive(Debug, Deserialize, Serialize, Copy, Clone, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum ToolType { + Function, +} From 59a720ce5c6cd1f353adff100fa4496937efd0e1 Mon Sep 17 00:00:00 2001 From: Baptiste Parmantier Date: Mon, 13 Oct 2025 15:37:33 +0200 Subject: [PATCH 15/32] feat: implement chat_completion_stream --- Cargo.toml | 2 +- examples/chat_completion_stream.rs | 44 +++ src/v1/api.rs | 40 ++- .../chat_completion/chat_completion_stream.rs | 323 ++++++++++++++++++ 4 files changed, 407 insertions(+), 2 deletions(-) create mode 100644 examples/chat_completion_stream.rs create mode 100644 src/v1/chat_completion/chat_completion_stream.rs diff --git a/Cargo.toml b/Cargo.toml index 1ac1a62e..41e22230 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ default-tls = ["reqwest/default-tls", "tokio-tungstenite/native-tls"] [dependencies.reqwest] version = "0.12" default-features = false -features = ["charset", "http2", "json", "multipart", "socks"] +features = ["charset", "http2", "json", "multipart", "socks", "stream"] [dependencies.tokio] version = "1" diff --git a/examples/chat_completion_stream.rs 
b/examples/chat_completion_stream.rs new file mode 100644 index 00000000..9cb07bd1 --- /dev/null +++ b/examples/chat_completion_stream.rs @@ -0,0 +1,44 @@ +use futures_util::StreamExt; +use openai_api_rs::v1::api::OpenAIClient; +use openai_api_rs::v1::chat_completion::chat_completion_stream::{ + ChatCompletionStreamRequest, ChatCompletionStreamResponse, +}; +use openai_api_rs::v1::chat_completion::{self}; +use openai_api_rs::v1::common::GPT4_O_MINI; +use std::env; + +#[tokio::main] +async fn main() -> Result<(), Box> { + let api_key = env::var("OPENAI_API_KEY").unwrap().to_string(); + let mut client = OpenAIClient::builder().with_api_key(api_key).build()?; + + let req = ChatCompletionStreamRequest::new( + GPT4_O_MINI.to_string(), + vec![chat_completion::ChatCompletionMessage { + role: chat_completion::MessageRole::user, + content: chat_completion::Content::Text(String::from("What is bitcoin?")), + name: None, + tool_calls: None, + tool_call_id: None, + }], + ); + + let mut result = client.chat_completion_stream(req).await?; + while let Some(response) = result.next().await { + match response.clone() { + ChatCompletionStreamResponse::ToolCall(toolcalls) => { + println!("Tool Call: {:?}", toolcalls); + } + ChatCompletionStreamResponse::Content(content) => { + println!("Content: {:?}", content); + } + ChatCompletionStreamResponse::Done => { + println!("Done"); + } + } + } + + Ok(()) +} + +// OPENAI_API_KEY=xxxx cargo run --package openai-api-rs --example chat_completion diff --git a/src/v1/api.rs b/src/v1/api.rs index 8cb49809..8ff11652 100644 --- a/src/v1/api.rs +++ b/src/v1/api.rs @@ -8,6 +8,9 @@ use crate::v1::audio::{ }; use crate::v1::batch::{BatchResponse, CreateBatchRequest, ListBatchResponse}; use crate::v1::chat_completion::chat_completion::{ChatCompletionRequest, ChatCompletionResponse}; +use crate::v1::chat_completion::chat_completion_stream::{ + ChatCompletionStream, ChatCompletionStreamRequest, ChatCompletionStreamResponse, +}; use crate::v1::common; use crate::v1::completion::{CompletionRequest, CompletionResponse}; use crate::v1::edit::{EditRequest, EditResponse}; @@ -39,11 +42,12 @@ use crate::v1::run::{ use crate::v1::thread::{CreateThreadRequest, ModifyThreadRequest, ThreadObject}; use bytes::Bytes; +use futures_util::Stream; use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; use reqwest::multipart::{Form, Part}; use reqwest::{Client, Method, Response}; use serde::Serialize; -use serde_json::Value; +use serde_json::{to_value, Value}; use url::Url; use std::error::Error; @@ -334,6 +338,40 @@ impl OpenAIClient { self.post("chat/completions", &req).await } + pub async fn chat_completion_stream( + &mut self, + req: ChatCompletionStreamRequest, + ) -> Result, APIError> { + let mut payload = to_value(&req).map_err(|err| APIError::CustomError { + message: format!("Failed to serialize request: {}", err), + })?; + + if let Some(obj) = payload.as_object_mut() { + obj.insert("stream".into(), Value::Bool(true)); + } + + let request = self.build_request(Method::POST, "chat/completions").await; + let request = request.json(&payload); + let response = request.send().await?; + + if response.status().is_success() { + Ok(ChatCompletionStream { + response: Box::pin(response.bytes_stream()), + buffer: String::new(), + first_chunk: true, + }) + } else { + let error_text = response + .text() + .await + .unwrap_or_else(|_| String::from("Unknown error")); + + Err(APIError::CustomError { + message: error_text, + }) + } + } + pub async fn audio_transcription( &mut self, req: 
AudioTranscriptionRequest,

diff --git a/src/v1/chat_completion/chat_completion_stream.rs b/src/v1/chat_completion/chat_completion_stream.rs
new file mode 100644
index 00000000..8e6de497
--- /dev/null
+++ b/src/v1/chat_completion/chat_completion_stream.rs
@@ -0,0 +1,323 @@
+use crate::v1::chat_completion::{Reasoning, Tool, ToolCall, ToolChoiceType};
+use crate::{
+    impl_builder_methods,
+    v1::chat_completion::{serialize_tool_choice, ChatCompletionMessage},
+};
+
+use futures_util::Stream;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+use std::collections::HashMap;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct ChatCompletionStreamRequest {
+    pub model: String,
+    pub messages: Vec<ChatCompletionMessage>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub temperature: Option<f64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub top_p: Option<f64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub n: Option<i64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub response_format: Option<Value>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stop: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_tokens: Option<i64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub presence_penalty: Option<f64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub frequency_penalty: Option<f64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub logit_bias: Option<HashMap<String, i32>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub user: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub seed: Option<i64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tools: Option<Vec<Tool>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub parallel_tool_calls: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(serialize_with = "serialize_tool_choice")]
+    pub tool_choice: Option<ToolChoiceType>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub reasoning: Option<Reasoning>,
+    /// Optional list of transforms to apply to the chat completion request.
+    ///
+    /// Transforms allow modifying the request before it's sent to the API,
+    /// enabling features like prompt rewriting, content filtering, or other
+    /// preprocessing steps. When None, no transforms are applied.
+ #[serde(skip_serializing_if = "Option::is_none")] + pub transforms: Option>, +} + +impl ChatCompletionStreamRequest { + pub fn new(model: String, messages: Vec) -> Self { + Self { + model, + messages, + temperature: None, + top_p: None, + n: None, + response_format: None, + stop: None, + max_tokens: None, + presence_penalty: None, + frequency_penalty: None, + logit_bias: None, + user: None, + seed: None, + tools: None, + parallel_tool_calls: None, + tool_choice: None, + reasoning: None, + transforms: None, + } + } +} + +impl_builder_methods!( + ChatCompletionStreamRequest, + temperature: f64, + top_p: f64, + n: i64, + response_format: Value, + stop: Vec, + max_tokens: i64, + presence_penalty: f64, + frequency_penalty: f64, + logit_bias: HashMap, + user: String, + seed: i64, + tools: Vec, + parallel_tool_calls: bool, + tool_choice: ToolChoiceType, + reasoning: Reasoning, + transforms: Vec +); + +#[derive(Debug, Clone)] +pub enum ChatCompletionStreamResponse { + Content(String), + ToolCall(Vec), + Done, +} + +pub struct ChatCompletionStream> + Unpin> { + pub response: S, + pub buffer: String, + pub first_chunk: bool, +} + +impl> + Unpin> Stream + for ChatCompletionStream +{ + type Item = ChatCompletionStreamResponse; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match Pin::new(&mut self.as_mut().response).poll_next(cx) { + Poll::Ready(Some(Ok(chunk))) => { + let mut utf8_str = String::from_utf8_lossy(&chunk).to_string(); + + if self.first_chunk { + let lines: Vec<&str> = utf8_str.lines().collect(); + utf8_str = if lines.len() >= 2 { + lines[lines.len() - 2].to_string() + } else { + utf8_str.clone() + }; + self.first_chunk = false; + } + + let trimmed_str = utf8_str.trim_start_matches("data: "); + if trimmed_str.contains("[DONE]") { + return Poll::Ready(Some(ChatCompletionStreamResponse::Done)); + } + + self.buffer.push_str(trimmed_str); + let json_result: Result = serde_json::from_str(&self.buffer); + + match json_result { + Ok(json) => { + self.buffer.clear(); + + if let Some(choices) = json.get("choices") { + if let Some(choice) = choices.get(0) { + if let Some(delta) = choice.get("delta") { + if let Some(tool_calls) = delta.get("tool_calls") { + if let Some(tool_calls_array) = tool_calls.as_array() { + let tool_calls_vec: Vec = + tool_calls_array + .iter() + .filter_map(|v| { + serde_json::from_value(v.clone()).ok() + }) + .collect(); + + return Poll::Ready(Some( + ChatCompletionStreamResponse::ToolCall( + tool_calls_vec, + ), + )); + } + } + + if let Some(content) = + delta.get("content").and_then(|c| c.as_str()) + { + let output = content.replace("\\n", "\n"); + return Poll::Ready(Some( + ChatCompletionStreamResponse::Content(output), + )); + } + } + } + } + } + Err(err) => { + eprintln!("Failed to parse response: {:?}", err) + } + } + } + Poll::Ready(Some(Err(error))) => { + eprintln!("Error in stream: {:?}", error); + return Poll::Ready(None); + } + Poll::Ready(None) => { + return Poll::Ready(None); + } + Poll::Pending => { + return Poll::Pending; + } + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::v1::chat_completion::{ReasoningEffort, ReasoningMode}; + + use super::*; + use serde_json::json; + + #[test] + fn test_reasoning_effort_serialization() { + let reasoning = Reasoning { + mode: Some(ReasoningMode::Effort { + effort: ReasoningEffort::High, + }), + exclude: Some(false), + enabled: None, + }; + + let serialized = serde_json::to_value(&reasoning).unwrap(); + let expected = json!({ + "effort": "high", + "exclude": false + }); + + 
assert_eq!(serialized, expected); + } + + #[test] + fn test_reasoning_max_tokens_serialization() { + let reasoning = Reasoning { + mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }), + exclude: None, + enabled: Some(true), + }; + + let serialized = serde_json::to_value(&reasoning).unwrap(); + let expected = json!({ + "max_tokens": 2000, + "enabled": true + }); + + assert_eq!(serialized, expected); + } + + #[test] + fn test_reasoning_deserialization() { + let json_str = r#"{"effort": "medium", "exclude": true}"#; + let reasoning: Reasoning = serde_json::from_str(json_str).unwrap(); + + match reasoning.mode { + Some(ReasoningMode::Effort { effort }) => { + assert_eq!(effort, ReasoningEffort::Medium); + } + _ => panic!("Expected effort mode"), + } + assert_eq!(reasoning.exclude, Some(true)); + } + + #[test] + fn test_chat_completion_request_with_reasoning() { + let mut req = ChatCompletionStreamRequest::new("gpt-4".to_string(), vec![]); + + req.reasoning = Some(Reasoning { + mode: Some(ReasoningMode::Effort { + effort: ReasoningEffort::Low, + }), + exclude: None, + enabled: None, + }); + + let serialized = serde_json::to_value(&req).unwrap(); + assert_eq!(serialized["reasoning"]["effort"], "low"); + } + + #[test] + fn test_transforms_none_serialization() { + let req = ChatCompletionStreamRequest::new("gpt-4".to_string(), vec![]); + let serialised = serde_json::to_value(&req).unwrap(); + // Verify that the transforms field is completely omitted from JSON output + assert!(!serialised.as_object().unwrap().contains_key("transforms")); + } + + #[test] + fn test_transforms_some_serialization() { + let mut req = ChatCompletionStreamRequest::new("gpt-4".to_string(), vec![]); + req.transforms = Some(vec!["transform1".to_string(), "transform2".to_string()]); + let serialised = serde_json::to_value(&req).unwrap(); + // Verify that the transforms field is included as a proper JSON array + assert_eq!( + serialised["transforms"], + serde_json::json!(["transform1", "transform2"]) + ); + } + + #[test] + fn test_transforms_some_deserialization() { + let json_str = + r#"{"model": "gpt-4", "messages": [], "transforms": ["transform1", "transform2"]}"#; + let req: ChatCompletionStreamRequest = serde_json::from_str(json_str).unwrap(); + // Verify that the transforms field is properly populated with Some(vec) + assert_eq!( + req.transforms, + Some(vec!["transform1".to_string(), "transform2".to_string()]) + ); + } + + #[test] + fn test_transforms_none_deserialization() { + let json_str = r#"{"model": "gpt-4", "messages": []}"#; + let req: ChatCompletionStreamRequest = serde_json::from_str(json_str).unwrap(); + // Verify that the transforms field is properly set to None when absent + assert_eq!(req.transforms, None); + } + + #[test] + fn test_transforms_builder_method() { + let transforms = vec!["transform1".to_string(), "transform2".to_string()]; + let req = ChatCompletionStreamRequest::new("gpt-4".to_string(), vec![]) + .transforms(transforms.clone()); + // Verify that the transforms field is properly set through the builder method + assert_eq!(req.transforms, Some(transforms)); + } +} From 098f77b8fbcde59e9e3671382af19ad4ac2f5fd7 Mon Sep 17 00:00:00 2001 From: Baptiste Parmantier Date: Mon, 13 Oct 2025 15:46:30 +0200 Subject: [PATCH 16/32] feat: same name as its containing module --- examples/chat_completion_stream.rs | 2 +- src/v1/chat_completion/mod.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/chat_completion_stream.rs b/examples/chat_completion_stream.rs index 
From 098f77b8fbcde59e9e3671382af19ad4ac2f5fd7 Mon Sep 17 00:00:00 2001
From: Baptiste Parmantier
Date: Mon, 13 Oct 2025 15:46:30 +0200
Subject: [PATCH 16/32] feat: same name as its containing module

---
 examples/chat_completion_stream.rs | 2 +-
 src/v1/chat_completion/mod.rs      | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/examples/chat_completion_stream.rs b/examples/chat_completion_stream.rs
index 9cb07bd1..010e3f6a 100644
--- a/examples/chat_completion_stream.rs
+++ b/examples/chat_completion_stream.rs
@@ -41,4 +41,4 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     Ok(())
 }
 
-// OPENAI_API_KEY=xxxx cargo run --package openai-api-rs --example chat_completion
+// OPENAI_API_KEY=xxxx cargo run --package openai-api-rs --example chat_completion_stream

diff --git a/src/v1/chat_completion/mod.rs b/src/v1/chat_completion/mod.rs
index d327e5db..757e5052 100644
--- a/src/v1/chat_completion/mod.rs
+++ b/src/v1/chat_completion/mod.rs
@@ -4,6 +4,7 @@ use serde::ser::SerializeMap;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use std::fmt;
 
+#[allow(clippy::module_inception)]
 pub mod chat_completion;
 pub mod chat_completion_stream;

From b27fc91175bb0412e27de05e8320425b665b19c1 Mon Sep 17 00:00:00 2001
From: Baptiste Parmantier
Date: Tue, 14 Oct 2025 13:21:25 +0200
Subject: [PATCH 17/32] feat: ignore serde error

---
 .../chat_completion/chat_completion_stream.rs | 58 +++++++++----------
 1 file changed, 26 insertions(+), 32 deletions(-)

diff --git a/src/v1/chat_completion/chat_completion_stream.rs b/src/v1/chat_completion/chat_completion_stream.rs
index 8e6de497..32382acc 100644
--- a/src/v1/chat_completion/chat_completion_stream.rs
+++ b/src/v1/chat_completion/chat_completion_stream.rs
@@ -142,46 +142,40 @@ impl<S: Stream<Item = Result<Bytes, reqwest::Error>> + Unpin> Stream
                     self.buffer.push_str(trimmed_str);
                     let json_result: Result<Value, serde_json::Error> = serde_json::from_str(&self.buffer);
 
-                    match json_result {
-                        Ok(json) => {
-                            self.buffer.clear();
-
-                            if let Some(choices) = json.get("choices") {
-                                if let Some(choice) = choices.get(0) {
-                                    if let Some(delta) = choice.get("delta") {
-                                        if let Some(tool_calls) = delta.get("tool_calls") {
-                                            if let Some(tool_calls_array) = tool_calls.as_array() {
-                                                let tool_calls_vec: Vec<ToolCall> =
-                                                    tool_calls_array
-                                                        .iter()
-                                                        .filter_map(|v| {
-                                                            serde_json::from_value(v.clone()).ok()
-                                                        })
-                                                        .collect();
-
-                                                return Poll::Ready(Some(
-                                                    ChatCompletionStreamResponse::ToolCall(
-                                                        tool_calls_vec,
-                                                    ),
-                                                ));
-                                            }
-                                        }
+                    if let Ok(json) = json_result {
+                        self.buffer.clear();
+
+                        if let Some(choices) = json.get("choices") {
+                            if let Some(choice) = choices.get(0) {
+                                if let Some(delta) = choice.get("delta") {
+                                    if let Some(tool_calls) = delta.get("tool_calls") {
+                                        if let Some(tool_calls_array) = tool_calls.as_array() {
+                                            let tool_calls_vec: Vec<ToolCall> = tool_calls_array
+                                                .iter()
+                                                .filter_map(|v| {
+                                                    serde_json::from_value(v.clone()).ok()
+                                                })
+                                                .collect();
 
-                                        if let Some(content) =
-                                            delta.get("content").and_then(|c| c.as_str())
-                                        {
-                                            let output = content.replace("\\n", "\n");
                                             return Poll::Ready(Some(
-                                                ChatCompletionStreamResponse::Content(output),
+                                                ChatCompletionStreamResponse::ToolCall(
+                                                    tool_calls_vec,
+                                                ),
                                             ));
                                         }
                                     }
+
+                                    if let Some(content) =
+                                        delta.get("content").and_then(|c| c.as_str())
+                                    {
+                                        let output = content.replace("\\n", "\n");
+                                        return Poll::Ready(Some(
+                                            ChatCompletionStreamResponse::Content(output),
+                                        ));
+                                    }
                                 }
                             }
                         }
-                        Err(err) => {
-                            eprintln!("Failed to parse response: {:?}", err)
-                        }
-                    }
+                    }
                 }
                 Poll::Ready(Some(Err(error))) => {
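For context on why the `Err` arm can be dropped here: with chunked transport, a read boundary may land mid-document, so `serde_json::from_str` failing on the accumulated buffer is ordinary flow control ("not enough bytes yet"), not an error worth logging. A self-contained illustration of that buffering contract (example mine, not from the patch):

```rust
fn main() {
    // One JSON document split across two transport chunks.
    let chunks = [r#"{"choices":[{"de"#, r#"lta":{"content":"hi"}}]}"#];
    let mut buffer = String::new();

    for chunk in chunks {
        buffer.push_str(chunk);
        match serde_json::from_str::<serde_json::Value>(&buffer) {
            // Only a successful parse consumes the buffer.
            Ok(json) => {
                println!("complete: {json}");
                buffer.clear();
            }
            // A failure just means the document is still incomplete.
            Err(_) => println!("incomplete after {} bytes, keep buffering", buffer.len()),
        }
    }
}
```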
From 54a84ca0c64ee13a27bc2d9348021fd51a7d345f Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Wed, 15 Oct 2025 14:21:31 +0900
Subject: [PATCH 18/32] Fix response buffer

---
 .../chat_completion/chat_completion_stream.rs | 147 ++++++++++++------
 1 file changed, 96 insertions(+), 51 deletions(-)

diff --git a/src/v1/chat_completion/chat_completion_stream.rs b/src/v1/chat_completion/chat_completion_stream.rs
index 32382acc..0a6fb95a 100644
--- a/src/v1/chat_completion/chat_completion_stream.rs
+++ b/src/v1/chat_completion/chat_completion_stream.rs
@@ -113,71 +113,116 @@ pub struct ChatCompletionStream<S: Stream<Item = Result<Bytes, reqwest::Error>>
-impl<S: Stream<Item = Result<Bytes, reqwest::Error>> + Unpin> Stream
-    for ChatCompletionStream<S>
+impl<S> ChatCompletionStream<S>
+where
+    S: Stream<Item = Result<Bytes, reqwest::Error>> + Unpin,
 {
-    type Item = ChatCompletionStreamResponse;
-
-    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
-        loop {
-            match Pin::new(&mut self.as_mut().response).poll_next(cx) {
-                Poll::Ready(Some(Ok(chunk))) => {
-                    let mut utf8_str = String::from_utf8_lossy(&chunk).to_string();
+    fn find_event_delimiter(buffer: &str) -> Option<(usize, usize)> {
+        let carriage_idx = buffer.find("\r\n\r\n");
+        let newline_idx = buffer.find("\n\n");
+
+        match (carriage_idx, newline_idx) {
+            (Some(r_idx), Some(n_idx)) => {
+                if r_idx <= n_idx {
+                    Some((r_idx, 4))
+                } else {
+                    Some((n_idx, 2))
+                }
+            }
+            (Some(r_idx), None) => Some((r_idx, 4)),
+            (None, Some(n_idx)) => Some((n_idx, 2)),
+            (None, None) => None,
+        }
+    }
 
-                    if self.first_chunk {
-                        let lines: Vec<&str> = utf8_str.lines().collect();
-                        utf8_str = if lines.len() >= 2 {
-                            lines[lines.len() - 2].to_string()
-                        } else {
-                            utf8_str.clone()
-                        };
-                        self.first_chunk = false;
+    fn next_response_from_buffer(&mut self) -> Option<ChatCompletionStreamResponse> {
+        while let Some((idx, delimiter_len)) = Self::find_event_delimiter(&self.buffer) {
+            let event = self.buffer[..idx].to_owned();
+            self.buffer = self.buffer[idx + delimiter_len..].to_owned();
+
+            let mut data_payload = String::new();
+            for line in event.lines() {
+                let trimmed_line = line.trim_end_matches('\r');
+                if let Some(content) = trimmed_line
+                    .strip_prefix("data: ")
+                    .or_else(|| trimmed_line.strip_prefix("data:"))
+                {
+                    if !content.is_empty() {
+                        if !data_payload.is_empty() {
+                            data_payload.push('\n');
+                        }
+                        data_payload.push_str(content);
                     }
+                }
+            }
 
-                    let trimmed_str = utf8_str.trim_start_matches("data: ");
-                    if trimmed_str.contains("[DONE]") {
-                        return Poll::Ready(Some(ChatCompletionStreamResponse::Done));
-                    }
+            if data_payload.is_empty() {
+                continue;
+            }
+
+            if data_payload == "[DONE]" {
+                return Some(ChatCompletionStreamResponse::Done);
+            }
 
-                    self.buffer.push_str(trimmed_str);
-                    let json_result: Result<Value, serde_json::Error> = serde_json::from_str(&self.buffer);
-
-                    if let Ok(json) = json_result {
-                        self.buffer.clear();
-
-                        if let Some(choices) = json.get("choices") {
-                            if let Some(choice) = choices.get(0) {
-                                if let Some(delta) = choice.get("delta") {
-                                    if let Some(tool_calls) = delta.get("tool_calls") {
-                                        if let Some(tool_calls_array) = tool_calls.as_array() {
-                                            let tool_calls_vec: Vec<ToolCall> = tool_calls_array
-                                                .iter()
-                                                .filter_map(|v| {
-                                                    serde_json::from_value(v.clone()).ok()
-                                                })
-                                                .collect();
+            match serde_json::from_str::<Value>(&data_payload) {
+                Ok(json) => {
+                    if let Some(choices) = json.get("choices") {
+                        if let Some(choice) = choices.get(0) {
+                            if let Some(delta) = choice.get("delta") {
+                                if let Some(tool_calls) = delta.get("tool_calls") {
+                                    if let Some(tool_calls_array) = tool_calls.as_array() {
+                                        let tool_calls_vec: Vec<ToolCall> = tool_calls_array
+                                            .iter()
+                                            .filter_map(|v| serde_json::from_value(v.clone()).ok())
+                                            .collect();
+
+                                        if !tool_calls_vec.is_empty() {
+                                            return Some(ChatCompletionStreamResponse::ToolCall(
+                                                tool_calls_vec,
+                                            ));
+                                        }
+                                    }
+                                }
 
-                                            return Poll::Ready(Some(
-                                                ChatCompletionStreamResponse::ToolCall(
-                                                    tool_calls_vec,
-                                                ),
-                                            ));
-                                        }
-                                    }
-
-                                    if let Some(content) =
-                                        delta.get("content").and_then(|c| c.as_str())
-                                    {
-                                        let output = content.replace("\\n", "\n");
-                                        return Poll::Ready(Some(
-                                            ChatCompletionStreamResponse::Content(output),
-                                        ));
-                                    }
+                                if let Some(content) = delta.get("content").and_then(|c| c.as_str())
+                                {
+                                    let output = content.replace("\\n", "\n");
+                                    return Some(ChatCompletionStreamResponse::Content(output));
                                 }
                             }
                         }
                     }
                 }
+                Err(error) => {
+                    eprintln!("Failed to parse SSE chunk as JSON: {}", error);
+                }
+            }
+        }
+
+        None
+    }
+}
+
+impl<S: Stream<Item = Result<Bytes, reqwest::Error>> + Unpin> Stream
+    for ChatCompletionStream<S>
+{
+    type Item = ChatCompletionStreamResponse;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        loop {
+            if let Some(response) = self.next_response_from_buffer() {
+                return Poll::Ready(Some(response));
+            }
+
+            match Pin::new(&mut self.as_mut().response).poll_next(cx) {
+                Poll::Ready(Some(Ok(chunk))) => {
+                    let chunk_str = String::from_utf8_lossy(&chunk).to_string();
+
+                    if self.first_chunk {
+                        self.first_chunk = false;
+                    }
+                    self.buffer.push_str(&chunk_str);
+                }
                 Poll::Ready(Some(Err(error))) => {
                     eprintln!("Error in stream: {:?}", error);
                     return Poll::Ready(None);
                 }

From a6821db342e7a77aa0a077cfa911eb90bfe51632 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Wed, 15 Oct 2025 14:24:53 +0900
Subject: [PATCH 19/32] Refactoring chat completion stream

---
 .../chat_completion/chat_completion_stream.rs | 47 +++++++++----------
 1 file changed, 23 insertions(+), 24 deletions(-)

diff --git a/src/v1/chat_completion/chat_completion_stream.rs b/src/v1/chat_completion/chat_completion_stream.rs
index 0a6fb95a..f5b3283e 100644
--- a/src/v1/chat_completion/chat_completion_stream.rs
+++ b/src/v1/chat_completion/chat_completion_stream.rs
@@ -166,30 +166,29 @@ where
             match serde_json::from_str::<Value>(&data_payload) {
                 Ok(json) => {
-                    if let Some(choices) = json.get("choices") {
-                        if let Some(choice) = choices.get(0) {
-                            if let Some(delta) = choice.get("delta") {
-                                if let Some(tool_calls) = delta.get("tool_calls") {
-                                    if let Some(tool_calls_array) = tool_calls.as_array() {
-                                        let tool_calls_vec: Vec<ToolCall> = tool_calls_array
-                                            .iter()
-                                            .filter_map(|v| serde_json::from_value(v.clone()).ok())
-                                            .collect();
-
-                                        if !tool_calls_vec.is_empty() {
-                                            return Some(ChatCompletionStreamResponse::ToolCall(
-                                                tool_calls_vec,
-                                            ));
-                                        }
-                                    }
-                                }
-
-                                if let Some(content) = delta.get("content").and_then(|c| c.as_str())
-                                {
-                                    let output = content.replace("\\n", "\n");
-                                    return Some(ChatCompletionStreamResponse::Content(output));
-                                }
-                            }
-                        }
+                    if let Some(delta) = json
+                        .get("choices")
+                        .and_then(|choices| choices.get(0))
+                        .and_then(|choice| choice.get("delta"))
+                    {
+                        if let Some(tool_call_response) = delta
+                            .get("tool_calls")
+                            .and_then(|tool_calls| tool_calls.as_array())
+                            .map(|tool_calls_array| {
+                                tool_calls_array
+                                    .iter()
+                                    .filter_map(|v| serde_json::from_value(v.clone()).ok())
+                                    .collect::<Vec<_>>()
+                            })
+                            .filter(|tool_calls_vec| !tool_calls_vec.is_empty())
+                            .map(ChatCompletionStreamResponse::ToolCall)
+                        {
+                            return Some(tool_call_response);
+                        }
+
+                        if let Some(content) = delta.get("content").and_then(|c| c.as_str()) {
+                            let output = content.replace("\\n", "\n");
+                            return Some(ChatCompletionStreamResponse::Content(output));
+                        }
                     }
                 }
From 1013dc7005f45b5cf05f1117aa5bebdcabd3c4b2 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Wed, 15 Oct 2025 14:32:30 +0900
Subject: [PATCH 20/32] v7.0.0

---
 Cargo.toml | 2 +-
 README.md  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 41e22230..1f70db08 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-api-rs"
-version = "6.0.13"
+version = "7.0.0"
 edition = "2021"
 authors = ["Dongri Jin "]
 license = "MIT"

diff --git a/README.md b/README.md
index 58996582..13fe3f67 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Cargo.toml
 ```toml
 [dependencies]
-openai-api-rs = "6.0.13"
+openai-api-rs = "7.0.0"
 ```
 
 ## Usage

From 6d0bf87c94768a756976e1fcc06b49b30a52387f Mon Sep 17 00:00:00 2001
From: Serban Bajdechi
Date: Tue, 28 Oct 2025 13:06:27 +0200
Subject: [PATCH 21/32] Do not serialize any AudioTranscription property if
 it's null.

---
 src/realtime/types.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/realtime/types.rs b/src/realtime/types.rs
index a90ff27f..4cc433da 100644
--- a/src/realtime/types.rs
+++ b/src/realtime/types.rs
@@ -51,8 +51,11 @@ pub enum AudioFormat {
 
 #[derive(Debug, Serialize, Deserialize, Clone)]
 pub struct AudioTranscription {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub language: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub model: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub prompt: Option<String>,
 }
 

From 526f833c5d60d2f9172bd923c4c117ce7285f9d9 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Wed, 29 Oct 2025 15:24:14 +0900
Subject: [PATCH 22/32] v7.0.1

---
 Cargo.toml | 2 +-
 README.md  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 1f70db08..f7b84108 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-api-rs"
-version = "7.0.0"
+version = "7.0.1"
 edition = "2021"
 authors = ["Dongri Jin "]
 license = "MIT"

diff --git a/README.md b/README.md
index 13fe3f67..573e4b8a 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Cargo.toml
 ```toml
 [dependencies]
-openai-api-rs = "7.0.0"
+openai-api-rs = "7.0.1"
 ```
 
 ## Usage

From 8e859cf9ddd0dc1aac57c585c1dc9143e621030c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ken=20=E2=99=BE=EF=B8=8F?=
Date: Wed, 29 Oct 2025 21:50:53 -0700
Subject: [PATCH 23/32] make object field in openai-compatible response
 optional

---
 src/v1/chat_completion/chat_completion.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/v1/chat_completion/chat_completion.rs b/src/v1/chat_completion/chat_completion.rs
index 2c56287d..bd2807a9 100644
--- a/src/v1/chat_completion/chat_completion.rs
+++ b/src/v1/chat_completion/chat_completion.rs
@@ -101,7 +101,7 @@ impl_builder_methods!(
 #[derive(Debug, Deserialize, Serialize)]
 pub struct ChatCompletionResponse {
     pub id: Option<String>,
-    pub object: String,
+    pub object: Option<String>,
     pub created: i64,
     pub model: String,
     pub choices: Vec<ChatCompletionChoice>,

From 84d6eea41844fbbba1210fc66878996a1c734d4f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ken=20=E2=99=BE=EF=B8=8F?=
Date: Wed, 29 Oct 2025 22:04:58 -0700
Subject: [PATCH 24/32] add '-prime' to package name so it can be published to
 crates.io

---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index f7b84108..7733ce01 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,5 +1,5 @@
 [package]
-name = "openai-api-rs"
+name = "openai-api-rs-prime"
 version = "7.0.1"
 edition = "2021"
 authors = ["Dongri Jin "]

From f1ec7609b9cf74d390b4c8e7f00806385dd991c0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ken=20=E2=99=BE=EF=B8=8F?=
Date: Wed, 29 Oct 2025 22:06:27 -0700
Subject: [PATCH 25/32] update repository url

---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index 7733ce01..c617438a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -5,7 +5,7 @@ edition = "2021"
 authors = ["Dongri Jin "]
 license = "MIT"
 description = "OpenAI API client library for Rust (unofficial)"
-repository = "https://github.com/dongri/openai-api-rs"
+repository = "https://github.com/MoonKraken/openai-api-rs"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
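On PATCH 21 above: `skip_serializing_if = "Option::is_none"` drops the key from the payload instead of emitting an explicit `null`, which is exactly what strict servers trip over. A throwaway stand-in struct (local to this sketch, not the crate's definition; the model name is illustrative) makes the wire effect visible:

```rust
use serde::Serialize;

// Stand-in mirroring the AudioTranscription fields from PATCH 21.
#[derive(Serialize)]
struct TranscriptionSketch {
    #[serde(skip_serializing_if = "Option::is_none")]
    language: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    model: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    prompt: Option<String>,
}

fn main() {
    let t = TranscriptionSketch {
        language: None,
        model: Some("whisper-1".to_string()),
        prompt: None,
    };
    // The None fields vanish entirely; nothing serializes as null.
    assert_eq!(serde_json::to_string(&t).unwrap(), r#"{"model":"whisper-1"}"#);
}
```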
From 931f234b4eb9dcc36d91bc88079c787aa17be522 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ken=20=E2=99=BE=EF=B8=8F?=
Date: Thu, 30 Oct 2025 11:33:54 -0700
Subject: [PATCH 26/32] revert Cargo.toml changes for PR to upstream repo

---
 Cargo.toml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index c617438a..f7b84108 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,11 +1,11 @@
 [package]
-name = "openai-api-rs-prime"
+name = "openai-api-rs"
 version = "7.0.1"
 edition = "2021"
 authors = ["Dongri Jin "]
 license = "MIT"
 description = "OpenAI API client library for Rust (unofficial)"
-repository = "https://github.com/MoonKraken/openai-api-rs"
+repository = "https://github.com/dongri/openai-api-rs"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

From ef09eddb6ad90de5f39d5a51c9fd74e76544eb78 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Fri, 31 Oct 2025 14:49:26 +0900
Subject: [PATCH 27/32] v8.0.0

---
 Cargo.toml | 2 +-
 README.md  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index f7b84108..39af5104 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-api-rs"
-version = "7.0.1"
+version = "8.0.0"
 edition = "2021"
 authors = ["Dongri Jin "]
 license = "MIT"

diff --git a/README.md b/README.md
index 573e4b8a..16c3172f 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Cargo.toml
 ```toml
 [dependencies]
-openai-api-rs = "7.0.1"
+openai-api-rs = "8.0.0"
 ```
 
 ## Usage

From 97700ed18cfb315005219b2f09ec53d36933ebc2 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Thu, 6 Nov 2025 13:35:40 +0900
Subject: [PATCH 28/32] Add response api

---
 README.md             |   1 +
 examples/responses.rs |  21 +++
 src/v1/api.rs         |  67 +++++++++
 src/v1/mod.rs         |   1 +
 src/v1/responses.rs   | 312 ++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 402 insertions(+)
 create mode 100644 examples/responses.rs
 create mode 100644 src/v1/responses.rs

diff --git a/README.md b/README.md
index 16c3172f..e3af4895 100644
--- a/README.md
+++ b/README.md
@@ -166,6 +166,7 @@ Check out the [full API documentation](https://platform.openai.com/docs/api-refe
 - [x] [Assistants](https://platform.openai.com/docs/assistants/overview)
 - [x] [Batch](https://platform.openai.com/docs/api-reference/batch)
 - [x] [Realtime](https://platform.openai.com/docs/api-reference/realtime)
+- [x] [Responses](https://platform.openai.com/docs/api-reference/responses)
 
 ## License

diff --git a/examples/responses.rs b/examples/responses.rs
new file mode 100644
index 00000000..33734277
--- /dev/null
+++ b/examples/responses.rs
@@ -0,0 +1,21 @@
+use openai_api_rs::v1::api::OpenAIClient;
+use openai_api_rs::v1::common::GPT4_1_MINI;
+use openai_api_rs::v1::responses::CreateResponseRequest;
+use serde_json::json;
+use std::env;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let api_key = env::var("OPENAI_API_KEY").unwrap();
+    let mut client = OpenAIClient::builder().with_api_key(api_key).build()?;
+
+    let mut req = CreateResponseRequest::new();
+    req.model = Some(GPT4_1_MINI.to_string());
+    req.input = Some(json!("Write a haiku about Rust."));
+    req.extra.insert("temperature".to_string(), json!(0.7));
+
+    let resp = client.create_response(req).await?;
+    println!("response id: {} status: {:?}", resp.id, resp.status);
+    println!("response output: {:?}", resp.output);
+    Ok(())
+}
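The example above sets `temperature` through `req.extra` rather than a typed field. That works because `CreateResponseRequest` (defined later in this patch) marks `extra` with `#[serde(flatten)]`, so inserted keys serialize at the top level of the request body alongside the typed fields. A local stand-in showing the mechanics; the struct here is illustrative, not the crate's:

```rust
use serde::Serialize;
use serde_json::{json, Value};
use std::collections::BTreeMap;

#[derive(Serialize)]
struct RequestSketch {
    model: String,
    #[serde(flatten)]
    extra: BTreeMap<String, Value>,
}

fn main() {
    let mut extra = BTreeMap::new();
    extra.insert("temperature".to_string(), json!(0.7));

    let req = RequestSketch {
        model: "gpt-4.1-mini".to_string(),
        extra,
    };
    // Flattened keys sit next to the typed fields on the wire.
    assert_eq!(
        serde_json::to_string(&req).unwrap(),
        r#"{"model":"gpt-4.1-mini","temperature":0.7}"#
    );
}
```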
diff --git a/src/v1/api.rs b/src/v1/api.rs
index 8ff11652..5d8a13d4 100644
--- a/src/v1/api.rs
+++ b/src/v1/api.rs
@@ -35,6 +35,9 @@ use crate::v1::message::{
 };
 use crate::v1::model::{ModelResponse, ModelsResponse};
 use crate::v1::moderation::{CreateModerationRequest, CreateModerationResponse};
+use crate::v1::responses::{
+    CountTokensRequest, CountTokensResponse, CreateResponseRequest, ListResponses, ResponseObject,
+};
 use crate::v1::run::{
     CreateRunRequest, CreateThreadAndRunRequest, ListRun, ListRunStep, ModifyRunRequest, RunObject,
     RunStepObject,
@@ -819,6 +822,70 @@ impl OpenAIClient {
         self.get(&url).await
     }
 
+    // Responses API
+    pub async fn create_response(
+        &mut self,
+        req: CreateResponseRequest,
+    ) -> Result<ResponseObject, APIError> {
+        self.post("responses", &req).await
+    }
+
+    pub async fn retrieve_response(
+        &mut self,
+        response_id: String,
+    ) -> Result<ResponseObject, APIError> {
+        self.get(&format!("responses/{response_id}")).await
+    }
+
+    pub async fn delete_response(
+        &mut self,
+        response_id: String,
+    ) -> Result<common::DeletionStatus, APIError> {
+        self.delete(&format!("responses/{response_id}")).await
+    }
+
+    pub async fn cancel_response(
+        &mut self,
+        response_id: String,
+    ) -> Result<ResponseObject, APIError> {
+        self.post(
+            &format!("responses/{response_id}/cancel"),
+            &common::EmptyRequestBody {},
+        )
+        .await
+    }
+
+    pub async fn list_response_input_items(
+        &mut self,
+        response_id: String,
+        after: Option<String>,
+        limit: Option<i64>,
+        order: Option<String>,
+    ) -> Result<ListResponses, APIError> {
+        let mut url = format!("responses/{}/input_items", response_id);
+        let mut params = vec![];
+        if let Some(after) = after {
+            params.push(format!("after={}", after));
+        }
+        if let Some(limit) = limit {
+            params.push(format!("limit={}", limit));
+        }
+        if let Some(order) = order {
+            params.push(format!("order={}", order));
+        }
+        if !params.is_empty() {
+            url = format!("{}?{}", url, params.join("&"));
+        }
+        self.get(&url).await
+    }
+
+    pub async fn count_response_input_tokens(
+        &mut self,
+        req: CountTokensRequest,
+    ) -> Result<CountTokensResponse, APIError> {
+        self.post("responses/input_tokens", &req).await
+    }
+
     pub async fn list_models(&mut self) -> Result<ModelsResponse, APIError> {
         self.get("models").await
     }

diff --git a/src/v1/mod.rs b/src/v1/mod.rs
index d44ed319..0dcbcbb6 100644
--- a/src/v1/mod.rs
+++ b/src/v1/mod.rs
@@ -13,6 +13,7 @@ pub mod fine_tuning;
 pub mod image;
 pub mod model;
 pub mod moderation;
+pub mod responses;
 
 // beta
 pub mod assistant;
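The pagination handling in `list_response_input_items` is plain string assembly, so it can be verified without a client. This free function copies the method's logic for illustration (the standalone signature is mine):

```rust
// Mirror of the query-string assembly in list_response_input_items:
// optional parameters are collected, then joined with '&' if any exist.
fn build_url(
    response_id: &str,
    after: Option<&str>,
    limit: Option<i64>,
    order: Option<&str>,
) -> String {
    let mut url = format!("responses/{}/input_items", response_id);
    let mut params = vec![];
    if let Some(after) = after {
        params.push(format!("after={}", after));
    }
    if let Some(limit) = limit {
        params.push(format!("limit={}", limit));
    }
    if let Some(order) = order {
        params.push(format!("order={}", order));
    }
    if !params.is_empty() {
        url = format!("{}?{}", url, params.join("&"));
    }
    url
}

fn main() {
    assert_eq!(
        build_url("resp_123", None, Some(10), Some("desc")),
        "responses/resp_123/input_items?limit=10&order=desc"
    );
    // With no parameters, no '?' is appended at all.
    assert_eq!(build_url("resp_123", None, None, None), "responses/resp_123/input_items");
}
```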
"Option::is_none")] + pub parallel_tool_calls: Option, + + // previous_response_id + #[serde(skip_serializing_if = "Option::is_none")] + pub previous_response_id: Option, + + // prompt + #[serde(skip_serializing_if = "Option::is_none")] + pub prompt: Option, + + // prompt_cache_key + #[serde(skip_serializing_if = "Option::is_none")] + pub prompt_cache_key: Option, + + // reasoning + #[serde(skip_serializing_if = "Option::is_none")] + pub reasoning: Option, + + // safety_identifier + #[serde(skip_serializing_if = "Option::is_none")] + pub safety_identifier: Option, + + // service_tier + #[serde(skip_serializing_if = "Option::is_none")] + pub service_tier: Option, + + // store + #[serde(skip_serializing_if = "Option::is_none")] + pub store: Option, + + // stream + #[serde(skip_serializing_if = "Option::is_none")] + pub stream: Option, + + // stream_options + #[serde(skip_serializing_if = "Option::is_none")] + pub stream_options: Option, + + // temperature + #[serde(skip_serializing_if = "Option::is_none")] + pub temperature: Option, + + // text + #[serde(skip_serializing_if = "Option::is_none")] + pub text: Option, + + // tool_choice + #[serde(skip_serializing_if = "Option::is_none")] + pub tool_choice: Option, + + // tools + #[serde(skip_serializing_if = "Option::is_none")] + pub tools: Option>, + + // top_logprobs + #[serde(skip_serializing_if = "Option::is_none")] + pub top_logprobs: Option, + + // top_p + #[serde(skip_serializing_if = "Option::is_none")] + pub top_p: Option, + + // truncation + #[serde(skip_serializing_if = "Option::is_none")] + pub truncation: Option, + + // user (deprecated) + #[serde(skip_serializing_if = "Option::is_none")] + pub user: Option, + + // Future-proof + #[serde(flatten)] + pub extra: BTreeMap, +} + +impl CreateResponseRequest { + pub fn new() -> Self { + Self { + background: None, + conversation: None, + include: None, + input: None, + instructions: None, + max_output_tokens: None, + max_tool_calls: None, + metadata: None, + model: None, + parallel_tool_calls: None, + previous_response_id: None, + prompt: None, + prompt_cache_key: None, + reasoning: None, + safety_identifier: None, + service_tier: None, + store: None, + stream: None, + stream_options: None, + temperature: None, + text: None, + tool_choice: None, + tools: None, + top_logprobs: None, + top_p: None, + truncation: None, + user: None, + extra: BTreeMap::new(), + } + } +} + +impl Default for CreateResponseRequest { + fn default() -> Self { + Self::new() + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ResponseObject { + pub id: String, + pub object: String, + + // Core + #[serde(skip_serializing_if = "Option::is_none")] + pub created_at: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub model: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + + // Output + #[serde(skip_serializing_if = "Option::is_none")] + pub output: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub output_text: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub output_audio: Option, + + // Control / reasons + #[serde(skip_serializing_if = "Option::is_none")] + pub stop_reason: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub refusal: Option, + + // Tools + #[serde(skip_serializing_if = "Option::is_none")] + pub tool_calls: Option, + + // Misc + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub usage: Option, + 
From df95319ec49a62958a2b40470943aba854ba7ede Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Thu, 6 Nov 2025 13:37:43 +0900
Subject: [PATCH 29/32] v8.0.1

---
 Cargo.toml            | 2 +-
 README.md             | 2 +-
 examples/responses.rs | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 39af5104..f766da4b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-api-rs"
-version = "8.0.0"
+version = "8.0.1"
 edition = "2021"
 authors = ["Dongri Jin "]
 license = "MIT"

diff --git a/README.md b/README.md
index e3af4895..46671eeb 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Cargo.toml
 ```toml
 [dependencies]
-openai-api-rs = "8.0.0"
+openai-api-rs = "8.0.1"
 ```
 
 ## Usage

diff --git a/examples/responses.rs b/examples/responses.rs
index 33734277..eb90d889 100644
--- a/examples/responses.rs
+++ b/examples/responses.rs
@@ -11,7 +11,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     let mut req = CreateResponseRequest::new();
     req.model = Some(GPT4_1_MINI.to_string());
-    req.input = Some(json!("Write a haiku about Rust."));
+    req.input = Some(json!("Tell me a three sentence bedtime story about a unicorn."));
     req.extra.insert("temperature".to_string(), json!(0.7));
 
     let resp = client.create_response(req).await?;

From efa99852e81a89253cbc6d15a3f0f53bd9e748c6 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Thu, 6 Nov 2025 13:39:53 +0900
Subject: [PATCH 30/32] Fix format

---
 examples/responses.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/examples/responses.rs b/examples/responses.rs
index eb90d889..8bc24fec 100644
--- a/examples/responses.rs
+++ b/examples/responses.rs
@@ -11,7 +11,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     let mut req = CreateResponseRequest::new();
     req.model = Some(GPT4_1_MINI.to_string());
-    req.input = Some(json!("Tell me a three sentence bedtime story about a unicorn."));
+    req.input = Some(json!(
+        "Tell me a three sentence bedtime story about a unicorn."
+    ));
     req.extra.insert("temperature".to_string(), json!(0.7));
 
     let resp = client.create_response(req).await?;

From 780462abe0af7504bcd4238a27b3f2cd72b8fbff Mon Sep 17 00:00:00 2001
From: Serban Bajdechi
Date: Tue, 11 Nov 2025 09:08:53 +0200
Subject: [PATCH 31/32] Add InputImage variant and image_url field to
 ItemContent struct

---
 src/realtime/types.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/realtime/types.rs b/src/realtime/types.rs
index 4cc433da..076534a0 100644
--- a/src/realtime/types.rs
+++ b/src/realtime/types.rs
@@ -137,6 +137,7 @@ pub enum ItemRole {
 pub enum ItemContentType {
     InputText,
     InputAudio,
+    InputImage,
     Text,
     Audio,
 }
@@ -150,6 +151,8 @@ pub struct ItemContent {
     pub audio: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub transcript: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub image_url: Option<String>,
 }
 
 #[derive(Debug, Serialize, Deserialize, Clone, Default)]

From 887505752ed0942617d82d43bb45ba975ce7db02 Mon Sep 17 00:00:00 2001
From: Dongri Jin
Date: Tue, 11 Nov 2025 21:08:54 +0900
Subject: [PATCH 32/32] v8.0.2

---
 Cargo.toml | 2 +-
 README.md  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index f766da4b..b8273f92 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-api-rs"
-version = "8.0.1"
+version = "8.0.2"
 edition = "2021"
 authors = ["Dongri Jin "]
 license = "MIT"

diff --git a/README.md b/README.md
index 46671eeb..9ee5c68c 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Cargo.toml
 ```toml
 [dependencies]
-openai-api-rs = "8.0.1"
+openai-api-rs = "8.0.2"
 ```