diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 32e0d8892c..2f8909f197 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.107.2" + ".": "2.6.1" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index e389718967..b4309cd4c3 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 118 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-94b1e3cb0bdc616ff0c2f267c33dadd95f133b1f64e647aab6c64afb292b2793.yml -openapi_spec_hash: 2395319ac9befd59b6536ae7f9564a05 -config_hash: 930dac3aa861344867e4ac84f037b5df +configured_endpoints: 136 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a3c45d9bd3bb25bf4eaa49b7fb473a00038293dec659ffaa44f624ded884abf4.yml +openapi_spec_hash: 9c20aaf786a0700dabd13d9865481c9e +config_hash: 50ee3382a63c021a9f821a935950e926 diff --git a/CHANGELOG.md b/CHANGELOG.md index 31ccac5195..0ce541566d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,178 @@ # Changelog +## 2.6.1 (2025-10-24) + +Full Changelog: [v2.6.0...v2.6.1](https://github.com/openai/openai-python/compare/v2.6.0...v2.6.1) + +### Bug Fixes + +* **api:** docs updates ([d01a0c9](https://github.com/openai/openai-python/commit/d01a0c96ecb94c78b7e16546790c573704b7515b)) + + +### Chores + +* **client:** clean up custom translations code ([cfb9e25](https://github.com/openai/openai-python/commit/cfb9e25855b8eb020abe02cdd99566adf474e821)) + +## 2.6.0 (2025-10-20) + +Full Changelog: [v2.5.0...v2.6.0](https://github.com/openai/openai-python/compare/v2.5.0...v2.6.0) + +### Features + +* **api:** Add responses.input_tokens.count ([6dd09e2](https://github.com/openai/openai-python/commit/6dd09e2829f385f72b28620888d91a4493c96772)) + + +### Bug Fixes + +* **api:** internal openapi updates ([caabd7c](https://github.com/openai/openai-python/commit/caabd7c81f0f557f66dc0089af460185a5816c11)) + +## 2.5.0 (2025-10-17) + +Full Changelog: [v2.4.0...v2.5.0](https://github.com/openai/openai-python/compare/v2.4.0...v2.5.0) + +### Features + +* **api:** api update ([8b280d5](https://github.com/openai/openai-python/commit/8b280d57d6d361bc3a032e030158f6859c445291)) + + +### Chores + +* bump `httpx-aiohttp` version to 0.1.9 ([67f2f0a](https://github.com/openai/openai-python/commit/67f2f0afe51dab9d5899fe18b1a4e86b2c774d10)) + +## 2.4.0 (2025-10-16) + +Full Changelog: [v2.3.0...v2.4.0](https://github.com/openai/openai-python/compare/v2.3.0...v2.4.0) + +### Features + +* **api:** Add support for gpt-4o-transcribe-diarize on audio/transcriptions endpoint ([bdbe9b8](https://github.com/openai/openai-python/commit/bdbe9b8f440209afa2979db4a9eda9579b3d2550)) + + +### Chores + +* fix dangling comment ([da14e99](https://github.com/openai/openai-python/commit/da14e9960608f7ade6f5cdf91967830c8a6c1657)) +* **internal:** detect missing future annotations with ruff ([2672b8f](https://github.com/openai/openai-python/commit/2672b8f0726300f7c62c356f25545ef0b3c0bb2e)) + +## 2.3.0 (2025-10-10) + +Full Changelog: [v2.2.0...v2.3.0](https://github.com/openai/openai-python/compare/v2.2.0...v2.3.0) + +### Features + +* **api:** comparison filter in/not in ([aa49f62](https://github.com/openai/openai-python/commit/aa49f626a6ea9d77ad008badfb3741e16232d62f)) + + +### Chores + +* **package:** bump jiter to >=0.10.0 to support Python 3.14 ([#2618](https://github.com/openai/openai-python/issues/2618)) 
([aa445ca](https://github.com/openai/openai-python/commit/aa445cab5c93c6908697fe98e73e16963330b141)) + +## 2.2.0 (2025-10-06) + +Full Changelog: [v2.1.0...v2.2.0](https://github.com/openai/openai-python/compare/v2.1.0...v2.2.0) + +### Features + +* **api:** dev day 2025 launches ([38ac009](https://github.com/openai/openai-python/commit/38ac0093ebb3419b1e2280d0dc2d26c74a2bbbec)) + + +### Bug Fixes + +* **client:** add chatkit to beta resource ([de3e561](https://github.com/openai/openai-python/commit/de3e5619d0a85b17906a9416039ef309e820dc0f)) + +## 2.1.0 (2025-10-02) + +Full Changelog: [v2.0.1...v2.1.0](https://github.com/openai/openai-python/compare/v2.0.1...v2.1.0) + +### Features + +* **api:** add support for realtime calls ([7f7925b](https://github.com/openai/openai-python/commit/7f7925b4074ecbf879714698000e10fa0519d51a)) + +## 2.0.1 (2025-10-01) + +Full Changelog: [v2.0.0...v2.0.1](https://github.com/openai/openai-python/compare/v2.0.0...v2.0.1) + +### Bug Fixes + +* **api:** add status, approval_request_id to MCP tool call ([2a02255](https://github.com/openai/openai-python/commit/2a022553f83b636defcfda3b1c6f4b12d901357b)) + +## 2.0.0 (2025-09-30) + +Full Changelog: [v1.109.1...v2.0.0](https://github.com/openai/openai-python/compare/v1.109.1...v2.0.0) + +### ⚠ BREAKING CHANGES + +* **api:** `ResponseFunctionToolCallOutputItem.output` and `ResponseCustomToolCallOutput.output` now return `string | Array` instead of `string` only. This may break existing callsites that assume `output` is always a string. + +### Features + +* **api:** Support images and files for function call outputs in responses, BatchUsage ([4105376](https://github.com/openai/openai-python/commit/4105376a60293581371fd5635b805b717d24aa19)) + +## 1.109.1 (2025-09-24) + +Full Changelog: [v1.109.0...v1.109.1](https://github.com/openai/openai-python/compare/v1.109.0...v1.109.1) + +### Bug Fixes + +* **compat:** compat with `pydantic<2.8.0` when using additional fields ([5d95ecf](https://github.com/openai/openai-python/commit/5d95ecf7abd65f3e4e273be14c80f9b4cd91ffe8)) + +## 1.109.0 (2025-09-23) + +Full Changelog: [v1.108.2...v1.109.0](https://github.com/openai/openai-python/compare/v1.108.2...v1.109.0) + +### Features + +* **api:** gpt-5-codex ([34502b5](https://github.com/openai/openai-python/commit/34502b5a175f8a10ea8694fcea38fe7308de89ef)) + +## 1.108.2 (2025-09-22) + +Full Changelog: [v1.108.1...v1.108.2](https://github.com/openai/openai-python/compare/v1.108.1...v1.108.2) + +### Bug Fixes + +* **api:** fix mcp tool name ([fd1c673](https://github.com/openai/openai-python/commit/fd1c673fa8d5581b38c69c37aa4fd1fd251259a2)) + + +### Chores + +* **api:** openapi updates for conversations ([3224f6f](https://github.com/openai/openai-python/commit/3224f6f9b4221b954a8f63de66bcaab389164ee5)) +* do not install brew dependencies in ./scripts/bootstrap by default ([6764b00](https://github.com/openai/openai-python/commit/6764b00bcb8aeab41e73d2fcaf6c7a18ea9f7909)) +* improve example values ([20b58e1](https://github.com/openai/openai-python/commit/20b58e164f9f28b9fc562968263fa3eacc6f5c7c)) + +## 1.108.1 (2025-09-19) + +Full Changelog: [v1.108.0...v1.108.1](https://github.com/openai/openai-python/compare/v1.108.0...v1.108.1) + +### Features + +* **api:** add reasoning_text ([18d8e12](https://github.com/openai/openai-python/commit/18d8e12061d1fd4e09d24986ff6e38c5063013e9)) + + +### Chores + +* **types:** change optional parameter type from NotGiven to Omit 
([acc190a](https://github.com/openai/openai-python/commit/acc190a29526e64db6074e7f21aca800423c128c)) + +## 1.108.0 (2025-09-17) + +Full Changelog: [v1.107.3...v1.108.0](https://github.com/openai/openai-python/compare/v1.107.3...v1.108.0) + +### Features + +* **api:** type updates for conversations, reasoning_effort and results for evals ([c2ee28c](https://github.com/openai/openai-python/commit/c2ee28c1b77eed98766fbb01cf1ad2ee240f412e)) + + +### Chores + +* **internal:** update pydantic dependency ([369d10a](https://github.com/openai/openai-python/commit/369d10a40dfe744f6bfc10c99eb1f58176500120)) + +## 1.107.3 (2025-09-15) + +Full Changelog: [v1.107.2...v1.107.3](https://github.com/openai/openai-python/compare/v1.107.2...v1.107.3) + +### Chores + +* **api:** docs and spec refactoring ([9bab5da](https://github.com/openai/openai-python/commit/9bab5da1802c3575c58e73ed1470dd5fa61fd1d2)) +* **tests:** simplify `get_platform` test ([0b1f6a2](https://github.com/openai/openai-python/commit/0b1f6a28d5a59e10873264e976d2e332903eef29)) + ## 1.107.2 (2025-09-12) Full Changelog: [v1.107.1...v1.107.2](https://github.com/openai/openai-python/compare/v1.107.1...v1.107.2) diff --git a/api.md b/api.md index 73b8427387..96642c01ad 100644 --- a/api.md +++ b/api.md @@ -171,11 +171,14 @@ Types: ```python from openai.types.audio import ( Transcription, + TranscriptionDiarized, + TranscriptionDiarizedSegment, TranscriptionInclude, TranscriptionSegment, TranscriptionStreamEvent, TranscriptionTextDeltaEvent, TranscriptionTextDoneEvent, + TranscriptionTextSegmentEvent, TranscriptionVerbose, TranscriptionWord, TranscriptionCreateResponse, @@ -687,7 +690,7 @@ Methods: Types: ```python -from openai.types import Batch, BatchError, BatchRequestCounts +from openai.types import Batch, BatchError, BatchRequestCounts, BatchUsage ``` Methods: @@ -769,6 +772,8 @@ from openai.types.responses import ( ResponseFormatTextJSONSchemaConfig, ResponseFunctionCallArgumentsDeltaEvent, ResponseFunctionCallArgumentsDoneEvent, + ResponseFunctionCallOutputItem, + ResponseFunctionCallOutputItemList, ResponseFunctionToolCall, ResponseFunctionToolCallItem, ResponseFunctionToolCallOutputItem, @@ -784,11 +789,14 @@ from openai.types.responses import ( ResponseInputAudio, ResponseInputContent, ResponseInputFile, + ResponseInputFileContent, ResponseInputImage, + ResponseInputImageContent, ResponseInputItem, ResponseInputMessageContentList, ResponseInputMessageItem, ResponseInputText, + ResponseInputTextContent, ResponseItem, ResponseMcpCallArgumentsDeltaEvent, ResponseMcpCallArgumentsDoneEvent, @@ -857,6 +865,18 @@ Methods: - client.responses.input_items.list(response_id, \*\*params) -> SyncCursorPage[ResponseItem] +## InputTokens + +Types: + +```python +from openai.types.responses import InputTokenCountResponse +``` + +Methods: + +- client.responses.input_tokens.count(\*\*params) -> InputTokenCountResponse + # Realtime Types: @@ -984,6 +1004,16 @@ Methods: - client.realtime.client_secrets.create(\*\*params) -> ClientSecretCreateResponse +## Calls + +Methods: + +- client.realtime.calls.create(\*\*params) -> HttpxBinaryResponseContent +- client.realtime.calls.accept(call_id, \*\*params) -> None +- client.realtime.calls.hangup(call_id) -> None +- client.realtime.calls.refer(call_id, \*\*params) -> None +- client.realtime.calls.reject(call_id, \*\*params) -> None + # Conversations Types: @@ -991,22 +1021,17 @@ Types: ```python from openai.types.conversations import ( ComputerScreenshotContent, - ContainerFileCitationBody, Conversation, 
ConversationDeleted, ConversationDeletedResource, - FileCitationBody, - InputFileContent, - InputImageContent, - InputTextContent, - LobProb, Message, - OutputTextContent, - RefusalContent, SummaryTextContent, TextContent, - TopLogProb, - URLCitationBody, + InputTextContent, + OutputTextContent, + RefusalContent, + InputImageContent, + InputFileContent, ) ``` @@ -1129,3 +1154,29 @@ Methods: Methods: - client.containers.files.content.retrieve(file_id, \*, container_id) -> HttpxBinaryResponseContent + +# Videos + +Types: + +```python +from openai.types import ( + Video, + VideoCreateError, + VideoModel, + VideoSeconds, + VideoSize, + VideoDeleteResponse, +) +``` + +Methods: + +- client.videos.create(\*\*params) -> Video +- client.videos.retrieve(video_id) -> Video +- client.videos.list(\*\*params) -> SyncConversationCursorPage[Video] +- client.videos.delete(video_id) -> VideoDeleteResponse +- client.videos.download_content(video_id, \*\*params) -> HttpxBinaryResponseContent +- client.videos.remix(video_id, \*\*params) -> Video +- client.videos.create_and_poll(\*args) -> Video + diff --git a/examples/responses_input_tokens.py b/examples/responses_input_tokens.py new file mode 100644 index 0000000000..39809b928f --- /dev/null +++ b/examples/responses_input_tokens.py @@ -0,0 +1,54 @@ +from typing import List + +from openai import OpenAI +from openai.types.responses.tool_param import ToolParam +from openai.types.responses.response_input_item_param import ResponseInputItemParam + + +def main() -> None: + client = OpenAI() + tools: List[ToolParam] = [ + { + "type": "function", + "name": "get_current_weather", + "description": "Get current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "City and state, e.g. San Francisco, CA", + }, + "unit": { + "type": "string", + "enum": ["c", "f"], + "description": "Temperature unit to use", + }, + }, + "required": ["location", "unit"], + "additionalProperties": False, + }, + "strict": True, + } + ] + + input_items: List[ResponseInputItemParam] = [ + { + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": "What's the weather in San Francisco today?"}], + } + ] + + response = client.responses.input_tokens.count( + model="gpt-5", + instructions="You are a concise assistant.", + input=input_items, + tools=tools, + tool_choice={"type": "function", "name": "get_current_weather"}, + ) + print(f"input tokens: {response.input_tokens}") + + +if __name__ == "__main__": + main() diff --git a/examples/video.py b/examples/video.py new file mode 100644 index 0000000000..ee89e64697 --- /dev/null +++ b/examples/video.py @@ -0,0 +1,22 @@ +#!/usr/bin/env -S poetry run python + +import asyncio + +from openai import AsyncOpenAI + +client = AsyncOpenAI() + + +async def main() -> None: + video = await client.videos.create_and_poll( + model="sora-2", + prompt="A video of the words 'Thank you' in sparkling letters", + ) + + if video.status == "completed": + print("Video successfully completed: ", video) + else: + print("Video creation failed. Status: ", video.status) + + +asyncio.run(main()) diff --git a/helpers.md b/helpers.md index 21ad8ac2fb..89ff4498cf 100644 --- a/helpers.md +++ b/helpers.md @@ -514,4 +514,5 @@ client.beta.vector_stores.files.upload_and_poll(...) client.beta.vector_stores.files.create_and_poll(...) client.beta.vector_stores.file_batches.create_and_poll(...) client.beta.vector_stores.file_batches.upload_and_poll(...) 
+client.videos.create_and_poll(...) ``` diff --git a/pyproject.toml b/pyproject.toml index 7cb1ef4f76..e96101b51c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.107.2" +version = "2.6.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" @@ -15,7 +15,7 @@ dependencies = [ "distro>=1.7.0, <2", "sniffio", "tqdm > 4", - "jiter>=0.4.0, <1", + "jiter>=0.10.0, <1", ] requires-python = ">= 3.8" classifiers = [ @@ -44,7 +44,7 @@ Repository = "https://github.com/openai/openai-python" openai = "openai.cli:main" [project.optional-dependencies] -aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.9"] realtime = ["websockets >= 13, < 16"] datalib = ["numpy >= 1", "pandas >= 1.2.3", "pandas-stubs >= 1.1.0.11"] voice_helpers = ["sounddevice>=0.5.1", "numpy>=2.0.2"] @@ -256,6 +256,8 @@ select = [ "B", # remove unused imports "F401", + # check for missing future annotations + "FA102", # bare except statements "E722", # unused arguments @@ -278,6 +280,8 @@ unfixable = [ "T203", ] +extend-safe-fixes = ["FA102"] + [tool.ruff.lint.flake8-tidy-imports.banned-api] "functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead" diff --git a/requirements-dev.lock b/requirements-dev.lock index eaf136f7e6..b454537b96 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -79,7 +79,7 @@ httpx==0.28.1 # via httpx-aiohttp # via openai # via respx -httpx-aiohttp==0.1.8 +httpx-aiohttp==0.1.9 # via openai idna==3.4 # via anyio @@ -91,7 +91,7 @@ importlib-metadata==7.0.0 iniconfig==2.0.0 # via pytest inline-snapshot==0.28.0 -jiter==0.5.0 +jiter==0.11.0 # via openai markdown-it-py==3.0.0 # via rich @@ -108,6 +108,7 @@ multidict==6.5.0 mypy==1.14.1 mypy-extensions==1.0.0 # via mypy +nest-asyncio==1.6.0 nodeenv==1.8.0 # via pyright nox==2023.4.22 @@ -133,11 +134,11 @@ portalocker==2.10.1 propcache==0.3.2 # via aiohttp # via yarl -pycparser==2.22 +pycparser==2.23 # via cffi -pydantic==2.10.3 +pydantic==2.11.9 # via openai -pydantic-core==2.27.1 +pydantic-core==2.33.2 # via pydantic pygments==2.18.0 # via pytest @@ -199,6 +200,9 @@ typing-extensions==4.12.2 # via pydantic # via pydantic-core # via pyright + # via typing-inspection +typing-inspection==0.4.1 + # via pydantic tzdata==2024.1 # via pandas urllib3==2.2.1 diff --git a/requirements.lock b/requirements.lock index 3b6ece87e2..b047cb3f88 100644 --- a/requirements.lock +++ b/requirements.lock @@ -45,13 +45,13 @@ httpcore==1.0.9 httpx==0.28.1 # via httpx-aiohttp # via openai -httpx-aiohttp==0.1.8 +httpx-aiohttp==0.1.9 # via openai idna==3.4 # via anyio # via httpx # via yarl -jiter==0.6.1 +jiter==0.11.0 # via openai multidict==6.5.0 # via aiohttp @@ -67,11 +67,11 @@ pandas-stubs==2.2.2.240807 propcache==0.3.2 # via aiohttp # via yarl -pycparser==2.22 +pycparser==2.23 # via cffi -pydantic==2.10.3 +pydantic==2.11.9 # via openai -pydantic-core==2.27.1 +pydantic-core==2.33.2 # via pydantic python-dateutil==2.9.0.post0 # via pandas @@ -93,7 +93,10 @@ typing-extensions==4.12.2 # via openai # via pydantic # via pydantic-core -tzdata==2024.1 + # via typing-inspection +typing-inspection==0.4.1 + # via pydantic +tzdata==2025.2 # via pandas websockets==15.0.1 # via openai diff --git a/scripts/bootstrap b/scripts/bootstrap index 9910ec05fc..953993addb 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,10 
+4,18 @@ set -e cd "$(dirname "$0")/.." -if ! command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then brew bundle check >/dev/null 2>&1 || { - echo "==> Installing Homebrew dependencies…" - brew bundle + echo -n "==> Install Homebrew dependencies? (y/N): " + read -r response + case "$response" in + [yY][eE][sS]|[yY]) + brew bundle + ;; + *) + ;; + esac + echo } fi diff --git a/src/openai/__init__.py b/src/openai/__init__.py index a03b49e0c4..e7411b3886 100644 --- a/src/openai/__init__.py +++ b/src/openai/__init__.py @@ -7,7 +7,7 @@ from typing_extensions import override from . import types -from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes +from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes, omit, not_given from ._utils import file_from_path from ._client import Client, OpenAI, Stream, Timeout, Transport, AsyncClient, AsyncOpenAI, AsyncStream, RequestOptions from ._models import BaseModel @@ -46,7 +46,9 @@ "ProxiesTypes", "NotGiven", "NOT_GIVEN", + "not_given", "Omit", + "omit", "OpenAIError", "APIError", "APIStatusError", @@ -377,6 +379,7 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction] files as files, images as images, models as models, + videos as videos, batches as batches, uploads as uploads, realtime as realtime, diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index d5f1ab0903..58490e4430 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -42,7 +42,6 @@ from ._qs import Querystring from ._files import to_httpx_files, async_to_httpx_files from ._types import ( - NOT_GIVEN, Body, Omit, Query, @@ -57,6 +56,7 @@ RequestOptions, HttpxRequestFiles, ModelBuilderProtocol, + not_given, ) from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping from ._compat import PYDANTIC_V1, model_copy, model_dump @@ -147,9 +147,9 @@ def __init__( def __init__( self, *, - url: URL | NotGiven = NOT_GIVEN, - json: Body | NotGiven = NOT_GIVEN, - params: Query | NotGiven = NOT_GIVEN, + url: URL | NotGiven = not_given, + json: Body | NotGiven = not_given, + params: Query | NotGiven = not_given, ) -> None: self.url = url self.json = json @@ -597,7 +597,7 @@ def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalReques # we internally support defining a temporary header to override the # default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response` # see _response.py for implementation details - override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN) + override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, not_given) if is_given(override_cast_to): options.headers = headers return cast(Type[ResponseT], override_cast_to) @@ -827,7 +827,7 @@ def __init__( version: str, base_url: str | URL, max_retries: int = DEFAULT_MAX_RETRIES, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, http_client: httpx.Client | None = None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, @@ -1373,7 +1373,7 @@ def __init__( base_url: str | URL, _strict_response_validation: bool, max_retries: int = DEFAULT_MAX_RETRIES, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, http_client: httpx.AsyncClient | None 
= None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, @@ -1850,8 +1850,8 @@ def make_request_options( extra_query: Query | None = None, extra_body: Body | None = None, idempotency_key: str | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - post_parser: PostParser | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + post_parser: PostParser | NotGiven = not_given, ) -> RequestOptions: """Create a dict of type RequestOptions without keys of NotGiven values.""" options: RequestOptions = {} diff --git a/src/openai/_client.py b/src/openai/_client.py index 2be32fe13f..a3b01b2ce6 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -3,7 +3,7 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Any, Union, Mapping, Callable, Awaitable +from typing import TYPE_CHECKING, Any, Mapping, Callable, Awaitable from typing_extensions import Self, override import httpx @@ -11,13 +11,13 @@ from . import _exceptions from ._qs import Querystring from ._types import ( - NOT_GIVEN, Omit, Timeout, NotGiven, Transport, ProxiesTypes, RequestOptions, + not_given, ) from ._utils import ( is_given, @@ -44,6 +44,7 @@ files, images, models, + videos, batches, uploads, realtime, @@ -59,6 +60,7 @@ from .resources.files import Files, AsyncFiles from .resources.images import Images, AsyncImages from .resources.models import Models, AsyncModels + from .resources.videos import Videos, AsyncVideos from .resources.batches import Batches, AsyncBatches from .resources.webhooks import Webhooks, AsyncWebhooks from .resources.beta.beta import Beta, AsyncBeta @@ -103,7 +105,7 @@ def __init__( webhook_secret: str | None = None, base_url: str | httpx.URL | None = None, websocket_base_url: str | httpx.URL | None = None, - timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, @@ -288,6 +290,12 @@ def containers(self) -> Containers: return Containers(self) + @cached_property + def videos(self) -> Videos: + from .resources.videos import Videos + + return Videos(self) + @cached_property def with_raw_response(self) -> OpenAIWithRawResponse: return OpenAIWithRawResponse(self) @@ -339,9 +347,9 @@ def copy( webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, base_url: str | httpx.URL | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, http_client: httpx.Client | None = None, - max_retries: int | NotGiven = NOT_GIVEN, + max_retries: int | NotGiven = not_given, default_headers: Mapping[str, str] | None = None, set_default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, @@ -448,7 +456,7 @@ def __init__( webhook_secret: str | None = None, base_url: str | httpx.URL | None = None, websocket_base_url: str | httpx.URL | None = None, - timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, @@ -633,6 +641,12 @@ def containers(self) -> AsyncContainers: return AsyncContainers(self) + @cached_property + def videos(self) -> AsyncVideos: + from 
.resources.videos import AsyncVideos + + return AsyncVideos(self) + @cached_property def with_raw_response(self) -> AsyncOpenAIWithRawResponse: return AsyncOpenAIWithRawResponse(self) @@ -684,9 +698,9 @@ def copy( webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, base_url: str | httpx.URL | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, http_client: httpx.AsyncClient | None = None, - max_retries: int | NotGiven = NOT_GIVEN, + max_retries: int | NotGiven = not_given, default_headers: Mapping[str, str] | None = None, set_default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, @@ -883,6 +897,12 @@ def containers(self) -> containers.ContainersWithRawResponse: return ContainersWithRawResponse(self._client.containers) + @cached_property + def videos(self) -> videos.VideosWithRawResponse: + from .resources.videos import VideosWithRawResponse + + return VideosWithRawResponse(self._client.videos) + class AsyncOpenAIWithRawResponse: _client: AsyncOpenAI @@ -998,6 +1018,12 @@ def containers(self) -> containers.AsyncContainersWithRawResponse: return AsyncContainersWithRawResponse(self._client.containers) + @cached_property + def videos(self) -> videos.AsyncVideosWithRawResponse: + from .resources.videos import AsyncVideosWithRawResponse + + return AsyncVideosWithRawResponse(self._client.videos) + class OpenAIWithStreamedResponse: _client: OpenAI @@ -1113,6 +1139,12 @@ def containers(self) -> containers.ContainersWithStreamingResponse: return ContainersWithStreamingResponse(self._client.containers) + @cached_property + def videos(self) -> videos.VideosWithStreamingResponse: + from .resources.videos import VideosWithStreamingResponse + + return VideosWithStreamingResponse(self._client.videos) + class AsyncOpenAIWithStreamedResponse: _client: AsyncOpenAI @@ -1228,6 +1260,12 @@ def containers(self) -> containers.AsyncContainersWithStreamingResponse: return AsyncContainersWithStreamingResponse(self._client.containers) + @cached_property + def videos(self) -> videos.AsyncVideosWithStreamingResponse: + from .resources.videos import AsyncVideosWithStreamingResponse + + return AsyncVideosWithStreamingResponse(self._client.videos) + Client = OpenAI diff --git a/src/openai/_models.py b/src/openai/_models.py index 8ee8612d1e..af71a91850 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -281,7 +281,7 @@ def model_dump( mode: Literal["json", "python"] | str = "python", include: IncEx | None = None, exclude: IncEx | None = None, - by_alias: bool = False, + by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, @@ -289,6 +289,7 @@ def model_dump( warnings: bool | Literal["none", "warn", "error"] = True, context: dict[str, Any] | None = None, serialize_as_any: bool = False, + fallback: Callable[[Any], Any] | None = None, ) -> dict[str, Any]: """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump @@ -320,10 +321,12 @@ def model_dump( raise ValueError("context is only supported in Pydantic v2") if serialize_as_any != False: raise ValueError("serialize_as_any is only supported in Pydantic v2") + if fallback is not None: + raise ValueError("fallback is only supported in Pydantic v2") dumped = super().dict( # pyright: ignore[reportDeprecated] include=include, exclude=exclude, - by_alias=by_alias, + by_alias=by_alias if by_alias is not None else False, 
exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, @@ -338,13 +341,14 @@ def model_dump_json( indent: int | None = None, include: IncEx | None = None, exclude: IncEx | None = None, - by_alias: bool = False, + by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool | Literal["none", "warn", "error"] = True, context: dict[str, Any] | None = None, + fallback: Callable[[Any], Any] | None = None, serialize_as_any: bool = False, ) -> str: """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json @@ -373,11 +377,13 @@ def model_dump_json( raise ValueError("context is only supported in Pydantic v2") if serialize_as_any != False: raise ValueError("serialize_as_any is only supported in Pydantic v2") + if fallback is not None: + raise ValueError("fallback is only supported in Pydantic v2") return super().json( # type: ignore[reportDeprecated] indent=indent, include=include, exclude=exclude, - by_alias=by_alias, + by_alias=by_alias if by_alias is not None else False, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py index 4ecc28420a..d0d721887b 100644 --- a/src/openai/_module_client.py +++ b/src/openai/_module_client.py @@ -9,6 +9,7 @@ from .resources.files import Files from .resources.images import Images from .resources.models import Models + from .resources.videos import Videos from .resources.batches import Batches from .resources.webhooks import Webhooks from .resources.beta.beta import Beta @@ -72,6 +73,12 @@ def __load__(self) -> Models: return _load_client().models +class VideosProxy(LazyProxy["Videos"]): + @override + def __load__(self) -> Videos: + return _load_client().videos + + class BatchesProxy(LazyProxy["Batches"]): @override def __load__(self) -> Batches: @@ -151,6 +158,7 @@ def __load__(self) -> Conversations: evals: Evals = EvalsProxy().__as_proxied__() images: Images = ImagesProxy().__as_proxied__() models: Models = ModelsProxy().__as_proxied__() +videos: Videos = VideosProxy().__as_proxied__() batches: Batches = BatchesProxy().__as_proxied__() uploads: Uploads = UploadsProxy().__as_proxied__() webhooks: Webhooks = WebhooksProxy().__as_proxied__() diff --git a/src/openai/_qs.py b/src/openai/_qs.py index 274320ca5e..ada6fd3f72 100644 --- a/src/openai/_qs.py +++ b/src/openai/_qs.py @@ -4,7 +4,7 @@ from urllib.parse import parse_qs, urlencode from typing_extensions import Literal, get_args -from ._types import NOT_GIVEN, NotGiven, NotGivenOr +from ._types import NotGiven, not_given from ._utils import flatten _T = TypeVar("_T") @@ -41,8 +41,8 @@ def stringify( self, params: Params, *, - array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, - nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, ) -> str: return urlencode( self.stringify_items( @@ -56,8 +56,8 @@ def stringify_items( self, params: Params, *, - array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, - nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, ) -> list[tuple[str, str]]: opts = Options( qs=self, @@ -143,8 +143,8 @@ def __init__( self, qs: Querystring = _qs, *, - array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, - 
nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, ) -> None: self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format diff --git a/src/openai/_types.py b/src/openai/_types.py index 0e8ffa12aa..2387d7e01c 100644 --- a/src/openai/_types.py +++ b/src/openai/_types.py @@ -118,18 +118,21 @@ class RequestOptions(TypedDict, total=False): # Sentinel class used until PEP 0661 is accepted class NotGiven: """ - A sentinel singleton class used to distinguish omitted keyword arguments - from those passed in with the value None (which may have different behavior). + For parameters with a meaningful None value, we need to distinguish between + the user explicitly passing None, and the user not passing the parameter at + all. + + User code shouldn't need to use not_given directly. For example: ```py - def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ... + def create(timeout: Timeout | None | NotGiven = not_given): ... - get(timeout=1) # 1s timeout - get(timeout=None) # No timeout - get() # Default timeout behavior, which may not be statically known at the method definition. + create(timeout=1) # 1s timeout + create(timeout=None) # No timeout + create() # Default timeout behavior ``` """ @@ -141,13 +144,14 @@ def __repr__(self) -> str: return "NOT_GIVEN" -NotGivenOr = Union[_T, NotGiven] +not_given = NotGiven() +# for backwards compatibility: NOT_GIVEN = NotGiven() class Omit: - """In certain situations you need to be able to represent a case where a default value has - to be explicitly removed and `None` is not an appropriate substitute, for example: + """ + To explicitly omit something from being sent in a request, use `omit`. 
```py # as the default `Content-Type` header is `application/json` that will be sent @@ -157,8 +161,8 @@ class Omit: # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983' client.post(..., headers={"Content-Type": "multipart/form-data"}) - # instead you can remove the default `application/json` header by passing Omit - client.post(..., headers={"Content-Type": Omit()}) + # instead you can remove the default `application/json` header by passing omit + client.post(..., headers={"Content-Type": omit}) ``` """ @@ -166,6 +170,11 @@ def __bool__(self) -> Literal[False]: return False +omit = Omit() + +Omittable = Union[_T, Omit] + + @runtime_checkable class ModelBuilderProtocol(Protocol): @classmethod diff --git a/src/openai/_utils/_transform.py b/src/openai/_utils/_transform.py index bc262ea339..414f38c340 100644 --- a/src/openai/_utils/_transform.py +++ b/src/openai/_utils/_transform.py @@ -268,7 +268,7 @@ def _transform_typeddict( annotations = get_type_hints(expected_type, include_extras=True) for key, value in data.items(): if not is_given(value): - # we don't need to include `NotGiven` values here as they'll + # we don't need to include omitted values here as they'll # be stripped out before the request is sent anyway continue @@ -434,7 +434,7 @@ async def _async_transform_typeddict( annotations = get_type_hints(expected_type, include_extras=True) for key, value in data.items(): if not is_given(value): - # we don't need to include `NotGiven` values here as they'll + # we don't need to include omitted values here as they'll # be stripped out before the request is sent anyway continue diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py index 4a23c96c0a..cddf2c8da4 100644 --- a/src/openai/_utils/_utils.py +++ b/src/openai/_utils/_utils.py @@ -22,7 +22,7 @@ import sniffio -from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike +from .._types import Omit, NotGiven, FileTypes, HeadersLike _T = TypeVar("_T") _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...]) @@ -67,7 +67,7 @@ def _extract_items( try: key = path[index] except IndexError: - if isinstance(obj, NotGiven): + if not is_given(obj): # no value was provided - we can safely ignore return [] @@ -130,8 +130,8 @@ def _extract_items( return [] -def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]: - return not isinstance(obj, NotGiven) +def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]: + return not isinstance(obj, NotGiven) and not isinstance(obj, Omit) # Type safe methods for narrowing types with TypeVars. diff --git a/src/openai/_version.py b/src/openai/_version.py index 70f9958885..b0fe817996 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.107.2" # x-release-please-version +__version__ = "2.6.1" # x-release-please-version diff --git a/src/openai/cli/_api/audio.py b/src/openai/cli/_api/audio.py index 269c67df28..e7c3734e75 100644 --- a/src/openai/cli/_api/audio.py +++ b/src/openai/cli/_api/audio.py @@ -5,7 +5,7 @@ from argparse import ArgumentParser from .._utils import get_client, print_model -from ..._types import NOT_GIVEN +from ..._types import omit from .._models import BaseModel from .._progress import BufferReader from ...types.audio import Transcription @@ -72,9 +72,9 @@ def transcribe(args: CLITranscribeArgs) -> None: get_client().audio.transcriptions.create( file=(args.file, buffer_reader), model=args.model, - language=args.language or NOT_GIVEN, - temperature=args.temperature or NOT_GIVEN, - prompt=args.prompt or NOT_GIVEN, + language=args.language or omit, + temperature=args.temperature or omit, + prompt=args.prompt or omit, # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat response_format=cast(Any, args.response_format), @@ -95,8 +95,8 @@ def translate(args: CLITranslationArgs) -> None: get_client().audio.translations.create( file=(args.file, buffer_reader), model=args.model, - temperature=args.temperature or NOT_GIVEN, - prompt=args.prompt or NOT_GIVEN, + temperature=args.temperature or omit, + prompt=args.prompt or omit, # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat response_format=cast(Any, args.response_format), diff --git a/src/openai/cli/_api/completions.py b/src/openai/cli/_api/completions.py index cbdb35bf3a..b22ecde9ef 100644 --- a/src/openai/cli/_api/completions.py +++ b/src/openai/cli/_api/completions.py @@ -8,7 +8,7 @@ from openai.types.completion import Completion from .._utils import get_client -from ..._types import NOT_GIVEN, NotGivenOr +from ..._types import Omittable, omit from ..._utils import is_given from .._errors import CLIError from .._models import BaseModel @@ -95,18 +95,18 @@ class CLICompletionCreateArgs(BaseModel): stream: bool = False prompt: Optional[str] = None - n: NotGivenOr[int] = NOT_GIVEN - stop: NotGivenOr[str] = NOT_GIVEN - user: NotGivenOr[str] = NOT_GIVEN - echo: NotGivenOr[bool] = NOT_GIVEN - suffix: NotGivenOr[str] = NOT_GIVEN - best_of: NotGivenOr[int] = NOT_GIVEN - top_p: NotGivenOr[float] = NOT_GIVEN - logprobs: NotGivenOr[int] = NOT_GIVEN - max_tokens: NotGivenOr[int] = NOT_GIVEN - temperature: NotGivenOr[float] = NOT_GIVEN - presence_penalty: NotGivenOr[float] = NOT_GIVEN - frequency_penalty: NotGivenOr[float] = NOT_GIVEN + n: Omittable[int] = omit + stop: Omittable[str] = omit + user: Omittable[str] = omit + echo: Omittable[bool] = omit + suffix: Omittable[str] = omit + best_of: Omittable[int] = omit + top_p: Omittable[float] = omit + logprobs: Omittable[int] = omit + max_tokens: Omittable[int] = omit + temperature: Omittable[float] = omit + presence_penalty: Omittable[float] = omit + frequency_penalty: Omittable[float] = omit class CLICompletions: diff --git a/src/openai/cli/_api/fine_tuning/jobs.py b/src/openai/cli/_api/fine_tuning/jobs.py index 806fa0f788..a4e429108a 100644 --- a/src/openai/cli/_api/fine_tuning/jobs.py +++ b/src/openai/cli/_api/fine_tuning/jobs.py @@ -5,7 +5,8 @@ from argparse import ArgumentParser from ..._utils import get_client, print_model -from ...._types import NOT_GIVEN, NotGivenOr +from ...._types import Omittable, omit +from ...._utils import is_given from ..._models 
import BaseModel from ....pagination import SyncCursorPage from ....types.fine_tuning import ( @@ -105,9 +106,9 @@ def register(subparser: _SubParsersAction[ArgumentParser]) -> None: class CLIFineTuningJobsCreateArgs(BaseModel): model: str training_file: str - hyperparameters: NotGivenOr[str] = NOT_GIVEN - suffix: NotGivenOr[str] = NOT_GIVEN - validation_file: NotGivenOr[str] = NOT_GIVEN + hyperparameters: Omittable[str] = omit + suffix: Omittable[str] = omit + validation_file: Omittable[str] = omit class CLIFineTuningJobsRetrieveArgs(BaseModel): @@ -115,8 +116,8 @@ class CLIFineTuningJobsRetrieveArgs(BaseModel): class CLIFineTuningJobsListArgs(BaseModel): - after: NotGivenOr[str] = NOT_GIVEN - limit: NotGivenOr[int] = NOT_GIVEN + after: Omittable[str] = omit + limit: Omittable[int] = omit class CLIFineTuningJobsCancelArgs(BaseModel): @@ -125,14 +126,14 @@ class CLIFineTuningJobsCancelArgs(BaseModel): class CLIFineTuningJobsListEventsArgs(BaseModel): id: str - after: NotGivenOr[str] = NOT_GIVEN - limit: NotGivenOr[int] = NOT_GIVEN + after: Omittable[str] = omit + limit: Omittable[int] = omit class CLIFineTuningJobs: @staticmethod def create(args: CLIFineTuningJobsCreateArgs) -> None: - hyperparameters = json.loads(str(args.hyperparameters)) if args.hyperparameters is not NOT_GIVEN else NOT_GIVEN + hyperparameters = json.loads(str(args.hyperparameters)) if is_given(args.hyperparameters) else omit fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.create( model=args.model, training_file=args.training_file, @@ -150,7 +151,7 @@ def retrieve(args: CLIFineTuningJobsRetrieveArgs) -> None: @staticmethod def list(args: CLIFineTuningJobsListArgs) -> None: fine_tuning_jobs: SyncCursorPage[FineTuningJob] = get_client().fine_tuning.jobs.list( - after=args.after or NOT_GIVEN, limit=args.limit or NOT_GIVEN + after=args.after or omit, limit=args.limit or omit ) print_model(fine_tuning_jobs) @@ -163,7 +164,7 @@ def cancel(args: CLIFineTuningJobsCancelArgs) -> None: def list_events(args: CLIFineTuningJobsListEventsArgs) -> None: fine_tuning_job_events: SyncCursorPage[FineTuningJobEvent] = get_client().fine_tuning.jobs.list_events( fine_tuning_job_id=args.id, - after=args.after or NOT_GIVEN, - limit=args.limit or NOT_GIVEN, + after=args.after or omit, + limit=args.limit or omit, ) print_model(fine_tuning_job_events) diff --git a/src/openai/cli/_api/image.py b/src/openai/cli/_api/image.py index 3e2a0a90f1..1d0cf810c1 100644 --- a/src/openai/cli/_api/image.py +++ b/src/openai/cli/_api/image.py @@ -4,7 +4,7 @@ from argparse import ArgumentParser from .._utils import get_client, print_model -from ..._types import NOT_GIVEN, NotGiven, NotGivenOr +from ..._types import Omit, Omittable, omit from .._models import BaseModel from .._progress import BufferReader @@ -63,7 +63,7 @@ class CLIImageCreateArgs(BaseModel): num_images: int size: str response_format: str - model: NotGivenOr[str] = NOT_GIVEN + model: Omittable[str] = omit class CLIImageCreateVariationArgs(BaseModel): @@ -71,7 +71,7 @@ class CLIImageCreateVariationArgs(BaseModel): num_images: int size: str response_format: str - model: NotGivenOr[str] = NOT_GIVEN + model: Omittable[str] = omit class CLIImageEditArgs(BaseModel): @@ -80,8 +80,8 @@ class CLIImageEditArgs(BaseModel): size: str response_format: str prompt: str - mask: NotGivenOr[str] = NOT_GIVEN - model: NotGivenOr[str] = NOT_GIVEN + mask: Omittable[str] = omit + model: Omittable[str] = omit class CLIImage: @@ -119,8 +119,8 @@ def edit(args: CLIImageEditArgs) -> None: with 
open(args.image, "rb") as file_reader: buffer_reader = BufferReader(file_reader.read(), desc="Image upload progress") - if isinstance(args.mask, NotGiven): - mask: NotGivenOr[BufferReader] = NOT_GIVEN + if isinstance(args.mask, Omit): + mask: Omittable[BufferReader] = omit else: with open(args.mask, "rb") as file_reader: mask = BufferReader(file_reader.read(), desc="Mask progress") @@ -130,7 +130,7 @@ def edit(args: CLIImageEditArgs) -> None: prompt=args.prompt, image=("image", buffer_reader), n=args.num_images, - mask=("mask", mask) if not isinstance(mask, NotGiven) else mask, + mask=("mask", mask) if not isinstance(mask, Omit) else mask, # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat size=cast(Any, args.size), diff --git a/src/openai/lib/_parsing/_completions.py b/src/openai/lib/_parsing/_completions.py index 4b8b78b70a..7903732a4a 100644 --- a/src/openai/lib/_parsing/_completions.py +++ b/src/openai/lib/_parsing/_completions.py @@ -8,7 +8,7 @@ import pydantic from .._tools import PydanticFunctionTool -from ..._types import NOT_GIVEN, NotGiven +from ..._types import Omit, omit from ..._utils import is_dict, is_given from ..._compat import PYDANTIC_V1, model_parse_json from ..._models import construct_type_unchecked @@ -53,20 +53,20 @@ def is_strict_chat_completion_tool_param( def select_strict_chat_completion_tools( - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, -) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven: + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, +) -> Iterable[ChatCompletionFunctionToolParam] | Omit: """Select only the strict ChatCompletionFunctionToolParams from the given tools.""" if not is_given(tools): - return NOT_GIVEN + return omit return [t for t in tools if is_strict_chat_completion_tool_param(t)] def validate_input_tools( - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, -) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven: + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, +) -> Iterable[ChatCompletionFunctionToolParam] | Omit: if not is_given(tools): - return NOT_GIVEN + return omit for tool in tools: if tool["type"] != "function": @@ -85,8 +85,8 @@ def validate_input_tools( def parse_chat_completion( *, - response_format: type[ResponseFormatT] | completion_create_params.ResponseFormat | NotGiven, - input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, + response_format: type[ResponseFormatT] | completion_create_params.ResponseFormat | Omit, + input_tools: Iterable[ChatCompletionToolUnionParam] | Omit, chat_completion: ChatCompletion | ParsedChatCompletion[object], ) -> ParsedChatCompletion[ResponseFormatT]: if is_given(input_tools): @@ -192,7 +192,7 @@ def parse_function_tool_arguments( def maybe_parse_content( *, - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, message: ChatCompletionMessage | ParsedChatCompletionMessage[object], ) -> ResponseFormatT | None: if has_rich_response_format(response_format) and message.content and not message.refusal: @@ -202,7 +202,7 @@ def maybe_parse_content( def solve_response_format_t( - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, ) -> type[ResponseFormatT]: """Return the runtime type for the given response format. 
@@ -217,8 +217,8 @@ def solve_response_format_t( def has_parseable_input( *, - response_format: type | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, + response_format: type | ResponseFormatParam | Omit, + input_tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, ) -> bool: if has_rich_response_format(response_format): return True @@ -231,7 +231,7 @@ def has_parseable_input( def has_rich_response_format( - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, ) -> TypeGuard[type[ResponseFormatT]]: if not is_given(response_format): return False @@ -271,10 +271,10 @@ def _parse_content(response_format: type[ResponseFormatT], content: str) -> Resp def type_to_response_format_param( - response_format: type | completion_create_params.ResponseFormat | NotGiven, -) -> ResponseFormatParam | NotGiven: + response_format: type | completion_create_params.ResponseFormat | Omit, +) -> ResponseFormatParam | Omit: if not is_given(response_format): - return NOT_GIVEN + return omit if is_response_format_param(response_format): return response_format diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py index b6ebde0e8e..8a1bf3cf2c 100644 --- a/src/openai/lib/_parsing/_responses.py +++ b/src/openai/lib/_parsing/_responses.py @@ -7,7 +7,7 @@ import pydantic from .._tools import ResponsesPydanticFunctionTool -from ..._types import NotGiven +from ..._types import Omit from ..._utils import is_given from ..._compat import PYDANTIC_V1, model_parse_json from ..._models import construct_type_unchecked @@ -52,8 +52,8 @@ def type_to_text_format_param(type_: type) -> ResponseFormatTextConfigParam: def parse_response( *, - text_format: type[TextFormatT] | NotGiven, - input_tools: Iterable[ToolParam] | NotGiven | None, + text_format: type[TextFormatT] | Omit, + input_tools: Iterable[ToolParam] | Omit | None, response: Response | ParsedResponse[object], ) -> ParsedResponse[TextFormatT]: solved_t = solve_response_format_t(text_format) @@ -130,7 +130,7 @@ def parse_response( ) -def parse_text(text: str, text_format: type[TextFormatT] | NotGiven) -> TextFormatT | None: +def parse_text(text: str, text_format: type[TextFormatT] | Omit) -> TextFormatT | None: if not is_given(text_format): return None @@ -156,7 +156,7 @@ def get_input_tool_by_name(*, input_tools: Iterable[ToolParam], name: str) -> Fu def parse_function_tool_arguments( *, - input_tools: Iterable[ToolParam] | NotGiven | None, + input_tools: Iterable[ToolParam] | Omit | None, function_call: ParsedResponseFunctionToolCall | ResponseFunctionToolCall, ) -> object: if input_tools is None or not is_given(input_tools): diff --git a/src/openai/lib/_realtime.py b/src/openai/lib/_realtime.py new file mode 100644 index 0000000000..999d1e4463 --- /dev/null +++ b/src/openai/lib/_realtime.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import json +from typing_extensions import override + +import httpx + +from openai import _legacy_response +from openai._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from openai._utils import maybe_transform, async_maybe_transform +from openai._base_client import make_request_options +from openai.resources.realtime.calls import Calls, AsyncCalls +from openai.types.realtime.realtime_session_create_request_param import RealtimeSessionCreateRequestParam + +__all__ = ["_Calls", "_AsyncCalls"] + + +# Custom code to 
override the `create` method to have correct behavior with +# application/sdp and multipart/form-data. +# Ideally we can cutover to the generated code this overrides eventually and remove this. +class _Calls(Calls): + @override + def create( + self, + *, + sdp: str, + session: RealtimeSessionCreateRequestParam | Omit = omit, + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> _legacy_response.HttpxBinaryResponseContent: + if session is omit: + extra_headers = {"Accept": "application/sdp", "Content-Type": "application/sdp", **(extra_headers or {})} + return self._post( + "/realtime/calls", + body=sdp.encode("utf-8"), + options=make_request_options(extra_headers=extra_headers, extra_query=extra_query, timeout=timeout), + cast_to=_legacy_response.HttpxBinaryResponseContent, + ) + + extra_headers = {"Accept": "application/sdp", "Content-Type": "multipart/form-data", **(extra_headers or {})} + session_payload = maybe_transform(session, RealtimeSessionCreateRequestParam) + files = [ + ("sdp", (None, sdp.encode("utf-8"), "application/sdp")), + ("session", (None, json.dumps(session_payload).encode("utf-8"), "application/json")), + ] + return self._post( + "/realtime/calls", + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=_legacy_response.HttpxBinaryResponseContent, + ) + + +class _AsyncCalls(AsyncCalls): + @override + async def create( + self, + *, + sdp: str, + session: RealtimeSessionCreateRequestParam | Omit = omit, + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> _legacy_response.HttpxBinaryResponseContent: + if session is omit: + extra_headers = {"Accept": "application/sdp", "Content-Type": "application/sdp", **(extra_headers or {})} + return await self._post( + "/realtime/calls", + body=sdp.encode("utf-8"), + options=make_request_options(extra_headers=extra_headers, extra_query=extra_query, timeout=timeout), + cast_to=_legacy_response.HttpxBinaryResponseContent, + ) + + extra_headers = {"Accept": "application/sdp", "Content-Type": "multipart/form-data", **(extra_headers or {})} + session_payload = await async_maybe_transform(session, RealtimeSessionCreateRequestParam) + files = [ + ("sdp", (None, sdp.encode("utf-8"), "application/sdp")), + ("session", (None, json.dumps(session_payload).encode("utf-8"), "application/json")), + ] + return await self._post( + "/realtime/calls", + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=_legacy_response.HttpxBinaryResponseContent, + ) diff --git a/src/openai/lib/streaming/chat/_completions.py b/src/openai/lib/streaming/chat/_completions.py index 52a6a550b2..c4610e2120 100644 --- a/src/openai/lib/streaming/chat/_completions.py +++ b/src/openai/lib/streaming/chat/_completions.py @@ -23,7 +23,7 @@ FunctionToolCallArgumentsDeltaEvent, ) from .._deltas import accumulate_delta -from ...._types import NOT_GIVEN, IncEx, NotGiven +from ...._types import Omit, IncEx, omit from ...._utils import is_given, consume_sync_iterator, consume_async_iterator from ...._compat import model_dump from ...._models import build, construct_type @@ -57,8 +57,8 @@ def __init__( self, *, raw_stream: 
Stream[ChatCompletionChunk], - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, + input_tools: Iterable[ChatCompletionToolUnionParam] | Omit, ) -> None: self._raw_stream = raw_stream self._response = raw_stream.response @@ -138,8 +138,8 @@ def __init__( self, api_request: Callable[[], Stream[ChatCompletionChunk]], *, - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, + input_tools: Iterable[ChatCompletionToolUnionParam] | Omit, ) -> None: self.__stream: ChatCompletionStream[ResponseFormatT] | None = None self.__api_request = api_request @@ -180,8 +180,8 @@ def __init__( self, *, raw_stream: AsyncStream[ChatCompletionChunk], - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, + input_tools: Iterable[ChatCompletionToolUnionParam] | Omit, ) -> None: self._raw_stream = raw_stream self._response = raw_stream.response @@ -261,8 +261,8 @@ def __init__( self, api_request: Awaitable[AsyncStream[ChatCompletionChunk]], *, - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, + input_tools: Iterable[ChatCompletionToolUnionParam] | Omit, ) -> None: self.__stream: AsyncChatCompletionStream[ResponseFormatT] | None = None self.__api_request = api_request @@ -314,15 +314,15 @@ class ChatCompletionStreamState(Generic[ResponseFormatT]): def __init__( self, *, - input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven = NOT_GIVEN, + input_tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit = omit, ) -> None: self.__current_completion_snapshot: ParsedChatCompletionSnapshot | None = None self.__choice_event_states: list[ChoiceEventState] = [] self._input_tools = [tool for tool in input_tools] if is_given(input_tools) else [] self._response_format = response_format - self._rich_response_format: type | NotGiven = response_format if inspect.isclass(response_format) else NOT_GIVEN + self._rich_response_format: type | Omit = response_format if inspect.isclass(response_format) else omit def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]: """Parse the final completion object. 
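
Also worth a note here: `ChatCompletionStreamState`'s constructor above now defaults both parameters to `omit`, so the manual-accumulation path needs no sentinel imports at all. A hedged usage sketch; the `handle_chunk` method and the `openai.lib.streaming.chat` import path are assumed from the helper's existing surface, since this hunk only shows `__init__` and `get_final_completion`:

```python
from openai import OpenAI
from openai.lib.streaming.chat import ChatCompletionStreamState  # assumed export

client = OpenAI()
state = ChatCompletionStreamState()  # input_tools/response_format default to `omit`

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello."}],
    stream=True,
)
for chunk in stream:
    state.handle_chunk(chunk)  # assumed accumulator; not shown in this hunk

completion = state.get_final_completion()
print(completion.choices[0].message.content)
```
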
@@ -599,7 +599,7 @@ def get_done_events( *, choice_chunk: ChoiceChunk, choice_snapshot: ParsedChoiceSnapshot, - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]: events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = [] @@ -639,7 +639,7 @@ def _content_done_events( self, *, choice_snapshot: ParsedChoiceSnapshot, - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, + response_format: type[ResponseFormatT] | ResponseFormatParam | Omit, ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]: events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = [] diff --git a/src/openai/lib/streaming/responses/_responses.py b/src/openai/lib/streaming/responses/_responses.py index d45664de45..6975a9260d 100644 --- a/src/openai/lib/streaming/responses/_responses.py +++ b/src/openai/lib/streaming/responses/_responses.py @@ -13,7 +13,7 @@ ResponseTextDeltaEvent, ResponseFunctionCallArgumentsDeltaEvent, ) -from ...._types import NOT_GIVEN, NotGiven +from ...._types import Omit, omit from ...._utils import is_given, consume_sync_iterator, consume_async_iterator from ...._models import build, construct_type_unchecked from ...._streaming import Stream, AsyncStream @@ -32,8 +32,8 @@ def __init__( self, *, raw_stream: Stream[RawResponseStreamEvent], - text_format: type[TextFormatT] | NotGiven, - input_tools: Iterable[ToolParam] | NotGiven, + text_format: type[TextFormatT] | Omit, + input_tools: Iterable[ToolParam] | Omit, starting_after: int | None, ) -> None: self._raw_stream = raw_stream @@ -97,8 +97,8 @@ def __init__( self, api_request: Callable[[], Stream[RawResponseStreamEvent]], *, - text_format: type[TextFormatT] | NotGiven, - input_tools: Iterable[ToolParam] | NotGiven, + text_format: type[TextFormatT] | Omit, + input_tools: Iterable[ToolParam] | Omit, starting_after: int | None, ) -> None: self.__stream: ResponseStream[TextFormatT] | None = None @@ -134,8 +134,8 @@ def __init__( self, *, raw_stream: AsyncStream[RawResponseStreamEvent], - text_format: type[TextFormatT] | NotGiven, - input_tools: Iterable[ToolParam] | NotGiven, + text_format: type[TextFormatT] | Omit, + input_tools: Iterable[ToolParam] | Omit, starting_after: int | None, ) -> None: self._raw_stream = raw_stream @@ -199,8 +199,8 @@ def __init__( self, api_request: Awaitable[AsyncStream[RawResponseStreamEvent]], *, - text_format: type[TextFormatT] | NotGiven, - input_tools: Iterable[ToolParam] | NotGiven, + text_format: type[TextFormatT] | Omit, + input_tools: Iterable[ToolParam] | Omit, starting_after: int | None, ) -> None: self.__stream: AsyncResponseStream[TextFormatT] | None = None @@ -235,14 +235,14 @@ class ResponseStreamState(Generic[TextFormatT]): def __init__( self, *, - input_tools: Iterable[ToolParam] | NotGiven, - text_format: type[TextFormatT] | NotGiven, + input_tools: Iterable[ToolParam] | Omit, + text_format: type[TextFormatT] | Omit, ) -> None: self.__current_snapshot: ParsedResponseSnapshot | None = None self._completed_response: ParsedResponse[TextFormatT] | None = None self._input_tools = [tool for tool in input_tools] if is_given(input_tools) else [] self._text_format = text_format - self._rich_text_format: type | NotGiven = text_format if inspect.isclass(text_format) else NOT_GIVEN + self._rich_text_format: type | Omit = text_format if inspect.isclass(text_format) else omit def handle_event(self, event: RawResponseStreamEvent) -> 
List[ResponseStreamEvent[TextFormatT]]: self.__current_snapshot = snapshot = self.accumulate_event(event) diff --git a/src/openai/resources/__init__.py b/src/openai/resources/__init__.py index 82c9f037d9..b793fbc7b0 100644 --- a/src/openai/resources/__init__.py +++ b/src/openai/resources/__init__.py @@ -56,6 +56,14 @@ ModelsWithStreamingResponse, AsyncModelsWithStreamingResponse, ) +from .videos import ( + Videos, + AsyncVideos, + VideosWithRawResponse, + AsyncVideosWithRawResponse, + VideosWithStreamingResponse, + AsyncVideosWithStreamingResponse, +) from .batches import ( Batches, AsyncBatches, @@ -212,4 +220,10 @@ "AsyncContainersWithRawResponse", "ContainersWithStreamingResponse", "AsyncContainersWithStreamingResponse", + "Videos", + "AsyncVideos", + "VideosWithRawResponse", + "AsyncVideosWithRawResponse", + "VideosWithStreamingResponse", + "AsyncVideosWithStreamingResponse", ] diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index 64ce5eec49..992fb5971a 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -8,7 +8,7 @@ import httpx from ... import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -53,16 +53,16 @@ def create( voice: Union[ str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"] ], - instructions: str | NotGiven = NOT_GIVEN, - response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, - speed: float | NotGiven = NOT_GIVEN, - stream_format: Literal["sse", "audio"] | NotGiven = NOT_GIVEN, + instructions: str | Omit = omit, + response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit, + speed: float | Omit = omit, + stream_format: Literal["sse", "audio"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> _legacy_response.HttpxBinaryResponseContent: """ Generates audio from the input text. @@ -149,16 +149,16 @@ async def create( voice: Union[ str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"] ], - instructions: str | NotGiven = NOT_GIVEN, - response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, - speed: float | NotGiven = NOT_GIVEN, - stream_format: Literal["sse", "audio"] | NotGiven = NOT_GIVEN, + instructions: str | Omit = omit, + response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit, + speed: float | Omit = omit, + stream_format: Literal["sse", "audio"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> _legacy_response.HttpxBinaryResponseContent: """ Generates audio from the input text. diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index 208f6e8b05..a5c86146d4 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -9,8 +9,17 @@ import httpx from ... import _legacy_response -from ...types import AudioResponseFormat -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from ..._types import ( + Body, + Omit, + Query, + Headers, + NotGiven, + FileTypes, + SequenceNotStr, + omit, + not_given, +) from ..._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -23,6 +32,7 @@ from ...types.audio_response_format import AudioResponseFormat from ...types.audio.transcription_include import TranscriptionInclude from ...types.audio.transcription_verbose import TranscriptionVerbose +from ...types.audio.transcription_diarized import TranscriptionDiarized from ...types.audio.transcription_stream_event import TranscriptionStreamEvent from ...types.audio.transcription_create_response import TranscriptionCreateResponse @@ -57,20 +67,82 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + response_format: Union[Literal["json"], Omit] = omit, + stream: Optional[Literal[False]] | Omit = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Transcription: ... + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Transcription: + """ + Transcribes audio into the input language. + + Args: + file: + The audio file object (not file name) to transcribe, in one of these formats: + flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + + model: ID of the model to use. The options are `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + Whisper V2 model). + + chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server + first normalizes loudness and then uses voice activity detection (VAD) to choose + boundaries. 
A `server_vad` object can be provided to tweak VAD detection + parameters manually. If unset, the audio is transcribed as a single block. + + include: Additional information to include in the transcription response. `logprobs` will + return the log probabilities of the tokens in the response to understand the + model's confidence in the transcription. `logprobs` only works with + response_format set to `json` and only with the models `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`. + + language: The language of the input audio. Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + + prompt: An optional text to guide the model's style or continue a previous audio + segment. The + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + should match the audio language. + + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + the only supported format is `json`. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + for more information. + + Note: Streaming is not supported for the `whisper-1` model and will be ignored. + + temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + output more random, while lower values like 0.2 will make it more focused and + deterministic. If set to 0, the model will use + [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + + timestamp_granularities: The timestamp granularities to populate for this transcription. + `response_format` must be set to `verbose_json` to use timestamp granularities. + Either or both of these options are supported: `word` or `segment`. Note: There + is no additional latency for segment timestamps, but generating word timestamps + incurs additional latency. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> TranscriptionVerbose: ... @overload @@ -99,21 +171,42 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, response_format: Literal["text", "srt", "vtt"], - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + include: List[TranscriptionInclude] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> str: ... + @overload + def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + response_format: Literal["diarized_json"], + known_speaker_names: SequenceNotStr[str] | Omit = omit, + known_speaker_references: SequenceNotStr[str] | Omit = omit, + language: str | Omit = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> TranscriptionDiarized: ... 
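A sketch of calling this new `diarized_json` overload; the file path, speaker labels, and data-URL samples are placeholders, and iterating `transcript.segments` assumes the `TranscriptionDiarized` model exposes per-segment `speaker`/`text` fields:

from openai import OpenAI

client = OpenAI()

# Placeholder reference clips (2-10 second samples, base64-encoded as data URLs).
agent_ref = "data:audio/wav;base64,AAAA"
customer_ref = "data:audio/wav;base64,BBBB"

with open("support_call.wav", "rb") as f:
    transcript = client.audio.transcriptions.create(
        file=f,
        model="gpt-4o-transcribe-diarize",
        response_format="diarized_json",  # required to receive speaker annotations
        chunking_strategy="auto",         # required for inputs longer than 30 seconds
        known_speaker_names=["agent", "customer"],
        known_speaker_references=[agent_ref, customer_ref],
    )

for segment in transcript.segments:
    print(f"{segment.speaker}: {segment.text}")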
+ @overload def create( self, @@ -121,19 +214,21 @@ def create( file: FileTypes, model: Union[str, AudioModel], stream: Literal[True], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, + known_speaker_names: SequenceNotStr[str] | Omit = omit, + known_speaker_references: SequenceNotStr[str] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + response_format: Union[AudioResponseFormat, Omit] = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[TranscriptionStreamEvent]: """ Transcribes audio into the input language. @@ -144,8 +239,8 @@ def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - Whisper V2 model). + `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source + Whisper V2 model), and `gpt-4o-transcribe-diarize`. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -160,12 +255,25 @@ def create( first normalizes loudness and then uses voice activity detection (VAD) to choose boundaries. `server_vad` object can be provided to tweak VAD detection parameters manually. If unset, the audio is transcribed as a single block. + Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30 + seconds. include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. + `gpt-4o-mini-transcribe`. This field is not supported when using + `gpt-4o-transcribe-diarize`. + + known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in + `known_speaker_references[]`. Each entry should be a short identifier (for + example `customer` or `agent`). Up to 4 speakers are supported. + + known_speaker_references: Optional list of audio samples (as + [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs)) + that contain known speaker references matching `known_speaker_names[]`. Each + sample must be between 2 and 10 seconds, and can use any of the same input audio + formats supported by `file`. language: The language of the input audio. 
Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -174,11 +282,14 @@ def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - should match the audio language. + should match the audio language. This field is not supported when using + `gpt-4o-transcribe-diarize`. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. + `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`, the only supported format is `json`. For + `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and + `diarized_json`, with `diarized_json` required to receive speaker annotations. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -190,7 +301,8 @@ def create( `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. + incurs additional latency. This option is not available for + `gpt-4o-transcribe-diarize`. extra_headers: Send extra headers @@ -209,19 +321,21 @@ def create( file: FileTypes, model: Union[str, AudioModel], stream: bool, - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, + known_speaker_names: SequenceNotStr[str] | Omit = omit, + known_speaker_references: SequenceNotStr[str] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + response_format: Union[AudioResponseFormat, Omit] = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> TranscriptionCreateResponse | Stream[TranscriptionStreamEvent]: """ Transcribes audio into the input language. @@ -232,8 +346,8 @@ def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - Whisper V2 model). + `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source + Whisper V2 model), and `gpt-4o-transcribe-diarize`. 
stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -248,12 +362,25 @@ def create( first normalizes loudness and then uses voice activity detection (VAD) to choose boundaries. `server_vad` object can be provided to tweak VAD detection parameters manually. If unset, the audio is transcribed as a single block. + Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30 + seconds. include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. + `gpt-4o-mini-transcribe`. This field is not supported when using + `gpt-4o-transcribe-diarize`. + + known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in + `known_speaker_references[]`. Each entry should be a short identifier (for + example `customer` or `agent`). Up to 4 speakers are supported. + + known_speaker_references: Optional list of audio samples (as + [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs)) + that contain known speaker references matching `known_speaker_names[]`. Each + sample must be between 2 and 10 seconds, and can use any of the same input audio + formats supported by `file`. language: The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -262,11 +389,14 @@ def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - should match the audio language. + should match the audio language. This field is not supported when using + `gpt-4o-transcribe-diarize`. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. + `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`, the only supported format is `json`. For + `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and + `diarized_json`, with `diarized_json` required to receive speaker annotations. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -278,7 +408,8 @@ def create( `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. + incurs additional latency. This option is not available for + `gpt-4o-transcribe-diarize`. 
extra_headers: Send extra headers @@ -296,27 +427,31 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, + known_speaker_names: SequenceNotStr[str] | Omit = omit, + known_speaker_references: SequenceNotStr[str] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + response_format: Union[AudioResponseFormat, Omit] = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> str | Transcription | TranscriptionVerbose | Stream[TranscriptionStreamEvent]: + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> str | Transcription | TranscriptionDiarized | TranscriptionVerbose | Stream[TranscriptionStreamEvent]: body = deepcopy_minimal( { "file": file, "model": model, "chunking_strategy": chunking_strategy, "include": include, + "known_speaker_names": known_speaker_names, + "known_speaker_references": known_speaker_references, "language": language, "prompt": prompt, "response_format": response_format, @@ -374,20 +509,22 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, + known_speaker_names: SequenceNotStr[str] | Omit = omit, + known_speaker_references: SequenceNotStr[str] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + response_format: Union[Literal["json"], Omit] = omit, + stream: Optional[Literal[False]] | Omit = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> TranscriptionCreateResponse: """ Transcribes audio into the input language. @@ -398,19 +535,32 @@ async def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - Whisper V2 model). + `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source + Whisper V2 model), and `gpt-4o-transcribe-diarize`. chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server first normalizes loudness and then uses voice activity detection (VAD) to choose boundaries. `server_vad` object can be provided to tweak VAD detection parameters manually. If unset, the audio is transcribed as a single block. + Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30 + seconds. include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. + `gpt-4o-mini-transcribe`. This field is not supported when using + `gpt-4o-transcribe-diarize`. + + known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in + `known_speaker_references[]`. Each entry should be a short identifier (for + example `customer` or `agent`). Up to 4 speakers are supported. + + known_speaker_references: Optional list of audio samples (as + [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs)) + that contain known speaker references matching `known_speaker_names[]`. Each + sample must be between 2 and 10 seconds, and can use any of the same input audio + formats supported by `file`. language: The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -419,11 +569,14 @@ async def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - should match the audio language. + should match the audio language. This field is not supported when using + `gpt-4o-transcribe-diarize`. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. + `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`, the only supported format is `json`. For + `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and + `diarized_json`, with `diarized_json` required to receive speaker annotations. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -444,7 +597,8 @@ async def create( `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. 
Note: There is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. + incurs additional latency. This option is not available for + `gpt-4o-transcribe-diarize`. extra_headers: Send extra headers @@ -457,19 +611,19 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, response_format: Literal["verbose_json"], - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + language: str | Omit = omit, + prompt: str | Omit = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> TranscriptionVerbose: ... @overload @@ -478,19 +632,19 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, response_format: Literal["text", "srt", "vtt"], - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + language: str | Omit = omit, + prompt: str | Omit = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> str: ... 
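The streaming overloads that follow return an event stream rather than a single object; a sketch of consuming one with the async client, assuming text deltas arrive as `transcript.text.delta` events (file path and model are placeholders):

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    with open("speech.mp3", "rb") as f:
        stream = await client.audio.transcriptions.create(
            file=f,
            model="gpt-4o-transcribe",  # stream=True is ignored for whisper-1
            stream=True,
        )
        async for event in stream:
            if event.type == "transcript.text.delta":
                print(event.delta, end="", flush=True)


asyncio.run(main())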
@overload @@ -500,19 +654,21 @@ async def create( file: FileTypes, model: Union[str, AudioModel], stream: Literal[True], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, + known_speaker_names: SequenceNotStr[str] | Omit = omit, + known_speaker_references: SequenceNotStr[str] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + response_format: Union[AudioResponseFormat, Omit] = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[TranscriptionStreamEvent]: """ Transcribes audio into the input language. @@ -523,8 +679,8 @@ async def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - Whisper V2 model). + `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source + Whisper V2 model), and `gpt-4o-transcribe-diarize`. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -539,12 +695,25 @@ async def create( first normalizes loudness and then uses voice activity detection (VAD) to choose boundaries. `server_vad` object can be provided to tweak VAD detection parameters manually. If unset, the audio is transcribed as a single block. + Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30 + seconds. include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. + `gpt-4o-mini-transcribe`. This field is not supported when using + `gpt-4o-transcribe-diarize`. + + known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in + `known_speaker_references[]`. Each entry should be a short identifier (for + example `customer` or `agent`). Up to 4 speakers are supported. + + known_speaker_references: Optional list of audio samples (as + [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs)) + that contain known speaker references matching `known_speaker_names[]`. Each + sample must be between 2 and 10 seconds, and can use any of the same input audio + formats supported by `file`. language: The language of the input audio. 
Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -553,11 +722,14 @@ async def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - should match the audio language. + should match the audio language. This field is not supported when using + `gpt-4o-transcribe-diarize`. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. + `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`, the only supported format is `json`. For + `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and + `diarized_json`, with `diarized_json` required to receive speaker annotations. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -569,7 +741,8 @@ async def create( `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. + incurs additional latency. This option is not available for + `gpt-4o-transcribe-diarize`. extra_headers: Send extra headers @@ -588,19 +761,21 @@ async def create( file: FileTypes, model: Union[str, AudioModel], stream: bool, - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, + known_speaker_names: SequenceNotStr[str] | Omit = omit, + known_speaker_references: SequenceNotStr[str] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + response_format: Union[AudioResponseFormat, Omit] = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> TranscriptionCreateResponse | AsyncStream[TranscriptionStreamEvent]: """ Transcribes audio into the input language. @@ -611,8 +786,8 @@ async def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - Whisper V2 model). 
+ `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source + Whisper V2 model), and `gpt-4o-transcribe-diarize`. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -627,12 +802,25 @@ async def create( first normalizes loudness and then uses voice activity detection (VAD) to choose boundaries. `server_vad` object can be provided to tweak VAD detection parameters manually. If unset, the audio is transcribed as a single block. + Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30 + seconds. include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. + `gpt-4o-mini-transcribe`. This field is not supported when using + `gpt-4o-transcribe-diarize`. + + known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in + `known_speaker_references[]`. Each entry should be a short identifier (for + example `customer` or `agent`). Up to 4 speakers are supported. + + known_speaker_references: Optional list of audio samples (as + [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs)) + that contain known speaker references matching `known_speaker_names[]`. Each + sample must be between 2 and 10 seconds, and can use any of the same input audio + formats supported by `file`. language: The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -641,11 +829,14 @@ async def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - should match the audio language. + should match the audio language. This field is not supported when using + `gpt-4o-transcribe-diarize`. response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. + `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`, the only supported format is `json`. For + `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and + `diarized_json`, with `diarized_json` required to receive speaker annotations. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -657,7 +848,8 @@ async def create( `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. + incurs additional latency. This option is not available for + `gpt-4o-transcribe-diarize`. 
extra_headers: Send extra headers @@ -675,27 +867,31 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], - chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit, + include: List[TranscriptionInclude] | Omit = omit, + known_speaker_names: SequenceNotStr[str] | Omit = omit, + known_speaker_references: SequenceNotStr[str] | Omit = omit, + language: str | Omit = omit, + prompt: str | Omit = omit, + response_format: Union[AudioResponseFormat, Omit] = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + temperature: float | Omit = omit, + timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Transcription | TranscriptionVerbose | str | AsyncStream[TranscriptionStreamEvent]: + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Transcription | TranscriptionVerbose | TranscriptionDiarized | str | AsyncStream[TranscriptionStreamEvent]: body = deepcopy_minimal( { "file": file, "model": model, "chunking_strategy": chunking_strategy, "include": include, + "known_speaker_names": known_speaker_names, + "known_speaker_references": known_speaker_references, "language": language, "prompt": prompt, "response_format": response_format, @@ -764,15 +960,17 @@ def __init__(self, transcriptions: AsyncTranscriptions) -> None: def _get_response_format_type( - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven, -) -> type[Transcription | TranscriptionVerbose | str]: - if isinstance(response_format, NotGiven) or response_format is None: # pyright: ignore[reportUnnecessaryComparison] + response_format: AudioResponseFormat | Omit, +) -> type[Transcription | TranscriptionVerbose | TranscriptionDiarized | str]: + if isinstance(response_format, Omit) or response_format is None: # pyright: ignore[reportUnnecessaryComparison] return Transcription if response_format == "json": return Transcription elif response_format == "verbose_json": return TranscriptionVerbose + elif response_format == "diarized_json": + return TranscriptionDiarized elif response_format == "srt" or response_format == "text" or response_format == "vtt": return str elif TYPE_CHECKING: # type: ignore[unreachable] diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py index 28b577ce2e..310f901fb3 100644 --- a/src/openai/resources/audio/translations.py +++ b/src/openai/resources/audio/translations.py @@ -9,7 +9,7 @@ import httpx from ... 
import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -52,15 +52,15 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], - response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, + response_format: Union[Literal["json"], Omit] = omit, + prompt: str | Omit = omit, + temperature: float | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Translation: ... @overload @@ -70,14 +70,14 @@ def create( file: FileTypes, model: Union[str, AudioModel], response_format: Literal["verbose_json"], - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, + prompt: str | Omit = omit, + temperature: float | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> TranslationVerbose: ... @overload @@ -87,14 +87,14 @@ def create( file: FileTypes, model: Union[str, AudioModel], response_format: Literal["text", "srt", "vtt"], - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, + prompt: str | Omit = omit, + temperature: float | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> str: ... def create( @@ -102,15 +102,15 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[Literal["json", "text", "srt", "verbose_json", "vtt"], NotGiven] = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, + prompt: str | Omit = omit, + response_format: Union[Literal["json", "text", "srt", "verbose_json", "vtt"], Omit] = omit, + temperature: float | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Translation | TranslationVerbose | str: """ Translates audio into English. @@ -195,15 +195,15 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], - response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, + response_format: Union[Literal["json"], Omit] = omit, + prompt: str | Omit = omit, + temperature: float | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Translation: ... @overload @@ -213,14 +213,14 @@ async def create( file: FileTypes, model: Union[str, AudioModel], response_format: Literal["verbose_json"], - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, + prompt: str | Omit = omit, + temperature: float | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> TranslationVerbose: ... @overload @@ -230,14 +230,14 @@ async def create( file: FileTypes, model: Union[str, AudioModel], response_format: Literal["text", "srt", "vtt"], - prompt: str | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, + prompt: str | Omit = omit, + temperature: float | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> str: ... async def create( @@ -245,15 +245,15 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], - prompt: str | NotGiven = NOT_GIVEN, - response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, + prompt: str | Omit = omit, + response_format: Union[AudioResponseFormat, Omit] = omit, + temperature: float | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Translation | TranslationVerbose | str: """ Translates audio into English. @@ -349,9 +349,9 @@ def __init__(self, translations: AsyncTranslations) -> None: def _get_response_format_type( - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven, + response_format: AudioResponseFormat | Omit, ) -> type[Translation | TranslationVerbose | str]: - if isinstance(response_format, NotGiven) or response_format is None: # pyright: ignore[reportUnnecessaryComparison] + if isinstance(response_format, Omit) or response_format is None: # pyright: ignore[reportUnnecessaryComparison] return Translation if response_format == "json": @@ -360,8 +360,8 @@ def _get_response_format_type( return TranslationVerbose elif response_format == "srt" or response_format == "text" or response_format == "vtt": return str - elif TYPE_CHECKING: # type: ignore[unreachable] + elif TYPE_CHECKING and response_format != "diarized_json": # type: ignore[unreachable] assert_never(response_format) else: - log.warn("Unexpected audio response format: %s", response_format) - return Transcription + log.warning("Unexpected audio response format: %s", response_format) + return Translation diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index 2340bd2e32..afc7fa6eb9 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -9,7 +9,7 @@ from .. import _legacy_response from ..types import batch_list_params, batch_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -48,14 +48,14 @@ def create( completion_window: Literal["24h"], endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Batch: """ Creates and executes a batch from an uploaded file of requests @@ -124,7 +124,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Batch: """ Retrieves a batch. 
@@ -151,14 +151,14 @@ def retrieve( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[Batch]: """List your organization's batches. @@ -209,7 +209,7 @@ def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Batch: """Cancels an in-progress batch. @@ -263,14 +263,14 @@ async def create( completion_window: Literal["24h"], endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Batch: """ Creates and executes a batch from an uploaded file of requests @@ -339,7 +339,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Batch: """ Retrieves a batch. @@ -366,14 +366,14 @@ async def retrieve( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[Batch, AsyncCursorPage[Batch]]: """List your organization's batches. @@ -424,7 +424,7 @@ async def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Batch: """Cancels an in-progress batch. 
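The pattern repeated across this diff is mechanical: optional request parameters that previously defaulted to `NOT_GIVEN` now default to the `omit` sentinel, while `timeout` keeps its `NotGiven` annotation with a lowercase `not_given` default. A hedged sketch of what this means for callers (in practice, nothing: omitted arguments are still never serialized); `omit` is imported here from the internal `_types` module this diff touches, though a public re-export may also be available:

```python
from openai import OpenAI
from openai._types import omit  # internal module; see the import changes above

client = OpenAI()

# `after` and `limit` default to `omit`, so neither key is sent on the wire.
page = client.batches.list()

# Passing `omit` explicitly is equivalent to leaving the argument out,
# which is what distinguishes it from an explicit None.
same_page = client.batches.list(after=omit, limit=omit)
```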
diff --git a/src/openai/resources/beta/__init__.py b/src/openai/resources/beta/__init__.py index 87fea25267..6d6f538670 100644 --- a/src/openai/resources/beta/__init__.py +++ b/src/openai/resources/beta/__init__.py @@ -8,6 +8,14 @@ BetaWithStreamingResponse, AsyncBetaWithStreamingResponse, ) +from .chatkit import ( + ChatKit, + AsyncChatKit, + ChatKitWithRawResponse, + AsyncChatKitWithRawResponse, + ChatKitWithStreamingResponse, + AsyncChatKitWithStreamingResponse, +) from .threads import ( Threads, AsyncThreads, @@ -26,6 +34,12 @@ ) __all__ = [ + "ChatKit", + "AsyncChatKit", + "ChatKitWithRawResponse", + "AsyncChatKitWithRawResponse", + "ChatKitWithStreamingResponse", + "AsyncChatKitWithStreamingResponse", "Assistants", "AsyncAssistants", "AssistantsWithRawResponse", diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index fe0c99c88a..a958c0caa1 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -8,7 +8,7 @@ import httpx from ... import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -55,22 +55,22 @@ def create( self, *, model: Union[str, ChatModel], - description: Optional[str] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, + description: Optional[str] | Omit = omit, + instructions: Optional[str] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + name: Optional[str] | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_resources: Optional[assistant_create_params.ToolResources] | Omit = omit, + tools: Iterable[AssistantToolParam] | Omit = omit, + top_p: Optional[float] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Assistant: """ Create an assistant with a model and instructions. @@ -102,6 +102,9 @@ def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -184,7 +187,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Assistant: """ Retrieves an assistant. @@ -213,9 +216,9 @@ def update( self, assistant_id: str, *, - description: Optional[str] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + description: Optional[str] | Omit = omit, + instructions: Optional[str] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, model: Union[ str, Literal[ @@ -263,20 +266,20 @@ def update( "gpt-3.5-turbo-16k-0613", ], ] - | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, + | Omit = omit, + name: Optional[str] | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_resources: Optional[assistant_update_params.ToolResources] | Omit = omit, + tools: Iterable[AssistantToolParam] | Omit = omit, + top_p: Optional[float] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Assistant: """Modifies an assistant. @@ -309,6 +312,9 @@ def update( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -387,16 +393,16 @@ def update( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[Assistant]: """Returns a list of assistants. @@ -458,7 +464,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AssistantDeleted: """ Delete an assistant. @@ -508,22 +514,22 @@ async def create( self, *, model: Union[str, ChatModel], - description: Optional[str] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, + description: Optional[str] | Omit = omit, + instructions: Optional[str] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + name: Optional[str] | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_resources: Optional[assistant_create_params.ToolResources] | Omit = omit, + tools: Iterable[AssistantToolParam] | Omit = omit, + top_p: Optional[float] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Assistant: """ Create an assistant with a model and instructions. @@ -555,6 +561,9 @@ async def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -637,7 +646,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Assistant: """ Retrieves an assistant. 
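The four cursor parameters on `assistants.list` above (`after`, `before`, `limit`, `order`) now all default to `omit`. A short sketch; note that SDK pages auto-paginate when iterated, so the cursors are usually only needed for manual paging:

```python
from openai import OpenAI

client = OpenAI()

# Only pass the knobs you need; everything else stays unsent.
for assistant in client.beta.assistants.list(limit=5, order="desc"):
    print(assistant.id)
```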
@@ -666,9 +675,9 @@ async def update( self, assistant_id: str, *, - description: Optional[str] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + description: Optional[str] | Omit = omit, + instructions: Optional[str] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, model: Union[ str, Literal[ @@ -716,20 +725,20 @@ async def update( "gpt-3.5-turbo-16k-0613", ], ] - | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, + | Omit = omit, + name: Optional[str] | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_resources: Optional[assistant_update_params.ToolResources] | Omit = omit, + tools: Iterable[AssistantToolParam] | Omit = omit, + top_p: Optional[float] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Assistant: """Modifies an assistant. @@ -762,6 +771,9 @@ async def update( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -840,16 +852,16 @@ async def update( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[Assistant, AsyncCursorPage[Assistant]]: """Returns a list of assistants. @@ -911,7 +923,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AssistantDeleted: """ Delete an assistant. 
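The other recurring addition in this file is the docstring note about `gpt-5-pro`. A sketch under the assumption that the model is available to your account; per the note, `reasoning_effort` can be omitted entirely since `high` is both the default and the only supported value:

```python
from openai import OpenAI

client = OpenAI()

# gpt-5-pro defaults to (and only supports) "high" reasoning effort,
# so passing it explicitly is redundant but harmless.
assistant = client.beta.assistants.create(
    model="gpt-5-pro",
    instructions="You are a meticulous code reviewer.",
    reasoning_effort="high",
)
```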
diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py index 9084c477e9..5ee3639db1 100644 --- a/src/openai/resources/beta/beta.py +++ b/src/openai/resources/beta/beta.py @@ -12,6 +12,14 @@ AsyncAssistantsWithStreamingResponse, ) from ..._resource import SyncAPIResource, AsyncAPIResource +from .chatkit.chatkit import ( + ChatKit, + AsyncChatKit, + ChatKitWithRawResponse, + AsyncChatKitWithRawResponse, + ChatKitWithStreamingResponse, + AsyncChatKitWithStreamingResponse, +) from .threads.threads import ( Threads, AsyncThreads, @@ -38,6 +46,10 @@ def chat(self) -> Chat: def realtime(self) -> Realtime: return Realtime(self._client) + @cached_property + def chatkit(self) -> ChatKit: + return ChatKit(self._client) + @cached_property def assistants(self) -> Assistants: return Assistants(self._client) @@ -75,6 +87,10 @@ def chat(self) -> AsyncChat: def realtime(self) -> AsyncRealtime: return AsyncRealtime(self._client) + @cached_property + def chatkit(self) -> AsyncChatKit: + return AsyncChatKit(self._client) + @cached_property def assistants(self) -> AsyncAssistants: return AsyncAssistants(self._client) @@ -107,6 +123,10 @@ class BetaWithRawResponse: def __init__(self, beta: Beta) -> None: self._beta = beta + @cached_property + def chatkit(self) -> ChatKitWithRawResponse: + return ChatKitWithRawResponse(self._beta.chatkit) + @cached_property def assistants(self) -> AssistantsWithRawResponse: return AssistantsWithRawResponse(self._beta.assistants) @@ -120,6 +140,10 @@ class AsyncBetaWithRawResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta + @cached_property + def chatkit(self) -> AsyncChatKitWithRawResponse: + return AsyncChatKitWithRawResponse(self._beta.chatkit) + @cached_property def assistants(self) -> AsyncAssistantsWithRawResponse: return AsyncAssistantsWithRawResponse(self._beta.assistants) @@ -133,6 +157,10 @@ class BetaWithStreamingResponse: def __init__(self, beta: Beta) -> None: self._beta = beta + @cached_property + def chatkit(self) -> ChatKitWithStreamingResponse: + return ChatKitWithStreamingResponse(self._beta.chatkit) + @cached_property def assistants(self) -> AssistantsWithStreamingResponse: return AssistantsWithStreamingResponse(self._beta.assistants) @@ -146,6 +174,10 @@ class AsyncBetaWithStreamingResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta + @cached_property + def chatkit(self) -> AsyncChatKitWithStreamingResponse: + return AsyncChatKitWithStreamingResponse(self._beta.chatkit) + @cached_property def assistants(self) -> AsyncAssistantsWithStreamingResponse: return AsyncAssistantsWithStreamingResponse(self._beta.assistants) diff --git a/src/openai/resources/beta/chatkit/__init__.py b/src/openai/resources/beta/chatkit/__init__.py new file mode 100644 index 0000000000..05f24d6238 --- /dev/null +++ b/src/openai/resources/beta/chatkit/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .chatkit import ( + ChatKit, + AsyncChatKit, + ChatKitWithRawResponse, + AsyncChatKitWithRawResponse, + ChatKitWithStreamingResponse, + AsyncChatKitWithStreamingResponse, +) +from .threads import ( + Threads, + AsyncThreads, + ThreadsWithRawResponse, + AsyncThreadsWithRawResponse, + ThreadsWithStreamingResponse, + AsyncThreadsWithStreamingResponse, +) +from .sessions import ( + Sessions, + AsyncSessions, + SessionsWithRawResponse, + AsyncSessionsWithRawResponse, + SessionsWithStreamingResponse, + AsyncSessionsWithStreamingResponse, +) + +__all__ = [ + "Sessions", + "AsyncSessions", + "SessionsWithRawResponse", + "AsyncSessionsWithRawResponse", + "SessionsWithStreamingResponse", + "AsyncSessionsWithStreamingResponse", + "Threads", + "AsyncThreads", + "ThreadsWithRawResponse", + "AsyncThreadsWithRawResponse", + "ThreadsWithStreamingResponse", + "AsyncThreadsWithStreamingResponse", + "ChatKit", + "AsyncChatKit", + "ChatKitWithRawResponse", + "AsyncChatKitWithRawResponse", + "ChatKitWithStreamingResponse", + "AsyncChatKitWithStreamingResponse", +] diff --git a/src/openai/resources/beta/chatkit/chatkit.py b/src/openai/resources/beta/chatkit/chatkit.py new file mode 100644 index 0000000000..5a10a39c7b --- /dev/null +++ b/src/openai/resources/beta/chatkit/chatkit.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .threads import ( + Threads, + AsyncThreads, + ThreadsWithRawResponse, + AsyncThreadsWithRawResponse, + ThreadsWithStreamingResponse, + AsyncThreadsWithStreamingResponse, +) +from .sessions import ( + Sessions, + AsyncSessions, + SessionsWithRawResponse, + AsyncSessionsWithRawResponse, + SessionsWithStreamingResponse, + AsyncSessionsWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["ChatKit", "AsyncChatKit"] + + +class ChatKit(SyncAPIResource): + @cached_property + def sessions(self) -> Sessions: + return Sessions(self._client) + + @cached_property + def threads(self) -> Threads: + return Threads(self._client) + + @cached_property + def with_raw_response(self) -> ChatKitWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return ChatKitWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ChatKitWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return ChatKitWithStreamingResponse(self) + + +class AsyncChatKit(AsyncAPIResource): + @cached_property + def sessions(self) -> AsyncSessions: + return AsyncSessions(self._client) + + @cached_property + def threads(self) -> AsyncThreads: + return AsyncThreads(self._client) + + @cached_property + def with_raw_response(self) -> AsyncChatKitWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncChatKitWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncChatKitWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncChatKitWithStreamingResponse(self) + + +class ChatKitWithRawResponse: + def __init__(self, chatkit: ChatKit) -> None: + self._chatkit = chatkit + + @cached_property + def sessions(self) -> SessionsWithRawResponse: + return SessionsWithRawResponse(self._chatkit.sessions) + + @cached_property + def threads(self) -> ThreadsWithRawResponse: + return ThreadsWithRawResponse(self._chatkit.threads) + + +class AsyncChatKitWithRawResponse: + def __init__(self, chatkit: AsyncChatKit) -> None: + self._chatkit = chatkit + + @cached_property + def sessions(self) -> AsyncSessionsWithRawResponse: + return AsyncSessionsWithRawResponse(self._chatkit.sessions) + + @cached_property + def threads(self) -> AsyncThreadsWithRawResponse: + return AsyncThreadsWithRawResponse(self._chatkit.threads) + + +class ChatKitWithStreamingResponse: + def __init__(self, chatkit: ChatKit) -> None: + self._chatkit = chatkit + + @cached_property + def sessions(self) -> SessionsWithStreamingResponse: + return SessionsWithStreamingResponse(self._chatkit.sessions) + + @cached_property + def threads(self) -> ThreadsWithStreamingResponse: + return ThreadsWithStreamingResponse(self._chatkit.threads) + + +class AsyncChatKitWithStreamingResponse: + def __init__(self, chatkit: AsyncChatKit) -> None: + self._chatkit = chatkit + + @cached_property + def sessions(self) -> AsyncSessionsWithStreamingResponse: + return AsyncSessionsWithStreamingResponse(self._chatkit.sessions) + + @cached_property + def threads(self) -> AsyncThreadsWithStreamingResponse: + return AsyncThreadsWithStreamingResponse(self._chatkit.threads) diff --git a/src/openai/resources/beta/chatkit/sessions.py b/src/openai/resources/beta/chatkit/sessions.py new file mode 100644 index 0000000000..a814f1058e --- /dev/null +++ b/src/openai/resources/beta/chatkit/sessions.py @@ -0,0 +1,301 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from .... 
import _legacy_response +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...._base_client import make_request_options +from ....types.beta.chatkit import ( + ChatSessionWorkflowParam, + ChatSessionRateLimitsParam, + ChatSessionExpiresAfterParam, + ChatSessionChatKitConfigurationParam, + session_create_params, +) +from ....types.beta.chatkit.chat_session import ChatSession +from ....types.beta.chatkit.chat_session_workflow_param import ChatSessionWorkflowParam +from ....types.beta.chatkit.chat_session_rate_limits_param import ChatSessionRateLimitsParam +from ....types.beta.chatkit.chat_session_expires_after_param import ChatSessionExpiresAfterParam +from ....types.beta.chatkit.chat_session_chatkit_configuration_param import ChatSessionChatKitConfigurationParam + +__all__ = ["Sessions", "AsyncSessions"] + + +class Sessions(SyncAPIResource): + @cached_property + def with_raw_response(self) -> SessionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return SessionsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SessionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return SessionsWithStreamingResponse(self) + + def create( + self, + *, + user: str, + workflow: ChatSessionWorkflowParam, + chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit, + expires_after: ChatSessionExpiresAfterParam | Omit = omit, + rate_limits: ChatSessionRateLimitsParam | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ChatSession: + """ + Create a ChatKit session + + Args: + user: A free-form string that identifies your end user; ensures this Session can + access other objects that have the same `user` scope. + + workflow: Workflow that powers the session. + + chatkit_configuration: Optional overrides for ChatKit runtime configuration features + + expires_after: Optional override for session expiration timing in seconds from creation. + Defaults to 10 minutes. + + rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return self._post( + "/chatkit/sessions", + body=maybe_transform( + { + "user": user, + "workflow": workflow, + "chatkit_configuration": chatkit_configuration, + "expires_after": expires_after, + "rate_limits": rate_limits, + }, + session_create_params.SessionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatSession, + ) + + def cancel( + self, + session_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ChatSession: + """ + Cancel a ChatKit session + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return self._post( + f"/chatkit/sessions/{session_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatSession, + ) + + +class AsyncSessions(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncSessionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSessionsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncSessionsWithStreamingResponse(self) + + async def create( + self, + *, + user: str, + workflow: ChatSessionWorkflowParam, + chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit, + expires_after: ChatSessionExpiresAfterParam | Omit = omit, + rate_limits: ChatSessionRateLimitsParam | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ChatSession: + """ + Create a ChatKit session + + Args: + user: A free-form string that identifies your end user; ensures this Session can + access other objects that have the same `user` scope. + + workflow: Workflow that powers the session. + + chatkit_configuration: Optional overrides for ChatKit runtime configuration features + + expires_after: Optional override for session expiration timing in seconds from creation. + Defaults to 10 minutes. + + rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return await self._post( + "/chatkit/sessions", + body=await async_maybe_transform( + { + "user": user, + "workflow": workflow, + "chatkit_configuration": chatkit_configuration, + "expires_after": expires_after, + "rate_limits": rate_limits, + }, + session_create_params.SessionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatSession, + ) + + async def cancel( + self, + session_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ChatSession: + """ + Cancel a ChatKit session + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return await self._post( + f"/chatkit/sessions/{session_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatSession, + ) + + +class SessionsWithRawResponse: + def __init__(self, sessions: Sessions) -> None: + self._sessions = sessions + + self.create = _legacy_response.to_raw_response_wrapper( + sessions.create, + ) + self.cancel = _legacy_response.to_raw_response_wrapper( + sessions.cancel, + ) + + +class AsyncSessionsWithRawResponse: + def __init__(self, sessions: AsyncSessions) -> None: + self._sessions = sessions + + self.create = _legacy_response.async_to_raw_response_wrapper( + sessions.create, + ) + self.cancel = _legacy_response.async_to_raw_response_wrapper( + sessions.cancel, + ) + + +class SessionsWithStreamingResponse: + def __init__(self, sessions: Sessions) -> None: + self._sessions = sessions + + self.create = to_streamed_response_wrapper( + sessions.create, + ) + self.cancel = to_streamed_response_wrapper( + sessions.cancel, + ) + + +class AsyncSessionsWithStreamingResponse: + def __init__(self, sessions: AsyncSessions) -> None: + self._sessions = sessions + + self.create = async_to_streamed_response_wrapper( + sessions.create, + ) + self.cancel = async_to_streamed_response_wrapper( + sessions.cancel, + ) diff --git a/src/openai/resources/beta/chatkit/threads.py b/src/openai/resources/beta/chatkit/threads.py new file mode 100644 index 0000000000..37cd57295a --- /dev/null +++ b/src/openai/resources/beta/chatkit/threads.py @@ -0,0 +1,521 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Any, cast +from typing_extensions import Literal + +import httpx + +from .... 
import _legacy_response +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from ...._utils import maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ....pagination import SyncConversationCursorPage, AsyncConversationCursorPage +from ...._base_client import AsyncPaginator, make_request_options +from ....types.beta.chatkit import thread_list_params, thread_list_items_params +from ....types.beta.chatkit.chatkit_thread import ChatKitThread +from ....types.beta.chatkit.thread_delete_response import ThreadDeleteResponse +from ....types.beta.chatkit.chatkit_thread_item_list import Data + +__all__ = ["Threads", "AsyncThreads"] + + +class Threads(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ThreadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return ThreadsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ThreadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return ThreadsWithStreamingResponse(self) + + def retrieve( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ChatKitThread: + """ + Retrieve a ChatKit thread + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return self._get( + f"/chatkit/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatKitThread, + ) + + def list( + self, + *, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + user: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> SyncConversationCursorPage[ChatKitThread]: + """ + List ChatKit threads + + Args: + after: List items created after this thread item ID. Defaults to null for the first + page. 
+ + before: List items created before this thread item ID. Defaults to null for the newest + results. + + limit: Maximum number of thread items to return. Defaults to 20. + + order: Sort order for results by creation time. Defaults to `desc`. + + user: Filter threads that belong to this user identifier. Defaults to null to return + all users. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return self._get_api_list( + "/chatkit/threads", + page=SyncConversationCursorPage[ChatKitThread], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + "user": user, + }, + thread_list_params.ThreadListParams, + ), + ), + model=ChatKitThread, + ) + + def delete( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ThreadDeleteResponse: + """ + Delete a ChatKit thread + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return self._delete( + f"/chatkit/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadDeleteResponse, + ) + + def list_items( + self, + thread_id: str, + *, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> SyncConversationCursorPage[Data]: + """ + List ChatKit thread items + + Args: + after: List items created after this thread item ID. Defaults to null for the first + page. + + before: List items created before this thread item ID. Defaults to null for the newest + results. + + limit: Maximum number of thread items to return. Defaults to 20. + + order: Sort order for results by creation time. Defaults to `desc`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return self._get_api_list( + f"/chatkit/threads/{thread_id}/items", + page=SyncConversationCursorPage[Data], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + thread_list_items_params.ThreadListItemsParams, + ), + ), + model=cast(Any, Data), # Union types cannot be passed in as arguments in the type system + ) + + +class AsyncThreads(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncThreadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncThreadsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncThreadsWithStreamingResponse(self) + + async def retrieve( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ChatKitThread: + """ + Retrieve a ChatKit thread + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return await self._get( + f"/chatkit/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChatKitThread, + ) + + def list( + self, + *, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + user: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> AsyncPaginator[ChatKitThread, AsyncConversationCursorPage[ChatKitThread]]: + """ + List ChatKit threads + + Args: + after: List items created after this thread item ID. Defaults to null for the first + page. + + before: List items created before this thread item ID. Defaults to null for the newest + results. + + limit: Maximum number of thread items to return. Defaults to 20. + + order: Sort order for results by creation time. Defaults to `desc`. + + user: Filter threads that belong to this user identifier. Defaults to null to return + all users. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return self._get_api_list( + "/chatkit/threads", + page=AsyncConversationCursorPage[ChatKitThread], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + "user": user, + }, + thread_list_params.ThreadListParams, + ), + ), + model=ChatKitThread, + ) + + async def delete( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ThreadDeleteResponse: + """ + Delete a ChatKit thread + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return await self._delete( + f"/chatkit/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadDeleteResponse, + ) + + def list_items( + self, + thread_id: str, + *, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> AsyncPaginator[Data, AsyncConversationCursorPage[Data]]: + """ + List ChatKit thread items + + Args: + after: List items created after this thread item ID. Defaults to null for the first + page. + + before: List items created before this thread item ID. 
Defaults to null for the newest + results. + + limit: Maximum number of thread items to return. Defaults to 20. + + order: Sort order for results by creation time. Defaults to `desc`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})} + return self._get_api_list( + f"/chatkit/threads/{thread_id}/items", + page=AsyncConversationCursorPage[Data], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + thread_list_items_params.ThreadListItemsParams, + ), + ), + model=cast(Any, Data), # Union types cannot be passed in as arguments in the type system + ) + + +class ThreadsWithRawResponse: + def __init__(self, threads: Threads) -> None: + self._threads = threads + + self.retrieve = _legacy_response.to_raw_response_wrapper( + threads.retrieve, + ) + self.list = _legacy_response.to_raw_response_wrapper( + threads.list, + ) + self.delete = _legacy_response.to_raw_response_wrapper( + threads.delete, + ) + self.list_items = _legacy_response.to_raw_response_wrapper( + threads.list_items, + ) + + +class AsyncThreadsWithRawResponse: + def __init__(self, threads: AsyncThreads) -> None: + self._threads = threads + + self.retrieve = _legacy_response.async_to_raw_response_wrapper( + threads.retrieve, + ) + self.list = _legacy_response.async_to_raw_response_wrapper( + threads.list, + ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + threads.delete, + ) + self.list_items = _legacy_response.async_to_raw_response_wrapper( + threads.list_items, + ) + + +class ThreadsWithStreamingResponse: + def __init__(self, threads: Threads) -> None: + self._threads = threads + + self.retrieve = to_streamed_response_wrapper( + threads.retrieve, + ) + self.list = to_streamed_response_wrapper( + threads.list, + ) + self.delete = to_streamed_response_wrapper( + threads.delete, + ) + self.list_items = to_streamed_response_wrapper( + threads.list_items, + ) + + +class AsyncThreadsWithStreamingResponse: + def __init__(self, threads: AsyncThreads) -> None: + self._threads = threads + + self.retrieve = async_to_streamed_response_wrapper( + threads.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + threads.list, + ) + self.delete = async_to_streamed_response_wrapper( + threads.delete, + ) + self.list_items = async_to_streamed_response_wrapper( + threads.list_items, + ) diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index eaddb384ce..9b85e02d17 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -51,6 +51,8 @@ def create( max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, model: Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", @@ -233,6 +235,8 @@ async def create( max_response_output_tokens: Union[int, 
Literal["inf"]] | NotGiven = NOT_GIVEN, modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, model: Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index 8903ff0316..d94ecca9a2 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -9,7 +9,7 @@ import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -55,14 +55,14 @@ def create( *, content: Union[str, Iterable[MessageContentPartParam]], role: Literal["user", "assistant"], - attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + attachments: Optional[Iterable[message_create_params.Attachment]] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Message: """ Create a message. @@ -126,7 +126,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Message: """ Retrieve a message. @@ -159,13 +159,13 @@ def update( message_id: str, *, thread_id: str, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Message: """ Modifies a message. @@ -205,17 +205,17 @@ def list( self, thread_id: str, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - run_id: str | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + run_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[Message]: """ Returns a list of messages for a given thread. @@ -283,7 +283,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> MessageDeleted: """ Deletes a message. @@ -338,14 +338,14 @@ async def create( *, content: Union[str, Iterable[MessageContentPartParam]], role: Literal["user", "assistant"], - attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + attachments: Optional[Iterable[message_create_params.Attachment]] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Message: """ Create a message. @@ -409,7 +409,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Message: """ Retrieve a message. @@ -442,13 +442,13 @@ async def update( message_id: str, *, thread_id: str, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Message: """ Modifies a message. @@ -488,17 +488,17 @@ def list( self, thread_id: str, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - run_id: str | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + run_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[Message, AsyncCursorPage[Message]]: """ Returns a list of messages for a given thread. 
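Stepping back to the ChatKit resources introduced earlier in this diff (`sessions` and `threads` under `client.beta.chatkit`), a hedged end-to-end sketch. The `user` string and workflow id are placeholders, and the workflow payload shape is assumed from `ChatSessionWorkflowParam`; every ChatKit method shown in the diff injects the `OpenAI-Beta: chatkit_beta=v1` header automatically:

```python
from openai import OpenAI

client = OpenAI()

# Placeholder identifiers; workflow is assumed to take an "id" key.
session = client.beta.chatkit.sessions.create(
    user="user_1234",
    workflow={"id": "wf_abc123"},
)

# Threads can be filtered per user; limit/order/etc. default to `omit`.
for thread in client.beta.chatkit.threads.list(user="user_1234", limit=10):
    print(thread.id)

client.beta.chatkit.sessions.cancel(session.id)
```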
@@ -566,7 +566,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> MessageDeleted: """ Deletes a message. diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index e97d519a40..2753f5817e 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -18,7 +18,7 @@ StepsWithStreamingResponse, AsyncStepsWithStreamingResponse, ) -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._types import NOT_GIVEN, Body, Omit, Query, Headers, NotGiven, omit, not_given from ....._utils import ( is_given, required_args, @@ -89,29 +89,29 @@ def create( thread_id: str, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Create a run. 
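From a caller's perspective, the `messages.py` changes above are invisible: optional arguments are simply left off and never serialized. A usage sketch, assuming `OPENAI_API_KEY` is set in the environment and the Assistants beta API is available:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

thread = client.beta.threads.create()
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="Summarize the attached notes.",
    # `attachments` and `metadata` now default to `omit`, so they are
    # simply absent from the request body when not passed.
)
print(message.id)
```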
@@ -173,6 +173,9 @@ def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -240,28 +243,28 @@ def create( *, assistant_id: str, stream: Literal[True], - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[AssistantStreamEvent]: """ Create a run. @@ -327,6 +330,9 @@ def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -390,28 +396,28 @@ def create( *, assistant_id: str, stream: bool, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | Stream[AssistantStreamEvent]: """ Create a run. @@ -477,6 +483,9 @@ def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -539,29 +548,29 @@ def create( thread_id: str, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | Stream[AssistantStreamEvent]: if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") @@ -613,7 +622,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Retrieves a run. 
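The docstring note added in each `create` overload above ("the `gpt-5-pro` model defaults to, and only supports, `high` reasoning effort") is worth a concrete call. A sketch, assuming a pre-existing assistant; the id is a placeholder:

```python
from openai import OpenAI

client = OpenAI()

thread = client.beta.threads.create()
run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id="asst_123",     # placeholder, not a real assistant id
    reasoning_effort="low",      # per the note above, gpt-5-pro would only
                                 # accept "high"; other reasoning models vary
)
print(run.status)
```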
@@ -646,13 +655,13 @@ def update( run_id: str, *, thread_id: str, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Modifies a run. @@ -692,16 +701,16 @@ def list( self, thread_id: str, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[Run]: """ Returns a list of runs belonging to a thread. @@ -766,7 +775,7 @@ def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Cancels a run that is `in_progress`. 
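The cursor parameters that changed type in `list` (`after`, `before`, `limit`, `order`) still drive the same auto-paginating `SyncCursorPage`, so iterating the page object follows cursors for you:

```python
from openai import OpenAI

client = OpenAI()

# Iterating the returned page fetches subsequent pages automatically via
# the `after` cursor; pass `limit`/`order` only to override the defaults.
for run in client.beta.threads.runs.list(thread_id="thread_123", order="desc"):
    print(run.id, run.status)
```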
@@ -798,23 +807,23 @@ def create_and_poll( self, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, + poll_interval_ms: int | Omit = omit, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -870,21 +879,21 @@ def create_and_stream( self, *, assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -902,21 +911,21 @@ def create_and_stream( self, *, assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, event_handler: AssistantEventHandlerT, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -934,21 +943,21 @@ def create_and_stream( self, *, assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, event_handler: AssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -1010,8 +1019,8 @@ def poll( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + poll_interval_ms: int | Omit = omit, ) -> Run: """ A helper to poll a run status until it reaches a terminal state. 
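`poll` (and `create_and_poll` above it) wrap the retrieve-until-terminal loop; `poll_interval_ms` now defaults to `omit`, in which case the SDK chooses an interval. Hand-rolled, the loop is roughly the following — a simplification of what the helper does, not its exact logic:

```python
import time

from openai import OpenAI

client = OpenAI()

# Statuses assumed terminal for polling purposes, based on the Run lifecycle.
TERMINAL = {"completed", "failed", "cancelled", "expired", "requires_action", "incomplete"}


def poll_run(thread_id: str, run_id: str, interval_ms: int = 1000):
    while True:
        run = client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
        if run.status in TERMINAL:
            return run
        time.sleep(interval_ms / 1000)
```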
More @@ -1054,22 +1063,22 @@ def stream( self, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -1087,22 +1096,22 @@ def stream( self, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, event_handler: AssistantEventHandlerT, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -1120,22 +1129,22 @@ def stream( self, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, event_handler: AssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -1201,13 +1210,13 @@ def submit_tool_outputs( *, thread_id: str, tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ When a run has the `status: "requires_action"` and `required_action.type` is @@ -1246,7 +1255,7 @@ def submit_tool_outputs( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[AssistantStreamEvent]: """ When a run has the `status: "requires_action"` and `required_action.type` is @@ -1285,7 +1294,7 @@ def submit_tool_outputs( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | Stream[AssistantStreamEvent]: """ When a run has the `status: "requires_action"` and `required_action.type` is @@ -1319,13 +1328,13 @@ def submit_tool_outputs( *, thread_id: str, tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | Stream[AssistantStreamEvent]: if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") @@ -1358,7 +1367,7 @@ def submit_tool_outputs_and_poll( tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], run_id: str, thread_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
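`submit_tool_outputs` keeps its three-overload shape (non-streaming, streaming, and the union form) with the sentinel swap applied, and `submit_tool_outputs_and_poll` composes it with the polling helper. Typical usage once a run reports `requires_action` — a sketch with placeholder ids and a stubbed tool result:

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.retrieve("run_123", thread_id="thread_123")
if run.status == "requires_action" and run.required_action is not None:
    calls = run.required_action.submit_tool_outputs.tool_calls
    run = client.beta.threads.runs.submit_tool_outputs_and_poll(
        run_id=run.id,
        thread_id="thread_123",
        tool_outputs=[
            {"tool_call_id": call.id, "output": '{"temp_c": 21}'}  # stub result
            for call in calls
        ],
    )
print(run.status)
```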
extra_headers: Headers | None = None, @@ -1519,29 +1528,29 @@ async def create( thread_id: str, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Create a run. @@ -1603,6 +1612,9 @@ async def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -1670,28 +1682,28 @@ async def create( *, assistant_id: str, stream: Literal[True], - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[AssistantStreamEvent]: """ Create a run. @@ -1757,6 +1769,9 @@ async def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -1820,28 +1835,28 @@ async def create( *, assistant_id: str, stream: bool, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | AsyncStream[AssistantStreamEvent]: """ Create a run. @@ -1907,6 +1922,9 @@ async def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), @@ -1970,29 +1988,29 @@ async def create( thread_id: str, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | AsyncStream[AssistantStreamEvent]: if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") @@ -2044,7 +2062,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Retrieves a run. 
@@ -2077,13 +2095,13 @@ async def update( run_id: str, *, thread_id: str, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Modifies a run. @@ -2123,16 +2141,16 @@ def list( self, thread_id: str, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[Run, AsyncCursorPage[Run]]: """ Returns a list of runs belonging to a thread. @@ -2197,7 +2215,7 @@ async def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Cancels a run that is `in_progress`. 
@@ -2229,23 +2247,23 @@ async def create_and_poll( self, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, + poll_interval_ms: int | Omit = omit, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -2301,20 +2319,20 @@ def create_and_stream( self, *, assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -2332,20 +2350,20 @@ def create_and_stream( self, *, assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, event_handler: AsyncAssistantEventHandlerT, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -2363,20 +2381,20 @@ def create_and_stream( self, *, assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, event_handler: AsyncAssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -2439,8 +2457,8 @@ async def poll( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + poll_interval_ms: int | Omit = omit, ) -> Run: """ A helper to poll a run status until it reaches a terminal state. 
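The async resource mirrors all of the above; with `AsyncOpenAI`, `create_and_poll` combines creation and polling in a single awaitable. A sketch with a placeholder assistant id:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    thread = await client.beta.threads.create()
    run = await client.beta.threads.runs.create_and_poll(
        thread_id=thread.id,
        assistant_id="asst_123",   # placeholder id
        poll_interval_ms=500,      # optional; defaults to `omit`
    )
    print(run.status)


asyncio.run(main())
```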
@@ -2483,21 +2501,21 @@ def stream( self, *, assistant_id: str, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method.
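For reference, a hedged usage sketch of the `stream()` helper whose signature changes above. The IDs are placeholders and the `text_deltas` iterator is taken from the SDK's streaming helpers, not from this diff:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# "thread_abc123" / "asst_abc123" are placeholder IDs, not values from this diff.
with client.beta.threads.runs.stream(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
) as stream:
    # The optional parameters (temperature, tools, ...) were left at their
    # `omit` defaults above, so they never appear in the request body.
    for text in stream.text_deltas:
        print(text, end="", flush=True)
```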
@@ -2515,22 +2533,22 @@ def stream( self, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, event_handler: AsyncAssistantEventHandlerT, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -2548,22 +2566,22 @@ def stream( self, *, assistant_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, + additional_instructions: Optional[str] | Omit = omit, + additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit, thread_id: str, event_handler: AsyncAssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -2631,13 +2649,13 @@ async def submit_tool_outputs( *, thread_id: str, tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ When a run has the `status: "requires_action"` and `required_action.type` is @@ -2676,7 +2694,7 @@ async def submit_tool_outputs( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[AssistantStreamEvent]: """ When a run has the `status: "requires_action"` and `required_action.type` is @@ -2715,7 +2733,7 @@ async def submit_tool_outputs( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | AsyncStream[AssistantStreamEvent]: """ When a run has the `status: "requires_action"` and `required_action.type` is @@ -2749,13 +2767,13 @@ async def submit_tool_outputs( *, thread_id: str, tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | AsyncStream[AssistantStreamEvent]: if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") @@ -2788,7 +2806,7 @@ async def submit_tool_outputs_and_poll( tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], run_id: str, thread_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py index 8e34210bd7..254a94435c 100644 --- a/src/openai/resources/beta/threads/runs/steps.py +++ b/src/openai/resources/beta/threads/runs/steps.py @@ -9,7 +9,7 @@ import httpx from ..... import _legacy_response -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -50,13 +50,13 @@ def retrieve( *, thread_id: str, run_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunStep: """ Retrieves a run step. @@ -103,17 +103,17 @@ def list( run_id: str, *, thread_id: str, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + include: List[RunStepInclude] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[RunStep]: """ Returns a list of run steps belonging to a run. @@ -206,13 +206,13 @@ async def retrieve( *, thread_id: str, run_id: str, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunStep: """ Retrieves a run step. @@ -259,17 +259,17 @@ def list( run_id: str, *, thread_id: str, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - include: List[RunStepInclude] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + include: List[RunStepInclude] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[RunStep, AsyncCursorPage[RunStep]]: """ Returns a list of run steps belonging to a run. 
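The `steps.py` changes are the same mechanical swap. A usage sketch under the updated signatures (IDs are placeholders):

```python
from openai import OpenAI

client = OpenAI()

# `after`, `before`, `include`, `limit`, and `order` all default to `omit`;
# only explicitly passed values are serialized into the query string.
steps = client.beta.threads.runs.steps.list(
    "run_abc123",              # run_id is positional in the signature above
    thread_id="thread_abc123",
    limit=20,
    order="asc",
)
for step in steps:  # SyncCursorPage fetches further pages on demand
    print(step.id, step.type)
```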
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 7121851cab..681d3c2933 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -18,7 +18,7 @@ MessagesWithStreamingResponse, AsyncMessagesWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import NOT_GIVEN, Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from .runs.runs import ( Runs, @@ -91,15 +91,15 @@ def with_streaming_response(self) -> ThreadsWithStreamingResponse: def create( self, *, - messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, + messages: Iterable[thread_create_params.Message] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + tool_resources: Optional[thread_create_params.ToolResources] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Create a thread. @@ -155,7 +155,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Retrieves a thread. @@ -185,14 +185,14 @@ def update( self, thread_id: str, *, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + tool_resources: Optional[thread_update_params.ToolResources] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Modifies a thread. @@ -246,7 +246,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ThreadDeleted: """ Delete a thread. 
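A short sketch of the thread CRUD methods above with the new defaults (placeholder IDs and metadata, not values from this diff):

```python
from openai import OpenAI

client = OpenAI()

# All three creation parameters default to `omit`, so this is a bare POST /threads.
thread = client.beta.threads.create()

# Passing a value serializes it; passing None would send an explicit null,
# while leaving the argument unset omits the key from the payload entirely.
client.beta.threads.update(thread.id, metadata={"user": "u_123"})

client.beta.threads.delete(thread.id)
```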
@@ -277,27 +277,27 @@ def create_and_run( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Create a thread and run it in one request. 
@@ -412,26 +412,26 @@ def create_and_run( *, assistant_id: str, stream: Literal[True], - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[AssistantStreamEvent]: """ Create a thread and run it in one request. 
@@ -546,26 +546,26 @@ def create_and_run( *, assistant_id: str, stream: bool, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | Stream[AssistantStreamEvent]: """ Create a thread and run it in one request. 
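The three `create_and_run` overloads differ only in the `stream` argument and the return type. An illustrative sketch (placeholder IDs):

```python
from openai import OpenAI

client = OpenAI()
thread = {"messages": [{"role": "user", "content": "Hello!"}]}

# `stream` left at its `omit` default selects the first overload -> Run
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread=thread,
)

# `stream=True` selects the Literal[True] overload ->
# Stream[AssistantStreamEvent], consumed by iteration
events = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread=thread,
    stream=True,
)
for event in events:
    print(event.event)
```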
@@ -680,27 +680,27 @@ def create_and_run( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | Stream[AssistantStreamEvent]: extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return self._post( @@ -740,21 +740,21 @@ def create_and_run_poll( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, + poll_interval_ms: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -796,20 +796,20 @@ def create_and_run_stream( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -825,20 +825,20 @@ def create_and_run_stream( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, event_handler: AssistantEventHandlerT, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -854,20 +854,20 @@ def create_and_run_stream( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, event_handler: AssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -949,15 +949,15 @@ def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse: async def create( self, *, - messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, + messages: Iterable[thread_create_params.Message] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + tool_resources: Optional[thread_create_params.ToolResources] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Create a thread. @@ -1013,7 +1013,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Retrieves a thread. 
@@ -1043,14 +1043,14 @@ async def update( self, thread_id: str, *, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + tool_resources: Optional[thread_update_params.ToolResources] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Modifies a thread. @@ -1104,7 +1104,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ThreadDeleted: """ Delete a thread. @@ -1135,27 +1135,27 @@ async def create_and_run( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Create a thread and run it in one request. @@ -1270,26 +1270,26 @@ async def create_and_run( *, assistant_id: str, stream: Literal[True], - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[AssistantStreamEvent]: """ Create a thread and run it in one request. 
@@ -1404,26 +1404,26 @@ async def create_and_run( *, assistant_id: str, stream: bool, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | AsyncStream[AssistantStreamEvent]: """ Create a thread and run it in one request. 
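The async client mirrors the sync overloads. A hedged sketch (same placeholder IDs):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    # With stream=True the async client returns an
    # AsyncStream[AssistantStreamEvent], consumed with `async for`.
    events = await client.beta.threads.create_and_run(
        assistant_id="asst_abc123",
        thread={"messages": [{"role": "user", "content": "Hello!"}]},
        stream=True,
    )
    async for event in events:
        print(event.event)


asyncio.run(main())
```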
@@ -1538,27 +1538,27 @@ async def create_and_run( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | AsyncStream[AssistantStreamEvent]: extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return await self._post( @@ -1598,21 +1598,21 @@ async def create_and_run_poll( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, + poll_interval_ms: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -1656,20 +1656,20 @@ def create_and_run_stream( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -1685,20 +1685,20 @@ def create_and_run_stream( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, event_handler: AsyncAssistantEventHandlerT, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -1714,20 +1714,20 @@ def create_and_run_stream( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_prompt_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: Union[str, ChatModel, None] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + thread: thread_create_and_run_params.Thread | Omit = omit, + tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, + tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, + tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, event_handler: AsyncAssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
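The `event_handler` variants of `create_and_run_stream` above accept a handler instead of leaving the caller to iterate raw events. A sketch assuming the SDK's `AssistantEventHandler` base class, which is not shown in this diff:

```python
from typing_extensions import override

from openai import OpenAI, AssistantEventHandler

client = OpenAI()


class PrintHandler(AssistantEventHandler):
    # Callback invoked for each streamed text fragment.
    @override
    def on_text_delta(self, delta, snapshot) -> None:
        print(delta.value, end="", flush=True)


with client.beta.threads.create_and_run_stream(
    assistant_id="asst_abc123",
    thread={"messages": [{"role": "user", "content": "Hello!"}]},
    event_handler=PrintHandler(),
) as stream:
    stream.until_done()  # block until the run reaches a terminal state
```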
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 168cf04dbc..4b73c69ae9 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -19,7 +19,7 @@ MessagesWithStreamingResponse, AsyncMessagesWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -86,43 +86,43 @@ def parse( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + response_format: type[ResponseFormatT] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ParsedChatCompletion[ResponseFormatT]: """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class. @@ -240,44 +240,44 @@ def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: 
Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: completion_create_params.ResponseFormat | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion: """ **Starting a new project?** We recommend trying @@ -407,6 +407,9 @@ def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: An object specifying the format that the model must output. 
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -529,43 +532,43 @@ def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: Literal[True], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: completion_create_params.ResponseFormat | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: 
Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[ChatCompletionChunk]: """ **Starting a new project?** We recommend trying @@ -704,6 +707,9 @@ def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -817,43 +823,43 @@ def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: bool, - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + frequency_penalty: 
Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: completion_create_params.ResponseFormat | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion | Stream[ChatCompletionChunk]: """ **Starting a new project?** We recommend trying @@ -992,6 +998,9 @@ def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: An object specifying the format that the model must output. 
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1104,44 +1113,44 @@ def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: completion_create_params.ResponseFormat | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | 
Literal[True] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion | Stream[ChatCompletionChunk]: validate_response_format(response_format) return self._post( @@ -1204,7 +1213,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion: """Get a stored chat completion. @@ -1240,7 +1249,7 @@ def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion: """Modify a stored chat completion. @@ -1278,17 +1287,17 @@ def update( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: str | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: str | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[ChatCompletion]: """List stored Chat Completions. @@ -1300,10 +1309,12 @@ def list( limit: Number of Chat Completions to retrieve. - metadata: - A list of metadata keys to filter the Chat Completions by. Example: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. - `metadata[key1]=value1&metadata[key2]=value2` + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The model used to generate the Chat Completions. 
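The rewritten `metadata` docstring above now matches the standard metadata description used elsewhere in the API instead of the old query-string filter example. A hedged usage sketch of `list` under the migrated signature; the metadata keys and values are invented for illustration, and an `OPENAI_API_KEY` environment variable is assumed:

```python
# Page through stored chat completions filtered by metadata.
from openai import OpenAI

client = OpenAI()

page = client.chat.completions.list(
    metadata={"project": "demo", "env": "staging"},  # illustrative tags
    limit=10,      # provided -> sent as a query parameter
    order="desc",  # `after` and `model` stay at their `omit` defaults
)
for completion in page:
    print(completion.id, completion.model)
```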
@@ -1349,7 +1360,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletionDeleted: """Delete a stored chat completion. @@ -1380,43 +1391,43 @@ def stream( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: 
Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletionStreamManager[ResponseFormatT]: """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API and automatic accumulation of each delta. @@ -1522,43 +1533,43 @@ async def parse( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + 
response_format: type[ResponseFormatT] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ParsedChatCompletion[ResponseFormatT]: """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class. 
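As the docstring above says, `parse` wraps `create()` and returns a `ParsedChatCompletion`. A hedged sketch under the migrated async signature, where `response_format` accepts a Pydantic model class; the `CalendarEvent` schema, prompt, and model name are illustrative, not from this diff:

```python
# Structured-output sketch for the async parse() wrapper; assumes
# OPENAI_API_KEY is set in the environment.
import asyncio

from pydantic import BaseModel

from openai import AsyncOpenAI


class CalendarEvent(BaseModel):
    name: str
    date: str
    participants: list[str]


async def main() -> None:
    client = AsyncOpenAI()
    completion = await client.chat.completions.parse(
        model="gpt-4o-2024-08-06",
        messages=[{"role": "user", "content": "Alice and Bob meet on Friday."}],
        response_format=CalendarEvent,  # every other keyword stays at `omit`
    )
    print(completion.choices[0].message.parsed)  # -> CalendarEvent instance


asyncio.run(main())
```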
@@ -1676,44 +1687,44 @@ async def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: completion_create_params.ResponseFormat | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + 
temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion: """ **Starting a new project?** We recommend trying @@ -1843,6 +1854,9 @@ async def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1965,43 +1979,43 @@ async def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: Literal[True], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = 
omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: completion_create_params.ResponseFormat | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[ChatCompletionChunk]: """ **Starting a new project?** We recommend trying @@ -2140,6 +2154,9 @@ async def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: An object specifying the format that the model must output. 
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -2253,43 +2270,43 @@ async def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: bool, - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: completion_create_params.ResponseFormat | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: 
Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: """ **Starting a new project?** We recommend trying @@ -2428,6 +2445,9 @@ async def create( effort can result in faster responses and fewer tokens used on reasoning in a response. + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -2540,44 +2560,44 @@ async def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | 
NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + response_format: completion_create_params.ResponseFormat | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: validate_response_format(response_format) return await self._post( @@ -2640,7 +2660,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion: """Get a stored chat completion. @@ -2676,7 +2696,7 @@ async def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletion: """Modify a stored chat completion. 
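The async `retrieve` and `update` methods above (and `delete` in the following hunk) change only their `timeout` default. A hedged sketch of the stored-completion lifecycle they cover; `"chatcmpl-abc123"` is a placeholder id:

```python
# Retrieve, tag, and delete a stored chat completion with the async client.
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    completion = await client.chat.completions.retrieve("chatcmpl-abc123")
    print(completion.model)

    # metadata is the modifiable field on a stored completion.
    await client.chat.completions.update(
        "chatcmpl-abc123", metadata={"reviewed": "true"}
    )
    await client.chat.completions.delete("chatcmpl-abc123")


asyncio.run(main())
```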
@@ -2714,17 +2734,17 @@ async def update( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: str | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: str | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[ChatCompletion, AsyncCursorPage[ChatCompletion]]: """List stored Chat Completions. @@ -2736,10 +2756,12 @@ def list( limit: Number of Chat Completions to retrieve. - metadata: - A list of metadata keys to filter the Chat Completions by. Example: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. - `metadata[key1]=value1&metadata[key2]=value2` + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The model used to generate the Chat Completions. @@ -2785,7 +2807,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ChatCompletionDeleted: """Delete a stored chat completion. 
@@ -2816,43 +2838,43 @@ def stream( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + audio: Optional[ChatCompletionAudioParam] | Omit = omit, + response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + function_call: completion_create_params.FunctionCall | Omit = omit, + functions: Iterable[completion_create_params.Function] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit, + n: Optional[int] | Omit = omit, + parallel_tool_calls: bool | Omit = omit, + prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning_effort: Optional[ReasoningEffort] | Omit = omit, + safety_identifier: str | Omit = omit, + seed: Optional[int] | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: 
ChatCompletionToolChoiceOptionParam | Omit = omit, + tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, + web_search_options: completion_create_params.WebSearchOptions | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncChatCompletionStreamManager[ResponseFormatT]: """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API and automatic accumulation of each delta. diff --git a/src/openai/resources/chat/completions/messages.py b/src/openai/resources/chat/completions/messages.py index fac15fba8b..3d6dc79cd6 100644 --- a/src/openai/resources/chat/completions/messages.py +++ b/src/openai/resources/chat/completions/messages.py @@ -7,7 +7,7 @@ import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -44,15 +44,15 @@ def list( self, completion_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[ChatCompletionStoreMessage]: """Get the messages in a stored chat completion. @@ -122,15 +122,15 @@ def list( self, completion_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]: """Get the messages in a stored chat completion. 
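For the `stream` wrapper whose parameters are migrated above, usage looks roughly like this. A sketch assuming the streaming-helper event name (`content.delta`) and the `get_final_completion()` accessor, neither of which this diff shows:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    # stream() wraps create(stream=True) in an async context manager that
    # yields granular events and accumulates every delta for you.
    async with client.chat.completions.stream(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say hello"}],
    ) as stream:
        async for event in stream:
            if event.type == "content.delta":
                print(event.delta, end="", flush=True)
        completion = await stream.get_final_completion()
        print("\nfinish reason:", completion.choices[0].finish_reason)


asyncio.run(main())
```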
diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py index 97a84575ab..2f2284a622 100644 --- a/src/openai/resources/completions.py +++ b/src/openai/resources/completions.py @@ -9,7 +9,7 @@ from .. import _legacy_response from ..types import completion_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from .._utils import required_args, maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -50,28 +50,28 @@ def create( *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + best_of: Optional[int] | Omit = omit, + echo: Optional[bool] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + seed: Optional[int] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + suffix: Optional[str] | Omit = omit, + temperature: Optional[float] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Completion: """ Creates a completion for the provided prompt and parameters. 
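At call sites the sentinel swap is invisible: you simply leave arguments out. What `omit` preserves is the distinction between "not provided" and an explicit `None`, as I read the semantics of this migration; a sketch:

```python
from openai import OpenAI

client = OpenAI()

# No `max_tokens` key appears in the request body at all: the parameter
# defaults to the `omit` sentinel and is dropped during serialization.
client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Hello",
)

# An explicit None is a real value for Optional[...] parameters and is
# serialized as JSON null rather than being dropped.
client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Hello",
    max_tokens=None,
)
```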
@@ -206,27 +206,27 @@ def create( model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], stream: Literal[True], - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + best_of: Optional[int] | Omit = omit, + echo: Optional[bool] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + seed: Optional[int] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + suffix: Optional[str] | Omit = omit, + temperature: Optional[float] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[Completion]: """ Creates a completion for the provided prompt and parameters. 
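The `stream: Literal[True]` overload above returns `Stream[Completion]`; a minimal sync sketch:

```python
from openai import OpenAI

client = OpenAI()

# stream=True selects the Stream[Completion] overload; the omitted sampling
# parameters default to `omit` and never reach the wire.
stream = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Write a haiku about type safety.",
    stream=True,
    max_tokens=64,
)
for chunk in stream:
    print(chunk.choices[0].text, end="", flush=True)
print()
```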
@@ -361,27 +361,27 @@ def create( model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], stream: bool, - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + best_of: Optional[int] | Omit = omit, + echo: Optional[bool] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + seed: Optional[int] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + suffix: Optional[str] | Omit = omit, + temperature: Optional[float] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Completion | Stream[Completion]: """ Creates a completion for the provided prompt and parameters. 
@@ -515,28 +515,28 @@ def create( *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + best_of: Optional[int] | Omit = omit, + echo: Optional[bool] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + seed: Optional[int] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + suffix: Optional[str] | Omit = omit, + temperature: Optional[float] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Completion | Stream[Completion]: return self._post( "/completions", @@ -600,28 +600,28 @@ async def create( *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + best_of: Optional[int] | Omit = omit, + echo: Optional[bool] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + seed: Optional[int] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + suffix: Optional[str] | Omit = omit, + temperature: Optional[float] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Completion: """ Creates a completion for the provided prompt and parameters. 
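Note that `timeout` keeps the `NotGiven` union (now defaulting to `not_given`) rather than moving to `Omit`, so "use the client default" stays distinguishable from `None`, which in httpx terms disables the timeout. A sketch of a per-request override:

```python
import httpx

from openai import OpenAI

client = OpenAI()

# Override the client-level timeout for this one call: fail the connect
# phase after 2s and the whole request after 10s.
completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="ping",
    timeout=httpx.Timeout(10.0, connect=2.0),
)
print(completion.choices[0].text)
```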
@@ -756,27 +756,27 @@ async def create( model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], stream: Literal[True], - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + best_of: Optional[int] | Omit = omit, + echo: Optional[bool] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + seed: Optional[int] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + suffix: Optional[str] | Omit = omit, + temperature: Optional[float] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[Completion]: """ Creates a completion for the provided prompt and parameters. 
@@ -911,27 +911,27 @@ async def create( model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], stream: bool, - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + best_of: Optional[int] | Omit = omit, + echo: Optional[bool] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + seed: Optional[int] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + suffix: Optional[str] | Omit = omit, + temperature: Optional[float] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Completion | AsyncStream[Completion]: """ Creates a completion for the provided prompt and parameters. 
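The runtime-valued `stream: bool` overload above resolves to `Completion | AsyncStream[Completion]`, so callers narrow the union themselves. A sketch assuming `AsyncStream` is importable from the package root, which this diff does not show:

```python
import asyncio

from openai import AsyncOpenAI, AsyncStream

client = AsyncOpenAI()


async def complete(prompt: str, stream: bool) -> str:
    # With a runtime `stream` flag the return type is the union
    # Completion | AsyncStream[Completion], narrowed with isinstance().
    result = await client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt,
        stream=stream,
    )
    if isinstance(result, AsyncStream):
        return "".join([chunk.choices[0].text async for chunk in result])
    return result.choices[0].text


print(asyncio.run(complete("ping", stream=True)))
```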
@@ -1065,28 +1065,28 @@ async def create( *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + best_of: Optional[int] | Omit = omit, + echo: Optional[bool] | Omit = omit, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + seed: Optional[int] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit, + suffix: Optional[str] | Omit = omit, + temperature: Optional[float] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Completion | AsyncStream[Completion]: return await self._post( "/completions", diff --git a/src/openai/resources/containers/containers.py b/src/openai/resources/containers/containers.py index 30e9eff127..dcdc3e1a3e 100644 --- a/src/openai/resources/containers/containers.py +++ b/src/openai/resources/containers/containers.py @@ -8,7 +8,7 @@ from ... import _legacy_response from ...types import container_list_params, container_create_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -58,14 +58,14 @@ def create( self, *, name: str, - expires_after: container_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - file_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + expires_after: container_create_params.ExpiresAfter | Omit = omit, + file_ids: SequenceNotStr[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ContainerCreateResponse: """ Create Container @@ -110,7 +110,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ContainerRetrieveResponse: """ Retrieve Container @@ -137,15 +137,15 @@ def retrieve( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[ContainerListResponse]: """List Containers @@ -200,7 +200,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Delete Container @@ -254,14 +254,14 @@ async def create( self, *, name: str, - expires_after: container_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - file_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + expires_after: container_create_params.ExpiresAfter | Omit = omit, + file_ids: SequenceNotStr[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ContainerCreateResponse: """ Create Container @@ -306,7 +306,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ContainerRetrieveResponse: """ Retrieve Container @@ -333,15 +333,15 @@ async def retrieve( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[ContainerListResponse, AsyncCursorPage[ContainerListResponse]]: """List Containers @@ -396,7 +396,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Delete Container diff --git a/src/openai/resources/containers/files/content.py b/src/openai/resources/containers/files/content.py index a200383407..a3dbd0e8c7 100644 --- a/src/openai/resources/containers/files/content.py +++ b/src/openai/resources/containers/files/content.py @@ -5,7 +5,7 @@ import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Query, Headers, NotGiven, not_given from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import ( @@ -49,7 +49,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> _legacy_response.HttpxBinaryResponseContent: """ Retrieve Container File Content @@ -107,7 +107,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> _legacy_response.HttpxBinaryResponseContent: """ Retrieve Container File Content diff --git a/src/openai/resources/containers/files/files.py b/src/openai/resources/containers/files/files.py index 624398b97b..a472cfc9f3 100644 --- a/src/openai/resources/containers/files/files.py +++ b/src/openai/resources/containers/files/files.py @@ -16,7 +16,7 @@ ContentWithStreamingResponse, AsyncContentWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, FileTypes +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, FileTypes, omit, not_given from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -59,14 +59,14 @@ def create( self, container_id: str, *, - file: FileTypes | NotGiven = NOT_GIVEN, - file_id: str | NotGiven = NOT_GIVEN, + file: FileTypes | Omit = omit, + file_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileCreateResponse: """ Create a Container File @@ -120,7 +120,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileRetrieveResponse: """ Retrieve Container File @@ -150,15 +150,15 @@ def list( self, container_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[FileListResponse]: """List Container files @@ -216,7 +216,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Delete Container File @@ -272,14 +272,14 @@ async def create( self, container_id: str, *, - file: FileTypes | NotGiven = NOT_GIVEN, - file_id: str | NotGiven = NOT_GIVEN, + file: FileTypes | Omit = omit, + file_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileCreateResponse: """ Create a Container File @@ -333,7 +333,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileRetrieveResponse: """ Retrieve Container File @@ -363,15 +363,15 @@ def list( self, container_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[FileListResponse, AsyncCursorPage[FileListResponse]]: """List Container files @@ -429,7 +429,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Delete Container File diff --git a/src/openai/resources/conversations/conversations.py b/src/openai/resources/conversations/conversations.py index 802620e6ad..da037a4e22 100644 --- a/src/openai/resources/conversations/conversations.py +++ b/src/openai/resources/conversations/conversations.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Dict, Iterable, Optional +from typing import Iterable, Optional import httpx @@ -15,7 +15,7 @@ ItemsWithStreamingResponse, AsyncItemsWithStreamingResponse, ) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -57,14 +57,14 @@ def with_streaming_response(self) -> ConversationsWithStreamingResponse: def create( self, *, - items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + items: Optional[Iterable[ResponseInputItemParam]] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Conversation: """ Create a conversation. @@ -73,8 +73,12 @@ def create( items: Initial items to include in the conversation context. You may add up to 20 items at a time. - metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing - additional information about the object in a structured format. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -108,10 +112,10 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Conversation: """ - Get a conversation with the given ID. 
+ Get a conversation Args: extra_headers: Send extra headers @@ -136,23 +140,24 @@ def update( self, conversation_id: str, *, - metadata: Dict[str, str], + metadata: Optional[Metadata], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Conversation: """ - Update a conversation's metadata with the given ID. + Update a conversation Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. Keys are strings with a maximum - length of 64 characters. Values are strings with a maximum length of 512 - characters. + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -182,10 +187,11 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConversationDeletedResource: - """ - Delete a conversation with the given ID. + """Delete a conversation. + + Items in the conversation will not be deleted. Args: extra_headers: Send extra headers @@ -234,14 +240,14 @@ def with_streaming_response(self) -> AsyncConversationsWithStreamingResponse: async def create( self, *, - items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + items: Optional[Iterable[ResponseInputItemParam]] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Conversation: """ Create a conversation. @@ -250,8 +256,12 @@ async def create( items: Initial items to include in the conversation context. You may add up to 20 items at a time. - metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing - additional information about the object in a structured format. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
extra_headers: Send extra headers @@ -285,10 +295,10 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Conversation: """ - Get a conversation with the given ID. + Get a conversation Args: extra_headers: Send extra headers @@ -313,23 +323,24 @@ async def update( self, conversation_id: str, *, - metadata: Dict[str, str], + metadata: Optional[Metadata], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Conversation: """ - Update a conversation's metadata with the given ID. + Update a conversation Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. Keys are strings with a maximum - length of 64 characters. Values are strings with a maximum length of 512 - characters. + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -361,10 +372,11 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConversationDeletedResource: - """ - Delete a conversation with the given ID. + """Delete a conversation. + + Items in the conversation will not be deleted. Args: extra_headers: Send extra headers diff --git a/src/openai/resources/conversations/items.py b/src/openai/resources/conversations/items.py index 01811f956b..3dba144849 100644 --- a/src/openai/resources/conversations/items.py +++ b/src/openai/resources/conversations/items.py @@ -8,7 +8,7 @@ import httpx from ... import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -50,13 +50,13 @@ def create( conversation_id: str, *, items: Iterable[ResponseInputItemParam], - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConversationItemList: """ Create items in a conversation with the given ID. 
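Taken together, the conversations changes above (`metadata` widened to `Optional[Metadata]` on `update`, `items.create` on the new sentinels) allow calls like the following. The message item shape is an assumption based on `ResponseInputItemParam`, and the metadata keys are illustrative:

```python
from openai import OpenAI

client = OpenAI()

conversation = client.conversations.create(
    items=[{"type": "message", "role": "user", "content": "Hello!"}],
    metadata={"topic": "greeting"},  # illustrative key/value
)

# `metadata` is now Optional[Metadata], so None is a legal way to clear it.
client.conversations.update(conversation.id, metadata=None)

client.conversations.items.create(
    conversation.id,
    items=[{"type": "message", "role": "user", "content": "How are you?"}],
)
```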
@@ -96,13 +96,13 @@ def retrieve( item_id: str, *, conversation_id: str, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConversationItem: """ Get a single item from a conversation with the given IDs. @@ -143,16 +143,16 @@ def list( self, conversation_id: str, *, - after: str | NotGiven = NOT_GIVEN, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + include: List[ResponseIncludable] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncConversationCursorPage[ConversationItem]: """ List all items for a conversation with the given ID. @@ -228,7 +228,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Conversation: """ Delete an item from a conversation with the given IDs. @@ -280,13 +280,13 @@ async def create( conversation_id: str, *, items: Iterable[ResponseInputItemParam], - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConversationItemList: """ Create items in a conversation with the given ID. @@ -326,13 +326,13 @@ async def retrieve( item_id: str, *, conversation_id: str, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConversationItem: """ Get a single item from a conversation with the given IDs. 
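The paginated item listing migrated above is consumed like any other cursor page; the conversation ID is hypothetical:

```python
from openai import OpenAI

client = OpenAI()

# Iterate items newest-first; the page object fetches follow-up cursor
# pages transparently, so `after` rarely needs to be passed by hand.
items = client.conversations.items.list(
    "conv_123",  # hypothetical conversation ID
    limit=50,
    order="desc",
)
for item in items:
    print(item.type)
```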
@@ -373,16 +373,16 @@ def list( self, conversation_id: str, *, - after: str | NotGiven = NOT_GIVEN, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + include: List[ResponseIncludable] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[ConversationItem, AsyncConversationCursorPage[ConversationItem]]: """ List all items for a conversation with the given ID. @@ -458,7 +458,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Conversation: """ Delete an item from a conversation with the given IDs. diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index a8cf179850..5dc3dfa9b3 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -11,7 +11,7 @@ from .. import _legacy_response from ..types import embedding_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from .._utils import is_given, maybe_transform from .._compat import cached_property from .._extras import numpy as np, has_numpy @@ -49,15 +49,15 @@ def create( *, input: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]]], model: Union[str, EmbeddingModel], - dimensions: int | NotGiven = NOT_GIVEN, - encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + dimensions: int | Omit = omit, + encoding_format: Literal["float", "base64"] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CreateEmbeddingResponse: """ Creates an embedding vector representing the input text. @@ -168,15 +168,15 @@ async def create( *, input: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]]], model: Union[str, EmbeddingModel], - dimensions: int | NotGiven = NOT_GIVEN, - encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + dimensions: int | Omit = omit, + encoding_format: Literal["float", "base64"] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CreateEmbeddingResponse: """ Creates an embedding vector representing the input text. diff --git a/src/openai/resources/evals/evals.py b/src/openai/resources/evals/evals.py index 7aba192c51..40c4a3e9a3 100644 --- a/src/openai/resources/evals/evals.py +++ b/src/openai/resources/evals/evals.py @@ -9,7 +9,7 @@ from ... import _legacy_response from ...types import eval_list_params, eval_create_params, eval_update_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from .runs.runs import ( @@ -63,14 +63,14 @@ def create( *, data_source_config: eval_create_params.DataSourceConfig, testing_criteria: Iterable[eval_create_params.TestingCriterion], - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvalCreateResponse: """ Create the structure of an evaluation that can be used to test a model's @@ -132,7 +132,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvalRetrieveResponse: """ Get an evaluation by ID. @@ -160,14 +160,14 @@ def update( self, eval_id: str, *, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvalUpdateResponse: """ Update certain properties of an evaluation. @@ -210,16 +210,16 @@ def update( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - order_by: Literal["created_at", "updated_at"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + order_by: Literal["created_at", "updated_at"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[EvalListResponse]: """ List evaluations for a project. @@ -273,7 +273,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvalDeleteResponse: """ Delete an evaluation. @@ -327,14 +327,14 @@ async def create( *, data_source_config: eval_create_params.DataSourceConfig, testing_criteria: Iterable[eval_create_params.TestingCriterion], - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvalCreateResponse: """ Create the structure of an evaluation that can be used to test a model's @@ -396,7 +396,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvalRetrieveResponse: """ Get an evaluation by ID. @@ -424,14 +424,14 @@ async def update( self, eval_id: str, *, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvalUpdateResponse: """ Update certain properties of an evaluation. @@ -474,16 +474,16 @@ async def update( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - order_by: Literal["created_at", "updated_at"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + order_by: Literal["created_at", "updated_at"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[EvalListResponse, AsyncCursorPage[EvalListResponse]]: """ List evaluations for a project. @@ -537,7 +537,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvalDeleteResponse: """ Delete an evaluation. diff --git a/src/openai/resources/evals/runs/output_items.py b/src/openai/resources/evals/runs/output_items.py index 8fd0fdea92..c2dee72122 100644 --- a/src/openai/resources/evals/runs/output_items.py +++ b/src/openai/resources/evals/runs/output_items.py @@ -7,7 +7,7 @@ import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -52,7 +52,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OutputItemRetrieveResponse: """ Get an evaluation run output item by ID. @@ -85,16 +85,16 @@ def list( run_id: str, *, eval_id: str, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - status: Literal["fail", "pass"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + status: Literal["fail", "pass"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[OutputItemListResponse]: """ Get a list of output items for an evaluation run. @@ -175,7 +175,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OutputItemRetrieveResponse: """ Get an evaluation run output item by ID. @@ -208,16 +208,16 @@ def list( run_id: str, *, eval_id: str, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - status: Literal["fail", "pass"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + status: Literal["fail", "pass"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[OutputItemListResponse, AsyncCursorPage[OutputItemListResponse]]: """ Get a list of output items for an evaluation run. diff --git a/src/openai/resources/evals/runs/runs.py b/src/openai/resources/evals/runs/runs.py index 7efc61292c..b747b198f8 100644 --- a/src/openai/resources/evals/runs/runs.py +++ b/src/openai/resources/evals/runs/runs.py @@ -8,7 +8,7 @@ import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -63,14 +63,14 @@ def create( eval_id: str, *, data_source: run_create_params.DataSource, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunCreateResponse: """ Kicks off a new run for a given evaluation, specifying the data source, and what @@ -125,7 +125,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunRetrieveResponse: """ Get an evaluation run by ID. @@ -155,16 +155,16 @@ def list( self, eval_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[RunListResponse]: """ Get a list of runs for an evaluation. @@ -221,7 +221,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunDeleteResponse: """ Delete an eval run. 
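Reviewer note: the practical effect of the `NOT_GIVEN` → `omit` switch throughout these eval resources is that an optional argument left unset is dropped from the request entirely, while an explicit `None` would be serialized as JSON null. A minimal sketch against the `evals.runs.list` signature above (the `eval_abc123` ID is a placeholder; the client is assumed to read `OPENAI_API_KEY` from the environment):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Optional filters now default to `omit` rather than `NOT_GIVEN`:
# anything left unset is simply not serialized into the query string.
runs = client.evals.runs.list(
    eval_id="eval_abc123",   # placeholder ID
    status="completed",      # the only optional filter actually sent
)
for run in runs:  # SyncCursorPage iterates with auto-pagination
    print(run.id, run.status)
```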
@@ -257,7 +257,7 @@ def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunCancelResponse: """ Cancel an ongoing evaluation run. @@ -313,14 +313,14 @@ async def create( eval_id: str, *, data_source: run_create_params.DataSource, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunCreateResponse: """ Kicks off a new run for a given evaluation, specifying the data source, and what @@ -375,7 +375,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunRetrieveResponse: """ Get an evaluation run by ID. @@ -405,16 +405,16 @@ def list( self, eval_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[RunListResponse, AsyncCursorPage[RunListResponse]]: """ Get a list of runs for an evaluation. @@ -471,7 +471,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunDeleteResponse: """ Delete an eval run. @@ -507,7 +507,7 @@ async def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RunCancelResponse: """ Cancel an ongoing evaluation run. diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index 963c3c0a9f..cc117e7f15 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -11,7 +11,7 @@ from .. 
import _legacy_response from ..types import FilePurpose, file_list_params, file_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -57,13 +57,13 @@ def create( *, file: FileTypes, purpose: FilePurpose, - expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, + expires_after: file_create_params.ExpiresAfter | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileObject: """Upload a file that can be used across various endpoints. @@ -71,20 +71,19 @@ def create( up to 512 MB, and the size of all files uploaded by one organization can be up to 1 TB. - The Assistants API supports files up to 2 million tokens and of specific file - types. See the - [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for - details. - - The Fine-tuning API only supports `.jsonl` files. The input also has certain - required formats for fine-tuning - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - models. - - The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - has a specific required - [format](https://platform.openai.com/docs/api-reference/batch/request-input). + - The Assistants API supports files up to 2 million tokens and of specific file + types. See the + [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) + for details. + - The Fine-tuning API only supports `.jsonl` files. The input also has certain + required formats for fine-tuning + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) + or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + models. + - The Batch API only supports `.jsonl` files up to 200 MB in size. The input + also has a specific required + [format](https://platform.openai.com/docs/api-reference/batch/request-input). Please [contact us](https://help.openai.com/) if you need to increase these storage limits. @@ -139,7 +138,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileObject: """ Returns information about a specific file. 
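For the `files.create` change above, `expires_after` now defaults to `omit`. A sketch of a call that does set it; the dict shape follows `file_create_params.ExpiresAfter` as I read it (anchor plus seconds), and the filename and retention window are illustrative:

```python
from openai import OpenAI

client = OpenAI()

# `expires_after` defaults to `omit`; when provided it follows the
# file_create_params.ExpiresAfter shape (anchor + seconds).
uploaded = client.files.create(
    file=open("batch_input.jsonl", "rb"),  # illustrative local file
    purpose="batch",
    expires_after={"anchor": "created_at", "seconds": 86400},
)
print(uploaded.id, uploaded.expires_at)
```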
@@ -166,16 +165,16 @@ def retrieve( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - purpose: str | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + purpose: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[FileObject]: """Returns a list of files. @@ -233,10 +232,10 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileDeleted: """ - Delete a file. + Delete a file and remove it from all vector stores. Args: extra_headers: Send extra headers @@ -266,7 +265,7 @@ def content( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> _legacy_response.HttpxBinaryResponseContent: """ Returns the contents of the specified file. @@ -301,7 +300,7 @@ def retrieve_content( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> str: """ Returns the contents of the specified file. @@ -374,13 +373,13 @@ async def create( *, file: FileTypes, purpose: FilePurpose, - expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, + expires_after: file_create_params.ExpiresAfter | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileObject: """Upload a file that can be used across various endpoints. @@ -388,20 +387,19 @@ async def create( up to 512 MB, and the size of all files uploaded by one organization can be up to 1 TB. - The Assistants API supports files up to 2 million tokens and of specific file - types. See the - [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for - details. - - The Fine-tuning API only supports `.jsonl` files. The input also has certain - required formats for fine-tuning - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - models. - - The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - has a specific required - [format](https://platform.openai.com/docs/api-reference/batch/request-input). 
+ - The Assistants API supports files up to 2 million tokens and of specific file + types. See the + [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) + for details. + - The Fine-tuning API only supports `.jsonl` files. The input also has certain + required formats for fine-tuning + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) + or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + models. + - The Batch API only supports `.jsonl` files up to 200 MB in size. The input + also has a specific required + [format](https://platform.openai.com/docs/api-reference/batch/request-input). Please [contact us](https://help.openai.com/) if you need to increase these storage limits. @@ -456,7 +454,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileObject: """ Returns information about a specific file. @@ -483,16 +481,16 @@ async def retrieve( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - purpose: str | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + purpose: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[FileObject, AsyncCursorPage[FileObject]]: """Returns a list of files. @@ -550,10 +548,10 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FileDeleted: """ - Delete a file. + Delete a file and remove it from all vector stores. Args: extra_headers: Send extra headers @@ -583,7 +581,7 @@ async def content( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> _legacy_response.HttpxBinaryResponseContent: """ Returns the contents of the specified file. @@ -618,7 +616,7 @@ async def retrieve_content( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> str: """ Returns the contents of the specified file. diff --git a/src/openai/resources/fine_tuning/alpha/graders.py b/src/openai/resources/fine_tuning/alpha/graders.py index 387e6c72ff..e7a9b925ea 100644 --- a/src/openai/resources/fine_tuning/alpha/graders.py +++ b/src/openai/resources/fine_tuning/alpha/graders.py @@ -5,7 +5,7 @@ import httpx from .... 
import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -43,13 +43,13 @@ def run( *, grader: grader_run_params.Grader, model_sample: str, - item: object | NotGiven = NOT_GIVEN, + item: object | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GraderRunResponse: """ Run a grader. @@ -100,7 +100,7 @@ def validate( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GraderValidateResponse: """ Validate a grader. @@ -151,13 +151,13 @@ async def run( *, grader: grader_run_params.Grader, model_sample: str, - item: object | NotGiven = NOT_GIVEN, + item: object | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GraderRunResponse: """ Run a grader. @@ -208,7 +208,7 @@ async def validate( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GraderValidateResponse: """ Validate a grader. diff --git a/src/openai/resources/fine_tuning/checkpoints/permissions.py b/src/openai/resources/fine_tuning/checkpoints/permissions.py index f8ae125941..e7f55b82d9 100644 --- a/src/openai/resources/fine_tuning/checkpoints/permissions.py +++ b/src/openai/resources/fine_tuning/checkpoints/permissions.py @@ -7,7 +7,7 @@ import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -52,7 +52,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncPage[PermissionCreateResponse]: """ **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). 
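For the `fine_tuning.alpha.graders` changes above: `item` now defaults to `omit` and only needs to be passed when the grader's templates reference `{{item.*}}` fields. A sketch; the `string_check` grader dict and the `{{sample.output_text}}` template are assumptions drawn from the graders docs, not from this diff:

```python
from openai import OpenAI

client = OpenAI()

# `item` defaults to `omit`; it is only needed when the grader's
# templates reference {{item.*}} fields.
result = client.fine_tuning.alpha.graders.run(
    grader={
        "type": "string_check",
        "name": "exact-match",
        "input": "{{sample.output_text}}",
        "reference": "42",
        "operation": "eq",
    },
    model_sample="42",
)
print(result)
```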
@@ -90,16 +90,16 @@ def retrieve( self, fine_tuned_model_checkpoint: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["ascending", "descending"] | Omit = omit, + project_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> PermissionRetrieveResponse: """ **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). @@ -158,7 +158,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> PermissionDeleteResponse: """ **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). @@ -220,7 +220,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[PermissionCreateResponse, AsyncPage[PermissionCreateResponse]]: """ **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). @@ -258,16 +258,16 @@ async def retrieve( self, fine_tuned_model_checkpoint: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["ascending", "descending"] | Omit = omit, + project_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> PermissionRetrieveResponse: """ **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). @@ -326,7 +326,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> PermissionDeleteResponse: """ **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). diff --git a/src/openai/resources/fine_tuning/jobs/checkpoints.py b/src/openai/resources/fine_tuning/jobs/checkpoints.py index f86462e513..f65856f0c6 100644 --- a/src/openai/resources/fine_tuning/jobs/checkpoints.py +++ b/src/openai/resources/fine_tuning/jobs/checkpoints.py @@ -5,7 +5,7 @@ import httpx from .... 
import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -45,14 +45,14 @@ def list( self, fine_tuning_job_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[FineTuningJobCheckpoint]: """ List checkpoints for a fine-tuning job. @@ -116,14 +116,14 @@ def list( self, fine_tuning_job_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[FineTuningJobCheckpoint, AsyncCursorPage[FineTuningJobCheckpoint]]: """ List checkpoints for a fine-tuning job. diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index ee21cdd280..b292e057cf 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -8,7 +8,7 @@ import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from .checkpoints import ( @@ -63,19 +63,19 @@ def create( *, model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], training_file: str, - hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, - integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - method: job_create_params.Method | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - validation_file: Optional[str] | NotGiven = NOT_GIVEN, + hyperparameters: job_create_params.Hyperparameters | Omit = omit, + integrations: Optional[Iterable[job_create_params.Integration]] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + method: job_create_params.Method | Omit = omit, + seed: Optional[int] | Omit = omit, + suffix: Optional[str] | Omit = omit, + validation_file: Optional[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Creates a fine-tuning job which begins the process of creating a new model from @@ -186,7 +186,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Get info about a fine-tuning job. @@ -215,15 +215,15 @@ def retrieve( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[FineTuningJob]: """ List your organization's fine-tuning jobs @@ -273,7 +273,7 @@ def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Immediately cancel a fine-tune job. @@ -301,14 +301,14 @@ def list_events( self, fine_tuning_job_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[FineTuningJobEvent]: """ Get status updates for a fine-tuning job. @@ -356,7 +356,7 @@ def pause( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Pause a fine-tune job. @@ -389,7 +389,7 @@ def resume( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Resume a fine-tune job. 
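Against the `fine_tuning.jobs.create` signature above, only `model` and `training_file` remain required; every other field now carries the `omit` default. A minimal sketch (the file ID is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

# Optional fields (hyperparameters, seed, suffix, validation_file, ...)
# default to `omit` and stay out of the request body unless given.
job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-abc123",  # placeholder file ID
    suffix="demo",                # optional, sent because it is set
)
print(job.id, job.status)
```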
@@ -443,19 +443,19 @@ async def create( *, model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], training_file: str, - hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, - integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - method: job_create_params.Method | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - validation_file: Optional[str] | NotGiven = NOT_GIVEN, + hyperparameters: job_create_params.Hyperparameters | Omit = omit, + integrations: Optional[Iterable[job_create_params.Integration]] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + method: job_create_params.Method | Omit = omit, + seed: Optional[int] | Omit = omit, + suffix: Optional[str] | Omit = omit, + validation_file: Optional[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Creates a fine-tuning job which begins the process of creating a new model from @@ -566,7 +566,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Get info about a fine-tuning job. @@ -595,15 +595,15 @@ async def retrieve( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[FineTuningJob, AsyncCursorPage[FineTuningJob]]: """ List your organization's fine-tuning jobs @@ -653,7 +653,7 @@ async def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Immediately cancel a fine-tune job. @@ -681,14 +681,14 @@ def list_events( self, fine_tuning_job_id: str, *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + limit: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[FineTuningJobEvent, AsyncCursorPage[FineTuningJobEvent]]: """ Get status updates for a fine-tuning job. @@ -736,7 +736,7 @@ async def pause( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Pause a fine-tune job. @@ -769,7 +769,7 @@ async def resume( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FineTuningJob: """ Resume a fine-tune job. diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 17ec264b6a..9bb332230f 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -9,7 +9,7 @@ from .. import _legacy_response from ..types import image_edit_params, image_generate_params, image_create_variation_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes, SequenceNotStr +from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -48,17 +48,17 @@ def create_variation( self, *, image: FileTypes, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse: """Creates a variation of a given image. 
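Note that `timeout` deliberately keeps the `NotGiven` sentinel (now spelled `not_given`) instead of moving to `Omit`: here `None` is meaningful, disabling the timeout, while leaving the argument unset falls back to the client-level default. A sketch against `images.create_variation` above, assuming an illustrative local PNG:

```python
import httpx
from openai import OpenAI

client = OpenAI()

# timeout=None would disable the timeout entirely; leaving it unset
# (the `not_given` default) uses the client default instead.
variation = client.images.create_variation(
    image=open("otter.png", "rb"),  # illustrative local file
    timeout=httpx.Timeout(120.0),   # per-request override
)
print(variation.created)
```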
@@ -123,26 +123,25 @@ def edit( *, image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] - | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + input_fidelity: Optional[Literal["high", "low"]] | Omit = omit, + mask: FileTypes | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse: """Creates an edited or extended image given one or more source images and a prompt. @@ -169,9 +168,7 @@ def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - input_fidelity: Control how much effort the model will exert to match the style and features, - especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`. mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. 
If there are multiple images provided, @@ -237,25 +234,24 @@ def edit( image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, stream: Literal[True], - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] - | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + input_fidelity: Optional[Literal["high", "low"]] | Omit = omit, + mask: FileTypes | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[ImageEditStreamEvent]: """Creates an edited or extended image given one or more source images and a prompt. @@ -286,9 +282,7 @@ def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - input_fidelity: Control how much effort the model will exert to match the style and features, - especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`. mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. 
If there are multiple images provided, @@ -350,25 +344,24 @@ def edit( image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, stream: bool, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] - | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + input_fidelity: Optional[Literal["high", "low"]] | Omit = omit, + mask: FileTypes | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse | Stream[ImageEditStreamEvent]: """Creates an edited or extended image given one or more source images and a prompt. @@ -399,9 +392,7 @@ def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - input_fidelity: Control how much effort the model will exert to match the style and features, - especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`. mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. 
If there are multiple images provided, @@ -462,26 +453,25 @@ def edit( *, image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] - | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + input_fidelity: Optional[Literal["high", "low"]] | Omit = omit, + mask: FileTypes | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse | Stream[ImageEditStreamEvent]: body = deepcopy_minimal( { @@ -527,28 +517,28 @@ def generate( self, *, prompt: str, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + moderation: Optional[Literal["low", "auto"]] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] - | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + style: Optional[Literal["vivid", "natural"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse: """ Creates an image given a prompt. 
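The non-streaming `generate` overload above still carries the dall-e-3-era knobs (`quality="hd"`, `style`, `response_format`), all optional with the new `omit` default. A sketch with an illustrative prompt:

```python
from openai import OpenAI

client = OpenAI()

# Unset options default to `omit`; only the ones given are serialized.
img = client.images.generate(
    model="dall-e-3",
    prompt="a stained-glass hummingbird",  # illustrative prompt
    quality="hd",
    style="vivid",
    size="1792x1024",
    response_format="url",
)
print(img.data[0].url if img.data else None)
```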
@@ -638,27 +628,27 @@ def generate( *, prompt: str, stream: Literal[True], - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + moderation: Optional[Literal["low", "auto"]] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] - | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + | Omit = omit, + style: Optional[Literal["vivid", "natural"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[ImageGenStreamEvent]: """ Creates an image given a prompt. 
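The `stream: Literal[True]` overload above returns `Stream[ImageGenStreamEvent]`. A sketch, assuming `Stream` can be used as a context manager as elsewhere in this SDK and that the prompt is illustrative:

```python
from openai import OpenAI

client = OpenAI()

# partial_images asks for intermediate frames along the way.
with client.images.generate(
    model="gpt-image-1",
    prompt="a watercolor otter",  # illustrative prompt
    stream=True,
    partial_images=2,
) as stream:
    for event in stream:
        print(event.type)
```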
@@ -748,27 +738,27 @@ def generate( *, prompt: str, stream: bool, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + moderation: Optional[Literal["low", "auto"]] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] - | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + | Omit = omit, + style: Optional[Literal["vivid", "natural"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse | Stream[ImageGenStreamEvent]: """ Creates an image given a prompt. 
@@ -857,28 +847,28 @@ def generate( self, *, prompt: str, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + moderation: Optional[Literal["low", "auto"]] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] - | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + style: Optional[Literal["vivid", "natural"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse | Stream[ImageGenStreamEvent]: return self._post( "/images/generations", @@ -936,17 +926,17 @@ async def create_variation( self, *, image: FileTypes, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse: """Creates a variation of a given image. 
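The `create_variation` hunk above applies the same migration to the async image-variations endpoint, whose `size` choices (`256x256`/`512x512`/`1024x1024`) mark it as a DALL·E 2 era API. A sketch of a call, assuming a local `otter.png` exists and that the default `response_format` of `url` applies:

import asyncio
from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()
    with open("otter.png", "rb") as image:  # placeholder input file
        result = await client.images.create_variation(
            image=image,
            n=1,
            size="512x512",
        )
    # With response_format="url" (the default), each entry carries a URL.
    print(result.data[0].url)

asyncio.run(main())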
@@ -1011,26 +1001,25 @@ async def edit( *, image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] - | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + input_fidelity: Optional[Literal["high", "low"]] | Omit = omit, + mask: FileTypes | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse: """Creates an edited or extended image given one or more source images and a prompt. @@ -1057,9 +1046,7 @@ async def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - input_fidelity: Control how much effort the model will exert to match the style and features, - especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`. mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. 
If there are multiple images provided, @@ -1125,25 +1112,24 @@ async def edit( image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, stream: Literal[True], - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] - | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + input_fidelity: Optional[Literal["high", "low"]] | Omit = omit, + mask: FileTypes | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[ImageEditStreamEvent]: """Creates an edited or extended image given one or more source images and a prompt. @@ -1174,9 +1160,7 @@ async def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - input_fidelity: Control how much effort the model will exert to match the style and features, - especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`. mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. 
If there are multiple images provided, @@ -1238,25 +1222,24 @@ async def edit( image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, stream: bool, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] - | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + input_fidelity: Optional[Literal["high", "low"]] | Omit = omit, + mask: FileTypes | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]: """Creates an edited or extended image given one or more source images and a prompt. @@ -1287,9 +1270,7 @@ async def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - input_fidelity: Control how much effort the model will exert to match the style and features, - especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`. mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. 
If there are multiple images provided, @@ -1350,26 +1331,25 @@ async def edit( *, image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] - | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + input_fidelity: Optional[Literal["high", "low"]] | Omit = omit, + mask: FileTypes | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]: body = deepcopy_minimal( { @@ -1415,28 +1395,28 @@ async def generate( self, *, prompt: str, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + moderation: Optional[Literal["low", "auto"]] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] - | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + style: Optional[Literal["vivid", "natural"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse: """ Creates an image given a prompt. 
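The `edit` overloads in the preceding hunks also pick up a docs correction: `input_fidelity` is now documented as supported on `gpt-image-1` and explicitly unsupported on `gpt-image-1-mini`. A sketch of an async edit exercising it; file names are placeholders:

import asyncio
from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()
    with open("portrait.png", "rb") as image, open("mask.png", "rb") as mask:
        result = await client.images.edit(
            image=image,
            mask=mask,  # fully transparent mask areas mark editable regions
            prompt="add a red scarf",
            model="gpt-image-1",    # input_fidelity requires gpt-image-1
            input_fidelity="high",  # preserve style and facial features
        )
    print(result.data[0].b64_json is not None)

asyncio.run(main())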
@@ -1526,27 +1506,27 @@ async def generate( *, prompt: str, stream: Literal[True], - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + moderation: Optional[Literal["low", "auto"]] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] - | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + | Omit = omit, + style: Optional[Literal["vivid", "natural"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[ImageGenStreamEvent]: """ Creates an image given a prompt. 
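The async `generate` overloads mirror the sync ones, returning `AsyncStream[ImageGenStreamEvent]` when `stream=True`. A sketch (prompt and printed field are illustrative):

import asyncio
from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()
    stream = await client.images.generate(
        prompt="pixel-art spaceship",
        model="gpt-image-1",
        stream=True,
        partial_images=1,
    )
    async for event in stream:
        print(event.type)

asyncio.run(main())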
@@ -1636,27 +1616,27 @@ async def generate( *, prompt: str, stream: bool, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + moderation: Optional[Literal["low", "auto"]] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] - | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + | Omit = omit, + style: Optional[Literal["vivid", "natural"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]: """ Creates an image given a prompt. 
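One asymmetry worth noting across every signature in this diff: body parameters move to `Omit`/`omit`, but `timeout` keeps `NotGiven`/`not_given`. The diff doesn't say why, but the signatures suggest the reason: `timeout=None` is itself a meaningful value (assuming it propagates to httpx, where `None` disables timeouts), so "not provided" needs a sentinel distinct from `None`. A sketch of the three caller intents:

import httpx
from openai import OpenAI

client = OpenAI(timeout=60.0)  # client-level default

# 1. Say nothing: timeout stays not_given and the client default applies.
client.models.list()

# 2. Override a single request with a granular httpx.Timeout.
client.models.list(timeout=httpx.Timeout(120.0, connect=5.0))

# 3. Pass None explicitly, which is a real value rather than "unset".
client.models.list(timeout=None)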
@@ -1745,28 +1725,28 @@ async def generate( self, *, prompt: str, - background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - output_compression: Optional[int] | NotGiven = NOT_GIVEN, - output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - partial_images: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit, + model: Union[str, ImageModel, None] | Omit = omit, + moderation: Optional[Literal["low", "auto"]] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit, + response_format: Optional[Literal["url", "b64_json"]] | Omit = omit, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] - | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + style: Optional[Literal["vivid", "natural"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]: return await self._post( "/images/generations", diff --git a/src/openai/resources/models.py b/src/openai/resources/models.py index a9693a6b0a..a8f7691055 100644 --- a/src/openai/resources/models.py +++ b/src/openai/resources/models.py @@ -5,7 +5,7 @@ import httpx from .. 
import _legacy_response -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._types import Body, Query, Headers, NotGiven, not_given from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper @@ -49,7 +49,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Model: """ Retrieves a model instance, providing basic information about the model such as @@ -82,7 +82,7 @@ def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncPage[Model]: """ Lists the currently available models, and provides basic information about each @@ -106,7 +106,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModelDeleted: """Delete a fine-tuned model. @@ -162,7 +162,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Model: """ Retrieves a model instance, providing basic information about the model such as @@ -195,7 +195,7 @@ def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[Model, AsyncPage[Model]]: """ Lists the currently available models, and provides basic information about each @@ -219,7 +219,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModelDeleted: """Delete a fine-tuned model. diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py index 91c0df4358..5f378f71e7 100644 --- a/src/openai/resources/moderations.py +++ b/src/openai/resources/moderations.py @@ -8,7 +8,7 @@ from .. import _legacy_response from ..types import moderation_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -45,13 +45,13 @@ def create( self, *, input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]], - model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, + model: Union[str, ModerationModel] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModerationCreateResponse: """Classifies if text and/or image inputs are potentially harmful. @@ -115,13 +115,13 @@ async def create( self, *, input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]], - model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, + model: Union[str, ModerationModel] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModerationCreateResponse: """Classifies if text and/or image inputs are potentially harmful. diff --git a/src/openai/resources/realtime/__init__.py b/src/openai/resources/realtime/__init__.py index 7a41de8648..c11841017f 100644 --- a/src/openai/resources/realtime/__init__.py +++ b/src/openai/resources/realtime/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .calls import ( + Calls, + AsyncCalls, + CallsWithRawResponse, + AsyncCallsWithRawResponse, + CallsWithStreamingResponse, + AsyncCallsWithStreamingResponse, +) from .realtime import ( Realtime, AsyncRealtime, @@ -24,6 +32,12 @@ "AsyncClientSecretsWithRawResponse", "ClientSecretsWithStreamingResponse", "AsyncClientSecretsWithStreamingResponse", + "Calls", + "AsyncCalls", + "CallsWithRawResponse", + "AsyncCallsWithRawResponse", + "CallsWithStreamingResponse", + "AsyncCallsWithStreamingResponse", "Realtime", "AsyncRealtime", "RealtimeWithRawResponse", diff --git a/src/openai/resources/realtime/calls.py b/src/openai/resources/realtime/calls.py new file mode 100644 index 0000000000..a8c4761717 --- /dev/null +++ b/src/openai/resources/realtime/calls.py @@ -0,0 +1,742 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal + +import httpx + +from ... 
import _legacy_response +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + StreamedBinaryAPIResponse, + AsyncStreamedBinaryAPIResponse, + to_streamed_response_wrapper, + async_to_streamed_response_wrapper, + to_custom_streamed_response_wrapper, + async_to_custom_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.realtime import ( + call_refer_params, + call_accept_params, + call_create_params, + call_reject_params, +) +from ...types.responses.response_prompt_param import ResponsePromptParam +from ...types.realtime.realtime_truncation_param import RealtimeTruncationParam +from ...types.realtime.realtime_audio_config_param import RealtimeAudioConfigParam +from ...types.realtime.realtime_tools_config_param import RealtimeToolsConfigParam +from ...types.realtime.realtime_tracing_config_param import RealtimeTracingConfigParam +from ...types.realtime.realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam +from ...types.realtime.realtime_session_create_request_param import RealtimeSessionCreateRequestParam + +__all__ = ["Calls", "AsyncCalls"] + + +class Calls(SyncAPIResource): + @cached_property + def with_raw_response(self) -> CallsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return CallsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CallsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return CallsWithStreamingResponse(self) + + def create( + self, + *, + sdp: str, + session: RealtimeSessionCreateRequestParam | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> _legacy_response.HttpxBinaryResponseContent: + """ + Create a new Realtime API call over WebRTC and receive the SDP answer needed to + complete the peer connection. + + Args: + sdp: WebRTC Session Description Protocol (SDP) offer generated by the caller. + + session: Realtime session object configuration. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "application/sdp", **(extra_headers or {})} + return self._post( + "/realtime/calls", + body=maybe_transform( + { + "sdp": sdp, + "session": session, + }, + call_create_params.CallCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=_legacy_response.HttpxBinaryResponseContent, + ) + + def accept( + self, + call_id: str, + *, + type: Literal["realtime"], + audio: RealtimeAudioConfigParam | Omit = omit, + include: List[Literal["item.input_audio_transcription.logprobs"]] | Omit = omit, + instructions: str | Omit = omit, + max_output_tokens: Union[int, Literal["inf"]] | Omit = omit, + model: Union[ + str, + Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-realtime-preview-2025-06-03", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + "gpt-realtime-mini", + "gpt-realtime-mini-2025-10-06", + "gpt-audio-mini", + "gpt-audio-mini-2025-10-06", + ], + ] + | Omit = omit, + output_modalities: List[Literal["text", "audio"]] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + tool_choice: RealtimeToolChoiceConfigParam | Omit = omit, + tools: RealtimeToolsConfigParam | Omit = omit, + tracing: Optional[RealtimeTracingConfigParam] | Omit = omit, + truncation: RealtimeTruncationParam | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + Accept an incoming SIP call and configure the realtime session that will handle + it. + + Args: + type: The type of session to create. Always `realtime` for the Realtime API. + + audio: Configuration for input and output audio. + + include: Additional fields to include in server outputs. + + `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + + instructions: The default system instructions (i.e. system message) prepended to model calls. + This field allows the client to guide the model on desired responses. The model + can be instructed on response content and format, (e.g. "be extremely succinct", + "act friendly", "here are examples of good responses") and on audio behavior + (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + instructions are not guaranteed to be followed by the model, but they provide + guidance to the model on the desired behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + + max_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. 
Defaults to `inf`. + + model: The Realtime model used for this session. + + output_modalities: The set of modalities the model can respond with. It defaults to `["audio"]`, + indicating that the model will respond with audio plus a transcript. `["text"]` + can be used to make the model respond with text only. It is not possible to + request both `text` and `audio` at the same time. + + prompt: Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + + tool_choice: How the model chooses tools. Provide one of the string modes or force a specific + function/MCP tool. + + tools: Tools available to the model. + + tracing: Realtime API can write session traces to the + [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once + tracing is enabled for a session, the configuration cannot be modified. + + `auto` will create a trace for the session with default values for the workflow + name, group id, and metadata. + + truncation: Controls how the realtime conversation is truncated prior to model inference. + The default is `auto`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not call_id: + raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/realtime/calls/{call_id}/accept", + body=maybe_transform( + { + "type": type, + "audio": audio, + "include": include, + "instructions": instructions, + "max_output_tokens": max_output_tokens, + "model": model, + "output_modalities": output_modalities, + "prompt": prompt, + "tool_choice": tool_choice, + "tools": tools, + "tracing": tracing, + "truncation": truncation, + }, + call_accept_params.CallAcceptParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def hangup( + self, + call_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + End an active Realtime API call, whether it was initiated over SIP or WebRTC. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not call_id: + raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/realtime/calls/{call_id}/hangup", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def refer( + self, + call_id: str, + *, + target_uri: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + Transfer an active SIP call to a new destination using the SIP REFER verb. + + Args: + target_uri: URI that should appear in the SIP Refer-To header. Supports values like + `tel:+14155550123` or `sip:agent@example.com`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not call_id: + raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/realtime/calls/{call_id}/refer", + body=maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def reject( + self, + call_id: str, + *, + status_code: int | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + Decline an incoming SIP call by returning a SIP status code to the caller. + + Args: + status_code: SIP response code to send back to the caller. Defaults to `603` (Decline) when + omitted. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not call_id: + raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/realtime/calls/{call_id}/reject", + body=maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncCalls(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncCallsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncCallsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCallsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncCallsWithStreamingResponse(self) + + async def create( + self, + *, + sdp: str, + session: RealtimeSessionCreateRequestParam | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> _legacy_response.HttpxBinaryResponseContent: + """ + Create a new Realtime API call over WebRTC and receive the SDP answer needed to + complete the peer connection. + + Args: + sdp: WebRTC Session Description Protocol (SDP) offer generated by the caller. + + session: Realtime session object configuration. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "application/sdp", **(extra_headers or {})} + return await self._post( + "/realtime/calls", + body=await async_maybe_transform( + { + "sdp": sdp, + "session": session, + }, + call_create_params.CallCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=_legacy_response.HttpxBinaryResponseContent, + ) + + async def accept( + self, + call_id: str, + *, + type: Literal["realtime"], + audio: RealtimeAudioConfigParam | Omit = omit, + include: List[Literal["item.input_audio_transcription.logprobs"]] | Omit = omit, + instructions: str | Omit = omit, + max_output_tokens: Union[int, Literal["inf"]] | Omit = omit, + model: Union[ + str, + Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-realtime-preview-2025-06-03", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + "gpt-realtime-mini", + "gpt-realtime-mini-2025-10-06", + "gpt-audio-mini", + "gpt-audio-mini-2025-10-06", + ], + ] + | Omit = omit, + output_modalities: List[Literal["text", "audio"]] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + tool_choice: RealtimeToolChoiceConfigParam | Omit = omit, + tools: RealtimeToolsConfigParam | Omit = omit, + tracing: Optional[RealtimeTracingConfigParam] | Omit = omit, + truncation: RealtimeTruncationParam | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + Accept an incoming SIP call and configure the realtime session that will handle + it. + + Args: + type: The type of session to create. Always `realtime` for the Realtime API. + + audio: Configuration for input and output audio. + + include: Additional fields to include in server outputs. 
+ + `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + + instructions: The default system instructions (i.e. system message) prepended to model calls. + This field allows the client to guide the model on desired responses. The model + can be instructed on response content and format, (e.g. "be extremely succinct", + "act friendly", "here are examples of good responses") and on audio behavior + (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + instructions are not guaranteed to be followed by the model, but they provide + guidance to the model on the desired behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + + max_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + + model: The Realtime model used for this session. + + output_modalities: The set of modalities the model can respond with. It defaults to `["audio"]`, + indicating that the model will respond with audio plus a transcript. `["text"]` + can be used to make the model respond with text only. It is not possible to + request both `text` and `audio` at the same time. + + prompt: Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + + tool_choice: How the model chooses tools. Provide one of the string modes or force a specific + function/MCP tool. + + tools: Tools available to the model. + + tracing: Realtime API can write session traces to the + [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once + tracing is enabled for a session, the configuration cannot be modified. + + `auto` will create a trace for the session with default values for the workflow + name, group id, and metadata. + + truncation: Controls how the realtime conversation is truncated prior to model inference. + The default is `auto`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not call_id: + raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/realtime/calls/{call_id}/accept", + body=await async_maybe_transform( + { + "type": type, + "audio": audio, + "include": include, + "instructions": instructions, + "max_output_tokens": max_output_tokens, + "model": model, + "output_modalities": output_modalities, + "prompt": prompt, + "tool_choice": tool_choice, + "tools": tools, + "tracing": tracing, + "truncation": truncation, + }, + call_accept_params.CallAcceptParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def hangup( + self, + call_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + End an active Realtime API call, whether it was initiated over SIP or WebRTC. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not call_id: + raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/realtime/calls/{call_id}/hangup", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def refer( + self, + call_id: str, + *, + target_uri: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + Transfer an active SIP call to a new destination using the SIP REFER verb. + + Args: + target_uri: URI that should appear in the SIP Refer-To header. Supports values like + `tel:+14155550123` or `sip:agent@example.com`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not call_id: + raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/realtime/calls/{call_id}/refer", + body=await async_maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def reject( + self, + call_id: str, + *, + status_code: int | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + Decline an incoming SIP call by returning a SIP status code to the caller. + + Args: + status_code: SIP response code to send back to the caller. Defaults to `603` (Decline) when + omitted. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not call_id: + raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/realtime/calls/{call_id}/reject", + body=await async_maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class CallsWithRawResponse: + def __init__(self, calls: Calls) -> None: + self._calls = calls + + self.create = _legacy_response.to_raw_response_wrapper( + calls.create, + ) + self.accept = _legacy_response.to_raw_response_wrapper( + calls.accept, + ) + self.hangup = _legacy_response.to_raw_response_wrapper( + calls.hangup, + ) + self.refer = _legacy_response.to_raw_response_wrapper( + calls.refer, + ) + self.reject = _legacy_response.to_raw_response_wrapper( + calls.reject, + ) + + +class AsyncCallsWithRawResponse: + def __init__(self, calls: AsyncCalls) -> None: + self._calls = calls + + self.create = _legacy_response.async_to_raw_response_wrapper( + calls.create, + ) + self.accept = _legacy_response.async_to_raw_response_wrapper( + calls.accept, + ) + self.hangup = _legacy_response.async_to_raw_response_wrapper( + calls.hangup, + ) + self.refer = _legacy_response.async_to_raw_response_wrapper( + calls.refer, + ) + self.reject = _legacy_response.async_to_raw_response_wrapper( + calls.reject, + ) + + +class CallsWithStreamingResponse: + def __init__(self, calls: Calls) -> None: + self._calls = calls + + self.create = to_custom_streamed_response_wrapper( + calls.create, + StreamedBinaryAPIResponse, + ) + self.accept = to_streamed_response_wrapper( + calls.accept, + ) + self.hangup = to_streamed_response_wrapper( + calls.hangup, + ) + self.refer = to_streamed_response_wrapper( + calls.refer, + ) + self.reject = to_streamed_response_wrapper( + calls.reject, + ) + + +class AsyncCallsWithStreamingResponse: + def __init__(self, calls: AsyncCalls) -> None: + self._calls = calls + + self.create = async_to_custom_streamed_response_wrapper( + calls.create, + AsyncStreamedBinaryAPIResponse, + ) + self.accept = async_to_streamed_response_wrapper( + calls.accept, + ) + self.hangup = async_to_streamed_response_wrapper( + calls.hangup, + ) + self.refer = async_to_streamed_response_wrapper( + calls.refer, + ) + self.reject = async_to_streamed_response_wrapper( + calls.reject, + ) diff --git a/src/openai/resources/realtime/client_secrets.py b/src/openai/resources/realtime/client_secrets.py index a79460746d..5ceba7bef1 100644 --- a/src/openai/resources/realtime/client_secrets.py +++ b/src/openai/resources/realtime/client_secrets.py @@ -5,7 +5,7 @@ import httpx from ... 
import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -40,14 +40,14 @@ def with_streaming_response(self) -> ClientSecretsWithStreamingResponse: def create( self, *, - expires_after: client_secret_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - session: client_secret_create_params.Session | NotGiven = NOT_GIVEN, + expires_after: client_secret_create_params.ExpiresAfter | Omit = omit, + session: client_secret_create_params.Session | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ClientSecretCreateResponse: """ Create a Realtime client secret with an associated session configuration. @@ -108,14 +108,14 @@ def with_streaming_response(self) -> AsyncClientSecretsWithStreamingResponse: async def create( self, *, - expires_after: client_secret_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - session: client_secret_create_params.Session | NotGiven = NOT_GIVEN, + expires_after: client_secret_create_params.ExpiresAfter | Omit = omit, + session: client_secret_create_params.Session | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ClientSecretCreateResponse: """ Create a Realtime client secret with an associated session configuration. 
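Taken together, the `calls` resource above and `client_secrets.create` cover the server side of a Realtime SIP integration: mint an ephemeral secret for the client, then steer the call with `refer`, `reject`, and `hangup`. A minimal sync sketch under stated assumptions; the `call_id` value is hypothetical (a real one arrives out of band, e.g. from a SIP webhook), the three call-control operations are independent and only shown back to back for brevity, and the async client mirrors the same surface:

```python
# Minimal sketch of the new call-control surface; call_id is hypothetical.
from openai import OpenAI

client = OpenAI()

# Mint an ephemeral client secret (both `expires_after` and `session`
# are optional per the signatures above, so a bare call is valid).
secret = client.realtime.client_secrets.create()

call_id = "rtc_example123"  # hypothetical; delivered out of band

# Transfer the caller to a new destination via SIP REFER.
client.realtime.calls.refer(call_id, target_uri="tel:+14155550123")

# Decline an incoming call; status_code defaults to 603 (Decline) when omitted.
client.realtime.calls.reject(call_id, status_code=486)

# End an active call, whether it started over SIP or WebRTC.
client.realtime.calls.hangup(call_id)
```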
diff --git a/src/openai/resources/realtime/realtime.py b/src/openai/resources/realtime/realtime.py index 64fca72915..6e69258616 100644 --- a/src/openai/resources/realtime/realtime.py +++ b/src/openai/resources/realtime/realtime.py @@ -11,7 +11,15 @@ import httpx from pydantic import BaseModel -from ..._types import NOT_GIVEN, Query, Headers, NotGiven +from .calls import ( + Calls, + AsyncCalls, + CallsWithRawResponse, + AsyncCallsWithRawResponse, + CallsWithStreamingResponse, + AsyncCallsWithStreamingResponse, +) +from ..._types import Omit, Query, Headers, omit from ..._utils import ( is_azure_client, maybe_transform, @@ -56,6 +64,12 @@ class Realtime(SyncAPIResource): def client_secrets(self) -> ClientSecrets: return ClientSecrets(self._client) + @cached_property + def calls(self) -> Calls: + from ...lib._realtime import _Calls + + return _Calls(self._client) + @cached_property def with_raw_response(self) -> RealtimeWithRawResponse: """ @@ -78,7 +92,8 @@ def with_streaming_response(self) -> RealtimeWithStreamingResponse: def connect( self, *, - model: str, + call_id: str | Omit = omit, + model: str | Omit = omit, extra_query: Query = {}, extra_headers: Headers = {}, websocket_connection_options: WebsocketConnectionOptions = {}, @@ -99,6 +114,7 @@ def connect( extra_query=extra_query, extra_headers=extra_headers, websocket_connection_options=websocket_connection_options, + call_id=call_id, model=model, ) @@ -108,6 +124,12 @@ class AsyncRealtime(AsyncAPIResource): def client_secrets(self) -> AsyncClientSecrets: return AsyncClientSecrets(self._client) + @cached_property + def calls(self) -> AsyncCalls: + from ...lib._realtime import _AsyncCalls + + return _AsyncCalls(self._client) + @cached_property def with_raw_response(self) -> AsyncRealtimeWithRawResponse: """ @@ -130,7 +152,8 @@ def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse: def connect( self, *, - model: str, + call_id: str | Omit = omit, + model: str | Omit = omit, extra_query: Query = {}, extra_headers: Headers = {}, websocket_connection_options: WebsocketConnectionOptions = {}, @@ -151,6 +174,7 @@ def connect( extra_query=extra_query, extra_headers=extra_headers, websocket_connection_options=websocket_connection_options, + call_id=call_id, model=model, ) @@ -163,6 +187,10 @@ def __init__(self, realtime: Realtime) -> None: def client_secrets(self) -> ClientSecretsWithRawResponse: return ClientSecretsWithRawResponse(self._realtime.client_secrets) + @cached_property + def calls(self) -> CallsWithRawResponse: + return CallsWithRawResponse(self._realtime.calls) + class AsyncRealtimeWithRawResponse: def __init__(self, realtime: AsyncRealtime) -> None: @@ -172,6 +200,10 @@ def __init__(self, realtime: AsyncRealtime) -> None: def client_secrets(self) -> AsyncClientSecretsWithRawResponse: return AsyncClientSecretsWithRawResponse(self._realtime.client_secrets) + @cached_property + def calls(self) -> AsyncCallsWithRawResponse: + return AsyncCallsWithRawResponse(self._realtime.calls) + class RealtimeWithStreamingResponse: def __init__(self, realtime: Realtime) -> None: @@ -181,6 +213,10 @@ def __init__(self, realtime: Realtime) -> None: def client_secrets(self) -> ClientSecretsWithStreamingResponse: return ClientSecretsWithStreamingResponse(self._realtime.client_secrets) + @cached_property + def calls(self) -> CallsWithStreamingResponse: + return CallsWithStreamingResponse(self._realtime.calls) + class AsyncRealtimeWithStreamingResponse: def __init__(self, realtime: AsyncRealtime) -> None: @@ -190,6 +226,10 @@ def 
__init__(self, realtime: AsyncRealtime) -> None: def client_secrets(self) -> AsyncClientSecretsWithStreamingResponse: return AsyncClientSecretsWithStreamingResponse(self._realtime.client_secrets) + @cached_property + def calls(self) -> AsyncCallsWithStreamingResponse: + return AsyncCallsWithStreamingResponse(self._realtime.calls) + class AsyncRealtimeConnection: """Represents a live websocket connection to the Realtime API""" @@ -290,12 +330,14 @@ def __init__( self, *, client: AsyncOpenAI, - model: str, + call_id: str | Omit = omit, + model: str | Omit = omit, extra_query: Query, extra_headers: Headers, websocket_connection_options: WebsocketConnectionOptions, ) -> None: self.__client = client + self.__call_id = call_id self.__model = model self.__connection: AsyncRealtimeConnection | None = None self.__extra_query = extra_query @@ -323,13 +365,20 @@ async def __aenter__(self) -> AsyncRealtimeConnection: extra_query = self.__extra_query await self.__client._refresh_api_key() auth_headers = self.__client.auth_headers + extra_query = self.__extra_query + if self.__call_id is not omit: + extra_query = {**extra_query, "call_id": self.__call_id} if is_async_azure_client(self.__client): - url, auth_headers = await self.__client._configure_realtime(self.__model, extra_query) + model = self.__model + if not model: + raise OpenAIError("`model` is required for Azure Realtime API") + else: + url, auth_headers = await self.__client._configure_realtime(model, extra_query) else: url = self._prepare_url().copy_with( params={ **self.__client.base_url.params, - "model": self.__model, + **({"model": self.__model} if self.__model is not omit else {}), **extra_query, }, ) @@ -470,12 +519,14 @@ def __init__( self, *, client: OpenAI, - model: str, + call_id: str | Omit = omit, + model: str | Omit = omit, extra_query: Query, extra_headers: Headers, websocket_connection_options: WebsocketConnectionOptions, ) -> None: self.__client = client + self.__call_id = call_id self.__model = model self.__connection: RealtimeConnection | None = None self.__extra_query = extra_query @@ -503,13 +554,20 @@ def __enter__(self) -> RealtimeConnection: extra_query = self.__extra_query self.__client._refresh_api_key() auth_headers = self.__client.auth_headers + extra_query = self.__extra_query + if self.__call_id is not omit: + extra_query = {**extra_query, "call_id": self.__call_id} if is_azure_client(self.__client): - url, auth_headers = self.__client._configure_realtime(self.__model, extra_query) + model = self.__model + if not model: + raise OpenAIError("`model` is required for Azure Realtime API") + else: + url, auth_headers = self.__client._configure_realtime(model, extra_query) else: url = self._prepare_url().copy_with( params={ **self.__client.base_url.params, - "model": self.__model, + **({"model": self.__model} if self.__model is not omit else {}), **extra_query, }, ) @@ -557,7 +615,7 @@ def __init__(self, connection: RealtimeConnection) -> None: class RealtimeSessionResource(BaseRealtimeConnectionResource): - def update(self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN) -> None: + def update(self, *, session: session_update_event_param.Session, event_id: str | Omit = omit) -> None: """ Send this event to update the session’s configuration. 
The client may send this event at any time to update any field @@ -578,12 +636,7 @@ def update(self, *, session: session_update_event_param.Session, event_id: str | class RealtimeResponseResource(BaseRealtimeConnectionResource): - def create( - self, - *, - event_id: str | NotGiven = NOT_GIVEN, - response: RealtimeResponseCreateParamsParam | NotGiven = NOT_GIVEN, - ) -> None: + def create(self, *, event_id: str | Omit = omit, response: RealtimeResponseCreateParamsParam | Omit = omit) -> None: """ This event instructs the server to create a Response, which means triggering model inference. When in Server VAD mode, the server will create Responses @@ -618,7 +671,7 @@ def create( ) ) - def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None: + def cancel(self, *, event_id: str | Omit = omit, response_id: str | Omit = omit) -> None: """Send this event to cancel an in-progress response. The server will respond @@ -636,7 +689,7 @@ def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | Not class RealtimeInputAudioBufferResource(BaseRealtimeConnectionResource): - def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + def clear(self, *, event_id: str | Omit = omit) -> None: """Send this event to clear the audio bytes in the buffer. The server will @@ -646,7 +699,7 @@ def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id})) ) - def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + def commit(self, *, event_id: str | Omit = omit) -> None: """ Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically. @@ -656,7 +709,7 @@ def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id})) ) - def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + def append(self, *, audio: str, event_id: str | Omit = omit) -> None: """Send this event to append audio bytes to the input audio buffer. The audio @@ -688,7 +741,7 @@ def item(self) -> RealtimeConversationItemResource: class RealtimeConversationItemResource(BaseRealtimeConnectionResource): - def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + def delete(self, *, item_id: str, event_id: str | Omit = omit) -> None: """Send this event when you want to remove any item from the conversation history. 
@@ -704,11 +757,7 @@ def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: ) def create( - self, - *, - item: ConversationItemParam, - event_id: str | NotGiven = NOT_GIVEN, - previous_item_id: str | NotGiven = NOT_GIVEN, + self, *, item: ConversationItemParam, event_id: str | Omit = omit, previous_item_id: str | Omit = omit ) -> None: """ Add a new Item to the Conversation's context, including messages, function @@ -733,9 +782,7 @@ def create( ) ) - def truncate( - self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN - ) -> None: + def truncate(self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | Omit = omit) -> None: """Send this event to truncate a previous assistant message’s audio. The server @@ -765,7 +812,7 @@ def truncate( ) ) - def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + def retrieve(self, *, item_id: str, event_id: str | Omit = omit) -> None: """ Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD. The server will respond with a `conversation.item.retrieved` event, @@ -781,7 +828,7 @@ def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> Non class RealtimeOutputAudioBufferResource(BaseRealtimeConnectionResource): - def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + def clear(self, *, event_id: str | Omit = omit) -> None: """**WebRTC Only:** Emit to cut off the current audio response. This will trigger the server to @@ -801,9 +848,7 @@ def __init__(self, connection: AsyncRealtimeConnection) -> None: class AsyncRealtimeSessionResource(BaseAsyncRealtimeConnectionResource): - async def update( - self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN - ) -> None: + async def update(self, *, session: session_update_event_param.Session, event_id: str | Omit = omit) -> None: """ Send this event to update the session’s configuration. The client may send this event at any time to update any field @@ -825,10 +870,7 @@ async def update( class AsyncRealtimeResponseResource(BaseAsyncRealtimeConnectionResource): async def create( - self, - *, - event_id: str | NotGiven = NOT_GIVEN, - response: RealtimeResponseCreateParamsParam | NotGiven = NOT_GIVEN, + self, *, event_id: str | Omit = omit, response: RealtimeResponseCreateParamsParam | Omit = omit ) -> None: """ This event instructs the server to create a Response, which means triggering @@ -864,7 +906,7 @@ async def create( ) ) - async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None: + async def cancel(self, *, event_id: str | Omit = omit, response_id: str | Omit = omit) -> None: """Send this event to cancel an in-progress response. The server will respond @@ -882,7 +924,7 @@ async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str class AsyncRealtimeInputAudioBufferResource(BaseAsyncRealtimeConnectionResource): - async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + async def clear(self, *, event_id: str | Omit = omit) -> None: """Send this event to clear the audio bytes in the buffer. 
The server will @@ -892,7 +934,7 @@ async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id})) ) - async def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + async def commit(self, *, event_id: str | Omit = omit) -> None: """ Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically. @@ -902,7 +944,7 @@ async def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id})) ) - async def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + async def append(self, *, audio: str, event_id: str | Omit = omit) -> None: """Send this event to append audio bytes to the input audio buffer. The audio @@ -934,7 +976,7 @@ def item(self) -> AsyncRealtimeConversationItemResource: class AsyncRealtimeConversationItemResource(BaseAsyncRealtimeConnectionResource): - async def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + async def delete(self, *, item_id: str, event_id: str | Omit = omit) -> None: """Send this event when you want to remove any item from the conversation history. @@ -950,11 +992,7 @@ async def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> ) async def create( - self, - *, - item: ConversationItemParam, - event_id: str | NotGiven = NOT_GIVEN, - previous_item_id: str | NotGiven = NOT_GIVEN, + self, *, item: ConversationItemParam, event_id: str | Omit = omit, previous_item_id: str | Omit = omit ) -> None: """ Add a new Item to the Conversation's context, including messages, function @@ -980,7 +1018,7 @@ async def create( ) async def truncate( - self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN + self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | Omit = omit ) -> None: """Send this event to truncate a previous assistant message’s audio. @@ -1011,7 +1049,7 @@ async def truncate( ) ) - async def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + async def retrieve(self, *, item_id: str, event_id: str | Omit = omit) -> None: """ Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD. The server will respond with a `conversation.item.retrieved` event, @@ -1027,7 +1065,7 @@ async def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) class AsyncRealtimeOutputAudioBufferResource(BaseAsyncRealtimeConnectionResource): - async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + async def clear(self, *, event_id: str | Omit = omit) -> None: """**WebRTC Only:** Emit to cut off the current audio response. 
This will trigger the server to diff --git a/src/openai/resources/responses/__init__.py b/src/openai/resources/responses/__init__.py index ad19218b01..51d318ad8d 100644 --- a/src/openai/resources/responses/__init__.py +++ b/src/openai/resources/responses/__init__.py @@ -16,6 +16,14 @@ InputItemsWithStreamingResponse, AsyncInputItemsWithStreamingResponse, ) +from .input_tokens import ( + InputTokens, + AsyncInputTokens, + InputTokensWithRawResponse, + AsyncInputTokensWithRawResponse, + InputTokensWithStreamingResponse, + AsyncInputTokensWithStreamingResponse, +) __all__ = [ "InputItems", @@ -24,6 +32,12 @@ "AsyncInputItemsWithRawResponse", "InputItemsWithStreamingResponse", "AsyncInputItemsWithStreamingResponse", + "InputTokens", + "AsyncInputTokens", + "InputTokensWithRawResponse", + "AsyncInputTokensWithRawResponse", + "InputTokensWithStreamingResponse", + "AsyncInputTokensWithStreamingResponse", "Responses", "AsyncResponses", "ResponsesWithRawResponse", diff --git a/src/openai/resources/responses/input_items.py b/src/openai/resources/responses/input_items.py index 9f3ef637ce..3311bfe10a 100644 --- a/src/openai/resources/responses/input_items.py +++ b/src/openai/resources/responses/input_items.py @@ -8,7 +8,7 @@ import httpx from ... import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -46,16 +46,16 @@ def list( self, response_id: str, *, - after: str | NotGiven = NOT_GIVEN, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + include: List[ResponseIncludable] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[ResponseItem]: """ Returns a list of input items for a given response. @@ -130,16 +130,16 @@ def list( self, response_id: str, *, - after: str | NotGiven = NOT_GIVEN, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + include: List[ResponseIncludable] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[ResponseItem, AsyncCursorPage[ResponseItem]]: """ Returns a list of input items for a given response. 
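The realtime.py changes above make `connect()` usable for attaching to an existing call: when `call_id` is supplied it is merged into the websocket query string, `model` becomes optional (Azure clients still require it and raise `OpenAIError` when it is missing), and the connection-scoped event helpers switch to the `Omit` sentinel for optional fields. A sketch, assuming a hypothetical call ID:

```python
# Sketch: attach a websocket to an already-established Realtime call.
from openai import OpenAI

client = OpenAI()

# call_id is hypothetical; with call_id given, `model` may be omitted.
with client.realtime.connect(call_id="rtc_example123") as connection:
    # Optional fields such as event_id can now simply be left off.
    connection.response.create()

    # Iterate server events until the response finishes.
    for event in connection:
        if event.type == "response.done":
            break
```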
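The `input_items.list` migration above is otherwise signature-compatible, and the new `input_tokens.py` file added below introduces `responses.input_tokens.count` for pre-flight token counting. A short sketch of both; the IDs are hypothetical, and since this diff does not show the field names on `InputTokenCountResponse`, the result is printed whole:

```python
from openai import OpenAI

client = OpenAI()

# Page through the input items of a stored response (ID is hypothetical).
for item in client.responses.input_items.list("resp_example123", order="asc", limit=20):
    print(item.type)

# Count input tokens without creating a response (new endpoint below).
count = client.responses.input_tokens.count(
    model="gpt-4o",
    input="Write a haiku about version bumps.",
)
print(count)
```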
diff --git a/src/openai/resources/responses/input_tokens.py b/src/openai/resources/responses/input_tokens.py new file mode 100644 index 0000000000..0f47955fe4 --- /dev/null +++ b/src/openai/resources/responses/input_tokens.py @@ -0,0 +1,309 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable, Optional +from typing_extensions import Literal + +import httpx + +from ... import _legacy_response +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ..._base_client import make_request_options +from ...types.responses import input_token_count_params +from ...types.responses.tool_param import ToolParam +from ...types.shared_params.reasoning import Reasoning +from ...types.responses.response_input_item_param import ResponseInputItemParam +from ...types.responses.input_token_count_response import InputTokenCountResponse + +__all__ = ["InputTokens", "AsyncInputTokens"] + + +class InputTokens(SyncAPIResource): + @cached_property + def with_raw_response(self) -> InputTokensWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return InputTokensWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> InputTokensWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return InputTokensWithStreamingResponse(self) + + def count( + self, + *, + conversation: Optional[input_token_count_params.Conversation] | Omit = omit, + input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit, + instructions: Optional[str] | Omit = omit, + model: Optional[str] | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + text: Optional[input_token_count_params.Text] | Omit = omit, + tool_choice: Optional[input_token_count_params.ToolChoice] | Omit = omit, + tools: Optional[Iterable[ToolParam]] | Omit = omit, + truncation: Literal["auto", "disabled"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> InputTokenCountResponse: + """ + Get input token counts + + Args: + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. 
+ + input: Text, image, or file inputs to the model, used to generate a response + + instructions: A system (or developer) message inserted into the model's context. When used + along with `previous_response_id`, the instructions from a previous response + will not be carried over to the next response. This makes it simple to swap out + system (or developer) messages in new responses. + + model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + + reasoning: **gpt-5 and o-series models only** Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter. + + truncation: The truncation strategy to use for the model response. - `auto`: If the input to + this Response exceeds the model's context window size, the model will truncate + the response to fit the context window by dropping items from the beginning of + the conversation. - `disabled` (default): If the input size will exceed the + context window size for a model, the request will fail with a 400 error. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/responses/input_tokens", + body=maybe_transform( + { + "conversation": conversation, + "input": input, + "instructions": instructions, + "model": model, + "parallel_tool_calls": parallel_tool_calls, + "previous_response_id": previous_response_id, + "reasoning": reasoning, + "text": text, + "tool_choice": tool_choice, + "tools": tools, + "truncation": truncation, + }, + input_token_count_params.InputTokenCountParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=InputTokenCountResponse, + ) + + +class AsyncInputTokens(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncInputTokensWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncInputTokensWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncInputTokensWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncInputTokensWithStreamingResponse(self) + + async def count( + self, + *, + conversation: Optional[input_token_count_params.Conversation] | Omit = omit, + input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit, + instructions: Optional[str] | Omit = omit, + model: Optional[str] | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + text: Optional[input_token_count_params.Text] | Omit = omit, + tool_choice: Optional[input_token_count_params.ToolChoice] | Omit = omit, + tools: Optional[Iterable[ToolParam]] | Omit = omit, + truncation: Literal["auto", "disabled"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> InputTokenCountResponse: + """ + Get input token counts + + Args: + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + + input: Text, image, or file inputs to the model, used to generate a response + + instructions: A system (or developer) message inserted into the model's context. When used + along with `previous_response_id`, the instructions from a previous response + will not be carried over to the next response. This makes it simple to swap out + system (or developer) messages in new responses. + + model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + + reasoning: **gpt-5 and o-series models only** Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. 
See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter. + + truncation: The truncation strategy to use for the model response. - `auto`: If the input to + this Response exceeds the model's context window size, the model will truncate + the response to fit the context window by dropping items from the beginning of + the conversation. - `disabled` (default): If the input size will exceed the + context window size for a model, the request will fail with a 400 error. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/responses/input_tokens", + body=await async_maybe_transform( + { + "conversation": conversation, + "input": input, + "instructions": instructions, + "model": model, + "parallel_tool_calls": parallel_tool_calls, + "previous_response_id": previous_response_id, + "reasoning": reasoning, + "text": text, + "tool_choice": tool_choice, + "tools": tools, + "truncation": truncation, + }, + input_token_count_params.InputTokenCountParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=InputTokenCountResponse, + ) + + +class InputTokensWithRawResponse: + def __init__(self, input_tokens: InputTokens) -> None: + self._input_tokens = input_tokens + + self.count = _legacy_response.to_raw_response_wrapper( + input_tokens.count, + ) + + +class AsyncInputTokensWithRawResponse: + def __init__(self, input_tokens: AsyncInputTokens) -> None: + self._input_tokens = input_tokens + + self.count = _legacy_response.async_to_raw_response_wrapper( + input_tokens.count, + ) + + +class InputTokensWithStreamingResponse: + def __init__(self, input_tokens: InputTokens) -> None: + self._input_tokens = input_tokens + + self.count = to_streamed_response_wrapper( + input_tokens.count, + ) + + +class AsyncInputTokensWithStreamingResponse: + def __init__(self, input_tokens: AsyncInputTokens) -> None: + self._input_tokens = input_tokens + + self.count = async_to_streamed_response_wrapper( + input_tokens.count, + ) diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 8acdb10b51..439cf8d3ad 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -9,7 +9,7 @@ import httpx from ... 
import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._types import NOT_GIVEN, Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given from ..._utils import is_given, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -24,6 +24,14 @@ ) from ..._streaming import Stream, AsyncStream from ...lib._tools import PydanticFunctionTool, ResponsesPydanticFunctionTool +from .input_tokens import ( + InputTokens, + AsyncInputTokens, + InputTokensWithRawResponse, + AsyncInputTokensWithRawResponse, + InputTokensWithStreamingResponse, + AsyncInputTokensWithStreamingResponse, +) from ..._base_client import make_request_options from ...types.responses import response_create_params, response_retrieve_params from ...lib._parsing._responses import ( @@ -52,6 +60,10 @@ class Responses(SyncAPIResource): def input_items(self) -> InputItems: return InputItems(self._client) + @cached_property + def input_tokens(self) -> InputTokens: + return InputTokens(self._client) + @cached_property def with_raw_response(self) -> ResponsesWithRawResponse: """ @@ -75,39 +87,39 @@ def with_streaming_response(self) -> ResponsesWithStreamingResponse: def create( self, *, - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit 
= omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response: """Creates a model response. @@ -315,38 +327,38 @@ def create( self, *, stream: Literal[True], - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + 
prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[ResponseStreamEvent]: """Creates a model response. @@ -554,38 +566,38 @@ def create( self, *, stream: bool, - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: 
Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response | Stream[ResponseStreamEvent]: """Creates a model response. @@ -791,39 +803,39 @@ def create( def create( self, *, - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + 
prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response | Stream[ResponseStreamEvent]: return self._post( "/responses", @@ -874,9 +886,9 @@ def stream( self, *, response_id: str, - text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + text_format: type[TextFormatT] | Omit = omit, + starting_after: int | Omit = omit, + tools: Iterable[ParseableToolParam] | Omit = omit, # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, @@ -890,31 +902,31 @@ def stream( *, input: Union[str, ResponseInputParam], model: ResponsesModel, - background: Optional[bool] | NotGiven = NOT_GIVEN, - text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + text_format: type[TextFormatT] | Omit = omit, + tools: Iterable[ParseableToolParam] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -926,35 +938,35 @@ def stream( def stream( self, *, - response_id: str | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - background: Optional[bool] | NotGiven = NOT_GIVEN, - text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + response_id: str | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + model: ResponsesModel | Omit = omit, + background: Optional[bool] | Omit = omit, + text_format: type[TextFormatT] | Omit = omit, + tools: Iterable[ParseableToolParam] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -1061,7 +1073,7 @@ def stream( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, - starting_after=NOT_GIVEN, + starting_after=omit, timeout=timeout, ), text_format=text_format, @@ -1072,35 +1084,35 @@ def stream( def parse( self, *, - text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + text_format: type[TextFormatT] | Omit = omit, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ParseableToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + 
user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1178,16 +1190,16 @@ def retrieve( self, response_id: str, *, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - include_obfuscation: bool | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, - stream: Literal[False] | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + include_obfuscation: bool | Omit = omit, + starting_after: int | Omit = omit, + stream: Literal[False] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response: ... @overload @@ -1196,8 +1208,8 @@ def retrieve( response_id: str, *, stream: Literal[True], - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1212,8 +1224,8 @@ def retrieve( response_id: str, *, stream: bool, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1228,8 +1240,8 @@ def retrieve( response_id: str, *, stream: bool = False, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1276,15 +1288,15 @@ def retrieve( response_id: str, *, stream: Literal[True], - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - include_obfuscation: bool | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + include_obfuscation: bool | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[ResponseStreamEvent]: """ Retrieves a model response with the given ID. @@ -1325,15 +1337,15 @@ def retrieve( response_id: str, *, stream: bool, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - include_obfuscation: bool | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + include_obfuscation: bool | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response | Stream[ResponseStreamEvent]: """ Retrieves a model response with the given ID. @@ -1372,16 +1384,16 @@ def retrieve( self, response_id: str, *, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - include_obfuscation: bool | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, - stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + include_obfuscation: bool | Omit = omit, + starting_after: int | Omit = omit, + stream: Literal[False] | Literal[True] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response | Stream[ResponseStreamEvent]: if not response_id: raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") @@ -1416,7 +1428,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Deletes a model response with the given ID. @@ -1450,7 +1462,7 @@ def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response: """Cancels a model response with the given ID. 
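Reviewer note on the hunks above (and everything below): the mechanical change is that every optional request parameter moves from the `NotGiven` annotation with default `NOT_GIVEN` to `Omit` with default `omit`, while `timeout` keeps its `NotGiven` annotation and only swaps the default to the lowercase `not_given` alias. The sentinel types themselves live in `openai._types` and are outside this diff, so the snippet below is only a minimal sketch of the idea with hypothetical internals; the point is that an omitted parameter never reaches the wire, whereas an explicit `None` is still serialized.

```python
# Minimal sketch of the omit-sentinel pattern (hypothetical internals;
# the real definitions live in openai._types and are not in this diff).
from typing import Any, Dict


class Omit:
    """Marker meaning 'argument not supplied; leave it out of the request'."""

    def __bool__(self) -> bool:
        return False  # truthiness checks treat omitted values as falsy

    def __repr__(self) -> str:
        return "omit"


omit = Omit()


def is_given(value: Any) -> bool:
    # The diff imports a helper with this name from openai._utils;
    # this reimplementation is illustrative only.
    return not isinstance(value, Omit)


def build_request_body(**params: Any) -> Dict[str, Any]:
    # Omitted params are dropped entirely; explicit None is kept,
    # which is why a dedicated sentinel is needed instead of None.
    return {key: value for key, value in params.items() if is_given(value)}


print(build_request_body(temperature=omit, truncation=None, model="gpt-4.1"))
# -> {'truncation': None, 'model': 'gpt-4.1'}
```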
@@ -1483,6 +1495,10 @@ class AsyncResponses(AsyncAPIResource): def input_items(self) -> AsyncInputItems: return AsyncInputItems(self._client) + @cached_property + def input_tokens(self) -> AsyncInputTokens: + return AsyncInputTokens(self._client) + @cached_property def with_raw_response(self) -> AsyncResponsesWithRawResponse: """ @@ -1506,39 +1522,39 @@ def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse: async def create( self, *, - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass 
additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response: """Creates a model response. @@ -1746,38 +1762,38 @@ async def create( self, *, stream: Literal[True], - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't 
available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[ResponseStreamEvent]: """Creates a model response. @@ -1985,38 +2001,38 @@ async def create( self, *, stream: bool, - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response | AsyncStream[ResponseStreamEvent]: """Creates a model response. @@ -2222,39 +2238,39 @@ async def create( async def create( self, *, - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = 
omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response | AsyncStream[ResponseStreamEvent]: return await self._post( "/responses", @@ -2305,9 +2321,9 @@ def stream( self, *, response_id: str, - text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + text_format: type[TextFormatT] | Omit = omit, + starting_after: int | Omit = omit, + tools: Iterable[ParseableToolParam] | Omit = omit, # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, @@ -2321,31 +2337,31 @@ def stream( *, input: Union[str, ResponseInputParam], model: ResponsesModel, - background: Optional[bool] | NotGiven = NOT_GIVEN, - text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + background: Optional[bool] | Omit = omit, + text_format: type[TextFormatT] | Omit = omit, + tools: Iterable[ParseableToolParam] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", 
"scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -2357,35 +2373,35 @@ def stream( def stream( self, *, - response_id: str | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - background: Optional[bool] | NotGiven = NOT_GIVEN, - text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + response_id: str | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + model: ResponsesModel | Omit = omit, + background: Optional[bool] | Omit = omit, + text_format: type[TextFormatT] | Omit = omit, + tools: Iterable[ParseableToolParam] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] 
| Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -2486,7 +2502,7 @@ def stream( starting_after=None, ) else: - if isinstance(response_id, NotGiven): + if isinstance(response_id, Omit): raise ValueError("response_id must be provided when streaming an existing response") api_request = self.retrieve( @@ -2508,35 +2524,35 @@ def stream( async def parse( self, *, - text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - background: Optional[bool] | NotGiven = NOT_GIVEN, - conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, - include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, - input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - model: ResponsesModel | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, - prompt_cache_key: str | NotGiven = NOT_GIVEN, - reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - safety_identifier: str | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + text_format: type[TextFormatT] | Omit = omit, + background: Optional[bool] | Omit = omit, + conversation: Optional[response_create_params.Conversation] | Omit = omit, + include: Optional[List[ResponseIncludable]] | Omit = omit, + input: Union[str, ResponseInputParam] | Omit = omit, + instructions: Optional[str] | Omit = omit, + max_output_tokens: Optional[int] | Omit = omit, + max_tool_calls: Optional[int] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + model: ResponsesModel | Omit = omit, + parallel_tool_calls: Optional[bool] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + prompt: Optional[ResponsePromptParam] | Omit = omit, + prompt_cache_key: str | Omit = 
omit, + reasoning: Optional[Reasoning] | Omit = omit, + safety_identifier: str | Omit = omit, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, + store: Optional[bool] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[response_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + text: ResponseTextConfigParam | Omit = omit, + tool_choice: response_create_params.ToolChoice | Omit = omit, + tools: Iterable[ParseableToolParam] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + truncation: Optional[Literal["auto", "disabled"]] | Omit = omit, + user: str | Omit = omit, + verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -2614,16 +2630,16 @@ async def retrieve( self, response_id: str, *, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - include_obfuscation: bool | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, - stream: Literal[False] | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + include_obfuscation: bool | Omit = omit, + starting_after: int | Omit = omit, + stream: Literal[False] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response: ... @overload @@ -2632,8 +2648,8 @@ async def retrieve( response_id: str, *, stream: Literal[True], - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -2648,8 +2664,8 @@ async def retrieve( response_id: str, *, stream: bool, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -2664,8 +2680,8 @@ async def retrieve( response_id: str, *, stream: bool = False, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -2712,15 +2728,15 @@ async def retrieve( response_id: str, *, stream: Literal[True], - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - include_obfuscation: bool | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + include_obfuscation: bool | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[ResponseStreamEvent]: """ Retrieves a model response with the given ID. @@ -2761,15 +2777,15 @@ async def retrieve( response_id: str, *, stream: bool, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - include_obfuscation: bool | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + include_obfuscation: bool | Omit = omit, + starting_after: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response | AsyncStream[ResponseStreamEvent]: """ Retrieves a model response with the given ID. @@ -2808,16 +2824,16 @@ async def retrieve( self, response_id: str, *, - include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, - include_obfuscation: bool | NotGiven = NOT_GIVEN, - starting_after: int | NotGiven = NOT_GIVEN, - stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | Omit = omit, + include_obfuscation: bool | Omit = omit, + starting_after: int | Omit = omit, + stream: Literal[False] | Literal[True] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response | AsyncStream[ResponseStreamEvent]: if not response_id: raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") @@ -2852,7 +2868,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Deletes a model response with the given ID. 
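Besides the sentinel swap, the `@@ -1483` hunk earlier wires a new `input_tokens` sub-resource onto `AsyncResponses` (mirroring the sync side), and later hunks expose it through the raw/streaming wrappers. Its methods are defined elsewhere, so the sketch below assumes the `count` method this sub-resource provides on current SDK versions:

```python
from openai import OpenAI

client = OpenAI()

# Estimate input-token usage for a prospective request without creating
# a response. (`count` is assumed here; its body is outside this diff.)
result = client.responses.input_tokens.count(
    model="gpt-4.1",
    input="How many input tokens would this request use?",
)
print(result.input_tokens)
```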
@@ -2886,7 +2902,7 @@ async def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Response: """Cancels a model response with the given ID. @@ -2938,6 +2954,10 @@ def __init__(self, responses: Responses) -> None: def input_items(self) -> InputItemsWithRawResponse: return InputItemsWithRawResponse(self._responses.input_items) + @cached_property + def input_tokens(self) -> InputTokensWithRawResponse: + return InputTokensWithRawResponse(self._responses.input_tokens) + class AsyncResponsesWithRawResponse: def __init__(self, responses: AsyncResponses) -> None: @@ -2963,6 +2983,10 @@ def __init__(self, responses: AsyncResponses) -> None: def input_items(self) -> AsyncInputItemsWithRawResponse: return AsyncInputItemsWithRawResponse(self._responses.input_items) + @cached_property + def input_tokens(self) -> AsyncInputTokensWithRawResponse: + return AsyncInputTokensWithRawResponse(self._responses.input_tokens) + class ResponsesWithStreamingResponse: def __init__(self, responses: Responses) -> None: @@ -2985,6 +3009,10 @@ def __init__(self, responses: Responses) -> None: def input_items(self) -> InputItemsWithStreamingResponse: return InputItemsWithStreamingResponse(self._responses.input_items) + @cached_property + def input_tokens(self) -> InputTokensWithStreamingResponse: + return InputTokensWithStreamingResponse(self._responses.input_tokens) + class AsyncResponsesWithStreamingResponse: def __init__(self, responses: AsyncResponses) -> None: @@ -3007,10 +3035,14 @@ def __init__(self, responses: AsyncResponses) -> None: def input_items(self) -> AsyncInputItemsWithStreamingResponse: return AsyncInputItemsWithStreamingResponse(self._responses.input_items) + @cached_property + def input_tokens(self) -> AsyncInputTokensWithStreamingResponse: + return AsyncInputTokensWithStreamingResponse(self._responses.input_tokens) + -def _make_tools(tools: Iterable[ParseableToolParam] | NotGiven) -> List[ToolParam] | NotGiven: +def _make_tools(tools: Iterable[ParseableToolParam] | Omit) -> List[ToolParam] | Omit: if not is_given(tools): - return NOT_GIVEN + return omit converted_tools: List[ToolParam] = [] for tool in tools: diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py index a32f4eb1d2..73eabd4083 100644 --- a/src/openai/resources/uploads/parts.py +++ b/src/openai/resources/uploads/parts.py @@ -7,7 +7,7 @@ import httpx from ... 
import _legacy_response -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from ..._types import Body, Query, Headers, NotGiven, FileTypes, not_given from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -49,7 +49,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> UploadPart: """ Adds a @@ -124,7 +124,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> UploadPart: """ Adds a diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index 8811bed48c..8953256f2a 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -22,7 +22,7 @@ AsyncPartsWithStreamingResponse, ) from ...types import FilePurpose, upload_create_params, upload_complete_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -73,7 +73,7 @@ def upload_file_chunked( purpose: FilePurpose, bytes: int | None = None, part_size: int | None = None, - md5: str | NotGiven = NOT_GIVEN, + md5: str | Omit = omit, ) -> Upload: """Splits a file into multiple 64MB parts and uploads them sequentially.""" @@ -87,7 +87,7 @@ def upload_file_chunked( mime_type: str, purpose: FilePurpose, part_size: int | None = None, - md5: str | NotGiven = NOT_GIVEN, + md5: str | Omit = omit, ) -> Upload: """Splits an in-memory file into multiple 64MB parts and uploads them sequentially.""" @@ -100,7 +100,7 @@ def upload_file_chunked( filename: str | None = None, bytes: int | None = None, part_size: int | None = None, - md5: str | NotGiven = NOT_GIVEN, + md5: str | Omit = omit, ) -> Upload: """Splits the given file into multiple parts and uploads them sequentially. @@ -170,13 +170,13 @@ def create( filename: str, mime_type: str, purpose: FilePurpose, - expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, + expires_after: upload_create_params.ExpiresAfter | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Upload: """ Creates an intermediate @@ -252,7 +252,7 @@ def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Upload: """Cancels the Upload. 
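For orientation, the `uploads` hunks above touch every step of the multipart upload flow (`create`, `parts.create`, `cancel`, and `complete` in the next hunk). A hedged end-to-end sketch with a placeholder file:

```python
from openai import OpenAI

client = OpenAI()

with open("training_data.jsonl", "rb") as f:  # placeholder file
    data = f.read()

# 1. Create the intermediate Upload object.
upload = client.uploads.create(
    bytes=len(data),
    filename="training_data.jsonl",
    mime_type="application/jsonl",
    purpose="fine-tune",
)

# 2. Add Parts (each at most 64 MB; a single part here for brevity).
part = client.uploads.parts.create(upload.id, data=data)

# 3. Complete the Upload. `md5` is optional and now defaults to `omit`.
completed = client.uploads.complete(upload.id, part_ids=[part.id])
print(completed.status)
```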
@@ -282,13 +282,13 @@ def complete( upload_id: str, *, part_ids: SequenceNotStr[str], - md5: str | NotGiven = NOT_GIVEN, + md5: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Upload: """ Completes the @@ -370,7 +370,7 @@ async def upload_file_chunked( purpose: FilePurpose, bytes: int | None = None, part_size: int | None = None, - md5: str | NotGiven = NOT_GIVEN, + md5: str | Omit = omit, ) -> Upload: """Splits a file into multiple 64MB parts and uploads them sequentially.""" @@ -384,7 +384,7 @@ async def upload_file_chunked( mime_type: str, purpose: FilePurpose, part_size: int | None = None, - md5: str | NotGiven = NOT_GIVEN, + md5: str | Omit = omit, ) -> Upload: """Splits an in-memory file into multiple 64MB parts and uploads them sequentially.""" @@ -397,7 +397,7 @@ async def upload_file_chunked( filename: str | None = None, bytes: int | None = None, part_size: int | None = None, - md5: str | NotGiven = NOT_GIVEN, + md5: str | Omit = omit, ) -> Upload: """Splits the given file into multiple parts and uploads them sequentially. @@ -478,13 +478,13 @@ async def create( filename: str, mime_type: str, purpose: FilePurpose, - expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, + expires_after: upload_create_params.ExpiresAfter | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Upload: """ Creates an intermediate @@ -560,7 +560,7 @@ async def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Upload: """Cancels the Upload. @@ -590,13 +590,13 @@ async def complete( upload_id: str, *, part_ids: SequenceNotStr[str], - md5: str | NotGiven = NOT_GIVEN, + md5: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Upload: """ Completes the diff --git a/src/openai/resources/vector_stores/file_batches.py b/src/openai/resources/vector_stores/file_batches.py index adf399d8de..0f989821de 100644 --- a/src/openai/resources/vector_stores/file_batches.py +++ b/src/openai/resources/vector_stores/file_batches.py @@ -12,7 +12,7 @@ from ... 
import _legacy_response from ...types import FileChunkingStrategyParam -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given from ..._utils import is_given, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -53,14 +53,14 @@ def create( vector_store_id: str, *, file_ids: SequenceNotStr[str], - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFileBatch: """ Create a vector store file batch. @@ -116,7 +116,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFileBatch: """ Retrieves a vector store file batch. @@ -153,7 +153,7 @@ def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFileBatch: """Cancel a vector store file batch. @@ -187,8 +187,8 @@ def create_and_poll( vector_store_id: str, *, file_ids: SequenceNotStr[str], - poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFileBatch: """Create a vector store batch and poll until all files have been processed.""" batch = self.create( @@ -208,17 +208,17 @@ def list_files( batch_id: str, *, vector_store_id: str, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + filter: Literal["in_progress", "completed", "failed", "cancelled"] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[VectorStoreFile]: """ Returns a list of vector store files in a batch. 
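The sync `FileBatches` changes above are again signature-only. As a usage sketch (placeholder IDs): create a batch from already-uploaded files, block until processing finishes via `create_and_poll`, then page through any failures with `list_files`:

```python
from openai import OpenAI

client = OpenAI()

batch = client.vector_stores.file_batches.create_and_poll(
    "vs_123",  # placeholder vector store ID
    file_ids=["file_abc", "file_def"],  # placeholder file IDs
)
print(batch.status, batch.file_counts)

# `filter` and the cursor params (`after`, `limit`, `order`) map onto the
# patched list_files signature; iteration auto-pages through the results.
for vs_file in client.vector_stores.file_batches.list_files(
    batch.id,
    vector_store_id="vs_123",
    filter="failed",
):
    print(vs_file.id, vs_file.last_error)
```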
@@ -282,7 +282,7 @@ def poll( batch_id: str, *, vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, ) -> VectorStoreFileBatch: """Wait for the given file batch to be processed. @@ -321,8 +321,8 @@ def upload_and_poll( files: Iterable[FileTypes], max_concurrency: int = 5, file_ids: SequenceNotStr[str] = [], - poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFileBatch: """Uploads the given files concurrently and then creates a vector store file batch. @@ -390,14 +390,14 @@ async def create( vector_store_id: str, *, file_ids: SequenceNotStr[str], - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFileBatch: """ Create a vector store file batch. @@ -453,7 +453,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFileBatch: """ Retrieves a vector store file batch. @@ -490,7 +490,7 @@ async def cancel( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFileBatch: """Cancel a vector store file batch. @@ -524,8 +524,8 @@ async def create_and_poll( vector_store_id: str, *, file_ids: SequenceNotStr[str], - poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFileBatch: """Create a vector store batch and poll until all files have been processed.""" batch = await self.create( @@ -545,17 +545,17 @@ def list_files( batch_id: str, *, vector_store_id: str, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + filter: Literal["in_progress", "completed", "failed", "cancelled"] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]: """ Returns a list of vector store files in a batch. @@ -619,7 +619,7 @@ async def poll( batch_id: str, *, vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, ) -> VectorStoreFileBatch: """Wait for the given file batch to be processed. @@ -658,8 +658,8 @@ async def upload_and_poll( files: Iterable[FileTypes], max_concurrency: int = 5, file_ids: SequenceNotStr[str] = [], - poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFileBatch: """Uploads the given files concurrently and then creates a vector store file batch. diff --git a/src/openai/resources/vector_stores/files.py b/src/openai/resources/vector_stores/files.py index 2c90bb7a1f..d2eb4e16ed 100644 --- a/src/openai/resources/vector_stores/files.py +++ b/src/openai/resources/vector_stores/files.py @@ -9,7 +9,7 @@ from ... import _legacy_response from ...types import FileChunkingStrategyParam -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given from ..._utils import is_given, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -50,14 +50,14 @@ def create( vector_store_id: str, *, file_id: str, - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFile: """ Create a vector store file by attaching a @@ -115,7 +115,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFile: """ Retrieves a vector store file. @@ -153,7 +153,7 @@ def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFile: """ Update attributes on a vector store file. 
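For illustration, a hedged sketch of the updated `update` signature in use (IDs and attribute keys are placeholders; the file-id-first calling convention follows the pattern of the other methods in this resource):

```python
from openai import OpenAI

client = OpenAI()

# Attribute values may be strings, numbers, or booleans, matching the
# Optional[Dict[str, Union[str, float, bool]]] annotation above.
vs_file = client.vector_stores.files.update(
    "file-abc123",             # file_id (placeholder)
    vector_store_id="vs_123",  # placeholder
    attributes={"category": "contracts", "year": 2024, "confidential": True},
)
print(vs_file.attributes)
```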
@@ -191,17 +191,17 @@ def list( self, vector_store_id: str, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + filter: Literal["in_progress", "completed", "failed", "cancelled"] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[VectorStoreFile]: """ Returns a list of vector store files. @@ -268,7 +268,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFileDeleted: """Delete a vector store file. @@ -304,9 +304,9 @@ def create_and_poll( file_id: str, *, vector_store_id: str, - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit, + poll_interval_ms: int | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFile: """Attach a file to the given vector store and wait for it to be processed.""" self.create( @@ -324,7 +324,7 @@ def poll( file_id: str, *, vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, ) -> VectorStoreFile: """Wait for the vector store file to finish processing. @@ -365,7 +365,7 @@ def upload( *, vector_store_id: str, file: FileTypes, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFile: """Upload a file to the `files` API and then attach it to the given vector store. @@ -380,9 +380,9 @@ def upload_and_poll( *, vector_store_id: str, file: FileTypes, - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit, + poll_interval_ms: int | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFile: """Add a file to a vector store and poll until processing is complete.""" file_obj = self._client.files.create(file=file, purpose="assistants") @@ -404,7 +404,7 @@ def content( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncPage[FileContentResponse]: """ Retrieve the parsed contents of a vector store file. 
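Putting the sync helpers together, a hedged example (the file name and IDs are placeholders) that uploads a document, waits for processing, then reads the parsed chunks back via `content`:

```python
from openai import OpenAI

client = OpenAI()

# Upload, attach, and block until processing finishes; the poll interval
# falls back to the server-suggested value when not given.
vs_file = client.vector_stores.files.upload_and_poll(
    vector_store_id="vs_123",  # placeholder
    file=open("handbook.pdf", "rb"),
    chunking_strategy={"type": "auto"},
)

if vs_file.status == "completed":
    for chunk in client.vector_stores.files.content(vs_file.id, vector_store_id="vs_123"):
        print(chunk.text)
```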
@@ -458,14 +458,14 @@ async def create( vector_store_id: str, *, file_id: str, - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFile: """ Create a vector store file by attaching a @@ -523,7 +523,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFile: """ Retrieves a vector store file. @@ -561,7 +561,7 @@ async def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFile: """ Update attributes on a vector store file. @@ -599,17 +599,17 @@ def list( self, vector_store_id: str, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + filter: Literal["in_progress", "completed", "failed", "cancelled"] | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]: """ Returns a list of vector store files. @@ -676,7 +676,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreFileDeleted: """Delete a vector store file. 
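The async surface mirrors the sync one, and the paginator returned by `list` supports `async for` directly. A brief sketch with placeholder IDs:

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()

    # The AsyncPaginator fetches subsequent cursor pages transparently.
    async for vs_file in client.vector_stores.files.list(
        vector_store_id="vs_123",  # placeholder
        filter="failed",
        order="desc",
    ):
        print(vs_file.id, vs_file.last_error)


asyncio.run(main())
```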
@@ -712,9 +712,9 @@ async def create_and_poll( file_id: str, *, vector_store_id: str, - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit, + poll_interval_ms: int | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFile: """Attach a file to the given vector store and wait for it to be processed.""" await self.create( @@ -732,7 +732,7 @@ async def poll( file_id: str, *, vector_store_id: str, - poll_interval_ms: int | NotGiven = NOT_GIVEN, + poll_interval_ms: int | Omit = omit, ) -> VectorStoreFile: """Wait for the vector store file to finish processing. @@ -773,7 +773,7 @@ async def upload( *, vector_store_id: str, file: FileTypes, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFile: """Upload a file to the `files` API and then attach it to the given vector store. @@ -790,9 +790,9 @@ async def upload_and_poll( *, vector_store_id: str, file: FileTypes, - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, + attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit, + poll_interval_ms: int | Omit = omit, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, ) -> VectorStoreFile: """Add a file to a vector store and poll until processing is complete.""" file_obj = await self._client.files.create(file=file, purpose="assistants") @@ -814,7 +814,7 @@ def content( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[FileContentResponse, AsyncPage[FileContentResponse]]: """ Retrieve the parsed contents of a vector store file. 
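What the sentinel buys over `None`: for parameters typed `Optional[...]`, an explicit `None` is serialized as JSON `null`, whereas `omit` (the default) keeps the key out of the request body entirely. A purely illustrative sketch; the import path mirrors the internal `_types` module these hunks import from, and the IDs are placeholders:

```python
from openai import OpenAI
from openai._types import omit  # internal module, shown only to make the sentinel concrete

client = OpenAI()

client.vector_stores.files.create(
    vector_store_id="vs_123",  # placeholder
    file_id="file-abc123",     # placeholder
    attributes=omit,           # equivalent to not passing the argument at all
)
```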
diff --git a/src/openai/resources/vector_stores/vector_stores.py b/src/openai/resources/vector_stores/vector_stores.py index 4f211ea25a..490e3e7fdb 100644 --- a/src/openai/resources/vector_stores/vector_stores.py +++ b/src/openai/resources/vector_stores/vector_stores.py @@ -23,7 +23,7 @@ vector_store_search_params, vector_store_update_params, ) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -78,17 +78,18 @@ def with_streaming_response(self) -> VectorStoresWithStreamingResponse: def create( self, *, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, - expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - file_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, + description: str | Omit = omit, + expires_after: vector_store_create_params.ExpiresAfter | Omit = omit, + file_ids: SequenceNotStr[str] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStore: """ Create a vector store. @@ -97,6 +98,9 @@ def create( chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + description: A description for the vector store. Can be used to describe the vector store's + purpose. + expires_after: The expiration policy for a vector store. file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -126,6 +130,7 @@ def create( body=maybe_transform( { "chunking_strategy": chunking_strategy, + "description": description, "expires_after": expires_after, "file_ids": file_ids, "metadata": metadata, @@ -148,7 +153,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStore: """ Retrieves a vector store. @@ -177,15 +182,15 @@ def update( self, vector_store_id: str, *, - expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, + expires_after: Optional[vector_store_update_params.ExpiresAfter] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + name: Optional[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStore: """ Modifies a vector store. @@ -232,16 +237,16 @@ def update( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncCursorPage[VectorStore]: """Returns a list of vector stores. @@ -303,7 +308,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreDeleted: """ Delete a vector store. @@ -333,16 +338,16 @@ def search( vector_store_id: str, *, query: Union[str, SequenceNotStr[str]], - filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, - max_num_results: int | NotGiven = NOT_GIVEN, - ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, - rewrite_query: bool | NotGiven = NOT_GIVEN, + filters: vector_store_search_params.Filters | Omit = omit, + max_num_results: int | Omit = omit, + ranking_options: vector_store_search_params.RankingOptions | Omit = omit, + rewrite_query: bool | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SyncPage[VectorStoreSearchResponse]: """ Search a vector store for relevant chunks based on a query and file attributes @@ -423,17 +428,18 @@ def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse: async def create( self, *, - chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, - expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - file_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | Omit = omit, + description: str | Omit = omit, + expires_after: vector_store_create_params.ExpiresAfter | Omit = omit, + file_ids: SequenceNotStr[str] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStore: """ Create a vector store. @@ -442,6 +448,9 @@ async def create( chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + description: A description for the vector store. Can be used to describe the vector store's + purpose. + expires_after: The expiration policy for a vector store. file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -471,6 +480,7 @@ async def create( body=await async_maybe_transform( { "chunking_strategy": chunking_strategy, + "description": description, "expires_after": expires_after, "file_ids": file_ids, "metadata": metadata, @@ -493,7 +503,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStore: """ Retrieves a vector store. @@ -522,15 +532,15 @@ async def update( self, vector_store_id: str, *, - expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, + expires_after: Optional[vector_store_update_params.ExpiresAfter] | Omit = omit, + metadata: Optional[Metadata] | Omit = omit, + name: Optional[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStore: """ Modifies a vector store. @@ -577,16 +587,16 @@ async def update( def list( self, *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + after: str | Omit = omit, + before: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[VectorStore, AsyncCursorPage[VectorStore]]: """Returns a list of vector stores. @@ -648,7 +658,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VectorStoreDeleted: """ Delete a vector store. 
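The new `description` parameter threads through both the sync and async `create` methods. A short end-to-end sketch on the async client (the store name, description, and query are illustrative), combined with `search`, which returns a paginator you iterate directly rather than await:

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()

    store = await client.vector_stores.create(
        name="support-kb",
        description="Internal support articles used by the helpdesk bot.",
    )

    # `search` returns an AsyncPaginator, so iterate it directly.
    async for hit in client.vector_stores.search(
        store.id,
        query="How do I reset a password?",
        max_num_results=3,
    ):
        print(hit.score, hit.filename)


asyncio.run(main())
```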
@@ -678,16 +688,16 @@ def search( vector_store_id: str, *, query: Union[str, SequenceNotStr[str]], - filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, - max_num_results: int | NotGiven = NOT_GIVEN, - ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, - rewrite_query: bool | NotGiven = NOT_GIVEN, + filters: vector_store_search_params.Filters | Omit = omit, + max_num_results: int | Omit = omit, + ranking_options: vector_store_search_params.RankingOptions | Omit = omit, + rewrite_query: bool | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncPaginator[VectorStoreSearchResponse, AsyncPage[VectorStoreSearchResponse]]: """ Search a vector store for relevant chunks based on a query and file attributes diff --git a/src/openai/resources/videos.py b/src/openai/resources/videos.py new file mode 100644 index 0000000000..4df5f02004 --- /dev/null +++ b/src/openai/resources/videos.py @@ -0,0 +1,847 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import TYPE_CHECKING, Mapping, cast +from typing_extensions import Literal, assert_never + +import httpx + +from .. import _legacy_response +from ..types import ( + VideoSize, + VideoModel, + VideoSeconds, + video_list_params, + video_remix_params, + video_create_params, + video_download_content_params, +) +from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given +from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + StreamedBinaryAPIResponse, + AsyncStreamedBinaryAPIResponse, + to_streamed_response_wrapper, + async_to_streamed_response_wrapper, + to_custom_streamed_response_wrapper, + async_to_custom_streamed_response_wrapper, +) +from ..pagination import SyncConversationCursorPage, AsyncConversationCursorPage +from ..types.video import Video +from .._base_client import AsyncPaginator, make_request_options +from .._utils._utils import is_given +from ..types.video_size import VideoSize +from ..types.video_model import VideoModel +from ..types.video_seconds import VideoSeconds +from ..types.video_delete_response import VideoDeleteResponse + +__all__ = ["Videos", "AsyncVideos"] + + +class Videos(SyncAPIResource): + @cached_property + def with_raw_response(self) -> VideosWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return VideosWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> VideosWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return VideosWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        prompt: str,
+        input_reference: FileTypes | Omit = omit,
+        model: VideoModel | Omit = omit,
+        seconds: VideoSeconds | Omit = omit,
+        size: VideoSize | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """
+        Create a video
+
+        Args:
+          prompt: Text prompt that describes the video to generate.
+
+          input_reference: Optional image reference that guides generation.
+
+          model: The video generation model to use. Defaults to `sora-2`.
+
+          seconds: Clip duration in seconds. Defaults to 4 seconds.
+
+          size: Output resolution formatted as width x height. Defaults to 720x1280.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "prompt": prompt,
+                "input_reference": input_reference,
+                "model": model,
+                "seconds": seconds,
+                "size": size,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
+        if files:
+            # It should be noted that the actual Content-Type header that will be
+            # sent to the server will contain a `boundary` parameter, e.g.
+            # multipart/form-data; boundary=---abc--
+            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(
+            "/videos",
+            body=maybe_transform(body, video_create_params.VideoCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+    def create_and_poll(
+        self,
+        *,
+        prompt: str,
+        input_reference: FileTypes | Omit = omit,
+        model: VideoModel | Omit = omit,
+        seconds: VideoSeconds | Omit = omit,
+        size: VideoSize | Omit = omit,
+        poll_interval_ms: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """Create a video and wait for it to be processed."""
+        video = self.create(
+            model=model,
+            prompt=prompt,
+            input_reference=input_reference,
+            seconds=seconds,
+            size=size,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return self.poll(
+            video.id,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    def poll(
+        self,
+        video_id: str,
+        *,
+        poll_interval_ms: int | Omit = omit,
+    ) -> Video:
+        """Wait for the video to finish processing.
+
+        Note: this will return even if generation failed; check
+        video.error and video.status to handle these cases.
+        """
+        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+        if is_given(poll_interval_ms):
+            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        while True:
+            response = self.with_raw_response.retrieve(
+                video_id,
+                extra_headers=headers,
+            )
+
+            video = response.parse()
+            if video.status == "in_progress" or video.status == "queued":
+                if not is_given(poll_interval_ms):
+                    from_header = response.headers.get("openai-poll-after-ms")
+                    if from_header is not None:
+                        poll_interval_ms = int(from_header)
+                    else:
+                        poll_interval_ms = 1000
+
+                self._sleep(poll_interval_ms / 1000)
+            elif video.status == "completed" or video.status == "failed":
+                return video
+            else:
+                if TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(video.status)
+                else:
+                    return video
+
+    def retrieve(
+        self,
+        video_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """
+        Retrieve a video
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        return self._get(
+            f"/videos/{video_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | Omit = omit,
+        limit: int | Omit = omit,
+        order: Literal["asc", "desc"] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> SyncConversationCursorPage[Video]:
+        """
+        List videos
+
+        Args:
+          after: Identifier for the last item from the previous pagination request
+
+          limit: Number of items to retrieve
+
+          order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
+              descending order.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get_api_list( + "/videos", + page=SyncConversationCursorPage[Video], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + }, + video_list_params.VideoListParams, + ), + ), + model=Video, + ) + + def delete( + self, + video_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> VideoDeleteResponse: + """ + Delete a video + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not video_id: + raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}") + return self._delete( + f"/videos/{video_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VideoDeleteResponse, + ) + + def download_content( + self, + video_id: str, + *, + variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> _legacy_response.HttpxBinaryResponseContent: + """Download video content + + Args: + variant: Which downloadable asset to return. + + Defaults to the MP4 video. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not video_id: + raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}") + extra_headers = {"Accept": "application/binary", **(extra_headers or {})} + return self._get( + f"/videos/{video_id}/content", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"variant": variant}, video_download_content_params.VideoDownloadContentParams), + ), + cast_to=_legacy_response.HttpxBinaryResponseContent, + ) + + def remix( + self, + video_id: str, + *, + prompt: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Video: + """ + Create a video remix + + Args: + prompt: Updated text prompt that directs the remix generation. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not video_id: + raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}") + return self._post( + f"/videos/{video_id}/remix", + body=maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Video, + ) + + +class AsyncVideos(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncVideosWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncVideosWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncVideosWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncVideosWithStreamingResponse(self) + + async def create( + self, + *, + prompt: str, + input_reference: FileTypes | Omit = omit, + model: VideoModel | Omit = omit, + seconds: VideoSeconds | Omit = omit, + size: VideoSize | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Video: + """ + Create a video + + Args: + prompt: Text prompt that describes the video to generate. + + input_reference: Optional image reference that guides generation. + + model: The video generation model to use. Defaults to `sora-2`. + + seconds: Clip duration in seconds. Defaults to 4 seconds. + + size: Output resolution formatted as width x height. Defaults to 720x1280. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "prompt": prompt, + "input_reference": input_reference, + "model": model, + "seconds": seconds, + "size": size, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]]) + if files: + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc--
+            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            "/videos",
+            body=await async_maybe_transform(body, video_create_params.VideoCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+    async def create_and_poll(
+        self,
+        *,
+        prompt: str,
+        input_reference: FileTypes | Omit = omit,
+        model: VideoModel | Omit = omit,
+        seconds: VideoSeconds | Omit = omit,
+        size: VideoSize | Omit = omit,
+        poll_interval_ms: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """Create a video and wait for it to be processed."""
+        video = await self.create(
+            model=model,
+            prompt=prompt,
+            input_reference=input_reference,
+            seconds=seconds,
+            size=size,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return await self.poll(
+            video.id,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    async def poll(
+        self,
+        video_id: str,
+        *,
+        poll_interval_ms: int | Omit = omit,
+    ) -> Video:
+        """Wait for the video to finish processing.
+
+        Note: this will return even if generation failed; check
+        video.error and video.status to handle these cases.
+        """
+        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+        if is_given(poll_interval_ms):
+            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        while True:
+            response = await self.with_raw_response.retrieve(
+                video_id,
+                extra_headers=headers,
+            )
+
+            video = response.parse()
+            if video.status == "in_progress" or video.status == "queued":
+                if not is_given(poll_interval_ms):
+                    from_header = response.headers.get("openai-poll-after-ms")
+                    if from_header is not None:
+                        poll_interval_ms = int(from_header)
+                    else:
+                        poll_interval_ms = 1000
+
+                await self._sleep(poll_interval_ms / 1000)
+            elif video.status == "completed" or video.status == "failed":
+                return video
+            else:
+                if TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(video.status)
+                else:
+                    return video
+
+    async def retrieve(
+        self,
+        video_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Video: + """ + Retrieve a video + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not video_id: + raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}") + return await self._get( + f"/videos/{video_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Video, + ) + + def list( + self, + *, + after: str | Omit = omit, + limit: int | Omit = omit, + order: Literal["asc", "desc"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> AsyncPaginator[Video, AsyncConversationCursorPage[Video]]: + """ + List videos + + Args: + after: Identifier for the last item from the previous pagination request + + limit: Number of items to retrieve + + order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for + descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get_api_list( + "/videos", + page=AsyncConversationCursorPage[Video], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + }, + video_list_params.VideoListParams, + ), + ), + model=Video, + ) + + async def delete( + self, + video_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> VideoDeleteResponse: + """ + Delete a video + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not video_id: + raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}") + return await self._delete( + f"/videos/{video_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VideoDeleteResponse, + ) + + async def download_content( + self, + video_id: str, + *, + variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> _legacy_response.HttpxBinaryResponseContent: + """Download video content + + Args: + variant: Which downloadable asset to return. + + Defaults to the MP4 video. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not video_id: + raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}") + extra_headers = {"Accept": "application/binary", **(extra_headers or {})} + return await self._get( + f"/videos/{video_id}/content", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"variant": variant}, video_download_content_params.VideoDownloadContentParams + ), + ), + cast_to=_legacy_response.HttpxBinaryResponseContent, + ) + + async def remix( + self, + video_id: str, + *, + prompt: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Video: + """ + Create a video remix + + Args: + prompt: Updated text prompt that directs the remix generation. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not video_id: + raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}") + return await self._post( + f"/videos/{video_id}/remix", + body=await async_maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Video, + ) + + +class VideosWithRawResponse: + def __init__(self, videos: Videos) -> None: + self._videos = videos + + self.create = _legacy_response.to_raw_response_wrapper( + videos.create, + ) + self.retrieve = _legacy_response.to_raw_response_wrapper( + videos.retrieve, + ) + self.list = _legacy_response.to_raw_response_wrapper( + videos.list, + ) + self.delete = _legacy_response.to_raw_response_wrapper( + videos.delete, + ) + self.download_content = _legacy_response.to_raw_response_wrapper( + videos.download_content, + ) + self.remix = _legacy_response.to_raw_response_wrapper( + videos.remix, + ) + + +class AsyncVideosWithRawResponse: + def __init__(self, videos: AsyncVideos) -> None: + self._videos = videos + + self.create = _legacy_response.async_to_raw_response_wrapper( + videos.create, + ) + self.retrieve = _legacy_response.async_to_raw_response_wrapper( + videos.retrieve, + ) + self.list = _legacy_response.async_to_raw_response_wrapper( + videos.list, + ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + videos.delete, + ) + self.download_content = _legacy_response.async_to_raw_response_wrapper( + videos.download_content, + ) + self.remix = _legacy_response.async_to_raw_response_wrapper( + videos.remix, + ) + + +class VideosWithStreamingResponse: + def __init__(self, videos: Videos) -> None: + self._videos = videos + + self.create = to_streamed_response_wrapper( + videos.create, + ) + self.retrieve = to_streamed_response_wrapper( + videos.retrieve, + ) + self.list = to_streamed_response_wrapper( + videos.list, + ) + self.delete = to_streamed_response_wrapper( + videos.delete, + ) + self.download_content = to_custom_streamed_response_wrapper( + videos.download_content, + StreamedBinaryAPIResponse, + ) + self.remix = to_streamed_response_wrapper( + videos.remix, + ) + + +class AsyncVideosWithStreamingResponse: + def __init__(self, videos: AsyncVideos) -> None: + self._videos = videos + + self.create = async_to_streamed_response_wrapper( + videos.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + videos.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + videos.list, + ) + self.delete = async_to_streamed_response_wrapper( + videos.delete, + ) + self.download_content = async_to_custom_streamed_response_wrapper( + videos.download_content, + AsyncStreamedBinaryAPIResponse, + ) + self.remix = async_to_streamed_response_wrapper( + videos.remix, + ) diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 1844f71ba7..a98ca16ee9 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -5,6 +5,7 @@ from .batch import Batch as Batch from .image import Image as Image from .model import Model as Model +from .video import Video as Video from .shared import ( Metadata as Metadata, AllModels as AllModels, @@ -29,15 +30,19 @@ from .chat_model import ChatModel as 
ChatModel from .completion import Completion as Completion from .moderation import Moderation as Moderation +from .video_size import VideoSize as VideoSize from .audio_model import AudioModel as AudioModel from .batch_error import BatchError as BatchError +from .batch_usage import BatchUsage as BatchUsage from .file_object import FileObject as FileObject from .image_model import ImageModel as ImageModel +from .video_model import VideoModel as VideoModel from .file_content import FileContent as FileContent from .file_deleted import FileDeleted as FileDeleted from .file_purpose import FilePurpose as FilePurpose from .vector_store import VectorStore as VectorStore from .model_deleted import ModelDeleted as ModelDeleted +from .video_seconds import VideoSeconds as VideoSeconds from .embedding_model import EmbeddingModel as EmbeddingModel from .images_response import ImagesResponse as ImagesResponse from .completion_usage import CompletionUsage as CompletionUsage @@ -47,11 +52,15 @@ from .batch_list_params import BatchListParams as BatchListParams from .completion_choice import CompletionChoice as CompletionChoice from .image_edit_params import ImageEditParams as ImageEditParams +from .video_list_params import VideoListParams as VideoListParams from .eval_create_params import EvalCreateParams as EvalCreateParams from .eval_list_response import EvalListResponse as EvalListResponse from .eval_update_params import EvalUpdateParams as EvalUpdateParams from .file_create_params import FileCreateParams as FileCreateParams +from .video_create_error import VideoCreateError as VideoCreateError +from .video_remix_params import VideoRemixParams as VideoRemixParams from .batch_create_params import BatchCreateParams as BatchCreateParams +from .video_create_params import VideoCreateParams as VideoCreateParams from .batch_request_counts import BatchRequestCounts as BatchRequestCounts from .eval_create_response import EvalCreateResponse as EvalCreateResponse from .eval_delete_response import EvalDeleteResponse as EvalDeleteResponse @@ -61,6 +70,7 @@ from .audio_response_format import AudioResponseFormat as AudioResponseFormat from .container_list_params import ContainerListParams as ContainerListParams from .image_generate_params import ImageGenerateParams as ImageGenerateParams +from .video_delete_response import VideoDeleteResponse as VideoDeleteResponse from .eval_retrieve_response import EvalRetrieveResponse as EvalRetrieveResponse from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent @@ -88,6 +98,7 @@ from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy +from .video_download_content_params import VideoDownloadContentParams as VideoDownloadContentParams from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig from .image_edit_partial_image_event import ImageEditPartialImageEvent as ImageEditPartialImageEvent from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam diff --git a/src/openai/types/audio/__init__.py b/src/openai/types/audio/__init__.py index 396944ee47..2ff2b8185d 100644 --- a/src/openai/types/audio/__init__.py +++ b/src/openai/types/audio/__init__.py @@ 
-11,10 +11,13 @@ from .transcription_include import TranscriptionInclude as TranscriptionInclude from .transcription_segment import TranscriptionSegment as TranscriptionSegment from .transcription_verbose import TranscriptionVerbose as TranscriptionVerbose +from .transcription_diarized import TranscriptionDiarized as TranscriptionDiarized from .translation_create_params import TranslationCreateParams as TranslationCreateParams from .transcription_stream_event import TranscriptionStreamEvent as TranscriptionStreamEvent from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams from .translation_create_response import TranslationCreateResponse as TranslationCreateResponse from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse from .transcription_text_done_event import TranscriptionTextDoneEvent as TranscriptionTextDoneEvent +from .transcription_diarized_segment import TranscriptionDiarizedSegment as TranscriptionDiarizedSegment from .transcription_text_delta_event import TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent +from .transcription_text_segment_event import TranscriptionTextSegmentEvent as TranscriptionTextSegmentEvent diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py index 8271b054ab..adaef9f5fe 100644 --- a/src/openai/types/audio/transcription_create_params.py +++ b/src/openai/types/audio/transcription_create_params.py @@ -5,7 +5,7 @@ from typing import List, Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ..._types import FileTypes +from ..._types import FileTypes, SequenceNotStr from ..audio_model import AudioModel from .transcription_include import TranscriptionInclude from ..audio_response_format import AudioResponseFormat @@ -29,8 +29,9 @@ class TranscriptionCreateParamsBase(TypedDict, total=False): model: Required[Union[str, AudioModel]] """ID of the model to use. - The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1` - (which is powered by our open source Whisper V2 model). + The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `whisper-1` + (which is powered by our open source Whisper V2 model), and + `gpt-4o-transcribe-diarize`. """ chunking_strategy: Optional[ChunkingStrategy] @@ -39,16 +40,34 @@ class TranscriptionCreateParamsBase(TypedDict, total=False): When set to `"auto"`, the server first normalizes loudness and then uses voice activity detection (VAD) to choose boundaries. `server_vad` object can be provided to tweak VAD detection parameters manually. If unset, the audio is - transcribed as a single block. + transcribed as a single block. Required when using `gpt-4o-transcribe-diarize` + for inputs longer than 30 seconds. """ include: List[TranscriptionInclude] - """Additional information to include in the transcription response. + """ + Additional information to include in the transcription response. `logprobs` will + return the log probabilities of the tokens in the response to understand the + model's confidence in the transcription. `logprobs` only works with + response_format set to `json` and only with the models `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`. This field is not supported when using + `gpt-4o-transcribe-diarize`. + """ + + known_speaker_names: SequenceNotStr[str] + """ + Optional list of speaker names that correspond to the audio samples provided in + `known_speaker_references[]`. 
Each entry should be a short identifier (for + example `customer` or `agent`). Up to 4 speakers are supported. + """ - `logprobs` will return the log probabilities of the tokens in the response to - understand the model's confidence in the transcription. `logprobs` only works - with response_format set to `json` and only with the models `gpt-4o-transcribe` - and `gpt-4o-mini-transcribe`. + known_speaker_references: SequenceNotStr[str] + """ + Optional list of audio samples (as + [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs)) + that contain known speaker references matching `known_speaker_names[]`. Each + sample must be between 2 and 10 seconds, and can use any of the same input audio + formats supported by `file`. """ language: str @@ -64,14 +83,17 @@ class TranscriptionCreateParamsBase(TypedDict, total=False): segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - should match the audio language. + should match the audio language. This field is not supported when using + `gpt-4o-transcribe-diarize`. """ response_format: AudioResponseFormat """ The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. + `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`, the only supported format is `json`. For + `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and + `diarized_json`, with `diarized_json` required to receive speaker annotations. """ temperature: float @@ -89,7 +111,8 @@ class TranscriptionCreateParamsBase(TypedDict, total=False): `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. + incurs additional latency. This option is not available for + `gpt-4o-transcribe-diarize`. """ diff --git a/src/openai/types/audio/transcription_create_response.py b/src/openai/types/audio/transcription_create_response.py index 2f7bed8114..5717a3e701 100644 --- a/src/openai/types/audio/transcription_create_response.py +++ b/src/openai/types/audio/transcription_create_response.py @@ -5,7 +5,8 @@ from .transcription import Transcription from .transcription_verbose import TranscriptionVerbose +from .transcription_diarized import TranscriptionDiarized __all__ = ["TranscriptionCreateResponse"] -TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose] +TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionDiarized, TranscriptionVerbose] diff --git a/src/openai/types/audio/transcription_diarized.py b/src/openai/types/audio/transcription_diarized.py new file mode 100644 index 0000000000..b7dd2b8ebb --- /dev/null +++ b/src/openai/types/audio/transcription_diarized.py @@ -0,0 +1,63 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
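# --------------------------------------------------------------------------
# Illustrative usage sketch (editorial, not part of the generated diff). It
# exercises the new diarization parameters documented above. The audio path
# and the base64 speaker-reference data URLs are hypothetical placeholders;
# per the docstrings, each reference clip must be 2-10 seconds long and
# `chunking_strategy` is required for inputs longer than 30 seconds.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

agent_clip = "data:audio/wav;base64,..."     # hypothetical 2-10s reference sample
customer_clip = "data:audio/wav;base64,..."  # hypothetical 2-10s reference sample

with open("meeting.wav", "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        file=audio_file,
        model="gpt-4o-transcribe-diarize",
        response_format="diarized_json",  # required to receive speaker annotations
        chunking_strategy="auto",
        known_speaker_names=["agent", "customer"],
        known_speaker_references=[agent_clip, customer_clip],
    )
print(transcript.text)
# --------------------------------------------------------------------------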
+ +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .transcription_diarized_segment import TranscriptionDiarizedSegment + +__all__ = ["TranscriptionDiarized", "Usage", "UsageTokens", "UsageTokensInputTokenDetails", "UsageDuration"] + + +class UsageTokensInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """Number of audio tokens billed for this request.""" + + text_tokens: Optional[int] = None + """Number of text tokens billed for this request.""" + + +class UsageTokens(BaseModel): + input_tokens: int + """Number of input tokens billed for this request.""" + + output_tokens: int + """Number of output tokens generated.""" + + total_tokens: int + """Total number of tokens used (input + output).""" + + type: Literal["tokens"] + """The type of the usage object. Always `tokens` for this variant.""" + + input_token_details: Optional[UsageTokensInputTokenDetails] = None + """Details about the input tokens billed for this request.""" + + +class UsageDuration(BaseModel): + seconds: float + """Duration of the input audio in seconds.""" + + type: Literal["duration"] + """The type of the usage object. Always `duration` for this variant.""" + + +Usage: TypeAlias = Annotated[Union[UsageTokens, UsageDuration], PropertyInfo(discriminator="type")] + + +class TranscriptionDiarized(BaseModel): + duration: float + """Duration of the input audio in seconds.""" + + segments: List[TranscriptionDiarizedSegment] + """Segments of the transcript annotated with timestamps and speaker labels.""" + + task: Literal["transcribe"] + """The type of task that was run. Always `transcribe`.""" + + text: str + """The concatenated transcript text for the entire audio input.""" + + usage: Optional[Usage] = None + """Token or duration usage statistics for the request.""" diff --git a/src/openai/types/audio/transcription_diarized_segment.py b/src/openai/types/audio/transcription_diarized_segment.py new file mode 100644 index 0000000000..fe87bb4fb8 --- /dev/null +++ b/src/openai/types/audio/transcription_diarized_segment.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["TranscriptionDiarizedSegment"] + + +class TranscriptionDiarizedSegment(BaseModel): + id: str + """Unique identifier for the segment.""" + + end: float + """End timestamp of the segment in seconds.""" + + speaker: str + """Speaker label for this segment. + + When known speakers are provided, the label matches `known_speaker_names[]`. + Otherwise speakers are labeled sequentially using capital letters (`A`, `B`, + ...). + """ + + start: float + """Start timestamp of the segment in seconds.""" + + text: str + """Transcript text for this segment.""" + + type: Literal["transcript.text.segment"] + """The type of the segment. 
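# --------------------------------------------------------------------------
# Editorial sketch: constructing the new response model directly to show its
# shape (real instances come back from the API). All values are made up.
from openai.types.audio import TranscriptionDiarized

transcript = TranscriptionDiarized(
    task="transcribe",
    duration=12.5,
    text="Hello. Hi there.",
    segments=[
        {
            "id": "seg_1",
            "type": "transcript.text.segment",
            "speaker": "A",
            "start": 0.0,
            "end": 4.2,
            "text": "Hello.",
        },
        {
            "id": "seg_2",
            "type": "transcript.text.segment",
            "speaker": "B",
            "start": 4.2,
            "end": 12.5,
            "text": "Hi there.",
        },
    ],
)
for seg in transcript.segments:
    print(f"[{seg.speaker}] {seg.start:.1f}-{seg.end:.1f}s: {seg.text}")
# --------------------------------------------------------------------------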
Always `transcript.text.segment`.""" diff --git a/src/openai/types/audio/transcription_stream_event.py b/src/openai/types/audio/transcription_stream_event.py index 757077a280..77d3a3aeec 100644 --- a/src/openai/types/audio/transcription_stream_event.py +++ b/src/openai/types/audio/transcription_stream_event.py @@ -6,9 +6,11 @@ from ..._utils import PropertyInfo from .transcription_text_done_event import TranscriptionTextDoneEvent from .transcription_text_delta_event import TranscriptionTextDeltaEvent +from .transcription_text_segment_event import TranscriptionTextSegmentEvent __all__ = ["TranscriptionStreamEvent"] TranscriptionStreamEvent: TypeAlias = Annotated[ - Union[TranscriptionTextDeltaEvent, TranscriptionTextDoneEvent], PropertyInfo(discriminator="type") + Union[TranscriptionTextSegmentEvent, TranscriptionTextDeltaEvent, TranscriptionTextDoneEvent], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/audio/transcription_text_delta_event.py b/src/openai/types/audio/transcription_text_delta_event.py index 36c52f0623..363b6a6335 100644 --- a/src/openai/types/audio/transcription_text_delta_event.py +++ b/src/openai/types/audio/transcription_text_delta_event.py @@ -33,3 +33,9 @@ class TranscriptionTextDeltaEvent(BaseModel): [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `include[]` parameter set to `logprobs`. """ + + segment_id: Optional[str] = None + """Identifier of the diarized segment that this delta belongs to. + + Only present when using `gpt-4o-transcribe-diarize`. + """ diff --git a/src/openai/types/audio/transcription_text_segment_event.py b/src/openai/types/audio/transcription_text_segment_event.py new file mode 100644 index 0000000000..d4f7664578 --- /dev/null +++ b/src/openai/types/audio/transcription_text_segment_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["TranscriptionTextSegmentEvent"] + + +class TranscriptionTextSegmentEvent(BaseModel): + id: str + """Unique identifier for the segment.""" + + end: float + """End timestamp of the segment in seconds.""" + + speaker: str + """Speaker label for this segment.""" + + start: float + """Start timestamp of the segment in seconds.""" + + text: str + """Transcript text for this segment.""" + + type: Literal["transcript.text.segment"] + """The type of the event. 
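# --------------------------------------------------------------------------
# Editorial sketch of consuming the widened stream-event union above. It
# assumes `stream=True` is accepted by this endpoint for the diarize model as
# it is for the other transcribe models; the audio path is a placeholder.
from openai import OpenAI

client = OpenAI()

with open("meeting.wav", "rb") as audio_file:
    stream = client.audio.transcriptions.create(
        file=audio_file,
        model="gpt-4o-transcribe-diarize",
        response_format="diarized_json",
        stream=True,
    )
    for event in stream:
        if event.type == "transcript.text.segment":
            print(f"[{event.speaker}] {event.start:.1f}-{event.end:.1f}s: {event.text}")
        elif event.type == "transcript.text.delta":
            # deltas now carry the diarized segment they belong to
            print(event.segment_id, event.delta)
# --------------------------------------------------------------------------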
Always `transcript.text.segment`.""" diff --git a/src/openai/types/audio_model.py b/src/openai/types/audio_model.py index 4d14d60181..68031a2198 100644 --- a/src/openai/types/audio_model.py +++ b/src/openai/types/audio_model.py @@ -4,4 +4,4 @@ __all__ = ["AudioModel"] -AudioModel: TypeAlias = Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"] +AudioModel: TypeAlias = Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe", "gpt-4o-transcribe-diarize"] diff --git a/src/openai/types/audio_response_format.py b/src/openai/types/audio_response_format.py index f8c8d45945..1897aaf6ed 100644 --- a/src/openai/types/audio_response_format.py +++ b/src/openai/types/audio_response_format.py @@ -4,4 +4,4 @@ __all__ = ["AudioResponseFormat"] -AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"] +AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt", "diarized_json"] diff --git a/src/openai/types/batch.py b/src/openai/types/batch.py index 35de90ac85..ece0513b35 100644 --- a/src/openai/types/batch.py +++ b/src/openai/types/batch.py @@ -5,6 +5,7 @@ from .._models import BaseModel from .batch_error import BatchError +from .batch_usage import BatchUsage from .shared.metadata import Metadata from .batch_request_counts import BatchRequestCounts @@ -80,8 +81,24 @@ class Batch(BaseModel): a maximum length of 512 characters. """ + model: Optional[str] = None + """Model ID used to process the batch, like `gpt-5-2025-08-07`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + """ + output_file_id: Optional[str] = None """The ID of the file containing the outputs of successfully executed requests.""" request_counts: Optional[BatchRequestCounts] = None """The request counts for different statuses within the batch.""" + + usage: Optional[BatchUsage] = None + """ + Represents token usage details including input tokens, output tokens, a + breakdown of output tokens, and the total tokens used. Only populated on batches + created after September 7, 2025. + """ diff --git a/src/openai/types/batch_usage.py b/src/openai/types/batch_usage.py new file mode 100644 index 0000000000..578f64a5e2 --- /dev/null +++ b/src/openai/types/batch_usage.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["BatchUsage", "InputTokensDetails", "OutputTokensDetails"] + + +class InputTokensDetails(BaseModel): + cached_tokens: int + """The number of tokens that were retrieved from the cache. + + [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). 
+ """ + + +class OutputTokensDetails(BaseModel): + reasoning_tokens: int + """The number of reasoning tokens.""" + + +class BatchUsage(BaseModel): + input_tokens: int + """The number of input tokens.""" + + input_tokens_details: InputTokensDetails + """A detailed breakdown of the input tokens.""" + + output_tokens: int + """The number of output tokens.""" + + output_tokens_details: OutputTokensDetails + """A detailed breakdown of the output tokens.""" + + total_tokens: int + """The total number of tokens used.""" diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index 5ba3eadf3c..deb2369677 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -7,6 +7,7 @@ from .function_tool import FunctionTool as FunctionTool from .assistant_tool import AssistantTool as AssistantTool from .thread_deleted import ThreadDeleted as ThreadDeleted +from .chatkit_workflow import ChatKitWorkflow as ChatKitWorkflow from .file_search_tool import FileSearchTool as FileSearchTool from .assistant_deleted import AssistantDeleted as AssistantDeleted from .function_tool_param import FunctionToolParam as FunctionToolParam diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 07f8f28f02..6fb1551fa5 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -65,6 +65,9 @@ class AssistantCreateParams(TypedDict, total=False): supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ response_format: Optional[AssistantResponseFormatOptionParam] diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index 45d9f984b2..6d20b8e01f 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -100,6 +100,9 @@ class AssistantUpdateParams(TypedDict, total=False): supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ response_format: Optional[AssistantResponseFormatOptionParam] diff --git a/src/openai/types/beta/chatkit/__init__.py b/src/openai/types/beta/chatkit/__init__.py new file mode 100644 index 0000000000..eafed9dd99 --- /dev/null +++ b/src/openai/types/beta/chatkit/__init__.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .chat_session import ChatSession as ChatSession +from .chatkit_thread import ChatKitThread as ChatKitThread +from .chatkit_attachment import ChatKitAttachment as ChatKitAttachment +from .thread_list_params import ThreadListParams as ThreadListParams +from .chat_session_status import ChatSessionStatus as ChatSessionStatus +from .chatkit_widget_item import ChatKitWidgetItem as ChatKitWidgetItem +from .chat_session_history import ChatSessionHistory as ChatSessionHistory +from .session_create_params import SessionCreateParams as SessionCreateParams +from .thread_delete_response import ThreadDeleteResponse as ThreadDeleteResponse +from .chat_session_file_upload import ChatSessionFileUpload as ChatSessionFileUpload +from .chat_session_rate_limits import ChatSessionRateLimits as ChatSessionRateLimits +from .chatkit_thread_item_list import ChatKitThreadItemList as ChatKitThreadItemList +from .thread_list_items_params import ThreadListItemsParams as ThreadListItemsParams +from .chat_session_workflow_param import ChatSessionWorkflowParam as ChatSessionWorkflowParam +from .chatkit_response_output_text import ChatKitResponseOutputText as ChatKitResponseOutputText +from .chat_session_rate_limits_param import ChatSessionRateLimitsParam as ChatSessionRateLimitsParam +from .chat_session_expires_after_param import ChatSessionExpiresAfterParam as ChatSessionExpiresAfterParam +from .chatkit_thread_user_message_item import ChatKitThreadUserMessageItem as ChatKitThreadUserMessageItem +from .chat_session_chatkit_configuration import ChatSessionChatKitConfiguration as ChatSessionChatKitConfiguration +from .chat_session_automatic_thread_titling import ( + ChatSessionAutomaticThreadTitling as ChatSessionAutomaticThreadTitling, +) +from .chatkit_thread_assistant_message_item import ( + ChatKitThreadAssistantMessageItem as ChatKitThreadAssistantMessageItem, +) +from .chat_session_chatkit_configuration_param import ( + ChatSessionChatKitConfigurationParam as ChatSessionChatKitConfigurationParam, +) diff --git a/src/openai/types/beta/chatkit/chat_session.py b/src/openai/types/beta/chatkit/chat_session.py new file mode 100644 index 0000000000..82baea211c --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel +from ..chatkit_workflow import ChatKitWorkflow +from .chat_session_status import ChatSessionStatus +from .chat_session_rate_limits import ChatSessionRateLimits +from .chat_session_chatkit_configuration import ChatSessionChatKitConfiguration + +__all__ = ["ChatSession"] + + +class ChatSession(BaseModel): + id: str + """Identifier for the ChatKit session.""" + + chatkit_configuration: ChatSessionChatKitConfiguration + """Resolved ChatKit feature configuration for the session.""" + + client_secret: str + """Ephemeral client secret that authenticates session requests.""" + + expires_at: int + """Unix timestamp (in seconds) for when the session expires.""" + + max_requests_per_1_minute: int + """Convenience copy of the per-minute request limit.""" + + object: Literal["chatkit.session"] + """Type discriminator that is always `chatkit.session`.""" + + rate_limits: ChatSessionRateLimits + """Resolved rate limit values.""" + + status: ChatSessionStatus + """Current lifecycle state of the session.""" + + user: str + """User identifier associated with the session.""" + + workflow: ChatKitWorkflow + """Workflow metadata for the session.""" diff --git a/src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py b/src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py new file mode 100644 index 0000000000..4fa96a4433 --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ...._models import BaseModel + +__all__ = ["ChatSessionAutomaticThreadTitling"] + + +class ChatSessionAutomaticThreadTitling(BaseModel): + enabled: bool + """Whether automatic thread titling is enabled.""" diff --git a/src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py b/src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py new file mode 100644 index 0000000000..6205b172cf --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ...._models import BaseModel +from .chat_session_history import ChatSessionHistory +from .chat_session_file_upload import ChatSessionFileUpload +from .chat_session_automatic_thread_titling import ChatSessionAutomaticThreadTitling + +__all__ = ["ChatSessionChatKitConfiguration"] + + +class ChatSessionChatKitConfiguration(BaseModel): + automatic_thread_titling: ChatSessionAutomaticThreadTitling + """Automatic thread titling preferences.""" + + file_upload: ChatSessionFileUpload + """Upload settings for the session.""" + + history: ChatSessionHistory + """History retention configuration.""" diff --git a/src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py b/src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py new file mode 100644 index 0000000000..0a5ae80a76 --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py @@ -0,0 +1,59 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ChatSessionChatKitConfigurationParam", "AutomaticThreadTitling", "FileUpload", "History"] + + +class AutomaticThreadTitling(TypedDict, total=False): + enabled: bool + """Enable automatic thread title generation. Defaults to true.""" + + +class FileUpload(TypedDict, total=False): + enabled: bool + """Enable uploads for this session. Defaults to false.""" + + max_file_size: int + """Maximum size in megabytes for each uploaded file. + + Defaults to 512 MB, which is the maximum allowable size. + """ + + max_files: int + """Maximum number of files that can be uploaded to the session. Defaults to 10.""" + + +class History(TypedDict, total=False): + enabled: bool + """Enables chat users to access previous ChatKit threads. Defaults to true.""" + + recent_threads: int + """Number of recent ChatKit threads users have access to. + + Defaults to unlimited when unset. + """ + + +class ChatSessionChatKitConfigurationParam(TypedDict, total=False): + automatic_thread_titling: AutomaticThreadTitling + """Configuration for automatic thread titling. + + When omitted, automatic thread titling is enabled by default. + """ + + file_upload: FileUpload + """Configuration for upload enablement and limits. + + When omitted, uploads are disabled by default (max_files 10, max_file_size 512 + MB). + """ + + history: History + """Configuration for chat history retention. + + When omitted, history is enabled by default with no limit on recent_threads + (null). + """ diff --git a/src/openai/types/beta/chatkit/chat_session_expires_after_param.py b/src/openai/types/beta/chatkit/chat_session_expires_after_param.py new file mode 100644 index 0000000000..ceb5a984c5 --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_expires_after_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatSessionExpiresAfterParam"] + + +class ChatSessionExpiresAfterParam(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Base timestamp used to calculate expiration. Currently fixed to `created_at`.""" + + seconds: Required[int] + """Number of seconds after the anchor when the session expires.""" diff --git a/src/openai/types/beta/chatkit/chat_session_file_upload.py b/src/openai/types/beta/chatkit/chat_session_file_upload.py new file mode 100644 index 0000000000..c63c7a0149 --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_file_upload.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["ChatSessionFileUpload"] + + +class ChatSessionFileUpload(BaseModel): + enabled: bool + """Indicates if uploads are enabled for the session.""" + + max_file_size: Optional[int] = None + """Maximum upload size in megabytes.""" + + max_files: Optional[int] = None + """Maximum number of uploads allowed during the session.""" diff --git a/src/openai/types/beta/chatkit/chat_session_history.py b/src/openai/types/beta/chatkit/chat_session_history.py new file mode 100644 index 0000000000..66ebe00877 --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_history.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["ChatSessionHistory"] + + +class ChatSessionHistory(BaseModel): + enabled: bool + """Indicates if chat history is persisted for the session.""" + + recent_threads: Optional[int] = None + """Number of prior threads surfaced in history views. + + Defaults to null when all history is retained. + """ diff --git a/src/openai/types/beta/chatkit/chat_session_rate_limits.py b/src/openai/types/beta/chatkit/chat_session_rate_limits.py new file mode 100644 index 0000000000..392225e347 --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_rate_limits.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ...._models import BaseModel + +__all__ = ["ChatSessionRateLimits"] + + +class ChatSessionRateLimits(BaseModel): + max_requests_per_1_minute: int + """Maximum allowed requests per one-minute window.""" diff --git a/src/openai/types/beta/chatkit/chat_session_rate_limits_param.py b/src/openai/types/beta/chatkit/chat_session_rate_limits_param.py new file mode 100644 index 0000000000..7894c06484 --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_rate_limits_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ChatSessionRateLimitsParam"] + + +class ChatSessionRateLimitsParam(TypedDict, total=False): + max_requests_per_1_minute: int + """Maximum number of requests allowed per minute for the session. Defaults to 10.""" diff --git a/src/openai/types/beta/chatkit/chat_session_status.py b/src/openai/types/beta/chatkit/chat_session_status.py new file mode 100644 index 0000000000..a483099c6c --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_status.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["ChatSessionStatus"] + +ChatSessionStatus: TypeAlias = Literal["active", "expired", "cancelled"] diff --git a/src/openai/types/beta/chatkit/chat_session_workflow_param.py b/src/openai/types/beta/chatkit/chat_session_workflow_param.py new file mode 100644 index 0000000000..5542922102 --- /dev/null +++ b/src/openai/types/beta/chatkit/chat_session_workflow_param.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union +from typing_extensions import Required, TypedDict + +__all__ = ["ChatSessionWorkflowParam", "Tracing"] + + +class Tracing(TypedDict, total=False): + enabled: bool + """Whether tracing is enabled during the session. Defaults to true.""" + + +class ChatSessionWorkflowParam(TypedDict, total=False): + id: Required[str] + """Identifier for the workflow invoked by the session.""" + + state_variables: Dict[str, Union[str, bool, float]] + """State variables forwarded to the workflow. + + Keys may be up to 64 characters, values must be primitive types, and the map + defaults to an empty object. + """ + + tracing: Tracing + """Optional tracing overrides for the workflow invocation. + + When omitted, tracing is enabled by default. + """ + + version: str + """Specific workflow version to run. 
Defaults to the latest deployed version.""" diff --git a/src/openai/types/beta/chatkit/chatkit_attachment.py b/src/openai/types/beta/chatkit/chatkit_attachment.py new file mode 100644 index 0000000000..8d8ad3e128 --- /dev/null +++ b/src/openai/types/beta/chatkit/chatkit_attachment.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ChatKitAttachment"] + + +class ChatKitAttachment(BaseModel): + id: str + """Identifier for the attachment.""" + + mime_type: str + """MIME type of the attachment.""" + + name: str + """Original display name for the attachment.""" + + preview_url: Optional[str] = None + """Preview URL for rendering the attachment inline.""" + + type: Literal["image", "file"] + """Attachment discriminator.""" diff --git a/src/openai/types/beta/chatkit/chatkit_response_output_text.py b/src/openai/types/beta/chatkit/chatkit_response_output_text.py new file mode 100644 index 0000000000..116b797ec2 --- /dev/null +++ b/src/openai/types/beta/chatkit/chatkit_response_output_text.py @@ -0,0 +1,62 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ...._utils import PropertyInfo +from ...._models import BaseModel + +__all__ = [ + "ChatKitResponseOutputText", + "Annotation", + "AnnotationFile", + "AnnotationFileSource", + "AnnotationURL", + "AnnotationURLSource", +] + + +class AnnotationFileSource(BaseModel): + filename: str + """Filename referenced by the annotation.""" + + type: Literal["file"] + """Type discriminator that is always `file`.""" + + +class AnnotationFile(BaseModel): + source: AnnotationFileSource + """File attachment referenced by the annotation.""" + + type: Literal["file"] + """Type discriminator that is always `file` for this annotation.""" + + +class AnnotationURLSource(BaseModel): + type: Literal["url"] + """Type discriminator that is always `url`.""" + + url: str + """URL referenced by the annotation.""" + + +class AnnotationURL(BaseModel): + source: AnnotationURLSource + """URL referenced by the annotation.""" + + type: Literal["url"] + """Type discriminator that is always `url` for this annotation.""" + + +Annotation: TypeAlias = Annotated[Union[AnnotationFile, AnnotationURL], PropertyInfo(discriminator="type")] + + +class ChatKitResponseOutputText(BaseModel): + annotations: List[Annotation] + """Ordered list of annotations attached to the response text.""" + + text: str + """Assistant generated text.""" + + type: Literal["output_text"] + """Type discriminator that is always `output_text`.""" diff --git a/src/openai/types/beta/chatkit/chatkit_thread.py b/src/openai/types/beta/chatkit/chatkit_thread.py new file mode 100644 index 0000000000..abd1a9ea01 --- /dev/null +++ b/src/openai/types/beta/chatkit/chatkit_thread.py @@ -0,0 +1,56 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ...._utils import PropertyInfo +from ...._models import BaseModel + +__all__ = ["ChatKitThread", "Status", "StatusActive", "StatusLocked", "StatusClosed"] + + +class StatusActive(BaseModel): + type: Literal["active"] + """Status discriminator that is always `active`.""" + + +class StatusLocked(BaseModel): + reason: Optional[str] = None + """Reason that the thread was locked. Defaults to null when no reason is recorded.""" + + type: Literal["locked"] + """Status discriminator that is always `locked`.""" + + +class StatusClosed(BaseModel): + reason: Optional[str] = None + """Reason that the thread was closed. Defaults to null when no reason is recorded.""" + + type: Literal["closed"] + """Status discriminator that is always `closed`.""" + + +Status: TypeAlias = Annotated[Union[StatusActive, StatusLocked, StatusClosed], PropertyInfo(discriminator="type")] + + +class ChatKitThread(BaseModel): + id: str + """Identifier of the thread.""" + + created_at: int + """Unix timestamp (in seconds) for when the thread was created.""" + + object: Literal["chatkit.thread"] + """Type discriminator that is always `chatkit.thread`.""" + + status: Status + """Current status for the thread. Defaults to `active` for newly created threads.""" + + title: Optional[str] = None + """Optional human-readable title for the thread. + + Defaults to null when no title has been generated. + """ + + user: str + """Free-form string that identifies your end user who owns the thread.""" diff --git a/src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py b/src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py new file mode 100644 index 0000000000..f4afd053b6 --- /dev/null +++ b/src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ...._models import BaseModel +from .chatkit_response_output_text import ChatKitResponseOutputText + +__all__ = ["ChatKitThreadAssistantMessageItem"] + + +class ChatKitThreadAssistantMessageItem(BaseModel): + id: str + """Identifier of the thread item.""" + + content: List[ChatKitResponseOutputText] + """Ordered assistant response segments.""" + + created_at: int + """Unix timestamp (in seconds) for when the item was created.""" + + object: Literal["chatkit.thread_item"] + """Type discriminator that is always `chatkit.thread_item`.""" + + thread_id: str + """Identifier of the parent thread.""" + + type: Literal["chatkit.assistant_message"] + """Type discriminator that is always `chatkit.assistant_message`.""" diff --git a/src/openai/types/beta/chatkit/chatkit_thread_item_list.py b/src/openai/types/beta/chatkit/chatkit_thread_item_list.py new file mode 100644 index 0000000000..173bd15055 --- /dev/null +++ b/src/openai/types/beta/chatkit/chatkit_thread_item_list.py @@ -0,0 +1,144 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
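# --------------------------------------------------------------------------
# Editorial sketch: narrowing the discriminated `status` union on the thread
# model above. Field values are made up.
from openai.types.beta.chatkit import ChatKitThread

thread = ChatKitThread(
    id="cthr_123",
    created_at=1_700_000_000,
    object="chatkit.thread",
    status={"type": "locked", "reason": "escalated to a human agent"},
    user="user_123",
)
if thread.status.type == "locked":
    print("thread locked:", thread.status.reason)
# --------------------------------------------------------------------------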
+ +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ...._utils import PropertyInfo +from ...._models import BaseModel +from .chatkit_widget_item import ChatKitWidgetItem +from .chatkit_thread_user_message_item import ChatKitThreadUserMessageItem +from .chatkit_thread_assistant_message_item import ChatKitThreadAssistantMessageItem + +__all__ = [ + "ChatKitThreadItemList", + "Data", + "DataChatKitClientToolCall", + "DataChatKitTask", + "DataChatKitTaskGroup", + "DataChatKitTaskGroupTask", +] + + +class DataChatKitClientToolCall(BaseModel): + id: str + """Identifier of the thread item.""" + + arguments: str + """JSON-encoded arguments that were sent to the tool.""" + + call_id: str + """Identifier for the client tool call.""" + + created_at: int + """Unix timestamp (in seconds) for when the item was created.""" + + name: str + """Tool name that was invoked.""" + + object: Literal["chatkit.thread_item"] + """Type discriminator that is always `chatkit.thread_item`.""" + + output: Optional[str] = None + """JSON-encoded output captured from the tool. + + Defaults to null while execution is in progress. + """ + + status: Literal["in_progress", "completed"] + """Execution status for the tool call.""" + + thread_id: str + """Identifier of the parent thread.""" + + type: Literal["chatkit.client_tool_call"] + """Type discriminator that is always `chatkit.client_tool_call`.""" + + +class DataChatKitTask(BaseModel): + id: str + """Identifier of the thread item.""" + + created_at: int + """Unix timestamp (in seconds) for when the item was created.""" + + heading: Optional[str] = None + """Optional heading for the task. Defaults to null when not provided.""" + + object: Literal["chatkit.thread_item"] + """Type discriminator that is always `chatkit.thread_item`.""" + + summary: Optional[str] = None + """Optional summary that describes the task. Defaults to null when omitted.""" + + task_type: Literal["custom", "thought"] + """Subtype for the task.""" + + thread_id: str + """Identifier of the parent thread.""" + + type: Literal["chatkit.task"] + """Type discriminator that is always `chatkit.task`.""" + + +class DataChatKitTaskGroupTask(BaseModel): + heading: Optional[str] = None + """Optional heading for the grouped task. Defaults to null when not provided.""" + + summary: Optional[str] = None + """Optional summary that describes the grouped task. + + Defaults to null when omitted. 
+ """ + + type: Literal["custom", "thought"] + """Subtype for the grouped task.""" + + +class DataChatKitTaskGroup(BaseModel): + id: str + """Identifier of the thread item.""" + + created_at: int + """Unix timestamp (in seconds) for when the item was created.""" + + object: Literal["chatkit.thread_item"] + """Type discriminator that is always `chatkit.thread_item`.""" + + tasks: List[DataChatKitTaskGroupTask] + """Tasks included in the group.""" + + thread_id: str + """Identifier of the parent thread.""" + + type: Literal["chatkit.task_group"] + """Type discriminator that is always `chatkit.task_group`.""" + + +Data: TypeAlias = Annotated[ + Union[ + ChatKitThreadUserMessageItem, + ChatKitThreadAssistantMessageItem, + ChatKitWidgetItem, + DataChatKitClientToolCall, + DataChatKitTask, + DataChatKitTaskGroup, + ], + PropertyInfo(discriminator="type"), +] + + +class ChatKitThreadItemList(BaseModel): + data: List[Data] + """A list of items""" + + first_id: Optional[str] = None + """The ID of the first item in the list.""" + + has_more: bool + """Whether there are more items available.""" + + last_id: Optional[str] = None + """The ID of the last item in the list.""" + + object: Literal["list"] + """The type of object returned, must be `list`.""" diff --git a/src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py b/src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py new file mode 100644 index 0000000000..233d07232f --- /dev/null +++ b/src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py @@ -0,0 +1,77 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ...._utils import PropertyInfo +from ...._models import BaseModel +from .chatkit_attachment import ChatKitAttachment + +__all__ = [ + "ChatKitThreadUserMessageItem", + "Content", + "ContentInputText", + "ContentQuotedText", + "InferenceOptions", + "InferenceOptionsToolChoice", +] + + +class ContentInputText(BaseModel): + text: str + """Plain-text content supplied by the user.""" + + type: Literal["input_text"] + """Type discriminator that is always `input_text`.""" + + +class ContentQuotedText(BaseModel): + text: str + """Quoted text content.""" + + type: Literal["quoted_text"] + """Type discriminator that is always `quoted_text`.""" + + +Content: TypeAlias = Annotated[Union[ContentInputText, ContentQuotedText], PropertyInfo(discriminator="type")] + + +class InferenceOptionsToolChoice(BaseModel): + id: str + """Identifier of the requested tool.""" + + +class InferenceOptions(BaseModel): + model: Optional[str] = None + """Model name that generated the response. + + Defaults to null when using the session default. + """ + + tool_choice: Optional[InferenceOptionsToolChoice] = None + """Preferred tool to invoke. Defaults to null when ChatKit should auto-select.""" + + +class ChatKitThreadUserMessageItem(BaseModel): + id: str + """Identifier of the thread item.""" + + attachments: List[ChatKitAttachment] + """Attachments associated with the user message. Defaults to an empty list.""" + + content: List[Content] + """Ordered content elements supplied by the user.""" + + created_at: int + """Unix timestamp (in seconds) for when the item was created.""" + + inference_options: Optional[InferenceOptions] = None + """Inference overrides applied to the message. 
Defaults to null when unset.""" + + object: Literal["chatkit.thread_item"] + """Type discriminator that is always `chatkit.thread_item`.""" + + thread_id: str + """Identifier of the parent thread.""" + + type: Literal["chatkit.user_message"] diff --git a/src/openai/types/beta/chatkit/chatkit_widget_item.py b/src/openai/types/beta/chatkit/chatkit_widget_item.py new file mode 100644 index 0000000000..c7f182259a --- /dev/null +++ b/src/openai/types/beta/chatkit/chatkit_widget_item.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ChatKitWidgetItem"] + + +class ChatKitWidgetItem(BaseModel): + id: str + """Identifier of the thread item.""" + + created_at: int + """Unix timestamp (in seconds) for when the item was created.""" + + object: Literal["chatkit.thread_item"] + """Type discriminator that is always `chatkit.thread_item`.""" + + thread_id: str + """Identifier of the parent thread.""" + + type: Literal["chatkit.widget"] + """Type discriminator that is always `chatkit.widget`.""" + + widget: str + """Serialized widget payload rendered in the UI.""" diff --git a/src/openai/types/beta/chatkit/session_create_params.py b/src/openai/types/beta/chatkit/session_create_params.py new file mode 100644 index 0000000000..1803d18cf6 --- /dev/null +++ b/src/openai/types/beta/chatkit/session_create_params.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +from .chat_session_workflow_param import ChatSessionWorkflowParam +from .chat_session_rate_limits_param import ChatSessionRateLimitsParam +from .chat_session_expires_after_param import ChatSessionExpiresAfterParam +from .chat_session_chatkit_configuration_param import ChatSessionChatKitConfigurationParam + +__all__ = ["SessionCreateParams"] + + +class SessionCreateParams(TypedDict, total=False): + user: Required[str] + """ + A free-form string that identifies your end user; ensures this Session can + access other objects that have the same `user` scope. + """ + + workflow: Required[ChatSessionWorkflowParam] + """Workflow that powers the session.""" + + chatkit_configuration: ChatSessionChatKitConfigurationParam + """Optional overrides for ChatKit runtime configuration features""" + + expires_after: ChatSessionExpiresAfterParam + """Optional override for session expiration timing in seconds from creation. + + Defaults to 10 minutes. + """ + + rate_limits: ChatSessionRateLimitsParam + """Optional override for per-minute request limits. When omitted, defaults to 10.""" diff --git a/src/openai/types/beta/chatkit/thread_delete_response.py b/src/openai/types/beta/chatkit/thread_delete_response.py new file mode 100644 index 0000000000..03fdec9c2c --- /dev/null +++ b/src/openai/types/beta/chatkit/thread_delete_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
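# --------------------------------------------------------------------------
# Editorial sketch of creating a ChatKit session with the params defined
# above. The resource path `client.beta.chatkit.sessions.create` is inferred
# from the type layout (types/beta/chatkit/session_create_params.py) and may
# differ; the workflow ID is hypothetical.
from openai import OpenAI

client = OpenAI()

session = client.beta.chatkit.sessions.create(
    user="user_123",
    workflow={"id": "wf_abc123", "tracing": {"enabled": True}},
    expires_after={"anchor": "created_at", "seconds": 600},  # 10 minutes
    rate_limits={"max_requests_per_1_minute": 10},
    chatkit_configuration={
        "file_upload": {"enabled": True, "max_files": 5, "max_file_size": 64},
        "history": {"enabled": True, "recent_threads": 20},
    },
)
print(session.client_secret, session.expires_at, session.status)
# --------------------------------------------------------------------------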
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ThreadDeleteResponse"] + + +class ThreadDeleteResponse(BaseModel): + id: str + """Identifier of the deleted thread.""" + + deleted: bool + """Indicates that the thread has been deleted.""" + + object: Literal["chatkit.thread.deleted"] + """Type discriminator that is always `chatkit.thread.deleted`.""" diff --git a/src/openai/types/beta/chatkit/thread_list_items_params.py b/src/openai/types/beta/chatkit/thread_list_items_params.py new file mode 100644 index 0000000000..95c959d719 --- /dev/null +++ b/src/openai/types/beta/chatkit/thread_list_items_params.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ThreadListItemsParams"] + + +class ThreadListItemsParams(TypedDict, total=False): + after: str + """List items created after this thread item ID. + + Defaults to null for the first page. + """ + + before: str + """List items created before this thread item ID. + + Defaults to null for the newest results. + """ + + limit: int + """Maximum number of thread items to return. Defaults to 20.""" + + order: Literal["asc", "desc"] + """Sort order for results by creation time. Defaults to `desc`.""" diff --git a/src/openai/types/beta/chatkit/thread_list_params.py b/src/openai/types/beta/chatkit/thread_list_params.py new file mode 100644 index 0000000000..bb759c7ea3 --- /dev/null +++ b/src/openai/types/beta/chatkit/thread_list_params.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ThreadListParams"] + + +class ThreadListParams(TypedDict, total=False): + after: str + """List items created after this thread item ID. + + Defaults to null for the first page. + """ + + before: str + """List items created before this thread item ID. + + Defaults to null for the newest results. + """ + + limit: int + """Maximum number of thread items to return. Defaults to 20.""" + + order: Literal["asc", "desc"] + """Sort order for results by creation time. Defaults to `desc`.""" + + user: str + """Filter threads that belong to this user identifier. + + Defaults to null to return all users. + """ diff --git a/src/openai/types/beta/chatkit_workflow.py b/src/openai/types/beta/chatkit_workflow.py new file mode 100644 index 0000000000..00fbcf41ce --- /dev/null +++ b/src/openai/types/beta/chatkit_workflow.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Union, Optional + +from ..._models import BaseModel + +__all__ = ["ChatKitWorkflow", "Tracing"] + + +class Tracing(BaseModel): + enabled: bool + """Indicates whether tracing is enabled.""" + + +class ChatKitWorkflow(BaseModel): + id: str + """Identifier of the workflow backing the session.""" + + state_variables: Optional[Dict[str, Union[str, bool, float]]] = None + """State variable key-value pairs applied when invoking the workflow. + + Defaults to null when no overrides were provided. + """ + + tracing: Tracing + """Tracing settings applied to the workflow.""" + + version: Optional[str] = None + """Specific workflow version used for the session. + + Defaults to null when using the latest deployment. 
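# --------------------------------------------------------------------------
# Editorial sketch of the list params above. The resource methods
# (`threads.list`, `threads.list_items`) are assumptions inferred from the
# param type names and may differ; the user ID is a placeholder.
from openai import OpenAI

client = OpenAI()

for thread in client.beta.chatkit.threads.list(user="user_123", limit=20, order="desc"):
    print(thread.id, thread.status.type, thread.title)
    for item in client.beta.chatkit.threads.list_items(thread.id, limit=20):
        # `item.type` discriminates the thread-item union documented above
        if item.type == "chatkit.user_message":
            print("  user:", [c.text for c in item.content])
        elif item.type == "chatkit.assistant_message":
            print("  assistant:", [c.text for c in item.content])
# --------------------------------------------------------------------------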
+ """ diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index f478a92fbb..2e099a2e98 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -203,6 +203,8 @@ class Session(BaseModel): model: Optional[ Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index 8a477f9843..38465a56c3 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -81,6 +81,8 @@ class SessionCreateParams(TypedDict, total=False): """ model: Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 11929ab376..78d2e4bb18 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -225,6 +225,8 @@ class Session(BaseModel): model: Optional[ Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index e939f4cc79..c58b202a71 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -224,6 +224,8 @@ class Session(TypedDict, total=False): """ model: Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index cfd272f5ad..3190c8b308 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -114,6 +114,9 @@ class RunCreateParamsBase(TypedDict, total=False): supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ response_format: Optional[AssistantResponseFormatOptionParam] diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 212d933e9b..1a08a959db 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): """The role of the messages author, in this case `assistant`.""" audio: Optional[Audio] - """Data about a previous audio response from the model. - + """ + Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio). 
""" diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 2ae81dfbc2..8b0fdd04b3 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -192,6 +192,9 @@ class CompletionCreateParamsBase(TypedDict, total=False): supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ response_format: ResponseFormat diff --git a/src/openai/types/chat/completion_list_params.py b/src/openai/types/chat/completion_list_params.py index d93da834a3..32bd3f5c0a 100644 --- a/src/openai/types/chat/completion_list_params.py +++ b/src/openai/types/chat/completion_list_params.py @@ -18,9 +18,13 @@ class CompletionListParams(TypedDict, total=False): """Number of Chat Completions to retrieve.""" metadata: Optional[Metadata] - """A list of metadata keys to filter the Chat Completions by. Example: + """Set of 16 key-value pairs that can be attached to an object. - `metadata[key1]=value1&metadata[key2]=value2` + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: str diff --git a/src/openai/types/conversations/__init__.py b/src/openai/types/conversations/__init__.py index 538966db4f..9dec848737 100644 --- a/src/openai/types/conversations/__init__.py +++ b/src/openai/types/conversations/__init__.py @@ -3,15 +3,11 @@ from __future__ import annotations from .message import Message as Message -from .lob_prob import LobProb as LobProb from .conversation import Conversation as Conversation from .text_content import TextContent as TextContent -from .top_log_prob import TopLogProb as TopLogProb from .refusal_content import RefusalContent as RefusalContent from .item_list_params import ItemListParams as ItemListParams from .conversation_item import ConversationItem as ConversationItem -from .url_citation_body import URLCitationBody as URLCitationBody -from .file_citation_body import FileCitationBody as FileCitationBody from .input_file_content import InputFileContent as InputFileContent from .input_text_content import InputTextContent as InputTextContent from .item_create_params import ItemCreateParams as ItemCreateParams @@ -19,9 +15,13 @@ from .output_text_content import OutputTextContent as OutputTextContent from .item_retrieve_params import ItemRetrieveParams as ItemRetrieveParams from .summary_text_content import SummaryTextContent as SummaryTextContent +from .refusal_content_param import RefusalContentParam as RefusalContentParam from .conversation_item_list import ConversationItemList as ConversationItemList +from .input_file_content_param import InputFileContentParam as InputFileContentParam +from .input_text_content_param import InputTextContentParam as InputTextContentParam +from .input_image_content_param import InputImageContentParam as InputImageContentParam +from .output_text_content_param import OutputTextContentParam as OutputTextContentParam from .conversation_create_params import ConversationCreateParams as ConversationCreateParams from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams from .computer_screenshot_content 
import ComputerScreenshotContent as ComputerScreenshotContent -from .container_file_citation_body import ContainerFileCitationBody as ContainerFileCitationBody from .conversation_deleted_resource import ConversationDeletedResource as ConversationDeletedResource diff --git a/src/openai/types/conversations/container_file_citation_body.py b/src/openai/types/conversations/container_file_citation_body.py deleted file mode 100644 index ea460df2e2..0000000000 --- a/src/openai/types/conversations/container_file_citation_body.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ContainerFileCitationBody"] - - -class ContainerFileCitationBody(BaseModel): - container_id: str - """The ID of the container file.""" - - end_index: int - """The index of the last character of the container file citation in the message.""" - - file_id: str - """The ID of the file.""" - - filename: str - """The filename of the container file cited.""" - - start_index: int - """The index of the first character of the container file citation in the message.""" - - type: Literal["container_file_citation"] - """The type of the container file citation. Always `container_file_citation`.""" diff --git a/src/openai/types/conversations/conversation_create_params.py b/src/openai/types/conversations/conversation_create_params.py index 7ad3f8ae2d..5f38d2aca7 100644 --- a/src/openai/types/conversations/conversation_create_params.py +++ b/src/openai/types/conversations/conversation_create_params.py @@ -13,14 +13,17 @@ class ConversationCreateParams(TypedDict, total=False): items: Optional[Iterable[ResponseInputItemParam]] - """ - Initial items to include in the conversation context. You may add up to 20 items - at a time. + """Initial items to include in the conversation context. + + You may add up to 20 items at a time. """ metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. - Useful for storing additional information about the object in a structured - format. + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/conversations/conversation_item.py b/src/openai/types/conversations/conversation_item.py index a7cd355f36..9e9fb40033 100644 --- a/src/openai/types/conversations/conversation_item.py +++ b/src/openai/types/conversations/conversation_item.py @@ -177,12 +177,25 @@ class McpCall(BaseModel): type: Literal["mcp_call"] """The type of the item. Always `mcp_call`.""" + approval_request_id: Optional[str] = None + """ + Unique identifier for the MCP tool call approval request. Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + """ + error: Optional[str] = None """The error from the tool call, if any.""" output: Optional[str] = None """The output from the tool call.""" + status: Optional[Literal["in_progress", "completed", "incomplete", "calling", "failed"]] = None + """The status of the tool call. + + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. 
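# --------------------------------------------------------------------------
# Editorial sketch of the new MCP approval fields above. The conversation ID
# and model are placeholders, and the exact resource calls
# (`conversations.items.list`, `responses.create(conversation=...)`) are
# assumptions based on the surrounding type layout; per the docstring, the
# `approval_request_id` is echoed back in an `mcp_approval_response` input.
from openai import OpenAI

client = OpenAI()

for item in client.conversations.items.list("conv_abc123"):
    if item.type == "mcp_call" and item.status == "in_progress" and item.approval_request_id:
        # approve the pending tool call in a follow-up response
        client.responses.create(
            model="gpt-5",
            conversation="conv_abc123",
            input=[
                {
                    "type": "mcp_approval_response",
                    "approval_request_id": item.approval_request_id,
                    "approve": True,
                }
            ],
        )
# --------------------------------------------------------------------------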
+ """ + ConversationItem: TypeAlias = Annotated[ Union[ diff --git a/src/openai/types/conversations/conversation_update_params.py b/src/openai/types/conversations/conversation_update_params.py index f2aa42d833..1f0dd09e50 100644 --- a/src/openai/types/conversations/conversation_update_params.py +++ b/src/openai/types/conversations/conversation_update_params.py @@ -2,18 +2,21 @@ from __future__ import annotations -from typing import Dict +from typing import Optional from typing_extensions import Required, TypedDict +from ..shared_params.metadata import Metadata + __all__ = ["ConversationUpdateParams"] class ConversationUpdateParams(TypedDict, total=False): - metadata: Required[Dict[str, str]] + metadata: Required[Optional[Metadata]] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. Keys are - strings with a maximum length of 64 characters. Values are strings with a - maximum length of 512 characters. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/conversations/file_citation_body.py b/src/openai/types/conversations/file_citation_body.py deleted file mode 100644 index ea90ae381d..0000000000 --- a/src/openai/types/conversations/file_citation_body.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["FileCitationBody"] - - -class FileCitationBody(BaseModel): - file_id: str - """The ID of the file.""" - - filename: str - """The filename of the file cited.""" - - index: int - """The index of the file in the list of files.""" - - type: Literal["file_citation"] - """The type of the file citation. Always `file_citation`.""" diff --git a/src/openai/types/conversations/input_file_content.py b/src/openai/types/conversations/input_file_content.py index 6aef7a89d9..ca555d85fc 100644 --- a/src/openai/types/conversations/input_file_content.py +++ b/src/openai/types/conversations/input_file_content.py @@ -1,22 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel +from ..responses.response_input_file import ResponseInputFile __all__ = ["InputFileContent"] - -class InputFileContent(BaseModel): - file_id: Optional[str] = None - """The ID of the file to be sent to the model.""" - - type: Literal["input_file"] - """The type of the input item. Always `input_file`.""" - - file_url: Optional[str] = None - """The URL of the file to be sent to the model.""" - - filename: Optional[str] = None - """The name of the file to be sent to the model.""" +InputFileContent = ResponseInputFile diff --git a/src/openai/types/conversations/input_file_content_param.py b/src/openai/types/conversations/input_file_content_param.py new file mode 100644 index 0000000000..1ed8b8b9d1 --- /dev/null +++ b/src/openai/types/conversations/input_file_content_param.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from ..responses.response_input_file_param import ResponseInputFileParam + +InputFileContentParam = ResponseInputFileParam diff --git a/src/openai/types/conversations/input_image_content.py b/src/openai/types/conversations/input_image_content.py index f2587e0adc..4304323c3a 100644 --- a/src/openai/types/conversations/input_image_content.py +++ b/src/openai/types/conversations/input_image_content.py @@ -1,28 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel +from ..responses.response_input_image import ResponseInputImage __all__ = ["InputImageContent"] - -class InputImageContent(BaseModel): - detail: Literal["low", "high", "auto"] - """The detail level of the image to be sent to the model. - - One of `high`, `low`, or `auto`. Defaults to `auto`. - """ - - file_id: Optional[str] = None - """The ID of the file to be sent to the model.""" - - image_url: Optional[str] = None - """The URL of the image to be sent to the model. - - A fully qualified URL or base64 encoded image in a data URL. - """ - - type: Literal["input_image"] - """The type of the input item. Always `input_image`.""" +InputImageContent = ResponseInputImage diff --git a/src/openai/types/conversations/input_image_content_param.py b/src/openai/types/conversations/input_image_content_param.py new file mode 100644 index 0000000000..a0ef9f545c --- /dev/null +++ b/src/openai/types/conversations/input_image_content_param.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..responses.response_input_image_param import ResponseInputImageParam + +InputImageContentParam = ResponseInputImageParam diff --git a/src/openai/types/conversations/input_text_content.py b/src/openai/types/conversations/input_text_content.py index 5e2daebdc5..cab8b26cb1 100644 --- a/src/openai/types/conversations/input_text_content.py +++ b/src/openai/types/conversations/input_text_content.py @@ -1,15 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal - -from ..._models import BaseModel +from ..responses.response_input_text import ResponseInputText __all__ = ["InputTextContent"] - -class InputTextContent(BaseModel): - text: str - """The text input to the model.""" - - type: Literal["input_text"] - """The type of the input item. Always `input_text`.""" +InputTextContent = ResponseInputText diff --git a/src/openai/types/conversations/input_text_content_param.py b/src/openai/types/conversations/input_text_content_param.py new file mode 100644 index 0000000000..b1fd9a5f1c --- /dev/null +++ b/src/openai/types/conversations/input_text_content_param.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..responses.response_input_text_param import ResponseInputTextParam + +InputTextContentParam = ResponseInputTextParam diff --git a/src/openai/types/conversations/lob_prob.py b/src/openai/types/conversations/lob_prob.py deleted file mode 100644 index f7dcd62a5e..0000000000 --- a/src/openai/types/conversations/lob_prob.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List - -from ..._models import BaseModel -from .top_log_prob import TopLogProb - -__all__ = ["LobProb"] - - -class LobProb(BaseModel): - token: str - - bytes: List[int] - - logprob: float - - top_logprobs: List[TopLogProb] diff --git a/src/openai/types/conversations/message.py b/src/openai/types/conversations/message.py index a070cf2869..dbf5a14680 100644 --- a/src/openai/types/conversations/message.py +++ b/src/openai/types/conversations/message.py @@ -6,26 +6,36 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from .text_content import TextContent -from .refusal_content import RefusalContent -from .input_file_content import InputFileContent -from .input_text_content import InputTextContent -from .input_image_content import InputImageContent -from .output_text_content import OutputTextContent from .summary_text_content import SummaryTextContent from .computer_screenshot_content import ComputerScreenshotContent +from ..responses.response_input_file import ResponseInputFile +from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_image import ResponseInputImage +from ..responses.response_output_text import ResponseOutputText +from ..responses.response_output_refusal import ResponseOutputRefusal + +__all__ = ["Message", "Content", "ContentReasoningText"] + + +class ContentReasoningText(BaseModel): + text: str + """The reasoning text from the model.""" + + type: Literal["reasoning_text"] + """The type of the reasoning text. Always `reasoning_text`.""" -__all__ = ["Message", "Content"] Content: TypeAlias = Annotated[ Union[ - InputTextContent, - OutputTextContent, + ResponseInputText, + ResponseOutputText, TextContent, SummaryTextContent, - RefusalContent, - InputImageContent, + ContentReasoningText, + ResponseOutputRefusal, + ResponseInputImage, ComputerScreenshotContent, - InputFileContent, + ResponseInputFile, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/conversations/output_text_content.py b/src/openai/types/conversations/output_text_content.py index 2ffee76526..cfe9307d74 100644 --- a/src/openai/types/conversations/output_text_content.py +++ b/src/openai/types/conversations/output_text_content.py @@ -1,30 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias +from ..responses.response_output_text import ResponseOutputText -from ..._utils import PropertyInfo -from .lob_prob import LobProb -from ..._models import BaseModel -from .url_citation_body import URLCitationBody -from .file_citation_body import FileCitationBody -from .container_file_citation_body import ContainerFileCitationBody +__all__ = ["OutputTextContent"] -__all__ = ["OutputTextContent", "Annotation"] - -Annotation: TypeAlias = Annotated[ - Union[FileCitationBody, URLCitationBody, ContainerFileCitationBody], PropertyInfo(discriminator="type") -] - - -class OutputTextContent(BaseModel): - annotations: List[Annotation] - """The annotations of the text output.""" - - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. 
Always `output_text`.""" - - logprobs: Optional[List[LobProb]] = None +OutputTextContent = ResponseOutputText diff --git a/src/openai/types/conversations/output_text_content_param.py b/src/openai/types/conversations/output_text_content_param.py new file mode 100644 index 0000000000..dc3e2026f6 --- /dev/null +++ b/src/openai/types/conversations/output_text_content_param.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..responses.response_output_text_param import ResponseOutputTextParam + +OutputTextContentParam = ResponseOutputTextParam diff --git a/src/openai/types/conversations/refusal_content.py b/src/openai/types/conversations/refusal_content.py index 3c8bd5e35f..6206c267dc 100644 --- a/src/openai/types/conversations/refusal_content.py +++ b/src/openai/types/conversations/refusal_content.py @@ -1,15 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal - -from ..._models import BaseModel +from ..responses.response_output_refusal import ResponseOutputRefusal __all__ = ["RefusalContent"] - -class RefusalContent(BaseModel): - refusal: str - """The refusal explanation from the model.""" - - type: Literal["refusal"] - """The type of the refusal. Always `refusal`.""" +RefusalContent = ResponseOutputRefusal diff --git a/src/openai/types/conversations/refusal_content_param.py b/src/openai/types/conversations/refusal_content_param.py new file mode 100644 index 0000000000..9b83da5f2d --- /dev/null +++ b/src/openai/types/conversations/refusal_content_param.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..responses.response_output_refusal_param import ResponseOutputRefusalParam + +RefusalContentParam = ResponseOutputRefusalParam diff --git a/src/openai/types/conversations/summary_text_content.py b/src/openai/types/conversations/summary_text_content.py index 047769ed67..d357b15725 100644 --- a/src/openai/types/conversations/summary_text_content.py +++ b/src/openai/types/conversations/summary_text_content.py @@ -9,5 +9,7 @@ class SummaryTextContent(BaseModel): text: str + """A summary of the reasoning output from the model so far.""" type: Literal["summary_text"] + """The type of the object. Always `summary_text`.""" diff --git a/src/openai/types/conversations/top_log_prob.py b/src/openai/types/conversations/top_log_prob.py deleted file mode 100644 index fafca756ae..0000000000 --- a/src/openai/types/conversations/top_log_prob.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ..._models import BaseModel - -__all__ = ["TopLogProb"] - - -class TopLogProb(BaseModel): - token: str - - bytes: List[int] - - logprob: float diff --git a/src/openai/types/conversations/url_citation_body.py b/src/openai/types/conversations/url_citation_body.py deleted file mode 100644 index 1becb44bc0..0000000000 --- a/src/openai/types/conversations/url_citation_body.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["URLCitationBody"] - - -class URLCitationBody(BaseModel): - end_index: int - """The index of the last character of the URL citation in the message.""" - - start_index: int - """The index of the first character of the URL citation in the message.""" - - title: str - """The title of the web resource.""" - - type: Literal["url_citation"] - """The type of the URL citation. Always `url_citation`.""" - - url: str - """The URL of the web resource.""" diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index edf70c8ad4..a9f2fd0858 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -6,6 +6,7 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from ..shared.metadata import Metadata +from ..shared.reasoning_effort import ReasoningEffort from ..shared.response_format_text import ResponseFormatText from ..responses.easy_input_message import EasyInputMessage from ..responses.response_input_text import ResponseInputText @@ -167,6 +168,18 @@ class SamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + """ + response_format: Optional[SamplingParamsResponseFormat] = None """An object specifying the format that the model must output. diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index c14360ac80..e682e2db5e 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared_params.metadata import Metadata +from ..shared.reasoning_effort import ReasoningEffort from ..responses.easy_input_message_param import EasyInputMessageParam from ..shared_params.response_format_text import ResponseFormatText from ..responses.response_input_text_param import ResponseInputTextParam @@ -163,6 +164,18 @@ class SamplingParams(TypedDict, total=False): max_completion_tokens: int """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + """ + response_format: SamplingParamsResponseFormat """An object specifying the format that the model must output. 
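The `reasoning_effort` field added to `SamplingParams` above flows through the completions eval data source. Below is a minimal sketch of passing it when creating an eval run; it is an illustration under stated assumptions, not part of this change: `eval_123`, `file_123`, and `gpt-5` are placeholder identifiers for a pre-existing eval, an uploaded JSONL data file, and a reasoning-capable model.

# Sketch: create an eval run that constrains reasoning effort during sampling.
# All IDs and the model name below are placeholders, not values from this diff.
from openai import OpenAI

client = OpenAI()

run = client.evals.runs.create(
    eval_id="eval_123",  # placeholder: an eval created earlier
    name="reasoning-effort-sweep-low",
    data_source={
        "type": "completions",
        "model": "gpt-5",  # placeholder: any reasoning-capable model
        "source": {"type": "file_id", "id": "file_123"},  # placeholder JSONL file
        "input_messages": {
            "type": "template",
            # "{{item.question}}" is filled from each row of the source file
            "template": [{"role": "user", "content": "{{item.question}}"}],
        },
        "sampling_params": {
            "max_completion_tokens": 1024,
            "reasoning_effort": "low",  # field added in this change
        },
    },
)
print(run.id, run.status)

Because `SamplingParams` is a `TypedDict` with `total=False`, every key in the sketch is optional; only the fields you set are sent, and omitting `reasoning_effort` leaves the model's own default in effect.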
diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py index 44f9cfc453..084dd6ce5c 100644 --- a/src/openai/types/evals/run_cancel_response.py +++ b/src/openai/types/evals/run_cancel_response.py @@ -100,9 +100,15 @@ class DataSourceResponsesSourceResponses(BaseModel): """ reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ temperature: Optional[float] = None @@ -231,6 +237,18 @@ class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index ef9541ff0a..f114fae6a4 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -113,9 +113,15 @@ class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total """ reasoning_effort: Optional[ReasoningEffort] - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ temperature: Optional[float] @@ -249,6 +255,18 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total= max_completion_tokens: int """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. 
+ """ + seed: int """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index 70641d6db8..1343335e0d 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -100,9 +100,15 @@ class DataSourceResponsesSourceResponses(BaseModel): """ reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ temperature: Optional[float] = None @@ -231,6 +237,18 @@ class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index e31d570a84..7c32ce54a2 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -100,9 +100,15 @@ class DataSourceResponsesSourceResponses(BaseModel): """ reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ temperature: Optional[float] = None @@ -231,6 +237,18 @@ class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. 
+ """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index 62213d3edd..f1212c1671 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -100,9 +100,15 @@ class DataSourceResponsesSourceResponses(BaseModel): """ reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ temperature: Optional[float] = None @@ -231,6 +237,18 @@ class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/runs/output_item_list_response.py b/src/openai/types/evals/runs/output_item_list_response.py index 72b1049f7b..e88c21766f 100644 --- a/src/openai/types/evals/runs/output_item_list_response.py +++ b/src/openai/types/evals/runs/output_item_list_response.py @@ -1,13 +1,43 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import builtins -from typing import Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional from typing_extensions import Literal +from pydantic import Field as FieldInfo + from ...._models import BaseModel from ..eval_api_error import EvalAPIError -__all__ = ["OutputItemListResponse", "Sample", "SampleInput", "SampleOutput", "SampleUsage"] +__all__ = ["OutputItemListResponse", "Result", "Sample", "SampleInput", "SampleOutput", "SampleUsage"] + + +class Result(BaseModel): + name: str + """The name of the grader.""" + + passed: bool + """Whether the grader considered the output a pass.""" + + score: float + """The numeric score produced by the grader.""" + + sample: Optional[Dict[str, object]] = None + """Optional sample or intermediate data produced by the grader.""" + + type: Optional[str] = None + """The grader type (for example, "string-check-grader").""" + + if TYPE_CHECKING: + # Some versions of Pydantic <2.8.0 have a bug and don’t allow assigning a + # value to this field, so for compatibility we avoid doing it at runtime. + __pydantic_extra__: Dict[str, object] = FieldInfo(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + + # Stub to indicate that arbitrary properties are accepted. + # To access properties that are not valid identifiers you can use `getattr`, e.g. + # `getattr(obj, '$type')` + def __getattr__(self, attr: str) -> object: ... 
+ else: + __pydantic_extra__: Dict[str, object] class SampleInput(BaseModel): @@ -91,8 +121,8 @@ class OutputItemListResponse(BaseModel): object: Literal["eval.run.output_item"] """The type of the object. Always "eval.run.output_item".""" - results: List[Dict[str, builtins.object]] - """A list of results from the evaluation run.""" + results: List[Result] + """A list of grader results for this output item.""" run_id: str """The identifier of the evaluation run associated with this output item.""" diff --git a/src/openai/types/evals/runs/output_item_retrieve_response.py b/src/openai/types/evals/runs/output_item_retrieve_response.py index 63aab5565f..c728629b41 100644 --- a/src/openai/types/evals/runs/output_item_retrieve_response.py +++ b/src/openai/types/evals/runs/output_item_retrieve_response.py @@ -1,13 +1,43 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import builtins -from typing import Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional from typing_extensions import Literal +from pydantic import Field as FieldInfo + from ...._models import BaseModel from ..eval_api_error import EvalAPIError -__all__ = ["OutputItemRetrieveResponse", "Sample", "SampleInput", "SampleOutput", "SampleUsage"] +__all__ = ["OutputItemRetrieveResponse", "Result", "Sample", "SampleInput", "SampleOutput", "SampleUsage"] + + +class Result(BaseModel): + name: str + """The name of the grader.""" + + passed: bool + """Whether the grader considered the output a pass.""" + + score: float + """The numeric score produced by the grader.""" + + sample: Optional[Dict[str, object]] = None + """Optional sample or intermediate data produced by the grader.""" + + type: Optional[str] = None + """The grader type (for example, "string-check-grader").""" + + if TYPE_CHECKING: + # Some versions of Pydantic <2.8.0 have a bug and don’t allow assigning a + # value to this field, so for compatibility we avoid doing it at runtime. + __pydantic_extra__: Dict[str, object] = FieldInfo(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + + # Stub to indicate that arbitrary properties are accepted. + # To access properties that are not valid identifiers you can use `getattr`, e.g. + # `getattr(obj, '$type')` + def __getattr__(self, attr: str) -> object: ... + else: + __pydantic_extra__: Dict[str, object] class SampleInput(BaseModel): @@ -91,8 +121,8 @@ class OutputItemRetrieveResponse(BaseModel): object: Literal["eval.run.output_item"] """The type of the object. 
Always "eval.run.output_item".""" - results: List[Dict[str, builtins.object]] - """A list of results from the evaluation run.""" + results: List[Result] + """A list of grader results for this output item.""" run_id: str """The identifier of the evaluation run associated with this output item.""" diff --git a/src/openai/types/graders/score_model_grader.py b/src/openai/types/graders/score_model_grader.py index fc221b8e41..35e2dc1468 100644 --- a/src/openai/types/graders/score_model_grader.py +++ b/src/openai/types/graders/score_model_grader.py @@ -4,10 +4,18 @@ from typing_extensions import Literal, TypeAlias from ..._models import BaseModel +from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from ..responses.response_input_audio import ResponseInputAudio -__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] +__all__ = [ + "ScoreModelGrader", + "Input", + "InputContent", + "InputContentOutputText", + "InputContentInputImage", + "SamplingParams", +] class InputContentOutputText(BaseModel): @@ -51,6 +59,32 @@ class Input(BaseModel): """The type of the message input. Always `message`.""" +class SamplingParams(BaseModel): + max_completions_tokens: Optional[int] = None + """The maximum number of tokens the grader model may generate in its response.""" + + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + """ + + seed: Optional[int] = None + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] = None + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] = None + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + class ScoreModelGrader(BaseModel): input: List[Input] """The input text. This may include template strings.""" @@ -67,5 +101,5 @@ class ScoreModelGrader(BaseModel): range: Optional[List[float]] = None """The range of the score. 
Defaults to `[0, 1]`.""" - sampling_params: Optional[object] = None + sampling_params: Optional[SamplingParams] = None """The sampling parameters for the model.""" diff --git a/src/openai/types/graders/score_model_grader_param.py b/src/openai/types/graders/score_model_grader_param.py index 15100bb74b..168feeae13 100644 --- a/src/openai/types/graders/score_model_grader_param.py +++ b/src/openai/types/graders/score_model_grader_param.py @@ -2,13 +2,21 @@ from __future__ import annotations -from typing import Union, Iterable +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text_param import ResponseInputTextParam from ..responses.response_input_audio_param import ResponseInputAudioParam -__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] +__all__ = [ + "ScoreModelGraderParam", + "Input", + "InputContent", + "InputContentOutputText", + "InputContentInputImage", + "SamplingParams", +] class InputContentOutputText(TypedDict, total=False): @@ -57,6 +65,32 @@ class Input(TypedDict, total=False): """The type of the message input. Always `message`.""" +class SamplingParams(TypedDict, total=False): + max_completions_tokens: Optional[int] + """The maximum number of tokens the grader model may generate in its response.""" + + reasoning_effort: Optional[ReasoningEffort] + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + """ + + seed: Optional[int] + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + class ScoreModelGraderParam(TypedDict, total=False): input: Required[Iterable[Input]] """The input text. This may include template strings.""" @@ -73,5 +107,5 @@ class ScoreModelGraderParam(TypedDict, total=False): range: Iterable[float] """The range of the score. Defaults to `[0, 1]`.""" - sampling_params: object + sampling_params: SamplingParams """The sampling parameters for the model.""" diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py index 065d9789fc..2a8fab0f20 100644 --- a/src/openai/types/image_edit_params.py +++ b/src/openai/types/image_edit_params.py @@ -30,11 +30,11 @@ class ImageEditParamsBase(TypedDict, total=False): """ background: Optional[Literal["transparent", "opaque", "auto"]] - """Allows to set transparency for the background of the generated image(s). - - This parameter is only supported for `gpt-image-1`. Must be one of - `transparent`, `opaque` or `auto` (default value). When `auto` is used, the - model will automatically determine the best background for the image. + """ + Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. 
If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -44,7 +44,8 @@ class ImageEditParamsBase(TypedDict, total=False): """ Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and + `low`. Defaults to `low`. """ mask: FileTypes diff --git a/src/openai/types/image_generate_params.py b/src/openai/types/image_generate_params.py index e9e9292cc2..3270ca1d6e 100644 --- a/src/openai/types/image_generate_params.py +++ b/src/openai/types/image_generate_params.py @@ -19,11 +19,11 @@ class ImageGenerateParamsBase(TypedDict, total=False): """ background: Optional[Literal["transparent", "opaque", "auto"]] - """Allows to set transparency for the background of the generated image(s). - - This parameter is only supported for `gpt-image-1`. Must be one of - `transparent`, `opaque` or `auto` (default value). When `auto` is used, the - model will automatically determine the best background for the image. + """ + Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. diff --git a/src/openai/types/image_model.py b/src/openai/types/image_model.py index 7fed69ed82..22b1281fa9 100644 --- a/src/openai/types/image_model.py +++ b/src/openai/types/image_model.py @@ -4,4 +4,4 @@ __all__ = ["ImageModel"] -ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3", "gpt-image-1"] +ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3", "gpt-image-1", "gpt-image-1-mini"] diff --git a/src/openai/types/realtime/__init__.py b/src/openai/types/realtime/__init__.py index 2d947c8a2f..83e81a034a 100644 --- a/src/openai/types/realtime/__init__.py +++ b/src/openai/types/realtime/__init__.py @@ -3,8 +3,12 @@ from __future__ import annotations from .realtime_error import RealtimeError as RealtimeError +from .call_refer_params import CallReferParams as CallReferParams from .conversation_item import ConversationItem as ConversationItem from .realtime_response import RealtimeResponse as RealtimeResponse +from .call_accept_params import CallAcceptParams as CallAcceptParams +from .call_create_params import CallCreateParams as CallCreateParams +from .call_reject_params import CallRejectParams as CallRejectParams from .audio_transcription import AudioTranscription as AudioTranscription from .log_prob_properties import LogProbProperties as LogProbProperties from .realtime_truncation import RealtimeTruncation as RealtimeTruncation diff --git a/src/openai/types/realtime/audio_transcription.py b/src/openai/types/realtime/audio_transcription.py index cf662b3aa2..3e5c8e0cb4 100644 --- a/src/openai/types/realtime/audio_transcription.py +++ b/src/openai/types/realtime/audio_transcription.py @@ -17,13 +17,14 @@ class AudioTranscription(BaseModel): format will improve accuracy and latency. 
""" - model: Optional[Literal["whisper-1", "gpt-4o-transcribe-latest", "gpt-4o-mini-transcribe", "gpt-4o-transcribe"]] = ( - None - ) + model: Optional[ + Literal["whisper-1", "gpt-4o-mini-transcribe", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize"] + ] = None """The model to use for transcription. - Current options are `whisper-1`, `gpt-4o-transcribe-latest`, - `gpt-4o-mini-transcribe`, and `gpt-4o-transcribe`. + Current options are `whisper-1`, `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, + and `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need + diarization with speaker labels. """ prompt: Optional[str] = None @@ -31,6 +32,6 @@ class AudioTranscription(BaseModel): An optional text to guide the model's style or continue a previous audio segment. For `whisper-1`, the [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). - For `gpt-4o-transcribe` models, the prompt is a free text string, for example - "expect words related to technology". + For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the + prompt is a free text string, for example "expect words related to technology". """ diff --git a/src/openai/types/realtime/audio_transcription_param.py b/src/openai/types/realtime/audio_transcription_param.py index fb09f105b8..3b65e42c8f 100644 --- a/src/openai/types/realtime/audio_transcription_param.py +++ b/src/openai/types/realtime/audio_transcription_param.py @@ -16,11 +16,12 @@ class AudioTranscriptionParam(TypedDict, total=False): format will improve accuracy and latency. """ - model: Literal["whisper-1", "gpt-4o-transcribe-latest", "gpt-4o-mini-transcribe", "gpt-4o-transcribe"] + model: Literal["whisper-1", "gpt-4o-mini-transcribe", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize"] """The model to use for transcription. - Current options are `whisper-1`, `gpt-4o-transcribe-latest`, - `gpt-4o-mini-transcribe`, and `gpt-4o-transcribe`. + Current options are `whisper-1`, `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, + and `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need + diarization with speaker labels. """ prompt: str @@ -28,6 +29,6 @@ class AudioTranscriptionParam(TypedDict, total=False): An optional text to guide the model's style or continue a previous audio segment. For `whisper-1`, the [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). - For `gpt-4o-transcribe` models, the prompt is a free text string, for example - "expect words related to technology". + For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the + prompt is a free text string, for example "expect words related to technology". """ diff --git a/src/openai/types/realtime/call_accept_params.py b/src/openai/types/realtime/call_accept_params.py new file mode 100644 index 0000000000..0cfb01e7cf --- /dev/null +++ b/src/openai/types/realtime/call_accept_params.py @@ -0,0 +1,111 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal, Required, TypedDict + +from .realtime_truncation_param import RealtimeTruncationParam +from .realtime_audio_config_param import RealtimeAudioConfigParam +from .realtime_tools_config_param import RealtimeToolsConfigParam +from .realtime_tracing_config_param import RealtimeTracingConfigParam +from ..responses.response_prompt_param import ResponsePromptParam +from .realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam + +__all__ = ["CallAcceptParams"] + + +class CallAcceptParams(TypedDict, total=False): + type: Required[Literal["realtime"]] + """The type of session to create. Always `realtime` for the Realtime API.""" + + audio: RealtimeAudioConfigParam + """Configuration for input and output audio.""" + + include: List[Literal["item.input_audio_transcription.logprobs"]] + """Additional fields to include in server outputs. + + `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + model: Union[ + str, + Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-realtime-preview-2025-06-03", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + "gpt-realtime-mini", + "gpt-realtime-mini-2025-10-06", + "gpt-audio-mini", + "gpt-audio-mini-2025-10-06", + ], + ] + """The Realtime model used for this session.""" + + output_modalities: List[Literal["text", "audio"]] + """The set of modalities the model can respond with. + + It defaults to `["audio"]`, indicating that the model will respond with audio + plus a transcript. `["text"]` can be used to make the model respond with text + only. It is not possible to request both `text` and `audio` at the same time. + """ + + prompt: Optional[ResponsePromptParam] + """ + Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + + tool_choice: RealtimeToolChoiceConfigParam + """How the model chooses tools. + + Provide one of the string modes or force a specific function/MCP tool. + """ + + tools: RealtimeToolsConfigParam + """Tools available to the model.""" + + tracing: Optional[RealtimeTracingConfigParam] + """ + Realtime API can write session traces to the + [Traces Dashboard](/logs?api=traces). 
Set to null to disable tracing. Once + tracing is enabled for a session, the configuration cannot be modified. + + `auto` will create a trace for the session with default values for the workflow + name, group id, and metadata. + """ + + truncation: RealtimeTruncationParam + """ + Controls how the realtime conversation is truncated prior to model inference. + The default is `auto`. + """ diff --git a/src/openai/types/realtime/call_create_params.py b/src/openai/types/realtime/call_create_params.py new file mode 100644 index 0000000000..a378092a66 --- /dev/null +++ b/src/openai/types/realtime/call_create_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +from .realtime_session_create_request_param import RealtimeSessionCreateRequestParam + +__all__ = ["CallCreateParams"] + + +class CallCreateParams(TypedDict, total=False): + sdp: Required[str] + """WebRTC Session Description Protocol (SDP) offer generated by the caller.""" + + session: RealtimeSessionCreateRequestParam + """Realtime session object configuration.""" diff --git a/src/openai/types/realtime/call_refer_params.py b/src/openai/types/realtime/call_refer_params.py new file mode 100644 index 0000000000..3d8623855b --- /dev/null +++ b/src/openai/types/realtime/call_refer_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["CallReferParams"] + + +class CallReferParams(TypedDict, total=False): + target_uri: Required[str] + """URI that should appear in the SIP Refer-To header. + + Supports values like `tel:+14155550123` or `sip:agent@example.com`. + """ diff --git a/src/openai/types/realtime/call_reject_params.py b/src/openai/types/realtime/call_reject_params.py new file mode 100644 index 0000000000..f12222cded --- /dev/null +++ b/src/openai/types/realtime/call_reject_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["CallRejectParams"] + + +class CallRejectParams(TypedDict, total=False): + status_code: int + """SIP response code to send back to the caller. + + Defaults to `603` (Decline) when omitted. + """ diff --git a/src/openai/types/realtime/realtime_connect_params.py b/src/openai/types/realtime/realtime_connect_params.py index 76474f3de4..950f36212f 100644 --- a/src/openai/types/realtime/realtime_connect_params.py +++ b/src/openai/types/realtime/realtime_connect_params.py @@ -2,10 +2,12 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing_extensions import TypedDict __all__ = ["RealtimeConnectParams"] class RealtimeConnectParams(TypedDict, total=False): - model: Required[str] + call_id: str + + model: str diff --git a/src/openai/types/realtime/realtime_mcp_tool_call.py b/src/openai/types/realtime/realtime_mcp_tool_call.py index 533175e55b..019aee25c0 100644 --- a/src/openai/types/realtime/realtime_mcp_tool_call.py +++ b/src/openai/types/realtime/realtime_mcp_tool_call.py @@ -30,8 +30,8 @@ class RealtimeMcpToolCall(BaseModel): server_label: str """The label of the MCP server running the tool.""" - type: Literal["mcp_tool_call"] - """The type of the item. 
Always `mcp_tool_call`.""" + type: Literal["mcp_call"] + """The type of the item. Always `mcp_call`.""" approval_request_id: Optional[str] = None """The ID of an associated approval request, if any.""" diff --git a/src/openai/types/realtime/realtime_mcp_tool_call_param.py b/src/openai/types/realtime/realtime_mcp_tool_call_param.py index afdc9d1d17..0ba16d3dc1 100644 --- a/src/openai/types/realtime/realtime_mcp_tool_call_param.py +++ b/src/openai/types/realtime/realtime_mcp_tool_call_param.py @@ -27,8 +27,8 @@ class RealtimeMcpToolCallParam(TypedDict, total=False): server_label: Required[str] """The label of the MCP server running the tool.""" - type: Required[Literal["mcp_tool_call"]] - """The type of the item. Always `mcp_tool_call`.""" + type: Required[Literal["mcp_call"]] + """The type of the item. Always `mcp_call`.""" approval_request_id: Optional[str] """The ID of an associated approval request, if any.""" diff --git a/src/openai/types/realtime/realtime_response_create_params.py b/src/openai/types/realtime/realtime_response_create_params.py index 4dfd1fd386..e8486220bf 100644 --- a/src/openai/types/realtime/realtime_response_create_params.py +++ b/src/openai/types/realtime/realtime_response_create_params.py @@ -83,8 +83,8 @@ class RealtimeResponseCreateParams(BaseModel): """ prompt: Optional[ResponsePrompt] = None - """Reference to a prompt template and its variables. - + """ + Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). """ diff --git a/src/openai/types/realtime/realtime_response_create_params_param.py b/src/openai/types/realtime/realtime_response_create_params_param.py index eceffcccb7..116384bd82 100644 --- a/src/openai/types/realtime/realtime_response_create_params_param.py +++ b/src/openai/types/realtime/realtime_response_create_params_param.py @@ -84,8 +84,8 @@ class RealtimeResponseCreateParamsParam(TypedDict, total=False): """ prompt: Optional[ResponsePromptParam] - """Reference to a prompt template and its variables. - + """ + Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). """ diff --git a/src/openai/types/realtime/realtime_session_create_request.py b/src/openai/types/realtime/realtime_session_create_request.py index 578bc43821..bc205bd3b5 100644 --- a/src/openai/types/realtime/realtime_session_create_request.py +++ b/src/openai/types/realtime/realtime_session_create_request.py @@ -62,6 +62,10 @@ class RealtimeSessionCreateRequest(BaseModel): "gpt-4o-realtime-preview-2025-06-03", "gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", + "gpt-realtime-mini", + "gpt-realtime-mini-2025-10-06", + "gpt-audio-mini", + "gpt-audio-mini-2025-10-06", ], None, ] = None @@ -76,8 +80,8 @@ class RealtimeSessionCreateRequest(BaseModel): """ prompt: Optional[ResponsePrompt] = None - """Reference to a prompt template and its variables. - + """ + Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). 
""" diff --git a/src/openai/types/realtime/realtime_session_create_request_param.py b/src/openai/types/realtime/realtime_session_create_request_param.py index 5f7819fa61..d1fa2b35d2 100644 --- a/src/openai/types/realtime/realtime_session_create_request_param.py +++ b/src/openai/types/realtime/realtime_session_create_request_param.py @@ -63,6 +63,10 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False): "gpt-4o-realtime-preview-2025-06-03", "gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", + "gpt-realtime-mini", + "gpt-realtime-mini-2025-10-06", + "gpt-audio-mini", + "gpt-audio-mini-2025-10-06", ], ] """The Realtime model used for this session.""" @@ -76,8 +80,8 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False): """ prompt: Optional[ResponsePromptParam] - """Reference to a prompt template and its variables. - + """ + Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). """ diff --git a/src/openai/types/realtime/realtime_session_create_response.py b/src/openai/types/realtime/realtime_session_create_response.py index 8d7bfd6d8e..bb6b94e900 100644 --- a/src/openai/types/realtime/realtime_session_create_response.py +++ b/src/openai/types/realtime/realtime_session_create_response.py @@ -415,6 +415,10 @@ class RealtimeSessionCreateResponse(BaseModel): "gpt-4o-realtime-preview-2025-06-03", "gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", + "gpt-realtime-mini", + "gpt-realtime-mini-2025-10-06", + "gpt-audio-mini", + "gpt-audio-mini-2025-10-06", ], None, ] = None @@ -429,8 +433,8 @@ class RealtimeSessionCreateResponse(BaseModel): """ prompt: Optional[ResponsePrompt] = None - """Reference to a prompt template and its variables. - + """ + Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). 
""" diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index d59f0a74b8..fd70836e50 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -62,6 +62,7 @@ from .tool_choice_types_param import ToolChoiceTypesParam as ToolChoiceTypesParam from .web_search_preview_tool import WebSearchPreviewTool as WebSearchPreviewTool from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageParam +from .input_token_count_params import InputTokenCountParams as InputTokenCountParams from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent @@ -74,6 +75,7 @@ from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent from .tool_choice_allowed_param import ToolChoiceAllowedParam as ToolChoiceAllowedParam +from .input_token_count_response import InputTokenCountResponse as InputTokenCountResponse from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent from .response_input_audio_param import ResponseInputAudioParam as ResponseInputAudioParam @@ -85,10 +87,13 @@ from .response_conversation_param import ResponseConversationParam as ResponseConversationParam from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall +from .response_input_file_content import ResponseInputFileContent as ResponseInputFileContent from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem +from .response_input_text_content import ResponseInputTextContent as ResponseInputTextContent from .response_refusal_done_event import ResponseRefusalDoneEvent as ResponseRefusalDoneEvent from .response_function_web_search import ResponseFunctionWebSearch as ResponseFunctionWebSearch from .response_input_content_param import ResponseInputContentParam as ResponseInputContentParam +from .response_input_image_content import ResponseInputImageContent as ResponseInputImageContent from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam @@ -106,8 +111,12 @@ from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam +from .response_input_file_content_param import ResponseInputFileContentParam as ResponseInputFileContentParam +from .response_input_text_content_param import ResponseInputTextContentParam as ResponseInputTextContentParam from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent as ResponseMcpCallCompletedEvent +from .response_function_call_output_item import ResponseFunctionCallOutputItem as ResponseFunctionCallOutputItem from 
.response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam +from .response_input_image_content_param import ResponseInputImageContentParam as ResponseInputImageContentParam from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent as ResponseReasoningTextDoneEvent from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList @@ -131,6 +140,9 @@ from .response_format_text_json_schema_config import ( ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig, ) +from .response_function_call_output_item_list import ( + ResponseFunctionCallOutputItemList as ResponseFunctionCallOutputItemList, +) from .response_function_tool_call_output_item import ( ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem, ) @@ -143,6 +155,9 @@ from .response_mcp_list_tools_completed_event import ( ResponseMcpListToolsCompletedEvent as ResponseMcpListToolsCompletedEvent, ) +from .response_function_call_output_item_param import ( + ResponseFunctionCallOutputItemParam as ResponseFunctionCallOutputItemParam, +) from .response_image_gen_call_generating_event import ( ResponseImageGenCallGeneratingEvent as ResponseImageGenCallGeneratingEvent, ) @@ -212,6 +227,9 @@ from .response_format_text_json_schema_config_param import ( ResponseFormatTextJSONSchemaConfigParam as ResponseFormatTextJSONSchemaConfigParam, ) +from .response_function_call_output_item_list_param import ( + ResponseFunctionCallOutputItemListParam as ResponseFunctionCallOutputItemListParam, +) from .response_code_interpreter_call_code_done_event import ( ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent, ) diff --git a/src/openai/types/responses/input_token_count_params.py b/src/openai/types/responses/input_token_count_params.py new file mode 100644 index 0000000000..d442a2d171 --- /dev/null +++ b/src/openai/types/responses/input_token_count_params.py @@ -0,0 +1,138 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable, Optional +from typing_extensions import Literal, TypeAlias, TypedDict + +from .tool_param import ToolParam +from .tool_choice_options import ToolChoiceOptions +from .tool_choice_mcp_param import ToolChoiceMcpParam +from .tool_choice_types_param import ToolChoiceTypesParam +from ..shared_params.reasoning import Reasoning +from .tool_choice_custom_param import ToolChoiceCustomParam +from .response_input_item_param import ResponseInputItemParam +from .tool_choice_allowed_param import ToolChoiceAllowedParam +from .tool_choice_function_param import ToolChoiceFunctionParam +from .response_conversation_param import ResponseConversationParam +from .response_format_text_config_param import ResponseFormatTextConfigParam + +__all__ = ["InputTokenCountParams", "Conversation", "Text", "ToolChoice"] + + +class InputTokenCountParams(TypedDict, total=False): + conversation: Optional[Conversation] + """The conversation that this response belongs to. + + Items from this conversation are prepended to `input_items` for this response + request. Input items and output items from this response are automatically added + to this conversation after this response completes. 
+ """ + + input: Union[str, Iterable[ResponseInputItemParam], None] + """Text, image, or file inputs to the model, used to generate a response""" + + instructions: Optional[str] + """ + A system (or developer) message inserted into the model's context. When used + along with `previous_response_id`, the instructions from a previous response + will not be carried over to the next response. This makes it simple to swap out + system (or developer) messages in new responses. + """ + + model: Optional[str] + """Model ID used to generate the response, like `gpt-4o` or `o3`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + """ + + parallel_tool_calls: Optional[bool] + """Whether to allow the model to run tool calls in parallel.""" + + previous_response_id: Optional[str] + """The unique ID of the previous response to the model. + + Use this to create multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + """ + + reasoning: Optional[Reasoning] + """ + **gpt-5 and o-series models only** Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + + text: Optional[Text] + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + + tool_choice: Optional[ToolChoice] + """ + How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + """ + + tools: Optional[Iterable[ToolParam]] + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + """ + + truncation: Literal["auto", "disabled"] + """The truncation strategy to use for the model response. + + - `auto`: If the input to this Response exceeds the model's context window size, + the model will truncate the response to fit the context window by dropping + items from the beginning of the conversation. - `disabled` (default): If the + input size will exceed the context window size for a model, the request will + fail with a 400 error. + """ + + +Conversation: TypeAlias = Union[str, ResponseConversationParam] + + +class Text(TypedDict, total=False): + format: ResponseFormatTextConfigParam + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. 
+ + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ + + +ToolChoice: TypeAlias = Union[ + ToolChoiceOptions, + ToolChoiceAllowedParam, + ToolChoiceTypesParam, + ToolChoiceFunctionParam, + ToolChoiceMcpParam, + ToolChoiceCustomParam, +] diff --git a/src/openai/types/responses/input_token_count_response.py b/src/openai/types/responses/input_token_count_response.py new file mode 100644 index 0000000000..30ddfc1217 --- /dev/null +++ b/src/openai/types/responses/input_token_count_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputTokenCountResponse"] + + +class InputTokenCountResponse(BaseModel): + input_tokens: int + + object: Literal["response.input_tokens"] diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 423b6f20f1..a1133a41f5 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -180,8 +180,8 @@ class Response(BaseModel): """ prompt: Optional[ResponsePrompt] = None - """Reference to a prompt template and its variables. - + """ + Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). """ diff --git a/src/openai/types/responses/response_code_interpreter_tool_call.py b/src/openai/types/responses/response_code_interpreter_tool_call.py index 257937118b..b651581520 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call.py @@ -14,12 +14,12 @@ class OutputLogs(BaseModel): """The logs output from the code interpreter.""" type: Literal["logs"] - """The type of the output. Always 'logs'.""" + """The type of the output. Always `logs`.""" class OutputImage(BaseModel): type: Literal["image"] - """The type of the output. Always 'image'.""" + """The type of the output. Always `image`.""" url: str """The URL of the image output from the code interpreter.""" @@ -39,9 +39,9 @@ class ResponseCodeInterpreterToolCall(BaseModel): """The ID of the container used to run the code.""" outputs: Optional[List[Output]] = None - """The outputs generated by the code interpreter, such as logs or images. - - Can be null if no outputs are available. + """ + The outputs generated by the code interpreter, such as logs or images. Can be + null if no outputs are available. """ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] diff --git a/src/openai/types/responses/response_code_interpreter_tool_call_param.py b/src/openai/types/responses/response_code_interpreter_tool_call_param.py index 435091001f..d402b872a4 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call_param.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call_param.py @@ -13,12 +13,12 @@ class OutputLogs(TypedDict, total=False): """The logs output from the code interpreter.""" type: Required[Literal["logs"]] - """The type of the output. Always 'logs'.""" + """The type of the output. Always `logs`.""" class OutputImage(TypedDict, total=False): type: Required[Literal["image"]] - """The type of the output. Always 'image'.""" + """The type of the output. 
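Taken together, the new `InputTokenCountParams` and `InputTokenCountResponse` above define the request and response shapes for counting input tokens before generating anything. A minimal sketch of calling the endpoint, assuming the client exposes it at `responses.input_tokens.count` (the path implied by the feature name in the changelog; the model and prompt values are placeholders):

from openai import OpenAI

client = OpenAI()

# Count the tokens a request would consume, without generating a response.
count = client.responses.input_tokens.count(
    model="gpt-5",
    instructions="Answer in one short paragraph.",
    input="Summarize the difference between TypedDict and BaseModel.",
)

print(count.object)        # "response.input_tokens"
print(count.input_tokens)  # e.g. 42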
Always `image`.""" url: Required[str] """The URL of the image output from the code interpreter.""" @@ -38,9 +38,9 @@ class ResponseCodeInterpreterToolCallParam(TypedDict, total=False): """The ID of the container used to run the code.""" outputs: Required[Optional[Iterable[Output]]] - """The outputs generated by the code interpreter, such as logs or images. - - Can be null if no outputs are available. + """ + The outputs generated by the code interpreter, such as logs or images. Can be + null if no outputs are available. """ status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]] diff --git a/src/openai/types/responses/response_computer_tool_call.py b/src/openai/types/responses/response_computer_tool_call.py index 994837567a..f1476fa0fb 100644 --- a/src/openai/types/responses/response_computer_tool_call.py +++ b/src/openai/types/responses/response_computer_tool_call.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo @@ -31,10 +31,7 @@ class ActionClick(BaseModel): """ type: Literal["click"] - """Specifies the event type. - - For a click action, this property is always set to `click`. - """ + """Specifies the event type. For a click action, this property is always `click`.""" x: int """The x-coordinate where the click occurred.""" @@ -181,10 +178,10 @@ class PendingSafetyCheck(BaseModel): id: str """The ID of the pending safety check.""" - code: str + code: Optional[str] = None """The type of the pending safety check.""" - message: str + message: Optional[str] = None """Details about the pending safety check.""" diff --git a/src/openai/types/responses/response_computer_tool_call_output_item.py b/src/openai/types/responses/response_computer_tool_call_output_item.py index a2dd68f579..e1ac358cc6 100644 --- a/src/openai/types/responses/response_computer_tool_call_output_item.py +++ b/src/openai/types/responses/response_computer_tool_call_output_item.py @@ -13,10 +13,10 @@ class AcknowledgedSafetyCheck(BaseModel): id: str """The ID of the pending safety check.""" - code: str + code: Optional[str] = None """The type of the pending safety check.""" - message: str + message: Optional[str] = None """Details about the pending safety check.""" diff --git a/src/openai/types/responses/response_computer_tool_call_param.py b/src/openai/types/responses/response_computer_tool_call_param.py index 0be63db2fe..228f76bac9 100644 --- a/src/openai/types/responses/response_computer_tool_call_param.py +++ b/src/openai/types/responses/response_computer_tool_call_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Iterable +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..._types import SequenceNotStr @@ -32,10 +32,7 @@ class ActionClick(TypedDict, total=False): """ type: Required[Literal["click"]] - """Specifies the event type. - - For a click action, this property is always set to `click`. - """ + """Specifies the event type. 
For a click action, this property is always `click`.""" x: Required[int] """The x-coordinate where the click occurred.""" @@ -179,10 +176,10 @@ class PendingSafetyCheck(TypedDict, total=False): id: Required[str] """The ID of the pending safety check.""" - code: Required[str] + code: Optional[str] """The type of the pending safety check.""" - message: Required[str] + message: Optional[str] """Details about the pending safety check.""" diff --git a/src/openai/types/responses/response_content_part_added_event.py b/src/openai/types/responses/response_content_part_added_event.py index 11e0ac7c92..c78e80d1c4 100644 --- a/src/openai/types/responses/response_content_part_added_event.py +++ b/src/openai/types/responses/response_content_part_added_event.py @@ -8,9 +8,20 @@ from .response_output_text import ResponseOutputText from .response_output_refusal import ResponseOutputRefusal -__all__ = ["ResponseContentPartAddedEvent", "Part"] +__all__ = ["ResponseContentPartAddedEvent", "Part", "PartReasoningText"] -Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")] + +class PartReasoningText(BaseModel): + text: str + """The reasoning text from the model.""" + + type: Literal["reasoning_text"] + """The type of the reasoning text. Always `reasoning_text`.""" + + +Part: TypeAlias = Annotated[ + Union[ResponseOutputText, ResponseOutputRefusal, PartReasoningText], PropertyInfo(discriminator="type") +] class ResponseContentPartAddedEvent(BaseModel): diff --git a/src/openai/types/responses/response_content_part_done_event.py b/src/openai/types/responses/response_content_part_done_event.py index e1b411bb45..732f2303ef 100644 --- a/src/openai/types/responses/response_content_part_done_event.py +++ b/src/openai/types/responses/response_content_part_done_event.py @@ -8,9 +8,20 @@ from .response_output_text import ResponseOutputText from .response_output_refusal import ResponseOutputRefusal -__all__ = ["ResponseContentPartDoneEvent", "Part"] +__all__ = ["ResponseContentPartDoneEvent", "Part", "PartReasoningText"] -Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")] + +class PartReasoningText(BaseModel): + text: str + """The reasoning text from the model.""" + + type: Literal["reasoning_text"] + """The type of the reasoning text. Always `reasoning_text`.""" + + +Part: TypeAlias = Annotated[ + Union[ResponseOutputText, ResponseOutputRefusal, PartReasoningText], PropertyInfo(discriminator="type") +] class ResponseContentPartDoneEvent(BaseModel): diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index af0d5e7483..ba5c45ffee 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -134,8 +134,8 @@ class ResponseCreateParamsBase(TypedDict, total=False): """ prompt: Optional[ResponsePromptParam] - """Reference to a prompt template and its variables. - + """ + Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). 
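The `PartReasoningText` member added to the `Part` union in both content-part events above lets a stream surface reasoning text as a content part. A hedged sketch of consuming it, assuming the chosen model and deployment actually emit raw reasoning text parts (the `response.reasoning_text.delta` event pairs with the `ResponseReasoningTextDoneEvent` already exported by this package; model and prompt are placeholders):

from openai import OpenAI

client = OpenAI()

stream = client.responses.create(
    model="gpt-5",  # placeholder; not every model emits raw reasoning text parts
    input="Why is the sum of two even integers always even?",
    reasoning={"effort": "medium"},
    stream=True,
)

for event in stream:
    # The widened Part union now carries a `reasoning_text` member.
    if event.type == "response.content_part.added" and event.part.type == "reasoning_text":
        print("[reasoning part opened]")
    elif event.type == "response.reasoning_text.delta":
        print(event.delta, end="", flush=True)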
""" diff --git a/src/openai/types/responses/response_custom_tool_call_output.py b/src/openai/types/responses/response_custom_tool_call_output.py index a2b4cc3000..9db9e7e5cf 100644 --- a/src/openai/types/responses/response_custom_tool_call_output.py +++ b/src/openai/types/responses/response_custom_tool_call_output.py @@ -1,19 +1,30 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional -from typing_extensions import Literal +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias +from ..._utils import PropertyInfo from ..._models import BaseModel +from .response_input_file import ResponseInputFile +from .response_input_text import ResponseInputText +from .response_input_image import ResponseInputImage -__all__ = ["ResponseCustomToolCallOutput"] +__all__ = ["ResponseCustomToolCallOutput", "OutputOutputContentList"] + +OutputOutputContentList: TypeAlias = Annotated[ + Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type") +] class ResponseCustomToolCallOutput(BaseModel): call_id: str """The call ID, used to map this custom tool call output to a custom tool call.""" - output: str - """The output from the custom tool call generated by your code.""" + output: Union[str, List[OutputOutputContentList]] + """ + The output from the custom tool call generated by your code. Can be a string or + an list of output content. + """ type: Literal["custom_tool_call_output"] """The type of the custom tool call output. Always `custom_tool_call_output`.""" diff --git a/src/openai/types/responses/response_custom_tool_call_output_param.py b/src/openai/types/responses/response_custom_tool_call_output_param.py index d52c525467..e967a37cff 100644 --- a/src/openai/types/responses/response_custom_tool_call_output_param.py +++ b/src/openai/types/responses/response_custom_tool_call_output_param.py @@ -2,17 +2,27 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict -__all__ = ["ResponseCustomToolCallOutputParam"] +from .response_input_file_param import ResponseInputFileParam +from .response_input_text_param import ResponseInputTextParam +from .response_input_image_param import ResponseInputImageParam + +__all__ = ["ResponseCustomToolCallOutputParam", "OutputOutputContentList"] + +OutputOutputContentList: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam] class ResponseCustomToolCallOutputParam(TypedDict, total=False): call_id: Required[str] """The call ID, used to map this custom tool call output to a custom tool call.""" - output: Required[str] - """The output from the custom tool call generated by your code.""" + output: Required[Union[str, Iterable[OutputOutputContentList]]] + """ + The output from the custom tool call generated by your code. Can be a string or + an list of output content. + """ type: Required[Literal["custom_tool_call_output"]] """The type of the custom tool call output. 
Always `custom_tool_call_output`.""" diff --git a/src/openai/types/responses/response_function_call_arguments_done_event.py b/src/openai/types/responses/response_function_call_arguments_done_event.py index 875e7a6875..4ee5ed7fe1 100644 --- a/src/openai/types/responses/response_function_call_arguments_done_event.py +++ b/src/openai/types/responses/response_function_call_arguments_done_event.py @@ -14,6 +14,9 @@ class ResponseFunctionCallArgumentsDoneEvent(BaseModel): item_id: str """The ID of the item.""" + name: str + """The name of the function that was called.""" + output_index: int """The index of the output item.""" diff --git a/src/openai/types/responses/response_function_call_output_item.py b/src/openai/types/responses/response_function_call_output_item.py new file mode 100644 index 0000000000..41898f9eda --- /dev/null +++ b/src/openai/types/responses/response_function_call_output_item.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .response_input_file_content import ResponseInputFileContent +from .response_input_text_content import ResponseInputTextContent +from .response_input_image_content import ResponseInputImageContent + +__all__ = ["ResponseFunctionCallOutputItem"] + +ResponseFunctionCallOutputItem: TypeAlias = Annotated[ + Union[ResponseInputTextContent, ResponseInputImageContent, ResponseInputFileContent], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/responses/response_function_call_output_item_list.py b/src/openai/types/responses/response_function_call_output_item_list.py new file mode 100644 index 0000000000..13db577160 --- /dev/null +++ b/src/openai/types/responses/response_function_call_output_item_list.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import TypeAlias + +from .response_function_call_output_item import ResponseFunctionCallOutputItem + +__all__ = ["ResponseFunctionCallOutputItemList"] + +ResponseFunctionCallOutputItemList: TypeAlias = List[ResponseFunctionCallOutputItem] diff --git a/src/openai/types/responses/response_function_call_output_item_list_param.py b/src/openai/types/responses/response_function_call_output_item_list_param.py new file mode 100644 index 0000000000..8c286d3cf0 --- /dev/null +++ b/src/openai/types/responses/response_function_call_output_item_list_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union +from typing_extensions import TypeAlias + +from .response_input_file_content_param import ResponseInputFileContentParam +from .response_input_text_content_param import ResponseInputTextContentParam +from .response_input_image_content_param import ResponseInputImageContentParam + +__all__ = ["ResponseFunctionCallOutputItemListParam", "ResponseFunctionCallOutputItemParam"] + +ResponseFunctionCallOutputItemParam: TypeAlias = Union[ + ResponseInputTextContentParam, ResponseInputImageContentParam, ResponseInputFileContentParam +] + +ResponseFunctionCallOutputItemListParam: TypeAlias = List[ResponseFunctionCallOutputItemParam]
diff --git a/src/openai/types/responses/response_function_call_output_item_param.py b/src/openai/types/responses/response_function_call_output_item_param.py new file mode 100644 index 0000000000..2a703cac1e --- /dev/null +++ b/src/openai/types/responses/response_function_call_output_item_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .response_input_file_content_param import ResponseInputFileContentParam +from .response_input_text_content_param import ResponseInputTextContentParam +from .response_input_image_content_param import ResponseInputImageContentParam + +__all__ = ["ResponseFunctionCallOutputItemParam"] + +ResponseFunctionCallOutputItemParam: TypeAlias = Union[ + ResponseInputTextContentParam, ResponseInputImageContentParam, ResponseInputFileContentParam +]
diff --git a/src/openai/types/responses/response_function_tool_call_output_item.py b/src/openai/types/responses/response_function_tool_call_output_item.py index 4c8c41a6fe..1a2c848cb3 100644 --- a/src/openai/types/responses/response_function_tool_call_output_item.py +++ b/src/openai/types/responses/response_function_tool_call_output_item.py @@ -1,11 +1,19 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional -from typing_extensions import Literal +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias +from ..._utils import PropertyInfo from ..._models import BaseModel +from .response_input_file import ResponseInputFile +from .response_input_text import ResponseInputText +from .response_input_image import ResponseInputImage -__all__ = ["ResponseFunctionToolCallOutputItem"] +__all__ = ["ResponseFunctionToolCallOutputItem", "OutputOutputContentList"] + +OutputOutputContentList: TypeAlias = Annotated[ + Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type") +] class ResponseFunctionToolCallOutputItem(BaseModel): @@ -15,8 +23,11 @@ class ResponseFunctionToolCallOutputItem(BaseModel): call_id: str """The unique ID of the function tool call generated by the model.""" - output: str - """A JSON string of the output of the function tool call.""" + output: Union[str, List[OutputOutputContentList]] + """ + The output from the function call generated by your code. Can be a string or a + list of output content. + """ type: Literal["function_call_output"] """The type of the function tool call output.
Always `function_call_output`.""" diff --git a/src/openai/types/responses/response_includable.py b/src/openai/types/responses/response_includable.py index c17a02560f..675c83405a 100644 --- a/src/openai/types/responses/response_includable.py +++ b/src/openai/types/responses/response_includable.py @@ -5,10 +5,12 @@ __all__ = ["ResponseIncludable"] ResponseIncludable: TypeAlias = Literal[ - "code_interpreter_call.outputs", - "computer_call_output.output.image_url", "file_search_call.results", + "web_search_call.results", + "web_search_call.action.sources", "message.input_image.image_url", - "message.output_text.logprobs", + "computer_call_output.output.image_url", + "code_interpreter_call.outputs", "reasoning.encrypted_content", + "message.output_text.logprobs", ] diff --git a/src/openai/types/responses/response_input_file_content.py b/src/openai/types/responses/response_input_file_content.py new file mode 100644 index 0000000000..d832bb0e26 --- /dev/null +++ b/src/openai/types/responses/response_input_file_content.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseInputFileContent"] + + +class ResponseInputFileContent(BaseModel): + type: Literal["input_file"] + """The type of the input item. Always `input_file`.""" + + file_data: Optional[str] = None + """The base64-encoded data of the file to be sent to the model.""" + + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + file_url: Optional[str] = None + """The URL of the file to be sent to the model.""" + + filename: Optional[str] = None + """The name of the file to be sent to the model.""" diff --git a/src/openai/types/responses/response_input_file_content_param.py b/src/openai/types/responses/response_input_file_content_param.py new file mode 100644 index 0000000000..71f7b3a281 --- /dev/null +++ b/src/openai/types/responses/response_input_file_content_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseInputFileContentParam"] + + +class ResponseInputFileContentParam(TypedDict, total=False): + type: Required[Literal["input_file"]] + """The type of the input item. Always `input_file`.""" + + file_data: Optional[str] + """The base64-encoded data of the file to be sent to the model.""" + + file_id: Optional[str] + """The ID of the file to be sent to the model.""" + + file_url: Optional[str] + """The URL of the file to be sent to the model.""" + + filename: Optional[str] + """The name of the file to be sent to the model.""" diff --git a/src/openai/types/responses/response_input_image_content.py b/src/openai/types/responses/response_input_image_content.py new file mode 100644 index 0000000000..fb90cb57eb --- /dev/null +++ b/src/openai/types/responses/response_input_image_content.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseInputImageContent"] + + +class ResponseInputImageContent(BaseModel): + type: Literal["input_image"] + """The type of the input item. 
Always `input_image`.""" + + detail: Optional[Literal["low", "high", "auto"]] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + image_url: Optional[str] = None + """The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. + """ diff --git a/src/openai/types/responses/response_input_image_content_param.py b/src/openai/types/responses/response_input_image_content_param.py new file mode 100644 index 0000000000..c51509a3f3 --- /dev/null +++ b/src/openai/types/responses/response_input_image_content_param.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseInputImageContentParam"] + + +class ResponseInputImageContentParam(TypedDict, total=False): + type: Required[Literal["input_image"]] + """The type of the input item. Always `input_image`.""" + + detail: Optional[Literal["low", "high", "auto"]] + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + file_id: Optional[str] + """The ID of the file to be sent to the model.""" + + image_url: Optional[str] + """The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. + """ diff --git a/src/openai/types/responses/response_input_item.py b/src/openai/types/responses/response_input_item.py index d2b454fd2c..0a487b8bef 100644 --- a/src/openai/types/responses/response_input_item.py +++ b/src/openai/types/responses/response_input_item.py @@ -16,6 +16,7 @@ from .response_custom_tool_call_output import ResponseCustomToolCallOutput from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall from .response_input_message_content_list import ResponseInputMessageContentList +from .response_function_call_output_item_list import ResponseFunctionCallOutputItemList from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot __all__ = [ @@ -100,8 +101,8 @@ class FunctionCallOutput(BaseModel): call_id: str """The unique ID of the function tool call generated by the model.""" - output: str - """A JSON string of the output of the function tool call.""" + output: Union[str, ResponseFunctionCallOutputItemList] + """Text, image, or file output of the function tool call.""" type: Literal["function_call_output"] """The type of the function tool call output. Always `function_call_output`.""" @@ -266,12 +267,25 @@ class McpCall(BaseModel): type: Literal["mcp_call"] """The type of the item. Always `mcp_call`.""" + approval_request_id: Optional[str] = None + """ + Unique identifier for the MCP tool call approval request. Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + """ + error: Optional[str] = None """The error from the tool call, if any.""" output: Optional[str] = None """The output from the tool call.""" + status: Optional[Literal["in_progress", "completed", "incomplete", "calling", "failed"]] = None + """The status of the tool call. + + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. 
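The `ResponseFunctionCallOutputItemList` alias feeds directly into the `function_call_output` input item above, whose `output` field now accepts text, image, or file parts as well as a plain string. A sketch of returning an image alongside text as a function result (all IDs are hypothetical placeholders):

from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    previous_response_id="resp_abc123",  # hypothetical: the response that issued the function call
    input=[
        {
            "type": "function_call_output",
            "call_id": "call_123",  # hypothetical call ID from the model
            # `output` may now be a list of text/image/file parts instead of a string:
            "output": [
                {"type": "input_text", "text": "Rendered the requested chart."},
                {"type": "input_image", "file_id": "file_xyz", "detail": "auto"},
            ],
        }
    ],
)
print(response.output_text)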
+ """ + class ItemReference(BaseModel): id: str diff --git a/src/openai/types/responses/response_input_item_param.py b/src/openai/types/responses/response_input_item_param.py index 5ad83fc03a..115147dc4b 100644 --- a/src/openai/types/responses/response_input_item_param.py +++ b/src/openai/types/responses/response_input_item_param.py @@ -17,6 +17,7 @@ from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam +from .response_function_call_output_item_list_param import ResponseFunctionCallOutputItemListParam from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam __all__ = [ @@ -101,8 +102,8 @@ class FunctionCallOutput(TypedDict, total=False): call_id: Required[str] """The unique ID of the function tool call generated by the model.""" - output: Required[str] - """A JSON string of the output of the function tool call.""" + output: Required[Union[str, ResponseFunctionCallOutputItemListParam]] + """Text, image, or file output of the function tool call.""" type: Required[Literal["function_call_output"]] """The type of the function tool call output. Always `function_call_output`.""" @@ -267,12 +268,25 @@ class McpCall(TypedDict, total=False): type: Required[Literal["mcp_call"]] """The type of the item. Always `mcp_call`.""" + approval_request_id: Optional[str] + """ + Unique identifier for the MCP tool call approval request. Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + """ + error: Optional[str] """The error from the tool call, if any.""" output: Optional[str] """The output from the tool call.""" + status: Literal["in_progress", "completed", "incomplete", "calling", "failed"] + """The status of the tool call. + + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. + """ + class ItemReference(TypedDict, total=False): id: Required[str] diff --git a/src/openai/types/responses/response_input_param.py b/src/openai/types/responses/response_input_param.py index 73eac62428..9a999c7252 100644 --- a/src/openai/types/responses/response_input_param.py +++ b/src/openai/types/responses/response_input_param.py @@ -17,6 +17,7 @@ from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam +from .response_function_call_output_item_list_param import ResponseFunctionCallOutputItemListParam from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam __all__ = [ @@ -102,8 +103,8 @@ class FunctionCallOutput(TypedDict, total=False): call_id: Required[str] """The unique ID of the function tool call generated by the model.""" - output: Required[str] - """A JSON string of the output of the function tool call.""" + output: Required[Union[str, ResponseFunctionCallOutputItemListParam]] + """Text, image, or file output of the function tool call.""" type: Required[Literal["function_call_output"]] """The type of the function tool call output. Always `function_call_output`.""" @@ -268,12 +269,25 @@ class McpCall(TypedDict, total=False): type: Required[Literal["mcp_call"]] """The type of the item. 
Always `mcp_call`.""" + approval_request_id: Optional[str] + """ + Unique identifier for the MCP tool call approval request. Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + """ + error: Optional[str] """The error from the tool call, if any.""" output: Optional[str] """The output from the tool call.""" + status: Literal["in_progress", "completed", "incomplete", "calling", "failed"] + """The status of the tool call. + + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. + """ + class ItemReference(TypedDict, total=False): id: Required[str] diff --git a/src/openai/types/responses/response_input_text_content.py b/src/openai/types/responses/response_input_text_content.py new file mode 100644 index 0000000000..2cce849855 --- /dev/null +++ b/src/openai/types/responses/response_input_text_content.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseInputTextContent"] + + +class ResponseInputTextContent(BaseModel): + text: str + """The text input to the model.""" + + type: Literal["input_text"] + """The type of the input item. Always `input_text`.""" diff --git a/src/openai/types/responses/response_input_text_content_param.py b/src/openai/types/responses/response_input_text_content_param.py new file mode 100644 index 0000000000..85b57df2bd --- /dev/null +++ b/src/openai/types/responses/response_input_text_content_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseInputTextContentParam"] + + +class ResponseInputTextContentParam(TypedDict, total=False): + text: Required[str] + """The text input to the model.""" + + type: Required[Literal["input_text"]] + """The type of the input item. Always `input_text`.""" diff --git a/src/openai/types/responses/response_item.py b/src/openai/types/responses/response_item.py index cba89390ed..bdd2523baf 100644 --- a/src/openai/types/responses/response_item.py +++ b/src/openai/types/responses/response_item.py @@ -175,12 +175,25 @@ class McpCall(BaseModel): type: Literal["mcp_call"] """The type of the item. Always `mcp_call`.""" + approval_request_id: Optional[str] = None + """ + Unique identifier for the MCP tool call approval request. Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + """ + error: Optional[str] = None """The error from the tool call, if any.""" output: Optional[str] = None """The output from the tool call.""" + status: Optional[Literal["in_progress", "completed", "incomplete", "calling", "failed"]] = None + """The status of the tool call. + + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. + """ + ResponseItem: TypeAlias = Annotated[ Union[ diff --git a/src/openai/types/responses/response_output_item.py b/src/openai/types/responses/response_output_item.py index 2d3ee7b64e..e33d59cefe 100644 --- a/src/openai/types/responses/response_output_item.py +++ b/src/openai/types/responses/response_output_item.py @@ -93,12 +93,25 @@ class McpCall(BaseModel): type: Literal["mcp_call"] """The type of the item. Always `mcp_call`.""" + approval_request_id: Optional[str] = None + """ + Unique identifier for the MCP tool call approval request. 
Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + """ + error: Optional[str] = None """The error from the tool call, if any.""" output: Optional[str] = None """The output from the tool call.""" + status: Optional[Literal["in_progress", "completed", "incomplete", "calling", "failed"]] = None + """The status of the tool call. + + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. + """ + class McpListToolsTool(BaseModel): input_schema: object diff --git a/src/openai/types/responses/response_reasoning_item.py b/src/openai/types/responses/response_reasoning_item.py index e5cb094e62..fc582cf7c5 100644 --- a/src/openai/types/responses/response_reasoning_item.py +++ b/src/openai/types/responses/response_reasoning_item.py @@ -18,10 +18,10 @@ class Summary(BaseModel): class Content(BaseModel): text: str - """Reasoning text output from the model.""" + """The reasoning text from the model.""" type: Literal["reasoning_text"] - """The type of the object. Always `reasoning_text`.""" + """The type of the reasoning text. Always `reasoning_text`.""" class ResponseReasoningItem(BaseModel): diff --git a/src/openai/types/responses/response_reasoning_item_param.py b/src/openai/types/responses/response_reasoning_item_param.py index 042b6c05db..56e88ba28d 100644 --- a/src/openai/types/responses/response_reasoning_item_param.py +++ b/src/openai/types/responses/response_reasoning_item_param.py @@ -18,10 +18,10 @@ class Summary(TypedDict, total=False): class Content(TypedDict, total=False): text: Required[str] - """Reasoning text output from the model.""" + """The reasoning text from the model.""" type: Required[Literal["reasoning_text"]] - """The type of the object. Always `reasoning_text`.""" + """The type of the reasoning text. Always `reasoning_text`.""" class ResponseReasoningItemParam(TypedDict, total=False): diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index 482d4e75c1..6239b818c9 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -34,6 +34,7 @@ WebSearchToolFilters = web_search_tool.Filters WebSearchToolUserLocation = web_search_tool.UserLocation + class McpAllowedToolsMcpToolFilter(BaseModel): read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -198,7 +199,8 @@ class ImageGeneration(BaseModel): """ Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and + `low`. Defaults to `low`. """ input_image_mask: Optional[ImageGenerationInputImageMask] = None @@ -207,7 +209,7 @@ class ImageGeneration(BaseModel): Contains `image_url` (string, optional) and `file_id` (string, optional). """ - model: Optional[Literal["gpt-image-1"]] = None + model: Optional[Literal["gpt-image-1", "gpt-image-1-mini"]] = None """The image generation model to use. 
Default: `gpt-image-1`.""" moderation: Optional[Literal["auto", "low"]] = None diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index 54bc271c0f..ff4ac2b953 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -36,6 +36,7 @@ WebSearchToolFilters = web_search_tool_param.Filters WebSearchToolUserLocation = web_search_tool_param.UserLocation + class McpAllowedToolsMcpToolFilter(TypedDict, total=False): read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -198,7 +199,8 @@ class ImageGeneration(TypedDict, total=False): """ Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported - for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and + `low`. Defaults to `low`. """ input_image_mask: ImageGenerationInputImageMask @@ -207,7 +209,7 @@ class ImageGeneration(TypedDict, total=False): Contains `image_url` (string, optional) and `file_id` (string, optional). """ - model: Literal["gpt-image-1"] + model: Literal["gpt-image-1", "gpt-image-1-mini"] """The image generation model to use. Default: `gpt-image-1`.""" moderation: Literal["auto", "low"] diff --git a/src/openai/types/shared/all_models.py b/src/openai/types/shared/all_models.py index 828f3b5669..3e0b09e2d1 100644 --- a/src/openai/types/shared/all_models.py +++ b/src/openai/types/shared/all_models.py @@ -21,5 +21,8 @@ "o4-mini-deep-research-2025-06-26", "computer-use-preview", "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", ], ] diff --git a/src/openai/types/shared/comparison_filter.py b/src/openai/types/shared/comparison_filter.py index 2ec2651ff2..33415ca4f9 100644 --- a/src/openai/types/shared/comparison_filter.py +++ b/src/openai/types/shared/comparison_filter.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Union +from typing import List, Union from typing_extensions import Literal from ..._models import BaseModel @@ -13,7 +13,9 @@ class ComparisonFilter(BaseModel): """The key to compare against the value.""" type: Literal["eq", "ne", "gt", "gte", "lt", "lte"] - """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + """ + Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, + `nin`. - `eq`: equals - `ne`: not equal @@ -21,9 +23,11 @@ class ComparisonFilter(BaseModel): - `gte`: greater than or equal - `lt`: less than - `lte`: less than or equal + - `in`: in + - `nin`: not in """ - value: Union[str, float, bool] + value: Union[str, float, bool, List[Union[str, float]]] """ The value to compare against the attribute key; supports string, number, or boolean types. diff --git a/src/openai/types/shared/reasoning.py b/src/openai/types/shared/reasoning.py index 24ce301526..6ea2fe82bf 100644 --- a/src/openai/types/shared/reasoning.py +++ b/src/openai/types/shared/reasoning.py @@ -17,6 +17,9 @@ class Reasoning(BaseModel): supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. 
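A short illustration of the constraint this note describes (the prompt is a placeholder):

from openai import OpenAI

client = OpenAI()

# `gpt-5-pro` defaults to (and only supports) high reasoning effort;
# any other value is expected to be rejected by the API.
response = client.responses.create(
    model="gpt-5-pro",
    reasoning={"effort": "high"},
    input="Design a retry policy for a flaky downstream dependency.",
)
print(response.output_text)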
""" generate_summary: Optional[Literal["auto", "concise", "detailed"]] = None @@ -32,4 +35,6 @@ class Reasoning(BaseModel): This can be useful for debugging and understanding the model's reasoning process. One of `auto`, `concise`, or `detailed`. + + `concise` is only supported for `computer-use-preview` models. """ diff --git a/src/openai/types/shared/responses_model.py b/src/openai/types/shared/responses_model.py index 4d35356806..432cb82afd 100644 --- a/src/openai/types/shared/responses_model.py +++ b/src/openai/types/shared/responses_model.py @@ -21,5 +21,8 @@ "o4-mini-deep-research-2025-06-26", "computer-use-preview", "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", ], ] diff --git a/src/openai/types/shared_params/comparison_filter.py b/src/openai/types/shared_params/comparison_filter.py index 38edd315ed..1c40729c19 100644 --- a/src/openai/types/shared_params/comparison_filter.py +++ b/src/openai/types/shared_params/comparison_filter.py @@ -5,6 +5,8 @@ from typing import Union from typing_extensions import Literal, Required, TypedDict +from ..._types import SequenceNotStr + __all__ = ["ComparisonFilter"] @@ -13,7 +15,9 @@ class ComparisonFilter(TypedDict, total=False): """The key to compare against the value.""" type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]] - """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + """ + Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, + `nin`. - `eq`: equals - `ne`: not equal @@ -21,9 +25,11 @@ class ComparisonFilter(TypedDict, total=False): - `gte`: greater than or equal - `lt`: less than - `lte`: less than or equal + - `in`: in + - `nin`: not in """ - value: Required[Union[str, float, bool]] + value: Required[Union[str, float, bool, SequenceNotStr[Union[str, float]]]] """ The value to compare against the attribute key; supports string, number, or boolean types. diff --git a/src/openai/types/shared_params/reasoning.py b/src/openai/types/shared_params/reasoning.py index 7eab2c76f7..5c1eff683f 100644 --- a/src/openai/types/shared_params/reasoning.py +++ b/src/openai/types/shared_params/reasoning.py @@ -18,6 +18,9 @@ class Reasoning(TypedDict, total=False): supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. """ generate_summary: Optional[Literal["auto", "concise", "detailed"]] @@ -33,4 +36,6 @@ class Reasoning(TypedDict, total=False): This can be useful for debugging and understanding the model's reasoning process. One of `auto`, `concise`, or `detailed`. + + `concise` is only supported for `computer-use-preview` models. 
""" diff --git a/src/openai/types/shared_params/responses_model.py b/src/openai/types/shared_params/responses_model.py index adfcecf1e5..fe34eb0f62 100644 --- a/src/openai/types/shared_params/responses_model.py +++ b/src/openai/types/shared_params/responses_model.py @@ -23,5 +23,8 @@ "o4-mini-deep-research-2025-06-26", "computer-use-preview", "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", ], ] diff --git a/src/openai/types/vector_store_create_params.py b/src/openai/types/vector_store_create_params.py index 945a9886a3..f373a6ed28 100644 --- a/src/openai/types/vector_store_create_params.py +++ b/src/openai/types/vector_store_create_params.py @@ -20,6 +20,12 @@ class VectorStoreCreateParams(TypedDict, total=False): non-empty. """ + description: str + """A description for the vector store. + + Can be used to describe the vector store's purpose. + """ + expires_after: ExpiresAfter """The expiration policy for a vector store.""" diff --git a/src/openai/types/vector_stores/vector_store_file.py b/src/openai/types/vector_stores/vector_store_file.py index b59a61dfb0..001584dfd7 100644 --- a/src/openai/types/vector_stores/vector_store_file.py +++ b/src/openai/types/vector_stores/vector_store_file.py @@ -11,7 +11,7 @@ class LastError(BaseModel): code: Literal["server_error", "unsupported_file", "invalid_file"] - """One of `server_error` or `rate_limit_exceeded`.""" + """One of `server_error`, `unsupported_file`, or `invalid_file`.""" message: str """A human-readable description of the error.""" diff --git a/src/openai/types/video.py b/src/openai/types/video.py new file mode 100644 index 0000000000..2c804f75b8 --- /dev/null +++ b/src/openai/types/video.py @@ -0,0 +1,50 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .video_size import VideoSize +from .video_model import VideoModel +from .video_seconds import VideoSeconds +from .video_create_error import VideoCreateError + +__all__ = ["Video"] + + +class Video(BaseModel): + id: str + """Unique identifier for the video job.""" + + completed_at: Optional[int] = None + """Unix timestamp (seconds) for when the job completed, if finished.""" + + created_at: int + """Unix timestamp (seconds) for when the job was created.""" + + error: Optional[VideoCreateError] = None + """Error payload that explains why generation failed, if applicable.""" + + expires_at: Optional[int] = None + """Unix timestamp (seconds) for when the downloadable assets expire, if set.""" + + model: VideoModel + """The video generation model that produced the job.""" + + object: Literal["video"] + """The object type, which is always `video`.""" + + progress: int + """Approximate completion percentage for the generation task.""" + + remixed_from_video_id: Optional[str] = None + """Identifier of the source video if this video is a remix.""" + + seconds: VideoSeconds + """Duration of the generated clip in seconds.""" + + size: VideoSize + """The resolution of the generated video.""" + + status: Literal["queued", "in_progress", "completed", "failed"] + """Current lifecycle status of the video job.""" diff --git a/src/openai/types/video_create_error.py b/src/openai/types/video_create_error.py new file mode 100644 index 0000000000..ae328b78ea --- /dev/null +++ b/src/openai/types/video_create_error.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["VideoCreateError"] + + +class VideoCreateError(BaseModel): + code: str + + message: str diff --git a/src/openai/types/video_create_params.py b/src/openai/types/video_create_params.py new file mode 100644 index 0000000000..527d62d193 --- /dev/null +++ b/src/openai/types/video_create_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +from .._types import FileTypes +from .video_size import VideoSize +from .video_model import VideoModel +from .video_seconds import VideoSeconds + +__all__ = ["VideoCreateParams"] + + +class VideoCreateParams(TypedDict, total=False): + prompt: Required[str] + """Text prompt that describes the video to generate.""" + + input_reference: FileTypes + """Optional image reference that guides generation.""" + + model: VideoModel + """The video generation model to use. Defaults to `sora-2`.""" + + seconds: VideoSeconds + """Clip duration in seconds. Defaults to 4 seconds.""" + + size: VideoSize + """Output resolution formatted as width x height. Defaults to 720x1280.""" diff --git a/src/openai/types/video_delete_response.py b/src/openai/types/video_delete_response.py new file mode 100644 index 0000000000..e2673ffe2b --- /dev/null +++ b/src/openai/types/video_delete_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["VideoDeleteResponse"] + + +class VideoDeleteResponse(BaseModel): + id: str + """Identifier of the deleted video.""" + + deleted: bool + """Indicates that the video resource was deleted.""" + + object: Literal["video.deleted"] + """The object type that signals the deletion response.""" diff --git a/src/openai/types/video_download_content_params.py b/src/openai/types/video_download_content_params.py new file mode 100644 index 0000000000..8c113d6715 --- /dev/null +++ b/src/openai/types/video_download_content_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["VideoDownloadContentParams"] + + +class VideoDownloadContentParams(TypedDict, total=False): + variant: Literal["video", "thumbnail", "spritesheet"] + """Which downloadable asset to return. Defaults to the MP4 video.""" diff --git a/src/openai/types/video_list_params.py b/src/openai/types/video_list_params.py new file mode 100644 index 0000000000..bf55ba7fa2 --- /dev/null +++ b/src/openai/types/video_list_params.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["VideoListParams"] + + +class VideoListParams(TypedDict, total=False): + after: str + """Identifier for the last item from the previous pagination request""" + + limit: int + """Number of items to retrieve""" + + order: Literal["asc", "desc"] + """Sort order of results by timestamp. + + Use `asc` for ascending order or `desc` for descending order. 
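Together with the `Video` model above, these param types imply a job-style lifecycle: create, poll the status, then download an asset. A sketch assuming the conventional resource methods `videos.create`, `videos.retrieve`, and `videos.download_content` (method names are inferred from the param/response type names; the prompt and output path are placeholders):

import time

from openai import OpenAI

client = OpenAI()

video = client.videos.create(
    model="sora-2",
    prompt="A paper airplane gliding through a rainy neon city at dusk",
    seconds="8",
    size="1280x720",
)

# Poll until the job leaves its non-terminal states.
while video.status in ("queued", "in_progress"):
    time.sleep(5)
    video = client.videos.retrieve(video.id)

if video.status == "completed":
    content = client.videos.download_content(video.id, variant="video")
    content.write_to_file("clip.mp4")
else:
    print(video.error)  # a VideoCreateError with `code` and `message`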
+ """ diff --git a/src/openai/types/video_model.py b/src/openai/types/video_model.py new file mode 100644 index 0000000000..0b0835fca4 --- /dev/null +++ b/src/openai/types/video_model.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["VideoModel"] + +VideoModel: TypeAlias = Literal["sora-2", "sora-2-pro"] diff --git a/src/openai/types/video_remix_params.py b/src/openai/types/video_remix_params.py new file mode 100644 index 0000000000..15388d6172 --- /dev/null +++ b/src/openai/types/video_remix_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["VideoRemixParams"] + + +class VideoRemixParams(TypedDict, total=False): + prompt: Required[str] + """Updated text prompt that directs the remix generation.""" diff --git a/src/openai/types/video_seconds.py b/src/openai/types/video_seconds.py new file mode 100644 index 0000000000..e50d37dc51 --- /dev/null +++ b/src/openai/types/video_seconds.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["VideoSeconds"] + +VideoSeconds: TypeAlias = Literal["4", "8", "12"] diff --git a/src/openai/types/video_size.py b/src/openai/types/video_size.py new file mode 100644 index 0000000000..215ac8815a --- /dev/null +++ b/src/openai/types/video_size.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["VideoSize"] + +VideoSize: TypeAlias = Literal["720x1280", "1280x720", "1024x1792", "1792x1024"] diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index 11cbe2349c..b5eaa4be1f 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -32,6 +32,8 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: model="gpt-4o-transcribe", chunking_strategy="auto", include=["logprobs"], + known_speaker_names=["string"], + known_speaker_references=["string"], language="language", prompt="prompt", response_format="json", @@ -84,6 +86,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: stream=True, chunking_strategy="auto", include=["logprobs"], + known_speaker_names=["string"], + known_speaker_references=["string"], language="language", prompt="prompt", response_format="json", @@ -140,6 +144,8 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn model="gpt-4o-transcribe", chunking_strategy="auto", include=["logprobs"], + known_speaker_names=["string"], + known_speaker_references=["string"], language="language", prompt="prompt", response_format="json", @@ -192,6 +198,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn stream=True, chunking_strategy="auto", include=["logprobs"], + known_speaker_names=["string"], + known_speaker_references=["string"], language="language", prompt="prompt", response_format="json", diff --git a/tests/api_resources/beta/chatkit/__init__.py b/tests/api_resources/beta/chatkit/__init__.py new file mode 100644 index 0000000000..fd8019a9a1 --- /dev/null +++ 
b/tests/api_resources/beta/chatkit/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/beta/chatkit/test_sessions.py b/tests/api_resources/beta/chatkit/test_sessions.py new file mode 100644 index 0000000000..c94e4c92ae --- /dev/null +++ b/tests/api_resources/beta/chatkit/test_sessions.py @@ -0,0 +1,230 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.types.beta.chatkit import ( + ChatSession, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSessions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create(self, client: OpenAI) -> None: + session = client.beta.chatkit.sessions.create( + user="x", + workflow={"id": "id"}, + ) + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + session = client.beta.chatkit.sessions.create( + user="x", + workflow={ + "id": "id", + "state_variables": {"foo": "string"}, + "tracing": {"enabled": True}, + "version": "version", + }, + chatkit_configuration={ + "automatic_thread_titling": {"enabled": True}, + "file_upload": { + "enabled": True, + "max_file_size": 1, + "max_files": 1, + }, + "history": { + "enabled": True, + "recent_threads": 1, + }, + }, + expires_after={ + "anchor": "created_at", + "seconds": 1, + }, + rate_limits={"max_requests_per_1_minute": 1}, + ) + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + def test_raw_response_create(self, client: OpenAI) -> None: + response = client.beta.chatkit.sessions.with_raw_response.create( + user="x", + workflow={"id": "id"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + session = response.parse() + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + def test_streaming_response_create(self, client: OpenAI) -> None: + with client.beta.chatkit.sessions.with_streaming_response.create( + user="x", + workflow={"id": "id"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + session = response.parse() + assert_matches_type(ChatSession, session, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_cancel(self, client: OpenAI) -> None: + session = client.beta.chatkit.sessions.cancel( + "cksess_123", + ) + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + def test_raw_response_cancel(self, client: OpenAI) -> None: + response = client.beta.chatkit.sessions.with_raw_response.cancel( + "cksess_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + session = response.parse() + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + def test_streaming_response_cancel(self, client: OpenAI) -> None: + with client.beta.chatkit.sessions.with_streaming_response.cancel( + "cksess_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") 
== "python" + + session = response.parse() + assert_matches_type(ChatSession, session, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_cancel(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + client.beta.chatkit.sessions.with_raw_response.cancel( + "", + ) + + +class TestAsyncSessions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @parametrize + async def test_method_create(self, async_client: AsyncOpenAI) -> None: + session = await async_client.beta.chatkit.sessions.create( + user="x", + workflow={"id": "id"}, + ) + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + session = await async_client.beta.chatkit.sessions.create( + user="x", + workflow={ + "id": "id", + "state_variables": {"foo": "string"}, + "tracing": {"enabled": True}, + "version": "version", + }, + chatkit_configuration={ + "automatic_thread_titling": {"enabled": True}, + "file_upload": { + "enabled": True, + "max_file_size": 1, + "max_files": 1, + }, + "history": { + "enabled": True, + "recent_threads": 1, + }, + }, + expires_after={ + "anchor": "created_at", + "seconds": 1, + }, + rate_limits={"max_requests_per_1_minute": 1}, + ) + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: + response = await async_client.beta.chatkit.sessions.with_raw_response.create( + user="x", + workflow={"id": "id"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + session = response.parse() + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: + async with async_client.beta.chatkit.sessions.with_streaming_response.create( + user="x", + workflow={"id": "id"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + session = await response.parse() + assert_matches_type(ChatSession, session, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: + session = await async_client.beta.chatkit.sessions.cancel( + "cksess_123", + ) + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: + response = await async_client.beta.chatkit.sessions.with_raw_response.cancel( + "cksess_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + session = response.parse() + assert_matches_type(ChatSession, session, path=["response"]) + + @parametrize + async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: + async with async_client.beta.chatkit.sessions.with_streaming_response.cancel( + "cksess_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + session = await response.parse() + assert_matches_type(ChatSession, session, path=["response"]) + + assert cast(Any, 
response.is_closed) is True + + @parametrize + async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + await async_client.beta.chatkit.sessions.with_raw_response.cancel( + "", + ) diff --git a/tests/api_resources/beta/chatkit/test_threads.py b/tests/api_resources/beta/chatkit/test_threads.py new file mode 100644 index 0000000000..6395b72b2f --- /dev/null +++ b/tests/api_resources/beta/chatkit/test_threads.py @@ -0,0 +1,348 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage +from openai.types.beta.chatkit import ChatKitThread, ThreadDeleteResponse +from openai.types.beta.chatkit.chatkit_thread_item_list import Data + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestThreads: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_retrieve(self, client: OpenAI) -> None: + thread = client.beta.chatkit.threads.retrieve( + "cthr_123", + ) + assert_matches_type(ChatKitThread, thread, path=["response"]) + + @parametrize + def test_raw_response_retrieve(self, client: OpenAI) -> None: + response = client.beta.chatkit.threads.with_raw_response.retrieve( + "cthr_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(ChatKitThread, thread, path=["response"]) + + @parametrize + def test_streaming_response_retrieve(self, client: OpenAI) -> None: + with client.beta.chatkit.threads.with_streaming_response.retrieve( + "cthr_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = response.parse() + assert_matches_type(ChatKitThread, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_retrieve(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.beta.chatkit.threads.with_raw_response.retrieve( + "", + ) + + @parametrize + def test_method_list(self, client: OpenAI) -> None: + thread = client.beta.chatkit.threads.list() + assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: OpenAI) -> None: + thread = client.beta.chatkit.threads.list( + after="after", + before="before", + limit=0, + order="asc", + user="x", + ) + assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: OpenAI) -> None: + response = client.beta.chatkit.threads.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: OpenAI) -> None: + with 
client.beta.chatkit.threads.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = response.parse() + assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_delete(self, client: OpenAI) -> None: + thread = client.beta.chatkit.threads.delete( + "cthr_123", + ) + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + @parametrize + def test_raw_response_delete(self, client: OpenAI) -> None: + response = client.beta.chatkit.threads.with_raw_response.delete( + "cthr_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + @parametrize + def test_streaming_response_delete(self, client: OpenAI) -> None: + with client.beta.chatkit.threads.with_streaming_response.delete( + "cthr_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = response.parse() + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_delete(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.beta.chatkit.threads.with_raw_response.delete( + "", + ) + + @parametrize + def test_method_list_items(self, client: OpenAI) -> None: + thread = client.beta.chatkit.threads.list_items( + thread_id="cthr_123", + ) + assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"]) + + @parametrize + def test_method_list_items_with_all_params(self, client: OpenAI) -> None: + thread = client.beta.chatkit.threads.list_items( + thread_id="cthr_123", + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"]) + + @parametrize + def test_raw_response_list_items(self, client: OpenAI) -> None: + response = client.beta.chatkit.threads.with_raw_response.list_items( + thread_id="cthr_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"]) + + @parametrize + def test_streaming_response_list_items(self, client: OpenAI) -> None: + with client.beta.chatkit.threads.with_streaming_response.list_items( + thread_id="cthr_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = response.parse() + assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_list_items(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.beta.chatkit.threads.with_raw_response.list_items( + thread_id="", + ) + + +class TestAsyncThreads: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @parametrize + async def 
test_method_retrieve(self, async_client: AsyncOpenAI) -> None: + thread = await async_client.beta.chatkit.threads.retrieve( + "cthr_123", + ) + assert_matches_type(ChatKitThread, thread, path=["response"]) + + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: + response = await async_client.beta.chatkit.threads.with_raw_response.retrieve( + "cthr_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(ChatKitThread, thread, path=["response"]) + + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: + async with async_client.beta.chatkit.threads.with_streaming_response.retrieve( + "cthr_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = await response.parse() + assert_matches_type(ChatKitThread, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.beta.chatkit.threads.with_raw_response.retrieve( + "", + ) + + @parametrize + async def test_method_list(self, async_client: AsyncOpenAI) -> None: + thread = await async_client.beta.chatkit.threads.list() + assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"]) + + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: + thread = await async_client.beta.chatkit.threads.list( + after="after", + before="before", + limit=0, + order="asc", + user="x", + ) + assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"]) + + @parametrize + async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: + response = await async_client.beta.chatkit.threads.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"]) + + @parametrize + async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: + async with async_client.beta.chatkit.threads.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = await response.parse() + assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_delete(self, async_client: AsyncOpenAI) -> None: + thread = await async_client.beta.chatkit.threads.delete( + "cthr_123", + ) + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + @parametrize + async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: + response = await async_client.beta.chatkit.threads.with_raw_response.delete( + "cthr_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> 
None: + async with async_client.beta.chatkit.threads.with_streaming_response.delete( + "cthr_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = await response.parse() + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.beta.chatkit.threads.with_raw_response.delete( + "", + ) + + @parametrize + async def test_method_list_items(self, async_client: AsyncOpenAI) -> None: + thread = await async_client.beta.chatkit.threads.list_items( + thread_id="cthr_123", + ) + assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"]) + + @parametrize + async def test_method_list_items_with_all_params(self, async_client: AsyncOpenAI) -> None: + thread = await async_client.beta.chatkit.threads.list_items( + thread_id="cthr_123", + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"]) + + @parametrize + async def test_raw_response_list_items(self, async_client: AsyncOpenAI) -> None: + response = await async_client.beta.chatkit.threads.with_raw_response.list_items( + thread_id="cthr_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"]) + + @parametrize + async def test_streaming_response_list_items(self, async_client: AsyncOpenAI) -> None: + async with async_client.beta.chatkit.threads.with_streaming_response.list_items( + thread_id="cthr_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = await response.parse() + assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_list_items(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.beta.chatkit.threads.with_raw_response.list_items( + thread_id="", + ) diff --git a/tests/api_resources/conversations/test_items.py b/tests/api_resources/conversations/test_items.py index c308160543..0503301f16 100644 --- a/tests/api_resources/conversations/test_items.py +++ b/tests/api_resources/conversations/test_items.py @@ -30,6 +30,7 @@ def test_method_create(self, client: OpenAI) -> None: { "content": "string", "role": "user", + "type": "message", } ], ) @@ -46,7 +47,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "type": "message", } ], - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], ) assert_matches_type(ConversationItemList, item, path=["response"]) @@ -58,6 +59,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: { "content": "string", "role": "user", + "type": "message", } ], ) @@ -75,6 +77,7 @@ def test_streaming_response_create(self, client: OpenAI) -> None: { "content": "string", "role": "user", + "type": "message", } ], ) as response: @@ -95,6 +98,7 @@ def test_path_params_create(self, client: OpenAI) -> None: { 
"content": "string", "role": "user", + "type": "message", } ], ) @@ -112,7 +116,7 @@ def test_method_retrieve_with_all_params(self, client: OpenAI) -> None: item = client.conversations.items.retrieve( item_id="msg_abc", conversation_id="conv_123", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], ) assert_matches_type(ConversationItem, item, path=["response"]) @@ -168,7 +172,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: item = client.conversations.items.list( conversation_id="conv_123", after="after", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], limit=0, order="asc", ) @@ -267,6 +271,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: { "content": "string", "role": "user", + "type": "message", } ], ) @@ -283,7 +288,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "type": "message", } ], - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], ) assert_matches_type(ConversationItemList, item, path=["response"]) @@ -295,6 +300,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: { "content": "string", "role": "user", + "type": "message", } ], ) @@ -312,6 +318,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non { "content": "string", "role": "user", + "type": "message", } ], ) as response: @@ -332,6 +339,7 @@ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: { "content": "string", "role": "user", + "type": "message", } ], ) @@ -349,7 +357,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) item = await async_client.conversations.items.retrieve( item_id="msg_abc", conversation_id="conv_123", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], ) assert_matches_type(ConversationItem, item, path=["response"]) @@ -405,7 +413,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N item = await async_client.conversations.items.list( conversation_id="conv_123", after="after", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], limit=0, order="asc", ) diff --git a/tests/api_resources/realtime/test_calls.py b/tests/api_resources/realtime/test_calls.py new file mode 100644 index 0000000000..5495a58a4e --- /dev/null +++ b/tests/api_resources/realtime/test_calls.py @@ -0,0 +1,692 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import httpx +import pytest +from respx import MockRouter + +import openai._legacy_response as _legacy_response +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type + +# pyright: reportDeprecated=false + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestCalls: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None: + respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + call = client.realtime.calls.create( + sdp="sdp", + ) + assert isinstance(call, _legacy_response.HttpxBinaryResponseContent) + assert call.json() == {"foo": "bar"} + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None: + respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + call = client.realtime.calls.create( + sdp="sdp", + session={ + "type": "realtime", + "audio": { + "input": { + "format": { + "rate": 24000, + "type": "audio/pcm", + }, + "noise_reduction": {"type": "near_field"}, + "transcription": { + "language": "language", + "model": "whisper-1", + "prompt": "prompt", + }, + "turn_detection": { + "type": "server_vad", + "create_response": True, + "idle_timeout_ms": 5000, + "interrupt_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + }, + }, + "output": { + "format": { + "rate": 24000, + "type": "audio/pcm", + }, + "speed": 0.25, + "voice": "ash", + }, + }, + "include": ["item.input_audio_transcription.logprobs"], + "instructions": "instructions", + "max_output_tokens": 0, + "model": "string", + "output_modalities": ["text"], + "prompt": { + "id": "id", + "variables": {"foo": "string"}, + "version": "version", + }, + "tool_choice": "none", + "tools": [ + { + "description": "description", + "name": "name", + "parameters": {}, + "type": "function", + } + ], + "tracing": "auto", + "truncation": "auto", + }, + ) + assert isinstance(call, _legacy_response.HttpxBinaryResponseContent) + assert call.json() == {"foo": "bar"} + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None: + respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = client.realtime.calls.with_raw_response.create( + sdp="sdp", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert_matches_type(_legacy_response.HttpxBinaryResponseContent, call, path=["response"]) + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None: + respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + with client.realtime.calls.with_streaming_response.create( + sdp="sdp", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = response.parse() + assert_matches_type(bytes, call, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def 
test_method_accept(self, client: OpenAI) -> None: + call = client.realtime.calls.accept( + call_id="call_id", + type="realtime", + ) + assert call is None + + @parametrize + def test_method_accept_with_all_params(self, client: OpenAI) -> None: + call = client.realtime.calls.accept( + call_id="call_id", + type="realtime", + audio={ + "input": { + "format": { + "rate": 24000, + "type": "audio/pcm", + }, + "noise_reduction": {"type": "near_field"}, + "transcription": { + "language": "language", + "model": "whisper-1", + "prompt": "prompt", + }, + "turn_detection": { + "type": "server_vad", + "create_response": True, + "idle_timeout_ms": 5000, + "interrupt_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + }, + }, + "output": { + "format": { + "rate": 24000, + "type": "audio/pcm", + }, + "speed": 0.25, + "voice": "ash", + }, + }, + include=["item.input_audio_transcription.logprobs"], + instructions="instructions", + max_output_tokens=0, + model="string", + output_modalities=["text"], + prompt={ + "id": "id", + "variables": {"foo": "string"}, + "version": "version", + }, + tool_choice="none", + tools=[ + { + "description": "description", + "name": "name", + "parameters": {}, + "type": "function", + } + ], + tracing="auto", + truncation="auto", + ) + assert call is None + + @parametrize + def test_raw_response_accept(self, client: OpenAI) -> None: + response = client.realtime.calls.with_raw_response.accept( + call_id="call_id", + type="realtime", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert call is None + + @parametrize + def test_streaming_response_accept(self, client: OpenAI) -> None: + with client.realtime.calls.with_streaming_response.accept( + call_id="call_id", + type="realtime", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = response.parse() + assert call is None + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_accept(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"): + client.realtime.calls.with_raw_response.accept( + call_id="", + type="realtime", + ) + + @parametrize + def test_method_hangup(self, client: OpenAI) -> None: + call = client.realtime.calls.hangup( + "call_id", + ) + assert call is None + + @parametrize + def test_raw_response_hangup(self, client: OpenAI) -> None: + response = client.realtime.calls.with_raw_response.hangup( + "call_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert call is None + + @parametrize + def test_streaming_response_hangup(self, client: OpenAI) -> None: + with client.realtime.calls.with_streaming_response.hangup( + "call_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = response.parse() + assert call is None + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_hangup(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"): + client.realtime.calls.with_raw_response.hangup( + "", + ) + + @parametrize + def test_method_refer(self, client: OpenAI) -> None: + call = client.realtime.calls.refer( + 
call_id="call_id", + target_uri="tel:+14155550123", + ) + assert call is None + + @parametrize + def test_raw_response_refer(self, client: OpenAI) -> None: + response = client.realtime.calls.with_raw_response.refer( + call_id="call_id", + target_uri="tel:+14155550123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert call is None + + @parametrize + def test_streaming_response_refer(self, client: OpenAI) -> None: + with client.realtime.calls.with_streaming_response.refer( + call_id="call_id", + target_uri="tel:+14155550123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = response.parse() + assert call is None + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_refer(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"): + client.realtime.calls.with_raw_response.refer( + call_id="", + target_uri="tel:+14155550123", + ) + + @parametrize + def test_method_reject(self, client: OpenAI) -> None: + call = client.realtime.calls.reject( + call_id="call_id", + ) + assert call is None + + @parametrize + def test_method_reject_with_all_params(self, client: OpenAI) -> None: + call = client.realtime.calls.reject( + call_id="call_id", + status_code=486, + ) + assert call is None + + @parametrize + def test_raw_response_reject(self, client: OpenAI) -> None: + response = client.realtime.calls.with_raw_response.reject( + call_id="call_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert call is None + + @parametrize + def test_streaming_response_reject(self, client: OpenAI) -> None: + with client.realtime.calls.with_streaming_response.reject( + call_id="call_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = response.parse() + assert call is None + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_reject(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"): + client.realtime.calls.with_raw_response.reject( + call_id="", + ) + + +class TestAsyncCalls: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @parametrize + @pytest.mark.respx(base_url=base_url) + async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: + respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + call = await async_client.realtime.calls.create( + sdp="sdp", + ) + assert isinstance(call, _legacy_response.HttpxBinaryResponseContent) + assert call.json() == {"foo": "bar"} + + @parametrize + @pytest.mark.respx(base_url=base_url) + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: + respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + call = await async_client.realtime.calls.create( + sdp="sdp", + session={ + "type": "realtime", + "audio": { + "input": { + "format": { + "rate": 24000, + "type": "audio/pcm", + }, + "noise_reduction": {"type": 
"near_field"}, + "transcription": { + "language": "language", + "model": "whisper-1", + "prompt": "prompt", + }, + "turn_detection": { + "type": "server_vad", + "create_response": True, + "idle_timeout_ms": 5000, + "interrupt_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + }, + }, + "output": { + "format": { + "rate": 24000, + "type": "audio/pcm", + }, + "speed": 0.25, + "voice": "ash", + }, + }, + "include": ["item.input_audio_transcription.logprobs"], + "instructions": "instructions", + "max_output_tokens": 0, + "model": "string", + "output_modalities": ["text"], + "prompt": { + "id": "id", + "variables": {"foo": "string"}, + "version": "version", + }, + "tool_choice": "none", + "tools": [ + { + "description": "description", + "name": "name", + "parameters": {}, + "type": "function", + } + ], + "tracing": "auto", + "truncation": "auto", + }, + ) + assert isinstance(call, _legacy_response.HttpxBinaryResponseContent) + assert call.json() == {"foo": "bar"} + + @parametrize + @pytest.mark.respx(base_url=base_url) + async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: + respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = await async_client.realtime.calls.with_raw_response.create( + sdp="sdp", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert_matches_type(_legacy_response.HttpxBinaryResponseContent, call, path=["response"]) + + @parametrize + @pytest.mark.respx(base_url=base_url) + async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: + respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + async with async_client.realtime.calls.with_streaming_response.create( + sdp="sdp", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = await response.parse() + assert_matches_type(bytes, call, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_accept(self, async_client: AsyncOpenAI) -> None: + call = await async_client.realtime.calls.accept( + call_id="call_id", + type="realtime", + ) + assert call is None + + @parametrize + async def test_method_accept_with_all_params(self, async_client: AsyncOpenAI) -> None: + call = await async_client.realtime.calls.accept( + call_id="call_id", + type="realtime", + audio={ + "input": { + "format": { + "rate": 24000, + "type": "audio/pcm", + }, + "noise_reduction": {"type": "near_field"}, + "transcription": { + "language": "language", + "model": "whisper-1", + "prompt": "prompt", + }, + "turn_detection": { + "type": "server_vad", + "create_response": True, + "idle_timeout_ms": 5000, + "interrupt_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + }, + }, + "output": { + "format": { + "rate": 24000, + "type": "audio/pcm", + }, + "speed": 0.25, + "voice": "ash", + }, + }, + include=["item.input_audio_transcription.logprobs"], + instructions="instructions", + max_output_tokens=0, + model="string", + output_modalities=["text"], + prompt={ + "id": "id", + "variables": {"foo": "string"}, + "version": "version", + }, + tool_choice="none", + tools=[ + { + "description": "description", + "name": "name", + "parameters": {}, + "type": "function", + } + ], + 
tracing="auto", + truncation="auto", + ) + assert call is None + + @parametrize + async def test_raw_response_accept(self, async_client: AsyncOpenAI) -> None: + response = await async_client.realtime.calls.with_raw_response.accept( + call_id="call_id", + type="realtime", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert call is None + + @parametrize + async def test_streaming_response_accept(self, async_client: AsyncOpenAI) -> None: + async with async_client.realtime.calls.with_streaming_response.accept( + call_id="call_id", + type="realtime", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = await response.parse() + assert call is None + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_accept(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"): + await async_client.realtime.calls.with_raw_response.accept( + call_id="", + type="realtime", + ) + + @parametrize + async def test_method_hangup(self, async_client: AsyncOpenAI) -> None: + call = await async_client.realtime.calls.hangup( + "call_id", + ) + assert call is None + + @parametrize + async def test_raw_response_hangup(self, async_client: AsyncOpenAI) -> None: + response = await async_client.realtime.calls.with_raw_response.hangup( + "call_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert call is None + + @parametrize + async def test_streaming_response_hangup(self, async_client: AsyncOpenAI) -> None: + async with async_client.realtime.calls.with_streaming_response.hangup( + "call_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = await response.parse() + assert call is None + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_hangup(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"): + await async_client.realtime.calls.with_raw_response.hangup( + "", + ) + + @parametrize + async def test_method_refer(self, async_client: AsyncOpenAI) -> None: + call = await async_client.realtime.calls.refer( + call_id="call_id", + target_uri="tel:+14155550123", + ) + assert call is None + + @parametrize + async def test_raw_response_refer(self, async_client: AsyncOpenAI) -> None: + response = await async_client.realtime.calls.with_raw_response.refer( + call_id="call_id", + target_uri="tel:+14155550123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert call is None + + @parametrize + async def test_streaming_response_refer(self, async_client: AsyncOpenAI) -> None: + async with async_client.realtime.calls.with_streaming_response.refer( + call_id="call_id", + target_uri="tel:+14155550123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = await response.parse() + assert call is None + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_refer(self, async_client: AsyncOpenAI) -> None: + with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"): + await async_client.realtime.calls.with_raw_response.refer( + call_id="", + target_uri="tel:+14155550123", + ) + + @parametrize + async def test_method_reject(self, async_client: AsyncOpenAI) -> None: + call = await async_client.realtime.calls.reject( + call_id="call_id", + ) + assert call is None + + @parametrize + async def test_method_reject_with_all_params(self, async_client: AsyncOpenAI) -> None: + call = await async_client.realtime.calls.reject( + call_id="call_id", + status_code=486, + ) + assert call is None + + @parametrize + async def test_raw_response_reject(self, async_client: AsyncOpenAI) -> None: + response = await async_client.realtime.calls.with_raw_response.reject( + call_id="call_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + call = response.parse() + assert call is None + + @parametrize + async def test_streaming_response_reject(self, async_client: AsyncOpenAI) -> None: + async with async_client.realtime.calls.with_streaming_response.reject( + call_id="call_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + call = await response.parse() + assert call is None + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_reject(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"): + await async_client.realtime.calls.with_raw_response.reject( + call_id="", + ) diff --git a/tests/api_resources/responses/test_input_items.py b/tests/api_resources/responses/test_input_items.py index eda20c9a0b..ed6fddf33a 100644 --- a/tests/api_resources/responses/test_input_items.py +++ b/tests/api_resources/responses/test_input_items.py @@ -30,7 +30,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: input_item = client.responses.input_items.list( response_id="response_id", after="after", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], limit=0, order="asc", ) @@ -85,7 +85,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N input_item = await async_client.responses.input_items.list( response_id="response_id", after="after", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], limit=0, order="asc", ) diff --git a/tests/api_resources/responses/test_input_tokens.py b/tests/api_resources/responses/test_input_tokens.py new file mode 100644 index 0000000000..54ba2d25c2 --- /dev/null +++ b/tests/api_resources/responses/test_input_tokens.py @@ -0,0 +1,138 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.types.responses import InputTokenCountResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestInputTokens: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_count(self, client: OpenAI) -> None: + input_token = client.responses.input_tokens.count() + assert_matches_type(InputTokenCountResponse, input_token, path=["response"]) + + @parametrize + def test_method_count_with_all_params(self, client: OpenAI) -> None: + input_token = client.responses.input_tokens.count( + conversation="string", + input="string", + instructions="instructions", + model="model", + parallel_tool_calls=True, + previous_response_id="resp_123", + reasoning={ + "effort": "minimal", + "generate_summary": "auto", + "summary": "auto", + }, + text={ + "format": {"type": "text"}, + "verbosity": "low", + }, + tool_choice="none", + tools=[ + { + "name": "name", + "parameters": {"foo": "bar"}, + "strict": True, + "type": "function", + "description": "description", + } + ], + truncation="auto", + ) + assert_matches_type(InputTokenCountResponse, input_token, path=["response"]) + + @parametrize + def test_raw_response_count(self, client: OpenAI) -> None: + response = client.responses.input_tokens.with_raw_response.count() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + input_token = response.parse() + assert_matches_type(InputTokenCountResponse, input_token, path=["response"]) + + @parametrize + def test_streaming_response_count(self, client: OpenAI) -> None: + with client.responses.input_tokens.with_streaming_response.count() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + input_token = response.parse() + assert_matches_type(InputTokenCountResponse, input_token, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncInputTokens: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @parametrize + async def test_method_count(self, async_client: AsyncOpenAI) -> None: + input_token = await async_client.responses.input_tokens.count() + assert_matches_type(InputTokenCountResponse, input_token, path=["response"]) + + @parametrize + async def test_method_count_with_all_params(self, async_client: AsyncOpenAI) -> None: + input_token = await async_client.responses.input_tokens.count( + conversation="string", + input="string", + instructions="instructions", + model="model", + parallel_tool_calls=True, + previous_response_id="resp_123", + reasoning={ + "effort": "minimal", + "generate_summary": "auto", + "summary": "auto", + }, + text={ + "format": {"type": "text"}, + "verbosity": "low", + }, + tool_choice="none", + tools=[ + { + "name": "name", + "parameters": {"foo": "bar"}, + "strict": True, + "type": "function", + "description": "description", + } + ], + truncation="auto", + ) + assert_matches_type(InputTokenCountResponse, input_token, path=["response"]) + + @parametrize + async def test_raw_response_count(self, async_client: AsyncOpenAI) -> None: + response = await async_client.responses.input_tokens.with_raw_response.count() + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + input_token = response.parse() + assert_matches_type(InputTokenCountResponse, input_token, path=["response"]) + + @parametrize + async def test_streaming_response_count(self, async_client: AsyncOpenAI) -> None: + async with async_client.responses.input_tokens.with_streaming_response.count() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + input_token = await response.parse() + assert_matches_type(InputTokenCountResponse, input_token, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 0cc20e926b..a329aa4d9e 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -30,7 +30,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: response = client.responses.create( background=True, conversation="string", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], input="string", instructions="instructions", max_output_tokens=0, @@ -110,7 +110,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: stream=True, background=True, conversation="string", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], input="string", instructions="instructions", max_output_tokens=0, @@ -190,7 +190,7 @@ def test_method_retrieve_overload_1(self, client: OpenAI) -> None: def test_method_retrieve_with_all_params_overload_1(self, client: OpenAI) -> None: response = client.responses.retrieve( response_id="resp_677efb5139a88190b512bc3fef8e535d", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], include_obfuscation=True, starting_after=0, stream=False, @@ -241,7 +241,7 @@ def test_method_retrieve_with_all_params_overload_2(self, client: OpenAI) -> Non response_stream = client.responses.retrieve( response_id="resp_677efb5139a88190b512bc3fef8e535d", stream=True, - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], include_obfuscation=True, starting_after=0, ) @@ -383,7 +383,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn response = await async_client.responses.create( background=True, conversation="string", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], input="string", instructions="instructions", max_output_tokens=0, @@ -463,7 +463,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn stream=True, background=True, conversation="string", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], input="string", instructions="instructions", max_output_tokens=0, @@ -543,7 +543,7 @@ async def test_method_retrieve_overload_1(self, async_client: AsyncOpenAI) -> No async def test_method_retrieve_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.responses.retrieve( response_id="resp_677efb5139a88190b512bc3fef8e535d", - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], include_obfuscation=True, starting_after=0, stream=False, @@ -594,7 +594,7 @@ async def test_method_retrieve_with_all_params_overload_2(self, async_client: As response_stream = await async_client.responses.retrieve( 
response_id="resp_677efb5139a88190b512bc3fef8e535d", stream=True, - include=["code_interpreter_call.outputs"], + include=["file_search_call.results"], include_obfuscation=True, starting_after=0, ) diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py index dffd2b1d07..cce9c52cea 100644 --- a/tests/api_resources/test_vector_stores.py +++ b/tests/api_resources/test_vector_stores.py @@ -31,6 +31,7 @@ def test_method_create(self, client: OpenAI) -> None: def test_method_create_with_all_params(self, client: OpenAI) -> None: vector_store = client.vector_stores.create( chunking_strategy={"type": "auto"}, + description="description", expires_after={ "anchor": "last_active_at", "days": 1, @@ -299,6 +300,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: vector_store = await async_client.vector_stores.create( chunking_strategy={"type": "auto"}, + description="description", expires_after={ "anchor": "last_active_at", "days": 1, diff --git a/tests/api_resources/test_videos.py b/tests/api_resources/test_videos.py new file mode 100644 index 0000000000..623cfc2153 --- /dev/null +++ b/tests/api_resources/test_videos.py @@ -0,0 +1,551 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import httpx +import pytest +from respx import MockRouter + +import openai._legacy_response as _legacy_response +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.types import ( + Video, + VideoDeleteResponse, +) +from openai._utils import assert_signatures_in_sync +from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage + +# pyright: reportDeprecated=false + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestVideos: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create(self, client: OpenAI) -> None: + video = client.videos.create( + prompt="x", + ) + assert_matches_type(Video, video, path=["response"]) + + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + video = client.videos.create( + prompt="x", + input_reference=b"raw file contents", + model="sora-2", + seconds="4", + size="720x1280", + ) + assert_matches_type(Video, video, path=["response"]) + + @parametrize + def test_raw_response_create(self, client: OpenAI) -> None: + response = client.videos.with_raw_response.create( + prompt="x", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + video = response.parse() + assert_matches_type(Video, video, path=["response"]) + + @parametrize + def test_streaming_response_create(self, client: OpenAI) -> None: + with client.videos.with_streaming_response.create( + prompt="x", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + video = response.parse() + assert_matches_type(Video, video, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_retrieve(self, client: OpenAI) -> None: + video = client.videos.retrieve( + "video_123", + ) + assert_matches_type(Video, video, path=["response"]) + + @parametrize + def 
test_raw_response_retrieve(self, client: OpenAI) -> None: + response = client.videos.with_raw_response.retrieve( + "video_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + video = response.parse() + assert_matches_type(Video, video, path=["response"]) + + @parametrize + def test_streaming_response_retrieve(self, client: OpenAI) -> None: + with client.videos.with_streaming_response.retrieve( + "video_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + video = response.parse() + assert_matches_type(Video, video, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_retrieve(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"): + client.videos.with_raw_response.retrieve( + "", + ) + + @parametrize + def test_method_list(self, client: OpenAI) -> None: + video = client.videos.list() + assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: OpenAI) -> None: + video = client.videos.list( + after="after", + limit=0, + order="asc", + ) + assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: OpenAI) -> None: + response = client.videos.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + video = response.parse() + assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: OpenAI) -> None: + with client.videos.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + video = response.parse() + assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_delete(self, client: OpenAI) -> None: + video = client.videos.delete( + "video_123", + ) + assert_matches_type(VideoDeleteResponse, video, path=["response"]) + + @parametrize + def test_raw_response_delete(self, client: OpenAI) -> None: + response = client.videos.with_raw_response.delete( + "video_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + video = response.parse() + assert_matches_type(VideoDeleteResponse, video, path=["response"]) + + @parametrize + def test_streaming_response_delete(self, client: OpenAI) -> None: + with client.videos.with_streaming_response.delete( + "video_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + video = response.parse() + assert_matches_type(VideoDeleteResponse, video, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_delete(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"): + client.videos.with_raw_response.delete( + "", + ) + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_method_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None: + 
respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + video = client.videos.download_content( + video_id="video_123", + ) + assert isinstance(video, _legacy_response.HttpxBinaryResponseContent) + assert video.json() == {"foo": "bar"} + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_method_download_content_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None: + respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + video = client.videos.download_content( + video_id="video_123", + variant="video", + ) + assert isinstance(video, _legacy_response.HttpxBinaryResponseContent) + assert video.json() == {"foo": "bar"} + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_raw_response_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None: + respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = client.videos.with_raw_response.download_content( + video_id="video_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + video = response.parse() + assert_matches_type(_legacy_response.HttpxBinaryResponseContent, video, path=["response"]) + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_streaming_response_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None: + respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + with client.videos.with_streaming_response.download_content( + video_id="video_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + video = response.parse() + assert_matches_type(bytes, video, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_path_params_download_content(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"): + client.videos.with_raw_response.download_content( + video_id="", + ) + + @parametrize + def test_method_remix(self, client: OpenAI) -> None: + video = client.videos.remix( + video_id="video_123", + prompt="x", + ) + assert_matches_type(Video, video, path=["response"]) + + @parametrize + def test_raw_response_remix(self, client: OpenAI) -> None: + response = client.videos.with_raw_response.remix( + video_id="video_123", + prompt="x", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + video = response.parse() + assert_matches_type(Video, video, path=["response"]) + + @parametrize + def test_streaming_response_remix(self, client: OpenAI) -> None: + with client.videos.with_streaming_response.remix( + video_id="video_123", + prompt="x", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + video = response.parse() + assert_matches_type(Video, video, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_remix(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"): + client.videos.with_raw_response.remix( + video_id="", + prompt="x", + ) + + +class TestAsyncVideos: + parametrize = 
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.create(
+            prompt="x",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.create(
+            prompt="x",
+            input_reference=b"raw file contents",
+            model="sora-2",
+            seconds="4",
+            size="720x1280",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.create(
+            prompt="x",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.create(
+            prompt="x",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.retrieve(
+            "video_123",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.retrieve(
+            "video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.retrieve(
+            "video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            await async_client.videos.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.list()
+        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.list(
+            after="after",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
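+        # .parse() on a raw response is synchronous, even on the async client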
+        video = response.parse()
+        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.delete(
+            "video_123",
+        )
+        assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.delete(
+            "video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.delete(
+            "video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            await async_client.videos.with_raw_response.delete(
+                "",
+            )
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_method_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        video = await async_client.videos.download_content(
+            video_id="video_123",
+        )
+        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
+        assert video.json() == {"foo": "bar"}
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_method_download_content_with_all_params(
+        self, async_client: AsyncOpenAI, respx_mock: MockRouter
+    ) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        video = await async_client.videos.download_content(
+            video_id="video_123",
+            variant="video",
+        )
+        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
+        assert video.json() == {"foo": "bar"}
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_raw_response_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+
+        response = await async_client.videos.with_raw_response.download_content(
+            video_id="video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, video, path=["response"])
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
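+    # the streaming variant yields the raw bytes rather than a parsed model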
+    async def test_streaming_response_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        async with async_client.videos.with_streaming_response.download_content(
+            video_id="video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(bytes, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_path_params_download_content(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            await async_client.videos.with_raw_response.download_content(
+                video_id="",
+            )
+
+    @parametrize
+    async def test_method_remix(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.remix(
+            video_id="video_123",
+            prompt="x",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_remix(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.remix(
+            video_id="video_123",
+            prompt="x",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_remix(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.remix(
+            video_id="video_123",
+            prompt="x",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_remix(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            await async_client.videos.with_raw_response.remix(
+                video_id="",
+                prompt="x",
+            )
+
+
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_create_and_poll_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+    checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
+    assert_signatures_in_sync(
+        checking_client.videos.create,
+        checking_client.videos.create_and_poll,
+        exclude_params={"extra_headers", "extra_query", "extra_body", "timeout"},
+    )
diff --git a/tests/lib/test_audio.py b/tests/lib/test_audio.py
index ff8dba4714..93ed3a33b2 100644
--- a/tests/lib/test_audio.py
+++ b/tests/lib/test_audio.py
@@ -44,7 +44,8 @@ def test_translation_create_overloads_in_sync(sync: bool, client: OpenAI, async_
         elif is_literal_type(typ):
             overload_response_formats.update(get_args(typ))
 
-    src_response_formats: set[str] = set(get_args(AudioResponseFormat))
+    # 'diarized_json' applies only to transcriptions, not translations.
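+    # It is therefore removed before checking translation overload coverage.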
+    src_response_formats: set[str] = set(get_args(AudioResponseFormat)) - {"diarized_json"}
 
     diff = src_response_formats.difference(overload_response_formats)
     assert len(diff) == 0, f"some response format options don't have overloads"
@@ -57,18 +58,27 @@ def test_transcription_create_overloads_in_sync(sync: bool, client: OpenAI, asyn
     overload_response_formats: set[str] = set()
 
     for i, overload in enumerate(typing_extensions.get_overloads(fn)):
-        assert_signatures_in_sync(
-            fn,
-            overload,
-            exclude_params={"response_format", "stream"},
-            description=f" for overload {i}",
-        )
-
         sig = inspect.signature(overload)
         typ = evaluate_forwardref(
             sig.parameters["response_format"].annotation,
             globalns=sys.modules[fn.__module__].__dict__,
         )
+
+        exclude_params = {"response_format", "stream"}
+        # known_speaker_names and known_speaker_references are only supported by diarized_json
+        if not (is_literal_type(typ) and set(get_args(typ)) == {"diarized_json"}):
+            exclude_params.update({"known_speaker_names", "known_speaker_references"})
+
+        # diarized_json does not support these parameters
+        if is_literal_type(typ) and set(get_args(typ)) == {"diarized_json"}:
+            exclude_params.update({"include", "prompt", "timestamp_granularities"})
+
+        assert_signatures_in_sync(
+            fn,
+            overload,
+            exclude_params=exclude_params,
+            description=f" for overload {i}",
+        )
         if is_union_type(typ):
             for arg in get_args(typ):
                 if not is_literal_type(arg):
diff --git a/tests/test_transform.py b/tests/test_transform.py
index 036cfdfb06..bece75dfc7 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -8,7 +8,7 @@
 
 import pytest
 
-from openai._types import NOT_GIVEN, Base64FileInput
+from openai._types import Base64FileInput, omit, not_given
 from openai._utils import (
     PropertyInfo,
     transform as _transform,
@@ -450,4 +450,11 @@ async def test_transform_skipping(use_async: bool) -> None:
 @pytest.mark.asyncio
 async def test_strips_notgiven(use_async: bool) -> None:
     assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
-    assert await transform({"foo_bar": NOT_GIVEN}, Foo1, use_async) == {}
+    assert await transform({"foo_bar": not_given}, Foo1, use_async) == {}
+
+
+@parametrize
+@pytest.mark.asyncio
+async def test_strips_omit(use_async: bool) -> None:
+    assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
+    assert await transform({"foo_bar": omit}, Foo1, use_async) == {}
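+    # `omit` replaces NOT_GIVEN for optional parameters in v2; like `not_given`
+    # above, it is stripped from the payload rather than serialized.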