diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 986390db98..28c811f943 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "2.36.0"
+  ".": "2.37.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 9b6dc7e58b..38a36fd922 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 233
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai/openai-371f497afe4d6070f6e252e5febbe8f453c7058a8dff0c26a01b4d88442a4ac2.yml
-openapi_spec_hash: d39f46e8fda45f77096448105efd175a
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai/openai-50d816559ef0935e64d07789ff936a2b762e26ab0714a2fa6bc06d06d4484294.yml
+openapi_spec_hash: c5d8f37edbf66c1fef627d787b4c54fd
 config_hash: b64135fff1fe9cf4069b9ecf59ae8b07
diff --git a/CHANGELOG.md b/CHANGELOG.md
index af067330f6..02769287df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
 # Changelog
 
+## 2.37.0 (2026-05-11)
+
+Full Changelog: [v2.36.0...v2.37.0](https://github.com/openai/openai-python/compare/v2.36.0...v2.37.0)
+
+### Features
+
+* **api:** add service_tier parameter to responses compact method ([625827c](https://github.com/openai/openai-python/commit/625827c5509ece3c40e5002be37a9bd9d91b5374))
+* **internal/types:** support eagerly validating pydantic iterators ([7e527bc](https://github.com/openai/openai-python/commit/7e527bc927cc58b74d7619abf7f1fbcfff8bddfa))
+
+
+### Bug Fixes
+
+* **client:** add missing f-string prefix in file type error message ([c85ebd9](https://github.com/openai/openai-python/commit/c85ebd935cb4b80e7e97ce255437684f6411fb00))
+
 ## 2.36.0 (2026-05-07)
 
 Full Changelog: [v2.35.1...v2.36.0](https://github.com/openai/openai-python/compare/v2.35.1...v2.36.0)
diff --git a/pyproject.toml b/pyproject.toml
index ec1af48c8b..452ac3125a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "2.36.0"
+version = "2.37.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_files.py b/src/openai/_files.py
index 4cc4f35d8f..1a2cc77478 100644
--- a/src/openai/_files.py
+++ b/src/openai/_files.py
@@ -99,7 +99,7 @@ async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles
     elif is_sequence_t(files):
         files = [(key, await _async_transform_file(file)) for key, file in files]
     else:
-        raise TypeError("Unexpected file type input {type(files)}, expected mapping or sequence")
+        raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")
 
     return files
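Why this one-character fix matters: without the `f` prefix the braces are never interpolated, so the error message contained the literal text `{type(files)}` instead of the offending type. A standalone illustration (not SDK code):

```python
# Illustration only, not SDK code: braces are only interpolated
# when the string literal carries the f prefix.
files = ["not", "a", "mapping"]

print("Unexpected file type input {type(files)}")
# -> Unexpected file type input {type(files)}   (braces emitted literally)

print(f"Unexpected file type input {type(files)}")
# -> Unexpected file type input <class 'list'>
```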
diff --git a/src/openai/_models.py b/src/openai/_models.py
index 5f12232437..ed4c1f82d6 100644
--- a/src/openai/_models.py
+++ b/src/openai/_models.py
@@ -27,7 +27,9 @@
     Protocol,
     Required,
     Sequence,
+    Annotated,
     ParamSpec,
+    TypeAlias,
     TypedDict,
     TypeGuard,
     final,
@@ -81,7 +83,15 @@
 from ._constants import RAW_RESPONSE_HEADER
 
 if TYPE_CHECKING:
+    from pydantic import GetCoreSchemaHandler, ValidatorFunctionWrapHandler
+    from pydantic_core import CoreSchema, core_schema
     from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema
+else:
+    try:
+        from pydantic_core import CoreSchema, core_schema
+    except ImportError:
+        CoreSchema = None
+        core_schema = None
 
 
 __all__ = ["BaseModel", "GenericModel"]
@@ -422,6 +432,76 @@ def model_dump_json(
         )
 
 
+class _EagerIterable(list[_T], Generic[_T]):
+    """
+    Accepts any Iterable[T] input (including generators), consumes it
+    eagerly, and validates all items upfront.
+
+    Validation preserves the original container type where possible
+    (e.g. a set[T] stays a set[T]). Serialization (model_dump / JSON)
+    always emits a list — round-tripping through model_dump() will not
+    restore the original container type.
+    """
+
+    @classmethod
+    def __get_pydantic_core_schema__(
+        cls,
+        source_type: Any,
+        handler: GetCoreSchemaHandler,
+    ) -> CoreSchema:
+        (item_type,) = get_args(source_type) or (Any,)
+        item_schema: CoreSchema = handler.generate_schema(item_type)
+        list_of_items_schema: CoreSchema = core_schema.list_schema(item_schema)
+
+        return core_schema.no_info_wrap_validator_function(
+            cls._validate,
+            list_of_items_schema,
+            serialization=core_schema.plain_serializer_function_ser_schema(
+                cls._serialize,
+                info_arg=False,
+            ),
+        )
+
+    @staticmethod
+    def _validate(v: Iterable[_T], handler: "ValidatorFunctionWrapHandler") -> Any:
+        original_type: type[Any] = type(v)
+
+        # Normalize to list so list_schema can validate each item
+        if isinstance(v, list):
+            items: list[_T] = v
+        else:
+            try:
+                items = list(v)
+            except TypeError as e:
+                raise TypeError("Value is not iterable") from e
+
+        # Validate items against the inner schema
+        validated: list[_T] = handler(items)
+
+        # Reconstruct original container type
+        if original_type is list:
+            return validated
+        # str(list) produces the list's repr, not a string built from items,
+        # so skip reconstruction for str and its subclasses.
+        if issubclass(original_type, str):
+            return validated
+        try:
+            return original_type(validated)
+        except (TypeError, ValueError):
+            # If the type cannot be reconstructed, just return the validated list
+            return validated
+
+    @staticmethod
+    def _serialize(v: Iterable[_T]) -> list[_T]:
+        """Always serialize as a list so Pydantic's JSON encoder is happy."""
+        if isinstance(v, list):
+            return v
+        return list(v)
+
+
+EagerIterable: TypeAlias = Annotated[Iterable[_T], _EagerIterable]
+
+
 def _construct_field(value: object, field: FieldInfo, key: str) -> object:
     if value is None:
         return field_get_default(field)
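For reviewers, a sketch of how the new annotation is meant to be used. The `Example` model is hypothetical (the shipped usage lives in the new tests at the end of this diff), and it assumes pydantic v2, mirroring the `skipif` guard on those tests:

```python
# Hypothetical usage sketch of the new EagerIterable annotation (pydantic v2);
# the Example model is invented for illustration and mirrors the new tests.
from openai._models import BaseModel, EagerIterable


class Example(BaseModel):
    # A plain Iterable[int] field would be stored as a lazy ValidatorIterator;
    # EagerIterable consumes and validates the input up front instead.
    items: EagerIterable[int]


example = Example.model_validate({"items": (n for n in range(3))})
print(example.items)         # [0, 1, 2] -- generator already consumed and validated
print(example.model_dump())  # {'items': [0, 1, 2]}
print(example.model_dump())  # {'items': [0, 1, 2]} -- repeated dumps keep the data
```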
diff --git a/src/openai/_version.py b/src/openai/_version.py
index a6435eede3..43d6a12d19 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "2.36.0"  # x-release-please-version
+__version__ = "2.37.0"  # x-release-please-version
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index 4b8bd9af21..e83f9824be 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -1704,6 +1704,7 @@ def compact(
         previous_response_id: Optional[str] | Omit = omit,
         prompt_cache_key: Optional[str] | Omit = omit,
         prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
+        service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1743,6 +1744,8 @@
           prompt_cache_retention: How long to retain a prompt cache entry created by this request.
 
+          service_tier: The service tier to use for this request.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1761,6 +1764,7 @@
                     "previous_response_id": previous_response_id,
                     "prompt_cache_key": prompt_cache_key,
                     "prompt_cache_retention": prompt_cache_retention,
+                    "service_tier": service_tier,
                 },
                 response_compact_params.ResponseCompactParams,
             ),
@@ -3410,6 +3414,7 @@
         previous_response_id: Optional[str] | Omit = omit,
         prompt_cache_key: Optional[str] | Omit = omit,
         prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
+        service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -3449,6 +3454,8 @@
           prompt_cache_retention: How long to retain a prompt cache entry created by this request.
 
+          service_tier: The service tier to use for this request.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -3467,6 +3474,7 @@
                     "previous_response_id": previous_response_id,
                     "prompt_cache_key": prompt_cache_key,
                     "prompt_cache_retention": prompt_cache_retention,
+                    "service_tier": service_tier,
                 },
                 response_compact_params.ResponseCompactParams,
             ),
diff --git a/src/openai/types/responses/response_compact_params.py b/src/openai/types/responses/response_compact_params.py
index 2575438b34..923a09e56d 100644
--- a/src/openai/types/responses/response_compact_params.py
+++ b/src/openai/types/responses/response_compact_params.py
@@ -143,3 +143,6 @@ class ResponseCompactParams(TypedDict, total=False):
 
     prompt_cache_retention: Optional[Literal["in_memory", "24h"]]
     """How long to retain a prompt cache entry created by this request."""
+
+    service_tier: Optional[Literal["auto", "default", "flex", "priority"]]
+    """The service tier to use for this request."""
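A hedged usage sketch for the new parameter. `resp_123` is a placeholder borrowed from the tests below, and any required arguments not visible in this diff are elided:

```python
# Usage sketch only: exercises the new service_tier parameter on the compact
# method. "resp_123" is a placeholder ID, and any required arguments not
# shown in this diff are elided.
from openai import OpenAI

client = OpenAI()

compacted = client.responses.compact(
    previous_response_id="resp_123",
    prompt_cache_retention="in_memory",
    service_tier="auto",  # new in 2.37.0; also accepts "default", "flex", "priority"
)
```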
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 40405f61b2..094687c2c6 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -389,6 +389,7 @@ def test_method_compact_with_all_params(self, client: OpenAI) -> None:
             previous_response_id="resp_123",
             prompt_cache_key="prompt_cache_key",
             prompt_cache_retention="in_memory",
+            service_tier="auto",
         )
         assert_matches_type(CompactedResponse, response, path=["response"])
 
@@ -801,6 +802,7 @@ async def test_method_compact_with_all_params(self, async_client: AsyncOpenAI) -
             previous_response_id="resp_123",
             prompt_cache_key="prompt_cache_key",
             prompt_cache_retention="in_memory",
+            service_tier="auto",
         )
         assert_matches_type(CompactedResponse, response, path=["response"])
 
diff --git a/tests/test_models.py b/tests/test_models.py
index 588869ee35..cc204bac1d 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,7 +1,8 @@
 import json
-from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast
+from typing import TYPE_CHECKING, Any, Dict, List, Union, Iterable, Optional, cast
 from datetime import datetime, timezone
-from typing_extensions import Literal, Annotated, TypeAliasType
+from collections import deque
+from typing_extensions import Literal, Annotated, TypedDict, TypeAliasType
 
 import pytest
 import pydantic
@@ -9,7 +10,7 @@
 
 from openai._utils import PropertyInfo
 from openai._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
-from openai._models import DISCRIMINATOR_CACHE, BaseModel, construct_type
+from openai._models import DISCRIMINATOR_CACHE, BaseModel, EagerIterable, construct_type
 
 
 class BasicModel(BaseModel):
@@ -961,3 +962,56 @@ def __getattr__(self, attr: str) -> Item: ...
     assert model.a.prop == 1
     assert isinstance(model.a, Item)
     assert model.other == "foo"
+
+
+# NOTE: Workaround for Pydantic Iterable behavior.
+# Iterable fields are replaced with a ValidatorIterator and may be consumed
+# during serialization, which can cause subsequent dumps to return empty data.
+# See: https://github.com/pydantic/pydantic/issues/9541
+@pytest.mark.parametrize(
+    "data, expected_validated",
+    [
+        ([1, 2, 3], [1, 2, 3]),
+        ((1, 2, 3), (1, 2, 3)),
+        (set([1, 2, 3]), set([1, 2, 3])),
+        (iter([1, 2, 3]), [1, 2, 3]),
+        ([], []),
+        ((x for x in [1, 2, 3]), [1, 2, 3]),
+        (map(lambda x: x, [1, 2, 3]), [1, 2, 3]),
+        (frozenset([1, 2, 3]), frozenset([1, 2, 3])),
+        (deque([1, 2, 3]), deque([1, 2, 3])),
+    ],
+    ids=["list", "tuple", "set", "iterator", "empty", "generator", "map", "frozenset", "deque"],
+)
+@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2")
+def test_iterable_construction(data: Iterable[int], expected_validated: Iterable[int]) -> None:
+    class TypeWithIterable(TypedDict):
+        items: EagerIterable[int]
+
+    class Model(BaseModel):
+        data: TypeWithIterable
+
+    m = Model.model_validate({"data": {"items": data}})
+    assert m.data["items"] == expected_validated
+
+    # Verify repeated dumps don't lose data (the original bug)
+    assert m.model_dump()["data"]["items"] == list(expected_validated)
+    assert m.model_dump()["data"]["items"] == list(expected_validated)
+
+
+@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2")
+def test_iterable_construction_str_falls_back_to_list() -> None:
+    # str is iterable (over chars), but str(list_of_chars) produces the list's repr
+    # rather than reconstructing a string from items. We special-case str to fall
+    # back to list instead of attempting reconstruction.
+    class TypeWithIterable(TypedDict):
+        items: EagerIterable[str]
+
+    class Model(BaseModel):
+        data: TypeWithIterable
+
+    m = Model.model_validate({"data": {"items": "hello"}})
+
+    # falls back to list of chars rather than calling str(["h", "e", "l", "l", "o"])
+    assert m.data["items"] == ["h", "e", "l", "l", "o"]
+    assert m.model_dump()["data"]["items"] == ["h", "e", "l", "l", "o"]
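To make the NOTE in `tests/test_models.py` concrete, here is a minimal reproduction of the upstream pydantic behavior that `EagerIterable` works around (assumes pydantic v2; behavior as described in pydantic issue #9541):

```python
# Minimal reproduction (assumes pydantic v2) of the upstream behavior the NOTE
# above references: a plain Iterable field is kept as a lazy ValidatorIterator,
# so serialization consumes it and later dumps see an exhausted iterator.
from typing import Iterable

import pydantic


class LazyModel(pydantic.BaseModel):
    items: Iterable[int]


m = LazyModel.model_validate({"items": (n for n in range(3))})
print(m.model_dump())  # {'items': [0, 1, 2]} -- the generator is consumed here
print(m.model_dump())  # {'items': []} -- the data is gone on the second dump
```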