Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "2.36.0"
".": "2.37.0"
}
4 changes: 2 additions & 2 deletions .stats.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
configured_endpoints: 233
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai/openai-371f497afe4d6070f6e252e5febbe8f453c7058a8dff0c26a01b4d88442a4ac2.yml
openapi_spec_hash: d39f46e8fda45f77096448105efd175a
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai/openai-50d816559ef0935e64d07789ff936a2b762e26ab0714a2fa6bc06d06d4484294.yml
openapi_spec_hash: c5d8f37edbf66c1fef627d787b4c54fd
config_hash: b64135fff1fe9cf4069b9ecf59ae8b07
14 changes: 14 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,19 @@
# Changelog

## 2.37.0 (2026-05-11)

Full Changelog: [v2.36.0...v2.37.0](https://github.com/openai/openai-python/compare/v2.36.0...v2.37.0)

### Features

* **api:** add service_tier parameter to responses compact method ([625827c](https://github.com/openai/openai-python/commit/625827c5509ece3c40e5002be37a9bd9d91b5374))
* **internal/types:** support eagerly validating pydantic iterators ([7e527bc](https://github.com/openai/openai-python/commit/7e527bc927cc58b74d7619abf7f1fbcfff8bddfa))


### Bug Fixes

* **client:** add missing f-string prefix in file type error message ([c85ebd9](https://github.com/openai/openai-python/commit/c85ebd935cb4b80e7e97ce255437684f6411fb00))

## 2.36.0 (2026-05-07)

Full Changelog: [v2.35.1...v2.36.0](https://github.com/openai/openai-python/compare/v2.35.1...v2.36.0)
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "2.36.0"
version = "2.37.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles
elif is_sequence_t(files):
files = [(key, await _async_transform_file(file)) for key, file in files]
else:
raise TypeError("Unexpected file type input {type(files)}, expected mapping or sequence")
raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")

return files

Expand Down
80 changes: 80 additions & 0 deletions src/openai/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,9 @@
Protocol,
Required,
Sequence,
Annotated,
ParamSpec,
TypeAlias,
TypedDict,
TypeGuard,
final,
Expand Down Expand Up @@ -81,7 +83,15 @@
from ._constants import RAW_RESPONSE_HEADER

if TYPE_CHECKING:
from pydantic import GetCoreSchemaHandler, ValidatorFunctionWrapHandler
from pydantic_core import CoreSchema, core_schema
from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema
else:
try:
from pydantic_core import CoreSchema, core_schema
except ImportError:
CoreSchema = None
core_schema = None

__all__ = ["BaseModel", "GenericModel"]

Expand Down Expand Up @@ -422,6 +432,76 @@ def model_dump_json(
)


class _EagerIterable(list[_T], Generic[_T]):
    """
    Accepts any Iterable[T] input (including generators), consumes it
    eagerly, and validates all items upfront.

    Validation preserves the original container type where possible
    (e.g. a set[T] stays a set[T]). Serialization (model_dump / JSON)
    always emits a list — round-tripping through model_dump() will not
    restore the original container type.

    Not meant to be instantiated directly: it is used purely as
    ``Annotated`` metadata (see the ``EagerIterable`` alias below), so
    pydantic only ever invokes the ``__get_pydantic_core_schema__`` hook.
    """

    @classmethod
    def __get_pydantic_core_schema__(
        cls,
        source_type: Any,
        handler: GetCoreSchemaHandler,
    ) -> CoreSchema:
        """Build the core schema used to eagerly validate ``source_type``.

        ``source_type`` is the annotated type (e.g. ``Iterable[int]``); its
        single type argument becomes the per-item schema, falling back to
        ``Any`` when the annotation carries no type argument.
        """
        (item_type,) = get_args(source_type) or (Any,)
        item_schema: CoreSchema = handler.generate_schema(item_type)
        list_of_items_schema: CoreSchema = core_schema.list_schema(item_schema)

        # Wrap validator: _validate normalizes the input to a list, delegates
        # per-item validation to the inner list schema via the handler, then
        # restores the original container type. Serialization bypasses that
        # reconstruction and always emits a plain list via _serialize.
        return core_schema.no_info_wrap_validator_function(
            cls._validate,
            list_of_items_schema,
            serialization=core_schema.plain_serializer_function_ser_schema(
                cls._serialize,
                info_arg=False,
            ),
        )

    @staticmethod
    def _validate(v: Iterable[_T], handler: "ValidatorFunctionWrapHandler") -> Any:
        """Eagerly consume ``v``, validate its items, and rebuild its type.

        ``handler`` runs the inner ``list_schema`` over the normalized list.
        Raises ``TypeError`` when ``v`` is not actually iterable.
        """
        original_type: type[Any] = type(v)

        # Normalize to list so list_schema can validate each item.
        # This step is what consumes one-shot iterables (generators,
        # iterators, map objects) up front.
        if isinstance(v, list):
            items: list[_T] = v
        else:
            try:
                items = list(v)
            except TypeError as e:
                raise TypeError("Value is not iterable") from e

        # Validate items against the inner schema
        validated: list[_T] = handler(items)

        # Reconstruct original container type
        if original_type is list:
            return validated
        # str(list) produces the list's repr, not a string built from items,
        # so skip reconstruction for str and its subclasses.
        if issubclass(original_type, str):
            return validated
        try:
            # Assumes the container can be rebuilt from an iterable of its
            # items (true for tuple/set/frozenset/deque and most sequences).
            return original_type(validated)
        except (TypeError, ValueError):
            # If the type cannot be reconstructed, just return the validated list
            return validated

    @staticmethod
    def _serialize(v: Iterable[_T]) -> list[_T]:
        """Always serialize as a list so Pydantic's JSON encoder is happy."""
        if isinstance(v, list):
            return v
        return list(v)


# Public alias: annotate fields as ``EagerIterable[T]`` to get eager,
# up-front validation of any iterable input (including generators).
EagerIterable: TypeAlias = Annotated[Iterable[_T], _EagerIterable]


def _construct_field(value: object, field: FieldInfo, key: str) -> object:
if value is None:
return field_get_default(field)
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "2.36.0" # x-release-please-version
__version__ = "2.37.0" # x-release-please-version
8 changes: 8 additions & 0 deletions src/openai/resources/responses/responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -1704,6 +1704,7 @@ def compact(
previous_response_id: Optional[str] | Omit = omit,
prompt_cache_key: Optional[str] | Omit = omit,
prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand Down Expand Up @@ -1743,6 +1744,8 @@ def compact(

prompt_cache_retention: How long to retain a prompt cache entry created by this request.

service_tier: The service tier to use for this request.

extra_headers: Send extra headers

extra_query: Add additional query parameters to the request
Expand All @@ -1761,6 +1764,7 @@ def compact(
"previous_response_id": previous_response_id,
"prompt_cache_key": prompt_cache_key,
"prompt_cache_retention": prompt_cache_retention,
"service_tier": service_tier,
},
response_compact_params.ResponseCompactParams,
),
Expand Down Expand Up @@ -3410,6 +3414,7 @@ async def compact(
previous_response_id: Optional[str] | Omit = omit,
prompt_cache_key: Optional[str] | Omit = omit,
prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand Down Expand Up @@ -3449,6 +3454,8 @@ async def compact(

prompt_cache_retention: How long to retain a prompt cache entry created by this request.

service_tier: The service tier to use for this request.

extra_headers: Send extra headers

extra_query: Add additional query parameters to the request
Expand All @@ -3467,6 +3474,7 @@ async def compact(
"previous_response_id": previous_response_id,
"prompt_cache_key": prompt_cache_key,
"prompt_cache_retention": prompt_cache_retention,
"service_tier": service_tier,
},
response_compact_params.ResponseCompactParams,
),
Expand Down
3 changes: 3 additions & 0 deletions src/openai/types/responses/response_compact_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,3 +143,6 @@ class ResponseCompactParams(TypedDict, total=False):

prompt_cache_retention: Optional[Literal["in_memory", "24h"]]
"""How long to retain a prompt cache entry created by this request."""

service_tier: Optional[Literal["auto", "default", "flex", "priority"]]
"""The service tier to use for this request."""
2 changes: 2 additions & 0 deletions tests/api_resources/test_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -389,6 +389,7 @@ def test_method_compact_with_all_params(self, client: OpenAI) -> None:
previous_response_id="resp_123",
prompt_cache_key="prompt_cache_key",
prompt_cache_retention="in_memory",
service_tier="auto",
)
assert_matches_type(CompactedResponse, response, path=["response"])

Expand Down Expand Up @@ -801,6 +802,7 @@ async def test_method_compact_with_all_params(self, async_client: AsyncOpenAI) -
previous_response_id="resp_123",
prompt_cache_key="prompt_cache_key",
prompt_cache_retention="in_memory",
service_tier="auto",
)
assert_matches_type(CompactedResponse, response, path=["response"])

Expand Down
60 changes: 57 additions & 3 deletions tests/test_models.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
import json
from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast
from typing import TYPE_CHECKING, Any, Dict, List, Union, Iterable, Optional, cast
from datetime import datetime, timezone
from typing_extensions import Literal, Annotated, TypeAliasType
from collections import deque
from typing_extensions import Literal, Annotated, TypedDict, TypeAliasType

import pytest
import pydantic
from pydantic import Field

from openai._utils import PropertyInfo
from openai._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
from openai._models import DISCRIMINATOR_CACHE, BaseModel, construct_type
from openai._models import DISCRIMINATOR_CACHE, BaseModel, EagerIterable, construct_type


class BasicModel(BaseModel):
Expand Down Expand Up @@ -961,3 +962,56 @@ def __getattr__(self, attr: str) -> Item: ...
assert model.a.prop == 1
assert isinstance(model.a, Item)
assert model.other == "foo"


# NOTE: Workaround for Pydantic's lazy Iterable handling.
# Pydantic swaps Iterable fields for a ValidatorIterator that can be
# exhausted by the first serialization, making later dumps come back empty.
# See: https://github.com/pydantic/pydantic/issues/9541
@pytest.mark.parametrize(
    "raw, expected",
    [
        pytest.param([1, 2, 3], [1, 2, 3], id="list"),
        pytest.param((1, 2, 3), (1, 2, 3), id="tuple"),
        pytest.param({1, 2, 3}, {1, 2, 3}, id="set"),
        pytest.param(iter([1, 2, 3]), [1, 2, 3], id="iterator"),
        pytest.param([], [], id="empty"),
        pytest.param((x for x in [1, 2, 3]), [1, 2, 3], id="generator"),
        pytest.param(map(lambda x: x, [1, 2, 3]), [1, 2, 3], id="map"),
        pytest.param(frozenset([1, 2, 3]), frozenset([1, 2, 3]), id="frozenset"),
        pytest.param(deque([1, 2, 3]), deque([1, 2, 3]), id="deque"),
    ],
)
@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2")
def test_iterable_construction(raw: Iterable[int], expected: Iterable[int]) -> None:
    class ItemsDict(TypedDict):
        items: EagerIterable[int]

    class Holder(BaseModel):
        data: ItemsDict

    model = Holder.model_validate({"data": {"items": raw}})

    # Eager validation preserves the container type where possible;
    # one-shot iterables collapse to a list.
    assert model.data["items"] == expected

    # The original bug: a second dump must not come back empty.
    as_list = list(expected)
    assert model.model_dump()["data"]["items"] == as_list
    assert model.model_dump()["data"]["items"] == as_list


@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2")
def test_iterable_construction_str_falls_back_to_list() -> None:
    # A str is itself iterable (over characters), but calling str() on a
    # list of characters produces the list's repr rather than re-joining
    # them, so reconstruction is deliberately skipped for str: the
    # validated value stays a list of characters.
    class ItemsDict(TypedDict):
        items: EagerIterable[str]

    class Holder(BaseModel):
        data: ItemsDict

    model = Holder.model_validate({"data": {"items": "hello"}})

    chars = list("hello")
    assert model.data["items"] == chars
    assert model.model_dump()["data"]["items"] == chars
Loading