Commit 2954945e

Robert Craigie <robert@craigie.dev>
2025-03-12 00:22:03
feat(api): add /v1/responses and built-in tools
[platform.openai.com/docs/changelog](https://platform.openai.com/docs/changelog)
1 parent 71f7354
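This commit adds the new `/v1/responses` API surface plus the parsing and streaming helpers in `lib/`; the diffs below cover the example scripts and the supporting plumbing. As a minimal sketch of the new endpoint (assuming an `OPENAI_API_KEY` in the environment; the field names follow the examples in this commit):

```python
# Minimal sketch of calling the Responses API introduced in this commit.
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o-2024-08-06",
    input="Write a one-sentence summary of the Responses API.",
)

# A response carries a list of typed output items (messages, tool calls, ...).
for item in response.output:
    print(item.type)
```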
Changed files (196)
examples/responses/__init__.py
examples/responses/streaming.py
@@ -0,0 +1,30 @@
+from typing import List
+
+import rich
+from pydantic import BaseModel
+
+from openai import OpenAI
+
+
+class Step(BaseModel):
+    explanation: str
+    output: str
+
+
+class MathResponse(BaseModel):
+    steps: List[Step]
+    final_answer: str
+
+
+client = OpenAI()
+
+with client.responses.stream(
+    input="solve 8x + 31 = 2",
+    model="gpt-4o-2024-08-06",
+    text_format=MathResponse,
+) as stream:
+    for event in stream:
+        if "output_text" in event.type:
+            rich.print(event)
+
+rich.print(stream.get_final_response())
examples/responses/streaming_tools.py
@@ -0,0 +1,68 @@
+from enum import Enum
+from typing import List, Union
+
+import rich
+from pydantic import BaseModel
+
+import openai
+from openai import OpenAI
+
+
+class Table(str, Enum):
+    orders = "orders"
+    customers = "customers"
+    products = "products"
+
+
+class Column(str, Enum):
+    id = "id"
+    status = "status"
+    expected_delivery_date = "expected_delivery_date"
+    delivered_at = "delivered_at"
+    shipped_at = "shipped_at"
+    ordered_at = "ordered_at"
+    canceled_at = "canceled_at"
+
+
+class Operator(str, Enum):
+    eq = "="
+    gt = ">"
+    lt = "<"
+    le = "<="
+    ge = ">="
+    ne = "!="
+
+
+class OrderBy(str, Enum):
+    asc = "asc"
+    desc = "desc"
+
+
+class DynamicValue(BaseModel):
+    column_name: str
+
+
+class Condition(BaseModel):
+    column: str
+    operator: Operator
+    value: Union[str, int, DynamicValue]
+
+
+class Query(BaseModel):
+    table_name: Table
+    columns: List[Column]
+    conditions: List[Condition]
+    order_by: OrderBy
+
+
+client = OpenAI()
+
+with client.responses.stream(
+    model="gpt-4o-2024-08-06",
+    input="look up all my orders in november of last year that were fulfilled but not delivered on time",
+    tools=[
+        openai.pydantic_function_tool(Query),
+    ],
+) as stream:
+    for event in stream:
+        rich.print(event)
examples/responses/structured_outputs.py
@@ -0,0 +1,55 @@
+from typing import List
+
+import rich
+from pydantic import BaseModel
+
+from openai import OpenAI
+
+
+class Step(BaseModel):
+    explanation: str
+    output: str
+
+
+class MathResponse(BaseModel):
+    steps: List[Step]
+    final_answer: str
+
+
+client = OpenAI()
+
+rsp = client.responses.parse(
+    input="solve 8x + 31 = 2",
+    model="gpt-4o-2024-08-06",
+    text_format=MathResponse,
+)
+
+for output in rsp.output:
+    if output.type != "message":
+        raise Exception("Unexpected non message")
+
+    for item in output.content:
+        if item.type != "output_text":
+            raise Exception("unexpected output type")
+
+        if not item.parsed:
+            raise Exception("Could not parse response")
+
+        rich.print(item.parsed)
+
+        print("answer: ", item.parsed.final_answer)
+
+# or
+
+message = rsp.output[0]
+assert message.type == "message"
+
+text = message.content[0]
+assert text.type == "output_text"
+
+if not text.parsed:
+    raise Exception("Could not parse response")
+
+rich.print(text.parsed)
+
+print("answer: ", text.parsed.final_answer)
examples/responses/structured_outputs_tools.py
@@ -0,0 +1,73 @@
+from enum import Enum
+from typing import List, Union
+
+import rich
+from pydantic import BaseModel
+
+import openai
+from openai import OpenAI
+
+
+class Table(str, Enum):
+    orders = "orders"
+    customers = "customers"
+    products = "products"
+
+
+class Column(str, Enum):
+    id = "id"
+    status = "status"
+    expected_delivery_date = "expected_delivery_date"
+    delivered_at = "delivered_at"
+    shipped_at = "shipped_at"
+    ordered_at = "ordered_at"
+    canceled_at = "canceled_at"
+
+
+class Operator(str, Enum):
+    eq = "="
+    gt = ">"
+    lt = "<"
+    le = "<="
+    ge = ">="
+    ne = "!="
+
+
+class OrderBy(str, Enum):
+    asc = "asc"
+    desc = "desc"
+
+
+class DynamicValue(BaseModel):
+    column_name: str
+
+
+class Condition(BaseModel):
+    column: str
+    operator: Operator
+    value: Union[str, int, DynamicValue]
+
+
+class Query(BaseModel):
+    table_name: Table
+    columns: List[Column]
+    conditions: List[Condition]
+    order_by: OrderBy
+
+
+client = OpenAI()
+
+response = client.responses.parse(
+    model="gpt-4o-2024-08-06",
+    input="look up all my orders in november of last year that were fulfilled but not delivered on time",
+    tools=[
+        openai.pydantic_function_tool(Query),
+    ],
+)
+
+rich.print(response)
+
+function_call = response.output[0]
+assert function_call.type == "function_call"
+assert isinstance(function_call.parsed_arguments, Query)
+print("table name:", function_call.parsed_arguments.table_name)
src/openai/lib/_parsing/_responses.py
@@ -0,0 +1,168 @@
+from __future__ import annotations
+
+import json
+from typing import TYPE_CHECKING, Any, List, Iterable, cast
+from typing_extensions import TypeVar, assert_never
+
+import pydantic
+
+from .._tools import ResponsesPydanticFunctionTool
+from ..._types import NotGiven
+from ..._utils import is_given
+from ..._compat import PYDANTIC_V2, model_parse_json
+from ..._models import construct_type_unchecked
+from .._pydantic import is_basemodel_type, is_dataclass_like_type
+from ._completions import solve_response_format_t, type_to_response_format_param
+from ...types.responses import (
+    Response,
+    ToolParam,
+    ParsedContent,
+    ParsedResponse,
+    FunctionToolParam,
+    ParsedResponseOutputItem,
+    ParsedResponseOutputText,
+    ResponseFunctionToolCall,
+    ParsedResponseOutputMessage,
+    ResponseFormatTextConfigParam,
+    ParsedResponseFunctionToolCall,
+)
+from ...types.chat.completion_create_params import ResponseFormat
+
+TextFormatT = TypeVar(
+    "TextFormatT",
+    # if it isn't given then we don't do any parsing
+    default=None,
+)
+
+
+def type_to_text_format_param(type_: type) -> ResponseFormatTextConfigParam:
+    response_format_dict = type_to_response_format_param(type_)
+    assert is_given(response_format_dict)
+    response_format_dict = cast(ResponseFormat, response_format_dict)  # pyright: ignore[reportUnnecessaryCast]
+    assert response_format_dict["type"] == "json_schema"
+    assert "schema" in response_format_dict["json_schema"]
+
+    return {
+        "type": "json_schema",
+        "strict": True,
+        "name": response_format_dict["json_schema"]["name"],
+        "schema": response_format_dict["json_schema"]["schema"],
+    }
+
+
+def parse_response(
+    *,
+    text_format: type[TextFormatT] | NotGiven,
+    input_tools: Iterable[ToolParam] | NotGiven | None,
+    response: Response | ParsedResponse[object],
+) -> ParsedResponse[TextFormatT]:
+    solved_t = solve_response_format_t(text_format)
+    output_list: List[ParsedResponseOutputItem[TextFormatT]] = []
+
+    for output in response.output:
+        if output.type == "message":
+            content_list: List[ParsedContent[TextFormatT]] = []
+            for item in output.content:
+                if item.type != "output_text":
+                    content_list.append(item)
+                    continue
+
+                content_list.append(
+                    construct_type_unchecked(
+                        type_=cast(Any, ParsedResponseOutputText)[solved_t],
+                        value={
+                            **item.to_dict(),
+                            "parsed": parse_text(item.text, text_format=text_format),
+                        },
+                    )
+                )
+
+            output_list.append(
+                construct_type_unchecked(
+                    type_=cast(Any, ParsedResponseOutputMessage)[solved_t],
+                    value={
+                        **output.to_dict(),
+                        "content": content_list,
+                    },
+                )
+            )
+        elif output.type == "function_call":
+            output_list.append(
+                construct_type_unchecked(
+                    type_=ParsedResponseFunctionToolCall,
+                    value={
+                        **output.to_dict(),
+                        "parsed_arguments": parse_function_tool_arguments(
+                            input_tools=input_tools, function_call=output
+                        ),
+                    },
+                )
+            )
+        elif (
+            output.type == "computer_call"
+            or output.type == "file_search_call"
+            or output.type == "web_search_call"
+            or output.type == "reasoning"
+        ):
+            output_list.append(output)
+        elif TYPE_CHECKING:  # type: ignore
+            assert_never(output)
+        else:
+            output_list.append(output)
+
+    return cast(
+        ParsedResponse[TextFormatT],
+        construct_type_unchecked(
+            type_=cast(Any, ParsedResponse)[solved_t],
+            value={
+                **response.to_dict(),
+                "output": output_list,
+            },
+        ),
+    )
+
+
+def parse_text(text: str, text_format: type[TextFormatT] | NotGiven) -> TextFormatT | None:
+    if not is_given(text_format):
+        return None
+
+    if is_basemodel_type(text_format):
+        return cast(TextFormatT, model_parse_json(text_format, text))
+
+    if is_dataclass_like_type(text_format):
+        if not PYDANTIC_V2:
+            raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {text_format}")
+
+        return pydantic.TypeAdapter(text_format).validate_json(text)
+
+    raise TypeError(f"Unable to automatically parse response format type {text_format}")
+
+
+def get_input_tool_by_name(*, input_tools: Iterable[ToolParam], name: str) -> FunctionToolParam | None:
+    for tool in input_tools:
+        if tool["type"] == "function" and tool.get("name") == name:
+            return tool
+
+    return None
+
+
+def parse_function_tool_arguments(
+    *,
+    input_tools: Iterable[ToolParam] | NotGiven | None,
+    function_call: ParsedResponseFunctionToolCall | ResponseFunctionToolCall,
+) -> object:
+    if input_tools is None or not is_given(input_tools):
+        return None
+
+    input_tool = get_input_tool_by_name(input_tools=input_tools, name=function_call.name)
+    if not input_tool:
+        return None
+
+    tool = cast(object, input_tool)
+    if isinstance(tool, ResponsesPydanticFunctionTool):
+        return model_parse_json(tool.model, function_call.arguments)
+
+    if not input_tool.get("strict"):
+        return None
+
+    return json.loads(function_call.arguments)
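`parse_text` delegates the actual JSON parsing to Pydantic: `model_parse_json` for `BaseModel` subclasses, and `pydantic.TypeAdapter` for dataclass-like types (Pydantic v2 only). A standalone sketch of that second path, using a hypothetical `Point` dataclass rather than anything from this commit:

```python
# Sketch of the Pydantic v2 mechanism parse_text() relies on for
# dataclass-like text formats; Point is a hypothetical example type.
from dataclasses import dataclass

import pydantic


@dataclass
class Point:
    x: int
    y: int


adapter = pydantic.TypeAdapter(Point)
parsed = adapter.validate_json('{"x": 1, "y": 2}')
print(parsed)  # Point(x=1, y=2)
```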
src/openai/lib/streaming/responses/__init__.py
@@ -0,0 +1,13 @@
+from ._events import (
+    ResponseTextDoneEvent as ResponseTextDoneEvent,
+    ResponseTextDeltaEvent as ResponseTextDeltaEvent,
+    ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
+)
+from ._responses import (
+    ResponseStream as ResponseStream,
+    AsyncResponseStream as AsyncResponseStream,
+    ResponseStreamEvent as ResponseStreamEvent,
+    ResponseStreamState as ResponseStreamState,
+    ResponseStreamManager as ResponseStreamManager,
+    AsyncResponseStreamManager as AsyncResponseStreamManager,
+)
src/openai/lib/streaming/responses/_events.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Union, Generic, TypeVar, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._compat import GenericModel
+from ....types.responses import (
+    ParsedResponse,
+    ResponseErrorEvent,
+    ResponseFailedEvent,
+    ResponseCreatedEvent,
+    ResponseTextDoneEvent as RawResponseTextDoneEvent,
+    ResponseAudioDoneEvent,
+    ResponseCompletedEvent as RawResponseCompletedEvent,
+    ResponseTextDeltaEvent as RawResponseTextDeltaEvent,
+    ResponseAudioDeltaEvent,
+    ResponseIncompleteEvent,
+    ResponseInProgressEvent,
+    ResponseRefusalDoneEvent,
+    ResponseRefusalDeltaEvent,
+    ResponseOutputItemDoneEvent,
+    ResponseContentPartDoneEvent,
+    ResponseOutputItemAddedEvent,
+    ResponseContentPartAddedEvent,
+    ResponseAudioTranscriptDoneEvent,
+    ResponseTextAnnotationDeltaEvent,
+    ResponseAudioTranscriptDeltaEvent,
+    ResponseWebSearchCallCompletedEvent,
+    ResponseWebSearchCallSearchingEvent,
+    ResponseFileSearchCallCompletedEvent,
+    ResponseFileSearchCallSearchingEvent,
+    ResponseWebSearchCallInProgressEvent,
+    ResponseFileSearchCallInProgressEvent,
+    ResponseFunctionCallArgumentsDoneEvent,
+    ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent,
+    ResponseCodeInterpreterCallCodeDoneEvent,
+    ResponseCodeInterpreterCallCodeDeltaEvent,
+    ResponseCodeInterpreterCallCompletedEvent,
+    ResponseCodeInterpreterCallInProgressEvent,
+    ResponseCodeInterpreterCallInterpretingEvent,
+)
+
+TextFormatT = TypeVar(
+    "TextFormatT",
+    # if it isn't given then we don't do any parsing
+    default=None,
+)
+
+
+class ResponseTextDeltaEvent(RawResponseTextDeltaEvent):
+    snapshot: str
+
+
+class ResponseTextDoneEvent(RawResponseTextDoneEvent, GenericModel, Generic[TextFormatT]):
+    parsed: Optional[TextFormatT] = None
+
+
+class ResponseFunctionCallArgumentsDeltaEvent(RawResponseFunctionCallArgumentsDeltaEvent):
+    snapshot: str
+
+
+class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[TextFormatT]):
+    response: ParsedResponse[TextFormatT]  # type: ignore[assignment]
+
+
+ResponseStreamEvent: TypeAlias = Annotated[
+    Union[
+        # wrappers with snapshots added on
+        ResponseTextDeltaEvent,
+        ResponseTextDoneEvent[TextFormatT],
+        ResponseFunctionCallArgumentsDeltaEvent,
+        ResponseCompletedEvent[TextFormatT],
+        # the same as the non-accumulated API
+        ResponseAudioDeltaEvent,
+        ResponseAudioDoneEvent,
+        ResponseAudioTranscriptDeltaEvent,
+        ResponseAudioTranscriptDoneEvent,
+        ResponseCodeInterpreterCallCodeDeltaEvent,
+        ResponseCodeInterpreterCallCodeDoneEvent,
+        ResponseCodeInterpreterCallCompletedEvent,
+        ResponseCodeInterpreterCallInProgressEvent,
+        ResponseCodeInterpreterCallInterpretingEvent,
+        ResponseContentPartAddedEvent,
+        ResponseContentPartDoneEvent,
+        ResponseCreatedEvent,
+        ResponseErrorEvent,
+        ResponseFileSearchCallCompletedEvent,
+        ResponseFileSearchCallInProgressEvent,
+        ResponseFileSearchCallSearchingEvent,
+        ResponseFunctionCallArgumentsDoneEvent,
+        ResponseInProgressEvent,
+        ResponseFailedEvent,
+        ResponseIncompleteEvent,
+        ResponseOutputItemAddedEvent,
+        ResponseOutputItemDoneEvent,
+        ResponseRefusalDeltaEvent,
+        ResponseRefusalDoneEvent,
+        ResponseTextAnnotationDeltaEvent,
+        ResponseTextDoneEvent,
+        ResponseWebSearchCallCompletedEvent,
+        ResponseWebSearchCallInProgressEvent,
+        ResponseWebSearchCallSearchingEvent,
+    ],
+    PropertyInfo(discriminator="type"),
+]
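`ResponseStreamEvent` is a union discriminated on `type`, so consumers can branch on the string tag; the wrapper variants add accumulated state (`snapshot`, `parsed`) on top of the raw API events. A hedged consumer-side sketch:

```python
# Sketch of branching on the discriminated stream event union.
from openai import OpenAI

client = OpenAI()

with client.responses.stream(
    model="gpt-4o-2024-08-06",
    input="solve 8x + 31 = 2",
) as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            # Wrapper event: `snapshot` holds the text accumulated so far.
            print(event.snapshot)
        elif event.type == "response.completed":
            print(event.response.id)
```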
src/openai/lib/streaming/responses/_responses.py
@@ -0,0 +1,354 @@
+from __future__ import annotations
+
+import inspect
+from types import TracebackType
+from typing import Any, List, Generic, Iterable, Awaitable, cast
+from typing_extensions import Self, Callable, Iterator, AsyncIterator
+
+from ._types import ParsedResponseSnapshot
+from ._events import (
+    ResponseStreamEvent,
+    ResponseTextDoneEvent,
+    ResponseCompletedEvent,
+    ResponseTextDeltaEvent,
+    ResponseFunctionCallArgumentsDeltaEvent,
+)
+from ...._types import NOT_GIVEN, NotGiven
+from ...._utils import is_given, consume_sync_iterator, consume_async_iterator
+from ...._models import build, construct_type_unchecked
+from ...._streaming import Stream, AsyncStream
+from ....types.responses import ParsedResponse, ResponseStreamEvent as RawResponseStreamEvent
+from ..._parsing._responses import TextFormatT, parse_text, parse_response
+from ....types.responses.tool_param import ToolParam
+from ....types.responses.parsed_response import (
+    ParsedContent,
+    ParsedResponseOutputMessage,
+    ParsedResponseFunctionToolCall,
+)
+
+
+class ResponseStream(Generic[TextFormatT]):
+    def __init__(
+        self,
+        *,
+        raw_stream: Stream[RawResponseStreamEvent],
+        text_format: type[TextFormatT] | NotGiven,
+        input_tools: Iterable[ToolParam] | NotGiven,
+    ) -> None:
+        self._raw_stream = raw_stream
+        self._response = raw_stream.response
+        self._iterator = self.__stream__()
+        self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
+
+    def __next__(self) -> ResponseStreamEvent[TextFormatT]:
+        return self._iterator.__next__()
+
+    def __iter__(self) -> Iterator[ResponseStreamEvent[TextFormatT]]:
+        for item in self._iterator:
+            yield item
+
+    def __enter__(self) -> Self:
+        return self
+
+    def __stream__(self) -> Iterator[ResponseStreamEvent[TextFormatT]]:
+        for sse_event in self._raw_stream:
+            events_to_fire = self._state.handle_event(sse_event)
+            for event in events_to_fire:
+                yield event
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.close()
+
+    def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        self._response.close()
+
+    def get_final_response(self) -> ParsedResponse[TextFormatT]:
+        """Waits until the stream has been read to completion and returns
+        the accumulated `ParsedResponse` object.
+        """
+        self.until_done()
+        response = self._state._completed_response
+        if not response:
+            raise RuntimeError("Didn't receive a `response.completed` event.")
+
+        return response
+
+    def until_done(self) -> Self:
+        """Blocks until the stream has been consumed."""
+        consume_sync_iterator(self)
+        return self
+
+
+class ResponseStreamManager(Generic[TextFormatT]):
+    def __init__(
+        self,
+        api_request: Callable[[], Stream[RawResponseStreamEvent]],
+        *,
+        text_format: type[TextFormatT] | NotGiven,
+        input_tools: Iterable[ToolParam] | NotGiven,
+    ) -> None:
+        self.__stream: ResponseStream[TextFormatT] | None = None
+        self.__api_request = api_request
+        self.__text_format = text_format
+        self.__input_tools = input_tools
+
+    def __enter__(self) -> ResponseStream[TextFormatT]:
+        raw_stream = self.__api_request()
+
+        self.__stream = ResponseStream(
+            raw_stream=raw_stream,
+            text_format=self.__text_format,
+            input_tools=self.__input_tools,
+        )
+
+        return self.__stream
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            self.__stream.close()
+
+
+class AsyncResponseStream(Generic[TextFormatT]):
+    def __init__(
+        self,
+        *,
+        raw_stream: AsyncStream[RawResponseStreamEvent],
+        text_format: type[TextFormatT] | NotGiven,
+        input_tools: Iterable[ToolParam] | NotGiven,
+    ) -> None:
+        self._raw_stream = raw_stream
+        self._response = raw_stream.response
+        self._iterator = self.__stream__()
+        self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
+
+    async def __anext__(self) -> ResponseStreamEvent[TextFormatT]:
+        return await self._iterator.__anext__()
+
+    async def __aiter__(self) -> AsyncIterator[ResponseStreamEvent[TextFormatT]]:
+        async for item in self._iterator:
+            yield item
+
+    async def __stream__(self) -> AsyncIterator[ResponseStreamEvent[TextFormatT]]:
+        async for sse_event in self._raw_stream:
+            events_to_fire = self._state.handle_event(sse_event)
+            for event in events_to_fire:
+                yield event
+
+    async def __aenter__(self) -> Self:
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        await self.close()
+
+    async def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        await self._response.aclose()
+
+    async def get_final_response(self) -> ParsedResponse[TextFormatT]:
+        """Waits until the stream has been read to completion and returns
+        the accumulated `ParsedResponse` object.
+        """
+        await self.until_done()
+        response = self._state._completed_response
+        if not response:
+            raise RuntimeError("Didn't receive a `response.completed` event.")
+
+        return response
+
+    async def until_done(self) -> Self:
+        """Blocks until the stream has been consumed."""
+        await consume_async_iterator(self)
+        return self
+
+
+class AsyncResponseStreamManager(Generic[TextFormatT]):
+    def __init__(
+        self,
+        api_request: Awaitable[AsyncStream[RawResponseStreamEvent]],
+        *,
+        text_format: type[TextFormatT] | NotGiven,
+        input_tools: Iterable[ToolParam] | NotGiven,
+    ) -> None:
+        self.__stream: AsyncResponseStream[TextFormatT] | None = None
+        self.__api_request = api_request
+        self.__text_format = text_format
+        self.__input_tools = input_tools
+
+    async def __aenter__(self) -> AsyncResponseStream[TextFormatT]:
+        raw_stream = await self.__api_request
+
+        self.__stream = AsyncResponseStream(
+            raw_stream=raw_stream,
+            text_format=self.__text_format,
+            input_tools=self.__input_tools,
+        )
+
+        return self.__stream
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            await self.__stream.close()
+
+
+class ResponseStreamState(Generic[TextFormatT]):
+    def __init__(
+        self,
+        *,
+        input_tools: Iterable[ToolParam] | NotGiven,
+        text_format: type[TextFormatT] | NotGiven,
+    ) -> None:
+        self.__current_snapshot: ParsedResponseSnapshot | None = None
+        self._completed_response: ParsedResponse[TextFormatT] | None = None
+        self._input_tools = [tool for tool in input_tools] if is_given(input_tools) else []
+        self._text_format = text_format
+        self._rich_text_format: type | NotGiven = text_format if inspect.isclass(text_format) else NOT_GIVEN
+
+    def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEvent[TextFormatT]]:
+        self.__current_snapshot = snapshot = self.accumulate_event(event)
+
+        events: List[ResponseStreamEvent[TextFormatT]] = []
+
+        if event.type == "response.output_text.delta":
+            output = snapshot.output[event.output_index]
+            assert output.type == "message"
+
+            content = output.content[event.content_index]
+            assert content.type == "output_text"
+
+            events.append(
+                build(
+                    ResponseTextDeltaEvent,
+                    content_index=event.content_index,
+                    delta=event.delta,
+                    item_id=event.item_id,
+                    output_index=event.output_index,
+                    type="response.output_text.delta",
+                    snapshot=content.text,
+                )
+            )
+        elif event.type == "response.output_text.done":
+            output = snapshot.output[event.output_index]
+            assert output.type == "message"
+
+            content = output.content[event.content_index]
+            assert content.type == "output_text"
+
+            events.append(
+                build(
+                    ResponseTextDoneEvent[TextFormatT],
+                    content_index=event.content_index,
+                    item_id=event.item_id,
+                    output_index=event.output_index,
+                    type="response.output_text.done",
+                    text=event.text,
+                    parsed=parse_text(event.text, text_format=self._text_format),
+                )
+            )
+        elif event.type == "response.function_call_arguments.delta":
+            output = snapshot.output[event.output_index]
+            assert output.type == "function_call"
+
+            events.append(
+                build(
+                    ResponseFunctionCallArgumentsDeltaEvent,
+                    delta=event.delta,
+                    item_id=event.item_id,
+                    output_index=event.output_index,
+                    type="response.function_call_arguments.delta",
+                    snapshot=output.arguments,
+                )
+            )
+
+        elif event.type == "response.completed":
+            response = self._completed_response
+            assert response is not None
+
+            events.append(
+                build(
+                    ResponseCompletedEvent,
+                    type="response.completed",
+                    response=response,
+                )
+            )
+        else:
+            events.append(event)
+
+        return events
+
+    def accumulate_event(self, event: RawResponseStreamEvent) -> ParsedResponseSnapshot:
+        snapshot = self.__current_snapshot
+        if snapshot is None:
+            return self._create_initial_response(event)
+
+        if event.type == "response.output_item.added":
+            if event.item.type == "function_call":
+                snapshot.output.append(
+                    construct_type_unchecked(
+                        type_=cast(Any, ParsedResponseFunctionToolCall), value=event.item.to_dict()
+                    )
+                )
+            elif event.item.type == "message":
+                snapshot.output.append(
+                    construct_type_unchecked(type_=cast(Any, ParsedResponseOutputMessage), value=event.item.to_dict())
+                )
+            else:
+                snapshot.output.append(event.item)
+        elif event.type == "response.content_part.added":
+            output = snapshot.output[event.output_index]
+            if output.type == "message":
+                output.content.append(
+                    construct_type_unchecked(type_=cast(Any, ParsedContent), value=event.part.to_dict())
+                )
+        elif event.type == "response.output_text.delta":
+            output = snapshot.output[event.output_index]
+            if output.type == "message":
+                content = output.content[event.content_index]
+                assert content.type == "output_text"
+                content.text += event.delta
+        elif event.type == "response.function_call_arguments.delta":
+            output = snapshot.output[event.output_index]
+            if output.type == "function_call":
+                output.arguments += event.delta
+        elif event.type == "response.completed":
+            self._completed_response = parse_response(
+                text_format=self._text_format,
+                response=event.response,
+                input_tools=self._input_tools,
+            )
+
+        return snapshot
+
+    def _create_initial_response(self, event: RawResponseStreamEvent) -> ParsedResponseSnapshot:
+        if event.type != "response.created":
+            raise RuntimeError(f"Expected to have received `response.created` before `{event.type}`")
+
+        return construct_type_unchecked(type_=ParsedResponseSnapshot, value=event.response.to_dict())
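`AsyncResponseStreamManager` wraps the awaitable API request so the stream is consumed with `async with` / `async for`; otherwise it mirrors the sync manager. A hedged async sketch of the same flow as `examples/responses/streaming.py`:

```python
# Sketch of the async streaming surface; mirrors the sync example above.
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()

    async with client.responses.stream(
        model="gpt-4o-2024-08-06",
        input="solve 8x + 31 = 2",
    ) as stream:
        async for event in stream:
            if event.type == "response.completed":
                print(event.response.id)

    print(await stream.get_final_response())


asyncio.run(main())
```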
src/openai/lib/streaming/responses/_types.py
@@ -0,0 +1,10 @@
+from __future__ import annotations
+
+from typing_extensions import TypeAlias
+
+from ....types.responses import ParsedResponse
+
+ParsedResponseSnapshot: TypeAlias = ParsedResponse[object]
+"""Snapshot type representing an in-progress accumulation of
+a `ParsedResponse` object.
+"""
src/openai/lib/_tools.py
@@ -7,6 +7,7 @@ import pydantic
 from ._pydantic import to_strict_json_schema
 from ..types.chat import ChatCompletionToolParam
 from ..types.shared_params import FunctionDefinition
+from ..types.responses.function_tool_param import FunctionToolParam as ResponsesFunctionToolParam
 
 
 class PydanticFunctionTool(Dict[str, Any]):
@@ -25,6 +26,17 @@ class PydanticFunctionTool(Dict[str, Any]):
         return cast(FunctionDefinition, self)
 
 
+class ResponsesPydanticFunctionTool(Dict[str, Any]):
+    model: type[pydantic.BaseModel]
+
+    def __init__(self, tool: ResponsesFunctionToolParam, model: type[pydantic.BaseModel]) -> None:
+        super().__init__(tool)
+        self.model = model
+
+    def cast(self) -> ResponsesFunctionToolParam:
+        return cast(ResponsesFunctionToolParam, self)
+
+
 def pydantic_function_tool(
     model: type[pydantic.BaseModel],
     *,
src/openai/resources/beta/chat/completions.py
@@ -15,10 +15,7 @@ from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
 from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
 from ...._streaming import Stream
-from ....types.chat import (
-    ChatCompletionReasoningEffort,
-    completion_create_params,
-)
+from ....types.chat import completion_create_params
 from ...._base_client import make_request_options
 from ....lib._parsing import (
     ResponseFormatT,
@@ -28,11 +25,10 @@ from ....lib._parsing import (
 )
 from ....types.chat_model import ChatModel
 from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
-from ....types.shared_params import Metadata
+from ....types.shared_params import Metadata, ReasoningEffort
 from ....types.chat.chat_completion import ChatCompletion
 from ....types.chat.chat_completion_chunk import ChatCompletionChunk
 from ....types.chat.parsed_chat_completion import ParsedChatCompletion
-from ....types.chat.chat_completion_modality import ChatCompletionModality
 from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
 from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
 from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
@@ -78,15 +74,15 @@ class Completions(SyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -95,6 +91,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -192,6 +189,7 @@ class Completions(SyncAPIResource):
                     "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "user": user,
+                    "web_search_options": web_search_options,
                 },
                 completion_create_params.CompletionCreateParams,
             ),
@@ -223,15 +221,15 @@ class Completions(SyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -240,6 +238,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -306,6 +305,7 @@ class Completions(SyncAPIResource):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            web_search_options=web_search_options,
             extra_headers=extra_headers,
             extra_query=extra_query,
             extra_body=extra_body,
@@ -353,15 +353,15 @@ class AsyncCompletions(AsyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -370,6 +370,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -467,6 +468,7 @@ class AsyncCompletions(AsyncAPIResource):
                     "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "user": user,
+                    "web_search_options": web_search_options,
                 },
                 completion_create_params.CompletionCreateParams,
             ),
@@ -498,15 +500,15 @@ class AsyncCompletions(AsyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -515,6 +517,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -586,6 +589,7 @@ class AsyncCompletions(AsyncAPIResource):
             extra_query=extra_query,
             extra_body=extra_body,
             timeout=timeout,
+            web_search_options=web_search_options,
         )
         return AsyncChatCompletionStreamManager(
             api_request,
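Alongside the type cleanups (shared `ReasoningEffort`, inline `modalities` literal), the beta Chat Completions helpers gain a pass-through `web_search_options` parameter. A hedged sketch of forwarding it; the `gpt-4o-search-preview` model name and the option fields are assumptions based on the Chat Completions web-search docs, not on this diff:

```python
# Sketch of passing web_search_options through the beta parse helper.
# Model name and option fields are assumptions, not taken from this diff.
from openai import OpenAI

client = OpenAI()

completion = client.beta.chat.completions.parse(
    model="gpt-4o-search-preview",
    messages=[{"role": "user", "content": "What changed in the latest SDK release?"}],
    web_search_options={"search_context_size": "low"},
)

print(completion.choices[0].message.content)
```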
src/openai/resources/beta/threads/runs/runs.py
@@ -48,6 +48,7 @@ from .....types.beta.threads import (
 from .....types.beta.threads.run import Run
 from .....types.shared.chat_model import ChatModel
 from .....types.shared_params.metadata import Metadata
+from .....types.shared.reasoning_effort import ReasoningEffort
 from .....types.beta.assistant_tool_param import AssistantToolParam
 from .....types.beta.assistant_stream_event import AssistantStreamEvent
 from .....types.beta.threads.runs.run_step_include import RunStepInclude
@@ -96,7 +97,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -165,7 +166,7 @@ class Runs(SyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -247,7 +248,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -319,7 +320,7 @@ class Runs(SyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -397,7 +398,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -469,7 +470,7 @@ class Runs(SyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -546,7 +547,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -800,7 +801,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -871,7 +872,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -903,7 +904,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -935,7 +936,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1054,7 +1055,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1086,7 +1087,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1118,7 +1119,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1505,7 +1506,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1574,7 +1575,7 @@ class AsyncRuns(AsyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -1656,7 +1657,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1728,7 +1729,7 @@ class AsyncRuns(AsyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -1806,7 +1807,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1878,7 +1879,7 @@ class AsyncRuns(AsyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -1955,7 +1956,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -2209,7 +2210,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -2460,7 +2461,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -2492,7 +2493,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -2524,7 +2525,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
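The Assistants run endpoints now type `reasoning_effort` with the shared `ReasoningEffort` alias instead of an inline literal, and the docstrings broaden the scope from "o1 and o3-mini" to all o-series models. A hedged sketch of passing it when creating a run (the IDs are placeholders):

```python
# Sketch of the reasoning_effort parameter on run creation; IDs are placeholders.
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.create(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
    reasoning_effort="medium",
)

print(run.status)
```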
src/openai/resources/beta/__init__.py
@@ -24,22 +24,8 @@ from .assistants import (
     AssistantsWithStreamingResponse,
     AsyncAssistantsWithStreamingResponse,
 )
-from .vector_stores import (
-    VectorStores,
-    AsyncVectorStores,
-    VectorStoresWithRawResponse,
-    AsyncVectorStoresWithRawResponse,
-    VectorStoresWithStreamingResponse,
-    AsyncVectorStoresWithStreamingResponse,
-)
 
 __all__ = [
-    "VectorStores",
-    "AsyncVectorStores",
-    "VectorStoresWithRawResponse",
-    "AsyncVectorStoresWithRawResponse",
-    "VectorStoresWithStreamingResponse",
-    "AsyncVectorStoresWithStreamingResponse",
     "Assistants",
     "AsyncAssistants",
     "AssistantsWithRawResponse",
src/openai/resources/beta/assistants.py
@@ -27,6 +27,7 @@ from ...types.beta.assistant import Assistant
 from ...types.shared.chat_model import ChatModel
 from ...types.beta.assistant_deleted import AssistantDeleted
 from ...types.shared_params.metadata import Metadata
+from ...types.shared.reasoning_effort import ReasoningEffort
 from ...types.beta.assistant_tool_param import AssistantToolParam
 from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
 
@@ -61,7 +62,7 @@ class Assistants(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         name: Optional[str] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -98,7 +99,7 @@ class Assistants(SyncAPIResource):
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -256,7 +257,7 @@ class Assistants(SyncAPIResource):
         ]
         | NotGiven = NOT_GIVEN,
         name: Optional[str] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -294,7 +295,7 @@ class Assistants(SyncAPIResource):
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -504,7 +505,7 @@ class AsyncAssistants(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         name: Optional[str] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -541,7 +542,7 @@ class AsyncAssistants(AsyncAPIResource):
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -699,7 +700,7 @@ class AsyncAssistants(AsyncAPIResource):
         ]
         | NotGiven = NOT_GIVEN,
         name: Optional[str] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -737,7 +738,7 @@ class AsyncAssistants(AsyncAPIResource):
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
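The same shared `ReasoningEffort` values apply when creating or updating an assistant; a hedged sketch, with an illustrative o-series model name:

    from openai import OpenAI

    client = OpenAI()

    # "o-series models only": the effort hint is ignored by non-reasoning models.
    assistant = client.beta.assistants.create(
        model="o3-mini",             # illustrative reasoning model
        name="math-helper",
        instructions="Show your working step by step.",
        reasoning_effort="high",
    )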
src/openai/resources/beta/beta.py
@@ -29,14 +29,6 @@ from .realtime.realtime import (
     RealtimeWithStreamingResponse,
     AsyncRealtimeWithStreamingResponse,
 )
-from .vector_stores.vector_stores import (
-    VectorStores,
-    AsyncVectorStores,
-    VectorStoresWithRawResponse,
-    AsyncVectorStoresWithRawResponse,
-    VectorStoresWithStreamingResponse,
-    AsyncVectorStoresWithStreamingResponse,
-)
 
 __all__ = ["Beta", "AsyncBeta"]
 
@@ -50,10 +42,6 @@ class Beta(SyncAPIResource):
     def realtime(self) -> Realtime:
         return Realtime(self._client)
 
-    @cached_property
-    def vector_stores(self) -> VectorStores:
-        return VectorStores(self._client)
-
     @cached_property
     def assistants(self) -> Assistants:
         return Assistants(self._client)
@@ -91,10 +79,6 @@ class AsyncBeta(AsyncAPIResource):
     def realtime(self) -> AsyncRealtime:
         return AsyncRealtime(self._client)
 
-    @cached_property
-    def vector_stores(self) -> AsyncVectorStores:
-        return AsyncVectorStores(self._client)
-
     @cached_property
     def assistants(self) -> AsyncAssistants:
         return AsyncAssistants(self._client)
@@ -131,10 +115,6 @@ class BetaWithRawResponse:
     def realtime(self) -> RealtimeWithRawResponse:
         return RealtimeWithRawResponse(self._beta.realtime)
 
-    @cached_property
-    def vector_stores(self) -> VectorStoresWithRawResponse:
-        return VectorStoresWithRawResponse(self._beta.vector_stores)
-
     @cached_property
     def assistants(self) -> AssistantsWithRawResponse:
         return AssistantsWithRawResponse(self._beta.assistants)
@@ -152,10 +132,6 @@ class AsyncBetaWithRawResponse:
     def realtime(self) -> AsyncRealtimeWithRawResponse:
         return AsyncRealtimeWithRawResponse(self._beta.realtime)
 
-    @cached_property
-    def vector_stores(self) -> AsyncVectorStoresWithRawResponse:
-        return AsyncVectorStoresWithRawResponse(self._beta.vector_stores)
-
     @cached_property
     def assistants(self) -> AsyncAssistantsWithRawResponse:
         return AsyncAssistantsWithRawResponse(self._beta.assistants)
@@ -173,10 +149,6 @@ class BetaWithStreamingResponse:
     def realtime(self) -> RealtimeWithStreamingResponse:
         return RealtimeWithStreamingResponse(self._beta.realtime)
 
-    @cached_property
-    def vector_stores(self) -> VectorStoresWithStreamingResponse:
-        return VectorStoresWithStreamingResponse(self._beta.vector_stores)
-
     @cached_property
     def assistants(self) -> AssistantsWithStreamingResponse:
         return AssistantsWithStreamingResponse(self._beta.assistants)
@@ -194,10 +166,6 @@ class AsyncBetaWithStreamingResponse:
     def realtime(self) -> AsyncRealtimeWithStreamingResponse:
         return AsyncRealtimeWithStreamingResponse(self._beta.realtime)
 
-    @cached_property
-    def vector_stores(self) -> AsyncVectorStoresWithStreamingResponse:
-        return AsyncVectorStoresWithStreamingResponse(self._beta.vector_stores)
-
     @cached_property
     def assistants(self) -> AsyncAssistantsWithStreamingResponse:
         return AsyncAssistantsWithStreamingResponse(self._beta.assistants)
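With `vector_stores` removed from the beta namespace here, a sketch of the call site, assuming the resource is now exposed at the top level of the client:

    from openai import OpenAI

    client = OpenAI()

    # Before this change: client.beta.vector_stores.create(...)
    # Assumed after this change: the top-level resource.
    vector_store = client.vector_stores.create(name="support-docs")
    print(vector_store.id)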
src/openai/resources/chat/completions/completions.py
@@ -31,7 +31,6 @@ from ...._streaming import Stream, AsyncStream
 from ....pagination import SyncCursorPage, AsyncCursorPage
 from ....types.chat import (
     ChatCompletionAudioParam,
-    ChatCompletionReasoningEffort,
     completion_list_params,
     completion_create_params,
     completion_update_params,
@@ -40,13 +39,12 @@ from ...._base_client import AsyncPaginator, make_request_options
 from ....types.shared.chat_model import ChatModel
 from ....types.chat.chat_completion import ChatCompletion
 from ....types.shared_params.metadata import Metadata
+from ....types.shared.reasoning_effort import ReasoningEffort
 from ....types.chat.chat_completion_chunk import ChatCompletionChunk
 from ....types.chat.chat_completion_deleted import ChatCompletionDeleted
-from ....types.chat.chat_completion_modality import ChatCompletionModality
 from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
 from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
 from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
-from ....types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
 from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
 from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
 from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
@@ -93,16 +91,16 @@ class Completions(SyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -112,6 +110,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -119,9 +118,15 @@ class Completions(SyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ChatCompletion:
-        """Creates a model response for the given chat conversation.
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
 
-        Learn more in the
+        Creates a model response for the given chat conversation. Learn more in the
         [text generation](https://platform.openai.com/docs/guides/text-generation),
         [vision](https://platform.openai.com/docs/guides/vision), and
         [audio](https://platform.openai.com/docs/guides/audio) guides.
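For comparison with the docstring's pointer to Responses, a minimal non-streaming call (a sketch; `output_text` is assumed to be the client's convenience accessor for the generated text):

    from openai import OpenAI

    client = OpenAI()

    response = client.responses.create(
        model="gpt-4o",
        input="Summarize the difference between Chat Completions and Responses in one sentence.",
    )
    print(response.output_text)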
@@ -140,9 +145,11 @@ class Completions(SyncAPIResource):
               [images](https://platform.openai.com/docs/guides/vision), and
               [audio](https://platform.openai.com/docs/guides/audio).
 
-          model: ID of the model to use. See the
-              [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
-              table for details on which models work with the Chat API.
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
 
           audio: Parameters for audio output. Required when audio output is requested with
               `modalities: ["audio"]`.
@@ -203,8 +210,8 @@ class Completions(SyncAPIResource):
               Keys are strings with a maximum length of 64 characters. Values are strings with
               a maximum length of 512 characters.
 
-          modalities: Output types that you would like the model to generate for this request. Most
-              models are capable of generating text, which is the default:
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
 
               `["text"]`
 
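A hedged sketch of requesting audio output with the inlined `modalities` literal (the model, voice, and format values are illustrative):

    from openai import OpenAI

    client = OpenAI()

    completion = client.chat.completions.create(
        model="gpt-4o-audio-preview",              # illustrative audio-capable model
        modalities=["text", "audio"],
        audio={"voice": "alloy", "format": "wav"},  # required when "audio" is requested
        messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    )
    print(completion.choices[0].message.audio)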
@@ -229,7 +236,7 @@ class Completions(SyncAPIResource):
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -243,16 +250,9 @@ class Completions(SyncAPIResource):
               in the
               [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 
-              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-              message the model generates is valid JSON.
-
-              **Important:** when using JSON mode, you **must** also instruct the model to
-              produce JSON yourself via a system or user message. Without this, the model may
-              generate an unending stream of whitespace until the generation reaches the token
-              limit, resulting in a long-running and seemingly "stuck" request. Also note that
-              the message content may be partially cut off if `finish_reason="length"`, which
-              indicates the generation exceeded `max_tokens` or the conversation exceeded the
-              max context length.
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
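A sketch of the preferred `json_schema` response format alongside the older JSON mode described above (the schema itself is illustrative):

    from openai import OpenAI

    client = OpenAI()

    # Preferred: structured outputs via a JSON schema.
    completion = client.chat.completions.create(
        model="gpt-4o-2024-08-06",
        messages=[{"role": "user", "content": "Name a city and its country."}],
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "city",
                "schema": {
                    "type": "object",
                    "properties": {
                        "city": {"type": "string"},
                        "country": {"type": "string"},
                    },
                    "required": ["city", "country"],
                    "additionalProperties": False,
                },
                "strict": True,
            },
        },
    )
    print(completion.choices[0].message.content)

    # Older JSON mode: the prompt itself must still ask for JSON.
    # response_format={"type": "json_object"}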
@@ -267,23 +267,29 @@ class Completions(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarantee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarantee.
+                tier with a lower uptime SLA and no latency guarantee.
               - When not set, the default behavior is 'auto'.
 
-          stop: Up to 4 sequences where the API will stop generating further tokens.
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
 
           store: Whether or not to store the output of this chat completion request for use in
               our [model distillation](https://platform.openai.com/docs/guides/distillation)
               or [evals](https://platform.openai.com/docs/guides/evals) products.
 
-          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
-              sent as data-only
-              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-              as they become available, with the stream terminated by a `data: [DONE]`
-              message.
-              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide on how to handle the streaming events.
 
           stream_options: Options for streaming response. Only set this when you set `stream: true`.
 
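A minimal sketch of consuming the server-sent event stream described above with the synchronous client:

    from openai import OpenAI

    client = OpenAI()

    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Count to five."}],
        stream=True,
    )
    for chunk in stream:
        # Each chunk carries a delta; content may be None on role/stop chunks.
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)
    print()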
@@ -320,6 +326,10 @@ class Completions(SyncAPIResource):
               and detect abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
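A hedged sketch of the new `web_search_options` parameter (the model name is an assumption; an empty options object is taken to mean the tool's defaults):

    from openai import OpenAI

    client = OpenAI()

    completion = client.chat.completions.create(
        model="gpt-4o-search-preview",   # assumption: a search-enabled model
        web_search_options={},           # empty object: use the tool's defaults
        messages=[{"role": "user", "content": "What did the latest OpenAI SDK release add?"}],
    )
    print(completion.choices[0].message.content)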
@@ -346,16 +356,16 @@ class Completions(SyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -364,6 +374,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -371,9 +382,15 @@ class Completions(SyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> Stream[ChatCompletionChunk]:
-        """Creates a model response for the given chat conversation.
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
 
-        Learn more in the
+        Creates a model response for the given chat conversation. Learn more in the
         [text generation](https://platform.openai.com/docs/guides/text-generation),
         [vision](https://platform.openai.com/docs/guides/vision), and
         [audio](https://platform.openai.com/docs/guides/audio) guides.
@@ -392,16 +409,20 @@ class Completions(SyncAPIResource):
               [images](https://platform.openai.com/docs/guides/vision), and
               [audio](https://platform.openai.com/docs/guides/audio).
 
-          model: ID of the model to use. See the
-              [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
-              table for details on which models work with the Chat API.
-
-          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
-              sent as data-only
-              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-              as they become available, with the stream terminated by a `data: [DONE]`
-              message.
-              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide on how to handle the streaming events.
 
           audio: Parameters for audio output. Required when audio output is requested with
               `modalities: ["audio"]`.
@@ -462,8 +483,8 @@ class Completions(SyncAPIResource):
               Keys are strings with a maximum length of 64 characters. Values are strings with
               a maximum length of 512 characters.
 
-          modalities: Output types that you would like the model to generate for this request. Most
-              models are capable of generating text, which is the default:
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
 
               `["text"]`
 
@@ -488,7 +509,7 @@ class Completions(SyncAPIResource):
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -502,16 +523,9 @@ class Completions(SyncAPIResource):
               in the
               [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 
-              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-              message the model generates is valid JSON.
-
-              **Important:** when using JSON mode, you **must** also instruct the model to
-              produce JSON yourself via a system or user message. Without this, the model may
-              generate an unending stream of whitespace until the generation reaches the token
-              limit, resulting in a long-running and seemingly "stuck" request. Also note that
-              the message content may be partially cut off if `finish_reason="length"`, which
-              indicates the generation exceeded `max_tokens` or the conversation exceeded the
-              max context length.
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -526,12 +540,16 @@ class Completions(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarantee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarantee.
+                tier with a lower uptime SLA and no latency guarantee.
               - When not set, the default behavior is 'auto'.
 
-          stop: Up to 4 sequences where the API will stop generating further tokens.
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
 
           store: Whether or not to store the output of this chat completion request for use in
               our [model distillation](https://platform.openai.com/docs/guides/distillation)
@@ -572,6 +590,10 @@ class Completions(SyncAPIResource):
               and detect abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -598,16 +620,16 @@ class Completions(SyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -616,6 +638,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -623,9 +646,15 @@ class Completions(SyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ChatCompletion | Stream[ChatCompletionChunk]:
-        """Creates a model response for the given chat conversation.
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
 
-        Learn more in the
+        Creates a model response for the given chat conversation. Learn more in the
         [text generation](https://platform.openai.com/docs/guides/text-generation),
         [vision](https://platform.openai.com/docs/guides/vision), and
         [audio](https://platform.openai.com/docs/guides/audio) guides.
@@ -644,16 +673,20 @@ class Completions(SyncAPIResource):
               [images](https://platform.openai.com/docs/guides/vision), and
               [audio](https://platform.openai.com/docs/guides/audio).
 
-          model: ID of the model to use. See the
-              [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
-              table for details on which models work with the Chat API.
-
-          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
-              sent as data-only
-              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-              as they become available, with the stream terminated by a `data: [DONE]`
-              message.
-              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide on how to handle the streaming events.
 
           audio: Parameters for audio output. Required when audio output is requested with
               `modalities: ["audio"]`.
@@ -714,8 +747,8 @@ class Completions(SyncAPIResource):
               Keys are strings with a maximum length of 64 characters. Values are strings with
               a maximum length of 512 characters.
 
-          modalities: Output types that you would like the model to generate for this request. Most
-              models are capable of generating text, which is the default:
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
 
               `["text"]`
 
@@ -740,7 +773,7 @@ class Completions(SyncAPIResource):
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -754,16 +787,9 @@ class Completions(SyncAPIResource):
               in the
               [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 
-              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-              message the model generates is valid JSON.
-
-              **Important:** when using JSON mode, you **must** also instruct the model to
-              produce JSON yourself via a system or user message. Without this, the model may
-              generate an unending stream of whitespace until the generation reaches the token
-              limit, resulting in a long-running and seemingly "stuck" request. Also note that
-              the message content may be partially cut off if `finish_reason="length"`, which
-              indicates the generation exceeded `max_tokens` or the conversation exceeded the
-              max context length.
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -778,12 +804,16 @@ class Completions(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarantee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarantee.
+                tier with a lower uptime SLA and no latency guarantee.
               - When not set, the default behavior is 'auto'.
 
-          stop: Up to 4 sequences where the API will stop generating further tokens.
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
 
           store: Whether or not to store the output of this chat completion request for use in
               our [model distillation](https://platform.openai.com/docs/guides/distillation)
@@ -824,6 +854,10 @@ class Completions(SyncAPIResource):
               and detect abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -849,16 +883,16 @@ class Completions(SyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -868,6 +902,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -910,6 +945,7 @@ class Completions(SyncAPIResource):
                     "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "user": user,
+                    "web_search_options": web_search_options,
                 },
                 completion_create_params.CompletionCreateParams,
             ),
@@ -934,7 +970,7 @@ class Completions(SyncAPIResource):
     ) -> ChatCompletion:
         """Get a stored chat completion.
 
-        Only chat completions that have been created with
+        Only Chat Completions that have been created with
         the `store` parameter set to `true` will be returned.
 
         Args:
@@ -970,7 +1006,7 @@ class Completions(SyncAPIResource):
     ) -> ChatCompletion:
         """Modify a stored chat completion.
 
-        Only chat completions that have been created
+        Only Chat Completions that have been created
         with the `store` parameter set to `true` can be modified. Currently, the only
         supported modification is to update the `metadata` field.
 
@@ -1016,24 +1052,24 @@ class Completions(SyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> SyncCursorPage[ChatCompletion]:
-        """List stored chat completions.
+        """List stored Chat Completions.
 
-        Only chat completions that have been stored with
+        Only Chat Completions that have been stored with
         the `store` parameter set to `true` will be returned.
 
         Args:
           after: Identifier for the last chat completion from the previous pagination request.
 
-          limit: Number of chat completions to retrieve.
+          limit: Number of Chat Completions to retrieve.
 
           metadata:
-              A list of metadata keys to filter the chat completions by. Example:
+              A list of metadata keys to filter the Chat Completions by. Example:
 
               `metadata[key1]=value1&metadata[key2]=value2`
 
-          model: The model used to generate the chat completions.
+          model: The model used to generate the Chat Completions.
 
-          order: Sort order for chat completions by timestamp. Use `asc` for ascending order or
+          order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
               `desc` for descending order. Defaults to `asc`.
 
           extra_headers: Send extra headers
@@ -1079,7 +1115,7 @@ class Completions(SyncAPIResource):
     ) -> ChatCompletionDeleted:
         """Delete a stored chat completion.
 
-        Only chat completions that have been created
+        Only Chat Completions that have been created
         with the `store` parameter set to `true` can be deleted.
 
         Args:
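A sketch of the stored Chat Completions round-trip these docstrings describe (metadata values are illustrative):

    from openai import OpenAI

    client = OpenAI()

    # Store a completion so it can be listed, updated, and deleted later.
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "hello"}],
        store=True,
        metadata={"project": "sdk-demo"},        # illustrative metadata
    )

    # Page through stored Chat Completions filtered by that metadata key.
    for stored in client.chat.completions.list(metadata={"project": "sdk-demo"}):
        print(stored.id)

    # Update the metadata of, then delete, the stored completion.
    client.chat.completions.update(completion.id, metadata={"reviewed": "true"})
    client.chat.completions.delete(completion.id)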
@@ -1141,16 +1177,16 @@ class AsyncCompletions(AsyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -1160,6 +1196,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1167,9 +1204,15 @@ class AsyncCompletions(AsyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ChatCompletion:
-        """Creates a model response for the given chat conversation.
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
 
-        Learn more in the
+        Creates a model response for the given chat conversation. Learn more in the
         [text generation](https://platform.openai.com/docs/guides/text-generation),
         [vision](https://platform.openai.com/docs/guides/vision), and
         [audio](https://platform.openai.com/docs/guides/audio) guides.
@@ -1188,9 +1231,11 @@ class AsyncCompletions(AsyncAPIResource):
               [images](https://platform.openai.com/docs/guides/vision), and
               [audio](https://platform.openai.com/docs/guides/audio).
 
-          model: ID of the model to use. See the
-              [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
-              table for details on which models work with the Chat API.
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
 
           audio: Parameters for audio output. Required when audio output is requested with
               `modalities: ["audio"]`.
@@ -1251,8 +1296,8 @@ class AsyncCompletions(AsyncAPIResource):
               Keys are strings with a maximum length of 64 characters. Values are strings with
               a maximum length of 512 characters.
 
-          modalities: Output types that you would like the model to generate for this request. Most
-              models are capable of generating text, which is the default:
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
 
               `["text"]`
 
@@ -1277,7 +1322,7 @@ class AsyncCompletions(AsyncAPIResource):
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -1291,16 +1336,9 @@ class AsyncCompletions(AsyncAPIResource):
               in the
               [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 
-              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-              message the model generates is valid JSON.
-
-              **Important:** when using JSON mode, you **must** also instruct the model to
-              produce JSON yourself via a system or user message. Without this, the model may
-              generate an unending stream of whitespace until the generation reaches the token
-              limit, resulting in a long-running and seemingly "stuck" request. Also note that
-              the message content may be partially cut off if `finish_reason="length"`, which
-              indicates the generation exceeded `max_tokens` or the conversation exceeded the
-              max context length.
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -1315,23 +1353,29 @@ class AsyncCompletions(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarantee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarantee.
+                tier with a lower uptime SLA and no latency guarantee.
               - When not set, the default behavior is 'auto'.
 
-          stop: Up to 4 sequences where the API will stop generating further tokens.
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
 
           store: Whether or not to store the output of this chat completion request for use in
               our [model distillation](https://platform.openai.com/docs/guides/distillation)
               or [evals](https://platform.openai.com/docs/guides/evals) products.
 
-          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
-              sent as data-only
-              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-              as they become available, with the stream terminated by a `data: [DONE]`
-              message.
-              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide on how to handle the streaming events.
 
           stream_options: Options for streaming response. Only set this when you set `stream: true`.
 
@@ -1368,6 +1412,10 @@ class AsyncCompletions(AsyncAPIResource):
               and detect abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1394,16 +1442,16 @@ class AsyncCompletions(AsyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1412,6 +1460,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1419,9 +1468,15 @@ class AsyncCompletions(AsyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> AsyncStream[ChatCompletionChunk]:
-        """Creates a model response for the given chat conversation.
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
 
-        Learn more in the
+        Creates a model response for the given chat conversation. Learn more in the
         [text generation](https://platform.openai.com/docs/guides/text-generation),
         [vision](https://platform.openai.com/docs/guides/vision), and
         [audio](https://platform.openai.com/docs/guides/audio) guides.
@@ -1440,16 +1495,20 @@ class AsyncCompletions(AsyncAPIResource):
               [images](https://platform.openai.com/docs/guides/vision), and
               [audio](https://platform.openai.com/docs/guides/audio).
 
-          model: ID of the model to use. See the
-              [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
-              table for details on which models work with the Chat API.
-
-          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
-              sent as data-only
-              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-              as they become available, with the stream terminated by a `data: [DONE]`
-              message.
-              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide on how to handle the streaming events.
 
           audio: Parameters for audio output. Required when audio output is requested with
               `modalities: ["audio"]`.
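
(For context, a minimal sketch of consuming the async stream described in the `stream` docstring above; the model name and prompt are illustrative.)

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    # stream=True returns an AsyncStream[ChatCompletionChunk]; iterate as chunks arrive.
    stream = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Write a haiku about streaming APIs."}],
        stream=True,
    )
    async for chunk in stream:
        # Final chunks may carry no choices or an empty delta, so guard before printing.
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)


asyncio.run(main())
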
@@ -1510,8 +1569,8 @@ class AsyncCompletions(AsyncAPIResource):
               Keys are strings with a maximum length of 64 characters. Values are strings with
               a maximum length of 512 characters.
 
-          modalities: Output types that you would like the model to generate for this request. Most
-              models are capable of generating text, which is the default:
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
 
               `["text"]`
 
@@ -1536,7 +1595,7 @@ class AsyncCompletions(AsyncAPIResource):
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -1550,16 +1609,9 @@ class AsyncCompletions(AsyncAPIResource):
               in the
               [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 
-              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-              message the model generates is valid JSON.
-
-              **Important:** when using JSON mode, you **must** also instruct the model to
-              produce JSON yourself via a system or user message. Without this, the model may
-              generate an unending stream of whitespace until the generation reaches the token
-              limit, resulting in a long-running and seemingly "stuck" request. Also note that
-              the message content may be partially cut off if `finish_reason="length"`, which
-              indicates the generation exceeded `max_tokens` or the conversation exceeded the
-              max context length.
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -1574,12 +1626,16 @@ class AsyncCompletions(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarantee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarantee.
+                tier with a lower uptime SLA and no latency guarantee.
               - When not set, the default behavior is 'auto'.
 
-          stop: Up to 4 sequences where the API will stop generating further tokens.
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
 
           store: Whether or not to store the output of this chat completion request for use in
               our [model distillation](https://platform.openai.com/docs/guides/distillation)
@@ -1620,6 +1676,10 @@ class AsyncCompletions(AsyncAPIResource):
               and detect abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
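
(The new `web_search_options` parameter documented above enables the built-in web search tool for Chat Completions. A minimal sketch, assuming a web-search-capable model such as `gpt-4o-search-preview`; an empty options object uses the defaults.)

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-search-preview",
    web_search_options={},  # enable web search with default options
    messages=[{"role": "user", "content": "What was a positive news story from today?"}],
)

print(completion.choices[0].message.content)
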
@@ -1646,16 +1706,16 @@ class AsyncCompletions(AsyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1664,6 +1724,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1671,9 +1732,15 @@ class AsyncCompletions(AsyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
-        """Creates a model response for the given chat conversation.
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
 
-        Learn more in the
+        Creates a model response for the given chat conversation. Learn more in the
         [text generation](https://platform.openai.com/docs/guides/text-generation),
         [vision](https://platform.openai.com/docs/guides/vision), and
         [audio](https://platform.openai.com/docs/guides/audio) guides.
@@ -1692,16 +1759,20 @@ class AsyncCompletions(AsyncAPIResource):
               [images](https://platform.openai.com/docs/guides/vision), and
               [audio](https://platform.openai.com/docs/guides/audio).
 
-          model: ID of the model to use. See the
-              [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
-              table for details on which models work with the Chat API.
-
-          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
-              sent as data-only
-              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-              as they become available, with the stream terminated by a `data: [DONE]`
-              message.
-              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide for how to handle the streaming events.
 
           audio: Parameters for audio output. Required when audio output is requested with
               `modalities: ["audio"]`.
@@ -1762,8 +1833,8 @@ class AsyncCompletions(AsyncAPIResource):
               Keys are strings with a maximum length of 64 characters. Values are strings with
               a maximum length of 512 characters.
 
-          modalities: Output types that you would like the model to generate for this request. Most
-              models are capable of generating text, which is the default:
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
 
               `["text"]`
 
@@ -1788,7 +1859,7 @@ class AsyncCompletions(AsyncAPIResource):
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
-          reasoning_effort: **o1 and o3-mini models only**
+          reasoning_effort: **o-series models only**
 
               Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -1802,16 +1873,9 @@ class AsyncCompletions(AsyncAPIResource):
               in the
               [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 
-              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-              message the model generates is valid JSON.
-
-              **Important:** when using JSON mode, you **must** also instruct the model to
-              produce JSON yourself via a system or user message. Without this, the model may
-              generate an unending stream of whitespace until the generation reaches the token
-              limit, resulting in a long-running and seemingly "stuck" request. Also note that
-              the message content may be partially cut off if `finish_reason="length"`, which
-              indicates the generation exceeded `max_tokens` or the conversation exceeded the
-              max context length.
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -1826,12 +1890,16 @@ class AsyncCompletions(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarantee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarantee.
+                tier with a lower uptime SLA and no latency guarantee.
               - When not set, the default behavior is 'auto'.
 
-          stop: Up to 4 sequences where the API will stop generating further tokens.
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
 
           store: Whether or not to store the output of this chat completion request for use in
               our [model distillation](https://platform.openai.com/docs/guides/distillation)
@@ -1872,6 +1940,10 @@ class AsyncCompletions(AsyncAPIResource):
               and detect abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1897,16 +1969,16 @@ class AsyncCompletions(AsyncAPIResource):
         max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -1916,6 +1988,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1958,6 +2031,7 @@ class AsyncCompletions(AsyncAPIResource):
                     "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "user": user,
+                    "web_search_options": web_search_options,
                 },
                 completion_create_params.CompletionCreateParams,
             ),
@@ -1982,7 +2056,7 @@ class AsyncCompletions(AsyncAPIResource):
     ) -> ChatCompletion:
         """Get a stored chat completion.
 
-        Only chat completions that have been created with
+        Only Chat Completions that have been created with
         the `store` parameter set to `true` will be returned.
 
         Args:
@@ -2018,7 +2092,7 @@ class AsyncCompletions(AsyncAPIResource):
     ) -> ChatCompletion:
         """Modify a stored chat completion.
 
-        Only chat completions that have been created
+        Only Chat Completions that have been created
         with the `store` parameter set to `true` can be modified. Currently, the only
         supported modification is to update the `metadata` field.
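
(A minimal sketch of retrieving and then modifying a stored completion with the async client; the completion ID is a placeholder.)

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    # Placeholder ID; only completions created with store=True can be fetched or modified.
    completion = await client.chat.completions.retrieve("chatcmpl-abc123")
    print(completion.model)

    updated = await client.chat.completions.update("chatcmpl-abc123", metadata={"reviewed": "true"})
    print(updated.metadata)


asyncio.run(main())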
 
@@ -2064,24 +2138,24 @@ class AsyncCompletions(AsyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> AsyncPaginator[ChatCompletion, AsyncCursorPage[ChatCompletion]]:
-        """List stored chat completions.
+        """List stored Chat Completions.
 
-        Only chat completions that have been stored with
+        Only Chat Completions that have been stored with
         the `store` parameter set to `true` will be returned.
 
         Args:
           after: Identifier for the last chat completion from the previous pagination request.
 
-          limit: Number of chat completions to retrieve.
+          limit: Number of Chat Completions to retrieve.
 
           metadata:
-              A list of metadata keys to filter the chat completions by. Example:
+              A list of metadata keys to filter the Chat Completions by. Example:
 
               `metadata[key1]=value1&metadata[key2]=value2`
 
-          model: The model used to generate the chat completions.
+          model: The model used to generate the Chat Completions.
 
-          order: Sort order for chat completions by timestamp. Use `asc` for ascending order or
+          order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
               `desc` for descending order. Defaults to `asc`.
 
           extra_headers: Send extra headers
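
(A minimal sketch of paging through stored Chat Completions with the filters described above; the model name is illustrative.)

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    # The async paginator fetches additional pages transparently as you iterate.
    async for completion in client.chat.completions.list(model="gpt-4o", order="desc", limit=10):
        print(completion.id, completion.created)


asyncio.run(main())
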
@@ -2127,7 +2201,7 @@ class AsyncCompletions(AsyncAPIResource):
     ) -> ChatCompletionDeleted:
         """Delete a stored chat completion.
 
-        Only chat completions that have been created
+        Only Chat Completions that have been created
         with the `store` parameter set to `true` can be deleted.
 
         Args:
src/openai/resources/chat/completions/messages.py
@@ -56,7 +56,7 @@ class Messages(SyncAPIResource):
     ) -> SyncCursorPage[ChatCompletionStoreMessage]:
         """Get the messages in a stored chat completion.
 
-        Only chat completions that have
+        Only Chat Completions that have
         been created with the `store` parameter set to `true` will be returned.
 
         Args:
@@ -134,7 +134,7 @@ class AsyncMessages(AsyncAPIResource):
     ) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]:
         """Get the messages in a stored chat completion.
 
-        Only chat completions that have
+        Only Chat Completions that have
         been created with the `store` parameter set to `true` will be returned.
 
         Args:
src/openai/resources/responses/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .responses import (
+    Responses,
+    AsyncResponses,
+    ResponsesWithRawResponse,
+    AsyncResponsesWithRawResponse,
+    ResponsesWithStreamingResponse,
+    AsyncResponsesWithStreamingResponse,
+)
+from .input_items import (
+    InputItems,
+    AsyncInputItems,
+    InputItemsWithRawResponse,
+    AsyncInputItemsWithRawResponse,
+    InputItemsWithStreamingResponse,
+    AsyncInputItemsWithStreamingResponse,
+)
+
+__all__ = [
+    "InputItems",
+    "AsyncInputItems",
+    "InputItemsWithRawResponse",
+    "AsyncInputItemsWithRawResponse",
+    "InputItemsWithStreamingResponse",
+    "AsyncInputItemsWithStreamingResponse",
+    "Responses",
+    "AsyncResponses",
+    "ResponsesWithRawResponse",
+    "AsyncResponsesWithRawResponse",
+    "ResponsesWithStreamingResponse",
+    "AsyncResponsesWithStreamingResponse",
+]
src/openai/resources/responses/input_items.py
@@ -0,0 +1,223 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.responses import input_item_list_params
+from ...types.responses.response_item_list import Data
+
+__all__ = ["InputItems", "AsyncInputItems"]
+
+
+class InputItems(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> InputItemsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return InputItemsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> InputItemsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return InputItemsWithStreamingResponse(self)
+
+    def list(
+        self,
+        response_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[Data]:
+        """
+        Returns a list of input items for a given response.
+
+        Args:
+          after: An item ID to list items after, used in pagination.
+
+          before: An item ID to list items before, used in pagination.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: The order to return the input items in. Default is `asc`.
+
+              - `asc`: Return the input items in ascending order.
+              - `desc`: Return the input items in descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return self._get_api_list(
+            f"/responses/{response_id}/input_items",
+            page=SyncCursorPage[Data],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    input_item_list_params.InputItemListParams,
+                ),
+            ),
+            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
+        )
+
+
+class AsyncInputItems(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncInputItemsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncInputItemsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncInputItemsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncInputItemsWithStreamingResponse(self)
+
+    def list(
+        self,
+        response_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[Data, AsyncCursorPage[Data]]:
+        """
+        Returns a list of input items for a given response.
+
+        Args:
+          after: An item ID to list items after, used in pagination.
+
+          before: An item ID to list items before, used in pagination.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: The order to return the input items in. Default is `asc`.
+
+              - `asc`: Return the input items in ascending order.
+              - `desc`: Return the input items in descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return self._get_api_list(
+            f"/responses/{response_id}/input_items",
+            page=AsyncCursorPage[Data],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    input_item_list_params.InputItemListParams,
+                ),
+            ),
+            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
+        )
+
+
+class InputItemsWithRawResponse:
+    def __init__(self, input_items: InputItems) -> None:
+        self._input_items = input_items
+
+        self.list = _legacy_response.to_raw_response_wrapper(
+            input_items.list,
+        )
+
+
+class AsyncInputItemsWithRawResponse:
+    def __init__(self, input_items: AsyncInputItems) -> None:
+        self._input_items = input_items
+
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            input_items.list,
+        )
+
+
+class InputItemsWithStreamingResponse:
+    def __init__(self, input_items: InputItems) -> None:
+        self._input_items = input_items
+
+        self.list = to_streamed_response_wrapper(
+            input_items.list,
+        )
+
+
+class AsyncInputItemsWithStreamingResponse:
+    def __init__(self, input_items: AsyncInputItems) -> None:
+        self._input_items = input_items
+
+        self.list = async_to_streamed_response_wrapper(
+            input_items.list,
+        )
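
(The new `InputItems` resource is mounted as `client.responses.input_items` via the `Responses` class in the next file; a minimal usage sketch, assuming a response has already been created.)

from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    input="Write a one-line summary of the Responses API.",
)

# Page through the items that formed this response's input; each item exposes a
# `type` discriminator shared by every variant of the union.
for item in client.responses.input_items.list(response.id, limit=20, order="asc"):
    print(item.type)
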
src/openai/resources/responses/responses.py
@@ -0,0 +1,1790 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, List, Type, Union, Iterable, Optional, cast
+from functools import partial
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ..._utils import (
+    is_given,
+    required_args,
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .input_items import (
+    InputItems,
+    AsyncInputItems,
+    InputItemsWithRawResponse,
+    AsyncInputItemsWithRawResponse,
+    InputItemsWithStreamingResponse,
+    AsyncInputItemsWithStreamingResponse,
+)
+from ..._streaming import Stream, AsyncStream
+from ...lib._tools import PydanticFunctionTool, ResponsesPydanticFunctionTool
+from ..._base_client import make_request_options
+from ...types.responses import response_create_params, response_retrieve_params
+from ...lib._parsing._responses import (
+    TextFormatT,
+    parse_response,
+    type_to_text_format_param as _type_to_text_format_param,
+)
+from ...types.shared.chat_model import ChatModel
+from ...types.responses.response import Response
+from ...types.responses.tool_param import ToolParam, ParseableToolParam
+from ...types.shared_params.metadata import Metadata
+from ...types.shared_params.reasoning import Reasoning
+from ...types.responses.parsed_response import ParsedResponse
+from ...lib.streaming.responses._responses import ResponseStreamManager, AsyncResponseStreamManager
+from ...types.responses.response_includable import ResponseIncludable
+from ...types.responses.response_input_param import ResponseInputParam
+from ...types.responses.response_stream_event import ResponseStreamEvent
+from ...types.responses.response_text_config_param import ResponseTextConfigParam
+
+__all__ = ["Responses", "AsyncResponses"]
+
+
+class Responses(SyncAPIResource):
+    @cached_property
+    def input_items(self) -> InputItems:
+        return InputItems(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> ResponsesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ResponsesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ResponsesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ResponsesWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
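
(A minimal sketch of the non-streaming overload above, also enabling the built-in web search tool; the `web_search_preview` tool type and the `output_text` convenience property are assumptions based on the Responses documentation.)

from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    input="What was a positive news story from today?",
    tools=[{"type": "web_search_preview"}],  # built-in tool; type name assumed
)

# output_text concatenates all text output parts for convenience.
print(response.output_text)
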
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        stream: Literal[True],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
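
(And the streaming variant from this overload, as a minimal sketch; the `response.output_text.delta` event type name is assumed from the Responses streaming events.)

from openai import OpenAI

client = OpenAI()

stream = client.responses.create(
    model="gpt-4o",
    input="Write a one-sentence bedtime story about a unicorn.",
    stream=True,
)

for event in stream:
    # Print text deltas as they arrive; other event types mark lifecycle milestones.
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
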
+    @overload
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        stream: bool,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | Stream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["input", "model"], ["input", "model", "stream"])
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | Stream[ResponseStreamEvent]:
+        return self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Response,
+            stream=stream or False,
+            stream_cls=Stream[ResponseStreamEvent],
+        )
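+
+    # Illustrative usage (a minimal sketch, assuming an `OpenAI()` client configured with
+    # an API key): `create()` without `stream` returns a `Response`; with `stream=True` it
+    # returns an iterable `Stream[ResponseStreamEvent]`.
+    #
+    #     from openai import OpenAI
+    #
+    #     client = OpenAI()
+    #
+    #     response = client.responses.create(
+    #         model="gpt-4o",
+    #         input="Summarize the Responses API in one sentence.",
+    #     )
+    #     print(response.output)
+    #
+    #     for event in client.responses.create(
+    #         model="gpt-4o",
+    #         input="Summarize the Responses API in one sentence.",
+    #         stream=True,
+    #     ):
+    #         print(event.type)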
+
+    def stream(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ResponseStreamManager[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        api_request: partial[Stream[ResponseStreamEvent]] = partial(
+            self.create,
+            input=input,
+            model=model,
+            tools=tools,
+            include=include,
+            instructions=instructions,
+            max_output_tokens=max_output_tokens,
+            metadata=metadata,
+            parallel_tool_calls=parallel_tool_calls,
+            previous_response_id=previous_response_id,
+            store=store,
+            stream=True,
+            temperature=temperature,
+            text=text,
+            tool_choice=tool_choice,
+            reasoning=reasoning,
+            top_p=top_p,
+            truncation=truncation,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return ResponseStreamManager(
+            api_request,
+            text_format=text_format,
+            input_tools=tools,
+        )
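+
+    # Illustrative usage (a minimal sketch): `stream()` returns a context manager that
+    # yields `ResponseStreamEvent`s; `text_format` accepts a Pydantic model for parsed
+    # structured output. The `Answer` model below is a made-up example.
+    #
+    #     from pydantic import BaseModel
+    #     from openai import OpenAI
+    #
+    #     class Answer(BaseModel):
+    #         value: str
+    #
+    #     client = OpenAI()
+    #     with client.responses.stream(
+    #         model="gpt-4o",
+    #         input="Answer with a single word: what colour is the sky?",
+    #         text_format=Answer,
+    #     ) as stream:
+    #         for event in stream:
+    #             print(event.type)
+    #
+    #     print(stream.get_final_response())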
+
+    def parse(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedResponse[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
+            return parse_response(
+                input_tools=tools,
+                text_format=text_format,
+                response=raw_response,
+            )
+
+        return self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `Response` instance into a `ParsedResponse`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
+        )
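+
+    # Illustrative usage (a minimal sketch): `parse()` returns a `ParsedResponse` whose
+    # `output_text` content items carry a `parsed` attribute when `text_format` is given.
+    # The `Weather` model below is a made-up example.
+    #
+    #     from pydantic import BaseModel
+    #     from openai import OpenAI
+    #
+    #     class Weather(BaseModel):
+    #         city: str
+    #         forecast: str
+    #
+    #     client = OpenAI()
+    #     parsed = client.responses.parse(
+    #         model="gpt-4o",
+    #         input="Give a one-line weather forecast for Paris.",
+    #         text_format=Weather,
+    #     )
+    #     for item in parsed.output:
+    #         if item.type == "message":
+    #             for content in item.content:
+    #                 if content.type == "output_text" and content.parsed:
+    #                     print(content.parsed)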
+
+    def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return self._get(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, response_retrieve_params.ResponseRetrieveParams),
+            ),
+            cast_to=Response,
+        )
+
+    def delete(
+        self,
+        response_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Deletes a model response with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
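+
+    # Illustrative usage (a minimal sketch): `retrieve()` fetches a stored response by ID
+    # and `delete()` removes it. The `resp_123` ID below is a placeholder.
+    #
+    #     from openai import OpenAI
+    #
+    #     client = OpenAI()
+    #     response = client.responses.retrieve("resp_123")
+    #     client.responses.delete("resp_123")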
+
+
+class AsyncResponses(AsyncAPIResource):
+    @cached_property
+    def input_items(self) -> AsyncInputItems:
+        return AsyncInputItems(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncResponsesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncResponsesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncResponsesWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        stream: Literal[True],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        stream: bool,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | AsyncStream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["input", "model"], ["input", "model", "stream"])
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | AsyncStream[ResponseStreamEvent]:
+        return await self._post(
+            "/responses",
+            body=await async_maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Response,
+            stream=stream or False,
+            stream_cls=AsyncStream[ResponseStreamEvent],
+        )
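+
+    # Illustrative usage (a minimal sketch, assuming an `AsyncOpenAI()` client configured
+    # with an API key): the async client mirrors the sync API, but `create()` must be
+    # awaited; `stream=True` yields an async iterator of events instead.
+    #
+    #     import asyncio
+    #     from openai import AsyncOpenAI
+    #
+    #     async def main() -> None:
+    #         client = AsyncOpenAI()
+    #         response = await client.responses.create(
+    #             model="gpt-4o",
+    #             input="Summarize the Responses API in one sentence.",
+    #         )
+    #         print(response.output)
+    #
+    #     asyncio.run(main())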
+
+    def stream(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncResponseStreamManager[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        api_request = self.create(
+            input=input,
+            model=model,
+            tools=tools,
+            include=include,
+            instructions=instructions,
+            max_output_tokens=max_output_tokens,
+            metadata=metadata,
+            parallel_tool_calls=parallel_tool_calls,
+            previous_response_id=previous_response_id,
+            store=store,
+            stream=True,
+            temperature=temperature,
+            text=text,
+            tool_choice=tool_choice,
+            reasoning=reasoning,
+            top_p=top_p,
+            truncation=truncation,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return AsyncResponseStreamManager(
+            api_request,
+            text_format=text_format,
+            input_tools=tools,
+        )
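+
+    # Illustrative usage (a minimal sketch): on the async client, `stream()` returns an
+    # async context manager whose events are consumed with `async for`.
+    #
+    #     # inside an `async def`, with `client = AsyncOpenAI()`:
+    #     async with client.responses.stream(
+    #         model="gpt-4o",
+    #         input="Answer with a single word: what colour is the sky?",
+    #     ) as stream:
+    #         async for event in stream:
+    #             print(event.type)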
+
+    async def parse(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedResponse[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
+            return parse_response(
+                input_tools=tools,
+                text_format=text_format,
+                response=raw_response,
+            )
+
+        return await self._post(
+            "/responses",
+            body=await async_maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `Response` instance into a `ParsedResponse`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
+        )
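+
+    # Illustrative usage (a minimal sketch): `parse()` on the async client is awaited and
+    # returns a `ParsedResponse`, just like the synchronous version. `Forecast` is a
+    # made-up Pydantic model.
+    #
+    #     # inside an `async def`, with `client = AsyncOpenAI()`:
+    #     parsed = await client.responses.parse(
+    #         model="gpt-4o",
+    #         input="Give a one-line weather forecast for Paris.",
+    #         text_format=Forecast,
+    #     )
+    #     print(parsed.output)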
+
+    async def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return await self._get(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {"include": include}, response_retrieve_params.ResponseRetrieveParams
+                ),
+            ),
+            cast_to=Response,
+        )
+
+    async def delete(
+        self,
+        response_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Deletes a model response with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class ResponsesWithRawResponse:
+    def __init__(self, responses: Responses) -> None:
+        self._responses = responses
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> InputItemsWithRawResponse:
+        return InputItemsWithRawResponse(self._responses.input_items)
+
+
+class AsyncResponsesWithRawResponse:
+    def __init__(self, responses: AsyncResponses) -> None:
+        self._responses = responses
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> AsyncInputItemsWithRawResponse:
+        return AsyncInputItemsWithRawResponse(self._responses.input_items)
+
+
+class ResponsesWithStreamingResponse:
+    def __init__(self, responses: Responses) -> None:
+        self._responses = responses
+
+        self.create = to_streamed_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = to_streamed_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> InputItemsWithStreamingResponse:
+        return InputItemsWithStreamingResponse(self._responses.input_items)
+
+
+class AsyncResponsesWithStreamingResponse:
+    def __init__(self, responses: AsyncResponses) -> None:
+        self._responses = responses
+
+        self.create = async_to_streamed_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> AsyncInputItemsWithStreamingResponse:
+        return AsyncInputItemsWithStreamingResponse(self._responses.input_items)
+
+
+def _make_tools(tools: Iterable[ParseableToolParam] | NotGiven) -> List[ToolParam] | NotGiven:
+    if not is_given(tools):
+        return NOT_GIVEN
+
+    converted_tools: List[ToolParam] = []
+    for tool in tools:
+        if tool["type"] != "function":
+            converted_tools.append(tool)
+            continue
+
+        if "function" not in tool:
+            # standard Responses API case
+            converted_tools.append(tool)
+            continue
+
+        function = cast(Any, tool)["function"]  # pyright: ignore[reportUnnecessaryCast]
+        if not isinstance(function, PydanticFunctionTool):
+            raise Exception(
+                "Expected Chat Completions function tool shape to be created using `openai.pydantic_function_tool()`"
+            )
+
+        assert "parameters" in function
+        new_tool = ResponsesPydanticFunctionTool(
+            {
+                "type": "function",
+                "name": function["name"],
+                "description": function.get("description"),
+                "parameters": function["parameters"],
+                "strict": function.get("strict") or False,
+            },
+            function.model,
+        )
+
+        converted_tools.append(new_tool.cast())
+
+    return converted_tools
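
A minimal sketch of calling the new Responses resource methods added above. The model string and the use of the created response's `id` are illustrative; only `create`, `retrieve`, and `delete` come from the diff itself.

from openai import OpenAI

client = OpenAI()

# create a response, then fetch it and delete it by ID
resp = client.responses.create(model="gpt-4o-2024-08-06", input="say hello")
fetched = client.responses.retrieve(resp.id)
print(fetched.id, fetched.status)

client.responses.delete(resp.id)  # returns None on success
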
src/openai/resources/uploads/uploads.py
@@ -193,10 +193,9 @@ class Uploads(SyncAPIResource):
         contains all the parts you uploaded. This File is usable in the rest of our
         platform as a regular File object.
 
-        For certain `purpose`s, the correct `mime_type` must be specified. Please refer
-        to documentation for the supported MIME types for your use case:
-
-        - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files)
+        For certain `purpose` values, the correct `mime_type` must be specified. Please
+        refer to documentation for the
+        [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files).
 
         For guidance on the proper filename extensions for each purpose, please follow
         the documentation on
@@ -497,10 +496,9 @@ class AsyncUploads(AsyncAPIResource):
         contains all the parts you uploaded. This File is usable in the rest of our
         platform as a regular File object.
 
-        For certain `purpose`s, the correct `mime_type` must be specified. Please refer
-        to documentation for the supported MIME types for your use case:
-
-        - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files)
+        For certain `purpose` values, the correct `mime_type` must be specified. Please
+        refer to documentation for the
+        [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files).
 
         For guidance on the proper filename extensions for each purpose, please follow
         the documentation on
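
As a hedged illustration of the reworded `mime_type` guidance, the sketch below passes an explicit MIME type when creating an upload; the byte count, filename, and purpose are placeholders and the exact argument set is assumed from the Uploads API.

from openai import OpenAI

client = OpenAI()

# an explicit mime_type is required for some `purpose` values
upload = client.uploads.create(
    bytes=1024,                  # total size of the file in bytes (placeholder)
    filename="notes.pdf",
    mime_type="application/pdf",
    purpose="assistants",
)
print(upload.id, upload.status)
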
src/openai/resources/beta/vector_stores/__init__.py → src/openai/resources/vector_stores/__init__.py

File renamed without changes
src/openai/resources/beta/vector_stores/file_batches.py → src/openai/resources/vector_stores/file_batches.py
@@ -3,31 +3,31 @@
 from __future__ import annotations
 
 import asyncio
-from typing import List, Iterable
-from typing_extensions import Literal
+from typing import Dict, List, Iterable, Optional
+from typing_extensions import Union, Literal
 from concurrent.futures import Future, ThreadPoolExecutor, as_completed
 
 import httpx
 import sniffio
 
-from .... import _legacy_response
-from ....types import FileObject
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from ...._utils import (
+from ... import _legacy_response
+from ...types import FileChunkingStrategyParam
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
     is_given,
     maybe_transform,
     async_maybe_transform,
 )
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from ....pagination import SyncCursorPage, AsyncCursorPage
-from ....types.beta import FileChunkingStrategyParam
-from ...._base_client import AsyncPaginator, make_request_options
-from ....types.beta.vector_stores import file_batch_create_params, file_batch_list_files_params
-from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam
-from ....types.beta.vector_stores.vector_store_file import VectorStoreFile
-from ....types.beta.vector_stores.vector_store_file_batch import VectorStoreFileBatch
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.file_object import FileObject
+from ...types.vector_stores import file_batch_create_params, file_batch_list_files_params
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_stores.vector_store_file import VectorStoreFile
+from ...types.vector_stores.vector_store_file_batch import VectorStoreFileBatch
 
 __all__ = ["FileBatches", "AsyncFileBatches"]
 
@@ -57,6 +57,7 @@ class FileBatches(SyncAPIResource):
         vector_store_id: str,
         *,
         file_ids: List[str],
+        attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
         chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -73,6 +74,12 @@ class FileBatches(SyncAPIResource):
               the vector store should use. Useful for tools like `file_search` that can access
               files.
 
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
           chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
               strategy. Only applicable if `file_ids` is non-empty.
 
@@ -92,6 +99,7 @@ class FileBatches(SyncAPIResource):
             body=maybe_transform(
                 {
                     "file_ids": file_ids,
+                    "attributes": attributes,
                     "chunking_strategy": chunking_strategy,
                 },
                 file_batch_create_params.FileBatchCreateParams,
@@ -386,6 +394,7 @@ class AsyncFileBatches(AsyncAPIResource):
         vector_store_id: str,
         *,
         file_ids: List[str],
+        attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
         chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -402,6 +411,12 @@ class AsyncFileBatches(AsyncAPIResource):
               the vector store should use. Useful for tools like `file_search` that can access
               files.
 
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
           chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
               strategy. Only applicable if `file_ids` is non-empty.
 
@@ -421,6 +436,7 @@ class AsyncFileBatches(AsyncAPIResource):
             body=await async_maybe_transform(
                 {
                     "file_ids": file_ids,
+                    "attributes": attributes,
                     "chunking_strategy": chunking_strategy,
                 },
                 file_batch_create_params.FileBatchCreateParams,
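
A short sketch of the new `attributes` parameter on file batch creation, using the promoted `client.vector_stores` path. The IDs and attribute keys below are placeholders.

from openai import OpenAI

client = OpenAI()

# attach filterable key-value attributes to every file in the batch
batch = client.vector_stores.file_batches.create(
    vector_store_id="vs_123",           # placeholder ID
    file_ids=["file_abc", "file_def"],  # placeholder IDs
    attributes={"project": "q3-report", "confidential": False},
)
print(batch.status, batch.file_counts)
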
src/openai/resources/beta/vector_stores/files.py → src/openai/resources/vector_stores/files.py
@@ -2,28 +2,29 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Dict, Union, Optional
 from typing_extensions import Literal, assert_never
 
 import httpx
 
-from .... import _legacy_response
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from ...._utils import (
+from ... import _legacy_response
+from ...types import FileChunkingStrategyParam
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
     is_given,
     maybe_transform,
     async_maybe_transform,
 )
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from ....pagination import SyncCursorPage, AsyncCursorPage
-from ....types.beta import FileChunkingStrategyParam
-from ...._base_client import AsyncPaginator, make_request_options
-from ....types.beta.vector_stores import file_list_params, file_create_params
-from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam
-from ....types.beta.vector_stores.vector_store_file import VectorStoreFile
-from ....types.beta.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.vector_stores import file_list_params, file_create_params, file_update_params
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_stores.vector_store_file import VectorStoreFile
+from ...types.vector_stores.file_content_response import FileContentResponse
+from ...types.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted
 
 __all__ = ["Files", "AsyncFiles"]
 
@@ -53,6 +54,7 @@ class Files(SyncAPIResource):
         vector_store_id: str,
         *,
         file_id: str,
+        attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
         chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -71,6 +73,12 @@ class Files(SyncAPIResource):
               vector store should use. Useful for tools like `file_search` that can access
               files.
 
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
           chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
               strategy. Only applicable if `file_ids` is non-empty.
 
@@ -90,6 +98,7 @@ class Files(SyncAPIResource):
             body=maybe_transform(
                 {
                     "file_id": file_id,
+                    "attributes": attributes,
                     "chunking_strategy": chunking_strategy,
                 },
                 file_create_params.FileCreateParams,
@@ -137,6 +146,51 @@ class Files(SyncAPIResource):
             cast_to=VectorStoreFile,
         )
 
+    def update(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        attributes: Optional[Dict[str, Union[str, float, bool]]],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """
+        Update attributes on a vector store file.
+
+        Args:
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/vector_stores/{vector_store_id}/files/{file_id}",
+            body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFile,
+        )
+
     def list(
         self,
         vector_store_id: str,
@@ -339,6 +393,44 @@ class Files(SyncAPIResource):
             poll_interval_ms=poll_interval_ms,
         )
 
+    def content(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncPage[FileContentResponse]:
+        """
+        Retrieve the parsed contents of a vector store file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/files/{file_id}/content",
+            page=SyncPage[FileContentResponse],
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=FileContentResponse,
+        )
+
 
 class AsyncFiles(AsyncAPIResource):
     @cached_property
@@ -365,6 +457,7 @@ class AsyncFiles(AsyncAPIResource):
         vector_store_id: str,
         *,
         file_id: str,
+        attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
         chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -383,6 +476,12 @@ class AsyncFiles(AsyncAPIResource):
               vector store should use. Useful for tools like `file_search` that can access
               files.
 
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
           chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
               strategy. Only applicable if `file_ids` is non-empty.
 
@@ -402,6 +501,7 @@ class AsyncFiles(AsyncAPIResource):
             body=await async_maybe_transform(
                 {
                     "file_id": file_id,
+                    "attributes": attributes,
                     "chunking_strategy": chunking_strategy,
                 },
                 file_create_params.FileCreateParams,
@@ -449,6 +549,51 @@ class AsyncFiles(AsyncAPIResource):
             cast_to=VectorStoreFile,
         )
 
+    async def update(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        attributes: Optional[Dict[str, Union[str, float, bool]]],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """
+        Update attributes on a vector store file.
+
+        Args:
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/vector_stores/{vector_store_id}/files/{file_id}",
+            body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFile,
+        )
+
     def list(
         self,
         vector_store_id: str,
@@ -653,6 +798,44 @@ class AsyncFiles(AsyncAPIResource):
             chunking_strategy=chunking_strategy,
         )
 
+    def content(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[FileContentResponse, AsyncPage[FileContentResponse]]:
+        """
+        Retrieve the parsed contents of a vector store file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/files/{file_id}/content",
+            page=AsyncPage[FileContentResponse],
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=FileContentResponse,
+        )
+
 
 class FilesWithRawResponse:
     def __init__(self, files: Files) -> None:
@@ -664,12 +847,18 @@ class FilesWithRawResponse:
         self.retrieve = _legacy_response.to_raw_response_wrapper(
             files.retrieve,
         )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            files.update,
+        )
         self.list = _legacy_response.to_raw_response_wrapper(
             files.list,
         )
         self.delete = _legacy_response.to_raw_response_wrapper(
             files.delete,
         )
+        self.content = _legacy_response.to_raw_response_wrapper(
+            files.content,
+        )
 
 
 class AsyncFilesWithRawResponse:
@@ -682,12 +871,18 @@ class AsyncFilesWithRawResponse:
         self.retrieve = _legacy_response.async_to_raw_response_wrapper(
             files.retrieve,
         )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            files.update,
+        )
         self.list = _legacy_response.async_to_raw_response_wrapper(
             files.list,
         )
         self.delete = _legacy_response.async_to_raw_response_wrapper(
             files.delete,
         )
+        self.content = _legacy_response.async_to_raw_response_wrapper(
+            files.content,
+        )
 
 
 class FilesWithStreamingResponse:
@@ -700,12 +895,18 @@ class FilesWithStreamingResponse:
         self.retrieve = to_streamed_response_wrapper(
             files.retrieve,
         )
+        self.update = to_streamed_response_wrapper(
+            files.update,
+        )
         self.list = to_streamed_response_wrapper(
             files.list,
         )
         self.delete = to_streamed_response_wrapper(
             files.delete,
         )
+        self.content = to_streamed_response_wrapper(
+            files.content,
+        )
 
 
 class AsyncFilesWithStreamingResponse:
@@ -718,9 +919,15 @@ class AsyncFilesWithStreamingResponse:
         self.retrieve = async_to_streamed_response_wrapper(
             files.retrieve,
         )
+        self.update = async_to_streamed_response_wrapper(
+            files.update,
+        )
         self.list = async_to_streamed_response_wrapper(
             files.list,
         )
         self.delete = async_to_streamed_response_wrapper(
             files.delete,
         )
+        self.content = async_to_streamed_response_wrapper(
+            files.content,
+        )
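
The new `update` and `content` methods can be exercised as below; a minimal sketch assuming placeholder IDs and assuming the parsed-content items expose the `type` and `text` fields of `FileContentResponse`.

from openai import OpenAI

client = OpenAI()

# update the attributes stored on a vector store file
client.vector_stores.files.update(
    "file_abc",                 # placeholder file ID
    vector_store_id="vs_123",   # placeholder vector store ID
    attributes={"reviewed": True, "priority": 2},
)

# page through the parsed contents of the file
for part in client.vector_stores.files.content("file_abc", vector_store_id="vs_123"):
    print(part.type, part.text)
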
src/openai/resources/beta/vector_stores/vector_stores.py → src/openai/resources/vector_stores/vector_stores.py
@@ -2,12 +2,12 @@
 
 from __future__ import annotations
 
-from typing import List, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal
 
 import httpx
 
-from .... import _legacy_response
+from ... import _legacy_response
 from .files import (
     Files,
     AsyncFiles,
@@ -16,14 +16,22 @@ from .files import (
     FilesWithStreamingResponse,
     AsyncFilesWithStreamingResponse,
 )
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
+from ...types import (
+    FileChunkingStrategyParam,
+    vector_store_list_params,
+    vector_store_create_params,
+    vector_store_search_params,
+    vector_store_update_params,
+)
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
     maybe_transform,
     async_maybe_transform,
 )
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
 from .file_batches import (
     FileBatches,
     AsyncFileBatches,
@@ -32,18 +40,12 @@ from .file_batches import (
     FileBatchesWithStreamingResponse,
     AsyncFileBatchesWithStreamingResponse,
 )
-from ....pagination import SyncCursorPage, AsyncCursorPage
-from ....types.beta import (
-    FileChunkingStrategyParam,
-    vector_store_list_params,
-    vector_store_create_params,
-    vector_store_update_params,
-)
-from ...._base_client import AsyncPaginator, make_request_options
-from ....types.beta.vector_store import VectorStore
-from ....types.shared_params.metadata import Metadata
-from ....types.beta.vector_store_deleted import VectorStoreDeleted
-from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.vector_store import VectorStore
+from ...types.vector_store_deleted import VectorStoreDeleted
+from ...types.shared_params.metadata import Metadata
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_store_search_response import VectorStoreSearchResponse
 
 __all__ = ["VectorStores", "AsyncVectorStores"]
 
@@ -329,6 +331,69 @@ class VectorStores(SyncAPIResource):
             cast_to=VectorStoreDeleted,
         )
 
+    def search(
+        self,
+        vector_store_id: str,
+        *,
+        query: Union[str, List[str]],
+        filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
+        max_num_results: int | NotGiven = NOT_GIVEN,
+        ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
+        rewrite_query: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncPage[VectorStoreSearchResponse]:
+        """
+        Search a vector store for relevant chunks based on a query and file attributes
+        filter.
+
+        Args:
+          query: A query string for a search
+
+          filters: A filter to apply based on file attributes.
+
+          max_num_results: The maximum number of results to return. This number should be between 1 and 50
+              inclusive.
+
+          ranking_options: Ranking options for search.
+
+          rewrite_query: Whether to rewrite the natural language query for vector search.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/search",
+            page=SyncPage[VectorStoreSearchResponse],
+            body=maybe_transform(
+                {
+                    "query": query,
+                    "filters": filters,
+                    "max_num_results": max_num_results,
+                    "ranking_options": ranking_options,
+                    "rewrite_query": rewrite_query,
+                },
+                vector_store_search_params.VectorStoreSearchParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=VectorStoreSearchResponse,
+            method="post",
+        )
+
 
 class AsyncVectorStores(AsyncAPIResource):
     @cached_property
@@ -611,6 +676,69 @@ class AsyncVectorStores(AsyncAPIResource):
             cast_to=VectorStoreDeleted,
         )
 
+    def search(
+        self,
+        vector_store_id: str,
+        *,
+        query: Union[str, List[str]],
+        filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
+        max_num_results: int | NotGiven = NOT_GIVEN,
+        ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
+        rewrite_query: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[VectorStoreSearchResponse, AsyncPage[VectorStoreSearchResponse]]:
+        """
+        Search a vector store for relevant chunks based on a query and file attributes
+        filter.
+
+        Args:
+          query: A query string for a search
+
+          filters: A filter to apply based on file attributes.
+
+          max_num_results: The maximum number of results to return. This number should be between 1 and 50
+              inclusive.
+
+          ranking_options: Ranking options for search.
+
+          rewrite_query: Whether to rewrite the natural language query for vector search.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/search",
+            page=AsyncPage[VectorStoreSearchResponse],
+            body=maybe_transform(
+                {
+                    "query": query,
+                    "filters": filters,
+                    "max_num_results": max_num_results,
+                    "ranking_options": ranking_options,
+                    "rewrite_query": rewrite_query,
+                },
+                vector_store_search_params.VectorStoreSearchParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=VectorStoreSearchResponse,
+            method="post",
+        )
+
 
 class VectorStoresWithRawResponse:
     def __init__(self, vector_stores: VectorStores) -> None:
@@ -631,6 +759,9 @@ class VectorStoresWithRawResponse:
         self.delete = _legacy_response.to_raw_response_wrapper(
             vector_stores.delete,
         )
+        self.search = _legacy_response.to_raw_response_wrapper(
+            vector_stores.search,
+        )
 
     @cached_property
     def files(self) -> FilesWithRawResponse:
@@ -660,6 +791,9 @@ class AsyncVectorStoresWithRawResponse:
         self.delete = _legacy_response.async_to_raw_response_wrapper(
             vector_stores.delete,
         )
+        self.search = _legacy_response.async_to_raw_response_wrapper(
+            vector_stores.search,
+        )
 
     @cached_property
     def files(self) -> AsyncFilesWithRawResponse:
@@ -689,6 +823,9 @@ class VectorStoresWithStreamingResponse:
         self.delete = to_streamed_response_wrapper(
             vector_stores.delete,
         )
+        self.search = to_streamed_response_wrapper(
+            vector_stores.search,
+        )
 
     @cached_property
     def files(self) -> FilesWithStreamingResponse:
@@ -718,6 +855,9 @@ class AsyncVectorStoresWithStreamingResponse:
         self.delete = async_to_streamed_response_wrapper(
             vector_stores.delete,
         )
+        self.search = async_to_streamed_response_wrapper(
+            vector_stores.search,
+        )
 
     @cached_property
     def files(self) -> AsyncFilesWithStreamingResponse:
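
The new `search` method issues a POST and returns a page of `VectorStoreSearchResponse` items. A minimal sketch, assuming a placeholder vector store ID and assuming the response model exposes `filename` and `score`.

from openai import OpenAI

client = OpenAI()

# semantic search over a vector store, capped at five results
results = client.vector_stores.search(
    "vs_123",                   # placeholder vector store ID
    query="quarterly revenue targets",
    max_num_results=5,
)
for result in results:
    print(result.filename, result.score)
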
src/openai/resources/__init__.py
@@ -64,6 +64,14 @@ from .uploads import (
     UploadsWithStreamingResponse,
     AsyncUploadsWithStreamingResponse,
 )
+from .responses import (
+    Responses,
+    AsyncResponses,
+    ResponsesWithRawResponse,
+    AsyncResponsesWithRawResponse,
+    ResponsesWithStreamingResponse,
+    AsyncResponsesWithStreamingResponse,
+)
 from .embeddings import (
     Embeddings,
     AsyncEmbeddings,
@@ -96,6 +104,14 @@ from .moderations import (
     ModerationsWithStreamingResponse,
     AsyncModerationsWithStreamingResponse,
 )
+from .vector_stores import (
+    VectorStores,
+    AsyncVectorStores,
+    VectorStoresWithRawResponse,
+    AsyncVectorStoresWithRawResponse,
+    VectorStoresWithStreamingResponse,
+    AsyncVectorStoresWithStreamingResponse,
+)
 
 __all__ = [
     "Completions",
@@ -152,6 +168,12 @@ __all__ = [
     "AsyncFineTuningWithRawResponse",
     "FineTuningWithStreamingResponse",
     "AsyncFineTuningWithStreamingResponse",
+    "VectorStores",
+    "AsyncVectorStores",
+    "VectorStoresWithRawResponse",
+    "AsyncVectorStoresWithRawResponse",
+    "VectorStoresWithStreamingResponse",
+    "AsyncVectorStoresWithStreamingResponse",
     "Beta",
     "AsyncBeta",
     "BetaWithRawResponse",
@@ -170,4 +192,10 @@ __all__ = [
     "AsyncUploadsWithRawResponse",
     "UploadsWithStreamingResponse",
     "AsyncUploadsWithStreamingResponse",
+    "Responses",
+    "AsyncResponses",
+    "ResponsesWithRawResponse",
+    "AsyncResponsesWithRawResponse",
+    "ResponsesWithStreamingResponse",
+    "AsyncResponsesWithStreamingResponse",
 ]
src/openai/resources/files.py
@@ -96,14 +96,10 @@ class Files(SyncAPIResource):
         Args:
           file: The File object (not file name) to be uploaded.
 
-          purpose: The intended purpose of the uploaded file.
-
-              Use "assistants" for
-              [Assistants](https://platform.openai.com/docs/api-reference/assistants) and
-              [Message](https://platform.openai.com/docs/api-reference/messages) files,
-              "vision" for Assistants image file inputs, "batch" for
-              [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for
-              [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning).
+          purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
+              Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
+              fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
+              Flexible file type for any purpose - `evals`: Used for eval data sets
 
           extra_headers: Send extra headers
 
@@ -412,14 +408,10 @@ class AsyncFiles(AsyncAPIResource):
         Args:
           file: The File object (not file name) to be uploaded.
 
-          purpose: The intended purpose of the uploaded file.
-
-              Use "assistants" for
-              [Assistants](https://platform.openai.com/docs/api-reference/assistants) and
-              [Message](https://platform.openai.com/docs/api-reference/messages) files,
-              "vision" for Assistants image file inputs, "batch" for
-              [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for
-              [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning).
+          purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
+              Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
+              fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
+              Flexible file type for any purpose - `evals`: Used for eval data sets
 
           extra_headers: Send extra headers
 
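
The expanded `purpose` list now includes `user_data` and `evals`; the sketch below uploads a file under the flexible `user_data` purpose. The filename is a placeholder.

from openai import OpenAI

client = OpenAI()

# `user_data` is the flexible purpose for files that don't fit another category
file = client.files.create(
    file=open("notes.txt", "rb"),
    purpose="user_data",
)
print(file.id, file.purpose)
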
src/openai/types/beta/threads/run_create_params.py
@@ -9,6 +9,7 @@ from ...shared.chat_model import ChatModel
 from ..assistant_tool_param import AssistantToolParam
 from .runs.run_step_include import RunStepInclude
 from ...shared_params.metadata import Metadata
+from ...shared.reasoning_effort import ReasoningEffort
 from .message_content_part_param import MessageContentPartParam
 from ..code_interpreter_tool_param import CodeInterpreterToolParam
 from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
@@ -106,8 +107,8 @@ class RunCreateParamsBase(TypedDict, total=False):
     during tool use.
     """
 
-    reasoning_effort: Optional[Literal["low", "medium", "high"]]
-    """**o1 and o3-mini models only**
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
 
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
src/openai/types/beta/__init__.py
@@ -4,7 +4,6 @@ from __future__ import annotations
 
 from .thread import Thread as Thread
 from .assistant import Assistant as Assistant
-from .vector_store import VectorStore as VectorStore
 from .function_tool import FunctionTool as FunctionTool
 from .assistant_tool import AssistantTool as AssistantTool
 from .thread_deleted import ThreadDeleted as ThreadDeleted
@@ -14,35 +13,21 @@ from .function_tool_param import FunctionToolParam as FunctionToolParam
 from .assistant_tool_param import AssistantToolParam as AssistantToolParam
 from .thread_create_params import ThreadCreateParams as ThreadCreateParams
 from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams
-from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
 from .assistant_list_params import AssistantListParams as AssistantListParams
 from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice
 from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool
 from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent
-from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
 from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
 from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
 from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
-from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
-from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
-from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
 from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
 from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
 from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
-from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
 from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams
-from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
 from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction
 from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption
-from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
 from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam
-from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject
-from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam
 from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam
-from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject
 from .assistant_response_format_option_param import (
     AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam,
 )
-from .static_file_chunking_strategy_object_param import (
-    StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
-)
src/openai/types/beta/assistant_create_params.py
@@ -3,12 +3,12 @@
 from __future__ import annotations
 
 from typing import List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared.chat_model import ChatModel
 from .assistant_tool_param import AssistantToolParam
 from ..shared_params.metadata import Metadata
-from .file_chunking_strategy_param import FileChunkingStrategyParam
+from ..shared.reasoning_effort import ReasoningEffort
 from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
 
 __all__ = [
@@ -17,6 +17,10 @@ __all__ = [
     "ToolResourcesCodeInterpreter",
     "ToolResourcesFileSearch",
     "ToolResourcesFileSearchVectorStore",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategy",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
 ]
 
 
@@ -53,8 +57,8 @@ class AssistantCreateParams(TypedDict, total=False):
     name: Optional[str]
     """The name of the assistant. The maximum length is 256 characters."""
 
-    reasoning_effort: Optional[Literal["low", "medium", "high"]]
-    """**o1 and o3-mini models only**
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
 
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -127,12 +131,43 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False):
     """
 
 
+class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+    type: Required[Literal["auto"]]
+    """Always `auto`."""
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+    chunk_overlap_tokens: Required[int]
+    """The number of tokens that overlap between chunks. The default value is `400`.
+
+    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+    """
+
+    max_chunk_size_tokens: Required[int]
+    """The maximum number of tokens in each chunk.
+
+    The default value is `800`. The minimum value is `100` and the maximum value is
+    `4096`.
+    """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+    static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+    type: Required[Literal["static"]]
+    """Always `static`."""
+
+
+ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+    ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
+]
+
+
 class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
-    chunking_strategy: FileChunkingStrategyParam
+    chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
     """The chunking strategy used to chunk the file(s).
 
-    If not set, will use the `auto` strategy. Only applicable if `file_ids` is
-    non-empty.
+    If not set, will use the `auto` strategy.
     """
 
     file_ids: List[str]
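
A hedged sketch of supplying the inlined static chunking strategy when creating an assistant with a `file_search` vector store; the model name and file ID are placeholders, and the nested dict mirrors the TypedDict shapes defined above.

from openai import OpenAI

client = OpenAI()

# create an assistant whose file_search vector store uses a static chunking strategy
assistant = client.beta.assistants.create(
    model="gpt-4o",
    tools=[{"type": "file_search"}],
    tool_resources={
        "file_search": {
            "vector_stores": [
                {
                    "file_ids": ["file_abc"],  # placeholder ID
                    "chunking_strategy": {
                        "type": "static",
                        "static": {
                            "max_chunk_size_tokens": 800,
                            "chunk_overlap_tokens": 400,
                        },
                    },
                }
            ]
        }
    },
)
print(assistant.id)
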
src/openai/types/beta/assistant_update_params.py
@@ -7,6 +7,7 @@ from typing_extensions import Literal, TypedDict
 
 from .assistant_tool_param import AssistantToolParam
 from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
 from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
 
 __all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
@@ -79,8 +80,8 @@ class AssistantUpdateParams(TypedDict, total=False):
     name: Optional[str]
     """The name of the assistant. The maximum length is 256 characters."""
 
-    reasoning_effort: Optional[Literal["low", "medium", "high"]]
-    """**o1 and o3-mini models only**
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
 
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
src/openai/types/beta/thread_create_and_run_params.py
@@ -10,7 +10,6 @@ from .function_tool_param import FunctionToolParam
 from .file_search_tool_param import FileSearchToolParam
 from ..shared_params.metadata import Metadata
 from .code_interpreter_tool_param import CodeInterpreterToolParam
-from .file_chunking_strategy_param import FileChunkingStrategyParam
 from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
 from .threads.message_content_part_param import MessageContentPartParam
 from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -26,6 +25,10 @@ __all__ = [
     "ThreadToolResourcesCodeInterpreter",
     "ThreadToolResourcesFileSearch",
     "ThreadToolResourcesFileSearchVectorStore",
+    "ThreadToolResourcesFileSearchVectorStoreChunkingStrategy",
+    "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+    "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+    "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
     "ToolResources",
     "ToolResourcesCodeInterpreter",
     "ToolResourcesFileSearch",
@@ -224,12 +227,44 @@ class ThreadToolResourcesCodeInterpreter(TypedDict, total=False):
     """
 
 
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+    type: Required[Literal["auto"]]
+    """Always `auto`."""
+
+
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+    chunk_overlap_tokens: Required[int]
+    """The number of tokens that overlap between chunks. The default value is `400`.
+
+    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+    """
+
+    max_chunk_size_tokens: Required[int]
+    """The maximum number of tokens in each chunk.
+
+    The default value is `800`. The minimum value is `100` and the maximum value is
+    `4096`.
+    """
+
+
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+    static: Required[ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+    type: Required[Literal["static"]]
+    """Always `static`."""
+
+
+ThreadToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+    ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto,
+    ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic,
+]
+
+
 class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False):
-    chunking_strategy: FileChunkingStrategyParam
+    chunking_strategy: ThreadToolResourcesFileSearchVectorStoreChunkingStrategy
     """The chunking strategy used to chunk the file(s).
 
-    If not set, will use the `auto` strategy. Only applicable if `file_ids` is
-    non-empty.
+    If not set, will use the `auto` strategy.
     """
 
     file_ids: List[str]
src/openai/types/beta/thread_create_params.py
@@ -7,7 +7,6 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared_params.metadata import Metadata
 from .code_interpreter_tool_param import CodeInterpreterToolParam
-from .file_chunking_strategy_param import FileChunkingStrategyParam
 from .threads.message_content_part_param import MessageContentPartParam
 
 __all__ = [
@@ -20,6 +19,10 @@ __all__ = [
     "ToolResourcesCodeInterpreter",
     "ToolResourcesFileSearch",
     "ToolResourcesFileSearchVectorStore",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategy",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
 ]
 
 
@@ -101,12 +104,43 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False):
     """
 
 
+class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+    type: Required[Literal["auto"]]
+    """Always `auto`."""
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+    chunk_overlap_tokens: Required[int]
+    """The number of tokens that overlap between chunks. The default value is `400`.
+
+    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+    """
+
+    max_chunk_size_tokens: Required[int]
+    """The maximum number of tokens in each chunk.
+
+    The default value is `800`. The minimum value is `100` and the maximum value is
+    `4096`.
+    """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+    static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+    type: Required[Literal["static"]]
+    """Always `static`."""
+
+
+ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+    ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
+]
+
+
 class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
-    chunking_strategy: FileChunkingStrategyParam
+    chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
     """The chunking strategy used to chunk the file(s).
 
-    If not set, will use the `auto` strategy. Only applicable if `file_ids` is
-    non-empty.
+    If not set, will use the `auto` strategy.
     """
 
     file_ids: List[str]
src/openai/types/chat/chat_completion_audio_param.py
@@ -17,7 +17,6 @@ class ChatCompletionAudioParam(TypedDict, total=False):
     voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
     """The voice the model uses to respond.
 
-    Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also
-    supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices
-    are less expressive).
+    Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and
+    `shimmer`.
     """
src/openai/types/chat/chat_completion_content_part_param.py
@@ -3,14 +3,39 @@
 from __future__ import annotations
 
 from typing import Union
-from typing_extensions import TypeAlias
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
 from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam
 from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam
 
-__all__ = ["ChatCompletionContentPartParam"]
+__all__ = ["ChatCompletionContentPartParam", "File", "FileFile"]
+
+
+class FileFile(TypedDict, total=False):
+    file_data: str
+    """
+    The base64 encoded file data, used when passing the file to the model as a
+    string.
+    """
+
+    file_id: str
+    """The ID of an uploaded file to use as input."""
+
+    file_name: str
+    """The name of the file, used when passing the file to the model as a string."""
+
+
+class File(TypedDict, total=False):
+    file: Required[FileFile]
+
+    type: Required[Literal["file"]]
+    """The type of the content part. Always `file`."""
+
 
 ChatCompletionContentPartParam: TypeAlias = Union[
-    ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam
+    ChatCompletionContentPartTextParam,
+    ChatCompletionContentPartImageParam,
+    ChatCompletionContentPartInputAudioParam,
+    File,
 ]
src/openai/types/chat/chat_completion_message.py
@@ -7,7 +7,29 @@ from ..._models import BaseModel
 from .chat_completion_audio import ChatCompletionAudio
 from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
 
-__all__ = ["ChatCompletionMessage", "FunctionCall"]
+__all__ = ["ChatCompletionMessage", "Annotation", "AnnotationURLCitation", "FunctionCall"]
+
+
+class AnnotationURLCitation(BaseModel):
+    end_index: int
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: int
+    """The index of the first character of the URL citation in the message."""
+
+    title: str
+    """The title of the web resource."""
+
+    url: str
+    """The URL of the web resource."""
+
+
+class Annotation(BaseModel):
+    type: Literal["url_citation"]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url_citation: AnnotationURLCitation
+    """A URL citation when using web search."""
 
 
 class FunctionCall(BaseModel):
@@ -33,6 +55,12 @@ class ChatCompletionMessage(BaseModel):
     role: Literal["assistant"]
     """The role of the author of this message."""
 
+    annotations: Optional[List[Annotation]] = None
+    """
+    Annotations for the message, when applicable, as when using the
+    [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+    """
+
     audio: Optional[ChatCompletionAudio] = None
     """
     If the audio output modality is requested, this object contains data about the
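A sketch of reading the new `annotations` field off a chat completion message; the search-preview model name is an assumption, and annotations are only populated when the web search tool runs.

```python
from openai import OpenAI

client = OpenAI()

# Assumed search-enabled model name; adjust as needed.
completion = client.chat.completions.create(
    model="gpt-4o-search-preview",
    web_search_options={},
    messages=[{"role": "user", "content": "What happened in AI this week?"}],
)

message = completion.choices[0].message
for annotation in message.annotations or []:
    citation = annotation.url_citation
    print(citation.title, citation.url, citation.start_index, citation.end_index)
```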
src/openai/types/chat/chat_completion_reasoning_effort.py
@@ -1,8 +1,8 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Optional
-from typing_extensions import Literal, TypeAlias
+
+from ..shared.reasoning_effort import ReasoningEffort
 
 __all__ = ["ChatCompletionReasoningEffort"]
 
-ChatCompletionReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
+ChatCompletionReasoningEffort = ReasoningEffort
src/openai/types/chat/completion_create_params.py
@@ -7,11 +7,10 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared.chat_model import ChatModel
 from ..shared_params.metadata import Metadata
-from .chat_completion_modality import ChatCompletionModality
+from ..shared.reasoning_effort import ReasoningEffort
 from .chat_completion_tool_param import ChatCompletionToolParam
 from .chat_completion_audio_param import ChatCompletionAudioParam
 from .chat_completion_message_param import ChatCompletionMessageParam
-from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort
 from ..shared_params.function_parameters import FunctionParameters
 from ..shared_params.response_format_text import ResponseFormatText
 from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
@@ -26,6 +25,9 @@ __all__ = [
     "FunctionCall",
     "Function",
     "ResponseFormat",
+    "WebSearchOptions",
+    "WebSearchOptionsUserLocation",
+    "WebSearchOptionsUserLocationApproximate",
     "CompletionCreateParamsNonStreaming",
     "CompletionCreateParamsStreaming",
 ]
@@ -43,11 +45,12 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     """
 
     model: Required[Union[str, ChatModel]]
-    """ID of the model to use.
+    """Model ID used to generate the response, like `gpt-4o` or `o1`.
 
-    See the
-    [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
-    table for details on which models work with the Chat API.
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
     """
 
     audio: Optional[ChatCompletionAudioParam]
@@ -133,10 +136,10 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     a maximum length of 512 characters.
     """
 
-    modalities: Optional[List[ChatCompletionModality]]
+    modalities: Optional[List[Literal["text", "audio"]]]
     """
-    Output types that you would like the model to generate for this request. Most
-    models are capable of generating text, which is the default:
+    Output types that you would like the model to generate. Most models are capable
+    of generating text, which is the default:
 
     `["text"]`
 
@@ -174,8 +177,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     far, increasing the model's likelihood to talk about new topics.
     """
 
-    reasoning_effort: Optional[ChatCompletionReasoningEffort]
-    """**o1 and o3-mini models only**
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
 
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -191,16 +194,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     in the
     [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 
-    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-    message the model generates is valid JSON.
-
-    **Important:** when using JSON mode, you **must** also instruct the model to
-    produce JSON yourself via a system or user message. Without this, the model may
-    generate an unending stream of whitespace until the generation reaches the token
-    limit, resulting in a long-running and seemingly "stuck" request. Also note that
-    the message content may be partially cut off if `finish_reason="length"`, which
-    indicates the generation exceeded `max_tokens` or the conversation exceeded the
-    max context length.
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
     """
 
     seed: Optional[int]
@@ -221,14 +217,20 @@ class CompletionCreateParamsBase(TypedDict, total=False):
       utilize scale tier credits until they are exhausted.
     - If set to 'auto', and the Project is not Scale tier enabled, the request will
       be processed using the default service tier with a lower uptime SLA and no
      latency guarantee.
    - If set to 'default', the request will be processed using the default service
      tier with a lower uptime SLA and no latency guarantee.
     - When not set, the default behavior is 'auto'.
+
+    When this parameter is set, the response body will include the `service_tier`
+    utilized.
     """
 
-    stop: Union[Optional[str], List[str]]
-    """Up to 4 sequences where the API will stop generating further tokens."""
+    stop: Union[Optional[str], List[str], None]
+    """Up to 4 sequences where the API will stop generating further tokens.
+
+    The returned text will not contain the stop sequence.
+    """
 
     store: Optional[bool]
     """
@@ -292,6 +294,13 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
     """
 
+    web_search_options: WebSearchOptions
+    """
+    This tool searches the web for relevant results to use in a response. Learn more
+    about the
+    [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+    """
+
 
 FunctionCall: TypeAlias = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam]
 
@@ -322,30 +331,73 @@ class Function(TypedDict, total=False):
     """
 
 
-ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema]
+ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
+
+
+class WebSearchOptionsUserLocationApproximate(TypedDict, total=False):
+    city: str
+    """Free text input for the city of the user, e.g. `San Francisco`."""
+
+    country: str
+    """
+    The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+    the user, e.g. `US`.
+    """
+
+    region: str
+    """Free text input for the region of the user, e.g. `California`."""
+
+    timezone: str
+    """
+    The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+    user, e.g. `America/Los_Angeles`.
+    """
+
+
+class WebSearchOptionsUserLocation(TypedDict, total=False):
+    approximate: Required[WebSearchOptionsUserLocationApproximate]
+    """Approximate location parameters for the search."""
+
+    type: Required[Literal["approximate"]]
+    """The type of location approximation. Always `approximate`."""
+
+
+class WebSearchOptions(TypedDict, total=False):
+    search_context_size: Literal["low", "medium", "high"]
+    """
+    High level guidance for the amount of context window space to use for the
+    search. One of `low`, `medium`, or `high`. `medium` is the default.
+    """
+
+    user_location: Optional[WebSearchOptionsUserLocation]
+    """Approximate location parameters for the search."""
 
 
 class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
     stream: Optional[Literal[False]]
-    """If set, partial message deltas will be sent, like in ChatGPT.
-
-    Tokens will be sent as data-only
-    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-    as they become available, with the stream terminated by a `data: [DONE]`
-    message.
-    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+    for more information, along with the
+    [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+    guide for details on how to handle the streaming events.
     """
 
 
 class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
     stream: Required[Literal[True]]
-    """If set, partial message deltas will be sent, like in ChatGPT.
-
-    Tokens will be sent as data-only
-    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-    as they become available, with the stream terminated by a `data: [DONE]`
-    message.
-    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+    for more information, along with the
+    [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+    guide for details on how to handle the streaming events.
     """
 
 
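A sketch of the new `web_search_options` request parameter, following the `WebSearchOptions` typed dicts above; the model name and location values are placeholders.

```python
from openai import OpenAI

client = OpenAI()

# Assumed search-enabled model name; location values are illustrative only.
completion = client.chat.completions.create(
    model="gpt-4o-search-preview",
    messages=[{"role": "user", "content": "Find coffee shops near me"}],
    web_search_options={
        "search_context_size": "medium",
        "user_location": {
            "type": "approximate",
            "approximate": {
                "city": "San Francisco",
                "region": "California",
                "country": "US",
                "timezone": "America/Los_Angeles",
            },
        },
    },
)
print(completion.choices[0].message.content)
```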
src/openai/types/chat/completion_list_params.py
@@ -15,19 +15,19 @@ class CompletionListParams(TypedDict, total=False):
     """Identifier for the last chat completion from the previous pagination request."""
 
     limit: int
-    """Number of chat completions to retrieve."""
+    """Number of Chat Completions to retrieve."""
 
     metadata: Optional[Metadata]
-    """A list of metadata keys to filter the chat completions by. Example:
+    """A list of metadata keys to filter the Chat Completions by. Example:
 
     `metadata[key1]=value1&metadata[key2]=value2`
     """
 
     model: str
-    """The model used to generate the chat completions."""
+    """The model used to generate the Chat Completions."""
 
     order: Literal["asc", "desc"]
-    """Sort order for chat completions by timestamp.
+    """Sort order for Chat Completions by timestamp.
 
     Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
     """
src/openai/types/responses/__init__.py
@@ -0,0 +1,138 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .tool import Tool as Tool
+from .response import Response as Response
+from .tool_param import ToolParam as ToolParam
+from .computer_tool import ComputerTool as ComputerTool
+from .function_tool import FunctionTool as FunctionTool
+from .response_error import ResponseError as ResponseError
+from .response_usage import ResponseUsage as ResponseUsage
+from .parsed_response import (
+    ParsedContent as ParsedContent,
+    ParsedResponse as ParsedResponse,
+    ParsedResponseOutputItem as ParsedResponseOutputItem,
+    ParsedResponseOutputText as ParsedResponseOutputText,
+    ParsedResponseOutputMessage as ParsedResponseOutputMessage,
+    ParsedResponseFunctionToolCall as ParsedResponseFunctionToolCall,
+)
+from .response_status import ResponseStatus as ResponseStatus
+from .web_search_tool import WebSearchTool as WebSearchTool
+from .file_search_tool import FileSearchTool as FileSearchTool
+from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes
+from .response_item_list import ResponseItemList as ResponseItemList
+from .computer_tool_param import ComputerToolParam as ComputerToolParam
+from .function_tool_param import FunctionToolParam as FunctionToolParam
+from .response_includable import ResponseIncludable as ResponseIncludable
+from .response_input_file import ResponseInputFile as ResponseInputFile
+from .response_input_text import ResponseInputText as ResponseInputText
+from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions
+from .response_error_event import ResponseErrorEvent as ResponseErrorEvent
+from .response_input_image import ResponseInputImage as ResponseInputImage
+from .response_input_param import ResponseInputParam as ResponseInputParam
+from .response_output_item import ResponseOutputItem as ResponseOutputItem
+from .response_output_text import ResponseOutputText as ResponseOutputText
+from .response_text_config import ResponseTextConfig as ResponseTextConfig
+from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction
+from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent
+from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent
+from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam
+from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
+from .input_item_list_params import InputItemListParams as InputItemListParams
+from .response_create_params import ResponseCreateParams as ResponseCreateParams
+from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent
+from .response_input_content import ResponseInputContent as ResponseInputContent
+from .response_output_message import ResponseOutputMessage as ResponseOutputMessage
+from .response_output_refusal import ResponseOutputRefusal as ResponseOutputRefusal
+from .tool_choice_types_param import ToolChoiceTypesParam as ToolChoiceTypesParam
+from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageParam
+from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent
+from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams
+from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent
+from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent
+from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent
+from .response_input_file_param import ResponseInputFileParam as ResponseInputFileParam
+from .response_input_item_param import ResponseInputItemParam as ResponseInputItemParam
+from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam
+from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent
+from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent
+from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam
+from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam
+from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
+from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam
+from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall
+from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig
+from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall
+from .response_refusal_done_event import ResponseRefusalDoneEvent as ResponseRefusalDoneEvent
+from .response_function_web_search import ResponseFunctionWebSearch as ResponseFunctionWebSearch
+from .response_input_content_param import ResponseInputContentParam as ResponseInputContentParam
+from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent
+from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam
+from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam
+from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall
+from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
+from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam
+from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent
+from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall
+from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam
+from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent
+from .response_audio_transcript_delta_event import (
+    ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
+)
+from .response_format_text_json_schema_config import (
+    ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig,
+)
+from .response_web_search_call_completed_event import (
+    ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent,
+)
+from .response_web_search_call_searching_event import (
+    ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent,
+)
+from .response_file_search_call_completed_event import (
+    ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent,
+)
+from .response_file_search_call_searching_event import (
+    ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent,
+)
+from .response_input_message_content_list_param import (
+    ResponseInputMessageContentListParam as ResponseInputMessageContentListParam,
+)
+from .response_web_search_call_in_progress_event import (
+    ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent,
+)
+from .response_file_search_call_in_progress_event import (
+    ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent,
+)
+from .response_function_call_arguments_done_event import (
+    ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
+)
+from .response_function_call_arguments_delta_event import (
+    ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
+)
+from .response_format_text_json_schema_config_param import (
+    ResponseFormatTextJSONSchemaConfigParam as ResponseFormatTextJSONSchemaConfigParam,
+)
+from .response_code_interpreter_call_code_done_event import (
+    ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent,
+)
+from .response_code_interpreter_call_completed_event import (
+    ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent,
+)
+from .response_code_interpreter_call_code_delta_event import (
+    ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent,
+)
+from .response_code_interpreter_call_in_progress_event import (
+    ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent,
+)
+from .response_code_interpreter_call_interpreting_event import (
+    ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent,
+)
src/openai/types/responses/computer_tool.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComputerTool"]
+
+
+class ComputerTool(BaseModel):
+    display_height: float
+    """The height of the computer display."""
+
+    display_width: float
+    """The width of the computer display."""
+
+    environment: Literal["mac", "windows", "ubuntu", "browser"]
+    """The type of computer environment to control."""
+
+    type: Literal["computer_use_preview"]
+    """The type of the computer use tool. Always `computer_use_preview`."""
src/openai/types/responses/computer_tool_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ComputerToolParam"]
+
+
+class ComputerToolParam(TypedDict, total=False):
+    display_height: Required[float]
+    """The height of the computer display."""
+
+    display_width: Required[float]
+    """The width of the computer display."""
+
+    environment: Required[Literal["mac", "windows", "ubuntu", "browser"]]
+    """The type of computer environment to control."""
+
+    type: Required[Literal["computer_use_preview"]]
+    """The type of the computer use tool. Always `computer_use_preview`."""
src/openai/types/responses/easy_input_message_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+
+__all__ = ["EasyInputMessageParam"]
+
+
+class EasyInputMessageParam(TypedDict, total=False):
+    content: Required[Union[str, ResponseInputMessageContentListParam]]
+    """
+    Text, image, or audio input to the model, used to generate a response. Can also
+    contain previous assistant responses.
+    """
+
+    role: Required[Literal["user", "assistant", "system", "developer"]]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always `message`."""
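A sketch of passing a list of easy input messages as the `input` to the Responses API; roles and content follow `EasyInputMessageParam`.

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o-2024-08-06",
    input=[
        {"role": "developer", "content": "Answer in one sentence."},
        {"role": "user", "content": "What is a vector store?"},
    ],
)
print(response.output_text)
```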
src/openai/types/responses/file_search_tool.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from ..shared.compound_filter import CompoundFilter
+from ..shared.comparison_filter import ComparisonFilter
+
+__all__ = ["FileSearchTool", "Filters", "RankingOptions"]
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(BaseModel):
+    ranker: Optional[Literal["auto", "default-2024-11-15"]] = None
+    """The ranker to use for the file search."""
+
+    score_threshold: Optional[float] = None
+    """
+    The score threshold for the file search, a number between 0 and 1. Numbers
+    closer to 1 will attempt to return only the most relevant results, but may
+    return fewer results.
+    """
+
+
+class FileSearchTool(BaseModel):
+    type: Literal["file_search"]
+    """The type of the file search tool. Always `file_search`."""
+
+    vector_store_ids: List[str]
+    """The IDs of the vector stores to search."""
+
+    filters: Optional[Filters] = None
+    """A filter to apply based on file attributes."""
+
+    max_num_results: Optional[int] = None
+    """The maximum number of results to return.
+
+    This number should be between 1 and 50 inclusive.
+    """
+
+    ranking_options: Optional[RankingOptions] = None
+    """Ranking options for search."""
src/openai/types/responses/file_search_tool_param.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared_params.compound_filter import CompoundFilter
+from ..shared_params.comparison_filter import ComparisonFilter
+
+__all__ = ["FileSearchToolParam", "Filters", "RankingOptions"]
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(TypedDict, total=False):
+    ranker: Literal["auto", "default-2024-11-15"]
+    """The ranker to use for the file search."""
+
+    score_threshold: float
+    """
+    The score threshold for the file search, a number between 0 and 1. Numbers
+    closer to 1 will attempt to return only the most relevant results, but may
+    return fewer results.
+    """
+
+
+class FileSearchToolParam(TypedDict, total=False):
+    type: Required[Literal["file_search"]]
+    """The type of the file search tool. Always `file_search`."""
+
+    vector_store_ids: Required[List[str]]
+    """The IDs of the vector stores to search."""
+
+    filters: Filters
+    """A filter to apply based on file attributes."""
+
+    max_num_results: int
+    """The maximum number of results to return.
+
+    This number should be between 1 and 50 inclusive.
+    """
+
+    ranking_options: RankingOptions
+    """Ranking options for search."""
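A sketch of a `file_search` tool definition matching `FileSearchToolParam`; the vector store ID is a placeholder.

```python
from openai import OpenAI

client = OpenAI()

# Placeholder vector store ID; ranking options follow the typed dict above.
response = client.responses.create(
    model="gpt-4o-2024-08-06",
    input="Summarize the onboarding guide",
    tools=[
        {
            "type": "file_search",
            "vector_store_ids": ["vs_abc123"],
            "max_num_results": 10,
            "ranking_options": {"ranker": "auto", "score_threshold": 0.5},
        }
    ],
)
print(response.output_text)
```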
src/openai/types/responses/function_tool.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FunctionTool"]
+
+
+class FunctionTool(BaseModel):
+    name: str
+    """The name of the function to call."""
+
+    parameters: Dict[str, object]
+    """A JSON schema object describing the parameters of the function."""
+
+    strict: bool
+    """Whether to enforce strict parameter validation. Default `true`."""
+
+    type: Literal["function"]
+    """The type of the function tool. Always `function`."""
+
+    description: Optional[str] = None
+    """A description of the function.
+
+    Used by the model to determine whether or not to call the function.
+    """
src/openai/types/responses/function_tool_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FunctionToolParam"]
+
+
+class FunctionToolParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
+
+    parameters: Required[Dict[str, object]]
+    """A JSON schema object describing the parameters of the function."""
+
+    strict: Required[bool]
+    """Whether to enforce strict parameter validation. Default `true`."""
+
+    type: Required[Literal["function"]]
+    """The type of the function tool. Always `function`."""
+
+    description: Optional[str]
+    """A description of the function.
+
+    Used by the model to determine whether or not to call the function.
+    """
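A sketch of a hand-written `FunctionToolParam`; the function name and JSON schema are illustrative, and `openai.pydantic_function_tool` can generate an equivalent tool from a Pydantic model instead.

```python
from openai import OpenAI

client = OpenAI()

# Illustrative function definition; all four required keys from FunctionToolParam are set.
response = client.responses.create(
    model="gpt-4o-2024-08-06",
    input="What's the weather in Paris?",
    tools=[
        {
            "type": "function",
            "name": "get_weather",
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
            "strict": True,
        }
    ],
)
```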
src/openai/types/responses/input_item_list_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["InputItemListParams"]
+
+
+class InputItemListParams(TypedDict, total=False):
+    after: str
+    """An item ID to list items after, used in pagination."""
+
+    before: str
+    """An item ID to list items before, used in pagination."""
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """The order to return the input items in. Default is `asc`.
+
+    - `asc`: Return the input items in ascending order.
+    - `desc`: Return the input items in descending order.
+    """
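A sketch of the input-item listing parameters, assuming they are accepted by `client.responses.input_items.list`; the response ID is a placeholder.

```python
from openai import OpenAI

client = OpenAI()

# Placeholder response ID; pagination and ordering follow InputItemListParams.
items = client.responses.input_items.list(
    "resp_abc123",
    limit=20,
    order="desc",
)
for item in items:
    print(item)
```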
src/openai/types/responses/parsed_response.py
@@ -0,0 +1,77 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import TYPE_CHECKING, List, Union, Generic, TypeVar, Optional
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response import Response
+from ..._models import GenericModel
+from .response_output_item import Reasoning
+from .response_output_text import ResponseOutputText
+from .response_output_message import ResponseOutputMessage
+from .response_output_refusal import ResponseOutputRefusal
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_function_tool_call import ResponseFunctionToolCall
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+
+__all__ = ["ParsedResponse", "ParsedResponseOutputMessage", "ParsedResponseOutputText"]
+
+ContentType = TypeVar("ContentType")
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedResponseOutputText(ResponseOutputText, GenericModel, Generic[ContentType]):
+    parsed: Optional[ContentType] = None
+
+
+ParsedContent: TypeAlias = Annotated[
+    Union[ParsedResponseOutputText[ContentType], ResponseOutputRefusal],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ParsedResponseOutputMessage(ResponseOutputMessage, GenericModel, Generic[ContentType]):
+    if TYPE_CHECKING:
+        content: List[ParsedContent[ContentType]]  # type: ignore[assignment]
+    else:
+        content: List[ParsedContent]
+
+
+class ParsedResponseFunctionToolCall(ResponseFunctionToolCall):
+    parsed_arguments: object = None
+
+
+ParsedResponseOutputItem: TypeAlias = Annotated[
+    Union[
+        ParsedResponseOutputMessage[ContentType],
+        ParsedResponseFunctionToolCall,
+        ResponseFileSearchToolCall,
+        ResponseFunctionWebSearch,
+        ResponseComputerToolCall,
+        Reasoning,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ParsedResponse(Response, GenericModel, Generic[ContentType]):
+    if TYPE_CHECKING:
+        output: List[ParsedResponseOutputItem[ContentType]]  # type: ignore[assignment]
+    else:
+        output: List[ParsedResponseOutputItem]
+
+    @property
+    def output_parsed(self) -> Optional[ContentType]:
+        for output in self.output:
+            if output.type == "message":
+                for content in output.content:
+                    if content.type == "output_text" and content.parsed:
+                        return content.parsed
+
+        return None
src/openai/types/responses/response.py
@@ -0,0 +1,204 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from .tool import Tool
+from ..._models import BaseModel
+from .response_error import ResponseError
+from .response_usage import ResponseUsage
+from .response_status import ResponseStatus
+from ..shared.metadata import Metadata
+from ..shared.reasoning import Reasoning
+from .tool_choice_types import ToolChoiceTypes
+from ..shared.chat_model import ChatModel
+from .tool_choice_options import ToolChoiceOptions
+from .response_output_item import ResponseOutputItem
+from .response_text_config import ResponseTextConfig
+from .tool_choice_function import ToolChoiceFunction
+
+__all__ = ["Response", "IncompleteDetails", "ToolChoice"]
+
+
+class IncompleteDetails(BaseModel):
+    reason: Optional[Literal["max_output_tokens", "content_filter"]] = None
+    """The reason why the response is incomplete."""
+
+
+ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypes, ToolChoiceFunction]
+
+
+class Response(BaseModel):
+    id: str
+    """Unique identifier for this Response."""
+
+    created_at: float
+    """Unix timestamp (in seconds) of when this Response was created."""
+
+    error: Optional[ResponseError] = None
+    """An error object returned when the model fails to generate a Response."""
+
+    incomplete_details: Optional[IncompleteDetails] = None
+    """Details about why the response is incomplete."""
+
+    instructions: Optional[str] = None
+    """
+    Inserts a system (or developer) message as the first item in the model's
+    context.
+
+    When used along with `previous_response_id`, the instructions from a previous
+    response will not be carried over to the next response. This makes it simple
+    to swap out system (or developer) messages in new responses.
+    """
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    model: Union[str, ChatModel]
+    """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
+    object: Literal["response"]
+    """The object type of this resource - always set to `response`."""
+
+    output: List[ResponseOutputItem]
+    """An array of content items generated by the model.
+
+    - The length and order of items in the `output` array are dependent on the
+      model's response.
+    - Rather than accessing the first item in the `output` array and assuming it's
+      an `assistant` message with the content generated by the model, you might
+      consider using the `output_text` property where supported in SDKs.
+    """
+
+    parallel_tool_calls: bool
+    """Whether to allow the model to run tool calls in parallel."""
+
+    temperature: Optional[float] = None
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. We generally recommend altering
+    this or `top_p` but not both.
+    """
+
+    tool_choice: ToolChoice
+    """
+    How the model should select which tool (or tools) to use when generating a
+    response. See the `tools` parameter to see how to specify which tools the model
+    can call.
+    """
+
+    tools: List[Tool]
+    """An array of tools the model may call while generating a response.
+
+    You can specify which tool to use by setting the `tool_choice` parameter.
+
+    The two categories of tools you can provide the model are:
+
+    - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+      capabilities, like
+      [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+      [file search](https://platform.openai.com/docs/guides/tools-file-search).
+      Learn more about
+      [built-in tools](https://platform.openai.com/docs/guides/tools).
+    - **Function calls (custom tools)**: Functions that are defined by you, enabling
+      the model to call your own code. Learn more about
+      [function calling](https://platform.openai.com/docs/guides/function-calling).
+    """
+
+    top_p: Optional[float] = None
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or `temperature` but not both.
+    """
+
+    max_output_tokens: Optional[int] = None
+    """
+    An upper bound for the number of tokens that can be generated for a response,
+    including visible output tokens and
+    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    previous_response_id: Optional[str] = None
+    """The unique ID of the previous response to the model.
+
+    Use this to create multi-turn conversations. Learn more about
+    [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+    """
+
+    reasoning: Optional[Reasoning] = None
+    """**o-series models only**
+
+    Configuration options for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    status: Optional[ResponseStatus] = None
+    """The status of the response generation.
+
+    One of `completed`, `failed`, `in_progress`, or `incomplete`.
+    """
+
+    text: Optional[ResponseTextConfig] = None
+    """Configuration options for a text response from the model.
+
+    Can be plain text or structured JSON data. Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+    """
+
+    truncation: Optional[Literal["auto", "disabled"]] = None
+    """The truncation strategy to use for the model response.
+
+    - `auto`: If the context of this response and previous ones exceeds the model's
+      context window size, the model will truncate the response to fit the context
+      window by dropping input items in the middle of the conversation.
+    - `disabled` (default): If a model response will exceed the context window size
+      for a model, the request will fail with a 400 error.
+    """
+
+    usage: Optional[ResponseUsage] = None
+    """
+    Represents token usage details including input tokens, output tokens, a
+    breakdown of output tokens, and the total tokens used.
+    """
+
+    user: Optional[str] = None
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
+
+    @property
+    def output_text(self) -> str:
+        """Convenience property that aggregates all `output_text` items from the `output`
+        list.
+
+        If no `output_text` content blocks exist, then an empty string is returned.
+        """
+        texts: List[str] = []
+        for output in self.output:
+            if output.type == "message":
+                for content in output.content:
+                    if content.type == "output_text":
+                        texts.append(content.text)
+
+        return "".join(texts)
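A sketch of inspecting the new `Response` model, including the `output_text` convenience property defined above; the `total_tokens` field on `ResponseUsage` is assumed.

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o-2024-08-06",
    input="Write a haiku about type annotations",
)

print(response.status)       # completed, failed, in_progress, or incomplete
print(response.output_text)  # aggregated output_text blocks, "" if none
if response.incomplete_details is not None:
    print("incomplete:", response.incomplete_details.reason)
if response.usage is not None:
    print("total tokens:", response.usage.total_tokens)  # assumed field name
```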
src/openai/types/responses/response_audio_delta_event.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioDeltaEvent"]
+
+
+class ResponseAudioDeltaEvent(BaseModel):
+    delta: str
+    """A chunk of Base64 encoded response audio bytes."""
+
+    type: Literal["response.audio.delta"]
+    """The type of the event. Always `response.audio.delta`."""
src/openai/types/responses/response_audio_done_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioDoneEvent"]
+
+
+class ResponseAudioDoneEvent(BaseModel):
+    type: Literal["response.audio.done"]
+    """The type of the event. Always `response.audio.done`."""
src/openai/types/responses/response_audio_transcript_delta_event.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDeltaEvent"]
+
+
+class ResponseAudioTranscriptDeltaEvent(BaseModel):
+    delta: str
+    """The partial transcript of the audio response."""
+
+    type: Literal["response.audio.transcript.delta"]
+    """The type of the event. Always `response.audio.transcript.delta`."""
src/openai/types/responses/response_audio_transcript_done_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDoneEvent"]
+
+
+class ResponseAudioTranscriptDoneEvent(BaseModel):
+    type: Literal["response.audio.transcript.done"]
+    """The type of the event. Always `response.audio.transcript.done`."""
src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterCallCodeDeltaEvent"]
+
+
+class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel):
+    delta: str
+    """The partial code snippet added by the code interpreter."""
+
+    output_index: int
+    """The index of the output item that contains the code interpreter call."""
+
+    type: Literal["response.code_interpreter_call.code.delta"]
+    """The type of the event. Always `response.code_interpreter_call.code.delta`."""
src/openai/types/responses/response_code_interpreter_call_code_done_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterCallCodeDoneEvent"]
+
+
+class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel):
+    code: str
+    """The final code snippet output by the code interpreter."""
+
+    output_index: int
+    """The index of the output item that contains the code interpreter call."""
+
+    type: Literal["response.code_interpreter_call.code.done"]
+    """The type of the event. Always `response.code_interpreter_call.code.done`."""
src/openai/types/responses/response_code_interpreter_call_completed_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallCompletedEvent"]
+
+
+class ResponseCodeInterpreterCallCompletedEvent(BaseModel):
+    code_interpreter_call: ResponseCodeInterpreterToolCall
+    """A tool call to run code."""
+
+    output_index: int
+    """The index of the output item that contains the code interpreter call."""
+
+    type: Literal["response.code_interpreter_call.completed"]
+    """The type of the event. Always `response.code_interpreter_call.completed`."""
src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallInProgressEvent"]
+
+
+class ResponseCodeInterpreterCallInProgressEvent(BaseModel):
+    code_interpreter_call: ResponseCodeInterpreterToolCall
+    """A tool call to run code."""
+
+    output_index: int
+    """The index of the output item that contains the code interpreter call."""
+
+    type: Literal["response.code_interpreter_call.in_progress"]
+    """The type of the event. Always `response.code_interpreter_call.in_progress`."""
src/openai/types/responses/response_code_interpreter_call_interpreting_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallInterpretingEvent"]
+
+
+class ResponseCodeInterpreterCallInterpretingEvent(BaseModel):
+    code_interpreter_call: ResponseCodeInterpreterToolCall
+    """A tool call to run code."""
+
+    output_index: int
+    """The index of the output item that contains the code interpreter call."""
+
+    type: Literal["response.code_interpreter_call.interpreting"]
+    """The type of the event. Always `response.code_interpreter_call.interpreting`."""
src/openai/types/responses/response_code_interpreter_tool_call.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterToolCall", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"]
+
+
+class ResultLogs(BaseModel):
+    logs: str
+    """The logs of the code interpreter tool call."""
+
+    type: Literal["logs"]
+    """The type of the code interpreter text output. Always `logs`."""
+
+
+class ResultFilesFile(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    mime_type: str
+    """The MIME type of the file."""
+
+
+class ResultFiles(BaseModel):
+    files: List[ResultFilesFile]
+
+    type: Literal["files"]
+    """The type of the code interpreter file output. Always `files`."""
+
+
+Result: TypeAlias = Annotated[Union[ResultLogs, ResultFiles], PropertyInfo(discriminator="type")]
+
+
+class ResponseCodeInterpreterToolCall(BaseModel):
+    id: str
+    """The unique ID of the code interpreter tool call."""
+
+    code: str
+    """The code to run."""
+
+    results: List[Result]
+    """The results of the code interpreter tool call."""
+
+    status: Literal["in_progress", "interpreting", "completed"]
+    """The status of the code interpreter tool call."""
+
+    type: Literal["code_interpreter_call"]
+    """The type of the code interpreter tool call. Always `code_interpreter_call`."""
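A sketch of walking the `results` union on a `ResponseCodeInterpreterToolCall`, discriminating on `type` as defined above.

```python
from openai.types.responses import ResponseCodeInterpreterToolCall


def print_results(call: ResponseCodeInterpreterToolCall) -> None:
    # Each result is either a logs entry or a files entry.
    for result in call.results:
        if result.type == "logs":
            print(result.logs)
        else:
            for file in result.files:
                print(file.file_id, file.mime_type)
```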
src/openai/types/responses/response_completed_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseCompletedEvent"]
+
+
+class ResponseCompletedEvent(BaseModel):
+    response: Response
+    """Properties of the completed response."""
+
+    type: Literal["response.completed"]
+    """The type of the event. Always `response.completed`."""
src/openai/types/responses/response_computer_tool_call.py
@@ -0,0 +1,212 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+    "ResponseComputerToolCall",
+    "Action",
+    "ActionClick",
+    "ActionDoubleClick",
+    "ActionDrag",
+    "ActionDragPath",
+    "ActionKeypress",
+    "ActionMove",
+    "ActionScreenshot",
+    "ActionScroll",
+    "ActionType",
+    "ActionWait",
+    "PendingSafetyCheck",
+]
+
+
+class ActionClick(BaseModel):
+    button: Literal["left", "right", "wheel", "back", "forward"]
+    """Indicates which mouse button was pressed during the click.
+
+    One of `left`, `right`, `wheel`, `back`, or `forward`.
+    """
+
+    type: Literal["click"]
+    """Specifies the event type.
+
+    For a click action, this property is always set to `click`.
+    """
+
+    x: int
+    """The x-coordinate where the click occurred."""
+
+    y: int
+    """The y-coordinate where the click occurred."""
+
+
+class ActionDoubleClick(BaseModel):
+    type: Literal["double_click"]
+    """Specifies the event type.
+
+    For a double click action, this property is always set to `double_click`.
+    """
+
+    x: int
+    """The x-coordinate where the double click occurred."""
+
+    y: int
+    """The y-coordinate where the double click occurred."""
+
+
+class ActionDragPath(BaseModel):
+    x: int
+    """The x-coordinate."""
+
+    y: int
+    """The y-coordinate."""
+
+
+class ActionDrag(BaseModel):
+    path: List[ActionDragPath]
+    """An array of coordinates representing the path of the drag action.
+
+    Coordinates will appear as an array of objects, e.g.
+
+    ```
+    [
+      { x: 100, y: 200 },
+      { x: 200, y: 300 }
+    ]
+    ```
+    """
+
+    type: Literal["drag"]
+    """Specifies the event type.
+
+    For a drag action, this property is always set to `drag`.
+    """
+
+
+class ActionKeypress(BaseModel):
+    keys: List[str]
+    """The combination of keys the model is requesting to be pressed.
+
+    This is an array of strings, each representing a key.
+    """
+
+    type: Literal["keypress"]
+    """Specifies the event type.
+
+    For a keypress action, this property is always set to `keypress`.
+    """
+
+
+class ActionMove(BaseModel):
+    type: Literal["move"]
+    """Specifies the event type.
+
+    For a move action, this property is always set to `move`.
+    """
+
+    x: int
+    """The x-coordinate to move to."""
+
+    y: int
+    """The y-coordinate to move to."""
+
+
+class ActionScreenshot(BaseModel):
+    type: Literal["screenshot"]
+    """Specifies the event type.
+
+    For a screenshot action, this property is always set to `screenshot`.
+    """
+
+
+class ActionScroll(BaseModel):
+    scroll_x: int
+    """The horizontal scroll distance."""
+
+    scroll_y: int
+    """The vertical scroll distance."""
+
+    type: Literal["scroll"]
+    """Specifies the event type.
+
+    For a scroll action, this property is always set to `scroll`.
+    """
+
+    x: int
+    """The x-coordinate where the scroll occurred."""
+
+    y: int
+    """The y-coordinate where the scroll occurred."""
+
+
+class ActionType(BaseModel):
+    text: str
+    """The text to type."""
+
+    type: Literal["type"]
+    """Specifies the event type.
+
+    For a type action, this property is always set to `type`.
+    """
+
+
+class ActionWait(BaseModel):
+    type: Literal["wait"]
+    """Specifies the event type.
+
+    For a wait action, this property is always set to `wait`.
+    """
+
+
+Action: TypeAlias = Annotated[
+    Union[
+        ActionClick,
+        ActionDoubleClick,
+        ActionDrag,
+        ActionKeypress,
+        ActionMove,
+        ActionScreenshot,
+        ActionScroll,
+        ActionType,
+        ActionWait,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class PendingSafetyCheck(BaseModel):
+    id: str
+    """The ID of the pending safety check."""
+
+    code: str
+    """The type of the pending safety check."""
+
+    message: str
+    """Details about the pending safety check."""
+
+
+class ResponseComputerToolCall(BaseModel):
+    id: str
+    """The unique ID of the computer call."""
+
+    action: Action
+    """The action to perform. One of the action types defined above."""
+
+    call_id: str
+    """An identifier used when responding to the tool call with output."""
+
+    pending_safety_checks: List[PendingSafetyCheck]
+    """The pending safety checks for the computer call."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Literal["computer_call"]
+    """The type of the computer call. Always `computer_call`."""
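A sketch of narrowing the `Action` union on a `ResponseComputerToolCall` by its `type` discriminator; the string formatting is illustrative only.

```python
from openai.types.responses import ResponseComputerToolCall


def describe_action(call: ResponseComputerToolCall) -> str:
    action = call.action
    if action.type == "click":
        return f"click {action.button} at ({action.x}, {action.y})"
    if action.type == "drag":
        return f"drag through {len(action.path)} points"
    if action.type == "keypress":
        return "press " + "+".join(action.keys)
    if action.type == "type":
        return f"type {action.text!r}"
    # double_click, move, screenshot, scroll, and wait fall through here.
    return action.type
```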
src/openai/types/responses/response_computer_tool_call_param.py
@@ -0,0 +1,208 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+    "ResponseComputerToolCallParam",
+    "Action",
+    "ActionClick",
+    "ActionDoubleClick",
+    "ActionDrag",
+    "ActionDragPath",
+    "ActionKeypress",
+    "ActionMove",
+    "ActionScreenshot",
+    "ActionScroll",
+    "ActionType",
+    "ActionWait",
+    "PendingSafetyCheck",
+]
+
+
+class ActionClick(TypedDict, total=False):
+    button: Required[Literal["left", "right", "wheel", "back", "forward"]]
+    """Indicates which mouse button was pressed during the click.
+
+    One of `left`, `right`, `wheel`, `back`, or `forward`.
+    """
+
+    type: Required[Literal["click"]]
+    """Specifies the event type.
+
+    For a click action, this property is always set to `click`.
+    """
+
+    x: Required[int]
+    """The x-coordinate where the click occurred."""
+
+    y: Required[int]
+    """The y-coordinate where the click occurred."""
+
+
+class ActionDoubleClick(TypedDict, total=False):
+    type: Required[Literal["double_click"]]
+    """Specifies the event type.
+
+    For a double click action, this property is always set to `double_click`.
+    """
+
+    x: Required[int]
+    """The x-coordinate where the double click occurred."""
+
+    y: Required[int]
+    """The y-coordinate where the double click occurred."""
+
+
+class ActionDragPath(TypedDict, total=False):
+    x: Required[int]
+    """The x-coordinate."""
+
+    y: Required[int]
+    """The y-coordinate."""
+
+
+class ActionDrag(TypedDict, total=False):
+    path: Required[Iterable[ActionDragPath]]
+    """An array of coordinates representing the path of the drag action.
+
+    Coordinates will appear as an array of objects, e.g.
+
+    ```
+    [
+      { x: 100, y: 200 },
+      { x: 200, y: 300 }
+    ]
+    ```
+    """
+
+    type: Required[Literal["drag"]]
+    """Specifies the event type.
+
+    For a drag action, this property is always set to `drag`.
+    """
+
+
+class ActionKeypress(TypedDict, total=False):
+    keys: Required[List[str]]
+    """The combination of keys the model is requesting to be pressed.
+
+    This is an array of strings, each representing a key.
+    """
+
+    type: Required[Literal["keypress"]]
+    """Specifies the event type.
+
+    For a keypress action, this property is always set to `keypress`.
+    """
+
+
+class ActionMove(TypedDict, total=False):
+    type: Required[Literal["move"]]
+    """Specifies the event type.
+
+    For a move action, this property is always set to `move`.
+    """
+
+    x: Required[int]
+    """The x-coordinate to move to."""
+
+    y: Required[int]
+    """The y-coordinate to move to."""
+
+
+class ActionScreenshot(TypedDict, total=False):
+    type: Required[Literal["screenshot"]]
+    """Specifies the event type.
+
+    For a screenshot action, this property is always set to `screenshot`.
+    """
+
+
+class ActionScroll(TypedDict, total=False):
+    scroll_x: Required[int]
+    """The horizontal scroll distance."""
+
+    scroll_y: Required[int]
+    """The vertical scroll distance."""
+
+    type: Required[Literal["scroll"]]
+    """Specifies the event type.
+
+    For a scroll action, this property is always set to `scroll`.
+    """
+
+    x: Required[int]
+    """The x-coordinate where the scroll occurred."""
+
+    y: Required[int]
+    """The y-coordinate where the scroll occurred."""
+
+
+class ActionType(TypedDict, total=False):
+    text: Required[str]
+    """The text to type."""
+
+    type: Required[Literal["type"]]
+    """Specifies the event type.
+
+    For a type action, this property is always set to `type`.
+    """
+
+
+class ActionWait(TypedDict, total=False):
+    type: Required[Literal["wait"]]
+    """Specifies the event type.
+
+    For a wait action, this property is always set to `wait`.
+    """
+
+
+Action: TypeAlias = Union[
+    ActionClick,
+    ActionDoubleClick,
+    ActionDrag,
+    ActionKeypress,
+    ActionMove,
+    ActionScreenshot,
+    ActionScroll,
+    ActionType,
+    ActionWait,
+]
+
+
+class PendingSafetyCheck(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the pending safety check."""
+
+    code: Required[str]
+    """The type of the pending safety check."""
+
+    message: Required[str]
+    """Details about the pending safety check."""
+
+
+class ResponseComputerToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the computer call."""
+
+    action: Required[Action]
+    """A click action."""
+
+    call_id: Required[str]
+    """An identifier used when responding to the tool call with output."""
+
+    pending_safety_checks: Required[Iterable[PendingSafetyCheck]]
+    """The pending safety checks for the computer call."""
+
+    status: Required[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Required[Literal["computer_call"]]
+    """The type of the computer call. Always `computer_call`."""
src/openai/types/responses/response_content_part_added_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseContentPartAddedEvent", "Part"]
+
+Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseContentPartAddedEvent(BaseModel):
+    content_index: int
+    """The index of the content part that was added."""
+
+    item_id: str
+    """The ID of the output item that the content part was added to."""
+
+    output_index: int
+    """The index of the output item that the content part was added to."""
+
+    part: Part
+    """The content part that was added."""
+
+    type: Literal["response.content_part.added"]
+    """The type of the event. Always `response.content_part.added`."""
src/openai/types/responses/response_content_part_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseContentPartDoneEvent", "Part"]
+
+Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseContentPartDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part that is done."""
+
+    item_id: str
+    """The ID of the output item that the content part was added to."""
+
+    output_index: int
+    """The index of the output item that the content part was added to."""
+
+    part: Part
+    """The content part that is done."""
+
+    type: Literal["response.content_part.done"]
+    """The type of the event. Always `response.content_part.done`."""
src/openai/types/responses/response_create_params.py
@@ -0,0 +1,204 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .tool_param import ToolParam
+from ..shared.chat_model import ChatModel
+from .response_includable import ResponseIncludable
+from .tool_choice_options import ToolChoiceOptions
+from .response_input_param import ResponseInputParam
+from ..shared_params.metadata import Metadata
+from .tool_choice_types_param import ToolChoiceTypesParam
+from ..shared_params.reasoning import Reasoning
+from .response_text_config_param import ResponseTextConfigParam
+from .tool_choice_function_param import ToolChoiceFunctionParam
+
+__all__ = [
+    "ResponseCreateParamsBase",
+    "ToolChoice",
+    "ResponseCreateParamsNonStreaming",
+    "ResponseCreateParamsStreaming",
+]
+
+
+class ResponseCreateParamsBase(TypedDict, total=False):
+    input: Required[Union[str, ResponseInputParam]]
+    """Text, image, or file inputs to the model, used to generate a response.
+
+    Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Image inputs](https://platform.openai.com/docs/guides/images)
+    - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+    - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+    - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+    """
+
+    model: Required[Union[str, ChatModel]]
+    """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
+    include: Optional[List[ResponseIncludable]]
+    """Specify additional output data to include in the model response.
+
+    Currently supported values are:
+
+    - `file_search_call.results`: Include the search results of the file search tool
+      call.
+    - `message.input_image.image_url`: Include image URLs from the input message.
+    - `computer_call_output.output.image_url`: Include image URLs from the computer
+      call output.
+    """
+
+    instructions: Optional[str]
+    """
+    Inserts a system (or developer) message as the first item in the model's
+    context.
+
+    When used along with `previous_response_id`, the instructions from a previous
+    response will not be carried over to the next response. This makes it simple
+    to swap out system (or developer) messages in new responses.
+    """
+
+    max_output_tokens: Optional[int]
+    """
+    An upper bound for the number of tokens that can be generated for a response,
+    including visible output tokens and
+    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    parallel_tool_calls: Optional[bool]
+    """Whether to allow the model to run tool calls in parallel."""
+
+    previous_response_id: Optional[str]
+    """The unique ID of the previous response to the model.
+
+    Use this to create multi-turn conversations. Learn more about
+    [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+    """
+
+    reasoning: Optional[Reasoning]
+    """**o-series models only**
+
+    Configuration options for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    store: Optional[bool]
+    """Whether to store the generated model response for later retrieval via API."""
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. We generally recommend altering
+    this or `top_p` but not both.
+    """
+
+    text: ResponseTextConfigParam
+    """Configuration options for a text response from the model.
+
+    Can be plain text or structured JSON data. Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+    """
+
+    tool_choice: ToolChoice
+    """
+    How the model should select which tool (or tools) to use when generating a
+    response. See the `tools` parameter to see how to specify which tools the model
+    can call.
+    """
+
+    tools: Iterable[ToolParam]
+    """An array of tools the model may call while generating a response.
+
+    You can specify which tool to use by setting the `tool_choice` parameter.
+
+    The two categories of tools you can provide the model are:
+
+    - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+      capabilities, like
+      [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+      [file search](https://platform.openai.com/docs/guides/tools-file-search).
+      Learn more about
+      [built-in tools](https://platform.openai.com/docs/guides/tools).
+    - **Function calls (custom tools)**: Functions that are defined by you, enabling
+      the model to call your own code. Learn more about
+      [function calling](https://platform.openai.com/docs/guides/function-calling).
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or `temperature` but not both.
+    """
+
+    truncation: Optional[Literal["auto", "disabled"]]
+    """The truncation strategy to use for the model response.
+
+    - `auto`: If the context of this response and previous ones exceeds the model's
+      context window size, the model will truncate the response to fit the context
+      window by dropping input items in the middle of the conversation.
+    - `disabled` (default): If a model response will exceed the context window size
+      for a model, the request will fail with a 400 error.
+    """
+
+    user: str
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
+
+
+ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypesParam, ToolChoiceFunctionParam]
+
+
+class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+    for more information.
+    """
+
+
+class ResponseCreateParamsStreaming(ResponseCreateParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+    for more information.
+    """
+
+
+ResponseCreateParams = Union[ResponseCreateParamsNonStreaming, ResponseCreateParamsStreaming]
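
These params map one-to-one onto keyword arguments of `client.responses.create`. A hedged sketch of a non-streaming request followed by a multi-turn continuation; the model name and the `output_text` convenience accessor on the returned response are assumptions, not defined in this file:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    input="Summarize the plot of Hamlet in two sentences.",
    instructions="You are a terse literary assistant.",
    max_output_tokens=200,
    temperature=0.2,
    metadata={"request_source": "docs-example"},
    store=True,
)
print(response.output_text)

# Instructions are not carried over from the previous response, so they can
# be swapped out (or dropped) on the next turn.
follow_up = client.responses.create(
    model="gpt-4o",
    input="Now do the same for Macbeth.",
    previous_response_id=response.id,
)
print(follow_up.output_text)
```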
src/openai/types/responses/response_created_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseCreatedEvent"]
+
+
+class ResponseCreatedEvent(BaseModel):
+    response: Response
+    """The response that was created."""
+
+    type: Literal["response.created"]
+    """The type of the event. Always `response.created`."""
src/openai/types/responses/response_error.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseError"]
+
+
+class ResponseError(BaseModel):
+    code: Literal[
+        "server_error",
+        "rate_limit_exceeded",
+        "invalid_prompt",
+        "vector_store_timeout",
+        "invalid_image",
+        "invalid_image_format",
+        "invalid_base64_image",
+        "invalid_image_url",
+        "image_too_large",
+        "image_too_small",
+        "image_parse_error",
+        "image_content_policy_violation",
+        "invalid_image_mode",
+        "image_file_too_large",
+        "unsupported_image_media_type",
+        "empty_image_file",
+        "failed_to_download_image",
+        "image_file_not_found",
+    ]
+    """The error code for the response."""
+
+    message: str
+    """A human-readable description of the error."""
src/openai/types/responses/response_error_event.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseErrorEvent"]
+
+
+class ResponseErrorEvent(BaseModel):
+    code: Optional[str] = None
+    """The error code."""
+
+    message: str
+    """The error message."""
+
+    param: Optional[str] = None
+    """The error parameter."""
+
+    type: Literal["error"]
+    """The type of the event. Always `error`."""
src/openai/types/responses/response_failed_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseFailedEvent"]
+
+
+class ResponseFailedEvent(BaseModel):
+    response: Response
+    """The response that failed."""
+
+    type: Literal["response.failed"]
+    """The type of the event. Always `response.failed`."""
src/openai/types/responses/response_file_search_call_completed_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallCompletedEvent"]
+
+
+class ResponseFileSearchCallCompletedEvent(BaseModel):
+    item_id: str
+    """The ID of the output item that the file search call is initiated."""
+
+    output_index: int
+    """The index of the output item that the file search call is initiated."""
+
+    type: Literal["response.file_search_call.completed"]
+    """The type of the event. Always `response.file_search_call.completed`."""
src/openai/types/responses/response_file_search_call_in_progress_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallInProgressEvent"]
+
+
+class ResponseFileSearchCallInProgressEvent(BaseModel):
+    item_id: str
+    """The ID of the output item that the file search call is initiated."""
+
+    output_index: int
+    """The index of the output item that the file search call is initiated."""
+
+    type: Literal["response.file_search_call.in_progress"]
+    """The type of the event. Always `response.file_search_call.in_progress`."""
src/openai/types/responses/response_file_search_call_searching_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallSearchingEvent"]
+
+
+class ResponseFileSearchCallSearchingEvent(BaseModel):
+    item_id: str
+    """The ID of the output item that the file search call is initiated."""
+
+    output_index: int
+    """The index of the output item that the file search call is searching."""
+
+    type: Literal["response.file_search_call.searching"]
+    """The type of the event. Always `response.file_search_call.searching`."""
src/openai/types/responses/response_file_search_tool_call.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchToolCall", "Result"]
+
+
+class Result(BaseModel):
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    file_id: Optional[str] = None
+    """The unique ID of the file."""
+
+    filename: Optional[str] = None
+    """The name of the file."""
+
+    score: Optional[float] = None
+    """The relevance score of the file - a value between 0 and 1."""
+
+    text: Optional[str] = None
+    """The text that was retrieved from the file."""
+
+
+class ResponseFileSearchToolCall(BaseModel):
+    id: str
+    """The unique ID of the file search tool call."""
+
+    queries: List[str]
+    """The queries used to search for files."""
+
+    status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
+    """The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    """
+
+    type: Literal["file_search_call"]
+    """The type of the file search tool call. Always `file_search_call`."""
+
+    results: Optional[List[Result]] = None
+    """The results of the file search tool call."""
src/openai/types/responses/response_file_search_tool_call_param.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFileSearchToolCallParam", "Result"]
+
+
+class Result(TypedDict, total=False):
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    file_id: str
+    """The unique ID of the file."""
+
+    filename: str
+    """The name of the file."""
+
+    score: float
+    """The relevance score of the file - a value between 0 and 1."""
+
+    text: str
+    """The text that was retrieved from the file."""
+
+
+class ResponseFileSearchToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the file search tool call."""
+
+    queries: Required[List[str]]
+    """The queries used to search for files."""
+
+    status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
+    """The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    """
+
+    type: Required[Literal["file_search_call"]]
+    """The type of the file search tool call. Always `file_search_call`."""
+
+    results: Optional[Iterable[Result]]
+    """The results of the file search tool call."""
src/openai/types/responses/response_format_text_config.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..shared.response_format_text import ResponseFormatText
+from ..shared.response_format_json_object import ResponseFormatJSONObject
+from .response_format_text_json_schema_config import ResponseFormatTextJSONSchemaConfig
+
+__all__ = ["ResponseFormatTextConfig"]
+
+ResponseFormatTextConfig: TypeAlias = Annotated[
+    Union[ResponseFormatText, ResponseFormatTextJSONSchemaConfig, ResponseFormatJSONObject],
+    PropertyInfo(discriminator="type"),
+]
src/openai/types/responses/response_format_text_config_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from ..shared_params.response_format_text import ResponseFormatText
+from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from .response_format_text_json_schema_config_param import ResponseFormatTextJSONSchemaConfigParam
+
+__all__ = ["ResponseFormatTextConfigParam"]
+
+ResponseFormatTextConfigParam: TypeAlias = Union[
+    ResponseFormatText, ResponseFormatTextJSONSchemaConfigParam, ResponseFormatJSONObject
+]
src/openai/types/responses/response_format_text_json_schema_config.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatTextJSONSchemaConfig"]
+
+
+class ResponseFormatTextJSONSchemaConfig(BaseModel):
+    schema_: Dict[str, object] = FieldInfo(alias="schema")
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+
+    type: Literal["json_schema"]
+    """The type of response format being defined. Always `json_schema`."""
+
+    description: Optional[str] = None
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    name: Optional[str] = None
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    strict: Optional[bool] = None
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+    """
src/openai/types/responses/response_format_text_json_schema_config_param.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatTextJSONSchemaConfigParam"]
+
+
+class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False):
+    schema: Required[Dict[str, object]]
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+
+    type: Required[Literal["json_schema"]]
+    """The type of response format being defined. Always `json_schema`."""
+
+    description: str
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    name: str
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    strict: Optional[bool]
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+    """
src/openai/types/responses/response_function_call_arguments_delta_event.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"]
+
+
+class ResponseFunctionCallArgumentsDeltaEvent(BaseModel):
+    delta: str
+    """The function-call arguments delta that is added."""
+
+    item_id: str
+    """The ID of the output item that the function-call arguments delta is added to."""
+
+    output_index: int
+    """
+    The index of the output item that the function-call arguments delta is added to.
+    """
+
+    type: Literal["response.function_call_arguments.delta"]
+    """The type of the event. Always `response.function_call_arguments.delta`."""
src/openai/types/responses/response_function_call_arguments_done_event.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDoneEvent"]
+
+
+class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
+    arguments: str
+    """The function-call arguments."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item."""
+
+    type: Literal["response.function_call_arguments.done"]
src/openai/types/responses/response_function_tool_call.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionToolCall"]
+
+
+class ResponseFunctionToolCall(BaseModel):
+    id: str
+    """The unique ID of the function tool call."""
+
+    arguments: str
+    """A JSON string of the arguments to pass to the function."""
+
+    call_id: str
+    """The unique ID of the function tool call generated by the model."""
+
+    name: str
+    """The name of the function to run."""
+
+    type: Literal["function_call"]
+    """The type of the function tool call. Always `function_call`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
src/openai/types/responses/response_function_tool_call_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFunctionToolCallParam"]
+
+
+class ResponseFunctionToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the function tool call."""
+
+    arguments: Required[str]
+    """A JSON string of the arguments to pass to the function."""
+
+    call_id: Required[str]
+    """The unique ID of the function tool call generated by the model."""
+
+    name: Required[str]
+    """The name of the function to run."""
+
+    type: Required[Literal["function_call"]]
+    """The type of the function tool call. Always `function_call`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
src/openai/types/responses/response_function_web_search.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionWebSearch"]
+
+
+class ResponseFunctionWebSearch(BaseModel):
+    id: str
+    """The unique ID of the web search tool call."""
+
+    status: Literal["in_progress", "searching", "completed", "failed"]
+    """The status of the web search tool call."""
+
+    type: Literal["web_search_call"]
+    """The type of the web search tool call. Always `web_search_call`."""
src/openai/types/responses/response_function_web_search_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFunctionWebSearchParam"]
+
+
+class ResponseFunctionWebSearchParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the web search tool call."""
+
+    status: Required[Literal["in_progress", "searching", "completed", "failed"]]
+    """The status of the web search tool call."""
+
+    type: Required[Literal["web_search_call"]]
+    """The type of the web search tool call. Always `web_search_call`."""
src/openai/types/responses/response_in_progress_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseInProgressEvent"]
+
+
+class ResponseInProgressEvent(BaseModel):
+    response: Response
+    """The response that is in progress."""
+
+    type: Literal["response.in_progress"]
+    """The type of the event. Always `response.in_progress`."""
src/openai/types/responses/response_includable.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ResponseIncludable"]
+
+ResponseIncludable: TypeAlias = Literal[
+    "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url"
+]
src/openai/types/responses/response_incomplete_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseIncompleteEvent"]
+
+
+class ResponseIncompleteEvent(BaseModel):
+    response: Response
+    """The response that was incomplete."""
+
+    type: Literal["response.incomplete"]
+    """The type of the event. Always `response.incomplete`."""
src/openai/types/responses/response_input_content.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_input_file import ResponseInputFile
+from .response_input_text import ResponseInputText
+from .response_input_image import ResponseInputImage
+
+__all__ = ["ResponseInputContent"]
+
+ResponseInputContent: TypeAlias = Annotated[
+    Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type")
+]
src/openai/types/responses/response_input_content_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .response_input_file_param import ResponseInputFileParam
+from .response_input_text_param import ResponseInputTextParam
+from .response_input_image_param import ResponseInputImageParam
+
+__all__ = ["ResponseInputContentParam"]
+
+ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
src/openai/types/responses/response_input_file.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputFile"]
+
+
+class ResponseInputFile(BaseModel):
+    type: Literal["input_file"]
+    """The type of the input item. Always `input_file`."""
+
+    file_data: Optional[str] = None
+    """The content of the file to be sent to the model."""
+
+    file_id: Optional[str] = None
+    """The ID of the file to be sent to the model."""
+
+    filename: Optional[str] = None
+    """The name of the file to be sent to the model."""
src/openai/types/responses/response_input_file_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputFileParam"]
+
+
+class ResponseInputFileParam(TypedDict, total=False):
+    type: Required[Literal["input_file"]]
+    """The type of the input item. Always `input_file`."""
+
+    file_data: str
+    """The content of the file to be sent to the model."""
+
+    file_id: str
+    """The ID of the file to be sent to the model."""
+
+    filename: str
+    """The name of the file to be sent to the model."""
src/openai/types/responses/response_input_image.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputImage"]
+
+
+class ResponseInputImage(BaseModel):
+    detail: Literal["high", "low", "auto"]
+    """The detail level of the image to be sent to the model.
+
+    One of `high`, `low`, or `auto`. Defaults to `auto`.
+    """
+
+    type: Literal["input_image"]
+    """The type of the input item. Always `input_image`."""
+
+    file_id: Optional[str] = None
+    """The ID of the file to be sent to the model."""
+
+    image_url: Optional[str] = None
+    """The URL of the image to be sent to the model.
+
+    A fully qualified URL or base64 encoded image in a data URL.
+    """
src/openai/types/responses/response_input_image_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputImageParam"]
+
+
+class ResponseInputImageParam(TypedDict, total=False):
+    detail: Required[Literal["high", "low", "auto"]]
+    """The detail level of the image to be sent to the model.
+
+    One of `high`, `low`, or `auto`. Defaults to `auto`.
+    """
+
+    type: Required[Literal["input_image"]]
+    """The type of the input item. Always `input_image`."""
+
+    file_id: Optional[str]
+    """The ID of the file to be sent to the model."""
+
+    image_url: Optional[str]
+    """The URL of the image to be sent to the model.
+
+    A fully qualified URL or base64 encoded image in a data URL.
+    """
src/openai/types/responses/response_input_item_param.py
@@ -0,0 +1,174 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .easy_input_message_param import EasyInputMessageParam
+from .response_output_message_param import ResponseOutputMessageParam
+from .response_computer_tool_call_param import ResponseComputerToolCallParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+
+__all__ = [
+    "ResponseInputItemParam",
+    "Message",
+    "ComputerCallOutput",
+    "ComputerCallOutputOutput",
+    "ComputerCallOutputAcknowledgedSafetyCheck",
+    "FunctionCallOutput",
+    "Reasoning",
+    "ReasoningContent",
+    "ItemReference",
+]
+
+
+class Message(TypedDict, total=False):
+    content: Required[ResponseInputMessageContentListParam]
+    """
+    A list of one or many input items to the model, containing different content
+    types.
+    """
+
+    role: Required[Literal["user", "system", "developer"]]
+    """The role of the message input. One of `user`, `system`, or `developer`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always set to `message`."""
+
+
+class ComputerCallOutputOutput(TypedDict, total=False):
+    type: Required[Literal["computer_screenshot"]]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: str
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: str
+    """The URL of the screenshot image."""
+
+
+class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the pending safety check."""
+
+    code: Required[str]
+    """The type of the pending safety check."""
+
+    message: Required[str]
+    """Details about the pending safety check."""
+
+
+class ComputerCallOutput(TypedDict, total=False):
+    call_id: Required[str]
+    """The ID of the computer tool call that produced the output."""
+
+    output: Required[ComputerCallOutputOutput]
+    """A computer screenshot image used with the computer use tool."""
+
+    type: Required[Literal["computer_call_output"]]
+    """The type of the computer tool call output. Always `computer_call_output`."""
+
+    id: str
+    """The ID of the computer tool call output."""
+
+    acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck]
+    """
+    The safety checks reported by the API that have been acknowledged by the
+    developer.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+
+class FunctionCallOutput(TypedDict, total=False):
+    call_id: Required[str]
+    """The unique ID of the function tool call generated by the model."""
+
+    output: Required[str]
+    """A JSON string of the output of the function tool call."""
+
+    type: Required[Literal["function_call_output"]]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    id: str
+    """The unique ID of the function tool call output.
+
+    Populated when this item is returned via API.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+class ReasoningContent(TypedDict, total=False):
+    text: Required[str]
+    """
+    A short summary of the reasoning used by the model when generating the response.
+    """
+
+    type: Required[Literal["reasoning_summary"]]
+    """The type of the object. Always `text`."""
+
+
+class Reasoning(TypedDict, total=False):
+    id: Required[str]
+    """The unique identifier of the reasoning content."""
+
+    content: Required[Iterable[ReasoningContent]]
+    """Reasoning text contents."""
+
+    type: Required[Literal["reasoning"]]
+    """The type of the object. Always `reasoning`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+class ItemReference(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the item to reference."""
+
+    type: Required[Literal["item_reference"]]
+    """The type of item to reference. Always `item_reference`."""
+
+
+ResponseInputItemParam: TypeAlias = Union[
+    EasyInputMessageParam,
+    Message,
+    ResponseOutputMessageParam,
+    ResponseFileSearchToolCallParam,
+    ResponseComputerToolCallParam,
+    ComputerCallOutput,
+    ResponseFunctionWebSearchParam,
+    ResponseFunctionToolCallParam,
+    FunctionCallOutput,
+    Reasoning,
+    ItemReference,
+]
src/openai/types/responses/response_input_message_content_list.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .response_input_content import ResponseInputContent
+
+__all__ = ["ResponseInputMessageContentList"]
+
+ResponseInputMessageContentList: TypeAlias = List[ResponseInputContent]
src/openai/types/responses/response_input_message_content_list_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import TypeAlias
+
+from .response_input_file_param import ResponseInputFileParam
+from .response_input_text_param import ResponseInputTextParam
+from .response_input_image_param import ResponseInputImageParam
+
+__all__ = ["ResponseInputMessageContentListParam", "ResponseInputContentParam"]
+
+ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
+
+ResponseInputMessageContentListParam: TypeAlias = List[ResponseInputContentParam]
src/openai/types/responses/response_input_param.py
@@ -0,0 +1,177 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .easy_input_message_param import EasyInputMessageParam
+from .response_output_message_param import ResponseOutputMessageParam
+from .response_computer_tool_call_param import ResponseComputerToolCallParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+
+__all__ = [
+    "ResponseInputParam",
+    "ResponseInputItemParam",
+    "Message",
+    "ComputerCallOutput",
+    "ComputerCallOutputOutput",
+    "ComputerCallOutputAcknowledgedSafetyCheck",
+    "FunctionCallOutput",
+    "Reasoning",
+    "ReasoningContent",
+    "ItemReference",
+]
+
+
+class Message(TypedDict, total=False):
+    content: Required[ResponseInputMessageContentListParam]
+    """
+    A list of one or many input items to the model, containing different content
+    types.
+    """
+
+    role: Required[Literal["user", "system", "developer"]]
+    """The role of the message input. One of `user`, `system`, or `developer`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always set to `message`."""
+
+
+class ComputerCallOutputOutput(TypedDict, total=False):
+    type: Required[Literal["computer_screenshot"]]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: str
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: str
+    """The URL of the screenshot image."""
+
+
+class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the pending safety check."""
+
+    code: Required[str]
+    """The type of the pending safety check."""
+
+    message: Required[str]
+    """Details about the pending safety check."""
+
+
+class ComputerCallOutput(TypedDict, total=False):
+    call_id: Required[str]
+    """The ID of the computer tool call that produced the output."""
+
+    output: Required[ComputerCallOutputOutput]
+    """A computer screenshot image used with the computer use tool."""
+
+    type: Required[Literal["computer_call_output"]]
+    """The type of the computer tool call output. Always `computer_call_output`."""
+
+    id: str
+    """The ID of the computer tool call output."""
+
+    acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck]
+    """
+    The safety checks reported by the API that have been acknowledged by the
+    developer.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+
+class FunctionCallOutput(TypedDict, total=False):
+    call_id: Required[str]
+    """The unique ID of the function tool call generated by the model."""
+
+    output: Required[str]
+    """A JSON string of the output of the function tool call."""
+
+    type: Required[Literal["function_call_output"]]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    id: str
+    """The unique ID of the function tool call output.
+
+    Populated when this item is returned via API.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+class ReasoningContent(TypedDict, total=False):
+    text: Required[str]
+    """
+    A short summary of the reasoning used by the model when generating the response.
+    """
+
+    type: Required[Literal["reasoning_summary"]]
+    """The type of the object. Always `text`."""
+
+
+class Reasoning(TypedDict, total=False):
+    id: Required[str]
+    """The unique identifier of the reasoning content."""
+
+    content: Required[Iterable[ReasoningContent]]
+    """Reasoning text contents."""
+
+    type: Required[Literal["reasoning"]]
+    """The type of the object. Always `reasoning`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+class ItemReference(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the item to reference."""
+
+    type: Required[Literal["item_reference"]]
+    """The type of item to reference. Always `item_reference`."""
+
+
+ResponseInputItemParam: TypeAlias = Union[
+    EasyInputMessageParam,
+    Message,
+    ResponseOutputMessageParam,
+    ResponseFileSearchToolCallParam,
+    ResponseComputerToolCallParam,
+    ComputerCallOutput,
+    ResponseFunctionWebSearchParam,
+    ResponseFunctionToolCallParam,
+    FunctionCallOutput,
+    Reasoning,
+    ItemReference,
+]
+
+ResponseInputParam: TypeAlias = List[ResponseInputItemParam]
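
Because `ResponseInputParam` is a plain list, multi-modal input can be composed from dictionaries that mirror the item shapes above. A sketch with a placeholder image URL and model; the `ResponseInputParam` re-export from `openai.types.responses` and the `output_text` accessor are assumptions:

```python
from openai import OpenAI
from openai.types.responses import ResponseInputParam

client = OpenAI()

input_items: ResponseInputParam = [
    {
        "type": "message",
        "role": "user",
        "content": [
            {"type": "input_text", "text": "What is shown in this image?"},
            {
                "type": "input_image",
                "detail": "auto",
                "image_url": "https://example.com/photo.jpg",
            },
        ],
    }
]

response = client.responses.create(model="gpt-4o", input=input_items)
print(response.output_text)
```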
src/openai/types/responses/response_input_text.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputText"]
+
+
+class ResponseInputText(BaseModel):
+    text: str
+    """The text input to the model."""
+
+    type: Literal["input_text"]
+    """The type of the input item. Always `input_text`."""
src/openai/types/responses/response_input_text_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputTextParam"]
+
+
+class ResponseInputTextParam(TypedDict, total=False):
+    text: Required[str]
+    """The text input to the model."""
+
+    type: Required[Literal["input_text"]]
+    """The type of the input item. Always `input_text`."""
src/openai/types/responses/response_item_list.py
@@ -0,0 +1,152 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_message import ResponseOutputMessage
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_function_tool_call import ResponseFunctionToolCall
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_input_message_content_list import ResponseInputMessageContentList
+
+__all__ = [
+    "ResponseItemList",
+    "Data",
+    "DataMessage",
+    "DataComputerCallOutput",
+    "DataComputerCallOutputOutput",
+    "DataComputerCallOutputAcknowledgedSafetyCheck",
+    "DataFunctionCallOutput",
+]
+
+
+class DataMessage(BaseModel):
+    id: str
+    """The unique ID of the message input."""
+
+    content: ResponseInputMessageContentList
+    """
+    A list of one or many input items to the model, containing different content
+    types.
+    """
+
+    role: Literal["user", "system", "developer"]
+    """The role of the message input. One of `user`, `system`, or `developer`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always set to `message`."""
+
+
+class DataComputerCallOutputOutput(BaseModel):
+    type: Literal["computer_screenshot"]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: Optional[str] = None
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: Optional[str] = None
+    """The URL of the screenshot image."""
+
+
+class DataComputerCallOutputAcknowledgedSafetyCheck(BaseModel):
+    id: str
+    """The ID of the pending safety check."""
+
+    code: str
+    """The type of the pending safety check."""
+
+    message: str
+    """Details about the pending safety check."""
+
+
+class DataComputerCallOutput(BaseModel):
+    id: str
+    """The unique ID of the computer call tool output."""
+
+    call_id: str
+    """The ID of the computer tool call that produced the output."""
+
+    output: DataComputerCallOutputOutput
+    """A computer screenshot image used with the computer use tool."""
+
+    type: Literal["computer_call_output"]
+    """The type of the computer tool call output. Always `computer_call_output`."""
+
+    acknowledged_safety_checks: Optional[List[DataComputerCallOutputAcknowledgedSafetyCheck]] = None
+    """
+    The safety checks reported by the API that have been acknowledged by the
+    developer.
+    """
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+
+class DataFunctionCallOutput(BaseModel):
+    id: str
+    """The unique ID of the function call tool output."""
+
+    call_id: str
+    """The unique ID of the function tool call generated by the model."""
+
+    output: str
+    """A JSON string of the output of the function tool call."""
+
+    type: Literal["function_call_output"]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+Data: TypeAlias = Annotated[
+    Union[
+        DataMessage,
+        ResponseOutputMessage,
+        ResponseFileSearchToolCall,
+        ResponseComputerToolCall,
+        DataComputerCallOutput,
+        ResponseFunctionWebSearch,
+        ResponseFunctionToolCall,
+        DataFunctionCallOutput,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ResponseItemList(BaseModel):
+    data: List[Data]
+    """A list of items used to generate this response."""
+
+    first_id: str
+    """The ID of the first item in the list."""
+
+    has_more: bool
+    """Whether there are more items available."""
+
+    last_id: str
+    """The ID of the last item in the list."""
+
+    object: Literal["list"]
+    """The type of object returned, must be `list`."""
src/openai/types/responses/response_output_item.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_message import ResponseOutputMessage
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_function_tool_call import ResponseFunctionToolCall
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+
+__all__ = ["ResponseOutputItem", "Reasoning", "ReasoningContent"]
+
+
+class ReasoningContent(BaseModel):
+    text: str
+    """
+    A short summary of the reasoning used by the model when generating the response.
+    """
+
+    type: Literal["reasoning_summary"]
+    """The type of the object. Always `text`."""
+
+
+class Reasoning(BaseModel):
+    id: str
+    """The unique identifier of the reasoning content."""
+
+    content: List[ReasoningContent]
+    """Reasoning text contents."""
+
+    type: Literal["reasoning"]
+    """The type of the object. Always `reasoning`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+ResponseOutputItem: TypeAlias = Annotated[
+    Union[
+        ResponseOutputMessage,
+        ResponseFileSearchToolCall,
+        ResponseFunctionToolCall,
+        ResponseFunctionWebSearch,
+        ResponseComputerToolCall,
+        Reasoning,
+    ],
+    PropertyInfo(discriminator="type"),
+]
src/openai/types/responses/response_output_item_added_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_output_item import ResponseOutputItem
+
+__all__ = ["ResponseOutputItemAddedEvent"]
+
+
+class ResponseOutputItemAddedEvent(BaseModel):
+    item: ResponseOutputItem
+    """The output item that was added."""
+
+    output_index: int
+    """The index of the output item that was added."""
+
+    type: Literal["response.output_item.added"]
+    """The type of the event. Always `response.output_item.added`."""
src/openai/types/responses/response_output_item_done_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_output_item import ResponseOutputItem
+
+__all__ = ["ResponseOutputItemDoneEvent"]
+
+
+class ResponseOutputItemDoneEvent(BaseModel):
+    item: ResponseOutputItem
+    """The output item that was marked done."""
+
+    output_index: int
+    """The index of the output item that was marked done."""
+
+    type: Literal["response.output_item.done"]
+    """The type of the event. Always `response.output_item.done`."""
src/openai/types/responses/response_output_message.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseOutputMessage", "Content"]
+
+Content: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseOutputMessage(BaseModel):
+    id: str
+    """The unique ID of the output message."""
+
+    content: List[Content]
+    """The content of the output message."""
+
+    role: Literal["assistant"]
+    """The role of the output message. Always `assistant`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+    type: Literal["message"]
+    """The type of the output message. Always `message`."""
src/openai/types/responses/response_output_message_param.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .response_output_text_param import ResponseOutputTextParam
+from .response_output_refusal_param import ResponseOutputRefusalParam
+
+__all__ = ["ResponseOutputMessageParam", "Content"]
+
+Content: TypeAlias = Union[ResponseOutputTextParam, ResponseOutputRefusalParam]
+
+
+class ResponseOutputMessageParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the output message."""
+
+    content: Required[Iterable[Content]]
+    """The content of the output message."""
+
+    role: Required[Literal["assistant"]]
+    """The role of the output message. Always `assistant`."""
+
+    status: Required[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+    type: Required[Literal["message"]]
+    """The type of the output message. Always `message`."""
src/openai/types/responses/response_output_refusal.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseOutputRefusal"]
+
+
+class ResponseOutputRefusal(BaseModel):
+    refusal: str
+    """The refusal explanationfrom the model."""
+
+    type: Literal["refusal"]
+    """The type of the refusal. Always `refusal`."""
src/openai/types/responses/response_output_refusal_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseOutputRefusalParam"]
+
+
+class ResponseOutputRefusalParam(TypedDict, total=False):
+    refusal: Required[str]
+    """The refusal explanationfrom the model."""
+
+    type: Required[Literal["refusal"]]
+    """The type of the refusal. Always `refusal`."""
src/openai/types/responses/response_output_text.py
@@ -0,0 +1,64 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = ["ResponseOutputText", "Annotation", "AnnotationFileCitation", "AnnotationURLCitation", "AnnotationFilePath"]
+
+
+class AnnotationFileCitation(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_citation"]
+    """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(BaseModel):
+    end_index: int
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: int
+    """The index of the first character of the URL citation in the message."""
+
+    title: str
+    """The title of the web resource."""
+
+    type: Literal["url_citation"]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url: str
+    """The URL of the web resource."""
+
+
+class AnnotationFilePath(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_path"]
+    """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Annotated[
+    Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type")
+]
+
+
+class ResponseOutputText(BaseModel):
+    annotations: List[Annotation]
+    """The annotations of the text output."""
+
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
src/openai/types/responses/response_output_text_param.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+    "ResponseOutputTextParam",
+    "Annotation",
+    "AnnotationFileCitation",
+    "AnnotationURLCitation",
+    "AnnotationFilePath",
+]
+
+
+class AnnotationFileCitation(TypedDict, total=False):
+    file_id: Required[str]
+    """The ID of the file."""
+
+    index: Required[int]
+    """The index of the file in the list of files."""
+
+    type: Required[Literal["file_citation"]]
+    """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(TypedDict, total=False):
+    end_index: Required[int]
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: Required[int]
+    """The index of the first character of the URL citation in the message."""
+
+    title: Required[str]
+    """The title of the web resource."""
+
+    type: Required[Literal["url_citation"]]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url: Required[str]
+    """The URL of the web resource."""
+
+
+class AnnotationFilePath(TypedDict, total=False):
+    file_id: Required[str]
+    """The ID of the file."""
+
+    index: Required[int]
+    """The index of the file in the list of files."""
+
+    type: Required[Literal["file_path"]]
+    """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath]
+
+
+class ResponseOutputTextParam(TypedDict, total=False):
+    annotations: Required[Iterable[Annotation]]
+    """The annotations of the text output."""
+
+    text: Required[str]
+    """The text output from the model."""
+
+    type: Required[Literal["output_text"]]
+    """The type of the output text. Always `output_text`."""
src/openai/types/responses/response_refusal_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseRefusalDeltaEvent"]
+
+
+class ResponseRefusalDeltaEvent(BaseModel):
+    content_index: int
+    """The index of the content part that the refusal text is added to."""
+
+    delta: str
+    """The refusal text that is added."""
+
+    item_id: str
+    """The ID of the output item that the refusal text is added to."""
+
+    output_index: int
+    """The index of the output item that the refusal text is added to."""
+
+    type: Literal["response.refusal.delta"]
+    """The type of the event. Always `response.refusal.delta`."""
src/openai/types/responses/response_refusal_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseRefusalDoneEvent"]
+
+
+class ResponseRefusalDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part that the refusal text is finalized."""
+
+    item_id: str
+    """The ID of the output item that the refusal text is finalized."""
+
+    output_index: int
+    """The index of the output item that the refusal text is finalized."""
+
+    refusal: str
+    """The refusal text that is finalized."""
+
+    type: Literal["response.refusal.done"]
+    """The type of the event. Always `response.refusal.done`."""
src/openai/types/responses/response_retrieve_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import TypedDict
+
+from .response_includable import ResponseIncludable
+
+__all__ = ["ResponseRetrieveParams"]
+
+
+class ResponseRetrieveParams(TypedDict, total=False):
+    include: List[ResponseIncludable]
+    """Additional fields to include in the response.
+
+    See the `include` parameter for Response creation above for more information.
+    """
src/openai/types/responses/response_status.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ResponseStatus"]
+
+ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "incomplete"]
src/openai/types/responses/response_stream_event.py
@@ -0,0 +1,78 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_error_event import ResponseErrorEvent
+from .response_failed_event import ResponseFailedEvent
+from .response_created_event import ResponseCreatedEvent
+from .response_completed_event import ResponseCompletedEvent
+from .response_text_done_event import ResponseTextDoneEvent
+from .response_audio_done_event import ResponseAudioDoneEvent
+from .response_incomplete_event import ResponseIncompleteEvent
+from .response_text_delta_event import ResponseTextDeltaEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent
+from .response_in_progress_event import ResponseInProgressEvent
+from .response_refusal_done_event import ResponseRefusalDoneEvent
+from .response_refusal_delta_event import ResponseRefusalDeltaEvent
+from .response_output_item_done_event import ResponseOutputItemDoneEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent
+from .response_content_part_added_event import ResponseContentPartAddedEvent
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
+from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent
+from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
+from .response_web_search_call_completed_event import ResponseWebSearchCallCompletedEvent
+from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent
+from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent
+from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
+from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
+from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
+from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
+from .response_code_interpreter_call_code_done_event import ResponseCodeInterpreterCallCodeDoneEvent
+from .response_code_interpreter_call_completed_event import ResponseCodeInterpreterCallCompletedEvent
+from .response_code_interpreter_call_code_delta_event import ResponseCodeInterpreterCallCodeDeltaEvent
+from .response_code_interpreter_call_in_progress_event import ResponseCodeInterpreterCallInProgressEvent
+from .response_code_interpreter_call_interpreting_event import ResponseCodeInterpreterCallInterpretingEvent
+
+__all__ = ["ResponseStreamEvent"]
+
+ResponseStreamEvent: TypeAlias = Annotated[
+    Union[
+        ResponseAudioDeltaEvent,
+        ResponseAudioDoneEvent,
+        ResponseAudioTranscriptDeltaEvent,
+        ResponseAudioTranscriptDoneEvent,
+        ResponseCodeInterpreterCallCodeDeltaEvent,
+        ResponseCodeInterpreterCallCodeDoneEvent,
+        ResponseCodeInterpreterCallCompletedEvent,
+        ResponseCodeInterpreterCallInProgressEvent,
+        ResponseCodeInterpreterCallInterpretingEvent,
+        ResponseCompletedEvent,
+        ResponseContentPartAddedEvent,
+        ResponseContentPartDoneEvent,
+        ResponseCreatedEvent,
+        ResponseErrorEvent,
+        ResponseFileSearchCallCompletedEvent,
+        ResponseFileSearchCallInProgressEvent,
+        ResponseFileSearchCallSearchingEvent,
+        ResponseFunctionCallArgumentsDeltaEvent,
+        ResponseFunctionCallArgumentsDoneEvent,
+        ResponseInProgressEvent,
+        ResponseFailedEvent,
+        ResponseIncompleteEvent,
+        ResponseOutputItemAddedEvent,
+        ResponseOutputItemDoneEvent,
+        ResponseRefusalDeltaEvent,
+        ResponseRefusalDoneEvent,
+        ResponseTextAnnotationDeltaEvent,
+        ResponseTextDeltaEvent,
+        ResponseTextDoneEvent,
+        ResponseWebSearchCallCompletedEvent,
+        ResponseWebSearchCallInProgressEvent,
+        ResponseWebSearchCallSearchingEvent,
+    ],
+    PropertyInfo(discriminator="type"),
+]
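A hedged dispatch sketch over this union, assuming `client.responses.create(..., stream=True)` yields `ResponseStreamEvent` members; only the text events defined in this diff are handled:

    from openai import OpenAI

    client = OpenAI()

    stream = client.responses.create(
        model="gpt-4o-2024-08-06",
        input="write a haiku about type-safe SDKs",
        stream=True,
    )

    for event in stream:
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)
        elif event.type == "response.output_text.done":
            print()  # the finalized text is also available on event.text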
src/openai/types/responses/response_text_annotation_delta_event.py
@@ -0,0 +1,79 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+    "ResponseTextAnnotationDeltaEvent",
+    "Annotation",
+    "AnnotationFileCitation",
+    "AnnotationURLCitation",
+    "AnnotationFilePath",
+]
+
+
+class AnnotationFileCitation(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_citation"]
+    """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(BaseModel):
+    end_index: int
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: int
+    """The index of the first character of the URL citation in the message."""
+
+    title: str
+    """The title of the web resource."""
+
+    type: Literal["url_citation"]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url: str
+    """The URL of the web resource."""
+
+
+class AnnotationFilePath(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_path"]
+    """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Annotated[
+    Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type")
+]
+
+
+class ResponseTextAnnotationDeltaEvent(BaseModel):
+    annotation: Annotation
+    """A citation to a file."""
+
+    annotation_index: int
+    """The index of the annotation that was added."""
+
+    content_index: int
+    """The index of the content part that the text annotation was added to."""
+
+    item_id: str
+    """The ID of the output item that the text annotation was added to."""
+
+    output_index: int
+    """The index of the output item that the text annotation was added to."""
+
+    type: Literal["response.output_text.annotation.added"]
+    """The type of the event. Always `response.output_text.annotation.added`."""
src/openai/types/responses/response_text_config.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .response_format_text_config import ResponseFormatTextConfig
+
+__all__ = ["ResponseTextConfig"]
+
+
+class ResponseTextConfig(BaseModel):
+    format: Optional[ResponseFormatTextConfig] = None
+    """An object specifying the format that the model must output.
+
+    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+    ensures the model will match your supplied JSON schema. Learn more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    The default format is `{ "type": "text" }` with no additional options.
+
+    **Not recommended for gpt-4o and newer models:**
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
src/openai/types/responses/response_text_config_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .response_format_text_config_param import ResponseFormatTextConfigParam
+
+__all__ = ["ResponseTextConfigParam"]
+
+
+class ResponseTextConfigParam(TypedDict, total=False):
+    format: ResponseFormatTextConfigParam
+    """An object specifying the format that the model must output.
+
+    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+    ensures the model will match your supplied JSON schema. Learn more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    The default format is `{ "type": "text" }` with no additional options.
+
+    **Not recommended for gpt-4o and newer models:**
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
src/openai/types/responses/response_text_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseTextDeltaEvent"]
+
+
+class ResponseTextDeltaEvent(BaseModel):
+    content_index: int
+    """The index of the content part that the text delta was added to."""
+
+    delta: str
+    """The text delta that was added."""
+
+    item_id: str
+    """The ID of the output item that the text delta was added to."""
+
+    output_index: int
+    """The index of the output item that the text delta was added to."""
+
+    type: Literal["response.output_text.delta"]
+    """The type of the event. Always `response.output_text.delta`."""
src/openai/types/responses/response_text_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseTextDoneEvent"]
+
+
+class ResponseTextDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part that the text content is finalized."""
+
+    item_id: str
+    """The ID of the output item that the text content is finalized."""
+
+    output_index: int
+    """The index of the output item that the text content is finalized."""
+
+    text: str
+    """The text content that is finalized."""
+
+    type: Literal["response.output_text.done"]
+    """The type of the event. Always `response.output_text.done`."""
src/openai/types/responses/response_usage.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseUsage", "OutputTokensDetails"]
+
+
+class OutputTokensDetails(BaseModel):
+    reasoning_tokens: int
+    """The number of reasoning tokens."""
+
+
+class ResponseUsage(BaseModel):
+    input_tokens: int
+    """The number of input tokens."""
+
+    output_tokens: int
+    """The number of output tokens."""
+
+    output_tokens_details: OutputTokensDetails
+    """A detailed breakdown of the output tokens."""
+
+    total_tokens: int
+    """The total number of tokens used."""
src/openai/types/responses/response_web_search_call_completed_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallCompletedEvent"]
+
+
+class ResponseWebSearchCallCompletedEvent(BaseModel):
+    item_id: str
+    """Unique ID for the output item associated with the web search call."""
+
+    output_index: int
+    """The index of the output item that the web search call is associated with."""
+
+    type: Literal["response.web_search_call.completed"]
+    """The type of the event. Always `response.web_search_call.completed`."""
src/openai/types/responses/response_web_search_call_in_progress_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallInProgressEvent"]
+
+
+class ResponseWebSearchCallInProgressEvent(BaseModel):
+    item_id: str
+    """Unique ID for the output item associated with the web search call."""
+
+    output_index: int
+    """The index of the output item that the web search call is associated with."""
+
+    type: Literal["response.web_search_call.in_progress"]
+    """The type of the event. Always `response.web_search_call.in_progress`."""
src/openai/types/responses/response_web_search_call_searching_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallSearchingEvent"]
+
+
+class ResponseWebSearchCallSearchingEvent(BaseModel):
+    item_id: str
+    """Unique ID for the output item associated with the web search call."""
+
+    output_index: int
+    """The index of the output item that the web search call is associated with."""
+
+    type: Literal["response.web_search_call.searching"]
+    """The type of the event. Always `response.web_search_call.searching`."""
src/openai/types/responses/tool.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .computer_tool import ComputerTool
+from .function_tool import FunctionTool
+from .web_search_tool import WebSearchTool
+from .file_search_tool import FileSearchTool
+
+__all__ = ["Tool"]
+
+Tool: TypeAlias = Annotated[
+    Union[FileSearchTool, FunctionTool, ComputerTool, WebSearchTool], PropertyInfo(discriminator="type")
+]
src/openai/types/responses/tool_choice_function.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolChoiceFunction"]
+
+
+class ToolChoiceFunction(BaseModel):
+    name: str
+    """The name of the function to call."""
+
+    type: Literal["function"]
+    """For function calling, the type is always `function`."""
src/openai/types/responses/tool_choice_function_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolChoiceFunctionParam"]
+
+
+class ToolChoiceFunctionParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
+
+    type: Required[Literal["function"]]
+    """For function calling, the type is always `function`."""
src/openai/types/responses/tool_choice_options.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ToolChoiceOptions"]
+
+ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"]
src/openai/types/responses/tool_choice_types.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolChoiceTypes"]
+
+
+class ToolChoiceTypes(BaseModel):
+    type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+    """The type of hosted tool the model should to use.
+
+    Learn more about
+    [built-in tools](https://platform.openai.com/docs/guides/tools).
+
+    Allowed values are:
+
+    - `file_search`
+    - `web_search_preview`
+    - `computer_use_preview`
+    """
src/openai/types/responses/tool_choice_types_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolChoiceTypesParam"]
+
+
+class ToolChoiceTypesParam(TypedDict, total=False):
+    type: Required[
+        Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+    ]
+    """The type of hosted tool the model should to use.
+
+    Learn more about
+    [built-in tools](https://platform.openai.com/docs/guides/tools).
+
+    Allowed values are:
+
+    - `file_search`
+    - `web_search_preview`
+    - `computer_use_preview`
+    """
src/openai/types/responses/tool_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .computer_tool_param import ComputerToolParam
+from .function_tool_param import FunctionToolParam
+from .web_search_tool_param import WebSearchToolParam
+from .file_search_tool_param import FileSearchToolParam
+from ..chat.chat_completion_tool_param import ChatCompletionToolParam
+
+__all__ = ["ToolParam"]
+
+ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, ComputerToolParam, WebSearchToolParam]
+
+ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam]
src/openai/types/responses/web_search_tool.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["WebSearchTool", "UserLocation"]
+
+
+class UserLocation(BaseModel):
+    type: Literal["approximate"]
+    """The type of location approximation. Always `approximate`."""
+
+    city: Optional[str] = None
+    """Free text input for the city of the user, e.g. `San Francisco`."""
+
+    country: Optional[str] = None
+    """
+    The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+    the user, e.g. `US`.
+    """
+
+    region: Optional[str] = None
+    """Free text input for the region of the user, e.g. `California`."""
+
+    timezone: Optional[str] = None
+    """
+    The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+    user, e.g. `America/Los_Angeles`.
+    """
+
+
+class WebSearchTool(BaseModel):
+    type: Literal["web_search_preview", "web_search_preview_2025_03_11"]
+    """The type of the web search tool. One of:
+
+    - `web_search_preview`
+    - `web_search_preview_2025_03_11`
+    """
+
+    search_context_size: Optional[Literal["low", "medium", "high"]] = None
+    """
+    High level guidance for the amount of context window space to use for the
+    search. One of `low`, `medium`, or `high`. `medium` is the default.
+    """
+
+    user_location: Optional[UserLocation] = None
src/openai/types/responses/web_search_tool_param.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["WebSearchToolParam", "UserLocation"]
+
+
+class UserLocation(TypedDict, total=False):
+    type: Required[Literal["approximate"]]
+    """The type of location approximation. Always `approximate`."""
+
+    city: str
+    """Free text input for the city of the user, e.g. `San Francisco`."""
+
+    country: str
+    """
+    The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+    the user, e.g. `US`.
+    """
+
+    region: str
+    """Free text input for the region of the user, e.g. `California`."""
+
+    timezone: str
+    """
+    The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+    user, e.g. `America/Los_Angeles`.
+    """
+
+
+class WebSearchToolParam(TypedDict, total=False):
+    type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]]
+    """The type of the web search tool. One of:
+
+    - `web_search_preview`
+    - `web_search_preview_2025_03_11`
+    """
+
+    search_context_size: Literal["low", "medium", "high"]
+    """
+    High level guidance for the amount of context window space to use for the
+    search. One of `low`, `medium`, or `high`. `medium` is the default.
+    """
+
+    user_location: Optional[UserLocation]
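Putting the param together, a hedged example of enabling web search with an approximate user location (all values are placeholders):

    from openai import OpenAI

    client = OpenAI()

    rsp = client.responses.create(
        model="gpt-4o-2024-08-06",
        input="What should I wear outside today?",
        tools=[
            {
                "type": "web_search_preview",
                "search_context_size": "low",
                "user_location": {
                    "type": "approximate",
                    "city": "San Francisco",
                    "country": "US",
                    "region": "California",
                    "timezone": "America/Los_Angeles",
                },
            }
        ],
    )

    print(rsp.output)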
src/openai/types/shared/__init__.py
@@ -1,8 +1,12 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from .metadata import Metadata as Metadata
+from .reasoning import Reasoning as Reasoning
 from .chat_model import ChatModel as ChatModel
 from .error_object import ErrorObject as ErrorObject
+from .compound_filter import CompoundFilter as CompoundFilter
+from .reasoning_effort import ReasoningEffort as ReasoningEffort
+from .comparison_filter import ComparisonFilter as ComparisonFilter
 from .function_definition import FunctionDefinition as FunctionDefinition
 from .function_parameters import FunctionParameters as FunctionParameters
 from .response_format_text import ResponseFormatText as ResponseFormatText
src/openai/types/shared/chat_model.py
@@ -13,6 +13,9 @@ ChatModel: TypeAlias = Literal[
     "o1-preview-2024-09-12",
     "o1-mini",
     "o1-mini-2024-09-12",
+    "computer-use-preview",
+    "computer-use-preview-2025-02-04",
+    "computer-use-preview-2025-03-11",
     "gpt-4.5-preview",
     "gpt-4.5-preview-2025-02-27",
     "gpt-4o",
src/openai/types/shared/comparison_filter.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComparisonFilter"]
+
+
+class ComparisonFilter(BaseModel):
+    key: str
+    """The key to compare against the value."""
+
+    type: Literal["eq", "ne", "gt", "gte", "lt", "lte"]
+    """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+
+    - `eq`: equals
+    - `ne`: not equal
+    - `gt`: greater than
+    - `gte`: greater than or equal
+    - `lt`: less than
+    - `lte`: less than or equal
+    """
+
+    value: Union[str, float, bool]
+    """
+    The value to compare against the attribute key; supports string, number, or
+    boolean types.
+    """
src/openai/types/shared/compound_filter.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from .comparison_filter import ComparisonFilter
+
+__all__ = ["CompoundFilter", "Filter"]
+
+Filter: TypeAlias = Union[ComparisonFilter, object]
+
+
+class CompoundFilter(BaseModel):
+    filters: List[Filter]
+    """Array of filters to combine.
+
+    Items can be `ComparisonFilter` or `CompoundFilter`.
+    """
+
+    type: Literal["and", "or"]
+    """Type of operation: `and` or `or`."""
src/openai/types/shared/reasoning.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .reasoning_effort import ReasoningEffort
+
+__all__ = ["Reasoning"]
+
+
+class Reasoning(BaseModel):
+    effort: Optional[ReasoningEffort] = None
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    generate_summary: Optional[Literal["concise", "detailed"]] = None
+    """**o-series models only**
+
+    A summary of the reasoning performed by the model. This can be useful for
+    debugging and understanding the model's reasoning process. One of `concise` or
+    `detailed`.
+    """
src/openai/types/shared/reasoning_effort.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ReasoningEffort"]
+
+ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
src/openai/types/shared/response_format_json_object.py
@@ -9,4 +9,4 @@ __all__ = ["ResponseFormatJSONObject"]
 
 class ResponseFormatJSONObject(BaseModel):
     type: Literal["json_object"]
-    """The type of response format being defined: `json_object`"""
+    """The type of response format being defined. Always `json_object`."""
src/openai/types/shared/response_format_json_schema.py
@@ -25,20 +25,24 @@ class JSONSchema(BaseModel):
     """
 
     schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None)
-    """The schema for the response format, described as a JSON Schema object."""
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
 
     strict: Optional[bool] = None
-    """Whether to enable strict schema adherence when generating the output.
-
-    If set to true, the model will always follow the exact schema defined in the
-    `schema` field. Only a subset of JSON Schema is supported when `strict` is
-    `true`. To learn more, read the
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
     [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
     """
 
 
 class ResponseFormatJSONSchema(BaseModel):
     json_schema: JSONSchema
+    """Structured Outputs configuration options, including a JSON Schema."""
 
     type: Literal["json_schema"]
-    """The type of response format being defined: `json_schema`"""
+    """The type of response format being defined. Always `json_schema`."""
src/openai/types/shared/response_format_text.py
@@ -9,4 +9,4 @@ __all__ = ["ResponseFormatText"]
 
 class ResponseFormatText(BaseModel):
     type: Literal["text"]
-    """The type of response format being defined: `text`"""
+    """The type of response format being defined. Always `text`."""
src/openai/types/shared_params/__init__.py
@@ -1,7 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from .metadata import Metadata as Metadata
+from .reasoning import Reasoning as Reasoning
 from .chat_model import ChatModel as ChatModel
+from .compound_filter import CompoundFilter as CompoundFilter
+from .reasoning_effort import ReasoningEffort as ReasoningEffort
+from .comparison_filter import ComparisonFilter as ComparisonFilter
 from .function_definition import FunctionDefinition as FunctionDefinition
 from .function_parameters import FunctionParameters as FunctionParameters
 from .response_format_text import ResponseFormatText as ResponseFormatText
src/openai/types/shared_params/chat_model.py
@@ -15,6 +15,9 @@ ChatModel: TypeAlias = Literal[
     "o1-preview-2024-09-12",
     "o1-mini",
     "o1-mini-2024-09-12",
+    "computer-use-preview",
+    "computer-use-preview-2025-02-04",
+    "computer-use-preview-2025-03-11",
     "gpt-4.5-preview",
     "gpt-4.5-preview-2025-02-27",
     "gpt-4o",
src/openai/types/shared_params/comparison_filter.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ComparisonFilter"]
+
+
+class ComparisonFilter(TypedDict, total=False):
+    key: Required[str]
+    """The key to compare against the value."""
+
+    type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
+    """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+
+    - `eq`: equals
+    - `ne`: not equal
+    - `gt`: greater than
+    - `gte`: greater than or equal
+    - `lt`: less than
+    - `lte`: less than or equal
+    """
+
+    value: Required[Union[str, float, bool]]
+    """
+    The value to compare against the attribute key; supports string, number, or
+    boolean types.
+    """
src/openai/types/shared_params/compound_filter.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .comparison_filter import ComparisonFilter
+
+__all__ = ["CompoundFilter", "Filter"]
+
+Filter: TypeAlias = Union[ComparisonFilter, object]
+
+
+class CompoundFilter(TypedDict, total=False):
+    filters: Required[Iterable[Filter]]
+    """Array of filters to combine.
+
+    Items can be `ComparisonFilter` or `CompoundFilter`.
+    """
+
+    type: Required[Literal["and", "or"]]
+    """Type of operation: `and` or `or`."""
src/openai/types/shared_params/reasoning.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared.reasoning_effort import ReasoningEffort
+
+__all__ = ["Reasoning"]
+
+
+class Reasoning(TypedDict, total=False):
+    effort: Required[Optional[ReasoningEffort]]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    generate_summary: Optional[Literal["concise", "detailed"]]
+    """**o-series models only**
+
+    A summary of the reasoning performed by the model. This can be useful for
+    debugging and understanding the model's reasoning process. One of `concise` or
+    `detailed`.
+    """
src/openai/types/shared_params/reasoning_effort.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ReasoningEffort"]
+
+ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
src/openai/types/shared_params/response_format_json_object.py
@@ -9,4 +9,4 @@ __all__ = ["ResponseFormatJSONObject"]
 
 class ResponseFormatJSONObject(TypedDict, total=False):
     type: Required[Literal["json_object"]]
-    """The type of response format being defined: `json_object`"""
+    """The type of response format being defined. Always `json_object`."""
src/openai/types/shared_params/response_format_json_schema.py
@@ -23,20 +23,24 @@ class JSONSchema(TypedDict, total=False):
     """
 
     schema: Dict[str, object]
-    """The schema for the response format, described as a JSON Schema object."""
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
 
     strict: Optional[bool]
-    """Whether to enable strict schema adherence when generating the output.
-
-    If set to true, the model will always follow the exact schema defined in the
-    `schema` field. Only a subset of JSON Schema is supported when `strict` is
-    `true`. To learn more, read the
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
     [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
     """
 
 
 class ResponseFormatJSONSchema(TypedDict, total=False):
     json_schema: Required[JSONSchema]
+    """Structured Outputs configuration options, including a JSON Schema."""
 
     type: Required[Literal["json_schema"]]
-    """The type of response format being defined: `json_schema`"""
+    """The type of response format being defined. Always `json_schema`."""
src/openai/types/shared_params/response_format_text.py
@@ -9,4 +9,4 @@ __all__ = ["ResponseFormatText"]
 
 class ResponseFormatText(TypedDict, total=False):
     type: Required[Literal["text"]]
-    """The type of response format being defined: `text`"""
+    """The type of response format being defined. Always `text`."""
src/openai/types/beta/vector_stores/__init__.py → src/openai/types/vector_stores/__init__.py
@@ -5,6 +5,8 @@ from __future__ import annotations
 from .file_list_params import FileListParams as FileListParams
 from .vector_store_file import VectorStoreFile as VectorStoreFile
 from .file_create_params import FileCreateParams as FileCreateParams
+from .file_update_params import FileUpdateParams as FileUpdateParams
+from .file_content_response import FileContentResponse as FileContentResponse
 from .vector_store_file_batch import VectorStoreFileBatch as VectorStoreFileBatch
 from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams
 from .vector_store_file_deleted import VectorStoreFileDeleted as VectorStoreFileDeleted
src/openai/types/beta/vector_stores/file_batch_create_params.py → src/openai/types/vector_stores/file_batch_create_params.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import List
+from typing import Dict, List, Union, Optional
 from typing_extensions import Required, TypedDict
 
 from ..file_chunking_strategy_param import FileChunkingStrategyParam
@@ -18,6 +18,15 @@ class FileBatchCreateParams(TypedDict, total=False):
     files.
     """
 
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
     chunking_strategy: FileChunkingStrategyParam
     """The chunking strategy used to chunk the file(s).
 
src/openai/types/beta/vector_stores/file_batch_list_files_params.py → src/openai/types/vector_stores/file_batch_list_files_params.py
File renamed without changes
src/openai/types/vector_stores/file_content_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FileContentResponse"]
+
+
+class FileContentResponse(BaseModel):
+    text: Optional[str] = None
+    """The text content"""
+
+    type: Optional[str] = None
+    """The content type (currently only `"text"`)"""
src/openai/types/beta/vector_stores/file_create_params.py → src/openai/types/vector_stores/file_create_params.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+from typing import Dict, Union, Optional
 from typing_extensions import Required, TypedDict
 
 from ..file_chunking_strategy_param import FileChunkingStrategyParam
@@ -17,6 +18,15 @@ class FileCreateParams(TypedDict, total=False):
     files.
     """
 
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
     chunking_strategy: FileChunkingStrategyParam
     """The chunking strategy used to chunk the file(s).
 
src/openai/types/beta/vector_stores/file_list_params.py → src/openai/types/vector_stores/file_list_params.py
File renamed without changes
src/openai/types/vector_stores/file_update_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["FileUpdateParams"]
+
+
+class FileUpdateParams(TypedDict, total=False):
+    vector_store_id: Required[str]
+
+    attributes: Required[Optional[Dict[str, Union[str, float, bool]]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
src/openai/types/beta/vector_stores/vector_store_file.py → src/openai/types/vector_stores/vector_store_file.py
@@ -1,9 +1,9 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Optional
+from typing import Dict, Union, Optional
 from typing_extensions import Literal
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 from ..file_chunking_strategy import FileChunkingStrategy
 
 __all__ = ["VectorStoreFile", "LastError"]
@@ -54,5 +54,14 @@ class VectorStoreFile(BaseModel):
     attached to.
     """
 
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
     chunking_strategy: Optional[FileChunkingStrategy] = None
     """The strategy used to chunk the file."""
src/openai/types/beta/vector_stores/vector_store_file_batch.py → src/openai/types/vector_stores/vector_store_file_batch.py
@@ -2,7 +2,7 @@
 
 from typing_extensions import Literal
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["VectorStoreFileBatch", "FileCounts"]
 
src/openai/types/beta/vector_stores/vector_store_file_deleted.py → src/openai/types/vector_stores/vector_store_file_deleted.py
@@ -2,7 +2,7 @@
 
 from typing_extensions import Literal
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["VectorStoreFileDeleted"]
 
src/openai/types/__init__.py
@@ -8,7 +8,11 @@ from .model import Model as Model
 from .shared import (
     Metadata as Metadata,
     ChatModel as ChatModel,
+    Reasoning as Reasoning,
     ErrorObject as ErrorObject,
+    CompoundFilter as CompoundFilter,
+    ReasoningEffort as ReasoningEffort,
+    ComparisonFilter as ComparisonFilter,
     FunctionDefinition as FunctionDefinition,
     FunctionParameters as FunctionParameters,
     ResponseFormatText as ResponseFormatText,
@@ -27,6 +31,7 @@ from .image_model import ImageModel as ImageModel
 from .file_content import FileContent as FileContent
 from .file_deleted import FileDeleted as FileDeleted
 from .file_purpose import FilePurpose as FilePurpose
+from .vector_store import VectorStore as VectorStore
 from .model_deleted import ModelDeleted as ModelDeleted
 from .embedding_model import EmbeddingModel as EmbeddingModel
 from .images_response import ImagesResponse as ImagesResponse
@@ -40,16 +45,32 @@ from .file_create_params import FileCreateParams as FileCreateParams
 from .batch_create_params import BatchCreateParams as BatchCreateParams
 from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
 from .upload_create_params import UploadCreateParams as UploadCreateParams
+from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
 from .audio_response_format import AudioResponseFormat as AudioResponseFormat
 from .image_generate_params import ImageGenerateParams as ImageGenerateParams
+from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
 from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
 from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
 from .completion_create_params import CompletionCreateParams as CompletionCreateParams
 from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
+from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
 from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
 from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse
+from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
+from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
+from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
 from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
+from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
+from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
 from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions
 from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
+from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
 from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
+from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
 from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
+from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject
+from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam
+from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject
+from .static_file_chunking_strategy_object_param import (
+    StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
+)
src/openai/types/beta/auto_file_chunking_strategy_param.py → src/openai/types/auto_file_chunking_strategy_param.py
File renamed without changes
src/openai/types/beta/file_chunking_strategy.py → src/openai/types/file_chunking_strategy.py
@@ -3,7 +3,7 @@
 from typing import Union
 from typing_extensions import Annotated, TypeAlias
 
-from ..._utils import PropertyInfo
+from .._utils import PropertyInfo
 from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject
 from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject
 
src/openai/types/beta/file_chunking_strategy_param.py → src/openai/types/file_chunking_strategy_param.py
File renamed without changes
src/openai/types/file_create_params.py
@@ -17,10 +17,8 @@ class FileCreateParams(TypedDict, total=False):
     purpose: Required[FilePurpose]
     """The intended purpose of the uploaded file.
 
-    Use "assistants" for
-    [Assistants](https://platform.openai.com/docs/api-reference/assistants) and
-    [Message](https://platform.openai.com/docs/api-reference/messages) files,
-    "vision" for Assistants image file inputs, "batch" for
-    [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for
-    [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning).
+    One of:
+
+    - `assistants`: Used in the Assistants API
+    - `batch`: Used in the Batch API
+    - `fine-tune`: Used for fine-tuning
+    - `vision`: Images used for vision fine-tuning
+    - `user_data`: Flexible file type for any purpose
+    - `evals`: Used for eval data sets
     """
src/openai/types/file_purpose.py
@@ -4,4 +4,4 @@ from typing_extensions import Literal, TypeAlias
 
 __all__ = ["FilePurpose"]
 
-FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision"]
+FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"]
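The `FilePurpose` literal gains `user_data` and `evals`. A minimal usage sketch of uploading with the new flexible purpose; the file path is a placeholder:

from openai import OpenAI

client = OpenAI()

# Upload a file under the new `user_data` purpose (placeholder local path).
uploaded = client.files.create(
    file=open("notes.txt", "rb"),
    purpose="user_data",
)
print(uploaded.id)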
src/openai/types/beta/other_file_chunking_strategy_object.py → src/openai/types/other_file_chunking_strategy_object.py
@@ -2,7 +2,7 @@
 
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["OtherFileChunkingStrategyObject"]
 
src/openai/types/beta/static_file_chunking_strategy.py → src/openai/types/static_file_chunking_strategy.py
@@ -1,7 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["StaticFileChunkingStrategy"]
 
src/openai/types/beta/static_file_chunking_strategy_object.py → src/openai/types/static_file_chunking_strategy_object.py
@@ -2,7 +2,7 @@
 
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .static_file_chunking_strategy import StaticFileChunkingStrategy
 
 __all__ = ["StaticFileChunkingStrategyObject"]
src/openai/types/beta/static_file_chunking_strategy_object_param.py → src/openai/types/static_file_chunking_strategy_object_param.py
File renamed without changes
src/openai/types/beta/static_file_chunking_strategy_param.py → src/openai/types/static_file_chunking_strategy_param.py
File renamed without changes
src/openai/types/beta/vector_store.py → src/openai/types/vector_store.py
@@ -3,8 +3,8 @@
 from typing import Optional
 from typing_extensions import Literal
 
-from ..._models import BaseModel
-from ..shared.metadata import Metadata
+from .._models import BaseModel
+from .shared.metadata import Metadata
 
 __all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
 
src/openai/types/beta/vector_store_create_params.py → src/openai/types/vector_store_create_params.py
@@ -5,7 +5,7 @@ from __future__ import annotations
 from typing import List, Optional
 from typing_extensions import Literal, Required, TypedDict
 
-from ..shared_params.metadata import Metadata
+from .shared_params.metadata import Metadata
 from .file_chunking_strategy_param import FileChunkingStrategyParam
 
 __all__ = ["VectorStoreCreateParams", "ExpiresAfter"]
src/openai/types/beta/vector_store_deleted.py → src/openai/types/vector_store_deleted.py
@@ -2,7 +2,7 @@
 
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["VectorStoreDeleted"]
 
src/openai/types/beta/vector_store_list_params.py → src/openai/types/vector_store_list_params.py
File renamed without changes
src/openai/types/vector_store_search_params.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .shared_params.compound_filter import CompoundFilter
+from .shared_params.comparison_filter import ComparisonFilter
+
+__all__ = ["VectorStoreSearchParams", "Filters", "RankingOptions"]
+
+
+class VectorStoreSearchParams(TypedDict, total=False):
+    query: Required[Union[str, List[str]]]
+    """A query string for a search"""
+
+    filters: Filters
+    """A filter to apply based on file attributes."""
+
+    max_num_results: int
+    """The maximum number of results to return.
+
+    This number should be between 1 and 50 inclusive.
+    """
+
+    ranking_options: RankingOptions
+    """Ranking options for search."""
+
+    rewrite_query: bool
+    """Whether to rewrite the natural language query for vector search."""
+
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(TypedDict, total=False):
+    ranker: Literal["auto", "default-2024-11-15"]
+
+    score_threshold: float
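`VectorStoreSearchParams` backs the new vector store search endpoint. A hedged sketch of how these params might be passed, assuming the resource exposes a `client.vector_stores.search` method and using placeholder IDs and query text:

from openai import OpenAI

client = OpenAI()

# Hypothetical call shape built from VectorStoreSearchParams; the vector store
# ID and query are placeholders.
results = client.vector_stores.search(
    "vs_abc123",
    query="quarterly revenue figures",
    max_num_results=5,
    ranking_options={"ranker": "auto", "score_threshold": 0.5},
    rewrite_query=True,
)
for result in results:
    # Each result is expected to follow VectorStoreSearchResponse below.
    print(result.filename, result.score)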
src/openai/types/vector_store_search_response.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["VectorStoreSearchResponse", "Content"]
+
+
+class Content(BaseModel):
+    text: str
+    """The text content returned from search."""
+
+    type: Literal["text"]
+    """The type of content."""
+
+
+class VectorStoreSearchResponse(BaseModel):
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    content: List[Content]
+    """Content chunks from the file."""
+
+    file_id: str
+    """The ID of the vector store file."""
+
+    filename: str
+    """The name of the vector store file."""
+
+    score: float
+    """The similarity score for the result."""
src/openai/types/beta/vector_store_update_params.py → src/openai/types/vector_store_update_params.py
@@ -5,7 +5,7 @@ from __future__ import annotations
 from typing import Optional
 from typing_extensions import Literal, Required, TypedDict
 
-from ..shared_params.metadata import Metadata
+from .shared_params.metadata import Metadata
 
 __all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
 
src/openai/_client.py
@@ -37,7 +37,9 @@ from .resources.beta import beta
 from .resources.chat import chat
 from .resources.audio import audio
 from .resources.uploads import uploads
+from .resources.responses import responses
 from .resources.fine_tuning import fine_tuning
+from .resources.vector_stores import vector_stores
 
 __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"]
 
@@ -52,9 +54,11 @@ class OpenAI(SyncAPIClient):
     moderations: moderations.Moderations
     models: models.Models
     fine_tuning: fine_tuning.FineTuning
+    vector_stores: vector_stores.VectorStores
     beta: beta.Beta
     batches: batches.Batches
     uploads: uploads.Uploads
+    responses: responses.Responses
     with_raw_response: OpenAIWithRawResponse
     with_streaming_response: OpenAIWithStreamedResponse
 
@@ -149,9 +153,11 @@ class OpenAI(SyncAPIClient):
         self.moderations = moderations.Moderations(self)
         self.models = models.Models(self)
         self.fine_tuning = fine_tuning.FineTuning(self)
+        self.vector_stores = vector_stores.VectorStores(self)
         self.beta = beta.Beta(self)
         self.batches = batches.Batches(self)
         self.uploads = uploads.Uploads(self)
+        self.responses = responses.Responses(self)
         self.with_raw_response = OpenAIWithRawResponse(self)
         self.with_streaming_response = OpenAIWithStreamedResponse(self)
 
@@ -279,9 +285,11 @@ class AsyncOpenAI(AsyncAPIClient):
     moderations: moderations.AsyncModerations
     models: models.AsyncModels
     fine_tuning: fine_tuning.AsyncFineTuning
+    vector_stores: vector_stores.AsyncVectorStores
     beta: beta.AsyncBeta
     batches: batches.AsyncBatches
     uploads: uploads.AsyncUploads
+    responses: responses.AsyncResponses
     with_raw_response: AsyncOpenAIWithRawResponse
     with_streaming_response: AsyncOpenAIWithStreamedResponse
 
@@ -376,9 +384,11 @@ class AsyncOpenAI(AsyncAPIClient):
         self.moderations = moderations.AsyncModerations(self)
         self.models = models.AsyncModels(self)
         self.fine_tuning = fine_tuning.AsyncFineTuning(self)
+        self.vector_stores = vector_stores.AsyncVectorStores(self)
         self.beta = beta.AsyncBeta(self)
         self.batches = batches.AsyncBatches(self)
         self.uploads = uploads.AsyncUploads(self)
+        self.responses = responses.AsyncResponses(self)
         self.with_raw_response = AsyncOpenAIWithRawResponse(self)
         self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self)
 
@@ -507,9 +517,11 @@ class OpenAIWithRawResponse:
         self.moderations = moderations.ModerationsWithRawResponse(client.moderations)
         self.models = models.ModelsWithRawResponse(client.models)
         self.fine_tuning = fine_tuning.FineTuningWithRawResponse(client.fine_tuning)
+        self.vector_stores = vector_stores.VectorStoresWithRawResponse(client.vector_stores)
         self.beta = beta.BetaWithRawResponse(client.beta)
         self.batches = batches.BatchesWithRawResponse(client.batches)
         self.uploads = uploads.UploadsWithRawResponse(client.uploads)
+        self.responses = responses.ResponsesWithRawResponse(client.responses)
 
 
 class AsyncOpenAIWithRawResponse:
@@ -523,9 +535,11 @@ class AsyncOpenAIWithRawResponse:
         self.moderations = moderations.AsyncModerationsWithRawResponse(client.moderations)
         self.models = models.AsyncModelsWithRawResponse(client.models)
         self.fine_tuning = fine_tuning.AsyncFineTuningWithRawResponse(client.fine_tuning)
+        self.vector_stores = vector_stores.AsyncVectorStoresWithRawResponse(client.vector_stores)
         self.beta = beta.AsyncBetaWithRawResponse(client.beta)
         self.batches = batches.AsyncBatchesWithRawResponse(client.batches)
         self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads)
+        self.responses = responses.AsyncResponsesWithRawResponse(client.responses)
 
 
 class OpenAIWithStreamedResponse:
@@ -539,9 +553,11 @@ class OpenAIWithStreamedResponse:
         self.moderations = moderations.ModerationsWithStreamingResponse(client.moderations)
         self.models = models.ModelsWithStreamingResponse(client.models)
         self.fine_tuning = fine_tuning.FineTuningWithStreamingResponse(client.fine_tuning)
+        self.vector_stores = vector_stores.VectorStoresWithStreamingResponse(client.vector_stores)
         self.beta = beta.BetaWithStreamingResponse(client.beta)
         self.batches = batches.BatchesWithStreamingResponse(client.batches)
         self.uploads = uploads.UploadsWithStreamingResponse(client.uploads)
+        self.responses = responses.ResponsesWithStreamingResponse(client.responses)
 
 
 class AsyncOpenAIWithStreamedResponse:
@@ -555,9 +571,11 @@ class AsyncOpenAIWithStreamedResponse:
         self.moderations = moderations.AsyncModerationsWithStreamingResponse(client.moderations)
         self.models = models.AsyncModelsWithStreamingResponse(client.models)
         self.fine_tuning = fine_tuning.AsyncFineTuningWithStreamingResponse(client.fine_tuning)
+        self.vector_stores = vector_stores.AsyncVectorStoresWithStreamingResponse(client.vector_stores)
         self.beta = beta.AsyncBetaWithStreamingResponse(client.beta)
         self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches)
         self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads)
+        self.responses = responses.AsyncResponsesWithStreamingResponse(client.responses)
 
 
 Client = OpenAI
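With this wiring, `vector_stores` and `responses` become first-class client attributes alongside the existing resources. A small sketch of the new surface; the store name, model, and input text are placeholders:

from openai import OpenAI

client = OpenAI()

# Vector stores are now reachable without the `beta` prefix.
store = client.vector_stores.create(name="demo-store")

# The new Responses API lives at client.responses.
response = client.responses.create(
    model="gpt-4o-2024-08-06",
    input="Say hello in one short sentence.",
)
print(response)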
src/openai/_streaming.py
@@ -59,7 +59,7 @@ class Stream(Generic[_T]):
             if sse.data.startswith("[DONE]"):
                 break
 
-            if sse.event is None:
+            if sse.event is None or sse.event.startswith("response."):
                 data = sse.json()
                 if is_mapping(data) and data.get("error"):
                     message = None
@@ -161,7 +161,7 @@ class AsyncStream(Generic[_T]):
             if sse.data.startswith("[DONE]"):
                 break
 
-            if sse.event is None:
+            if sse.event is None or sse.event.startswith("response."):
                 data = sse.json()
                 if is_mapping(data) and data.get("error"):
                     message = None
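The streaming change treats SSE events whose `event` name starts with `response.` the same as unnamed data events, so Responses API payloads are JSON-decoded (and checked for errors) instead of being routed through the legacy named-event path. A hedged sketch of consuming such a stream, assuming `client.responses.create(..., stream=True)` yields typed events with a `type` attribute:

from openai import OpenAI

client = OpenAI()

# Each decoded SSE payload surfaces as an event object whose type mirrors the
# SSE event name (e.g. "response.output_text.delta").
stream = client.responses.create(
    model="gpt-4o-2024-08-06",
    input="Write a haiku about type checkers.",
    stream=True,
)
for event in stream:
    print(event.type)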
tests/api_resources/beta/vector_stores/test_files.py
@@ -1,420 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from openai import OpenAI, AsyncOpenAI
-from tests.utils import assert_matches_type
-from openai.pagination import SyncCursorPage, AsyncCursorPage
-from openai.types.beta.vector_stores import (
-    VectorStoreFile,
-    VectorStoreFileDeleted,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFiles:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @parametrize
-    def test_method_create(self, client: OpenAI) -> None:
-        file = client.beta.vector_stores.files.create(
-            "vs_abc123",
-            file_id="string",
-        )
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    def test_method_create_with_all_params(self, client: OpenAI) -> None:
-        file = client.beta.vector_stores.files.create(
-            "vs_abc123",
-            file_id="string",
-            chunking_strategy={"type": "auto"},
-        )
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    def test_raw_response_create(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.files.with_raw_response.create(
-            "vs_abc123",
-            file_id="string",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        file = response.parse()
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    def test_streaming_response_create(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.files.with_streaming_response.create(
-            "vs_abc123",
-            file_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            file = response.parse()
-            assert_matches_type(VectorStoreFile, file, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @parametrize
-    def test_path_params_create(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.files.with_raw_response.create(
-                "",
-                file_id="string",
-            )
-
-    @parametrize
-    def test_method_retrieve(self, client: OpenAI) -> None:
-        file = client.beta.vector_stores.files.retrieve(
-            "file-abc123",
-            vector_store_id="vs_abc123",
-        )
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    def test_raw_response_retrieve(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.files.with_raw_response.retrieve(
-            "file-abc123",
-            vector_store_id="vs_abc123",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        file = response.parse()
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.files.with_streaming_response.retrieve(
-            "file-abc123",
-            vector_store_id="vs_abc123",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            file = response.parse()
-            assert_matches_type(VectorStoreFile, file, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @parametrize
-    def test_path_params_retrieve(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.files.with_raw_response.retrieve(
-                "file-abc123",
-                vector_store_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
-            client.beta.vector_stores.files.with_raw_response.retrieve(
-                "",
-                vector_store_id="vs_abc123",
-            )
-
-    @parametrize
-    def test_method_list(self, client: OpenAI) -> None:
-        file = client.beta.vector_stores.files.list(
-            "string",
-        )
-        assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
-
-    @parametrize
-    def test_method_list_with_all_params(self, client: OpenAI) -> None:
-        file = client.beta.vector_stores.files.list(
-            "string",
-            after="string",
-            before="string",
-            filter="in_progress",
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
-
-    @parametrize
-    def test_raw_response_list(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.files.with_raw_response.list(
-            "string",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        file = response.parse()
-        assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
-
-    @parametrize
-    def test_streaming_response_list(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.files.with_streaming_response.list(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            file = response.parse()
-            assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @parametrize
-    def test_path_params_list(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.files.with_raw_response.list(
-                "",
-            )
-
-    @parametrize
-    def test_method_delete(self, client: OpenAI) -> None:
-        file = client.beta.vector_stores.files.delete(
-            "string",
-            vector_store_id="string",
-        )
-        assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
-
-    @parametrize
-    def test_raw_response_delete(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.files.with_raw_response.delete(
-            "string",
-            vector_store_id="string",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        file = response.parse()
-        assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
-
-    @parametrize
-    def test_streaming_response_delete(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.files.with_streaming_response.delete(
-            "string",
-            vector_store_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            file = response.parse()
-            assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @parametrize
-    def test_path_params_delete(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.files.with_raw_response.delete(
-                "string",
-                vector_store_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
-            client.beta.vector_stores.files.with_raw_response.delete(
-                "",
-                vector_store_id="string",
-            )
-
-
-class TestAsyncFiles:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @parametrize
-    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
-        file = await async_client.beta.vector_stores.files.create(
-            "vs_abc123",
-            file_id="string",
-        )
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        file = await async_client.beta.vector_stores.files.create(
-            "vs_abc123",
-            file_id="string",
-            chunking_strategy={"type": "auto"},
-        )
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.files.with_raw_response.create(
-            "vs_abc123",
-            file_id="string",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        file = response.parse()
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.files.with_streaming_response.create(
-            "vs_abc123",
-            file_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            file = await response.parse()
-            assert_matches_type(VectorStoreFile, file, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @parametrize
-    async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.files.with_raw_response.create(
-                "",
-                file_id="string",
-            )
-
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
-        file = await async_client.beta.vector_stores.files.retrieve(
-            "file-abc123",
-            vector_store_id="vs_abc123",
-        )
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.files.with_raw_response.retrieve(
-            "file-abc123",
-            vector_store_id="vs_abc123",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        file = response.parse()
-        assert_matches_type(VectorStoreFile, file, path=["response"])
-
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.files.with_streaming_response.retrieve(
-            "file-abc123",
-            vector_store_id="vs_abc123",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            file = await response.parse()
-            assert_matches_type(VectorStoreFile, file, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.files.with_raw_response.retrieve(
-                "file-abc123",
-                vector_store_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
-            await async_client.beta.vector_stores.files.with_raw_response.retrieve(
-                "",
-                vector_store_id="vs_abc123",
-            )
-
-    @parametrize
-    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
-        file = await async_client.beta.vector_stores.files.list(
-            "string",
-        )
-        assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
-
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        file = await async_client.beta.vector_stores.files.list(
-            "string",
-            after="string",
-            before="string",
-            filter="in_progress",
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
-
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.files.with_raw_response.list(
-            "string",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        file = response.parse()
-        assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
-
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.files.with_streaming_response.list(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            file = await response.parse()
-            assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @parametrize
-    async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.files.with_raw_response.list(
-                "",
-            )
-
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
-        file = await async_client.beta.vector_stores.files.delete(
-            "string",
-            vector_store_id="string",
-        )
-        assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
-
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.files.with_raw_response.delete(
-            "string",
-            vector_store_id="string",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        file = response.parse()
-        assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
-
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.files.with_streaming_response.delete(
-            "string",
-            vector_store_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            file = await response.parse()
-            assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.files.with_raw_response.delete(
-                "string",
-                vector_store_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
-            await async_client.beta.vector_stores.files.with_raw_response.delete(
-                "",
-                vector_store_id="string",
-            )
tests/api_resources/chat/test_completions.py
@@ -74,9 +74,9 @@ class TestCompletions:
             presence_penalty=-2,
             reasoning_effort="low",
             response_format={"type": "text"},
-            seed=0,
+            seed=-9007199254740991,
             service_tier="auto",
-            stop="string",
+            stop="\n",
             store=True,
             stream=False,
             stream_options={"include_usage": True},
@@ -96,6 +96,18 @@ class TestCompletions:
             top_logprobs=0,
             top_p=1,
             user="user-1234",
+            web_search_options={
+                "search_context_size": "low",
+                "user_location": {
+                    "approximate": {
+                        "city": "city",
+                        "country": "country",
+                        "region": "region",
+                        "timezone": "timezone",
+                    },
+                    "type": "approximate",
+                },
+            },
         )
         assert_matches_type(ChatCompletion, completion, path=["response"])
 
@@ -189,9 +201,9 @@ class TestCompletions:
             presence_penalty=-2,
             reasoning_effort="low",
             response_format={"type": "text"},
-            seed=0,
+            seed=-9007199254740991,
             service_tier="auto",
-            stop="string",
+            stop="\n",
             store=True,
             stream_options={"include_usage": True},
             temperature=1,
@@ -210,6 +222,18 @@ class TestCompletions:
             top_logprobs=0,
             top_p=1,
             user="user-1234",
+            web_search_options={
+                "search_context_size": "low",
+                "user_location": {
+                    "approximate": {
+                        "city": "city",
+                        "country": "country",
+                        "region": "region",
+                        "timezone": "timezone",
+                    },
+                    "type": "approximate",
+                },
+            },
         )
         completion_stream.response.close()
 
@@ -477,9 +501,9 @@ class TestAsyncCompletions:
             presence_penalty=-2,
             reasoning_effort="low",
             response_format={"type": "text"},
-            seed=0,
+            seed=-9007199254740991,
             service_tier="auto",
-            stop="string",
+            stop="\n",
             store=True,
             stream=False,
             stream_options={"include_usage": True},
@@ -499,6 +523,18 @@ class TestAsyncCompletions:
             top_logprobs=0,
             top_p=1,
             user="user-1234",
+            web_search_options={
+                "search_context_size": "low",
+                "user_location": {
+                    "approximate": {
+                        "city": "city",
+                        "country": "country",
+                        "region": "region",
+                        "timezone": "timezone",
+                    },
+                    "type": "approximate",
+                },
+            },
         )
         assert_matches_type(ChatCompletion, completion, path=["response"])
 
@@ -592,9 +628,9 @@ class TestAsyncCompletions:
             presence_penalty=-2,
             reasoning_effort="low",
             response_format={"type": "text"},
-            seed=0,
+            seed=-9007199254740991,
             service_tier="auto",
-            stop="string",
+            stop="\n",
             store=True,
             stream_options={"include_usage": True},
             temperature=1,
@@ -613,6 +649,18 @@ class TestAsyncCompletions:
             top_logprobs=0,
             top_p=1,
             user="user-1234",
+            web_search_options={
+                "search_context_size": "low",
+                "user_location": {
+                    "approximate": {
+                        "city": "city",
+                        "country": "country",
+                        "region": "region",
+                        "timezone": "timezone",
+                    },
+                    "type": "approximate",
+                },
+            },
         )
         await completion_stream.response.aclose()
 
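The updated completion tests exercise the new `web_search_options` parameter. A hedged example of passing it from application code; the model name and location values are placeholders:

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-search-preview",  # assumed name of a search-capable model
    messages=[{"role": "user", "content": "What changed in the latest SDK release?"}],
    web_search_options={
        "search_context_size": "low",
        "user_location": {
            "type": "approximate",
            "approximate": {"city": "Berlin", "country": "DE", "timezone": "Europe/Berlin"},
        },
    },
)
print(completion.choices[0].message.content)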
tests/api_resources/beta/vector_stores/__init__.py → tests/api_resources/responses/__init__.py
File renamed without changes
tests/api_resources/responses/test_input_items.py
@@ -0,0 +1,121 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncCursorPage, AsyncCursorPage
+from openai.types.responses.response_item_list import Data
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestInputItems:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_list(self, client: OpenAI) -> None:
+        input_item = client.responses.input_items.list(
+            response_id="response_id",
+        )
+        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+
+    @parametrize
+    def test_method_list_with_all_params(self, client: OpenAI) -> None:
+        input_item = client.responses.input_items.list(
+            response_id="response_id",
+            after="after",
+            before="before",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+
+    @parametrize
+    def test_raw_response_list(self, client: OpenAI) -> None:
+        response = client.responses.input_items.with_raw_response.list(
+            response_id="response_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        input_item = response.parse()
+        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list(self, client: OpenAI) -> None:
+        with client.responses.input_items.with_streaming_response.list(
+            response_id="response_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            input_item = response.parse()
+            assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_list(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+            client.responses.input_items.with_raw_response.list(
+                response_id="",
+            )
+
+
+class TestAsyncInputItems:
+    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+        input_item = await async_client.responses.input_items.list(
+            response_id="response_id",
+        )
+        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        input_item = await async_client.responses.input_items.list(
+            response_id="response_id",
+            after="after",
+            before="before",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.responses.input_items.with_raw_response.list(
+            response_id="response_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        input_item = response.parse()
+        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.responses.input_items.with_streaming_response.list(
+            response_id="response_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            input_item = await response.parse()
+            assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+            await async_client.responses.input_items.with_raw_response.list(
+                response_id="",
+            )
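These tests cover the new input-items listing under the Responses resource. A minimal sketch of the corresponding client call, with a placeholder response ID:

from openai import OpenAI

client = OpenAI()

# List the input items that were sent for an earlier response (placeholder ID).
page = client.responses.input_items.list(
    response_id="resp_123",
    limit=10,
    order="asc",
)
for item in page:
    print(item)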
tests/api_resources/vector_stores/__init__.py
@@ -0,0 +1,1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
tests/api_resources/beta/vector_stores/test_file_batches.py → tests/api_resources/vector_stores/test_file_batches.py
@@ -10,7 +10,7 @@ import pytest
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
 from openai.pagination import SyncCursorPage, AsyncCursorPage
-from openai.types.beta.vector_stores import (
+from openai.types.vector_stores import (
     VectorStoreFile,
     VectorStoreFileBatch,
 )
@@ -23,25 +23,26 @@ class TestFileBatches:
 
     @parametrize
     def test_method_create(self, client: OpenAI) -> None:
-        file_batch = client.beta.vector_stores.file_batches.create(
-            "vs_abc123",
+        file_batch = client.vector_stores.file_batches.create(
+            vector_store_id="vs_abc123",
             file_ids=["string"],
         )
         assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
-        file_batch = client.beta.vector_stores.file_batches.create(
-            "vs_abc123",
+        file_batch = client.vector_stores.file_batches.create(
+            vector_store_id="vs_abc123",
             file_ids=["string"],
+            attributes={"foo": "string"},
             chunking_strategy={"type": "auto"},
         )
         assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
 
     @parametrize
     def test_raw_response_create(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.file_batches.with_raw_response.create(
-            "vs_abc123",
+        response = client.vector_stores.file_batches.with_raw_response.create(
+            vector_store_id="vs_abc123",
             file_ids=["string"],
         )
 
@@ -52,8 +53,8 @@ class TestFileBatches:
 
     @parametrize
     def test_streaming_response_create(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.file_batches.with_streaming_response.create(
-            "vs_abc123",
+        with client.vector_stores.file_batches.with_streaming_response.create(
+            vector_store_id="vs_abc123",
             file_ids=["string"],
         ) as response:
             assert not response.is_closed
@@ -67,23 +68,23 @@ class TestFileBatches:
     @parametrize
     def test_path_params_create(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.file_batches.with_raw_response.create(
-                "",
+            client.vector_stores.file_batches.with_raw_response.create(
+                vector_store_id="",
                 file_ids=["string"],
             )
 
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
-        file_batch = client.beta.vector_stores.file_batches.retrieve(
-            "vsfb_abc123",
+        file_batch = client.vector_stores.file_batches.retrieve(
+            batch_id="vsfb_abc123",
             vector_store_id="vs_abc123",
         )
         assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
 
     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.file_batches.with_raw_response.retrieve(
-            "vsfb_abc123",
+        response = client.vector_stores.file_batches.with_raw_response.retrieve(
+            batch_id="vsfb_abc123",
             vector_store_id="vs_abc123",
         )
 
@@ -94,8 +95,8 @@ class TestFileBatches:
 
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.file_batches.with_streaming_response.retrieve(
-            "vsfb_abc123",
+        with client.vector_stores.file_batches.with_streaming_response.retrieve(
+            batch_id="vsfb_abc123",
             vector_store_id="vs_abc123",
         ) as response:
             assert not response.is_closed
@@ -109,30 +110,30 @@ class TestFileBatches:
     @parametrize
     def test_path_params_retrieve(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.file_batches.with_raw_response.retrieve(
-                "vsfb_abc123",
+            client.vector_stores.file_batches.with_raw_response.retrieve(
+                batch_id="vsfb_abc123",
                 vector_store_id="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
-            client.beta.vector_stores.file_batches.with_raw_response.retrieve(
-                "",
+            client.vector_stores.file_batches.with_raw_response.retrieve(
+                batch_id="",
                 vector_store_id="vs_abc123",
             )
 
     @parametrize
     def test_method_cancel(self, client: OpenAI) -> None:
-        file_batch = client.beta.vector_stores.file_batches.cancel(
-            "string",
-            vector_store_id="string",
+        file_batch = client.vector_stores.file_batches.cancel(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         )
         assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
 
     @parametrize
     def test_raw_response_cancel(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.file_batches.with_raw_response.cancel(
-            "string",
-            vector_store_id="string",
+        response = client.vector_stores.file_batches.with_raw_response.cancel(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         )
 
         assert response.is_closed is True
@@ -142,9 +143,9 @@ class TestFileBatches:
 
     @parametrize
     def test_streaming_response_cancel(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.file_batches.with_streaming_response.cancel(
-            "string",
-            vector_store_id="string",
+        with client.vector_stores.file_batches.with_streaming_response.cancel(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -157,32 +158,32 @@ class TestFileBatches:
     @parametrize
     def test_path_params_cancel(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.file_batches.with_raw_response.cancel(
-                "string",
+            client.vector_stores.file_batches.with_raw_response.cancel(
+                batch_id="batch_id",
                 vector_store_id="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
-            client.beta.vector_stores.file_batches.with_raw_response.cancel(
-                "",
-                vector_store_id="string",
+            client.vector_stores.file_batches.with_raw_response.cancel(
+                batch_id="",
+                vector_store_id="vector_store_id",
             )
 
     @parametrize
     def test_method_list_files(self, client: OpenAI) -> None:
-        file_batch = client.beta.vector_stores.file_batches.list_files(
-            "string",
-            vector_store_id="string",
+        file_batch = client.vector_stores.file_batches.list_files(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         )
         assert_matches_type(SyncCursorPage[VectorStoreFile], file_batch, path=["response"])
 
     @parametrize
     def test_method_list_files_with_all_params(self, client: OpenAI) -> None:
-        file_batch = client.beta.vector_stores.file_batches.list_files(
-            "string",
-            vector_store_id="string",
-            after="string",
-            before="string",
+        file_batch = client.vector_stores.file_batches.list_files(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
+            after="after",
+            before="before",
             filter="in_progress",
             limit=0,
             order="asc",
@@ -191,9 +192,9 @@ class TestFileBatches:
 
     @parametrize
     def test_raw_response_list_files(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.file_batches.with_raw_response.list_files(
-            "string",
-            vector_store_id="string",
+        response = client.vector_stores.file_batches.with_raw_response.list_files(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         )
 
         assert response.is_closed is True
@@ -203,9 +204,9 @@ class TestFileBatches:
 
     @parametrize
     def test_streaming_response_list_files(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.file_batches.with_streaming_response.list_files(
-            "string",
-            vector_store_id="string",
+        with client.vector_stores.file_batches.with_streaming_response.list_files(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -218,15 +219,15 @@ class TestFileBatches:
     @parametrize
     def test_path_params_list_files(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.file_batches.with_raw_response.list_files(
-                "string",
+            client.vector_stores.file_batches.with_raw_response.list_files(
+                batch_id="batch_id",
                 vector_store_id="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
-            client.beta.vector_stores.file_batches.with_raw_response.list_files(
-                "",
-                vector_store_id="string",
+            client.vector_stores.file_batches.with_raw_response.list_files(
+                batch_id="",
+                vector_store_id="vector_store_id",
             )
 
 
@@ -235,25 +236,26 @@ class TestAsyncFileBatches:
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
-        file_batch = await async_client.beta.vector_stores.file_batches.create(
-            "vs_abc123",
+        file_batch = await async_client.vector_stores.file_batches.create(
+            vector_store_id="vs_abc123",
             file_ids=["string"],
         )
         assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        file_batch = await async_client.beta.vector_stores.file_batches.create(
-            "vs_abc123",
+        file_batch = await async_client.vector_stores.file_batches.create(
+            vector_store_id="vs_abc123",
             file_ids=["string"],
+            attributes={"foo": "string"},
             chunking_strategy={"type": "auto"},
         )
         assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
 
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.file_batches.with_raw_response.create(
-            "vs_abc123",
+        response = await async_client.vector_stores.file_batches.with_raw_response.create(
+            vector_store_id="vs_abc123",
             file_ids=["string"],
         )
 
@@ -264,8 +266,8 @@ class TestAsyncFileBatches:
 
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.file_batches.with_streaming_response.create(
-            "vs_abc123",
+        async with async_client.vector_stores.file_batches.with_streaming_response.create(
+            vector_store_id="vs_abc123",
             file_ids=["string"],
         ) as response:
             assert not response.is_closed
@@ -279,23 +281,23 @@ class TestAsyncFileBatches:
     @parametrize
     async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.file_batches.with_raw_response.create(
-                "",
+            await async_client.vector_stores.file_batches.with_raw_response.create(
+                vector_store_id="",
                 file_ids=["string"],
             )
 
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
-        file_batch = await async_client.beta.vector_stores.file_batches.retrieve(
-            "vsfb_abc123",
+        file_batch = await async_client.vector_stores.file_batches.retrieve(
+            batch_id="vsfb_abc123",
             vector_store_id="vs_abc123",
         )
         assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
 
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve(
-            "vsfb_abc123",
+        response = await async_client.vector_stores.file_batches.with_raw_response.retrieve(
+            batch_id="vsfb_abc123",
             vector_store_id="vs_abc123",
         )
 
@@ -306,8 +308,8 @@ class TestAsyncFileBatches:
 
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.file_batches.with_streaming_response.retrieve(
-            "vsfb_abc123",
+        async with async_client.vector_stores.file_batches.with_streaming_response.retrieve(
+            batch_id="vsfb_abc123",
             vector_store_id="vs_abc123",
         ) as response:
             assert not response.is_closed
@@ -321,30 +323,30 @@ class TestAsyncFileBatches:
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve(
-                "vsfb_abc123",
+            await async_client.vector_stores.file_batches.with_raw_response.retrieve(
+                batch_id="vsfb_abc123",
                 vector_store_id="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
-            await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve(
-                "",
+            await async_client.vector_stores.file_batches.with_raw_response.retrieve(
+                batch_id="",
                 vector_store_id="vs_abc123",
             )
 
     @parametrize
     async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
-        file_batch = await async_client.beta.vector_stores.file_batches.cancel(
-            "string",
-            vector_store_id="string",
+        file_batch = await async_client.vector_stores.file_batches.cancel(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         )
         assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
 
     @parametrize
     async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.file_batches.with_raw_response.cancel(
-            "string",
-            vector_store_id="string",
+        response = await async_client.vector_stores.file_batches.with_raw_response.cancel(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         )
 
         assert response.is_closed is True
@@ -354,9 +356,9 @@ class TestAsyncFileBatches:
 
     @parametrize
     async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.file_batches.with_streaming_response.cancel(
-            "string",
-            vector_store_id="string",
+        async with async_client.vector_stores.file_batches.with_streaming_response.cancel(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -369,32 +371,32 @@ class TestAsyncFileBatches:
     @parametrize
     async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.file_batches.with_raw_response.cancel(
-                "string",
+            await async_client.vector_stores.file_batches.with_raw_response.cancel(
+                batch_id="batch_id",
                 vector_store_id="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
-            await async_client.beta.vector_stores.file_batches.with_raw_response.cancel(
-                "",
-                vector_store_id="string",
+            await async_client.vector_stores.file_batches.with_raw_response.cancel(
+                batch_id="",
+                vector_store_id="vector_store_id",
             )
 
     @parametrize
     async def test_method_list_files(self, async_client: AsyncOpenAI) -> None:
-        file_batch = await async_client.beta.vector_stores.file_batches.list_files(
-            "string",
-            vector_store_id="string",
+        file_batch = await async_client.vector_stores.file_batches.list_files(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         )
         assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
 
     @parametrize
     async def test_method_list_files_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        file_batch = await async_client.beta.vector_stores.file_batches.list_files(
-            "string",
-            vector_store_id="string",
-            after="string",
-            before="string",
+        file_batch = await async_client.vector_stores.file_batches.list_files(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
+            after="after",
+            before="before",
             filter="in_progress",
             limit=0,
             order="asc",
@@ -403,9 +405,9 @@ class TestAsyncFileBatches:
 
     @parametrize
     async def test_raw_response_list_files(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.file_batches.with_raw_response.list_files(
-            "string",
-            vector_store_id="string",
+        response = await async_client.vector_stores.file_batches.with_raw_response.list_files(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         )
 
         assert response.is_closed is True
@@ -415,9 +417,9 @@ class TestAsyncFileBatches:
 
     @parametrize
     async def test_streaming_response_list_files(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.file_batches.with_streaming_response.list_files(
-            "string",
-            vector_store_id="string",
+        async with async_client.vector_stores.file_batches.with_streaming_response.list_files(
+            batch_id="batch_id",
+            vector_store_id="vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -430,13 +432,13 @@ class TestAsyncFileBatches:
     @parametrize
     async def test_path_params_list_files(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.file_batches.with_raw_response.list_files(
-                "string",
+            await async_client.vector_stores.file_batches.with_raw_response.list_files(
+                batch_id="batch_id",
                 vector_store_id="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
-            await async_client.beta.vector_stores.file_batches.with_raw_response.list_files(
-                "",
-                vector_store_id="string",
+            await async_client.vector_stores.file_batches.with_raw_response.list_files(
+                batch_id="",
+                vector_store_id="vector_store_id",
             )
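The async file-batch tests above reflect two mechanical changes from this commit: the resource moved from `client.beta.vector_stores` to `client.vector_stores`, and the generated calls now pass `batch_id` as a keyword argument. A minimal usage sketch of the updated call shape follows; the batch and vector store IDs are placeholders, not values from the diff.

import asyncio

from openai import AsyncOpenAI


async def cancel_batch() -> None:
    client = AsyncOpenAI()
    # Cancel an in-flight file batch via the promoted, non-beta namespace.
    batch = await client.vector_stores.file_batches.cancel(
        batch_id="vsfb_abc123",       # placeholder batch ID
        vector_store_id="vs_abc123",  # placeholder vector store ID
    )
    print(batch.status)


asyncio.run(cancel_batch())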
tests/api_resources/vector_stores/test_files.py
@@ -0,0 +1,625 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
+from openai.types.vector_stores import (
+    VectorStoreFile,
+    FileContentResponse,
+    VectorStoreFileDeleted,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestFiles:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_create(self, client: OpenAI) -> None:
+        file = client.vector_stores.files.create(
+            vector_store_id="vs_abc123",
+            file_id="file_id",
+        )
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    def test_method_create_with_all_params(self, client: OpenAI) -> None:
+        file = client.vector_stores.files.create(
+            vector_store_id="vs_abc123",
+            file_id="file_id",
+            attributes={"foo": "string"},
+            chunking_strategy={"type": "auto"},
+        )
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    def test_raw_response_create(self, client: OpenAI) -> None:
+        response = client.vector_stores.files.with_raw_response.create(
+            vector_store_id="vs_abc123",
+            file_id="file_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create(self, client: OpenAI) -> None:
+        with client.vector_stores.files.with_streaming_response.create(
+            vector_store_id="vs_abc123",
+            file_id="file_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = response.parse()
+            assert_matches_type(VectorStoreFile, file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_create(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            client.vector_stores.files.with_raw_response.create(
+                vector_store_id="",
+                file_id="file_id",
+            )
+
+    @parametrize
+    def test_method_retrieve(self, client: OpenAI) -> None:
+        file = client.vector_stores.files.retrieve(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        )
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    def test_raw_response_retrieve(self, client: OpenAI) -> None:
+        response = client.vector_stores.files.with_raw_response.retrieve(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+        with client.vector_stores.files.with_streaming_response.retrieve(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = response.parse()
+            assert_matches_type(VectorStoreFile, file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_retrieve(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            client.vector_stores.files.with_raw_response.retrieve(
+                file_id="file-abc123",
+                vector_store_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+            client.vector_stores.files.with_raw_response.retrieve(
+                file_id="",
+                vector_store_id="vs_abc123",
+            )
+
+    @parametrize
+    def test_method_update(self, client: OpenAI) -> None:
+        file = client.vector_stores.files.update(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+            attributes={"foo": "string"},
+        )
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    def test_raw_response_update(self, client: OpenAI) -> None:
+        response = client.vector_stores.files.with_raw_response.update(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+            attributes={"foo": "string"},
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    def test_streaming_response_update(self, client: OpenAI) -> None:
+        with client.vector_stores.files.with_streaming_response.update(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+            attributes={"foo": "string"},
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = response.parse()
+            assert_matches_type(VectorStoreFile, file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_update(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            client.vector_stores.files.with_raw_response.update(
+                file_id="file-abc123",
+                vector_store_id="",
+                attributes={"foo": "string"},
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+            client.vector_stores.files.with_raw_response.update(
+                file_id="",
+                vector_store_id="vs_abc123",
+                attributes={"foo": "string"},
+            )
+
+    @parametrize
+    def test_method_list(self, client: OpenAI) -> None:
+        file = client.vector_stores.files.list(
+            vector_store_id="vector_store_id",
+        )
+        assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
+
+    @parametrize
+    def test_method_list_with_all_params(self, client: OpenAI) -> None:
+        file = client.vector_stores.files.list(
+            vector_store_id="vector_store_id",
+            after="after",
+            before="before",
+            filter="in_progress",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
+
+    @parametrize
+    def test_raw_response_list(self, client: OpenAI) -> None:
+        response = client.vector_stores.files.with_raw_response.list(
+            vector_store_id="vector_store_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list(self, client: OpenAI) -> None:
+        with client.vector_stores.files.with_streaming_response.list(
+            vector_store_id="vector_store_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = response.parse()
+            assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_list(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            client.vector_stores.files.with_raw_response.list(
+                vector_store_id="",
+            )
+
+    @parametrize
+    def test_method_delete(self, client: OpenAI) -> None:
+        file = client.vector_stores.files.delete(
+            file_id="file_id",
+            vector_store_id="vector_store_id",
+        )
+        assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
+
+    @parametrize
+    def test_raw_response_delete(self, client: OpenAI) -> None:
+        response = client.vector_stores.files.with_raw_response.delete(
+            file_id="file_id",
+            vector_store_id="vector_store_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
+
+    @parametrize
+    def test_streaming_response_delete(self, client: OpenAI) -> None:
+        with client.vector_stores.files.with_streaming_response.delete(
+            file_id="file_id",
+            vector_store_id="vector_store_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = response.parse()
+            assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_delete(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            client.vector_stores.files.with_raw_response.delete(
+                file_id="file_id",
+                vector_store_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+            client.vector_stores.files.with_raw_response.delete(
+                file_id="",
+                vector_store_id="vector_store_id",
+            )
+
+    @parametrize
+    def test_method_content(self, client: OpenAI) -> None:
+        file = client.vector_stores.files.content(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        )
+        assert_matches_type(SyncPage[FileContentResponse], file, path=["response"])
+
+    @parametrize
+    def test_raw_response_content(self, client: OpenAI) -> None:
+        response = client.vector_stores.files.with_raw_response.content(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(SyncPage[FileContentResponse], file, path=["response"])
+
+    @parametrize
+    def test_streaming_response_content(self, client: OpenAI) -> None:
+        with client.vector_stores.files.with_streaming_response.content(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = response.parse()
+            assert_matches_type(SyncPage[FileContentResponse], file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_content(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            client.vector_stores.files.with_raw_response.content(
+                file_id="file-abc123",
+                vector_store_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+            client.vector_stores.files.with_raw_response.content(
+                file_id="",
+                vector_store_id="vs_abc123",
+            )
+
+
+class TestAsyncFiles:
+    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.vector_stores.files.create(
+            vector_store_id="vs_abc123",
+            file_id="file_id",
+        )
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.vector_stores.files.create(
+            vector_store_id="vs_abc123",
+            file_id="file_id",
+            attributes={"foo": "string"},
+            chunking_strategy={"type": "auto"},
+        )
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.vector_stores.files.with_raw_response.create(
+            vector_store_id="vs_abc123",
+            file_id="file_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.vector_stores.files.with_streaming_response.create(
+            vector_store_id="vs_abc123",
+            file_id="file_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = await response.parse()
+            assert_matches_type(VectorStoreFile, file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.create(
+                vector_store_id="",
+                file_id="file_id",
+            )
+
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.vector_stores.files.retrieve(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        )
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.vector_stores.files.with_raw_response.retrieve(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.vector_stores.files.with_streaming_response.retrieve(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = await response.parse()
+            assert_matches_type(VectorStoreFile, file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.retrieve(
+                file_id="file-abc123",
+                vector_store_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.retrieve(
+                file_id="",
+                vector_store_id="vs_abc123",
+            )
+
+    @parametrize
+    async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.vector_stores.files.update(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+            attributes={"foo": "string"},
+        )
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.vector_stores.files.with_raw_response.update(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+            attributes={"foo": "string"},
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(VectorStoreFile, file, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.vector_stores.files.with_streaming_response.update(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+            attributes={"foo": "string"},
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = await response.parse()
+            assert_matches_type(VectorStoreFile, file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.update(
+                file_id="file-abc123",
+                vector_store_id="",
+                attributes={"foo": "string"},
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.update(
+                file_id="",
+                vector_store_id="vs_abc123",
+                attributes={"foo": "string"},
+            )
+
+    @parametrize
+    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.vector_stores.files.list(
+            vector_store_id="vector_store_id",
+        )
+        assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
+
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.vector_stores.files.list(
+            vector_store_id="vector_store_id",
+            after="after",
+            before="before",
+            filter="in_progress",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.vector_stores.files.with_raw_response.list(
+            vector_store_id="vector_store_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.vector_stores.files.with_streaming_response.list(
+            vector_store_id="vector_store_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = await response.parse()
+            assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.list(
+                vector_store_id="",
+            )
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.vector_stores.files.delete(
+            file_id="file_id",
+            vector_store_id="vector_store_id",
+        )
+        assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.vector_stores.files.with_raw_response.delete(
+            file_id="file_id",
+            vector_store_id="vector_store_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.vector_stores.files.with_streaming_response.delete(
+            file_id="file_id",
+            vector_store_id="vector_store_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = await response.parse()
+            assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.delete(
+                file_id="file_id",
+                vector_store_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.delete(
+                file_id="",
+                vector_store_id="vector_store_id",
+            )
+
+    @parametrize
+    async def test_method_content(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.vector_stores.files.content(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        )
+        assert_matches_type(AsyncPage[FileContentResponse], file, path=["response"])
+
+    @parametrize
+    async def test_raw_response_content(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.vector_stores.files.with_raw_response.content(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        file = response.parse()
+        assert_matches_type(AsyncPage[FileContentResponse], file, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_content(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.vector_stores.files.with_streaming_response.content(
+            file_id="file-abc123",
+            vector_store_id="vs_abc123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            file = await response.parse()
+            assert_matches_type(AsyncPage[FileContentResponse], file, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_content(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.content(
+                file_id="file-abc123",
+                vector_store_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+            await async_client.vector_stores.files.with_raw_response.content(
+                file_id="",
+                vector_store_id="vs_abc123",
+            )
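As a rough companion to the generated tests above, here is a minimal sketch of the promoted `client.vector_stores.files` sub-resource. It only mirrors the create/list/content methods the tests exercise; the file and vector store IDs are placeholders, and printing `status` is purely illustrative.

from openai import OpenAI

client = OpenAI()

# Attach an existing file to a vector store (IDs are placeholders).
vs_file = client.vector_stores.files.create(
    vector_store_id="vs_abc123",
    file_id="file-abc123",
)

# Page through the files attached to the store.
for f in client.vector_stores.files.list(vector_store_id="vs_abc123"):
    print(f.id, f.status)

# Read back the parsed content of one attached file.
for chunk in client.vector_stores.files.content(
    file_id=vs_file.id,
    vector_store_id="vs_abc123",
):
    print(chunk)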
tests/api_resources/test_responses.py
@@ -0,0 +1,498 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types.responses import Response
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestResponses:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_create_overload_1(self, client: OpenAI) -> None:
+        response = client.responses.create(
+            input="string",
+            model="gpt-4o",
+        )
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
+        response = client.responses.create(
+            input="string",
+            model="gpt-4o",
+            include=["file_search_call.results"],
+            instructions="instructions",
+            max_output_tokens=0,
+            metadata={"foo": "string"},
+            parallel_tool_calls=True,
+            previous_response_id="previous_response_id",
+            reasoning={
+                "effort": "low",
+                "generate_summary": "concise",
+            },
+            store=True,
+            stream=False,
+            temperature=1,
+            text={"format": {"type": "text"}},
+            tool_choice="none",
+            tools=[
+                {
+                    "type": "file_search",
+                    "vector_store_ids": ["string"],
+                    "filters": {
+                        "key": "key",
+                        "type": "eq",
+                        "value": "string",
+                    },
+                    "max_num_results": 0,
+                    "ranking_options": {
+                        "ranker": "auto",
+                        "score_threshold": 0,
+                    },
+                }
+            ],
+            top_p=1,
+            truncation="auto",
+            user="user-1234",
+        )
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
+        http_response = client.responses.with_raw_response.create(
+            input="string",
+            model="gpt-4o",
+        )
+
+        assert http_response.is_closed is True
+        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+        response = http_response.parse()
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
+        with client.responses.with_streaming_response.create(
+            input="string",
+            model="gpt-4o",
+        ) as http_response:
+            assert not http_response.is_closed
+            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            response = http_response.parse()
+            assert_matches_type(Response, response, path=["response"])
+
+        assert cast(Any, http_response.is_closed) is True
+
+    @parametrize
+    def test_method_create_overload_2(self, client: OpenAI) -> None:
+        response_stream = client.responses.create(
+            input="string",
+            model="gpt-4o",
+            stream=True,
+        )
+        response_stream.response.close()
+
+    @parametrize
+    def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
+        response_stream = client.responses.create(
+            input="string",
+            model="gpt-4o",
+            stream=True,
+            include=["file_search_call.results"],
+            instructions="instructions",
+            max_output_tokens=0,
+            metadata={"foo": "string"},
+            parallel_tool_calls=True,
+            previous_response_id="previous_response_id",
+            reasoning={
+                "effort": "low",
+                "generate_summary": "concise",
+            },
+            store=True,
+            temperature=1,
+            text={"format": {"type": "text"}},
+            tool_choice="none",
+            tools=[
+                {
+                    "type": "file_search",
+                    "vector_store_ids": ["string"],
+                    "filters": {
+                        "key": "key",
+                        "type": "eq",
+                        "value": "string",
+                    },
+                    "max_num_results": 0,
+                    "ranking_options": {
+                        "ranker": "auto",
+                        "score_threshold": 0,
+                    },
+                }
+            ],
+            top_p=1,
+            truncation="auto",
+            user="user-1234",
+        )
+        response_stream.response.close()
+
+    @parametrize
+    def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
+        response = client.responses.with_raw_response.create(
+            input="string",
+            model="gpt-4o",
+            stream=True,
+        )
+
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        stream = response.parse()
+        stream.close()
+
+    @parametrize
+    def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
+        with client.responses.with_streaming_response.create(
+            input="string",
+            model="gpt-4o",
+            stream=True,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            stream = response.parse()
+            stream.close()
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_method_retrieve(self, client: OpenAI) -> None:
+        response = client.responses.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+        )
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
+        response = client.responses.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            include=["file_search_call.results"],
+        )
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    def test_raw_response_retrieve(self, client: OpenAI) -> None:
+        http_response = client.responses.with_raw_response.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+        )
+
+        assert http_response.is_closed is True
+        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+        response = http_response.parse()
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+        with client.responses.with_streaming_response.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+        ) as http_response:
+            assert not http_response.is_closed
+            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            response = http_response.parse()
+            assert_matches_type(Response, response, path=["response"])
+
+        assert cast(Any, http_response.is_closed) is True
+
+    @parametrize
+    def test_path_params_retrieve(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+            client.responses.with_raw_response.retrieve(
+                response_id="",
+            )
+
+    @parametrize
+    def test_method_delete(self, client: OpenAI) -> None:
+        response = client.responses.delete(
+            "resp_677efb5139a88190b512bc3fef8e535d",
+        )
+        assert response is None
+
+    @parametrize
+    def test_raw_response_delete(self, client: OpenAI) -> None:
+        http_response = client.responses.with_raw_response.delete(
+            "resp_677efb5139a88190b512bc3fef8e535d",
+        )
+
+        assert http_response.is_closed is True
+        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+        response = http_response.parse()
+        assert response is None
+
+    @parametrize
+    def test_streaming_response_delete(self, client: OpenAI) -> None:
+        with client.responses.with_streaming_response.delete(
+            "resp_677efb5139a88190b512bc3fef8e535d",
+        ) as http_response:
+            assert not http_response.is_closed
+            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            response = http_response.parse()
+            assert response is None
+
+        assert cast(Any, http_response.is_closed) is True
+
+    @parametrize
+    def test_path_params_delete(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+            client.responses.with_raw_response.delete(
+                "",
+            )
+
+
+class TestAsyncResponses:
+    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.responses.create(
+            input="string",
+            model="gpt-4o",
+        )
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.responses.create(
+            input="string",
+            model="gpt-4o",
+            include=["file_search_call.results"],
+            instructions="instructions",
+            max_output_tokens=0,
+            metadata={"foo": "string"},
+            parallel_tool_calls=True,
+            previous_response_id="previous_response_id",
+            reasoning={
+                "effort": "low",
+                "generate_summary": "concise",
+            },
+            store=True,
+            stream=False,
+            temperature=1,
+            text={"format": {"type": "text"}},
+            tool_choice="none",
+            tools=[
+                {
+                    "type": "file_search",
+                    "vector_store_ids": ["string"],
+                    "filters": {
+                        "key": "key",
+                        "type": "eq",
+                        "value": "string",
+                    },
+                    "max_num_results": 0,
+                    "ranking_options": {
+                        "ranker": "auto",
+                        "score_threshold": 0,
+                    },
+                }
+            ],
+            top_p=1,
+            truncation="auto",
+            user="user-1234",
+        )
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
+        http_response = await async_client.responses.with_raw_response.create(
+            input="string",
+            model="gpt-4o",
+        )
+
+        assert http_response.is_closed is True
+        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+        response = http_response.parse()
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.responses.with_streaming_response.create(
+            input="string",
+            model="gpt-4o",
+        ) as http_response:
+            assert not http_response.is_closed
+            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            response = await http_response.parse()
+            assert_matches_type(Response, response, path=["response"])
+
+        assert cast(Any, http_response.is_closed) is True
+
+    @parametrize
+    async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
+        response_stream = await async_client.responses.create(
+            input="string",
+            model="gpt-4o",
+            stream=True,
+        )
+        await response_stream.response.aclose()
+
+    @parametrize
+    async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
+        response_stream = await async_client.responses.create(
+            input="string",
+            model="gpt-4o",
+            stream=True,
+            include=["file_search_call.results"],
+            instructions="instructions",
+            max_output_tokens=0,
+            metadata={"foo": "string"},
+            parallel_tool_calls=True,
+            previous_response_id="previous_response_id",
+            reasoning={
+                "effort": "low",
+                "generate_summary": "concise",
+            },
+            store=True,
+            temperature=1,
+            text={"format": {"type": "text"}},
+            tool_choice="none",
+            tools=[
+                {
+                    "type": "file_search",
+                    "vector_store_ids": ["string"],
+                    "filters": {
+                        "key": "key",
+                        "type": "eq",
+                        "value": "string",
+                    },
+                    "max_num_results": 0,
+                    "ranking_options": {
+                        "ranker": "auto",
+                        "score_threshold": 0,
+                    },
+                }
+            ],
+            top_p=1,
+            truncation="auto",
+            user="user-1234",
+        )
+        await response_stream.response.aclose()
+
+    @parametrize
+    async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.responses.with_raw_response.create(
+            input="string",
+            model="gpt-4o",
+            stream=True,
+        )
+
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        stream = response.parse()
+        await stream.close()
+
+    @parametrize
+    async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.responses.with_streaming_response.create(
+            input="string",
+            model="gpt-4o",
+            stream=True,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            stream = await response.parse()
+            await stream.close()
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.responses.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+        )
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.responses.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            include=["file_search_call.results"],
+        )
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        http_response = await async_client.responses.with_raw_response.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+        )
+
+        assert http_response.is_closed is True
+        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+        response = http_response.parse()
+        assert_matches_type(Response, response, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.responses.with_streaming_response.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+        ) as http_response:
+            assert not http_response.is_closed
+            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            response = await http_response.parse()
+            assert_matches_type(Response, response, path=["response"])
+
+        assert cast(Any, http_response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+            await async_client.responses.with_raw_response.retrieve(
+                response_id="",
+            )
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.responses.delete(
+            "resp_677efb5139a88190b512bc3fef8e535d",
+        )
+        assert response is None
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+        http_response = await async_client.responses.with_raw_response.delete(
+            "resp_677efb5139a88190b512bc3fef8e535d",
+        )
+
+        assert http_response.is_closed is True
+        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+        response = http_response.parse()
+        assert response is None
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.responses.with_streaming_response.delete(
+            "resp_677efb5139a88190b512bc3fef8e535d",
+        ) as http_response:
+            assert not http_response.is_closed
+            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            response = await http_response.parse()
+            assert response is None
+
+        assert cast(Any, http_response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+            await async_client.responses.with_raw_response.delete(
+                "",
+            )
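The new test_responses.py above exercises the top-level `client.responses` resource added by this commit. The following is a minimal, non-streaming sketch based only on the calls covered in those tests; the input text is an illustrative placeholder, and retrieving then deleting the same response simply strings the tested methods together.

from openai import OpenAI

client = OpenAI()

# Create a response with the new /v1/responses endpoint (non-streaming).
response = client.responses.create(
    input="say hello",  # illustrative prompt
    model="gpt-4o",
)
print(response.id)

# Fetch the same response again, then delete it.
retrieved = client.responses.retrieve(response_id=response.id)
print(retrieved.status)
client.responses.delete(response.id)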
tests/api_resources/beta/test_vector_stores.py → tests/api_resources/test_vector_stores.py
@@ -9,11 +9,12 @@ import pytest
 
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
-from openai.pagination import SyncCursorPage, AsyncCursorPage
-from openai.types.beta import (
+from openai.types import (
     VectorStore,
     VectorStoreDeleted,
+    VectorStoreSearchResponse,
 )
+from openai.pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -23,12 +24,12 @@ class TestVectorStores:
 
     @parametrize
     def test_method_create(self, client: OpenAI) -> None:
-        vector_store = client.beta.vector_stores.create()
+        vector_store = client.vector_stores.create()
         assert_matches_type(VectorStore, vector_store, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
-        vector_store = client.beta.vector_stores.create(
+        vector_store = client.vector_stores.create(
             chunking_strategy={"type": "auto"},
             expires_after={
                 "anchor": "last_active_at",
@@ -42,7 +43,7 @@ class TestVectorStores:
 
     @parametrize
     def test_raw_response_create(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.with_raw_response.create()
+        response = client.vector_stores.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -51,7 +52,7 @@ class TestVectorStores:
 
     @parametrize
     def test_streaming_response_create(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.with_streaming_response.create() as response:
+        with client.vector_stores.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -62,15 +63,15 @@ class TestVectorStores:
 
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
-        vector_store = client.beta.vector_stores.retrieve(
-            "string",
+        vector_store = client.vector_stores.retrieve(
+            "vector_store_id",
         )
         assert_matches_type(VectorStore, vector_store, path=["response"])
 
     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.with_raw_response.retrieve(
-            "string",
+        response = client.vector_stores.with_raw_response.retrieve(
+            "vector_store_id",
         )
 
         assert response.is_closed is True
@@ -80,8 +81,8 @@ class TestVectorStores:
 
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.with_streaming_response.retrieve(
-            "string",
+        with client.vector_stores.with_streaming_response.retrieve(
+            "vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -94,21 +95,21 @@ class TestVectorStores:
     @parametrize
     def test_path_params_retrieve(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.with_raw_response.retrieve(
+            client.vector_stores.with_raw_response.retrieve(
                 "",
             )
 
     @parametrize
     def test_method_update(self, client: OpenAI) -> None:
-        vector_store = client.beta.vector_stores.update(
-            "string",
+        vector_store = client.vector_stores.update(
+            vector_store_id="vector_store_id",
         )
         assert_matches_type(VectorStore, vector_store, path=["response"])
 
     @parametrize
     def test_method_update_with_all_params(self, client: OpenAI) -> None:
-        vector_store = client.beta.vector_stores.update(
-            "string",
+        vector_store = client.vector_stores.update(
+            vector_store_id="vector_store_id",
             expires_after={
                 "anchor": "last_active_at",
                 "days": 1,
@@ -120,8 +121,8 @@ class TestVectorStores:
 
     @parametrize
     def test_raw_response_update(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.with_raw_response.update(
-            "string",
+        response = client.vector_stores.with_raw_response.update(
+            vector_store_id="vector_store_id",
         )
 
         assert response.is_closed is True
@@ -131,8 +132,8 @@ class TestVectorStores:
 
     @parametrize
     def test_streaming_response_update(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.with_streaming_response.update(
-            "string",
+        with client.vector_stores.with_streaming_response.update(
+            vector_store_id="vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -145,20 +146,20 @@ class TestVectorStores:
     @parametrize
     def test_path_params_update(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.with_raw_response.update(
-                "",
+            client.vector_stores.with_raw_response.update(
+                vector_store_id="",
             )
 
     @parametrize
     def test_method_list(self, client: OpenAI) -> None:
-        vector_store = client.beta.vector_stores.list()
+        vector_store = client.vector_stores.list()
         assert_matches_type(SyncCursorPage[VectorStore], vector_store, path=["response"])
 
     @parametrize
     def test_method_list_with_all_params(self, client: OpenAI) -> None:
-        vector_store = client.beta.vector_stores.list(
-            after="string",
-            before="string",
+        vector_store = client.vector_stores.list(
+            after="after",
+            before="before",
             limit=0,
             order="asc",
         )
@@ -166,7 +167,7 @@ class TestVectorStores:
 
     @parametrize
     def test_raw_response_list(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.with_raw_response.list()
+        response = client.vector_stores.with_raw_response.list()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -175,7 +176,7 @@ class TestVectorStores:
 
     @parametrize
     def test_streaming_response_list(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.with_streaming_response.list() as response:
+        with client.vector_stores.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -186,15 +187,15 @@ class TestVectorStores:
 
     @parametrize
     def test_method_delete(self, client: OpenAI) -> None:
-        vector_store = client.beta.vector_stores.delete(
-            "string",
+        vector_store = client.vector_stores.delete(
+            "vector_store_id",
         )
         assert_matches_type(VectorStoreDeleted, vector_store, path=["response"])
 
     @parametrize
     def test_raw_response_delete(self, client: OpenAI) -> None:
-        response = client.beta.vector_stores.with_raw_response.delete(
-            "string",
+        response = client.vector_stores.with_raw_response.delete(
+            "vector_store_id",
         )
 
         assert response.is_closed is True
@@ -204,8 +205,8 @@ class TestVectorStores:
 
     @parametrize
     def test_streaming_response_delete(self, client: OpenAI) -> None:
-        with client.beta.vector_stores.with_streaming_response.delete(
-            "string",
+        with client.vector_stores.with_streaming_response.delete(
+            "vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -218,22 +219,83 @@ class TestVectorStores:
     @parametrize
     def test_path_params_delete(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.beta.vector_stores.with_raw_response.delete(
+            client.vector_stores.with_raw_response.delete(
                 "",
             )
 
+    @parametrize
+    def test_method_search(self, client: OpenAI) -> None:
+        vector_store = client.vector_stores.search(
+            vector_store_id="vs_abc123",
+            query="string",
+        )
+        assert_matches_type(SyncPage[VectorStoreSearchResponse], vector_store, path=["response"])
+
+    @parametrize
+    def test_method_search_with_all_params(self, client: OpenAI) -> None:
+        vector_store = client.vector_stores.search(
+            vector_store_id="vs_abc123",
+            query="string",
+            filters={
+                "key": "key",
+                "type": "eq",
+                "value": "string",
+            },
+            max_num_results=1,
+            ranking_options={
+                "ranker": "auto",
+                "score_threshold": 0,
+            },
+            rewrite_query=True,
+        )
+        assert_matches_type(SyncPage[VectorStoreSearchResponse], vector_store, path=["response"])
+
+    @parametrize
+    def test_raw_response_search(self, client: OpenAI) -> None:
+        response = client.vector_stores.with_raw_response.search(
+            vector_store_id="vs_abc123",
+            query="string",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        vector_store = response.parse()
+        assert_matches_type(SyncPage[VectorStoreSearchResponse], vector_store, path=["response"])
+
+    @parametrize
+    def test_streaming_response_search(self, client: OpenAI) -> None:
+        with client.vector_stores.with_streaming_response.search(
+            vector_store_id="vs_abc123",
+            query="string",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            vector_store = response.parse()
+            assert_matches_type(SyncPage[VectorStoreSearchResponse], vector_store, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_search(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            client.vector_stores.with_raw_response.search(
+                vector_store_id="",
+                query="string",
+            )
+
 
 class TestAsyncVectorStores:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
-        vector_store = await async_client.beta.vector_stores.create()
+        vector_store = await async_client.vector_stores.create()
         assert_matches_type(VectorStore, vector_store, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        vector_store = await async_client.beta.vector_stores.create(
+        vector_store = await async_client.vector_stores.create(
             chunking_strategy={"type": "auto"},
             expires_after={
                 "anchor": "last_active_at",
@@ -247,7 +309,7 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.with_raw_response.create()
+        response = await async_client.vector_stores.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -256,7 +318,7 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.with_streaming_response.create() as response:
+        async with async_client.vector_stores.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -267,15 +329,15 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
-        vector_store = await async_client.beta.vector_stores.retrieve(
-            "string",
+        vector_store = await async_client.vector_stores.retrieve(
+            "vector_store_id",
         )
         assert_matches_type(VectorStore, vector_store, path=["response"])
 
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.with_raw_response.retrieve(
-            "string",
+        response = await async_client.vector_stores.with_raw_response.retrieve(
+            "vector_store_id",
         )
 
         assert response.is_closed is True
@@ -285,8 +347,8 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.with_streaming_response.retrieve(
-            "string",
+        async with async_client.vector_stores.with_streaming_response.retrieve(
+            "vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -299,21 +361,21 @@ class TestAsyncVectorStores:
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.with_raw_response.retrieve(
+            await async_client.vector_stores.with_raw_response.retrieve(
                 "",
             )
 
     @parametrize
     async def test_method_update(self, async_client: AsyncOpenAI) -> None:
-        vector_store = await async_client.beta.vector_stores.update(
-            "string",
+        vector_store = await async_client.vector_stores.update(
+            vector_store_id="vector_store_id",
         )
         assert_matches_type(VectorStore, vector_store, path=["response"])
 
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        vector_store = await async_client.beta.vector_stores.update(
-            "string",
+        vector_store = await async_client.vector_stores.update(
+            vector_store_id="vector_store_id",
             expires_after={
                 "anchor": "last_active_at",
                 "days": 1,
@@ -325,8 +387,8 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.with_raw_response.update(
-            "string",
+        response = await async_client.vector_stores.with_raw_response.update(
+            vector_store_id="vector_store_id",
         )
 
         assert response.is_closed is True
@@ -336,8 +398,8 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.with_streaming_response.update(
-            "string",
+        async with async_client.vector_stores.with_streaming_response.update(
+            vector_store_id="vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -350,20 +412,20 @@ class TestAsyncVectorStores:
     @parametrize
     async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.with_raw_response.update(
-                "",
+            await async_client.vector_stores.with_raw_response.update(
+                vector_store_id="",
             )
 
     @parametrize
     async def test_method_list(self, async_client: AsyncOpenAI) -> None:
-        vector_store = await async_client.beta.vector_stores.list()
+        vector_store = await async_client.vector_stores.list()
         assert_matches_type(AsyncCursorPage[VectorStore], vector_store, path=["response"])
 
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        vector_store = await async_client.beta.vector_stores.list(
-            after="string",
-            before="string",
+        vector_store = await async_client.vector_stores.list(
+            after="after",
+            before="before",
             limit=0,
             order="asc",
         )
@@ -371,7 +433,7 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.with_raw_response.list()
+        response = await async_client.vector_stores.with_raw_response.list()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -380,7 +442,7 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.with_streaming_response.list() as response:
+        async with async_client.vector_stores.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -391,15 +453,15 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
-        vector_store = await async_client.beta.vector_stores.delete(
-            "string",
+        vector_store = await async_client.vector_stores.delete(
+            "vector_store_id",
         )
         assert_matches_type(VectorStoreDeleted, vector_store, path=["response"])
 
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.vector_stores.with_raw_response.delete(
-            "string",
+        response = await async_client.vector_stores.with_raw_response.delete(
+            "vector_store_id",
         )
 
         assert response.is_closed is True
@@ -409,8 +471,8 @@ class TestAsyncVectorStores:
 
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.vector_stores.with_streaming_response.delete(
-            "string",
+        async with async_client.vector_stores.with_streaming_response.delete(
+            "vector_store_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -423,6 +485,67 @@ class TestAsyncVectorStores:
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.beta.vector_stores.with_raw_response.delete(
+            await async_client.vector_stores.with_raw_response.delete(
                 "",
             )
+
+    @parametrize
+    async def test_method_search(self, async_client: AsyncOpenAI) -> None:
+        vector_store = await async_client.vector_stores.search(
+            vector_store_id="vs_abc123",
+            query="string",
+        )
+        assert_matches_type(AsyncPage[VectorStoreSearchResponse], vector_store, path=["response"])
+
+    @parametrize
+    async def test_method_search_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        vector_store = await async_client.vector_stores.search(
+            vector_store_id="vs_abc123",
+            query="string",
+            filters={
+                "key": "key",
+                "type": "eq",
+                "value": "string",
+            },
+            max_num_results=1,
+            ranking_options={
+                "ranker": "auto",
+                "score_threshold": 0,
+            },
+            rewrite_query=True,
+        )
+        assert_matches_type(AsyncPage[VectorStoreSearchResponse], vector_store, path=["response"])
+
+    @parametrize
+    async def test_raw_response_search(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.vector_stores.with_raw_response.search(
+            vector_store_id="vs_abc123",
+            query="string",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        vector_store = response.parse()
+        assert_matches_type(AsyncPage[VectorStoreSearchResponse], vector_store, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_search(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.vector_stores.with_streaming_response.search(
+            vector_store_id="vs_abc123",
+            query="string",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            vector_store = await response.parse()
+            assert_matches_type(AsyncPage[VectorStoreSearchResponse], vector_store, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_search(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+            await async_client.vector_stores.with_raw_response.search(
+                vector_store_id="",
+                query="string",
+            )
tests/lib/chat/test_completions.py
@@ -58,6 +58,7 @@ ParsedChatCompletion[NoneType](
             index=0,
             logprobs=None,
             message=ParsedChatCompletionMessage[NoneType](
+                annotations=None,
                 audio=None,
                 content="I'm unable to provide real-time weather updates. To get the current weather in San Francisco, I
 recommend checking a reliable weather website or app like the Weather Channel or a local news station.",
@@ -126,6 +127,7 @@ ParsedChatCompletion[Location](
             index=0,
             logprobs=None,
             message=ParsedChatCompletionMessage[Location](
+                annotations=None,
                 audio=None,
                 content='{"city":"San Francisco","temperature":65,"units":"f"}',
                 function_call=None,
@@ -195,6 +197,7 @@ ParsedChatCompletion[Location](
             index=0,
             logprobs=None,
             message=ParsedChatCompletionMessage[Location](
+                annotations=None,
                 audio=None,
                 content='{"city":"San Francisco","temperature":65,"units":"f"}',
                 function_call=None,
@@ -266,6 +269,7 @@ ParsedChoice[ColorDetection](
     index=0,
     logprobs=None,
     message=ParsedChatCompletionMessage[ColorDetection](
+        annotations=None,
         audio=None,
         content='{"color":"red","hex_color_code":"#FF0000"}',
         function_call=None,
@@ -315,6 +319,7 @@ def test_parse_pydantic_model_multiple_choices(
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content='{"city":"San Francisco","temperature":64,"units":"f"}',
             function_call=None,
@@ -329,6 +334,7 @@ def test_parse_pydantic_model_multiple_choices(
         index=1,
         logprobs=None,
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content='{"city":"San Francisco","temperature":65,"units":"f"}',
             function_call=None,
@@ -343,6 +349,7 @@ def test_parse_pydantic_model_multiple_choices(
         index=2,
         logprobs=None,
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content='{"city":"San Francisco","temperature":63.0,"units":"f"}',
             function_call=None,
@@ -393,6 +400,7 @@ ParsedChatCompletion[CalendarEvent](
             index=0,
             logprobs=None,
             message=ParsedChatCompletionMessage[CalendarEvent](
+                annotations=None,
                 audio=None,
                 content='{"name":"Science Fair","date":"Friday","participants":["Alice","Bob"]}',
                 function_call=None,
@@ -454,6 +462,7 @@ ParsedChoice[Query](
     index=0,
     logprobs=None,
     message=ParsedChatCompletionMessage[Query](
+        annotations=None,
         audio=None,
         content=None,
         function_call=None,
@@ -565,6 +574,7 @@ def test_parse_pydantic_model_refusal(client: OpenAI, respx_mock: MockRouter, mo
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -614,6 +624,7 @@ def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -686,6 +697,7 @@ def test_parse_multiple_pydantic_tools(client: OpenAI, respx_mock: MockRouter, m
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -767,6 +779,7 @@ def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch:
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -849,6 +862,7 @@ ParsedChatCompletion[Location](
             index=0,
             logprobs=None,
             message=ParsedChatCompletionMessage[Location](
+                annotations=None,
                 audio=None,
                 content='{"city":"San Francisco","temperature":58,"units":"f"}',
                 function_call=None,
@@ -924,6 +938,7 @@ ParsedChatCompletion[Location](
             index=0,
             logprobs=None,
             message=ParsedChatCompletionMessage[Location](
+                annotations=None,
                 audio=None,
                 content='{"city":"San Francisco","temperature":65,"units":"f"}',
                 function_call=None,
tests/lib/chat/test_completions_streaming.py
@@ -63,6 +63,7 @@ def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pyte
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content="I'm unable to provide real-time weather updates. To get the current weather in San Francisco, I 
 recommend checking a reliable weather website or a weather app.",
@@ -141,6 +142,7 @@ ParsedChatCompletion[Location](
             index=0,
             logprobs=None,
             message=ParsedChatCompletionMessage[Location](
+                annotations=None,
                 audio=None,
                 content='{"city":"San Francisco","temperature":61,"units":"f"}',
                 function_call=None,
@@ -318,6 +320,7 @@ def test_parse_pydantic_model_multiple_choices(
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content='{"city":"San Francisco","temperature":65,"units":"f"}',
             function_call=None,
@@ -332,6 +335,7 @@ def test_parse_pydantic_model_multiple_choices(
         index=1,
         logprobs=None,
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content='{"city":"San Francisco","temperature":61,"units":"f"}',
             function_call=None,
@@ -346,6 +350,7 @@ def test_parse_pydantic_model_multiple_choices(
         index=2,
         logprobs=None,
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content='{"city":"San Francisco","temperature":59,"units":"f"}',
             function_call=None,
@@ -421,6 +426,7 @@ RefusalDoneEvent(refusal="I'm sorry, I can't assist with that request.", type='r
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -495,6 +501,7 @@ def test_content_logprobs_events(client: OpenAI, respx_mock: MockRouter, monkeyp
             refusal=None
         ),
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content='Foo!',
             function_call=None,
@@ -606,6 +613,7 @@ def test_refusal_logprobs_events(client: OpenAI, respx_mock: MockRouter, monkeyp
             ]
         ),
         message=ParsedChatCompletionMessage[Location](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -652,6 +660,7 @@ def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[object](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -684,6 +693,7 @@ def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -755,6 +765,7 @@ def test_parse_multiple_pydantic_tools(client: OpenAI, respx_mock: MockRouter, m
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[object](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -863,6 +874,7 @@ def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch:
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[object](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -914,6 +926,7 @@ def test_non_pydantic_response_format(client: OpenAI, respx_mock: MockRouter, mo
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content='\\n  {\\n    "location": "San Francisco, CA",\\n    "weather": {\\n      "temperature": "18°C",\\n      
 "condition": "Partly Cloudy",\\n      "humidity": "72%",\\n      "windSpeed": "15 km/h",\\n      "windDirection": "NW"\\n   
@@ -974,6 +987,7 @@ FunctionToolCallArgumentsDoneEvent(
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content=None,
             function_call=None,
@@ -1033,6 +1047,7 @@ def test_chat_completion_state_helper(client: OpenAI, respx_mock: MockRouter, mo
         index=0,
         logprobs=None,
         message=ParsedChatCompletionMessage[NoneType](
+            annotations=None,
             audio=None,
             content="I'm unable to provide real-time weather updates. To get the current weather in San Francisco, I 
 recommend checking a reliable weather website or a weather app.",
.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 74
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b524aed1c2c5c928aa4e2c546f5dbb364e7b4d5027daf05e42e210b05a97c3c6.yml
+configured_endpoints: 81
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml
api.md
@@ -3,10 +3,14 @@
 ```python
 from openai.types import (
     ChatModel,
+    ComparisonFilter,
+    CompoundFilter,
     ErrorObject,
     FunctionDefinition,
     FunctionParameters,
     Metadata,
+    Reasoning,
+    ReasoningEffort,
     ResponseFormatJSONObject,
     ResponseFormatJSONSchema,
     ResponseFormatText,
@@ -59,7 +63,6 @@ from openai.types.chat import (
     ChatCompletionModality,
     ChatCompletionNamedToolChoice,
     ChatCompletionPredictionContent,
-    ChatCompletionReasoningEffort,
     ChatCompletionRole,
     ChatCompletionStoreMessage,
     ChatCompletionStreamOptions,
@@ -69,6 +72,7 @@ from openai.types.chat import (
     ChatCompletionToolChoiceOption,
     ChatCompletionToolMessageParam,
     ChatCompletionUserMessageParam,
+    ChatCompletionReasoningEffort,
 )
 ```
 
@@ -249,6 +253,73 @@ Methods:
 
 - <code title="get /fine_tuning/jobs/{fine_tuning_job_id}/checkpoints">client.fine_tuning.jobs.checkpoints.<a href="./src/openai/resources/fine_tuning/jobs/checkpoints.py">list</a>(fine_tuning_job_id, \*\*<a href="src/openai/types/fine_tuning/jobs/checkpoint_list_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py">SyncCursorPage[FineTuningJobCheckpoint]</a></code>
 
+# VectorStores
+
+Types:
+
+```python
+from openai.types import (
+    AutoFileChunkingStrategyParam,
+    FileChunkingStrategy,
+    FileChunkingStrategyParam,
+    OtherFileChunkingStrategyObject,
+    StaticFileChunkingStrategy,
+    StaticFileChunkingStrategyObject,
+    StaticFileChunkingStrategyObjectParam,
+    VectorStore,
+    VectorStoreDeleted,
+    VectorStoreSearchResponse,
+)
+```
+
+Methods:
+
+- <code title="post /vector_stores">client.vector_stores.<a href="./src/openai/resources/vector_stores/vector_stores.py">create</a>(\*\*<a href="src/openai/types/vector_store_create_params.py">params</a>) -> <a href="./src/openai/types/vector_store.py">VectorStore</a></code>
+- <code title="get /vector_stores/{vector_store_id}">client.vector_stores.<a href="./src/openai/resources/vector_stores/vector_stores.py">retrieve</a>(vector_store_id) -> <a href="./src/openai/types/vector_store.py">VectorStore</a></code>
+- <code title="post /vector_stores/{vector_store_id}">client.vector_stores.<a href="./src/openai/resources/vector_stores/vector_stores.py">update</a>(vector_store_id, \*\*<a href="src/openai/types/vector_store_update_params.py">params</a>) -> <a href="./src/openai/types/vector_store.py">VectorStore</a></code>
+- <code title="get /vector_stores">client.vector_stores.<a href="./src/openai/resources/vector_stores/vector_stores.py">list</a>(\*\*<a href="src/openai/types/vector_store_list_params.py">params</a>) -> <a href="./src/openai/types/vector_store.py">SyncCursorPage[VectorStore]</a></code>
+- <code title="delete /vector_stores/{vector_store_id}">client.vector_stores.<a href="./src/openai/resources/vector_stores/vector_stores.py">delete</a>(vector_store_id) -> <a href="./src/openai/types/vector_store_deleted.py">VectorStoreDeleted</a></code>
+- <code title="post /vector_stores/{vector_store_id}/search">client.vector_stores.<a href="./src/openai/resources/vector_stores/vector_stores.py">search</a>(vector_store_id, \*\*<a href="src/openai/types/vector_store_search_params.py">params</a>) -> <a href="./src/openai/types/vector_store_search_response.py">SyncPage[VectorStoreSearchResponse]</a></code>
+
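
A minimal usage sketch of the new `search` endpoint, mirroring the parameters exercised in the test suite above; the vector store ID, query, and filter values here are placeholders, not real resources.

```python
from openai import OpenAI

client = OpenAI()

# Placeholder ID/query/filter values; see the tests for the full parameter
# surface (filters, max_num_results, ranking_options, rewrite_query).
page = client.vector_stores.search(
    vector_store_id="vs_abc123",
    query="return policy",
    filters={"key": "region", "type": "eq", "value": "emea"},
    max_num_results=5,
)

# SyncPage[VectorStoreSearchResponse] is iterable over individual results.
for result in page:
    print(result)
```
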
+## Files
+
+Types:
+
+```python
+from openai.types.vector_stores import VectorStoreFile, VectorStoreFileDeleted, FileContentResponse
+```
+
+Methods:
+
+- <code title="post /vector_stores/{vector_store_id}/files">client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">create</a>(vector_store_id, \*\*<a href="src/openai/types/vector_stores/file_create_params.py">params</a>) -> <a href="./src/openai/types/vector_stores/vector_store_file.py">VectorStoreFile</a></code>
+- <code title="get /vector_stores/{vector_store_id}/files/{file_id}">client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">retrieve</a>(file_id, \*, vector_store_id) -> <a href="./src/openai/types/vector_stores/vector_store_file.py">VectorStoreFile</a></code>
+- <code title="post /vector_stores/{vector_store_id}/files/{file_id}">client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">update</a>(file_id, \*, vector_store_id, \*\*<a href="src/openai/types/vector_stores/file_update_params.py">params</a>) -> <a href="./src/openai/types/vector_stores/vector_store_file.py">VectorStoreFile</a></code>
+- <code title="get /vector_stores/{vector_store_id}/files">client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">list</a>(vector_store_id, \*\*<a href="src/openai/types/vector_stores/file_list_params.py">params</a>) -> <a href="./src/openai/types/vector_stores/vector_store_file.py">SyncCursorPage[VectorStoreFile]</a></code>
+- <code title="delete /vector_stores/{vector_store_id}/files/{file_id}">client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">delete</a>(file_id, \*, vector_store_id) -> <a href="./src/openai/types/vector_stores/vector_store_file_deleted.py">VectorStoreFileDeleted</a></code>
+- <code title="get /vector_stores/{vector_store_id}/files/{file_id}/content">client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">content</a>(file_id, \*, vector_store_id) -> <a href="./src/openai/types/vector_stores/file_content_response.py">SyncPage[FileContentResponse]</a></code>
+- <code>client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">create_and_poll</a>(\*args) -> VectorStoreFile</code>
+- <code>client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">poll</a>(\*args) -> VectorStoreFile</code>
+- <code>client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">upload</a>(\*args) -> VectorStoreFile</code>
+- <code>client.vector_stores.files.<a href="./src/openai/resources/vector_stores/files.py">upload_and_poll</a>(\*args) -> VectorStoreFile</code>
+
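
A hedged sketch of attaching a local file with the `upload_and_poll` helper and then paging through its parsed content; the file path and store ID are placeholders and the exact helper signature may differ slightly from what is shown.

```python
from pathlib import Path

from openai import OpenAI

client = OpenAI()

# Placeholder path and vector store ID.
vector_store_file = client.vector_stores.files.upload_and_poll(
    vector_store_id="vs_abc123",
    file=Path("knowledge-base.pdf"),
)
print(vector_store_file.status)

# Page through the parsed file content once ingestion has completed.
for part in client.vector_stores.files.content(
    vector_store_file.id,
    vector_store_id="vs_abc123",
):
    print(part)
```
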
+## FileBatches
+
+Types:
+
+```python
+from openai.types.vector_stores import VectorStoreFileBatch
+```
+
+Methods:
+
+- <code title="post /vector_stores/{vector_store_id}/file_batches">client.vector_stores.file_batches.<a href="./src/openai/resources/vector_stores/file_batches.py">create</a>(vector_store_id, \*\*<a href="src/openai/types/vector_stores/file_batch_create_params.py">params</a>) -> <a href="./src/openai/types/vector_stores/vector_store_file_batch.py">VectorStoreFileBatch</a></code>
+- <code title="get /vector_stores/{vector_store_id}/file_batches/{batch_id}">client.vector_stores.file_batches.<a href="./src/openai/resources/vector_stores/file_batches.py">retrieve</a>(batch_id, \*, vector_store_id) -> <a href="./src/openai/types/vector_stores/vector_store_file_batch.py">VectorStoreFileBatch</a></code>
+- <code title="post /vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel">client.vector_stores.file_batches.<a href="./src/openai/resources/vector_stores/file_batches.py">cancel</a>(batch_id, \*, vector_store_id) -> <a href="./src/openai/types/vector_stores/vector_store_file_batch.py">VectorStoreFileBatch</a></code>
+- <code title="get /vector_stores/{vector_store_id}/file_batches/{batch_id}/files">client.vector_stores.file_batches.<a href="./src/openai/resources/vector_stores/file_batches.py">list_files</a>(batch_id, \*, vector_store_id, \*\*<a href="src/openai/types/vector_stores/file_batch_list_files_params.py">params</a>) -> <a href="./src/openai/types/vector_stores/vector_store_file.py">SyncCursorPage[VectorStoreFile]</a></code>
+- <code>client.vector_stores.file_batches.<a href="./src/openai/resources/vector_stores/file_batches.py">create_and_poll</a>(\*args) -> VectorStoreFileBatch</code>
+- <code>client.vector_stores.file_batches.<a href="./src/openai/resources/vector_stores/file_batches.py">poll</a>(\*args) -> VectorStoreFileBatch</code>
+- <code>client.vector_stores.file_batches.<a href="./src/openai/resources/vector_stores/file_batches.py">upload_and_poll</a>(\*args) -> VectorStoreFileBatch</code>
+
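
As a rough illustration of the batch helpers (IDs are placeholders, and the files are assumed to have already been uploaded via `client.files.create(...)`):

```python
from openai import OpenAI

client = OpenAI()

# Create a file batch and block until every file reaches a terminal state.
batch = client.vector_stores.file_batches.create_and_poll(
    vector_store_id="vs_abc123",
    file_ids=["file-abc123", "file-def456"],
)
print(batch.status, batch.file_counts)
```
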
 # Beta
 
 ## Realtime
@@ -317,69 +388,6 @@ Methods:
 
 - <code title="post /realtime/sessions">client.beta.realtime.sessions.<a href="./src/openai/resources/beta/realtime/sessions.py">create</a>(\*\*<a href="src/openai/types/beta/realtime/session_create_params.py">params</a>) -> <a href="./src/openai/types/beta/realtime/session_create_response.py">SessionCreateResponse</a></code>
 
-## VectorStores
-
-Types:
-
-```python
-from openai.types.beta import (
-    AutoFileChunkingStrategyParam,
-    FileChunkingStrategy,
-    FileChunkingStrategyParam,
-    OtherFileChunkingStrategyObject,
-    StaticFileChunkingStrategy,
-    StaticFileChunkingStrategyObject,
-    StaticFileChunkingStrategyObjectParam,
-    VectorStore,
-    VectorStoreDeleted,
-)
-```
-
-Methods:
-
-- <code title="post /vector_stores">client.beta.vector_stores.<a href="./src/openai/resources/beta/vector_stores/vector_stores.py">create</a>(\*\*<a href="src/openai/types/beta/vector_store_create_params.py">params</a>) -> <a href="./src/openai/types/beta/vector_store.py">VectorStore</a></code>
-- <code title="get /vector_stores/{vector_store_id}">client.beta.vector_stores.<a href="./src/openai/resources/beta/vector_stores/vector_stores.py">retrieve</a>(vector_store_id) -> <a href="./src/openai/types/beta/vector_store.py">VectorStore</a></code>
-- <code title="post /vector_stores/{vector_store_id}">client.beta.vector_stores.<a href="./src/openai/resources/beta/vector_stores/vector_stores.py">update</a>(vector_store_id, \*\*<a href="src/openai/types/beta/vector_store_update_params.py">params</a>) -> <a href="./src/openai/types/beta/vector_store.py">VectorStore</a></code>
-- <code title="get /vector_stores">client.beta.vector_stores.<a href="./src/openai/resources/beta/vector_stores/vector_stores.py">list</a>(\*\*<a href="src/openai/types/beta/vector_store_list_params.py">params</a>) -> <a href="./src/openai/types/beta/vector_store.py">SyncCursorPage[VectorStore]</a></code>
-- <code title="delete /vector_stores/{vector_store_id}">client.beta.vector_stores.<a href="./src/openai/resources/beta/vector_stores/vector_stores.py">delete</a>(vector_store_id) -> <a href="./src/openai/types/beta/vector_store_deleted.py">VectorStoreDeleted</a></code>
-
-### Files
-
-Types:
-
-```python
-from openai.types.beta.vector_stores import VectorStoreFile, VectorStoreFileDeleted
-```
-
-Methods:
-
-- <code title="post /vector_stores/{vector_store_id}/files">client.beta.vector_stores.files.<a href="./src/openai/resources/beta/vector_stores/files.py">create</a>(vector_store_id, \*\*<a href="src/openai/types/beta/vector_stores/file_create_params.py">params</a>) -> <a href="./src/openai/types/beta/vector_stores/vector_store_file.py">VectorStoreFile</a></code>
-- <code title="get /vector_stores/{vector_store_id}/files/{file_id}">client.beta.vector_stores.files.<a href="./src/openai/resources/beta/vector_stores/files.py">retrieve</a>(file_id, \*, vector_store_id) -> <a href="./src/openai/types/beta/vector_stores/vector_store_file.py">VectorStoreFile</a></code>
-- <code title="get /vector_stores/{vector_store_id}/files">client.beta.vector_stores.files.<a href="./src/openai/resources/beta/vector_stores/files.py">list</a>(vector_store_id, \*\*<a href="src/openai/types/beta/vector_stores/file_list_params.py">params</a>) -> <a href="./src/openai/types/beta/vector_stores/vector_store_file.py">SyncCursorPage[VectorStoreFile]</a></code>
-- <code title="delete /vector_stores/{vector_store_id}/files/{file_id}">client.beta.vector_stores.files.<a href="./src/openai/resources/beta/vector_stores/files.py">delete</a>(file_id, \*, vector_store_id) -> <a href="./src/openai/types/beta/vector_stores/vector_store_file_deleted.py">VectorStoreFileDeleted</a></code>
-- <code>client.beta.vector_stores.files.<a href="./src/openai/resources/beta/vector_stores/files.py">create_and_poll</a>(\*args) -> VectorStoreFile</code>
-- <code>client.beta.vector_stores.files.<a href="./src/openai/resources/beta/vector_stores/files.py">poll</a>(\*args) -> VectorStoreFile</code>
-- <code>client.beta.vector_stores.files.<a href="./src/openai/resources/beta/vector_stores/files.py">upload</a>(\*args) -> VectorStoreFile</code>
-- <code>client.beta.vector_stores.files.<a href="./src/openai/resources/beta/vector_stores/files.py">upload_and_poll</a>(\*args) -> VectorStoreFile</code>
-
-### FileBatches
-
-Types:
-
-```python
-from openai.types.beta.vector_stores import VectorStoreFileBatch
-```
-
-Methods:
-
-- <code title="post /vector_stores/{vector_store_id}/file_batches">client.beta.vector_stores.file_batches.<a href="./src/openai/resources/beta/vector_stores/file_batches.py">create</a>(vector_store_id, \*\*<a href="src/openai/types/beta/vector_stores/file_batch_create_params.py">params</a>) -> <a href="./src/openai/types/beta/vector_stores/vector_store_file_batch.py">VectorStoreFileBatch</a></code>
-- <code title="get /vector_stores/{vector_store_id}/file_batches/{batch_id}">client.beta.vector_stores.file_batches.<a href="./src/openai/resources/beta/vector_stores/file_batches.py">retrieve</a>(batch_id, \*, vector_store_id) -> <a href="./src/openai/types/beta/vector_stores/vector_store_file_batch.py">VectorStoreFileBatch</a></code>
-- <code title="post /vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel">client.beta.vector_stores.file_batches.<a href="./src/openai/resources/beta/vector_stores/file_batches.py">cancel</a>(batch_id, \*, vector_store_id) -> <a href="./src/openai/types/beta/vector_stores/vector_store_file_batch.py">VectorStoreFileBatch</a></code>
-- <code title="get /vector_stores/{vector_store_id}/file_batches/{batch_id}/files">client.beta.vector_stores.file_batches.<a href="./src/openai/resources/beta/vector_stores/file_batches.py">list_files</a>(batch_id, \*, vector_store_id, \*\*<a href="src/openai/types/beta/vector_stores/file_batch_list_files_params.py">params</a>) -> <a href="./src/openai/types/beta/vector_stores/vector_store_file.py">SyncCursorPage[VectorStoreFile]</a></code>
-- <code>client.beta.vector_stores.file_batches.<a href="./src/openai/resources/beta/vector_stores/file_batches.py">create_and_poll</a>(\*args) -> VectorStoreFileBatch</code>
-- <code>client.beta.vector_stores.file_batches.<a href="./src/openai/resources/beta/vector_stores/file_batches.py">poll</a>(\*args) -> VectorStoreFileBatch</code>
-- <code>client.beta.vector_stores.file_batches.<a href="./src/openai/resources/beta/vector_stores/file_batches.py">upload_and_poll</a>(\*args) -> VectorStoreFileBatch</code>
-
 ## Assistants
 
 Types:
@@ -573,3 +581,99 @@ from openai.types.uploads import UploadPart
 Methods:
 
 - <code title="post /uploads/{upload_id}/parts">client.uploads.parts.<a href="./src/openai/resources/uploads/parts.py">create</a>(upload_id, \*\*<a href="src/openai/types/uploads/part_create_params.py">params</a>) -> <a href="./src/openai/types/uploads/upload_part.py">UploadPart</a></code>
+
+# Responses
+
+Types:
+
+```python
+from openai.types.responses import (
+    ComputerTool,
+    EasyInputMessage,
+    FileSearchTool,
+    FunctionTool,
+    Response,
+    ResponseAudioDeltaEvent,
+    ResponseAudioDoneEvent,
+    ResponseAudioTranscriptDeltaEvent,
+    ResponseAudioTranscriptDoneEvent,
+    ResponseCodeInterpreterCallCodeDeltaEvent,
+    ResponseCodeInterpreterCallCodeDoneEvent,
+    ResponseCodeInterpreterCallCompletedEvent,
+    ResponseCodeInterpreterCallInProgressEvent,
+    ResponseCodeInterpreterCallInterpretingEvent,
+    ResponseCodeInterpreterToolCall,
+    ResponseCompletedEvent,
+    ResponseComputerToolCall,
+    ResponseContent,
+    ResponseContentPartAddedEvent,
+    ResponseContentPartDoneEvent,
+    ResponseCreatedEvent,
+    ResponseError,
+    ResponseErrorEvent,
+    ResponseFailedEvent,
+    ResponseFileSearchCallCompletedEvent,
+    ResponseFileSearchCallInProgressEvent,
+    ResponseFileSearchCallSearchingEvent,
+    ResponseFileSearchToolCall,
+    ResponseFormatTextConfig,
+    ResponseFormatTextJSONSchemaConfig,
+    ResponseFunctionCallArgumentsDeltaEvent,
+    ResponseFunctionCallArgumentsDoneEvent,
+    ResponseFunctionToolCall,
+    ResponseFunctionWebSearch,
+    ResponseInProgressEvent,
+    ResponseIncludable,
+    ResponseIncompleteEvent,
+    ResponseInput,
+    ResponseInputAudio,
+    ResponseInputContent,
+    ResponseInputFile,
+    ResponseInputImage,
+    ResponseInputItem,
+    ResponseInputMessageContentList,
+    ResponseInputText,
+    ResponseOutputAudio,
+    ResponseOutputItem,
+    ResponseOutputItemAddedEvent,
+    ResponseOutputItemDoneEvent,
+    ResponseOutputMessage,
+    ResponseOutputRefusal,
+    ResponseOutputText,
+    ResponseRefusalDeltaEvent,
+    ResponseRefusalDoneEvent,
+    ResponseStatus,
+    ResponseStreamEvent,
+    ResponseTextAnnotationDeltaEvent,
+    ResponseTextConfig,
+    ResponseTextDeltaEvent,
+    ResponseTextDoneEvent,
+    ResponseUsage,
+    ResponseWebSearchCallCompletedEvent,
+    ResponseWebSearchCallInProgressEvent,
+    ResponseWebSearchCallSearchingEvent,
+    Tool,
+    ToolChoiceFunction,
+    ToolChoiceOptions,
+    ToolChoiceTypes,
+    WebSearchTool,
+)
+```
+
+Methods:
+
+- <code title="post /responses">client.responses.<a href="./src/openai/resources/responses/responses.py">create</a>(\*\*<a href="src/openai/types/responses/response_create_params.py">params</a>) -> <a href="./src/openai/types/responses/response.py">Response</a></code>
+- <code title="get /responses/{response_id}">client.responses.<a href="./src/openai/resources/responses/responses.py">retrieve</a>(response_id, \*\*<a href="src/openai/types/responses/response_retrieve_params.py">params</a>) -> <a href="./src/openai/types/responses/response.py">Response</a></code>
+- <code title="delete /responses/{response_id}">client.responses.<a href="./src/openai/resources/responses/responses.py">delete</a>(response_id) -> None</code>
+
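
A short sketch tying the three documented endpoints together; the model name and prompt are illustrative, and retrieval assumes the response is stored (the default) so it can be fetched again by ID.

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    input="Write a haiku about version control.",
)
print(response.output_text)

# Retrieve the stored response by ID, then delete it.
fetched = client.responses.retrieve(response.id)
print(fetched.status)

client.responses.delete(response.id)
```
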
+## InputItems
+
+Types:
+
+```python
+from openai.types.responses import ResponseItemList
+```
+
+Methods:
+
+- <code title="get /responses/{response_id}/input_items">client.responses.input_items.<a href="./src/openai/resources/responses/input_items.py">list</a>(response_id, \*\*<a href="src/openai/types/responses/input_item_list_params.py">params</a>) -> SyncCursorPage[Data]</code>
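
And a corresponding sketch for listing the input items of an existing response; `"resp_abc123"` is a placeholder for a previously created response ID.

```python
from openai import OpenAI

client = OpenAI()

# The returned cursor page is iterable over individual input items.
for item in client.responses.input_items.list("resp_abc123"):
    print(item)
```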
README.md
@@ -10,13 +10,10 @@ It is generated from our [OpenAPI specification](https://github.com/openai/opena
 
 ## Documentation
 
-The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). The full API of this library can be found in [api.md](api.md).
+The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs/api-reference). The full API of this library can be found in [api.md](api.md).
 
 ## Installation
 
-> [!IMPORTANT]
-> The SDK was rewritten in v1, which was released November 6th 2023. See the [v1 migration guide](https://github.com/openai/openai-python/discussions/742), which includes scripts to automatically update your code.
-
 ```sh
 # install from PyPI
 pip install openai
@@ -26,46 +23,69 @@ pip install openai
 
 The full API of this library can be found in [api.md](api.md).
 
+The primary API for interacting with OpenAI models is the [Responses API](https://platform.openai.com/docs/api-reference/responses). You can generate text from the model with the code below.
+
 ```python
 import os
 from openai import OpenAI
 
 client = OpenAI(
-    api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
+    # This is the default and can be omitted
+    api_key=os.environ.get("OPENAI_API_KEY"),
+)
+
+response = client.responses.create(
+    model="gpt-4o",
+    instructions="You are a coding assistant that talks like a pirate.",
+    input="How do I check if a Python object is an instance of a class?",
 )
 
-chat_completion = client.chat.completions.create(
+print(response.output_text)
+```
+
+The previous standard (supported indefinitely) for generating text is the [Chat Completions API](https://platform.openai.com/docs/api-reference/chat). You can use that API to generate text from the model with the code below.
+
+```python
+from openai import OpenAI
+
+client = OpenAI()
+
+completion = client.chat.completions.create(
+    model="gpt-4o",
     messages=[
+        {"role": "developer", "content": "Talk like a pirate."},
         {
             "role": "user",
-            "content": "Say this is a test",
-        }
+            "content": "How do I check if a Python object is an instance of a class?",
+        },
     ],
-    model="gpt-4o",
 )
+
+print(completion.choices[0].message.content)
 ```
 
 While you can provide an `api_key` keyword argument,
 we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
 to add `OPENAI_API_KEY="My API Key"` to your `.env` file
-so that your API Key is not stored in source control.
+so that your API key is not stored in source control.
+[Get an API key here](https://platform.openai.com/settings/organization/api-keys).
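
As a minimal sketch of that setup (assuming `python-dotenv` is installed and a `.env` file containing `OPENAI_API_KEY` sits next to the script):

```python
from dotenv import load_dotenv

from openai import OpenAI

load_dotenv()  # loads OPENAI_API_KEY from .env into the process environment

client = OpenAI()  # reads OPENAI_API_KEY from the environment by default
```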
 
 ### Vision
 
-With a hosted image:
+With an image URL:
 
 ```python
-response = client.chat.completions.create(
+prompt = "What is in this image?"
+img_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/2023_06_08_Raccoon1.jpg/1599px-2023_06_08_Raccoon1.jpg"
+
+response = client.responses.create(
     model="gpt-4o-mini",
-    messages=[
+    input=[
         {
             "role": "user",
             "content": [
-                {"type": "text", "text": prompt},
-                {
-                    "type": "image_url",
-                    "image_url": {"url": f"{img_url}"},
-                },
+                {"type": "input_text", "text": prompt},
+                {"type": "input_image", "image_url": f"{img_url}"},
             ],
         }
     ],
@@ -75,73 +95,29 @@ response = client.chat.completions.create(
 With the image as a base64 encoded string:
 
 ```python
-response = client.chat.completions.create(
+import base64
+from openai import OpenAI
+
+client = OpenAI()
+
+prompt = "What is in this image?"
+with open("path/to/image.png", "rb") as image_file:
+    b64_image = base64.b64encode(image_file.read()).decode("utf-8")
+
+response = client.responses.create(
     model="gpt-4o-mini",
-    messages=[
+    input=[
         {
             "role": "user",
             "content": [
-                {"type": "text", "text": prompt},
-                {
-                    "type": "image_url",
-                    "image_url": {"url": f"data:{img_type};base64,{img_b64_str}"},
-                },
+                {"type": "input_text", "text": prompt},
+                {"type": "input_image", "image_url": f"data:image/png;base64,{b64_image}"},
             ],
         }
     ],
 )
 ```
 
-### Polling Helpers
-
-When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes
-helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
-If an API method results in an action that could benefit from polling there will be a corresponding version of the
-method ending in '\_and_poll'.
-
-For instance to create a Run and poll until it reaches a terminal state you can run:
-
-```python
-run = client.beta.threads.runs.create_and_poll(
-    thread_id=thread.id,
-    assistant_id=assistant.id,
-)
-```
-
-More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
-
-### Bulk Upload Helpers
-
-When creating and interacting with vector stores, you can use polling helpers to monitor the status of operations.
-For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once.
-
-```python
-sample_files = [Path("sample-paper.pdf"), ...]
-
-batch = await client.vector_stores.file_batches.upload_and_poll(
-    store.id,
-    files=sample_files,
-)
-```
-
-### Streaming Helpers
-
-The SDK also includes helpers to process streams and handle incoming events.
-
-```python
-with client.beta.threads.runs.stream(
-    thread_id=thread.id,
-    assistant_id=assistant.id,
-    instructions="Please address the user as Jane Doe. The user has a premium account.",
-) as stream:
-    for event in stream:
-        # Print the text from text delta events
-        if event.type == "thread.message.delta" and event.data.delta.content:
-            print(event.data.delta.content[0].text)
-```
-
-More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md)
-
 ## Async usage
 
 Simply import `AsyncOpenAI` instead of `OpenAI` and use `await` with each API call:
@@ -152,20 +128,16 @@ import asyncio
 from openai import AsyncOpenAI
 
 client = AsyncOpenAI(
-    api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
+    # This is the default and can be omitted
+    api_key=os.environ.get("OPENAI_API_KEY"),
 )
 
 
 async def main() -> None:
-    chat_completion = await client.chat.completions.create(
-        messages=[
-            {
-                "role": "user",
-                "content": "Say this is a test",
-            }
-        ],
-        model="gpt-4o",
+    response = await client.responses.create(
+        model="gpt-4o", input="Explain disestablishmentarianism to a smart five year old."
     )
+    print(response.output_text)
 
 
 asyncio.run(main())
@@ -182,18 +154,14 @@ from openai import OpenAI
 
 client = OpenAI()
 
-stream = client.chat.completions.create(
-    messages=[
-        {
-            "role": "user",
-            "content": "Say this is a test",
-        }
-    ],
+stream = client.responses.create(
     model="gpt-4o",
+    input="Write a one-sentence bedtime story about a unicorn.",
     stream=True,
 )
-for chunk in stream:
-    print(chunk.choices[0].delta.content or "", end="")
+
+for event in stream:
+    print(event)
 ```
 
 The async client uses the exact same interface.
@@ -206,58 +174,19 @@ client = AsyncOpenAI()
 
 
 async def main():
-    stream = await client.chat.completions.create(
-        model="gpt-4",
-        messages=[{"role": "user", "content": "Say this is a test"}],
+    stream = await client.responses.create(
+        model="gpt-4o",
+        input="Write a one-sentence bedtime story about a unicorn.",
         stream=True,
     )
-    async for chunk in stream:
-        print(chunk.choices[0].delta.content or "", end="")
-
-
-asyncio.run(main())
-```
-
-## Module-level client
-
-> [!IMPORTANT]
-> We highly recommend instantiating client instances instead of relying on the global client.
 
-We also expose a global client instance that is accessible in a similar fashion to versions prior to v1.
-
-```py
-import openai
-
-# optional; defaults to `os.environ['OPENAI_API_KEY']`
-openai.api_key = '...'
+    async for event in stream:
+        print(event)
 
-# all client options can be configured just like the `OpenAI` instantiation counterpart
-openai.base_url = "https://..."
-openai.default_headers = {"x-foo": "true"}
 
-completion = openai.chat.completions.create(
-    model="gpt-4o",
-    messages=[
-        {
-            "role": "user",
-            "content": "How do I output all files in a directory using Python?",
-        },
-    ],
-)
-print(completion.choices[0].message.content)
+asyncio.run(main())
 ```
 
-The API is the exact same as the standard client instance-based API.
-
-This is intended to be used within REPLs or notebooks for faster iteration, **not** in application code.
-
-We recommend that you always instantiate a client (e.g., with `client = OpenAI()`) in application code because:
-
-- It can be difficult to reason about where client options are configured
-- It's not possible to change certain client options without potentially causing race conditions
-- It's harder to mock for testing purposes
-- It's not possible to control cleanup of network connections
-
 ## Realtime API beta
 
 The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a WebSocket connection.
@@ -304,7 +233,7 @@ However the real magic of the Realtime API is handling audio inputs / outputs, s
 
 ### Realtime error handling
 
-Whenever an error occurs, the Realtime API will send an [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling) and the connection will stay open and remain usable. This means you need to handle it yourself, as *no errors are raised directly* by the SDK when an `error` event comes in.
+Whenever an error occurs, the Realtime API will send an [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling) and the connection will stay open and remain usable. This means you need to handle it yourself, as _no errors are raised directly_ by the SDK when an `error` event comes in.
 
 ```py
 client = AsyncOpenAI()
@@ -408,11 +337,11 @@ from openai import OpenAI
 
 client = OpenAI()
 
-completion = client.chat.completions.create(
-    messages=[
+response = client.responses.create(
+    input=[
         {
             "role": "user",
-            "content": "Can you generate an example json object describing a fruit?",
+            "content": "How much ?",
         }
     ],
     model="gpt-4o",
@@ -489,15 +418,16 @@ Error codes are as follows:
 All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI.
 
 ```python
-completion = await client.chat.completions.create(
-    messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
+response = await client.responses.create(
+    model="gpt-4o-mini",
+    input="Say 'this is a test'.",
 )
-print(completion._request_id)  # req_123
+print(response._request_id)  # req_123
 ```
 
 Note that unlike other properties that use an `_` prefix, the `_request_id` property
-*is* public. Unless documented otherwise, *all* other `_` prefix properties,
-methods and modules are *private*.
+_is_ public. Unless documented otherwise, _all_ other `_` prefix properties,
+methods and modules are _private_.
 
 > [!IMPORTANT]  
 > If you need to access request IDs for failed requests you must catch the `APIStatusError` exception
@@ -514,8 +444,7 @@ except openai.APIStatusError as exc:
     raise exc
 ```
 
-
-### Retries
+## Retries
 
 Certain errors are automatically retried 2 times by default, with a short exponential backoff.
 Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict,
@@ -544,7 +473,7 @@ client.with_options(max_retries=5).chat.completions.create(
 )
 ```
 
-### Timeouts
+## Timeouts
 
 By default requests time out after 10 minutes. You can configure this with a `timeout` option,
 which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object: