Commit a3b8e772
Changed files (50)
src/openai/resources/fine_tuning/alpha/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .alpha import (
+ Alpha,
+ AsyncAlpha,
+ AlphaWithRawResponse,
+ AsyncAlphaWithRawResponse,
+ AlphaWithStreamingResponse,
+ AsyncAlphaWithStreamingResponse,
+)
+from .graders import (
+ Graders,
+ AsyncGraders,
+ GradersWithRawResponse,
+ AsyncGradersWithRawResponse,
+ GradersWithStreamingResponse,
+ AsyncGradersWithStreamingResponse,
+)
+
+__all__ = [
+ "Graders",
+ "AsyncGraders",
+ "GradersWithRawResponse",
+ "AsyncGradersWithRawResponse",
+ "GradersWithStreamingResponse",
+ "AsyncGradersWithStreamingResponse",
+ "Alpha",
+ "AsyncAlpha",
+ "AlphaWithRawResponse",
+ "AsyncAlphaWithRawResponse",
+ "AlphaWithStreamingResponse",
+ "AsyncAlphaWithStreamingResponse",
+]
src/openai/resources/fine_tuning/alpha/alpha.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .graders import (
+ Graders,
+ AsyncGraders,
+ GradersWithRawResponse,
+ AsyncGradersWithRawResponse,
+ GradersWithStreamingResponse,
+ AsyncGradersWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["Alpha", "AsyncAlpha"]
+
+
+class Alpha(SyncAPIResource):
+ @cached_property
+ def graders(self) -> Graders:
+ return Graders(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AlphaWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AlphaWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AlphaWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AlphaWithStreamingResponse(self)
+
+
+class AsyncAlpha(AsyncAPIResource):
+ @cached_property
+ def graders(self) -> AsyncGraders:
+ return AsyncGraders(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAlphaWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAlphaWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAlphaWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncAlphaWithStreamingResponse(self)
+
+
+class AlphaWithRawResponse:
+ def __init__(self, alpha: Alpha) -> None:
+ self._alpha = alpha
+
+ @cached_property
+ def graders(self) -> GradersWithRawResponse:
+ return GradersWithRawResponse(self._alpha.graders)
+
+
+class AsyncAlphaWithRawResponse:
+ def __init__(self, alpha: AsyncAlpha) -> None:
+ self._alpha = alpha
+
+ @cached_property
+ def graders(self) -> AsyncGradersWithRawResponse:
+ return AsyncGradersWithRawResponse(self._alpha.graders)
+
+
+class AlphaWithStreamingResponse:
+ def __init__(self, alpha: Alpha) -> None:
+ self._alpha = alpha
+
+ @cached_property
+ def graders(self) -> GradersWithStreamingResponse:
+ return GradersWithStreamingResponse(self._alpha.graders)
+
+
+class AsyncAlphaWithStreamingResponse:
+ def __init__(self, alpha: AsyncAlpha) -> None:
+ self._alpha = alpha
+
+ @cached_property
+ def graders(self) -> AsyncGradersWithStreamingResponse:
+ return AsyncGradersWithStreamingResponse(self._alpha.graders)
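As on every other resource, the `with_raw_response` / `with_streaming_response` properties let callers prefix any method call to get the raw HTTP response instead of only the parsed model. A minimal sketch against the new alpha resource (the grader payload is a placeholder, not part of this diff):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Prefixing with .with_raw_response returns the raw response wrapper.
response = client.fine_tuning.alpha.graders.with_raw_response.validate(
    grader={
        "type": "string_check",
        "name": "exact-match",                   # placeholder grader definition
        "input": "{{sample.output_text}}",
        "operation": "eq",
        "reference": "{{item.reference_answer}}",
    },
)
print(response.headers.get("x-request-id"))  # raw headers are accessible
validated = response.parse()                 # parsed GraderValidateResponse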
src/openai/resources/fine_tuning/alpha/graders.py
@@ -0,0 +1,272 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._base_client import make_request_options
+from ....types.fine_tuning.alpha import grader_run_params, grader_validate_params
+from ....types.fine_tuning.alpha.grader_run_response import GraderRunResponse
+from ....types.fine_tuning.alpha.grader_validate_response import GraderValidateResponse
+
+__all__ = ["Graders", "AsyncGraders"]
+
+
+class Graders(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> GradersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return GradersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> GradersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return GradersWithStreamingResponse(self)
+
+ def run(
+ self,
+ *,
+ grader: grader_run_params.Grader,
+ model_sample: str,
+ reference_answer: Union[str, Iterable[object], float, object],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> GraderRunResponse:
+ """
+ Run a grader.
+
+ Args:
+ grader: The grader used for the fine-tuning job.
+
+ model_sample: The model sample to be evaluated.
+
+ reference_answer: The reference answer for the evaluation.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/fine_tuning/alpha/graders/run",
+ body=maybe_transform(
+ {
+ "grader": grader,
+ "model_sample": model_sample,
+ "reference_answer": reference_answer,
+ },
+ grader_run_params.GraderRunParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=GraderRunResponse,
+ )
+
+ def validate(
+ self,
+ *,
+ grader: grader_validate_params.Grader,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> GraderValidateResponse:
+ """
+ Validate a grader.
+
+ Args:
+ grader: The grader used for the fine-tuning job.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/fine_tuning/alpha/graders/validate",
+ body=maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=GraderValidateResponse,
+ )
+
+
+class AsyncGraders(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncGradersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncGradersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncGradersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncGradersWithStreamingResponse(self)
+
+ async def run(
+ self,
+ *,
+ grader: grader_run_params.Grader,
+ model_sample: str,
+ reference_answer: Union[str, Iterable[object], float, object],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> GraderRunResponse:
+ """
+ Run a grader.
+
+ Args:
+ grader: The grader used for the fine-tuning job.
+
+ model_sample: The model sample to be evaluated.
+
+ reference_answer: The reference answer for the evaluation.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/fine_tuning/alpha/graders/run",
+ body=await async_maybe_transform(
+ {
+ "grader": grader,
+ "model_sample": model_sample,
+ "reference_answer": reference_answer,
+ },
+ grader_run_params.GraderRunParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=GraderRunResponse,
+ )
+
+ async def validate(
+ self,
+ *,
+ grader: grader_validate_params.Grader,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> GraderValidateResponse:
+ """
+ Validate a grader.
+
+ Args:
+ grader: The grader used for the fine-tuning job.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/fine_tuning/alpha/graders/validate",
+ body=await async_maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=GraderValidateResponse,
+ )
+
+
+class GradersWithRawResponse:
+ def __init__(self, graders: Graders) -> None:
+ self._graders = graders
+
+ self.run = _legacy_response.to_raw_response_wrapper(
+ graders.run,
+ )
+ self.validate = _legacy_response.to_raw_response_wrapper(
+ graders.validate,
+ )
+
+
+class AsyncGradersWithRawResponse:
+ def __init__(self, graders: AsyncGraders) -> None:
+ self._graders = graders
+
+ self.run = _legacy_response.async_to_raw_response_wrapper(
+ graders.run,
+ )
+ self.validate = _legacy_response.async_to_raw_response_wrapper(
+ graders.validate,
+ )
+
+
+class GradersWithStreamingResponse:
+ def __init__(self, graders: Graders) -> None:
+ self._graders = graders
+
+ self.run = to_streamed_response_wrapper(
+ graders.run,
+ )
+ self.validate = to_streamed_response_wrapper(
+ graders.validate,
+ )
+
+
+class AsyncGradersWithStreamingResponse:
+ def __init__(self, graders: AsyncGraders) -> None:
+ self._graders = graders
+
+ self.run = async_to_streamed_response_wrapper(
+ graders.run,
+ )
+ self.validate = async_to_streamed_response_wrapper(
+ graders.validate,
+ )
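The two new endpoints are plain POSTs to `/fine_tuning/alpha/graders/run` and `/fine_tuning/alpha/graders/validate`. A minimal usage sketch for `run` (only the method name and keyword arguments come from this diff; the grader definition and sample values are illustrative):

from openai import OpenAI

client = OpenAI()

result = client.fine_tuning.alpha.graders.run(
    grader={
        "type": "string_check",
        "name": "exact-match",                   # illustrative grader definition
        "input": "{{sample.output_text}}",
        "operation": "eq",
        "reference": "{{item.reference_answer}}",
    },
    model_sample="Paris",
    reference_answer="Paris",
)
print(result.reward)                    # float reward from GraderRunResponse
print(result.metadata.execution_time)   # per-run execution metadata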
src/openai/resources/fine_tuning/jobs/jobs.py
@@ -345,6 +345,72 @@ class Jobs(SyncAPIResource):
model=FineTuningJobEvent,
)
+ def pause(
+ self,
+ fine_tuning_job_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FineTuningJob:
+ """
+ Pause a fine-tune job.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not fine_tuning_job_id:
+ raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+ return self._post(
+ f"/fine_tuning/jobs/{fine_tuning_job_id}/pause",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FineTuningJob,
+ )
+
+ def resume(
+ self,
+ fine_tuning_job_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FineTuningJob:
+ """
+ Resume a fine-tune job.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not fine_tuning_job_id:
+ raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+ return self._post(
+ f"/fine_tuning/jobs/{fine_tuning_job_id}/resume",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FineTuningJob,
+ )
+
class AsyncJobs(AsyncAPIResource):
@cached_property
@@ -657,6 +723,72 @@ class AsyncJobs(AsyncAPIResource):
model=FineTuningJobEvent,
)
+ async def pause(
+ self,
+ fine_tuning_job_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FineTuningJob:
+ """
+ Pause a fine-tune job.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not fine_tuning_job_id:
+ raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+ return await self._post(
+ f"/fine_tuning/jobs/{fine_tuning_job_id}/pause",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FineTuningJob,
+ )
+
+ async def resume(
+ self,
+ fine_tuning_job_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FineTuningJob:
+ """
+ Resume a fine-tune job.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not fine_tuning_job_id:
+ raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+ return await self._post(
+ f"/fine_tuning/jobs/{fine_tuning_job_id}/resume",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FineTuningJob,
+ )
+
class JobsWithRawResponse:
def __init__(self, jobs: Jobs) -> None:
@@ -677,6 +809,12 @@ class JobsWithRawResponse:
self.list_events = _legacy_response.to_raw_response_wrapper(
jobs.list_events,
)
+ self.pause = _legacy_response.to_raw_response_wrapper(
+ jobs.pause,
+ )
+ self.resume = _legacy_response.to_raw_response_wrapper(
+ jobs.resume,
+ )
@cached_property
def checkpoints(self) -> CheckpointsWithRawResponse:
@@ -702,6 +840,12 @@ class AsyncJobsWithRawResponse:
self.list_events = _legacy_response.async_to_raw_response_wrapper(
jobs.list_events,
)
+ self.pause = _legacy_response.async_to_raw_response_wrapper(
+ jobs.pause,
+ )
+ self.resume = _legacy_response.async_to_raw_response_wrapper(
+ jobs.resume,
+ )
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
@@ -727,6 +871,12 @@ class JobsWithStreamingResponse:
self.list_events = to_streamed_response_wrapper(
jobs.list_events,
)
+ self.pause = to_streamed_response_wrapper(
+ jobs.pause,
+ )
+ self.resume = to_streamed_response_wrapper(
+ jobs.resume,
+ )
@cached_property
def checkpoints(self) -> CheckpointsWithStreamingResponse:
@@ -752,6 +902,12 @@ class AsyncJobsWithStreamingResponse:
self.list_events = async_to_streamed_response_wrapper(
jobs.list_events,
)
+ self.pause = async_to_streamed_response_wrapper(
+ jobs.pause,
+ )
+ self.resume = async_to_streamed_response_wrapper(
+ jobs.resume,
+ )
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
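`pause` and `resume` are simple POSTs keyed on the job ID, and both return the updated `FineTuningJob`. A short sketch (the job ID is a placeholder):

from openai import OpenAI

client = OpenAI()

job = client.fine_tuning.jobs.pause("ftjob-abc123")   # placeholder job ID
print(job.status)

job = client.fine_tuning.jobs.resume("ftjob-abc123")
print(job.status)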
src/openai/resources/fine_tuning/__init__.py
@@ -8,6 +8,14 @@ from .jobs import (
JobsWithStreamingResponse,
AsyncJobsWithStreamingResponse,
)
+from .alpha import (
+ Alpha,
+ AsyncAlpha,
+ AlphaWithRawResponse,
+ AsyncAlphaWithRawResponse,
+ AlphaWithStreamingResponse,
+ AsyncAlphaWithStreamingResponse,
+)
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
@@ -38,6 +46,12 @@ __all__ = [
"AsyncCheckpointsWithRawResponse",
"CheckpointsWithStreamingResponse",
"AsyncCheckpointsWithStreamingResponse",
+ "Alpha",
+ "AsyncAlpha",
+ "AlphaWithRawResponse",
+ "AsyncAlphaWithRawResponse",
+ "AlphaWithStreamingResponse",
+ "AsyncAlphaWithStreamingResponse",
"FineTuning",
"AsyncFineTuning",
"FineTuningWithRawResponse",
src/openai/resources/fine_tuning/fine_tuning.py
@@ -12,6 +12,14 @@ from .jobs.jobs import (
AsyncJobsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
+from .alpha.alpha import (
+ Alpha,
+ AsyncAlpha,
+ AlphaWithRawResponse,
+ AsyncAlphaWithRawResponse,
+ AlphaWithStreamingResponse,
+ AsyncAlphaWithStreamingResponse,
+)
from .checkpoints.checkpoints import (
Checkpoints,
AsyncCheckpoints,
@@ -33,6 +41,10 @@ class FineTuning(SyncAPIResource):
def checkpoints(self) -> Checkpoints:
return Checkpoints(self._client)
+ @cached_property
+ def alpha(self) -> Alpha:
+ return Alpha(self._client)
+
@cached_property
def with_raw_response(self) -> FineTuningWithRawResponse:
"""
@@ -62,6 +74,10 @@ class AsyncFineTuning(AsyncAPIResource):
def checkpoints(self) -> AsyncCheckpoints:
return AsyncCheckpoints(self._client)
+ @cached_property
+ def alpha(self) -> AsyncAlpha:
+ return AsyncAlpha(self._client)
+
@cached_property
def with_raw_response(self) -> AsyncFineTuningWithRawResponse:
"""
@@ -94,6 +110,10 @@ class FineTuningWithRawResponse:
def checkpoints(self) -> CheckpointsWithRawResponse:
return CheckpointsWithRawResponse(self._fine_tuning.checkpoints)
+ @cached_property
+ def alpha(self) -> AlphaWithRawResponse:
+ return AlphaWithRawResponse(self._fine_tuning.alpha)
+
class AsyncFineTuningWithRawResponse:
def __init__(self, fine_tuning: AsyncFineTuning) -> None:
@@ -107,6 +127,10 @@ class AsyncFineTuningWithRawResponse:
def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
return AsyncCheckpointsWithRawResponse(self._fine_tuning.checkpoints)
+ @cached_property
+ def alpha(self) -> AsyncAlphaWithRawResponse:
+ return AsyncAlphaWithRawResponse(self._fine_tuning.alpha)
+
class FineTuningWithStreamingResponse:
def __init__(self, fine_tuning: FineTuning) -> None:
@@ -120,6 +144,10 @@ class FineTuningWithStreamingResponse:
def checkpoints(self) -> CheckpointsWithStreamingResponse:
return CheckpointsWithStreamingResponse(self._fine_tuning.checkpoints)
+ @cached_property
+ def alpha(self) -> AlphaWithStreamingResponse:
+ return AlphaWithStreamingResponse(self._fine_tuning.alpha)
+
class AsyncFineTuningWithStreamingResponse:
def __init__(self, fine_tuning: AsyncFineTuning) -> None:
@@ -132,3 +160,7 @@ class AsyncFineTuningWithStreamingResponse:
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
return AsyncCheckpointsWithStreamingResponse(self._fine_tuning.checkpoints)
+
+ @cached_property
+ def alpha(self) -> AsyncAlphaWithStreamingResponse:
+ return AsyncAlphaWithStreamingResponse(self._fine_tuning.alpha)
src/openai/types/fine_tuning/alpha/__init__.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .grader_run_params import GraderRunParams as GraderRunParams
+from .grader_run_response import GraderRunResponse as GraderRunResponse
+from .grader_validate_params import GraderValidateParams as GraderValidateParams
+from .grader_validate_response import GraderValidateResponse as GraderValidateResponse
src/openai/types/fine_tuning/alpha/grader_run_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from ...graders.multi_grader_param import MultiGraderParam
+from ...graders.python_grader_param import PythonGraderParam
+from ...graders.score_model_grader_param import ScoreModelGraderParam
+from ...graders.string_check_grader_param import StringCheckGraderParam
+from ...graders.text_similarity_grader_param import TextSimilarityGraderParam
+
+__all__ = ["GraderRunParams", "Grader"]
+
+
+class GraderRunParams(TypedDict, total=False):
+ grader: Required[Grader]
+ """The grader used for the fine-tuning job."""
+
+ model_sample: Required[str]
+ """The model sample to be evaluated."""
+
+ reference_answer: Required[Union[str, Iterable[object], float, object]]
+ """The reference answer for the evaluation."""
+
+
+Grader: TypeAlias = Union[
+ StringCheckGraderParam, TextSimilarityGraderParam, PythonGraderParam, ScoreModelGraderParam, MultiGraderParam
+]
src/openai/types/fine_tuning/alpha/grader_run_response.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from pydantic import Field as FieldInfo
+
+from ...._models import BaseModel
+
+__all__ = ["GraderRunResponse", "Metadata", "MetadataErrors"]
+
+
+class MetadataErrors(BaseModel):
+ formula_parse_error: bool
+
+ invalid_variable_error: bool
+
+ api_model_grader_parse_error: bool = FieldInfo(alias="model_grader_parse_error")
+
+ api_model_grader_refusal_error: bool = FieldInfo(alias="model_grader_refusal_error")
+
+ api_model_grader_server_error: bool = FieldInfo(alias="model_grader_server_error")
+
+ api_model_grader_server_error_details: Optional[str] = FieldInfo(
+ alias="model_grader_server_error_details", default=None
+ )
+
+ other_error: bool
+
+ python_grader_runtime_error: bool
+
+ python_grader_runtime_error_details: Optional[str] = None
+
+ python_grader_server_error: bool
+
+ python_grader_server_error_type: Optional[str] = None
+
+ sample_parse_error: bool
+
+ truncated_observation_error: bool
+
+ unresponsive_reward_error: bool
+
+
+class Metadata(BaseModel):
+ errors: MetadataErrors
+
+ execution_time: float
+
+ name: str
+
+ sampled_model_name: Optional[str] = None
+
+ scores: Dict[str, object]
+
+ token_usage: Optional[int] = None
+
+ type: str
+
+
+class GraderRunResponse(BaseModel):
+ metadata: Metadata
+
+ api_model_grader_token_usage_per_model: Dict[str, object] = FieldInfo(alias="model_grader_token_usage_per_model")
+
+ reward: float
+
+ sub_rewards: Dict[str, object]
src/openai/types/fine_tuning/alpha/grader_validate_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from ...graders.multi_grader_param import MultiGraderParam
+from ...graders.python_grader_param import PythonGraderParam
+from ...graders.score_model_grader_param import ScoreModelGraderParam
+from ...graders.string_check_grader_param import StringCheckGraderParam
+from ...graders.text_similarity_grader_param import TextSimilarityGraderParam
+
+__all__ = ["GraderValidateParams", "Grader"]
+
+
+class GraderValidateParams(TypedDict, total=False):
+ grader: Required[Grader]
+ """The grader used for the fine-tuning job."""
+
+
+Grader: TypeAlias = Union[
+ StringCheckGraderParam, TextSimilarityGraderParam, PythonGraderParam, ScoreModelGraderParam, MultiGraderParam
+]
src/openai/types/fine_tuning/alpha/grader_validate_response.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import TypeAlias
+
+from ...._models import BaseModel
+from ...graders.multi_grader import MultiGrader
+from ...graders.python_grader import PythonGrader
+from ...graders.score_model_grader import ScoreModelGrader
+from ...graders.string_check_grader import StringCheckGrader
+from ...graders.text_similarity_grader import TextSimilarityGrader
+
+__all__ = ["GraderValidateResponse", "Grader"]
+
+Grader: TypeAlias = Union[StringCheckGrader, TextSimilarityGrader, PythonGrader, ScoreModelGrader, MultiGrader]
+
+
+class GraderValidateResponse(BaseModel):
+ grader: Optional[Grader] = None
+ """The grader used for the fine-tuning job."""
src/openai/types/fine_tuning/__init__.py
@@ -2,13 +2,25 @@
from __future__ import annotations
+from .dpo_method import DpoMethod as DpoMethod
from .fine_tuning_job import FineTuningJob as FineTuningJob
from .job_list_params import JobListParams as JobListParams
+from .dpo_method_param import DpoMethodParam as DpoMethodParam
from .job_create_params import JobCreateParams as JobCreateParams
+from .supervised_method import SupervisedMethod as SupervisedMethod
+from .dpo_hyperparameters import DpoHyperparameters as DpoHyperparameters
+from .reinforcement_method import ReinforcementMethod as ReinforcementMethod
from .fine_tuning_job_event import FineTuningJobEvent as FineTuningJobEvent
from .job_list_events_params import JobListEventsParams as JobListEventsParams
+from .supervised_method_param import SupervisedMethodParam as SupervisedMethodParam
+from .dpo_hyperparameters_param import DpoHyperparametersParam as DpoHyperparametersParam
+from .reinforcement_method_param import ReinforcementMethodParam as ReinforcementMethodParam
+from .supervised_hyperparameters import SupervisedHyperparameters as SupervisedHyperparameters
from .fine_tuning_job_integration import FineTuningJobIntegration as FineTuningJobIntegration
+from .reinforcement_hyperparameters import ReinforcementHyperparameters as ReinforcementHyperparameters
+from .supervised_hyperparameters_param import SupervisedHyperparametersParam as SupervisedHyperparametersParam
from .fine_tuning_job_wandb_integration import FineTuningJobWandbIntegration as FineTuningJobWandbIntegration
+from .reinforcement_hyperparameters_param import ReinforcementHyperparametersParam as ReinforcementHyperparametersParam
from .fine_tuning_job_wandb_integration_object import (
FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject,
)
src/openai/types/fine_tuning/dpo_hyperparameters.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["DpoHyperparameters"]
+
+
+class DpoHyperparameters(BaseModel):
+ batch_size: Union[Literal["auto"], int, None] = None
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ beta: Union[Literal["auto"], float, None] = None
+ """The beta value for the DPO method.
+
+ A higher beta value will increase the weight of the penalty between the policy
+ and reference model.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int, None] = None
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
src/openai/types/fine_tuning/dpo_hyperparameters_param.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["DpoHyperparametersParam"]
+
+
+class DpoHyperparametersParam(TypedDict, total=False):
+ batch_size: Union[Literal["auto"], int]
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ beta: Union[Literal["auto"], float]
+ """The beta value for the DPO method.
+
+ A higher beta value will increase the weight of the penalty between the policy
+ and reference model.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float]
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int]
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
src/openai/types/fine_tuning/dpo_method.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .dpo_hyperparameters import DpoHyperparameters
+
+__all__ = ["DpoMethod"]
+
+
+class DpoMethod(BaseModel):
+ hyperparameters: Optional[DpoHyperparameters] = None
+ """The hyperparameters used for the DPO fine-tuning job."""
src/openai/types/fine_tuning/dpo_method_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .dpo_hyperparameters_param import DpoHyperparametersParam
+
+__all__ = ["DpoMethodParam"]
+
+
+class DpoMethodParam(TypedDict, total=False):
+ hyperparameters: DpoHyperparametersParam
+ """The hyperparameters used for the DPO fine-tuning job."""
src/openai/types/fine_tuning/fine_tuning_job.py
@@ -4,19 +4,13 @@ from typing import List, Union, Optional
from typing_extensions import Literal
from ..._models import BaseModel
+from .dpo_method import DpoMethod
from ..shared.metadata import Metadata
+from .supervised_method import SupervisedMethod
+from .reinforcement_method import ReinforcementMethod
from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject
-__all__ = [
- "FineTuningJob",
- "Error",
- "Hyperparameters",
- "Method",
- "MethodDpo",
- "MethodDpoHyperparameters",
- "MethodSupervised",
- "MethodSupervisedHyperparameters",
-]
+__all__ = ["FineTuningJob", "Error", "Hyperparameters", "Method"]
class Error(BaseModel):
@@ -54,74 +48,18 @@ class Hyperparameters(BaseModel):
"""
-class MethodDpoHyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, None] = None
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- beta: Union[Literal["auto"], float, None] = None
- """The beta value for the DPO method.
-
- A higher beta value will increase the weight of the penalty between the policy
- and reference model.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float, None] = None
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int, None] = None
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class MethodDpo(BaseModel):
- hyperparameters: Optional[MethodDpoHyperparameters] = None
- """The hyperparameters used for the fine-tuning job."""
-
-
-class MethodSupervisedHyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, None] = None
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float, None] = None
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int, None] = None
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class MethodSupervised(BaseModel):
- hyperparameters: Optional[MethodSupervisedHyperparameters] = None
- """The hyperparameters used for the fine-tuning job."""
-
-
class Method(BaseModel):
- dpo: Optional[MethodDpo] = None
+ type: Literal["supervised", "dpo", "reinforcement"]
+ """The type of method. Is either `supervised`, `dpo`, or `reinforcement`."""
+
+ dpo: Optional[DpoMethod] = None
"""Configuration for the DPO fine-tuning method."""
- supervised: Optional[MethodSupervised] = None
- """Configuration for the supervised fine-tuning method."""
+ reinforcement: Optional[ReinforcementMethod] = None
+ """Configuration for the reinforcement fine-tuning method."""
- type: Optional[Literal["supervised", "dpo"]] = None
- """The type of method. Is either `supervised` or `dpo`."""
+ supervised: Optional[SupervisedMethod] = None
+ """Configuration for the supervised fine-tuning method."""
class FineTuningJob(BaseModel):
src/openai/types/fine_tuning/job_create_params.py
@@ -5,19 +5,12 @@ from __future__ import annotations
from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
+from .dpo_method_param import DpoMethodParam
from ..shared_params.metadata import Metadata
+from .supervised_method_param import SupervisedMethodParam
+from .reinforcement_method_param import ReinforcementMethodParam
-__all__ = [
- "JobCreateParams",
- "Hyperparameters",
- "Integration",
- "IntegrationWandb",
- "Method",
- "MethodDpo",
- "MethodDpoHyperparameters",
- "MethodSupervised",
- "MethodSupervisedHyperparameters",
-]
+__all__ = ["JobCreateParams", "Hyperparameters", "Integration", "IntegrationWandb", "Method"]
class JobCreateParams(TypedDict, total=False):
@@ -166,71 +159,15 @@ class Integration(TypedDict, total=False):
"""
-class MethodDpoHyperparameters(TypedDict, total=False):
- batch_size: Union[Literal["auto"], int]
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- beta: Union[Literal["auto"], float]
- """The beta value for the DPO method.
-
- A higher beta value will increase the weight of the penalty between the policy
- and reference model.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float]
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class MethodDpo(TypedDict, total=False):
- hyperparameters: MethodDpoHyperparameters
- """The hyperparameters used for the fine-tuning job."""
-
-
-class MethodSupervisedHyperparameters(TypedDict, total=False):
- batch_size: Union[Literal["auto"], int]
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float]
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class MethodSupervised(TypedDict, total=False):
- hyperparameters: MethodSupervisedHyperparameters
- """The hyperparameters used for the fine-tuning job."""
-
-
class Method(TypedDict, total=False):
- dpo: MethodDpo
+ type: Required[Literal["supervised", "dpo", "reinforcement"]]
+ """The type of method. Is either `supervised`, `dpo`, or `reinforcement`."""
+
+ dpo: DpoMethodParam
"""Configuration for the DPO fine-tuning method."""
- supervised: MethodSupervised
- """Configuration for the supervised fine-tuning method."""
+ reinforcement: ReinforcementMethodParam
+ """Configuration for the reinforcement fine-tuning method."""
- type: Literal["supervised", "dpo"]
- """The type of method. Is either `supervised` or `dpo`."""
+ supervised: SupervisedMethodParam
+ """Configuration for the supervised fine-tuning method."""
src/openai/types/fine_tuning/reinforcement_hyperparameters.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ReinforcementHyperparameters"]
+
+
+class ReinforcementHyperparameters(BaseModel):
+ batch_size: Union[Literal["auto"], int, None] = None
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ compute_multiplier: Union[Literal["auto"], float, None] = None
+ """
+ Multiplier on amount of compute used for exploring search space during training.
+ """
+
+ eval_interval: Union[Literal["auto"], int, None] = None
+ """The number of training steps between evaluation runs."""
+
+ eval_samples: Union[Literal["auto"], int, None] = None
+ """Number of evaluation samples to generate per training step."""
+
+ learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int, None] = None
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
+
+ reasoning_effort: Optional[Literal["default", "low", "medium", "high"]] = None
+ """Level of reasoning effort."""
src/openai/types/fine_tuning/reinforcement_hyperparameters_param.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ReinforcementHyperparametersParam"]
+
+
+class ReinforcementHyperparametersParam(TypedDict, total=False):
+ batch_size: Union[Literal["auto"], int]
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ compute_multiplier: Union[Literal["auto"], float]
+ """
+ Multiplier on amount of compute used for exploring search space during training.
+ """
+
+ eval_interval: Union[Literal["auto"], int]
+ """The number of training steps between evaluation runs."""
+
+ eval_samples: Union[Literal["auto"], int]
+ """Number of evaluation samples to generate per training step."""
+
+ learning_rate_multiplier: Union[Literal["auto"], float]
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int]
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
+
+ reasoning_effort: Literal["default", "low", "medium", "high"]
+ """Level of reasoning effort."""
src/openai/types/fine_tuning/reinforcement_method.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import TypeAlias
+
+from ..._models import BaseModel
+from ..graders.multi_grader import MultiGrader
+from ..graders.python_grader import PythonGrader
+from ..graders.score_model_grader import ScoreModelGrader
+from ..graders.string_check_grader import StringCheckGrader
+from .reinforcement_hyperparameters import ReinforcementHyperparameters
+from ..graders.text_similarity_grader import TextSimilarityGrader
+
+__all__ = ["ReinforcementMethod", "Grader"]
+
+Grader: TypeAlias = Union[StringCheckGrader, TextSimilarityGrader, PythonGrader, ScoreModelGrader, MultiGrader]
+
+
+class ReinforcementMethod(BaseModel):
+ grader: Grader
+ """The grader used for the fine-tuning job."""
+
+ hyperparameters: Optional[ReinforcementHyperparameters] = None
+ """The hyperparameters used for the reinforcement fine-tuning job."""
src/openai/types/fine_tuning/reinforcement_method_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from ..graders.multi_grader_param import MultiGraderParam
+from ..graders.python_grader_param import PythonGraderParam
+from ..graders.score_model_grader_param import ScoreModelGraderParam
+from ..graders.string_check_grader_param import StringCheckGraderParam
+from .reinforcement_hyperparameters_param import ReinforcementHyperparametersParam
+from ..graders.text_similarity_grader_param import TextSimilarityGraderParam
+
+__all__ = ["ReinforcementMethodParam", "Grader"]
+
+Grader: TypeAlias = Union[
+ StringCheckGraderParam, TextSimilarityGraderParam, PythonGraderParam, ScoreModelGraderParam, MultiGraderParam
+]
+
+
+class ReinforcementMethodParam(TypedDict, total=False):
+ grader: Required[Grader]
+ """The grader used for the fine-tuning job."""
+
+ hyperparameters: ReinforcementHyperparametersParam
+ """The hyperparameters used for the reinforcement fine-tuning job."""
src/openai/types/fine_tuning/supervised_hyperparameters.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SupervisedHyperparameters"]
+
+
+class SupervisedHyperparameters(BaseModel):
+ batch_size: Union[Literal["auto"], int, None] = None
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int, None] = None
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
src/openai/types/fine_tuning/supervised_hyperparameters_param.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["SupervisedHyperparametersParam"]
+
+
+class SupervisedHyperparametersParam(TypedDict, total=False):
+ batch_size: Union[Literal["auto"], int]
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float]
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int]
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
src/openai/types/fine_tuning/supervised_method.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .supervised_hyperparameters import SupervisedHyperparameters
+
+__all__ = ["SupervisedMethod"]
+
+
+class SupervisedMethod(BaseModel):
+ hyperparameters: Optional[SupervisedHyperparameters] = None
+ """The hyperparameters used for the fine-tuning job."""
src/openai/types/fine_tuning/supervised_method_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .supervised_hyperparameters_param import SupervisedHyperparametersParam
+
+__all__ = ["SupervisedMethodParam"]
+
+
+class SupervisedMethodParam(TypedDict, total=False):
+ hyperparameters: SupervisedHyperparametersParam
+ """The hyperparameters used for the fine-tuning job."""
src/openai/types/graders/__init__.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .multi_grader import MultiGrader as MultiGrader
+from .python_grader import PythonGrader as PythonGrader
+from .label_model_grader import LabelModelGrader as LabelModelGrader
+from .multi_grader_param import MultiGraderParam as MultiGraderParam
+from .score_model_grader import ScoreModelGrader as ScoreModelGrader
+from .python_grader_param import PythonGraderParam as PythonGraderParam
+from .string_check_grader import StringCheckGrader as StringCheckGrader
+from .text_similarity_grader import TextSimilarityGrader as TextSimilarityGrader
+from .label_model_grader_param import LabelModelGraderParam as LabelModelGraderParam
+from .score_model_grader_param import ScoreModelGraderParam as ScoreModelGraderParam
+from .string_check_grader_param import StringCheckGraderParam as StringCheckGraderParam
+from .text_similarity_grader_param import TextSimilarityGraderParam as TextSimilarityGraderParam
src/openai/types/eval_label_model_grader.py → src/openai/types/graders/label_model_grader.py
@@ -3,10 +3,10 @@
from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias
-from .._models import BaseModel
-from .responses.response_input_text import ResponseInputText
+from ..._models import BaseModel
+from ..responses.response_input_text import ResponseInputText
-__all__ = ["EvalLabelModelGrader", "Input", "InputContent", "InputContentOutputText"]
+__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText"]
class InputContentOutputText(BaseModel):
@@ -34,7 +34,7 @@ class Input(BaseModel):
"""The type of the message input. Always `message`."""
-class EvalLabelModelGrader(BaseModel):
+class LabelModelGrader(BaseModel):
input: List[Input]
labels: List[str]
src/openai/types/graders/label_model_grader_param.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..responses.response_input_text_param import ResponseInputTextParam
+
+__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText"]
+
+
+class InputContentOutputText(TypedDict, total=False):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText]
+
+
+class Input(TypedDict, total=False):
+ content: Required[InputContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
+
+
+class LabelModelGraderParam(TypedDict, total=False):
+ input: Required[Iterable[Input]]
+
+ labels: Required[List[str]]
+ """The labels to assign to each item in the evaluation."""
+
+ model: Required[str]
+ """The model to use for the evaluation. Must support structured outputs."""
+
+ name: Required[str]
+ """The name of the grader."""
+
+ passing_labels: Required[List[str]]
+ """The labels that indicate a passing result. Must be a subset of labels."""
+
+ type: Required[Literal["label_model"]]
+ """The object type, which is always `label_model`."""
src/openai/types/graders/multi_grader.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Union
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from .python_grader import PythonGrader
+from .label_model_grader import LabelModelGrader
+from .score_model_grader import ScoreModelGrader
+from .string_check_grader import StringCheckGrader
+from .text_similarity_grader import TextSimilarityGrader
+
+__all__ = ["MultiGrader", "Graders"]
+
+Graders: TypeAlias = Union[StringCheckGrader, TextSimilarityGrader, PythonGrader, ScoreModelGrader, LabelModelGrader]
+
+
+class MultiGrader(BaseModel):
+ calculate_output: str
+ """A formula to calculate the output based on grader results."""
+
+ graders: Dict[str, Graders]
+
+ name: str
+ """The name of the grader."""
+
+ type: Literal["multi"]
+ """The type of grader."""
src/openai/types/graders/multi_grader_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .python_grader_param import PythonGraderParam
+from .label_model_grader_param import LabelModelGraderParam
+from .score_model_grader_param import ScoreModelGraderParam
+from .string_check_grader_param import StringCheckGraderParam
+from .text_similarity_grader_param import TextSimilarityGraderParam
+
+__all__ = ["MultiGraderParam", "Graders"]
+
+Graders: TypeAlias = Union[
+ StringCheckGraderParam, TextSimilarityGraderParam, PythonGraderParam, ScoreModelGraderParam, LabelModelGraderParam
+]
+
+
+class MultiGraderParam(TypedDict, total=False):
+ calculate_output: Required[str]
+ """A formula to calculate the output based on grader results."""
+
+ graders: Required[Dict[str, Graders]]
+
+ name: Required[str]
+ """The name of the grader."""
+
+ type: Required[Literal["multi"]]
+ """The type of grader."""
src/openai/types/graders/python_grader.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["PythonGrader"]
+
+
+class PythonGrader(BaseModel):
+ name: str
+ """The name of the grader."""
+
+ source: str
+ """The source code of the python script."""
+
+ type: Literal["python"]
+ """The object type, which is always `python`."""
+
+ image_tag: Optional[str] = None
+ """The image tag to use for the python script."""
src/openai/types/graders/python_grader_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["PythonGraderParam"]
+
+
+class PythonGraderParam(TypedDict, total=False):
+ name: Required[str]
+ """The name of the grader."""
+
+ source: Required[str]
+ """The source code of the python script."""
+
+ type: Required[Literal["python"]]
+ """The object type, which is always `python`."""
+
+ image_tag: str
+ """The image tag to use for the python script."""
src/openai/types/graders/score_model_grader.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from ..responses.response_input_text import ResponseInputText
+
+__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText"]
+
+
+class InputContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText]
+
+
+class Input(BaseModel):
+ content: InputContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+class ScoreModelGrader(BaseModel):
+ input: List[Input]
+ """The input text. This may include template strings."""
+
+ model: str
+ """The model to use for the evaluation."""
+
+ name: str
+ """The name of the grader."""
+
+ type: Literal["score_model"]
+ """The object type, which is always `score_model`."""
+
+ range: Optional[List[float]] = None
+ """The range of the score. Defaults to `[0, 1]`."""
+
+ sampling_params: Optional[object] = None
+ """The sampling parameters for the model."""
src/openai/types/graders/score_model_grader_param.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..responses.response_input_text_param import ResponseInputTextParam
+
+__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText"]
+
+
+class InputContentOutputText(TypedDict, total=False):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText]
+
+
+class Input(TypedDict, total=False):
+ content: Required[InputContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
+
+
+class ScoreModelGraderParam(TypedDict, total=False):
+ input: Required[Iterable[Input]]
+ """The input text. This may include template strings."""
+
+ model: Required[str]
+ """The model to use for the evaluation."""
+
+ name: Required[str]
+ """The name of the grader."""
+
+ type: Required[Literal["score_model"]]
+ """The object type, which is always `score_model`."""
+
+ range: Iterable[float]
+ """The range of the score. Defaults to `[0, 1]`."""
+
+ sampling_params: object
+ """The sampling parameters for the model."""
src/openai/types/eval_string_check_grader.py → src/openai/types/graders/string_check_grader.py
@@ -2,12 +2,12 @@
from typing_extensions import Literal
-from .._models import BaseModel
+from ..._models import BaseModel
-__all__ = ["EvalStringCheckGrader"]
+__all__ = ["StringCheckGrader"]
-class EvalStringCheckGrader(BaseModel):
+class StringCheckGrader(BaseModel):
input: str
"""The input text. This may include template strings."""
src/openai/types/eval_string_check_grader_param.py → src/openai/types/graders/string_check_grader_param.py
@@ -4,10 +4,10 @@ from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
-__all__ = ["EvalStringCheckGraderParam"]
+__all__ = ["StringCheckGraderParam"]
-class EvalStringCheckGraderParam(TypedDict, total=False):
+class StringCheckGraderParam(TypedDict, total=False):
input: Required[str]
"""The input text. This may include template strings."""
src/openai/types/eval_text_similarity_grader.py → src/openai/types/graders/text_similarity_grader.py
@@ -1,14 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
from typing_extensions import Literal
-from .._models import BaseModel
+from ..._models import BaseModel
-__all__ = ["EvalTextSimilarityGrader"]
+__all__ = ["TextSimilarityGrader"]
-class EvalTextSimilarityGrader(BaseModel):
+class TextSimilarityGrader(BaseModel):
evaluation_metric: Literal[
"fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l"
]
@@ -21,14 +20,11 @@ class EvalTextSimilarityGrader(BaseModel):
input: str
"""The text being graded."""
- pass_threshold: float
- """A float score where a value greater than or equal indicates a passing grade."""
+ name: str
+ """The name of the grader."""
reference: str
"""The text being graded against."""
type: Literal["text_similarity"]
"""The type of grader."""
-
- name: Optional[str] = None
- """The name of the grader."""
src/openai/types/eval_text_similarity_grader_param.py → src/openai/types/graders/text_similarity_grader_param.py
@@ -4,10 +4,10 @@ from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
-__all__ = ["EvalTextSimilarityGraderParam"]
+__all__ = ["TextSimilarityGraderParam"]
-class EvalTextSimilarityGraderParam(TypedDict, total=False):
+class TextSimilarityGraderParam(TypedDict, total=False):
evaluation_metric: Required[
Literal[
"fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l"
@@ -22,14 +22,11 @@ class EvalTextSimilarityGraderParam(TypedDict, total=False):
input: Required[str]
"""The text being graded."""
- pass_threshold: Required[float]
- """A float score where a value greater than or equal indicates a passing grade."""
+ name: Required[str]
+ """The name of the grader."""
reference: Required[str]
"""The text being graded against."""
type: Required[Literal["text_similarity"]]
"""The type of grader."""
-
- name: str
- """The name of the grader."""
src/openai/types/__init__.py
@@ -61,9 +61,7 @@ from .eval_retrieve_response import EvalRetrieveResponse as EvalRetrieveResponse
from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
-from .eval_label_model_grader import EvalLabelModelGrader as EvalLabelModelGrader
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
-from .eval_string_check_grader import EvalStringCheckGrader as EvalStringCheckGrader
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
@@ -71,7 +69,6 @@ from .moderation_create_response import ModerationCreateResponse as ModerationCr
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
-from .eval_text_similarity_grader import EvalTextSimilarityGrader as EvalTextSimilarityGrader
from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
@@ -79,10 +76,8 @@ from .websocket_connection_options import WebsocketConnectionOptions as Websocke
from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig
-from .eval_string_check_grader_param import EvalStringCheckGraderParam as EvalStringCheckGraderParam
from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
-from .eval_text_similarity_grader_param import EvalTextSimilarityGraderParam as EvalTextSimilarityGraderParam
from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject
from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam
src/openai/types/eval_create_params.py
@@ -6,15 +6,17 @@ from typing import Dict, List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .shared_params.metadata import Metadata
-from .eval_string_check_grader_param import EvalStringCheckGraderParam
-from .eval_text_similarity_grader_param import EvalTextSimilarityGraderParam
+from .graders.python_grader_param import PythonGraderParam
+from .graders.score_model_grader_param import ScoreModelGraderParam
+from .graders.string_check_grader_param import StringCheckGraderParam
from .responses.response_input_text_param import ResponseInputTextParam
+from .graders.text_similarity_grader_param import TextSimilarityGraderParam
__all__ = [
"EvalCreateParams",
"DataSourceConfig",
"DataSourceConfigCustom",
- "DataSourceConfigLogs",
+ "DataSourceConfigStoredCompletions",
"TestingCriterion",
"TestingCriterionLabelModel",
"TestingCriterionLabelModelInput",
@@ -22,11 +24,9 @@ __all__ = [
"TestingCriterionLabelModelInputEvalItem",
"TestingCriterionLabelModelInputEvalItemContent",
"TestingCriterionLabelModelInputEvalItemContentOutputText",
+ "TestingCriterionTextSimilarity",
"TestingCriterionPython",
"TestingCriterionScoreModel",
- "TestingCriterionScoreModelInput",
- "TestingCriterionScoreModelInputContent",
- "TestingCriterionScoreModelInputContentOutputText",
]
@@ -65,15 +65,15 @@ class DataSourceConfigCustom(TypedDict, total=False):
"""
-class DataSourceConfigLogs(TypedDict, total=False):
- type: Required[Literal["logs"]]
- """The type of data source. Always `logs`."""
+class DataSourceConfigStoredCompletions(TypedDict, total=False):
+ type: Required[Literal["stored_completions"]]
+ """The type of data source. Always `stored_completions`."""
metadata: Dict[str, object]
- """Metadata filters for the logs data source."""
+ """Metadata filters for the stored completions data source."""
-DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigLogs]
+DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigStoredCompletions]
class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False):
@@ -139,77 +139,28 @@ class TestingCriterionLabelModel(TypedDict, total=False):
"""The object type, which is always `label_model`."""
-class TestingCriterionPython(TypedDict, total=False):
- name: Required[str]
- """The name of the grader."""
-
- source: Required[str]
- """The source code of the python script."""
-
- type: Required[Literal["python"]]
- """The object type, which is always `python`."""
+class TestingCriterionTextSimilarity(TextSimilarityGraderParam, total=False):
+ __test__ = False
+ pass_threshold: Required[float]
+ """The threshold for the score."""
- image_tag: str
- """The image tag to use for the python script."""
+class TestingCriterionPython(PythonGraderParam, total=False):
+ __test__ = False
pass_threshold: float
"""The threshold for the score."""
-class TestingCriterionScoreModelInputContentOutputText(TypedDict, total=False):
- text: Required[str]
- """The text output from the model."""
-
- type: Required[Literal["output_text"]]
- """The type of the output text. Always `output_text`."""
-
-
-TestingCriterionScoreModelInputContent: TypeAlias = Union[
- str, ResponseInputTextParam, TestingCriterionScoreModelInputContentOutputText
-]
-
-
-class TestingCriterionScoreModelInput(TypedDict, total=False):
- content: Required[TestingCriterionScoreModelInputContent]
- """Text inputs to the model - can contain template strings."""
-
- role: Required[Literal["user", "assistant", "system", "developer"]]
- """The role of the message input.
-
- One of `user`, `assistant`, `system`, or `developer`.
- """
-
- type: Literal["message"]
- """The type of the message input. Always `message`."""
-
-
-class TestingCriterionScoreModel(TypedDict, total=False):
- input: Required[Iterable[TestingCriterionScoreModelInput]]
- """The input text. This may include template strings."""
-
- model: Required[str]
- """The model to use for the evaluation."""
-
- name: Required[str]
- """The name of the grader."""
-
- type: Required[Literal["score_model"]]
- """The object type, which is always `score_model`."""
-
+class TestingCriterionScoreModel(ScoreModelGraderParam, total=False):
+ __test__ = False
pass_threshold: float
"""The threshold for the score."""
- range: Iterable[float]
- """The range of the score. Defaults to `[0, 1]`."""
-
- sampling_params: object
- """The sampling parameters for the model."""
-
TestingCriterion: TypeAlias = Union[
TestingCriterionLabelModel,
- EvalStringCheckGraderParam,
- EvalTextSimilarityGraderParam,
+ StringCheckGraderParam,
+ TestingCriterionTextSimilarity,
TestingCriterionPython,
TestingCriterionScoreModel,
]
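The practical effect of this hunk is that `pass_threshold` now lives on the eval-specific criterion wrappers rather than on the shared grader params. Below is a hedged sketch of what a create call might look like with the reshaped criteria; the eval name, schema, and field values are illustrative only.

```python
from openai import OpenAI

client = OpenAI()

# Illustrative eval: a text-similarity criterion whose pass_threshold is set
# on the TestingCriterionTextSimilarity wrapper defined in this file.
evaluation = client.evals.create(
    name="answer-similarity",
    data_source_config={"type": "custom", "item_schema": {"type": "object"}},
    testing_criteria=[
        {
            "type": "text_similarity",
            "name": "fuzzy",
            "evaluation_metric": "fuzzy_match",
            "input": "{{sample.output_text}}",
            "reference": "{{item.reference}}",
            "pass_threshold": 0.8,
        }
    ],
)
print(evaluation.id)
```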
src/openai/types/eval_create_response.py
@@ -6,22 +6,21 @@ from typing_extensions import Literal, Annotated, TypeAlias
from .._utils import PropertyInfo
from .._models import BaseModel
from .shared.metadata import Metadata
-from .eval_label_model_grader import EvalLabelModelGrader
-from .eval_string_check_grader import EvalStringCheckGrader
-from .eval_text_similarity_grader import EvalTextSimilarityGrader
-from .responses.response_input_text import ResponseInputText
+from .graders.python_grader import PythonGrader
+from .graders.label_model_grader import LabelModelGrader
+from .graders.score_model_grader import ScoreModelGrader
+from .graders.string_check_grader import StringCheckGrader
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
+from .graders.text_similarity_grader import TextSimilarityGrader
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
__all__ = [
"EvalCreateResponse",
"DataSourceConfig",
"TestingCriterion",
- "TestingCriterionPython",
- "TestingCriterionScoreModel",
- "TestingCriterionScoreModelInput",
- "TestingCriterionScoreModelInputContent",
- "TestingCriterionScoreModelInputContentOutputText",
+ "TestingCriterionEvalGraderTextSimilarity",
+ "TestingCriterionEvalGraderPython",
+ "TestingCriterionEvalGraderScoreModel",
]
DataSourceConfig: TypeAlias = Annotated[
@@ -29,86 +28,30 @@ DataSourceConfig: TypeAlias = Annotated[
]
-class TestingCriterionPython(BaseModel):
+class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader):
__test__ = False
- name: str
- """The name of the grader."""
-
- source: str
- """The source code of the python script."""
-
- type: Literal["python"]
- """The object type, which is always `python`."""
-
- image_tag: Optional[str] = None
- """The image tag to use for the python script."""
-
- pass_threshold: Optional[float] = None
+ pass_threshold: float
"""The threshold for the score."""
-class TestingCriterionScoreModelInputContentOutputText(BaseModel):
- __test__ = False
- text: str
- """The text output from the model."""
-
- type: Literal["output_text"]
- """The type of the output text. Always `output_text`."""
-
-
-TestingCriterionScoreModelInputContent: TypeAlias = Union[
- str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
-]
-
-
-class TestingCriterionScoreModelInput(BaseModel):
+class TestingCriterionEvalGraderPython(PythonGrader):
__test__ = False
- content: TestingCriterionScoreModelInputContent
- """Text inputs to the model - can contain template strings."""
-
- role: Literal["user", "assistant", "system", "developer"]
- """The role of the message input.
-
- One of `user`, `assistant`, `system`, or `developer`.
- """
-
- type: Optional[Literal["message"]] = None
- """The type of the message input. Always `message`."""
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
-class TestingCriterionScoreModel(BaseModel):
+class TestingCriterionEvalGraderScoreModel(ScoreModelGrader):
__test__ = False
- input: List[TestingCriterionScoreModelInput]
- """The input text. This may include template strings."""
-
- model: str
- """The model to use for the evaluation."""
-
- name: str
- """The name of the grader."""
-
- type: Literal["score_model"]
- """The object type, which is always `score_model`."""
-
pass_threshold: Optional[float] = None
"""The threshold for the score."""
- range: Optional[List[float]] = None
- """The range of the score. Defaults to `[0, 1]`."""
-
- sampling_params: Optional[object] = None
- """The sampling parameters for the model."""
-
-TestingCriterion: TypeAlias = Annotated[
- Union[
- EvalLabelModelGrader,
- EvalStringCheckGrader,
- EvalTextSimilarityGrader,
- TestingCriterionPython,
- TestingCriterionScoreModel,
- ],
- PropertyInfo(discriminator="type"),
+TestingCriterion: TypeAlias = Union[
+ LabelModelGrader,
+ StringCheckGrader,
+ TestingCriterionEvalGraderTextSimilarity,
+ TestingCriterionEvalGraderPython,
+ TestingCriterionEvalGraderScoreModel,
]
src/openai/types/eval_list_response.py
@@ -6,22 +6,21 @@ from typing_extensions import Literal, Annotated, TypeAlias
from .._utils import PropertyInfo
from .._models import BaseModel
from .shared.metadata import Metadata
-from .eval_label_model_grader import EvalLabelModelGrader
-from .eval_string_check_grader import EvalStringCheckGrader
-from .eval_text_similarity_grader import EvalTextSimilarityGrader
-from .responses.response_input_text import ResponseInputText
+from .graders.python_grader import PythonGrader
+from .graders.label_model_grader import LabelModelGrader
+from .graders.score_model_grader import ScoreModelGrader
+from .graders.string_check_grader import StringCheckGrader
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
+from .graders.text_similarity_grader import TextSimilarityGrader
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
__all__ = [
"EvalListResponse",
"DataSourceConfig",
"TestingCriterion",
- "TestingCriterionPython",
- "TestingCriterionScoreModel",
- "TestingCriterionScoreModelInput",
- "TestingCriterionScoreModelInputContent",
- "TestingCriterionScoreModelInputContentOutputText",
+ "TestingCriterionEvalGraderTextSimilarity",
+ "TestingCriterionEvalGraderPython",
+ "TestingCriterionEvalGraderScoreModel",
]
DataSourceConfig: TypeAlias = Annotated[
@@ -29,86 +28,30 @@ DataSourceConfig: TypeAlias = Annotated[
]
-class TestingCriterionPython(BaseModel):
+class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader):
__test__ = False
- name: str
- """The name of the grader."""
-
- source: str
- """The source code of the python script."""
-
- type: Literal["python"]
- """The object type, which is always `python`."""
-
- image_tag: Optional[str] = None
- """The image tag to use for the python script."""
-
- pass_threshold: Optional[float] = None
+ pass_threshold: float
"""The threshold for the score."""
-class TestingCriterionScoreModelInputContentOutputText(BaseModel):
- __test__ = False
- text: str
- """The text output from the model."""
-
- type: Literal["output_text"]
- """The type of the output text. Always `output_text`."""
-
-
-TestingCriterionScoreModelInputContent: TypeAlias = Union[
- str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
-]
-
-
-class TestingCriterionScoreModelInput(BaseModel):
+class TestingCriterionEvalGraderPython(PythonGrader):
__test__ = False
- content: TestingCriterionScoreModelInputContent
- """Text inputs to the model - can contain template strings."""
-
- role: Literal["user", "assistant", "system", "developer"]
- """The role of the message input.
-
- One of `user`, `assistant`, `system`, or `developer`.
- """
-
- type: Optional[Literal["message"]] = None
- """The type of the message input. Always `message`."""
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
-class TestingCriterionScoreModel(BaseModel):
+class TestingCriterionEvalGraderScoreModel(ScoreModelGrader):
__test__ = False
- input: List[TestingCriterionScoreModelInput]
- """The input text. This may include template strings."""
-
- model: str
- """The model to use for the evaluation."""
-
- name: str
- """The name of the grader."""
-
- type: Literal["score_model"]
- """The object type, which is always `score_model`."""
-
pass_threshold: Optional[float] = None
"""The threshold for the score."""
- range: Optional[List[float]] = None
- """The range of the score. Defaults to `[0, 1]`."""
-
- sampling_params: Optional[object] = None
- """The sampling parameters for the model."""
-
-TestingCriterion: TypeAlias = Annotated[
- Union[
- EvalLabelModelGrader,
- EvalStringCheckGrader,
- EvalTextSimilarityGrader,
- TestingCriterionPython,
- TestingCriterionScoreModel,
- ],
- PropertyInfo(discriminator="type"),
+TestingCriterion: TypeAlias = Union[
+ LabelModelGrader,
+ StringCheckGrader,
+ TestingCriterionEvalGraderTextSimilarity,
+ TestingCriterionEvalGraderPython,
+ TestingCriterionEvalGraderScoreModel,
]
src/openai/types/eval_retrieve_response.py
@@ -6,22 +6,21 @@ from typing_extensions import Literal, Annotated, TypeAlias
from .._utils import PropertyInfo
from .._models import BaseModel
from .shared.metadata import Metadata
-from .eval_label_model_grader import EvalLabelModelGrader
-from .eval_string_check_grader import EvalStringCheckGrader
-from .eval_text_similarity_grader import EvalTextSimilarityGrader
-from .responses.response_input_text import ResponseInputText
+from .graders.python_grader import PythonGrader
+from .graders.label_model_grader import LabelModelGrader
+from .graders.score_model_grader import ScoreModelGrader
+from .graders.string_check_grader import StringCheckGrader
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
+from .graders.text_similarity_grader import TextSimilarityGrader
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
__all__ = [
"EvalRetrieveResponse",
"DataSourceConfig",
"TestingCriterion",
- "TestingCriterionPython",
- "TestingCriterionScoreModel",
- "TestingCriterionScoreModelInput",
- "TestingCriterionScoreModelInputContent",
- "TestingCriterionScoreModelInputContentOutputText",
+ "TestingCriterionEvalGraderTextSimilarity",
+ "TestingCriterionEvalGraderPython",
+ "TestingCriterionEvalGraderScoreModel",
]
DataSourceConfig: TypeAlias = Annotated[
@@ -29,86 +28,30 @@ DataSourceConfig: TypeAlias = Annotated[
]
-class TestingCriterionPython(BaseModel):
+class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader):
__test__ = False
- name: str
- """The name of the grader."""
-
- source: str
- """The source code of the python script."""
-
- type: Literal["python"]
- """The object type, which is always `python`."""
-
- image_tag: Optional[str] = None
- """The image tag to use for the python script."""
-
- pass_threshold: Optional[float] = None
+ pass_threshold: float
"""The threshold for the score."""
-class TestingCriterionScoreModelInputContentOutputText(BaseModel):
- __test__ = False
- text: str
- """The text output from the model."""
-
- type: Literal["output_text"]
- """The type of the output text. Always `output_text`."""
-
-
-TestingCriterionScoreModelInputContent: TypeAlias = Union[
- str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
-]
-
-
-class TestingCriterionScoreModelInput(BaseModel):
+class TestingCriterionEvalGraderPython(PythonGrader):
__test__ = False
- content: TestingCriterionScoreModelInputContent
- """Text inputs to the model - can contain template strings."""
-
- role: Literal["user", "assistant", "system", "developer"]
- """The role of the message input.
-
- One of `user`, `assistant`, `system`, or `developer`.
- """
-
- type: Optional[Literal["message"]] = None
- """The type of the message input. Always `message`."""
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
-class TestingCriterionScoreModel(BaseModel):
+class TestingCriterionEvalGraderScoreModel(ScoreModelGrader):
__test__ = False
- input: List[TestingCriterionScoreModelInput]
- """The input text. This may include template strings."""
-
- model: str
- """The model to use for the evaluation."""
-
- name: str
- """The name of the grader."""
-
- type: Literal["score_model"]
- """The object type, which is always `score_model`."""
-
pass_threshold: Optional[float] = None
"""The threshold for the score."""
- range: Optional[List[float]] = None
- """The range of the score. Defaults to `[0, 1]`."""
-
- sampling_params: Optional[object] = None
- """The sampling parameters for the model."""
-
-TestingCriterion: TypeAlias = Annotated[
- Union[
- EvalLabelModelGrader,
- EvalStringCheckGrader,
- EvalTextSimilarityGrader,
- TestingCriterionPython,
- TestingCriterionScoreModel,
- ],
- PropertyInfo(discriminator="type"),
+TestingCriterion: TypeAlias = Union[
+ LabelModelGrader,
+ StringCheckGrader,
+ TestingCriterionEvalGraderTextSimilarity,
+ TestingCriterionEvalGraderPython,
+ TestingCriterionEvalGraderScoreModel,
]
src/openai/types/eval_update_response.py
@@ -6,22 +6,21 @@ from typing_extensions import Literal, Annotated, TypeAlias
from .._utils import PropertyInfo
from .._models import BaseModel
from .shared.metadata import Metadata
-from .eval_label_model_grader import EvalLabelModelGrader
-from .eval_string_check_grader import EvalStringCheckGrader
-from .eval_text_similarity_grader import EvalTextSimilarityGrader
-from .responses.response_input_text import ResponseInputText
+from .graders.python_grader import PythonGrader
+from .graders.label_model_grader import LabelModelGrader
+from .graders.score_model_grader import ScoreModelGrader
+from .graders.string_check_grader import StringCheckGrader
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
+from .graders.text_similarity_grader import TextSimilarityGrader
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
__all__ = [
"EvalUpdateResponse",
"DataSourceConfig",
"TestingCriterion",
- "TestingCriterionPython",
- "TestingCriterionScoreModel",
- "TestingCriterionScoreModelInput",
- "TestingCriterionScoreModelInputContent",
- "TestingCriterionScoreModelInputContentOutputText",
+ "TestingCriterionEvalGraderTextSimilarity",
+ "TestingCriterionEvalGraderPython",
+ "TestingCriterionEvalGraderScoreModel",
]
DataSourceConfig: TypeAlias = Annotated[
@@ -29,86 +28,30 @@ DataSourceConfig: TypeAlias = Annotated[
]
-class TestingCriterionPython(BaseModel):
+class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader):
__test__ = False
- name: str
- """The name of the grader."""
-
- source: str
- """The source code of the python script."""
-
- type: Literal["python"]
- """The object type, which is always `python`."""
-
- image_tag: Optional[str] = None
- """The image tag to use for the python script."""
-
- pass_threshold: Optional[float] = None
+ pass_threshold: float
"""The threshold for the score."""
-class TestingCriterionScoreModelInputContentOutputText(BaseModel):
- __test__ = False
- text: str
- """The text output from the model."""
-
- type: Literal["output_text"]
- """The type of the output text. Always `output_text`."""
-
-
-TestingCriterionScoreModelInputContent: TypeAlias = Union[
- str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
-]
-
-
-class TestingCriterionScoreModelInput(BaseModel):
+class TestingCriterionEvalGraderPython(PythonGrader):
__test__ = False
- content: TestingCriterionScoreModelInputContent
- """Text inputs to the model - can contain template strings."""
-
- role: Literal["user", "assistant", "system", "developer"]
- """The role of the message input.
-
- One of `user`, `assistant`, `system`, or `developer`.
- """
-
- type: Optional[Literal["message"]] = None
- """The type of the message input. Always `message`."""
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
-class TestingCriterionScoreModel(BaseModel):
+class TestingCriterionEvalGraderScoreModel(ScoreModelGrader):
__test__ = False
- input: List[TestingCriterionScoreModelInput]
- """The input text. This may include template strings."""
-
- model: str
- """The model to use for the evaluation."""
-
- name: str
- """The name of the grader."""
-
- type: Literal["score_model"]
- """The object type, which is always `score_model`."""
-
pass_threshold: Optional[float] = None
"""The threshold for the score."""
- range: Optional[List[float]] = None
- """The range of the score. Defaults to `[0, 1]`."""
-
- sampling_params: Optional[object] = None
- """The sampling parameters for the model."""
-
-TestingCriterion: TypeAlias = Annotated[
- Union[
- EvalLabelModelGrader,
- EvalStringCheckGrader,
- EvalTextSimilarityGrader,
- TestingCriterionPython,
- TestingCriterionScoreModel,
- ],
- PropertyInfo(discriminator="type"),
+TestingCriterion: TypeAlias = Union[
+ LabelModelGrader,
+ StringCheckGrader,
+ TestingCriterionEvalGraderTextSimilarity,
+ TestingCriterionEvalGraderPython,
+ TestingCriterionEvalGraderScoreModel,
]
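Since the response-side `TestingCriterion` unions above drop the `PropertyInfo(discriminator="type")` annotation, callers branch on `type` themselves. A rough sketch follows; the eval ID is a placeholder and the field access assumes the models defined in these files.

```python
from openai import OpenAI

client = OpenAI()

retrieved = client.evals.retrieve("eval_abc123")  # placeholder ID

for criterion in retrieved.testing_criteria:
    if criterion.type == "text_similarity":
        print(criterion.name, criterion.pass_threshold)
    elif criterion.type == "python":
        print(criterion.name, criterion.image_tag)
    else:
        print(criterion.name, criterion.type)
```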
tests/api_resources/fine_tuning/alpha/__init__.py
@@ -0,0 +1,1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
tests/api_resources/fine_tuning/alpha/test_graders.py
@@ -0,0 +1,289 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types.fine_tuning.alpha import (
+ GraderRunResponse,
+ GraderValidateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestGraders:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_run(self, client: OpenAI) -> None:
+ grader = client.fine_tuning.alpha.graders.run(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ model_sample="model_sample",
+ reference_answer="string",
+ )
+ assert_matches_type(GraderRunResponse, grader, path=["response"])
+
+ @parametrize
+ def test_method_run_with_all_params(self, client: OpenAI) -> None:
+ grader = client.fine_tuning.alpha.graders.run(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ model_sample="model_sample",
+ reference_answer="string",
+ )
+ assert_matches_type(GraderRunResponse, grader, path=["response"])
+
+ @parametrize
+ def test_raw_response_run(self, client: OpenAI) -> None:
+ response = client.fine_tuning.alpha.graders.with_raw_response.run(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ model_sample="model_sample",
+ reference_answer="string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ grader = response.parse()
+ assert_matches_type(GraderRunResponse, grader, path=["response"])
+
+ @parametrize
+ def test_streaming_response_run(self, client: OpenAI) -> None:
+ with client.fine_tuning.alpha.graders.with_streaming_response.run(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ model_sample="model_sample",
+ reference_answer="string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ grader = response.parse()
+ assert_matches_type(GraderRunResponse, grader, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_validate(self, client: OpenAI) -> None:
+ grader = client.fine_tuning.alpha.graders.validate(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ )
+ assert_matches_type(GraderValidateResponse, grader, path=["response"])
+
+ @parametrize
+ def test_method_validate_with_all_params(self, client: OpenAI) -> None:
+ grader = client.fine_tuning.alpha.graders.validate(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ )
+ assert_matches_type(GraderValidateResponse, grader, path=["response"])
+
+ @parametrize
+ def test_raw_response_validate(self, client: OpenAI) -> None:
+ response = client.fine_tuning.alpha.graders.with_raw_response.validate(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ grader = response.parse()
+ assert_matches_type(GraderValidateResponse, grader, path=["response"])
+
+ @parametrize
+ def test_streaming_response_validate(self, client: OpenAI) -> None:
+ with client.fine_tuning.alpha.graders.with_streaming_response.validate(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ grader = response.parse()
+ assert_matches_type(GraderValidateResponse, grader, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncGraders:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_run(self, async_client: AsyncOpenAI) -> None:
+ grader = await async_client.fine_tuning.alpha.graders.run(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ model_sample="model_sample",
+ reference_answer="string",
+ )
+ assert_matches_type(GraderRunResponse, grader, path=["response"])
+
+ @parametrize
+ async def test_method_run_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ grader = await async_client.fine_tuning.alpha.graders.run(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ model_sample="model_sample",
+ reference_answer="string",
+ )
+ assert_matches_type(GraderRunResponse, grader, path=["response"])
+
+ @parametrize
+ async def test_raw_response_run(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.fine_tuning.alpha.graders.with_raw_response.run(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ model_sample="model_sample",
+ reference_answer="string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ grader = response.parse()
+ assert_matches_type(GraderRunResponse, grader, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_run(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.fine_tuning.alpha.graders.with_streaming_response.run(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ model_sample="model_sample",
+ reference_answer="string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ grader = await response.parse()
+ assert_matches_type(GraderRunResponse, grader, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_validate(self, async_client: AsyncOpenAI) -> None:
+ grader = await async_client.fine_tuning.alpha.graders.validate(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ )
+ assert_matches_type(GraderValidateResponse, grader, path=["response"])
+
+ @parametrize
+ async def test_method_validate_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ grader = await async_client.fine_tuning.alpha.graders.validate(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ )
+ assert_matches_type(GraderValidateResponse, grader, path=["response"])
+
+ @parametrize
+ async def test_raw_response_validate(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.fine_tuning.alpha.graders.with_raw_response.validate(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ grader = response.parse()
+ assert_matches_type(GraderValidateResponse, grader, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_validate(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.fine_tuning.alpha.graders.with_streaming_response.validate(
+ grader={
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ grader = await response.parse()
+ assert_matches_type(GraderValidateResponse, grader, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
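Outside the mock-server tests above, a minimal usage sketch of the two new alpha endpoints; the string-check grader and sample values are placeholders.

```python
from openai import OpenAI

client = OpenAI()

string_check = {
    "type": "string_check",
    "name": "exact",
    "operation": "eq",
    "input": "{{sample.output_text}}",
    "reference": "{{item.reference}}",
}

# Validate the grader definition, then run it against a single model sample.
validated = client.fine_tuning.alpha.graders.validate(grader=string_check)
result = client.fine_tuning.alpha.graders.run(
    grader=string_check,
    model_sample="Paris",
    reference_answer="Paris",
)
print(validated, result)  # exact response fields live in the alpha grader types
```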
tests/api_resources/fine_tuning/test_jobs.py
@@ -52,6 +52,7 @@ class TestJobs:
],
metadata={"foo": "string"},
method={
+ "type": "supervised",
"dpo": {
"hyperparameters": {
"batch_size": "auto",
@@ -60,6 +61,24 @@ class TestJobs:
"n_epochs": "auto",
}
},
+ "reinforcement": {
+ "grader": {
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ "hyperparameters": {
+ "batch_size": "auto",
+ "compute_multiplier": "auto",
+ "eval_interval": "auto",
+ "eval_samples": "auto",
+ "learning_rate_multiplier": "auto",
+ "n_epochs": "auto",
+ "reasoning_effort": "default",
+ },
+ },
"supervised": {
"hyperparameters": {
"batch_size": "auto",
@@ -67,7 +86,6 @@ class TestJobs:
"n_epochs": "auto",
}
},
- "type": "supervised",
},
seed=42,
suffix="x",
@@ -258,6 +276,82 @@ class TestJobs:
"",
)
+ @parametrize
+ def test_method_pause(self, client: OpenAI) -> None:
+ job = client.fine_tuning.jobs.pause(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ @parametrize
+ def test_raw_response_pause(self, client: OpenAI) -> None:
+ response = client.fine_tuning.jobs.with_raw_response.pause(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ job = response.parse()
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ @parametrize
+ def test_streaming_response_pause(self, client: OpenAI) -> None:
+ with client.fine_tuning.jobs.with_streaming_response.pause(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ job = response.parse()
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_pause(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
+ client.fine_tuning.jobs.with_raw_response.pause(
+ "",
+ )
+
+ @parametrize
+ def test_method_resume(self, client: OpenAI) -> None:
+ job = client.fine_tuning.jobs.resume(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ @parametrize
+ def test_raw_response_resume(self, client: OpenAI) -> None:
+ response = client.fine_tuning.jobs.with_raw_response.resume(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ job = response.parse()
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ @parametrize
+ def test_streaming_response_resume(self, client: OpenAI) -> None:
+ with client.fine_tuning.jobs.with_streaming_response.resume(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ job = response.parse()
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_resume(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
+ client.fine_tuning.jobs.with_raw_response.resume(
+ "",
+ )
+
class TestAsyncJobs:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -293,6 +387,7 @@ class TestAsyncJobs:
],
metadata={"foo": "string"},
method={
+ "type": "supervised",
"dpo": {
"hyperparameters": {
"batch_size": "auto",
@@ -301,6 +396,24 @@ class TestAsyncJobs:
"n_epochs": "auto",
}
},
+ "reinforcement": {
+ "grader": {
+ "input": "input",
+ "name": "name",
+ "operation": "eq",
+ "reference": "reference",
+ "type": "string_check",
+ },
+ "hyperparameters": {
+ "batch_size": "auto",
+ "compute_multiplier": "auto",
+ "eval_interval": "auto",
+ "eval_samples": "auto",
+ "learning_rate_multiplier": "auto",
+ "n_epochs": "auto",
+ "reasoning_effort": "default",
+ },
+ },
"supervised": {
"hyperparameters": {
"batch_size": "auto",
@@ -308,7 +421,6 @@ class TestAsyncJobs:
"n_epochs": "auto",
}
},
- "type": "supervised",
},
seed=42,
suffix="x",
@@ -498,3 +610,79 @@ class TestAsyncJobs:
await async_client.fine_tuning.jobs.with_raw_response.list_events(
"",
)
+
+ @parametrize
+ async def test_method_pause(self, async_client: AsyncOpenAI) -> None:
+ job = await async_client.fine_tuning.jobs.pause(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ @parametrize
+ async def test_raw_response_pause(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.fine_tuning.jobs.with_raw_response.pause(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ job = response.parse()
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_pause(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.fine_tuning.jobs.with_streaming_response.pause(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ job = await response.parse()
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_pause(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
+ await async_client.fine_tuning.jobs.with_raw_response.pause(
+ "",
+ )
+
+ @parametrize
+ async def test_method_resume(self, async_client: AsyncOpenAI) -> None:
+ job = await async_client.fine_tuning.jobs.resume(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ @parametrize
+ async def test_raw_response_resume(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.fine_tuning.jobs.with_raw_response.resume(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ job = response.parse()
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_resume(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.fine_tuning.jobs.with_streaming_response.resume(
+ "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ job = await response.parse()
+ assert_matches_type(FineTuningJob, job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_resume(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
+ await async_client.fine_tuning.jobs.with_raw_response.resume(
+ "",
+ )
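The new pause/resume coverage above maps to two one-argument job methods; a short sketch using the same placeholder job ID as the tests.

```python
from openai import OpenAI

client = OpenAI()

job = client.fine_tuning.jobs.pause("ft-AF1WoRqd3aJAHsqc9NY7iL8F")
print(job.status)  # current job status after pausing

job = client.fine_tuning.jobs.resume("ft-AF1WoRqd3aJAHsqc9NY7iL8F")
print(job.status)
```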
.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 97
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0ee6b36cf3cc278cef4199a6aec5f7d530a6c1f17a74830037e96d50ca1edc50.yml
-openapi_spec_hash: e8ec5f46bc0655b34f292422d58a60f6
-config_hash: d9b6b6e6bc85744663e300eebc482067
+configured_endpoints: 101
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-794a6ed3c3d3d77887564755168056af8a426b17cf1ec721e3a300503dc22a41.yml
+openapi_spec_hash: 25a81c220713cd5b0bafc221d1dfa79a
+config_hash: 0b768ed1b56c6d82816f0fa40dc4aaf5
api.md
@@ -225,6 +225,21 @@ Methods:
# FineTuning
+## Methods
+
+Types:
+
+```python
+from openai.types.fine_tuning import (
+ DpoHyperparameters,
+ DpoMethod,
+ ReinforcementHyperparameters,
+ ReinforcementMethod,
+ SupervisedHyperparameters,
+ SupervisedMethod,
+)
+```
+
## Jobs
Types:
@@ -246,6 +261,8 @@ Methods:
- <code title="get /fine_tuning/jobs">client.fine_tuning.jobs.<a href="./src/openai/resources/fine_tuning/jobs/jobs.py">list</a>(\*\*<a href="src/openai/types/fine_tuning/job_list_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/fine_tuning_job.py">SyncCursorPage[FineTuningJob]</a></code>
- <code title="post /fine_tuning/jobs/{fine_tuning_job_id}/cancel">client.fine_tuning.jobs.<a href="./src/openai/resources/fine_tuning/jobs/jobs.py">cancel</a>(fine_tuning_job_id) -> <a href="./src/openai/types/fine_tuning/fine_tuning_job.py">FineTuningJob</a></code>
- <code title="get /fine_tuning/jobs/{fine_tuning_job_id}/events">client.fine_tuning.jobs.<a href="./src/openai/resources/fine_tuning/jobs/jobs.py">list_events</a>(fine_tuning_job_id, \*\*<a href="src/openai/types/fine_tuning/job_list_events_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/fine_tuning_job_event.py">SyncCursorPage[FineTuningJobEvent]</a></code>
+- <code title="post /fine_tuning/jobs/{fine_tuning_job_id}/pause">client.fine_tuning.jobs.<a href="./src/openai/resources/fine_tuning/jobs/jobs.py">pause</a>(fine_tuning_job_id) -> <a href="./src/openai/types/fine_tuning/fine_tuning_job.py">FineTuningJob</a></code>
+- <code title="post /fine_tuning/jobs/{fine_tuning_job_id}/resume">client.fine_tuning.jobs.<a href="./src/openai/resources/fine_tuning/jobs/jobs.py">resume</a>(fine_tuning_job_id) -> <a href="./src/openai/types/fine_tuning/fine_tuning_job.py">FineTuningJob</a></code>
### Checkpoints
@@ -279,6 +296,38 @@ Methods:
- <code title="get /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">retrieve</a>(fine_tuned_model_checkpoint, \*\*<a href="src/openai/types/fine_tuning/checkpoints/permission_retrieve_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py">PermissionRetrieveResponse</a></code>
- <code title="delete /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">delete</a>(permission_id, \*, fine_tuned_model_checkpoint) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_delete_response.py">PermissionDeleteResponse</a></code>
+## Alpha
+
+### Graders
+
+Types:
+
+```python
+from openai.types.fine_tuning.alpha import GraderRunResponse, GraderValidateResponse
+```
+
+Methods:
+
+- <code title="post /fine_tuning/alpha/graders/run">client.fine_tuning.alpha.graders.<a href="./src/openai/resources/fine_tuning/alpha/graders.py">run</a>(\*\*<a href="src/openai/types/fine_tuning/alpha/grader_run_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/alpha/grader_run_response.py">GraderRunResponse</a></code>
+- <code title="post /fine_tuning/alpha/graders/validate">client.fine_tuning.alpha.graders.<a href="./src/openai/resources/fine_tuning/alpha/graders.py">validate</a>(\*\*<a href="src/openai/types/fine_tuning/alpha/grader_validate_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/alpha/grader_validate_response.py">GraderValidateResponse</a></code>
+
+# Graders
+
+## GraderModels
+
+Types:
+
+```python
+from openai.types.graders import (
+ LabelModelGrader,
+ MultiGrader,
+ PythonGrader,
+ ScoreModelGrader,
+ StringCheckGrader,
+ TextSimilarityGrader,
+)
+```
+
# VectorStores
Types:
@@ -738,10 +787,7 @@ Types:
```python
from openai.types import (
EvalCustomDataSourceConfig,
- EvalLabelModelGrader,
EvalStoredCompletionsDataSourceConfig,
- EvalStringCheckGrader,
- EvalTextSimilarityGrader,
EvalCreateResponse,
EvalRetrieveResponse,
EvalUpdateResponse,