Skip to content

Commit

Permalink
feat(api): batch list endpoint (#1338)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-bot committed Apr 18, 2024
1 parent feec0dd commit a776f38
Show file tree
Hide file tree
Showing 6 changed files with 213 additions and 2 deletions.
2 changes: 1 addition & 1 deletion .stats.yml
Original file line number Diff line number Diff line change
@@ -1 +1 @@
-configured_endpoints: 62
+configured_endpoints: 63
1 change: 1 addition & 0 deletions api.md
Original file line number Diff line number Diff line change
Expand Up @@ -405,4 +405,5 @@ Methods:

- <code title="post /batches">client.batches.<a href="./src/openai/resources/batches.py">create</a>(\*\*<a href="src/openai/types/batch_create_params.py">params</a>) -> <a href="./src/openai/types/batch.py">Batch</a></code>
- <code title="get /batches/{batch_id}">client.batches.<a href="./src/openai/resources/batches.py">retrieve</a>(batch_id) -> <a href="./src/openai/types/batch.py">Batch</a></code>
- <code title="get /batches">client.batches.<a href="./src/openai/resources/batches.py">list</a>(\*\*<a href="src/openai/types/batch_list_params.py">params</a>) -> <a href="./src/openai/types/batch.py">SyncCursorPage[Batch]</a></code>
- <code title="post /batches/{batch_id}/cancel">client.batches.<a href="./src/openai/resources/batches.py">cancel</a>(batch_id) -> <a href="./src/openai/types/batch.py">Batch</a></code>
120 changes: 119 additions & 1 deletion src/openai/resources/batches.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
import httpx

from .. import _legacy_response
from ..types import Batch, batch_create_params
from ..types import Batch, batch_list_params, batch_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import (
maybe_transform,
Expand All @@ -17,7 +17,9 @@
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..pagination import SyncCursorPage, AsyncCursorPage
from .._base_client import (
AsyncPaginator,
make_request_options,
)

Expand Down Expand Up @@ -125,6 +127,58 @@ def retrieve(
cast_to=Batch,
)

def list(
    self,
    *,
    after: str | NotGiven = NOT_GIVEN,
    limit: int | NotGiven = NOT_GIVEN,
    # The arguments below let callers attach request options that have no
    # dedicated keyword; anything given here overrides client-level defaults.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[Batch]:
    """List your organization's batches.

    Args:
      after: A cursor for use in pagination. `after` is an object ID that defines
          your place in the list. For instance, if you make a list request and
          receive 100 objects, ending with obj_foo, your subsequent call can
          include after=obj_foo in order to fetch the next page of the list.

      limit: A limit on the number of objects to be returned. Limit can range
          between 1 and 100, and the default is 20.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # Serialize the pagination arguments into the wire-format query dict.
    query = maybe_transform(
        {
            "after": after,
            "limit": limit,
        },
        batch_list_params.BatchListParams,
    )
    request_options = make_request_options(
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
        query=query,
    )
    # Cursor-paginated GET; each page's items are parsed as Batch models.
    return self._get_api_list(
        "/batches",
        page=SyncCursorPage[Batch],
        options=request_options,
        model=Batch,
    )

def cancel(
self,
batch_id: str,
Expand Down Expand Up @@ -260,6 +314,58 @@ async def retrieve(
cast_to=Batch,
)

def list(
    self,
    *,
    after: str | NotGiven = NOT_GIVEN,
    limit: int | NotGiven = NOT_GIVEN,
    # The arguments below let callers attach request options that have no
    # dedicated keyword; anything given here overrides client-level defaults.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[Batch, AsyncCursorPage[Batch]]:
    """List your organization's batches.

    Args:
      after: A cursor for use in pagination. `after` is an object ID that defines
          your place in the list. For instance, if you make a list request and
          receive 100 objects, ending with obj_foo, your subsequent call can
          include after=obj_foo in order to fetch the next page of the list.

      limit: A limit on the number of objects to be returned. Limit can range
          between 1 and 100, and the default is 20.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # Serialize the pagination arguments into the wire-format query dict.
    query = maybe_transform(
        {
            "after": after,
            "limit": limit,
        },
        batch_list_params.BatchListParams,
    )
    request_options = make_request_options(
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
        query=query,
    )
    # Returns an awaitable paginator; iteration fetches pages lazily.
    return self._get_api_list(
        "/batches",
        page=AsyncCursorPage[Batch],
        options=request_options,
        model=Batch,
    )

async def cancel(
self,
batch_id: str,
Expand Down Expand Up @@ -304,6 +410,9 @@ def __init__(self, batches: Batches) -> None:
self.retrieve = _legacy_response.to_raw_response_wrapper(
batches.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
batches.list,
)
self.cancel = _legacy_response.to_raw_response_wrapper(
batches.cancel,
)
Expand All @@ -319,6 +428,9 @@ def __init__(self, batches: AsyncBatches) -> None:
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
batches.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
batches.list,
)
self.cancel = _legacy_response.async_to_raw_response_wrapper(
batches.cancel,
)
Expand All @@ -334,6 +446,9 @@ def __init__(self, batches: Batches) -> None:
self.retrieve = to_streamed_response_wrapper(
batches.retrieve,
)
self.list = to_streamed_response_wrapper(
batches.list,
)
self.cancel = to_streamed_response_wrapper(
batches.cancel,
)
Expand All @@ -349,6 +464,9 @@ def __init__(self, batches: AsyncBatches) -> None:
self.retrieve = async_to_streamed_response_wrapper(
batches.retrieve,
)
self.list = async_to_streamed_response_wrapper(
batches.list,
)
self.cancel = async_to_streamed_response_wrapper(
batches.cancel,
)
1 change: 1 addition & 0 deletions src/openai/types/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from .images_response import ImagesResponse as ImagesResponse
from .completion_usage import CompletionUsage as CompletionUsage
from .file_list_params import FileListParams as FileListParams
from .batch_list_params import BatchListParams as BatchListParams
from .completion_choice import CompletionChoice as CompletionChoice
from .image_edit_params import ImageEditParams as ImageEditParams
from .file_create_params import FileCreateParams as FileCreateParams
Expand Down
24 changes: 24 additions & 0 deletions src/openai/types/batch_list_params.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import TypedDict

__all__ = ["BatchListParams"]


class BatchListParams(TypedDict, total=False):
    # Query parameters accepted by `GET /batches` (cursor-based pagination).
    # `total=False` makes every key optional.
    after: str
    """A cursor for use in pagination.

    `after` is an object ID that defines your place in the list. For instance, if
    you make a list request and receive 100 objects, ending with obj_foo, your
    subsequent call can include after=obj_foo in order to fetch the next page of the
    list.
    """

    limit: int
    """A limit on the number of objects to be returned.

    Limit can range between 1 and 100, and the default is 20.
    """
67 changes: 67 additions & 0 deletions tests/api_resources/test_batches.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types import Batch
from openai.pagination import SyncCursorPage, AsyncCursorPage

base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

Expand Down Expand Up @@ -102,6 +103,39 @@ def test_path_params_retrieve(self, client: OpenAI) -> None:
"",
)

@parametrize
def test_method_list(self, client: OpenAI) -> None:
    # A bare list() call should produce a cursor page of Batch objects.
    page = client.batches.list()
    assert_matches_type(SyncCursorPage[Batch], page, path=["response"])

@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
    # Exercise every optional query parameter in one call.
    page = client.batches.list(
        limit=0,
        after="string",
    )
    assert_matches_type(SyncCursorPage[Batch], page, path=["response"])

@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
    # The raw-response wrapper exposes HTTP details alongside the parsed page.
    http_response = client.batches.with_raw_response.list()

    assert http_response.is_closed is True
    assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
    page = http_response.parse()
    assert_matches_type(SyncCursorPage[Batch], page, path=["response"])

@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
    # While inside the context manager the response body must remain open;
    # exiting the block should close it.
    with client.batches.with_streaming_response.list() as streamed:
        assert not streamed.is_closed
        assert streamed.http_request.headers.get("X-Stainless-Lang") == "python"

        page = streamed.parse()
        assert_matches_type(SyncCursorPage[Batch], page, path=["response"])

    assert cast(Any, streamed.is_closed) is True

@parametrize
def test_method_cancel(self, client: OpenAI) -> None:
batch = client.batches.cancel(
Expand Down Expand Up @@ -229,6 +263,39 @@ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
"",
)

@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
    # A bare list() call should produce an async cursor page of Batch objects.
    page = await async_client.batches.list()
    assert_matches_type(AsyncCursorPage[Batch], page, path=["response"])

@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
    # Exercise every optional query parameter in one call.
    page = await async_client.batches.list(
        limit=0,
        after="string",
    )
    assert_matches_type(AsyncCursorPage[Batch], page, path=["response"])

@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
    # The raw-response wrapper exposes HTTP details alongside the parsed page.
    http_response = await async_client.batches.with_raw_response.list()

    assert http_response.is_closed is True
    assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
    page = http_response.parse()
    assert_matches_type(AsyncCursorPage[Batch], page, path=["response"])

@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
    # While inside the async context manager the response body must remain
    # open; exiting the block should close it.
    async with async_client.batches.with_streaming_response.list() as streamed:
        assert not streamed.is_closed
        assert streamed.http_request.headers.get("X-Stainless-Lang") == "python"

        page = await streamed.parse()
        assert_matches_type(AsyncCursorPage[Batch], page, path=["response"])

    assert cast(Any, streamed.is_closed) is True

@parametrize
async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
batch = await async_client.batches.cancel(
Expand Down

0 comments on commit a776f38

Please sign in to comment.