feat(api): add support for storing chat completions (#2117)
stainless-app[bot] committed Feb 13, 2025
1 parent 3f8d820 commit 300f58b
Showing 22 changed files with 1,350 additions and 85 deletions.
4 changes: 2 additions & 2 deletions .stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 69
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml
+configured_endpoints: 74
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4aa6ee65ba9efc789e05e6a5ef0883b2cadf06def8efd863dbf75e9e233067e1.yml
14 changes: 13 additions & 1 deletion api.md
@@ -48,6 +48,7 @@ from openai.types.chat import (
     ChatCompletionContentPartInputAudio,
     ChatCompletionContentPartRefusal,
     ChatCompletionContentPartText,
+    ChatCompletionDeleted,
     ChatCompletionDeveloperMessageParam,
     ChatCompletionFunctionCallOption,
     ChatCompletionFunctionMessageParam,
@@ -59,6 +60,7 @@ from openai.types.chat import (
     ChatCompletionPredictionContent,
     ChatCompletionReasoningEffort,
     ChatCompletionRole,
+    ChatCompletionStoreMessage,
     ChatCompletionStreamOptions,
     ChatCompletionSystemMessageParam,
     ChatCompletionTokenLogprob,
@@ -71,7 +73,17 @@ from openai.types.chat import (
 
 Methods:
 
-- <code title="post /chat/completions">client.chat.completions.<a href="./src/openai/resources/chat/completions.py">create</a>(\*\*<a href="src/openai/types/chat/completion_create_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion.py">ChatCompletion</a></code>
+- <code title="post /chat/completions">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">create</a>(\*\*<a href="src/openai/types/chat/completion_create_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion.py">ChatCompletion</a></code>
+- <code title="get /chat/completions/{completion_id}">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">retrieve</a>(completion_id) -> <a href="./src/openai/types/chat/chat_completion.py">ChatCompletion</a></code>
+- <code title="post /chat/completions/{completion_id}">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">update</a>(completion_id, \*\*<a href="src/openai/types/chat/completion_update_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion.py">ChatCompletion</a></code>
+- <code title="get /chat/completions">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">list</a>(\*\*<a href="src/openai/types/chat/completion_list_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion.py">SyncCursorPage[ChatCompletion]</a></code>
+- <code title="delete /chat/completions/{completion_id}">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">delete</a>(completion_id) -> <a href="./src/openai/types/chat/chat_completion_deleted.py">ChatCompletionDeleted</a></code>
+
+### Messages
+
+Methods:
+
+- <code title="get /chat/completions/{completion_id}/messages">client.chat.completions.messages.<a href="./src/openai/resources/chat/completions/messages.py">list</a>(completion_id, \*\*<a href="src/openai/types/chat/completions/message_list_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion_store_message.py">SyncCursorPage[ChatCompletionStoreMessage]</a></code>
 
 # Embeddings

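Taken together, the new endpoints let a client persist a chat completion and work with it afterwards. A minimal usage sketch follows; the method names come straight from the listing above, while the `store=True` flag on `create` and the `metadata` argument to `update` are assumptions inferred from the commit title and the public API, not shown in this diff.

```python
from openai import OpenAI

client = OpenAI()

# Create a completion and (assumed flag) ask the API to store it server-side.
completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello"}],
    store=True,  # assumption: enables storage, per the commit title
)

# Fetch the stored completion and page through its stored messages.
stored = client.chat.completions.retrieve(completion.id)
for message in client.chat.completions.messages.list(completion.id):
    print(message.role, message.content)

# Update it (assumed: metadata is the updatable field), list, then delete.
client.chat.completions.update(completion.id, metadata={"topic": "greeting"})
for item in client.chat.completions.list():
    print(item.id)
print(client.chat.completions.delete(completion.id).deleted)
```
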
20 changes: 18 additions & 2 deletions src/openai/_utils/_sync.py
Expand Up @@ -7,16 +7,20 @@
from typing import Any, TypeVar, Callable, Awaitable
from typing_extensions import ParamSpec

import anyio
import sniffio
import anyio.to_thread

T_Retval = TypeVar("T_Retval")
T_ParamSpec = ParamSpec("T_ParamSpec")


if sys.version_info >= (3, 9):
to_thread = asyncio.to_thread
_asyncio_to_thread = asyncio.to_thread
else:
# backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
# for Python 3.8 support
async def to_thread(
async def _asyncio_to_thread(
func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
) -> Any:
"""Asynchronously run function *func* in a separate thread.
Expand All @@ -34,6 +38,17 @@ async def to_thread(
return await loop.run_in_executor(None, func_call)


async def to_thread(
func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
) -> T_Retval:
if sniffio.current_async_library() == "asyncio":
return await _asyncio_to_thread(func, *args, **kwargs)

return await anyio.to_thread.run_sync(
functools.partial(func, *args, **kwargs),
)


# inspired by `asyncer`, https://github.com/tiangolo/asyncer
def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
"""
Expand All @@ -50,6 +65,7 @@ def blocking_func(arg1, arg2, kwarg1=None):
# blocking code
return result
result = asyncify(blocking_function)(arg1, arg2, kwarg1=value1)
```
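The practical effect of this change is that `asyncify`-wrapped blocking calls keep using `asyncio.to_thread` when asyncio is running, but now route through anyio's worker threads when `sniffio` reports a different async library (e.g. trio) driving the event loop. Below is a standalone sketch of the same dispatch pattern, deliberately separate from the SDK's private module; it assumes `anyio` and `sniffio` are installed.

```python
import asyncio
import functools

import anyio.to_thread
import sniffio


def slow_add(a: int, b: int) -> int:
    # Stand-in for a blocking call (sync I/O, a sync HTTP request, etc.).
    return a + b


async def run_in_thread(func, *args, **kwargs):
    # Same idea as the patched `to_thread`: prefer the native asyncio helper,
    # and fall back to anyio's thread pool for other event loops (e.g. trio).
    if sniffio.current_async_library() == "asyncio":
        return await asyncio.to_thread(func, *args, **kwargs)
    return await anyio.to_thread.run_sync(functools.partial(func, *args, **kwargs))


async def main() -> None:
    print(await run_in_thread(slow_add, 2, 3))  # prints 5


asyncio.run(main())
```
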
8 changes: 4 additions & 4 deletions src/openai/cli/_api/chat/completions.py
Expand Up @@ -104,13 +104,13 @@ def create(args: CLIChatCompletionCreateArgs) -> None:
"stream": False,
}
if args.temperature is not None:
params['temperature'] = args.temperature
params["temperature"] = args.temperature
if args.stop is not None:
params['stop'] = args.stop
params["stop"] = args.stop
if args.top_p is not None:
params['top_p'] = args.top_p
params["top_p"] = args.top_p
if args.n is not None:
params['n'] = args.n
params["n"] = args.n
if args.stream:
params["stream"] = args.stream # type: ignore
if args.max_tokens is not None:
4 changes: 2 additions & 2 deletions src/openai/lib/_parsing/_completions.py
Expand Up @@ -45,13 +45,13 @@ def validate_input_tools(
for tool in tools:
if tool["type"] != "function":
raise ValueError(
f'Currently only `function` tool types support auto-parsing; Received `{tool["type"]}`',
f"Currently only `function` tool types support auto-parsing; Received `{tool['type']}`",
)

strict = tool["function"].get("strict")
if strict is not True:
raise ValueError(
f'`{tool["function"]["name"]}` is not strict. Only `strict` function tools can be auto-parsed'
f"`{tool['function']['name']}` is not strict. Only `strict` function tools can be auto-parsed"
)


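The change here is only quoting style, but the check it touches is worth illustrating: auto-parsing accepts only `function` tools whose `strict` flag is `True`; anything else raises one of the errors above. A hypothetical tool definition that would pass the check is shown below; the schema contents are purely illustrative.

```python
# Illustrative input that `validate_input_tools` accepts: the type must be
# "function" and "strict" must be True, otherwise a ValueError is raised.
tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "strict": True,
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
            "additionalProperties": False,
        },
    },
}
```
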
2 changes: 1 addition & 1 deletion src/openai/resources/chat/chat.py
Expand Up @@ -4,7 +4,7 @@

from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from .completions import (
from .completions.completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
33 changes: 33 additions & 0 deletions src/openai/resources/chat/completions/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .messages import (
+    Messages,
+    AsyncMessages,
+    MessagesWithRawResponse,
+    AsyncMessagesWithRawResponse,
+    MessagesWithStreamingResponse,
+    AsyncMessagesWithStreamingResponse,
+)
+from .completions import (
+    Completions,
+    AsyncCompletions,
+    CompletionsWithRawResponse,
+    AsyncCompletionsWithRawResponse,
+    CompletionsWithStreamingResponse,
+    AsyncCompletionsWithStreamingResponse,
+)
+
+__all__ = [
+    "Messages",
+    "AsyncMessages",
+    "MessagesWithRawResponse",
+    "AsyncMessagesWithRawResponse",
+    "MessagesWithStreamingResponse",
+    "AsyncMessagesWithStreamingResponse",
+    "Completions",
+    "AsyncCompletions",
+    "CompletionsWithRawResponse",
+    "AsyncCompletionsWithRawResponse",
+    "CompletionsWithStreamingResponse",
+    "AsyncCompletionsWithStreamingResponse",
+]
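
For anyone reading the SDK internals, the new package re-exports both resources and their raw/streaming response wrappers, so imports that previously targeted `openai.resources.chat.completions` as a single module should keep resolving now that it is a package. A quick sketch of what the re-exports make importable; these are internal paths, so treat them as subject to change.

```python
# Internal SDK surface, shown only to illustrate the re-exports above.
from openai.resources.chat.completions import (
    Completions,
    Messages,
    CompletionsWithRawResponse,
    MessagesWithStreamingResponse,
)

print(Completions, Messages, CompletionsWithRawResponse, MessagesWithStreamingResponse)
```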