Skip to content

Commit

Permalink
feat(api): update via SDK Studio (#40)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-app[bot] authored and stainless-bot committed May 15, 2024
1 parent 53a8b27 commit 7227816
Show file tree
Hide file tree
Showing 9 changed files with 430 additions and 1 deletion.
2 changes: 1 addition & 1 deletion .stats.yml
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
configured_endpoints: 6
configured_endpoints: 7
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-c28de228634e737a173375583a09eef5e0d7fa81fcdf7090d14d194e6ef4fdc5.yml
12 changes: 12 additions & 0 deletions api.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,15 @@
# Embeddings

Types:

```python
from groq.types import EmbeddingCreateResponse
```

Methods:

- <code title="post /openai/v1/embeddings">client.embeddings.<a href="./src/groq/resources/embeddings.py">create</a>(\*\*<a href="src/groq/types/embedding_create_params.py">params</a>) -> <a href="./src/groq/types/embedding_create_response.py">EmbeddingCreateResponse</a></code>

# Chat

## Completions
Expand Down
8 changes: 8 additions & 0 deletions src/groq/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@


class Groq(SyncAPIClient):
embeddings: resources.EmbeddingsResource
chat: resources.ChatResource
audio: resources.AudioResource
models: resources.ModelsResource
Expand Down Expand Up @@ -106,6 +107,7 @@ def __init__(
_strict_response_validation=_strict_response_validation,
)

self.embeddings = resources.EmbeddingsResource(self)
self.chat = resources.ChatResource(self)
self.audio = resources.AudioResource(self)
self.models = resources.ModelsResource(self)
Expand Down Expand Up @@ -218,6 +220,7 @@ def _make_status_error(


class AsyncGroq(AsyncAPIClient):
embeddings: resources.AsyncEmbeddingsResource
chat: resources.AsyncChatResource
audio: resources.AsyncAudioResource
models: resources.AsyncModelsResource
Expand Down Expand Up @@ -278,6 +281,7 @@ def __init__(
_strict_response_validation=_strict_response_validation,
)

self.embeddings = resources.AsyncEmbeddingsResource(self)
self.chat = resources.AsyncChatResource(self)
self.audio = resources.AsyncAudioResource(self)
self.models = resources.AsyncModelsResource(self)
Expand Down Expand Up @@ -391,27 +395,31 @@ def _make_status_error(

class GroqWithRawResponse:
    """Variant of ``Groq`` that exposes every resource through its
    ``*ResourceWithRawResponse`` wrapper (see ``src/groq/resources``)."""

    def __init__(self, client: Groq) -> None:
        # One wrapper per resource, backed by the already-constructed
        # resource instances on the underlying client.
        self.embeddings = resources.EmbeddingsResourceWithRawResponse(client.embeddings)
        self.chat = resources.ChatResourceWithRawResponse(client.chat)
        self.audio = resources.AudioResourceWithRawResponse(client.audio)
        self.models = resources.ModelsResourceWithRawResponse(client.models)


class AsyncGroqWithRawResponse:
    """Variant of ``AsyncGroq`` that exposes every resource through its
    ``Async*ResourceWithRawResponse`` wrapper."""

    def __init__(self, client: AsyncGroq) -> None:
        # Mirrors GroqWithRawResponse, using the async wrapper classes.
        self.embeddings = resources.AsyncEmbeddingsResourceWithRawResponse(client.embeddings)
        self.chat = resources.AsyncChatResourceWithRawResponse(client.chat)
        self.audio = resources.AsyncAudioResourceWithRawResponse(client.audio)
        self.models = resources.AsyncModelsResourceWithRawResponse(client.models)


class GroqWithStreamedResponse:
    """Variant of ``Groq`` that exposes every resource through its
    ``*ResourceWithStreamingResponse`` wrapper."""

    def __init__(self, client: Groq) -> None:
        # One streaming-response wrapper per resource on the underlying client.
        self.embeddings = resources.EmbeddingsResourceWithStreamingResponse(client.embeddings)
        self.chat = resources.ChatResourceWithStreamingResponse(client.chat)
        self.audio = resources.AudioResourceWithStreamingResponse(client.audio)
        self.models = resources.ModelsResourceWithStreamingResponse(client.models)


class AsyncGroqWithStreamedResponse:
    """Variant of ``AsyncGroq`` that exposes every resource through its
    ``Async*ResourceWithStreamingResponse`` wrapper."""

    def __init__(self, client: AsyncGroq) -> None:
        # Mirrors GroqWithStreamedResponse, using the async wrapper classes.
        self.embeddings = resources.AsyncEmbeddingsResourceWithStreamingResponse(client.embeddings)
        self.chat = resources.AsyncChatResourceWithStreamingResponse(client.chat)
        self.audio = resources.AsyncAudioResourceWithStreamingResponse(client.audio)
        self.models = resources.AsyncModelsResourceWithStreamingResponse(client.models)
Expand Down
14 changes: 14 additions & 0 deletions src/groq/resources/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,22 @@
ModelsResourceWithStreamingResponse,
AsyncModelsResourceWithStreamingResponse,
)
from .embeddings import (
EmbeddingsResource,
AsyncEmbeddingsResource,
EmbeddingsResourceWithRawResponse,
AsyncEmbeddingsResourceWithRawResponse,
EmbeddingsResourceWithStreamingResponse,
AsyncEmbeddingsResourceWithStreamingResponse,
)

__all__ = [
"EmbeddingsResource",
"AsyncEmbeddingsResource",
"EmbeddingsResourceWithRawResponse",
"AsyncEmbeddingsResourceWithRawResponse",
"EmbeddingsResourceWithStreamingResponse",
"AsyncEmbeddingsResourceWithStreamingResponse",
"ChatResource",
"AsyncChatResource",
"ChatResourceWithRawResponse",
Expand Down
203 changes: 203 additions & 0 deletions src/groq/resources/embeddings.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,203 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union, Optional
from typing_extensions import Literal

import httpx

from ..types import embedding_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import (
maybe_transform,
async_maybe_transform,
)
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
from .._base_client import (
make_request_options,
)
from ..types.embedding_create_response import EmbeddingCreateResponse

__all__ = ["EmbeddingsResource", "AsyncEmbeddingsResource"]


class EmbeddingsResource(SyncAPIResource):
    """Synchronous access to the ``/openai/v1/embeddings`` endpoint."""

    @cached_property
    def with_raw_response(self) -> EmbeddingsResourceWithRawResponse:
        """This resource wrapped so methods go through ``to_raw_response_wrapper``."""
        return EmbeddingsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse:
        """This resource wrapped so methods go through ``to_streamed_response_wrapper``."""
        return EmbeddingsResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        input: Union[str, List[str]],
        model: str,
        dimensions: int | NotGiven = NOT_GIVEN,
        encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
        user: Optional[str] | NotGiven = NOT_GIVEN,
        # The extra_* arguments forward additional request pieces to the API that
        # aren't available as keyword params; they take precedence over values
        # defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> EmbeddingCreateResponse:
        """Create an embedding vector representing the input text.

        Args:
          input: Input text to embed, encoded as a string or array of tokens. To embed
              multiple inputs in a single request, pass an array of strings or array of
              token arrays. The input must not exceed the max input tokens for the
              model, cannot be an empty string, and any array must be 2048 dimensions
              or less.

          model: ID of the model to use.

          dimensions: The number of dimensions to return the embeddings in.

          encoding_format: The format to return the embeddings in.

          user: A unique identifier representing your end-user, which can help us
              monitor and detect abuse.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # NOTE(review): presumably maybe_transform drops NOT_GIVEN entries before
        # serialization — confirm against _utils if relied upon.
        payload = {
            "input": input,
            "model": model,
            "dimensions": dimensions,
            "encoding_format": encoding_format,
            "user": user,
        }
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            "/openai/v1/embeddings",
            body=maybe_transform(payload, embedding_create_params.EmbeddingCreateParams),
            options=request_options,
            cast_to=EmbeddingCreateResponse,
        )


class AsyncEmbeddingsResource(AsyncAPIResource):
    """Asynchronous access to the ``/openai/v1/embeddings`` endpoint."""

    @cached_property
    def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse:
        """This resource wrapped so methods go through ``async_to_raw_response_wrapper``."""
        return AsyncEmbeddingsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingResponse:
        """This resource wrapped so methods go through ``async_to_streamed_response_wrapper``."""
        return AsyncEmbeddingsResourceWithStreamingResponse(self)

    async def create(
        self,
        *,
        input: Union[str, List[str]],
        model: str,
        dimensions: int | NotGiven = NOT_GIVEN,
        encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
        user: Optional[str] | NotGiven = NOT_GIVEN,
        # The extra_* arguments forward additional request pieces to the API that
        # aren't available as keyword params; they take precedence over values
        # defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> EmbeddingCreateResponse:
        """Create an embedding vector representing the input text.

        Args:
          input: Input text to embed, encoded as a string or array of tokens. To embed
              multiple inputs in a single request, pass an array of strings or array of
              token arrays. The input must not exceed the max input tokens for the
              model, cannot be an empty string, and any array must be 2048 dimensions
              or less.

          model: ID of the model to use.

          dimensions: The number of dimensions to return the embeddings in.

          encoding_format: The format to return the embeddings in.

          user: A unique identifier representing your end-user, which can help us
              monitor and detect abuse.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # NOTE(review): presumably async_maybe_transform drops NOT_GIVEN entries
        # before serialization — confirm against _utils if relied upon.
        payload = {
            "input": input,
            "model": model,
            "dimensions": dimensions,
            "encoding_format": encoding_format,
            "user": user,
        }
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        transformed = await async_maybe_transform(
            payload, embedding_create_params.EmbeddingCreateParams
        )
        return await self._post(
            "/openai/v1/embeddings",
            body=transformed,
            options=request_options,
            cast_to=EmbeddingCreateResponse,
        )


class EmbeddingsResourceWithRawResponse:
    """``EmbeddingsResource`` with each method passed through ``to_raw_response_wrapper``."""

    def __init__(self, embeddings: EmbeddingsResource) -> None:
        self._embeddings = embeddings
        self.create = to_raw_response_wrapper(embeddings.create)


class AsyncEmbeddingsResourceWithRawResponse:
    """``AsyncEmbeddingsResource`` with each method passed through ``async_to_raw_response_wrapper``."""

    def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
        self._embeddings = embeddings
        self.create = async_to_raw_response_wrapper(embeddings.create)


class EmbeddingsResourceWithStreamingResponse:
    """``EmbeddingsResource`` with each method passed through ``to_streamed_response_wrapper``."""

    def __init__(self, embeddings: EmbeddingsResource) -> None:
        self._embeddings = embeddings
        self.create = to_streamed_response_wrapper(embeddings.create)


class AsyncEmbeddingsResourceWithStreamingResponse:
    """``AsyncEmbeddingsResource`` with each method passed through ``async_to_streamed_response_wrapper``."""

    def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
        self._embeddings = embeddings
        self.create = async_to_streamed_response_wrapper(embeddings.create)
2 changes: 2 additions & 0 deletions src/groq/types/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,3 +5,5 @@
from .model import Model as Model
from .model_list import ModelList as ModelList
from .translation import Translation as Translation
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse
33 changes: 33 additions & 0 deletions src/groq/types/embedding_create_params.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union, Optional
from typing_extensions import Literal, Required, TypedDict

__all__ = ["EmbeddingCreateParams"]


class EmbeddingCreateParams(TypedDict, total=False):
    """Request body for ``POST /openai/v1/embeddings``.

    ``total=False`` makes every key optional at the type level; ``Required[...]``
    marks the keys the API actually mandates.
    """

    input: Required[Union[str, List[str]]]
    """Input text to embed, encoded as a string or array of tokens.

    To embed multiple inputs in a single request, pass an array of strings or array
    of token arrays. The input must not exceed the max input tokens for the model,
    cannot be an empty string, and any array must be 2048 dimensions or less.
    """

    model: Required[str]
    """ID of the model to use."""

    dimensions: int
    """The number of dimensions to return the embeddings in."""

    encoding_format: Literal["float", "base64"]
    """The format to return the embeddings in."""

    user: Optional[str]
    """
    A unique identifier representing your end-user, which can help us monitor and
    detect abuse.
    """
45 changes: 45 additions & 0 deletions src/groq/types/embedding_create_response.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Union
from typing_extensions import Literal

from .._models import BaseModel

__all__ = ["EmbeddingCreateResponse", "Data", "Usage"]


class Data(BaseModel):
    """A single embedding entry within an ``EmbeddingCreateResponse``."""

    embedding: Union[List[float], str]
    """The embedding vector, which is a list of floats.

    The length of vector depends on the model as listed in the
    [embedding guide](/docs/guides/embeddings).
    """

    index: int
    """The index of the embedding in the list of embeddings."""

    object: Literal["embedding"]
    """The object type, which is always "embedding"."""


class Usage(BaseModel):
    """Token accounting for an embeddings request."""

    prompt_tokens: int
    """The number of tokens used by the prompt."""

    total_tokens: int
    """The total number of tokens used by the request."""


class EmbeddingCreateResponse(BaseModel):
    """Response body for ``POST /openai/v1/embeddings``."""

    data: List[Data]
    """The list of embeddings generated by the model."""

    model: str
    """The name of the model used to generate the embedding."""

    object: Literal["list"]
    """The object type, which is always "list"."""

    usage: Usage
    """The usage information for the request."""
Loading

0 comments on commit 7227816

Please sign in to comment.