feat(api): Improve types
* Mark required fields as required
* Define enum values for various strings
* Fix tool call choice support
Stainless Bot authored and gradenr committed May 21, 2024
1 parent 7e2111c commit e46d3ed
Showing 13 changed files with 652 additions and 537 deletions.
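For context on the "tool call choice" item: the files implementing it are not expanded on this page. The sketch below is a hedged illustration, not taken from this diff, of where such a value surfaces at the call site. It assumes `chat.completions.create` accepts OpenAI-style `tools` and `tool_choice` parameters, and `get_weather` is a hypothetical function used only for illustration.

```python
# Hedged sketch, not from this diff: shows where a tool_choice value is passed.
from groq import Groq

client = Groq()  # reads GROQ_API_KEY from the environment

completion = client.chat.completions.create(
    model="mixtral-8x7b-32768",
    messages=[{"role": "user", "content": "What is the weather in Berlin?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical tool, for illustration only
                "description": "Look up the current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    tool_choice="auto",  # the "tool call choice" value the commit message refers to
)
print(completion.choices[0].message.tool_calls)
```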
2 changes: 1 addition & 1 deletion .stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 7
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-4643bbeed7059f8301560a9fa93e4196393cf65a0ce9b243f5bc071efadd2748.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-45aff5d28bf67a962a928a38a6cf90ae3034bc824d2e65036d8784643e4995db.yml
4 changes: 2 additions & 2 deletions README.md
@@ -37,7 +37,7 @@ chat_completion = client.chat.completions.create(
],
model="mixtral-8x7b-32768",
)
-print(chat_completion.choices[0].message.content)
+print(chat_completion.choices_0.message.content)
```

While you can provide an `api_key` keyword argument,
@@ -66,7 +66,7 @@ async def main() -> None:
],
model="mixtral-8x7b-32768",
)
-print(chat_completion.choices[0].message.content)
+print(chat_completion.choices_0.message.content)


asyncio.run(main())
12 changes: 6 additions & 6 deletions src/groq/resources/audio/transcriptions.py
@@ -47,7 +47,7 @@ def create(
model: Union[str, Literal["whisper-large-v3"]],
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
-response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
+response_format: Literal["json", "text", "verbose_json"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -75,8 +75,8 @@ def create(
segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the
audio language.
-response_format: The format of the transcript output, in one of these options: `json`, `text`,
-`srt`, `verbose_json`, or `vtt`.
+response_format: The format of the transcript output, in one of these options: `json`, `text`, or
+`verbose_json`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
@@ -147,7 +147,7 @@ async def create(
model: Union[str, Literal["whisper-large-v3"]],
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
-response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
+response_format: Literal["json", "text", "verbose_json"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -175,8 +175,8 @@ async def create(
segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the
audio language.
-response_format: The format of the transcript output, in one of these options: `json`, `text`,
-`srt`, `verbose_json`, or `vtt`.
+response_format: The format of the transcript output, in one of these options: `json`, `text`, or
+`verbose_json`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
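For orientation, a minimal sketch of how the narrowed `response_format` type looks from the caller's side. The file path is an assumption for illustration, and `GROQ_API_KEY` is read from the environment.

```python
from groq import Groq

client = Groq()

# Illustrative file path; any audio file accepted by the API would do.
with open("meeting.m4a", "rb") as audio:
    transcription = client.audio.transcriptions.create(
        file=("meeting.m4a", audio.read()),
        model="whisper-large-v3",
        # After this change the accepted values are "json", "text", or "verbose_json";
        # "srt" and "vtt" are no longer part of the Literal.
        response_format="json",
        temperature=0.0,
    )
print(transcription.text)
```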
12 changes: 6 additions & 6 deletions src/groq/resources/audio/translations.py
@@ -46,7 +46,7 @@ def create(
file: FileTypes,
model: Union[str, Literal["whisper-large-v3"]],
prompt: str | NotGiven = NOT_GIVEN,
-response_format: str | NotGiven = NOT_GIVEN,
+response_format: Literal["json", "text", "verbose_json"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -68,8 +68,8 @@ def create(
segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in
English.
-response_format: The format of the transcript output, in one of these options: `json`, `text`,
-`srt`, `verbose_json`, or `vtt`.
+response_format: The format of the transcript output, in one of these options: `json`, `text`, or
+`verbose_json`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
@@ -131,7 +131,7 @@ async def create(
file: FileTypes,
model: Union[str, Literal["whisper-large-v3"]],
prompt: str | NotGiven = NOT_GIVEN,
-response_format: str | NotGiven = NOT_GIVEN,
+response_format: Literal["json", "text", "verbose_json"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -153,8 +153,8 @@ async def create(
segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in
English.
-response_format: The format of the transcript output, in one of these options: `json`, `text`,
-`srt`, `verbose_json`, or `vtt`.
+response_format: The format of the transcript output, in one of these options: `json`, `text`, or
+`verbose_json`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
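Likewise, a hedged sketch of a translation call under the tightened type (previously a bare `str`). The file path and prompt are illustrative assumptions, not taken from this diff.

```python
from groq import Groq

client = Groq()

# Illustrative path to non-English speech; translations return English text.
with open("speech_fr.m4a", "rb") as audio:
    translation = client.audio.translations.create(
        file=("speech_fr.m4a", audio.read()),
        model="whisper-large-v3",
        prompt="Technical vocabulary about GPUs.",  # per the docstring, the prompt should be in English
        # Now constrained to the same Literal as transcriptions:
        # "json", "text", or "verbose_json".
        response_format="json",
        temperature=0.0,
    )
print(translation.text)
```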
