Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Changes to generated branch automatically reverted #852

Merged
merged 1 commit into from
May 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
revert custom code applied to generated branch
  • Loading branch information
Stainless Bot committed May 15, 2024
commit ef2d49824b7a4d9d5bdb16e3125df4379ac5c814
2 changes: 2 additions & 0 deletions examples/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
yarn.lock
node_modules
4 changes: 4 additions & 0 deletions examples/.keep
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
File generated from our OpenAPI spec by Stainless.

This directory can be used to store example files demonstrating usage of this SDK.
It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.
41 changes: 41 additions & 0 deletions examples/assistant-stream-raw.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
#!/usr/bin/env -S npm run tsn -T

import OpenAI from 'openai';

// Reads the API key from the OPENAI_API_KEY environment variable.
const client = new OpenAI();

async function main() {
  // Create an assistant and a thread seeded with the user's question.
  const assistant = await client.beta.assistants.create({
    model: 'gpt-4-1106-preview',
    name: 'Math Tutor',
    instructions: 'You are a personal math tutor. Write and run code to answer math questions.',
  });

  const thread = await client.beta.threads.create({
    messages: [
      {
        role: 'user',
        content: '"I need to solve the equation `3x + 11 = 14`. Can you help me?"',
      },
    ],
  });

  // Start a run with raw event streaming enabled.
  const stream = await client.beta.threads.runs.create(thread.id, {
    assistant_id: assistant.id,
    additional_instructions: 'Please address the user as Jane Doe. The user has a premium account.',
    stream: true,
  });

  // Print message text deltas to stdout as they arrive; skip all other event types.
  for await (const event of stream) {
    if (event.event !== 'thread.message.delta') continue;
    const part = event.data.delta.content?.[0];
    if (part && 'text' in part && part.text.value) {
      process.stdout.write(part.text.value);
    }
  }

  // Terminate the streamed output with a newline.
  console.log();
}

main();
48 changes: 48 additions & 0 deletions examples/assistant-stream.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#!/usr/bin/env -S npm run tsn -T

import OpenAI from 'openai';

/**
 * Example of streaming a response from an assistant
 */

const openai = new OpenAI();

async function main() {
  const assistant = await openai.beta.assistants.create({
    model: 'gpt-4-1106-preview',
    name: 'Math Tutor',
    instructions: 'You are a personal math tutor. Write and run code to answer math questions.',
  });

  const assistantId = assistant.id;
  console.log('Created Assistant with Id: ' + assistantId);

  const thread = await openai.beta.threads.create({
    messages: [
      {
        role: 'user',
        content: '"I need to solve the equation `3x + 11 = 14`. Can you help me?"',
      },
    ],
  });

  const threadId = thread.id;
  console.log('Created thread with Id: ' + threadId);

  const run = openai.beta.threads.runs
    .stream(threadId, {
      assistant_id: assistantId,
    })
    // Subscribe to streaming events and log them.
    // Note: each handler is registered once — the original registered
    // 'messageDelta' twice, which logged every message snapshot two times.
    .on('event', (event) => console.log(event))
    .on('textDelta', (delta, snapshot) => console.log(snapshot))
    .on('messageDelta', (delta, snapshot) => console.log(snapshot))
    .on('run', (run) => console.log(run))
    .on('connect', () => console.log());

  // Wait for the run to reach a terminal state and log the final run object.
  // (Passing the object as a separate console.log argument avoids the
  // '[object Object]' output that string concatenation produced.)
  const result = await run.finalRun();
  console.log('Run Result:', result);
}

main();
49 changes: 49 additions & 0 deletions examples/assistants.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
#!/usr/bin/env -S npm run tsn -T

import OpenAI from 'openai';

/**
 * Example of polling for a complete response from an assistant
 */

const openai = new OpenAI();

async function main() {
  const assistant = await openai.beta.assistants.create({
    model: 'gpt-4-1106-preview',
    name: 'Math Tutor',
    instructions: 'You are a personal math tutor. Write and run code to answer math questions.',
    // tools: [],
  });

  const assistantId = assistant.id;
  console.log('Created Assistant with Id: ' + assistantId);

  const thread = await openai.beta.threads.create({
    messages: [
      {
        role: 'user',
        content: '"I need to solve the equation `3x + 11 = 14`. Can you help me?"',
      },
    ],
  });

  const threadId = thread.id;
  console.log('Created thread with Id: ' + threadId);

  // createAndPoll submits the run and polls until it reaches a terminal status.
  const run = await openai.beta.threads.runs.createAndPoll(threadId, {
    assistant_id: assistantId,
    additional_instructions: 'Please address the user as Jane Doe. The user has a premium account.',
  });

  console.log('Run finished with status: ' + run.status);

  if (run.status === 'completed') {
    const messages = await openai.beta.threads.messages.list(threadId);
    for (const message of messages.getPaginatedItems()) {
      console.log(message);
    }
  }
}

main();
73 changes: 73 additions & 0 deletions examples/audio.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
#!/usr/bin/env -S npm run tsn -T
import 'openai/shims/node';

import OpenAI, { toFile } from 'openai';
import fs from 'fs';
import path from 'path';

// gets API Key from environment variable OPENAI_API_KEY
const openai = new OpenAI();

const speechFile = path.resolve(__dirname, './speech.mp3');

async function main() {
  await streamingDemoNode();
  await blockingDemo();
}
main();

/** Streams a text-to-speech response straight to disk without buffering it in memory. */
async function streamingDemoNode() {
  const response = await openai.audio.speech.create({
    model: 'tts-1',
    voice: 'alloy',
    input: 'the quick brown chicken jumped over the lazy dogs',
  });

  const stream = response.body;

  console.log(`Streaming response to ${speechFile}`);
  await streamToFile(stream, speechFile);
  console.log('Finished streaming');
}

/** Buffers a full text-to-speech response, writes it, then transcribes and translates it. */
async function blockingDemo() {
  const mp3 = await openai.audio.speech.create({
    model: 'tts-1',
    voice: 'alloy',
    input: 'the quick brown fox jumped over the lazy dogs',
  });

  const buffer = Buffer.from(await mp3.arrayBuffer());
  await fs.promises.writeFile(speechFile, buffer);

  const transcription = await openai.audio.transcriptions.create({
    file: await toFile(buffer, 'speech.mp3'),
    model: 'whisper-1',
  });
  console.log(transcription.text);

  const translation = await openai.audio.translations.create({
    file: await toFile(buffer, 'speech.mp3'),
    model: 'whisper-1',
  });
  console.log(translation.text);
}

/**
 * Pipes a readable stream into a file and resolves once the write completes.
 *
 * Note, this is Node-specific.
 *
 * Other runtimes would need a different `fs`,
 * and would also use a web ReadableStream,
 * which is different from a Node ReadableStream.
 */
async function streamToFile(stream: NodeJS.ReadableStream, path: fs.PathLike): Promise<void> {
  // Typing the Promise explicitly avoids the Promise<unknown> the bare
  // constructor would infer (resolve is invoked from a stream event callback).
  return new Promise<void>((resolve, reject) => {
    // 'finish' fires once all buffered data has been flushed to the file.
    const writeStream = fs.createWriteStream(path).on('error', reject).on('finish', resolve);

    // If you don't see a `stream.pipe` method and you're using Node you might need to add `import 'openai/shims/node'` at the top of your entrypoint file.
    stream.pipe(writeStream).on('error', (error) => {
      writeStream.close();
      reject(error);
    });
  });
}
38 changes: 38 additions & 0 deletions examples/azure.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#!/usr/bin/env -S npm run tsn -T

import { AzureOpenAI } from 'openai';

// Corresponds to your Model deployment within your OpenAI resource, e.g. gpt-4-1106-preview
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = 'gpt-4-1106-preview';

// Make sure to set both AZURE_OPENAI_ENDPOINT with the endpoint of your Azure resource and AZURE_OPENAI_API_KEY with the API key.
// You can find both information in the Azure Portal.
const openai = new AzureOpenAI();

async function main() {
  // Both demos send the same single-message prompt.
  const prompt = [{ role: 'user' as const, content: 'Say hello!' }];

  // Buffered completion: the full response arrives at once.
  console.log('Non-streaming:');
  const completion = await openai.chat.completions.create({
    model: deployment,
    messages: prompt,
  });
  console.log(completion.choices[0]!.message?.content);

  console.log();

  // Streamed completion: print each content delta as it arrives.
  console.log('Streaming:');
  const chunks = await openai.chat.completions.create({
    model: deployment,
    messages: prompt,
    stream: true,
  });

  for await (const chunk of chunks) {
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  }
  process.stdout.write('\n');
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
116 changes: 116 additions & 0 deletions examples/chat-params-types.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
#!/usr/bin/env -S npm run tsn -T

import OpenAI from 'openai';
import { Stream } from 'openai/streaming';

// gets API Key from environment variable OPENAI_API_KEY
const openai = new OpenAI();

// Demonstrates the different ways the chat-completion params types interact
// with `create`'s return type (direct response vs. Stream).
async function main() {
  // ---------------- Explicit non-streaming params ------------

  const params: OpenAI.Chat.ChatCompletionCreateParams = {
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Say this is a test!' }],
  };
  const completion = await openai.chat.completions.create(params);
  console.log(completion.choices[0]?.message?.content);

  // ---------------- Explicit streaming params ----------------

  const streamingParams: OpenAI.Chat.ChatCompletionCreateParams = {
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Say this is a test!' }],
    stream: true,
  };

  const stream = await openai.chat.completions.create(streamingParams);
  for await (const chunk of stream) {
    process.stdout.write(chunk.choices[0]?.delta?.content || '');
  }
  process.stdout.write('\n');

  // ---------------- Explicit (non)streaming types ----------------

  // These dedicated param types pin down `stream` at the type level, so
  // `create`'s return type is known statically. (They are only constructed
  // here to show the annotations; they aren't sent.)
  const params1: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Say this is a test!' }],
  };

  const params2: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Say this is a test!' }],
    stream: true,
  };

  // ---------------- Implicit params type -------------------

  // Note: the `as const` is required here so that TS can properly infer
  // the right params type.
  //
  // If you didn't include it then you'd also get an error saying that
  // `role: string` is not assignable.
  const streamingParams2 = {
    model: 'gpt-4',
    messages: [{ role: 'user' as const, content: 'Say this is a test!' }],
    stream: true as const,
  };

  // TS knows this is a Stream instance.
  const stream2 = await openai.chat.completions.create(streamingParams2);
  for await (const chunk of stream2) {
    process.stdout.write(chunk.choices[0]?.delta?.content || '');
  }
  process.stdout.write('\n');

  // Without the `as const` for `stream`.
  const streamingParams3 = {
    model: 'gpt-4',
    messages: [{ role: 'user' as const, content: 'Say this is a test!' }],
    stream: true,
  };

  // TS doesn't know if this is a `Stream` or a direct response, so narrow
  // at runtime with `instanceof`.
  const response = await openai.chat.completions.create(streamingParams3);
  if (response instanceof Stream) {
    // here TS knows the response type is a `Stream`
  } else {
    // here TS knows the response type is a `ChatCompletion`
  }

  // ---------------- Dynamic params type -------------------

  // TS knows this is a `Stream`
  const streamParamsFromFn = await createCompletionParams(true);
  const streamFromFn = await openai.chat.completions.create(streamParamsFromFn);
  console.log(streamFromFn);

  // TS knows this is a `ChatCompletion`
  const paramsFromFn = await createCompletionParams(false);
  const completionFromFn = await openai.chat.completions.create(paramsFromFn);
  console.log(completionFromFn);
}

// Dynamically construct the params object while retaining whether or
// not the response will be streamed.
//
// The overloads map the literal `stream` argument (true/false) to the
// matching streaming/non-streaming params type, so callers keep a
// precise return type from `create`.
export async function createCompletionParams(
  stream: true,
): Promise<OpenAI.Chat.ChatCompletionCreateParamsStreaming>;
export async function createCompletionParams(
  stream: false,
): Promise<OpenAI.Chat.ChatCompletionCreateParamsNonStreaming>;
export async function createCompletionParams(
  stream: boolean,
): Promise<OpenAI.Chat.ChatCompletionCreateParams> {
  const params = {
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user' as const, content: 'Hello!' }],
    stream: stream,
  };

  // <your logic here>

  return params;
}

main();
Loading