regenerate from spec 1.0.1
jeevnayak committed Jan 25, 2022
1 parent 4ac4160 commit 9d24119
Showing 3 changed files with 396 additions and 22 deletions.
191 changes: 182 additions & 9 deletions api.ts
@@ -28,7 +28,7 @@ import { BASE_PATH, COLLECTION_FORMATS, RequestArgs, BaseAPI, RequiredError } fr
*/
export interface CreateAnswerRequest {
/**
* ID of the engine to use for completion.
* ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
* @type {string}
* @memberof CreateAnswerRequest
*/
@@ -64,7 +64,7 @@ export interface CreateAnswerRequest {
*/
'file'?: string | null;
/**
* ID of the engine to use for [Search](/docs/api-reference/searches/create).
* ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
* @type {string}
* @memberof CreateAnswerRequest
*/
@@ -199,7 +199,7 @@ export interface CreateAnswerResponseSelectedDocuments {
*/
export interface CreateClassificationRequest {
/**
* ID of the engine to use for completion.
* ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
* @type {string}
* @memberof CreateClassificationRequest
*/
@@ -229,7 +229,7 @@ export interface CreateClassificationRequest {
*/
'labels'?: Array<string> | null;
/**
* ID of the engine to use for [Search](/docs/api-reference/searches/create).
* ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
* @type {string}
* @memberof CreateClassificationRequest
*/
@@ -345,6 +345,110 @@ export interface CreateClassificationResponseSelectedExamples {
*/
'label'?: string;
}
/**
*
* @export
* @interface CreateCompletionFromModelRequest
*/
export interface CreateCompletionFromModelRequest {
/**
* The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
* @type {string | Array<string> | Array<number> | Array<any>}
* @memberof CreateCompletionFromModelRequest
*/
'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
/**
* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
* @type {number}
* @memberof CreateCompletionFromModelRequest
*/
'max_tokens'?: number | null;
/**
* What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
* @type {number}
* @memberof CreateCompletionFromModelRequest
*/
'temperature'?: number | null;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
* @type {number}
* @memberof CreateCompletionFromModelRequest
*/
'top_p'?: number | null;
/**
* How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
* @type {number}
* @memberof CreateCompletionFromModelRequest
*/
'n'?: number | null;
/**
* Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
* @type {boolean}
* @memberof CreateCompletionFromModelRequest
*/
'stream'?: boolean | null;
/**
* Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact support@openai.com and describe your use case.
* @type {number}
* @memberof CreateCompletionFromModelRequest
*/
'logprobs'?: number | null;
/**
* Echo back the prompt in addition to the completion
* @type {boolean}
* @memberof CreateCompletionFromModelRequest
*/
'echo'?: boolean | null;
/**
* Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
* @type {string | Array<string>}
* @memberof CreateCompletionFromModelRequest
*/
'stop'?: string | Array<string> | null;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateCompletionFromModelRequest
*/
'presence_penalty'?: number | null;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateCompletionFromModelRequest
*/
'frequency_penalty'?: number | null;
/**
* Generates `best_of` completions server-side and returns the \"best\" (the one with the lowest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
* @type {number}
* @memberof CreateCompletionFromModelRequest
*/
'best_of'?: number | null;
/**
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
* @type {object}
* @memberof CreateCompletionFromModelRequest
*/
'logit_bias'?: object | null;
/**
* ID of the model to use for completion.
* @type {string}
* @memberof CreateCompletionFromModelRequest
*/
'model'?: string;
}
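
For orientation, a minimal request built against this new interface might look like the sketch below. The fine-tuned model ID is a hypothetical placeholder; the remaining fields mirror the documentation above.

const request: CreateCompletionFromModelRequest = {
  model: 'curie:ft-acme-2022-01-25', // hypothetical fine-tuned model ID
  prompt: 'Say this is a test',
  max_tokens: 16,
  temperature: 0, // argmax sampling, for a well-defined answer
  stop: ['\n'], // stop generating at the first newline
  logit_bias: { '50256': -100 }, // ban <|endoftext|>, per the note above
};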
/**
*
* @export
* @interface CreateCompletionFromModelRequestAllOf
*/
export interface CreateCompletionFromModelRequestAllOf {
/**
* ID of the model to use for completion.
* @type {string}
* @memberof CreateCompletionFromModelRequestAllOf
*/
'model'?: string;
}
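
This `...AllOf` fragment is how the OpenAPI generator represents an `allOf` schema composition: the full request type is, conceptually, the shared completion parameters intersected with this `model`-only piece. A rough type-level sketch of that relationship (an illustration only, not part of the generated file):

// Hypothetical illustration of the allOf composition the generator flattens:
type SharedCompletionParams = Omit<CreateCompletionFromModelRequest, 'model'>;
type ComposedRequest = SharedCompletionParams & CreateCompletionFromModelRequestAllOf;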
/**
*
* @export
@@ -358,7 +462,7 @@ export interface CreateCompletionRequest {
*/
'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
/**
* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `davinci-codex`, which supports 4096).
* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
* @type {number}
* @memberof CreateCompletionRequest
*/
@@ -1208,6 +1312,42 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
options: localVarRequestOptions,
};
},
/**
*
* @summary Creates a completion using a fine-tuned model
* @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createCompletionFromModel: async (createCompletionFromModelRequest: CreateCompletionFromModelRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
// verify required parameter 'createCompletionFromModelRequest' is not null or undefined
assertParamExists('createCompletionFromModel', 'createCompletionFromModelRequest', createCompletionFromModelRequest)
const localVarPath = `/completions`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}

const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
const localVarHeaderParameter = {} as any;
const localVarQueryParameter = {} as any;



localVarHeaderParameter['Content-Type'] = 'application/json';

setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
localVarRequestOptions.data = serializeDataIfNeeded(createCompletionFromModelRequest, localVarRequestOptions, configuration)

return {
url: toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
},
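
Unlike the engine-scoped `createCompletion`, this param creator posts to the bare `/completions` path and carries the target model in the JSON body rather than in the URL. A sketch of the equivalent raw request, assuming `BASE_PATH` resolves to `https://api.openai.com/v1` and that auth headers arrive via `configuration.baseOptions`:

// Sketch of the wire request the args above describe (plain axios, for illustration):
import axios from 'axios';

async function rawCompletionFromModel(apiKey: string) {
  return axios.post(
    'https://api.openai.com/v1/completions', // BASE_PATH + localVarPath
    { model: 'curie:ft-acme-2022-01-25', prompt: 'Say this is a test' }, // hypothetical model
    { headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` } },
  );
}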
/**
*
* @summary Creates an embedding vector representing the input text.
@@ -1335,7 +1475,7 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
/**
*
* @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
* @param {string} engineId The ID of the engine to use for this request
* @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
* @param {CreateSearchRequest} createSearchRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@@ -1726,6 +1866,17 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletion(engineId, createCompletionRequest, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
/**
*
* @summary Creates a completion using a fine-tuned model
* @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletionFromModel(createCompletionFromModelRequest, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
/**
*
* @summary Creates an embedding vector representing the input text.
@@ -1764,7 +1915,7 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
/**
*
* @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
* @param {string} engineId The ID of the engine to use for this request
* @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
* @param {CreateSearchRequest} createSearchRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@@ -1921,6 +2072,16 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
return localVarFp.createCompletion(engineId, createCompletionRequest, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Creates a completion using a fine-tuned model
* @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
return localVarFp.createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Creates an embedding vector representing the input text.
@@ -1956,7 +2117,7 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
/**
*
* @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
* @param {string} engineId The ID of the engine to use for this request
* @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
* @param {CreateSearchRequest} createSearchRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@@ -2111,6 +2272,18 @@ export class OpenAIApi extends BaseAPI {
return OpenAIApiFp(this.configuration).createCompletion(engineId, createCompletionRequest, options).then((request) => request(this.axios, this.basePath));
}

/**
*
* @summary Creates a completion using a fine-tuned model
* @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
public createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig) {
return OpenAIApiFp(this.configuration).createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(this.axios, this.basePath));
}
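
End to end, a caller reaches this method through the top-level class. A minimal usage sketch, assuming the package's usual `Configuration` setup and a hypothetical fine-tuned model ID:

import { Configuration, OpenAIApi } from 'openai';

const openai = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));

async function main() {
  // POSTs to /completions with `model` in the request body (see the param creator above).
  const completion = await openai.createCompletionFromModel({
    model: 'curie:ft-acme-2022-01-25', // hypothetical fine-tuned model
    prompt: 'Say this is a test',
    max_tokens: 16,
  });
  console.log(completion.data.choices?.[0]?.text);
}

main();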

/**
*
* @summary Creates an embedding vector representing the input text.
@@ -2152,7 +2325,7 @@ export class OpenAIApi extends BaseAPI {
/**
*
* @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
* @param {string} engineId The ID of the engine to use for this request
* @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
* @param {CreateSearchRequest} createSearchRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
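Finally, for the search endpoint whose docs are touched throughout this diff, the engine ID travels in the URL path while the query and documents go in the request body. A brief sketch, continuing from the client set up in the previous example and assuming the `createSearch` signature shown earlier in this file:

// Score three documents against a query with the `davinci` engine.
async function searchExample() {
  const search = await openai.createSearch('davinci', {
    documents: ['White House', 'hospital', 'school'],
    query: 'the president',
  });
  // Per the summary above, scores above ~200 usually indicate semantic similarity.
  console.log(search.data.data);
}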