Skip to content

Commit

Permalink
OpenAI-DotNet 5.0.0 (RageAgainstThePixel#29)
Browse files Browse the repository at this point in the history
- Closes RageAgainstThePixel#28 Added chat endpoint
  • Loading branch information
StephenHodgson committed Mar 2, 2023
1 parent 86e051f commit d106827
Show file tree
Hide file tree
Showing 11 changed files with 362 additions and 9 deletions.
31 changes: 31 additions & 0 deletions OpenAI-DotNet-Tests/TestFixture_03_Chat.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using NUnit.Framework;
using OpenAI.Chat;

namespace OpenAI.Tests
{
    /// <summary>
    /// Integration tests for the chat completions endpoint.
    /// NOTE(review): requires a valid OpenAI API key available via <see cref="OpenAIAuthentication.LoadFromEnv"/> — confirm CI provides one.
    /// </summary>
    internal class TestFixture_03_Chat
    {
        [Test]
        public async Task Test_1_GetChatCompletion()
        {
            var api = new OpenAIClient(OpenAIAuthentication.LoadFromEnv());
            Assert.IsNotNull(api.ChatEndpoint);

            // Multi-turn conversation in the chat format: system -> user -> assistant -> user.
            var conversation = new List<ChatPrompt>
            {
                new ChatPrompt("system", "You are a helpful assistant."),
                new ChatPrompt("user", "Who won the world series in 2020?"),
                new ChatPrompt("assistant", "The Los Angeles Dodgers won the World Series in 2020."),
                new ChatPrompt("user", "Where was it played?"),
            };

            var request = new ChatRequest(conversation);
            var response = await api.ChatEndpoint.GetCompletionAsync(request);

            Assert.IsNotNull(response);
            Assert.NotNull(response.Choices);
            Assert.NotZero(response.Choices.Count);
            Console.WriteLine(response.FirstChoice);
        }
    }
}
32 changes: 32 additions & 0 deletions OpenAI-DotNet/Chat/ChatEndpoint.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
using System;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;

namespace OpenAI.Chat
{
    /// <summary>
    /// Given a list of messages comprising a conversation, the model will return a chat completion response.
    /// </summary>
    public sealed class ChatEndpoint : BaseEndPoint
    {
        public ChatEndpoint(OpenAIClient api) : base(api) { }

        protected override string GetEndpoint() => $"{Api.BaseUrl}chat";

        /// <summary>
        /// Creates a completion for the chat message
        /// </summary>
        /// <param name="chatRequest">The chat request which contains the message content.</param>
        /// <param name="cancellationToken">Optional, <see cref="CancellationToken"/>.</param>
        /// <returns><see cref="ChatResponse"/>.</returns>
        public async Task<ChatResponse> GetCompletionAsync(ChatRequest chatRequest, CancellationToken cancellationToken = default)
        {
            var payload = JsonSerializer.Serialize(chatRequest, Api.JsonSerializationOptions).ToJsonStringContent();
            var response = await Api.Client.PostAsync($"{GetEndpoint()}/completions", payload, cancellationToken);
            // NOTE(review): ReadAsStringAsync here is a project extension on HttpResponseMessage —
            // presumably it validates the response status; confirm it throws on non-success codes.
            var responseJson = await response.ReadAsStringAsync(cancellationToken);
            return JsonSerializer.Deserialize<ChatResponse>(responseJson, Api.JsonSerializationOptions);
        }

        // TODO Streaming endpoints
    }
}
23 changes: 23 additions & 0 deletions OpenAI-DotNet/Chat/ChatPrompt.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
using System.Text.Json;
using System.Text.Json.Serialization;

namespace OpenAI.Chat
{
    /// <summary>
    /// A single message in a chat conversation, pairing a role
    /// (e.g. "system", "user", "assistant") with its text content.
    /// </summary>
    public sealed class ChatPrompt
    {
        [JsonConstructor]
        public ChatPrompt(string role, string content)
        {
            Role = role;
            Content = content;
        }

        /// <summary>
        /// The role of the author of this message.
        /// </summary>
        [JsonPropertyName("role")]
        public string Role { get; }

        /// <summary>
        /// The text content of this message.
        /// </summary>
        [JsonPropertyName("content")]
        public string Content { get; }

        /// <summary>
        /// Serializes this message to its JSON representation.
        /// </summary>
        public override string ToString()
        {
            return JsonSerializer.Serialize(this);
        }
    }
}
131 changes: 131 additions & 0 deletions OpenAI-DotNet/Chat/ChatRequest.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
using OpenAI.Models;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace OpenAI.Chat
{
    /// <summary>
    /// The request payload for a chat completion.
    /// </summary>
    public sealed class ChatRequest
    {
        /// <summary>
        /// Creates a new chat completion request.
        /// </summary>
        /// <param name="messages">The messages to generate chat completions for, in the chat format.</param>
        /// <param name="model">Optional, the model to use. Defaults to gpt-3.5-turbo.</param>
        /// <param name="temperature">Optional, sampling temperature between 0 and 2.</param>
        /// <param name="topP">Optional, nucleus sampling probability mass.</param>
        /// <param name="number">Optional, how many completion choices to generate per input.</param>
        /// <param name="stops">Optional, up to 4 stop sequences.</param>
        /// <param name="presencePenalty">Optional, presence penalty between -2.0 and 2.0.</param>
        /// <param name="frequencyPenalty">Optional, frequency penalty between -2.0 and 2.0.</param>
        /// <param name="user">Optional, a unique identifier representing the end-user.</param>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="messages"/> is null.</exception>
        /// <exception cref="ArgumentException">Thrown when <paramref name="model"/> is not a supported chat model.</exception>
        public ChatRequest(
            IEnumerable<ChatPrompt> messages,
            Model model = null,
            double? temperature = null,
            double? topP = null,
            int? number = null,
            string[] stops = null,
            double? presencePenalty = null,
            double? frequencyPenalty = null,
            string user = null)
        {
            if (messages is null)
            {
                throw new ArgumentNullException(nameof(messages));
            }

            const string defaultModel = "gpt-3.5-turbo";
            Model = model ?? new Model(defaultModel);

            if (!Model.Contains(defaultModel))
            {
                // ArgumentException's first parameter is the message, the second is the parameter name.
                // (The original code had them swapped.)
                throw new ArgumentException($"{Model} not supported", nameof(model));
            }

            Messages = messages.ToList();
            Temperature = temperature;
            TopP = topP;
            Number = number;
            Stops = stops;
            PresencePenalty = presencePenalty;
            FrequencyPenalty = frequencyPenalty;
            User = user;
        }

        /// <summary>
        /// ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
        /// </summary>
        [JsonPropertyName("model")]
        public string Model { get; }

        /// <summary>
        /// The messages to generate chat completions for, in the chat format.
        /// </summary>
        [JsonPropertyName("messages")]
        public IReadOnlyList<ChatPrompt> Messages { get; }

        /// <summary>
        /// What sampling temperature to use, between 0 and 2.
        /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will
        /// make it more focused and deterministic.
        /// We generally recommend altering this or top_p but not both.<br/>
        /// Defaults to 1
        /// </summary>
        [JsonPropertyName("temperature")]
        public double? Temperature { get; }

        /// <summary>
        /// An alternative to sampling with temperature, called nucleus sampling,
        /// where the model considers the results of the tokens with top_p probability mass.
        /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
        /// We generally recommend altering this or temperature but not both.<br/>
        /// Defaults to 1
        /// </summary>
        [JsonPropertyName("top_p")]
        public double? TopP { get; }

        /// <summary>
        /// How many chat completion choices to generate for each input message.<br/>
        /// Defaults to 1
        /// </summary>
        /// <remarks>
        /// The OpenAI API parameter for this value is named "n"; serializing it as "number"
        /// would be silently ignored by the service.
        /// </remarks>
        [JsonPropertyName("n")]
        public int? Number { get; }

        /// <summary>
        /// Specifies where the results should stream and be returned at one time.
        /// Do not set this yourself, use the appropriate methods on <see cref="ChatEndpoint"/> instead.<br/>
        /// Defaults to false
        /// </summary>
        [JsonPropertyName("stream")]
        public bool Stream { get; internal set; }

        /// <summary>
        /// Up to 4 sequences where the API will stop generating further tokens.
        /// </summary>
        [JsonPropertyName("stop")]
        public string[] Stops { get; }

        /// <summary>
        /// Number between -2.0 and 2.0.
        /// Positive values penalize new tokens based on whether they appear in the text so far,
        /// increasing the model's likelihood to talk about new topics.<br/>
        /// Defaults to 0
        /// </summary>
        [JsonPropertyName("presence_penalty")]
        public double? PresencePenalty { get; }

        /// <summary>
        /// Number between -2.0 and 2.0.
        /// Positive values penalize new tokens based on their existing frequency in the text so far,
        /// decreasing the model's likelihood to repeat the same line verbatim.<br/>
        /// Defaults to 0
        /// </summary>
        [JsonPropertyName("frequency_penalty")]
        public double? FrequencyPenalty { get; }

        /// <summary>Modify the likelihood of specified tokens appearing in the completion.
        /// Accepts a json object that maps tokens(specified by their token ID in the tokenizer)
        /// to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits
        /// generated by the model prior to sampling.The exact effect will vary per model, but values between
        /// -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result
        /// in a ban or exclusive selection of the relevant token.<br/>
        /// Defaults to null
        /// </summary>
        [JsonPropertyName("logit_bias")]
        public Dictionary<string, double> LogitBias { get; set; }

        /// <summary>
        /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
        /// </summary>
        [JsonPropertyName("user")]
        public string User { get; }

        /// <summary>
        /// Serializes this request to its JSON representation.
        /// </summary>
        public override string ToString() => JsonSerializer.Serialize(this);
    }
}
33 changes: 33 additions & 0 deletions OpenAI-DotNet/Chat/ChatResponse.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace OpenAI.Chat
{
    /// <summary>
    /// The response returned by the chat completions endpoint.
    /// </summary>
    public sealed class ChatResponse
    {
        /// <summary>The unique identifier of this completion.</summary>
        [JsonPropertyName("id")]
        public string Id { get; set; }

        /// <summary>The object type returned by the API (e.g. "chat.completion").</summary>
        [JsonPropertyName("object")]
        public string Object { get; set; }

        /// <summary>Unix timestamp (seconds) of when the completion was created.</summary>
        [JsonPropertyName("created")]
        public int Created { get; set; }

        /// <summary>The model used to generate the completion.</summary>
        [JsonPropertyName("model")]
        public string Model { get; set; }

        /// <summary>Token usage statistics for the request/response pair.</summary>
        [JsonPropertyName("usage")]
        public Usage Usage { get; set; }

        /// <summary>The list of generated completion choices.</summary>
        [JsonPropertyName("choices")]
        public IReadOnlyList<Choice> Choices { get; set; }

        /// <summary>
        /// Convenience accessor for the first choice, or null when no choices were returned.
        /// Null-conditional so that an error payload with no "choices" array does not
        /// throw a NullReferenceException.
        /// </summary>
        [JsonIgnore]
        public Choice FirstChoice => Choices?.FirstOrDefault();

        /// <summary>
        /// Serializes this response to its JSON representation.
        /// </summary>
        public override string ToString() => JsonSerializer.Serialize(this);
    }
}
31 changes: 31 additions & 0 deletions OpenAI-DotNet/Chat/Choice.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
using System.Text.Json.Serialization;

namespace OpenAI.Chat
{
    /// <summary>
    /// A single completion choice returned by the chat endpoint.
    /// </summary>
    public sealed class Choice
    {
        [JsonConstructor]
        public Choice(
            Message message,
            string finishReason,
            int index)
        {
            Message = message;
            FinishReason = finishReason;
            Index = index;
        }

        /// <summary>The generated chat message for this choice.</summary>
        [JsonPropertyName("message")]
        public Message Message { get; }

        /// <summary>Why generation stopped (e.g. "stop", "length").</summary>
        [JsonPropertyName("finish_reason")]
        public string FinishReason { get; }

        /// <summary>The index of this choice in the response's choices list.</summary>
        [JsonPropertyName("index")]
        public int Index { get; }

        /// <summary>
        /// The message content of this choice.
        /// Null-conditional so a choice deserialized without a "message" field
        /// (e.g. partial/error payloads) does not throw a NullReferenceException.
        /// </summary>
        public override string ToString() => Message?.ToString() ?? string.Empty;

        public static implicit operator string(Choice choice) => choice?.ToString();
    }
}
26 changes: 26 additions & 0 deletions OpenAI-DotNet/Chat/Message.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
using System.Text.Json.Serialization;

namespace OpenAI.Chat
{
    /// <summary>
    /// A chat message returned by the API, pairing the author's role with its text content.
    /// </summary>
    public sealed class Message
    {
        [JsonConstructor]
        public Message(string role, string content)
        {
            Role = role;
            Content = content;
        }

        /// <summary>
        /// The role of the author of this message (e.g. "assistant").
        /// </summary>
        [JsonPropertyName("role")]
        public string Role { get; }

        /// <summary>
        /// The text content of this message.
        /// </summary>
        [JsonPropertyName("content")]
        public string Content { get; }

        /// <summary>
        /// Returns the message content.
        /// </summary>
        public override string ToString()
        {
            return Content;
        }

        public static implicit operator string(Message message) => message.Content;
    }
}
14 changes: 10 additions & 4 deletions OpenAI-DotNet/Models/Model.cs
Original file line number Diff line number Diff line change
Expand Up @@ -69,28 +69,34 @@ public Model(string id)
/// </summary>
public static Model Default => Davinci;

/// <summary>
/// Because gpt-3.5-turbo performs at a similar capability to text-davinci-003 but at 10%
/// the price per token, we recommend gpt-3.5-turbo for most use cases.
/// </summary>
public static Model GPT3_5_Turbo { get; } = new Model("gpt-3.5-turbo") { OwnedBy = "openai" };

/// <summary>
/// The most powerful, largest engine available, although the speed is quite slow.<para/>
/// Good at: Complex intent, cause and effect, summarization for audience
/// </summary>
public static Model Davinci => new Model("text-davinci-003") { OwnedBy = "openai" };
public static Model Davinci { get; } = new Model("text-davinci-003") { OwnedBy = "openai" };

/// <summary>
/// The 2nd most powerful engine, a bit faster than <see cref="Davinci"/>, and lower cost.<para/>
/// Good at: Language translation, complex classification, text sentiment, summarization.
/// </summary>
public static Model Curie => new Model("text-curie-001") { OwnedBy = "openai" };
public static Model Curie { get; } = new Model("text-curie-001") { OwnedBy = "openai" };

/// <summary>
/// The 2nd fastest engine, a bit more powerful than <see cref="Ada"/>, and a bit slower.<para/>
/// Good at: Moderate classification, semantic search classification
/// </summary>
public static Model Babbage => new Model("text-babbage-001") { OwnedBy = "openai" };
public static Model Babbage { get; } = new Model("text-babbage-001") { OwnedBy = "openai" };

/// <summary>
/// The smallest, fastest engine available, although the quality of results may be poor.<para/>
/// Good at: Parsing text, simple classification, address correction, keywords
/// </summary>
public static Model Ada => new Model("text-ada-001") { OwnedBy = "openai" };
public static Model Ada { get; } = new Model("text-ada-001") { OwnedBy = "openai" };
}
}
6 changes: 4 additions & 2 deletions OpenAI-DotNet/OpenAI-DotNet.csproj
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,9 @@ Based on OpenAI-API by OKGoDoIt (Roger Pincombe)</Description>
<RepositoryUrl>https://github.com/RageAgainstThePixel/OpenAI-DotNet</RepositoryUrl>
<PackageTags>OpenAI, AI, ML, API, gpt, gpt-3, chatGPT</PackageTags>
<Title>OpenAI API</Title>
<PackageReleaseNotes>Bump version to 4.4.4
<PackageReleaseNotes>Bump version to 5.0.0
- Added Chat endpoint
Bump version to 4.4.4
- ImageEditRequest mask is now optional so long as texture has alpha transparency
- ImageVariationRequest added constructor overload for memory stream image
- Updated AuthInfo parameter validation
Expand All @@ -42,7 +44,7 @@ Bump version to 4.4.0
<AssemblyOriginatorKeyFile>OpenAI-DotNet.pfx</AssemblyOriginatorKeyFile>
<DelaySign>true</DelaySign>
<PackageId>OpenAI-DotNet</PackageId>
<Version>4.4.4</Version>
<Version>5.0.0</Version>
<AssemblyVersion>5.0.0.0</AssemblyVersion>
<FileVersion>5.0.0.0</FileVersion>
<Company>RageAgainstThePixel</Company>
Expand Down
Loading

0 comments on commit d106827

Please sign in to comment.