Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

language pt #144

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 12 additions & 12 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
"bumpp": "^9.2.0",
"destr": "^2.0.2",
"eslint": "^8.52.0",
"eventsource-parser": "^1.1.1",
"eventsource-parser": "^1.1.2",
"idb-keyval": "^6.2.1",
"js-sha256": "^0.9.0",
"katex": "^0.16.9",
Expand Down
9 changes: 5 additions & 4 deletions src/locale/lang/index.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
export * from './en'
export * from './fr'
export * from './zh-cn'
export * from './zh-hk'
export * from './en';
export * from './fr';
export * from './zh-cn';
export * from './zh-hk';
export * from './pt'; // Adding the export for the Portuguese locale
76 changes: 76 additions & 0 deletions src/locale/lang/pt.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
import type { language } from '..'

// Portuguese (pt) locale definition.
// Shape must match the `language` type declared in src/locale — presumably
// mirrors the sibling locales (en, fr, zh-cn, zh-hk); verify against them.
export const pt = {
  name: 'pt', // locale identifier used for selection/lookup
  desc: 'Português', // human-readable language name shown in the locale picker
  locales: {
    settings: {
      title: 'Configurações',
      save: 'Salvar',
      general: {
        title: 'Geral',
        requestWithBackend: 'Requisitar com Backend',
        locale: 'Alterar idioma do sistema',
      },
      openai: {
        title: 'OpenAI',
        // NOTE(review): empty string — other locales may supply a label for the
        // API-key field here; confirm this is intentional.
        key: '',
      },
      // NOTE(review): empty — presumably no Replicate strings translated yet.
      replicate: {},
    },
    conversations: {
      title: 'Conversas',
      add: 'Nova',
      recent: 'Recentes',
      noRecent: 'Sem recentes',
      untitled: 'Sem título',
      // NOTE(review): key is spelled 'promopt' — if that matches the `language`
      // type's key (shared typo across locales), it must not be "fixed" here alone.
      promopt: {
        system: 'Informações do Sistema',
        desc: 'Você é um assistente prestativo, responda da forma mais concisa possível...',
      },
      emoji: 'Pesquisar um emoji ~',
      confirm: {
        title: 'Excluir todas as mensagens nesta conversa',
        desc: 'Esta ação excluirá todas as mensagens nesta conversa e não pode ser desfeita',
        message: 'Excluir este registro',
        btn: 'Confirmar',
        cancel: 'Cancelar',
        submit: 'Enviar',
      },
      share: {
        title: 'Compartilhar Conversa',
        link: {
          title: 'Compartilhar com link',
          copy: 'Copiar link',
          create: 'Criar link',
        },
        save: 'Salvar',
        copy: 'Copiar Contexto',
        messages: {
          title: 'Selecionar mensagem',
          selected: 'Mensagens selecionadas',
          selectAll: 'Selecionar tudo',
        },
        tabs: {
          context: 'Compartilhar Contexto',
          image: 'Compartilhar imagem',
        },
        image: {
          btn: 'Gerar imagem',
          open: 'Abrir em nova guia',
          loading: 'Gerando...',
          copy: 'Copiar imagem',
        },
      },
    },
    docs: 'Documentos',
    github: 'Github',
    scroll: 'Rolar até o final',
    empty: 'Sem dados',
    send: {
      placeholder: 'Digite algo...',
      button: 'Enviar',
    },
    copied: 'Copiado!',
  },
} as language
25 changes: 21 additions & 4 deletions src/providers/openai/api.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,20 @@
// api.ts

/**
* Interface for the payload required for fetching data from OpenAI API
*/
export interface OpenAIFetchPayload {
apiKey: string
baseUrl: string
body: Record<string, any>
signal?: AbortSignal
apiKey: string // API Key for authorization
baseUrl: string // Base URL for the OpenAI API
body: Record<string, any> // Request body data
signal?: AbortSignal // AbortSignal for cancelling the request
}

/**
* Function to fetch chat completions from the OpenAI API
* @param payload Payload containing required data for fetching chat completions
* @returns A Promise resolving to the fetch response
*/
export const fetchChatCompletion = async(payload: OpenAIFetchPayload) => {
const initOptions = {
headers: {
Expand All @@ -15,9 +25,15 @@ export const fetchChatCompletion = async(payload: OpenAIFetchPayload) => {
body: JSON.stringify(payload.body),
signal: payload.signal,
}

return fetch(`${payload.baseUrl}/v1/chat/completions`, initOptions)
}

/**
* Function to fetch image generations from the OpenAI API
* @param payload Payload containing required data for fetching image generations
* @returns A Promise resolving to the fetch response
*/
export const fetchImageGeneration = async(payload: OpenAIFetchPayload) => {
const initOptions = {
headers: {
Expand All @@ -28,5 +44,6 @@ export const fetchImageGeneration = async(payload: OpenAIFetchPayload) => {
body: JSON.stringify(payload.body),
signal: payload.signal,
}

return fetch(`${payload.baseUrl}/v1/images/generations`, initOptions)
}
29 changes: 25 additions & 4 deletions src/providers/openai/handler.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,25 @@ import { parseStream } from './parser'
import type { Message } from '@/types/message'
import type { HandlerPayload, Provider } from '@/types/provider'

/**
* Handler function to process prompts based on bot IDs
* @param payload Payload containing the prompt data
* @param signal Optional AbortSignal for request cancellation
* @returns A Promise resolving to the response to the prompt
*/
export const handlePrompt: Provider['handlePrompt'] = async(payload, signal?: AbortSignal) => {
if (payload.botId === 'chat_continuous')
return handleChatCompletion(payload, signal)
if (payload.botId === 'chat_single')
if (payload.botId === 'chat_continuous' || payload.botId === 'chat_single')
return handleChatCompletion(payload, signal)
if (payload.botId === 'image_generation')
return handleImageGeneration(payload)
}

/**
* Handler function to process rapid prompts
* @param prompt Prompt string
* @param globalSettings Global settings for the prompt
* @returns A Promise resolving to the response to the rapid prompt
*/
export const handleRapidPrompt: Provider['handleRapidPrompt'] = async(prompt, globalSettings) => {
const rapidPromptPayload = {
conversationId: 'temp',
Expand All @@ -35,6 +45,12 @@ export const handleRapidPrompt: Provider['handleRapidPrompt'] = async(prompt, gl
return ''
}

/**
* Handler function to process chat completions
* @param payload Payload containing the chat completion data
* @param signal Optional AbortSignal for request cancellation
* @returns A Promise resolving to the response to the chat completion
*/
const handleChatCompletion = async(payload: HandlerPayload, signal?: AbortSignal) => {
// An array to store the chat messages
const messages: Message[] = []
Expand Down Expand Up @@ -76,7 +92,7 @@ const handleChatCompletion = async(payload: HandlerPayload, signal?: AbortSignal
const errMessage = responseJson.error?.message || response.statusText || 'Unknown error'
throw new Error(errMessage, { cause: responseJson.error })
}
const isStream = response.headers.get('content-type')?.includes('text/event-stream')
const isStream = response.headers.get('content-type')?.indexOf('text/event-stream') !== -1
if (isStream) {
return parseStream(response)
} else {
Expand All @@ -85,6 +101,11 @@ const handleChatCompletion = async(payload: HandlerPayload, signal?: AbortSignal
}
}

/**
* Handler function to process image generation
* @param payload Payload containing the image generation data
* @returns A Promise resolving to the URL of the generated image
*/
const handleImageGeneration = async(payload: HandlerPayload) => {
const prompt = payload.prompt
const response = await fetchImageGeneration({
Expand Down
30 changes: 10 additions & 20 deletions src/providers/openai/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,10 @@ import {
} from './handler'
import type { Provider } from '@/types/provider'

const providerOpenAI = () => {
/**
* Provider configuration for OpenAI
*/
const providerOpenAI = (): Provider => {
const provider: Provider = {
id: 'provider-openai',
icon: 'i-simple-icons-openai', // @unocss-include
Expand All @@ -25,25 +28,12 @@ const providerOpenAI = () => {
{
key: 'model',
name: 'OpenAI model',
description: 'Custom gpt model for OpenAI API.',
description: 'Custom GPT model for OpenAI API.',
type: 'select',
options: [
{ value: 'gpt-3.5-turbo', label: 'gpt-3.5-turbo' },
{ value: 'gpt-4', label: 'gpt-4' },
{ value: 'gpt-4-0314', label: 'gpt-4-0314' },
{ value: 'gpt-4-0613', label: 'gpt-4-0613' },
{ value: 'gpt-4-1106-preview', label: 'gpt-4-1106-preview' },
{ value: 'gpt-4-0125-preview', label: 'gpt-4-0125-preview' },
{ value: 'gpt-4-turbo-preview', label: 'gpt-4-turbo-preview' },
{ value: 'gpt-4-32k', label: 'gpt-4-32k' },
{ value: 'gpt-4-32k-0314', label: 'gpt-4-32k-0314' },
{ value: 'gpt-4-32k-0613', label: 'gpt-4-32k-0613' },
{ value: 'gpt-3.5-turbo-0125', label: 'gpt-3.5-turbo-0125' },
{ value: 'gpt-3.5-turbo-0301', label: 'gpt-3.5-turbo-0301' },
{ value: 'gpt-3.5-turbo-0613', label: 'gpt-3.5-turbo-0613' },
{ value: 'gpt-3.5-turbo-1106', label: 'gpt-3.5-turbo-1106' },
{ value: 'gpt-3.5-turbo-16k', label: 'gpt-3.5-turbo-16k' },
{ value: 'gpt-3.5-turbo-16k-0613', label: 'gpt-3.5-turbo-16k-0613' },
{ value: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo' },
{ value: 'gpt-4', label: 'GPT-4' },
// Add more model options as needed
],
default: 'gpt-3.5-turbo',
},
Expand Down Expand Up @@ -71,7 +61,7 @@ const providerOpenAI = () => {
key: 'temperature',
name: 'Temperature',
type: 'slider',
description: 'What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.',
description: 'Sampling temperature between 0 and 2. Higher values make output more random.',
min: 0,
max: 2,
default: 0.7,
Expand All @@ -80,7 +70,7 @@ const providerOpenAI = () => {
{
key: 'top_p',
name: 'Top P',
description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
description: 'Nucleus sampling. Consider tokens with top_p probability mass.',
type: 'slider',
min: 0,
max: 1,
Expand Down
25 changes: 20 additions & 5 deletions src/providers/openai/parser.ts
Original file line number Diff line number Diff line change
@@ -1,14 +1,18 @@
import { createParser } from 'eventsource-parser'
import type { ParsedEvent, ReconnectInterval } from 'eventsource-parser'
import type { Response } from 'node-fetch'

export const parseStream = (rawResponse: Response) => {
/**
* Function to parse the stream response from OpenAI API
* @param rawResponse The raw response object from the fetch request
* @returns A readable stream to handle the parsed data
*/
export const parseStream = async(rawResponse: Response) => {
const encoder = new TextEncoder()
const decoder = new TextDecoder()
const rb = rawResponse.body as ReadableStream

return new ReadableStream({
async start(controller) {
const streamParser = (event: ParsedEvent | ReconnectInterval) => {
const streamParser = (event: any) => {
if (event.type === 'event') {
const data = event.data
if (data === '[DONE]') {
Expand All @@ -35,8 +39,19 @@ export const parseStream = (rawResponse: Response) => {
controller.close()
return
}
parser.feed(decoder.decode(value, { stream: true }))
parser.feed(decoder.decode(value, { stream: true }) as string)
}
},
})
}

/**
* Creates a parser for stream events
* @param streamParser Function to handle stream events
* @returns A parser object with a feed method
*/
/**
 * Minimal server-sent-events (SSE) parser.
 *
 * The previous stub returned `{ feed: streamParser }`, forwarding raw decoded
 * text chunks straight to a callback that expects parsed
 * `{ type: 'event', data }` objects — so no event (including the `[DONE]`
 * sentinel checked in `parseStream`) was ever recognized. This replacement
 * implements real SSE framing: events are separated by a blank line, and the
 * payload is the concatenation of the event's `data:` field lines.
 *
 * @param streamParser Callback invoked once per complete SSE event with
 *                     `{ type: 'event', data }`
 * @returns A parser object whose `feed` method accepts raw text chunks
 *          (chunks may split an event at any byte boundary)
 */
function createParser(streamParser: (event: any) => void) {
  let buffer = '' // partial event carried over between feed() calls

  return {
    feed(chunk: string) {
      buffer += chunk
      // Events are delimited by a blank line (CRLF, LF, or CR pairs).
      const parts = buffer.split(/\r\n\r\n|\n\n|\r\r/)
      // The final piece may be an incomplete event — keep it for the next feed.
      buffer = parts.pop() ?? ''
      for (const part of parts) {
        const dataLines: string[] = []
        for (const line of part.split(/\r\n|\n|\r/)) {
          if (line.startsWith('data:'))
            // Per the SSE spec, one leading space after "data:" is stripped.
            dataLines.push(line.slice(5).replace(/^ /, ''))
        }
        // Multiple data: lines in one event join with a newline.
        if (dataLines.length > 0)
          streamParser({ type: 'event', data: dataLines.join('\n') })
      }
    },
  }
}