Skip to content

Commit

Permalink
🔧 修正(cli.cjs, github-action.cjs, api.ts, config.ts, generateCommitMes…
Browse files Browse the repository at this point in the history
…sageFromGitDiff.ts): OCO_TOKEN_LIMITのデフォルト値を4096に設定しました

OCO_TOKEN_LIMITのデフォルト値を4096に設定しました。また、configファイルと関連する箇所でOCO_TOKEN_LIMITを使用するように変更しました。
※gpt-4やgpt-3.5-turbo-16kの最大トークン数に対応するためにOCO_TOKEN_LIMITでトークン数の最大数を変更できるようにした

🔧 fix(cli.cjs, github-action.cjs, api.ts, config.ts, generateCommitMessageFromGitDiff.ts): Set the default value of OCO_TOKEN_LIMIT to 4096

Set the default value of OCO_TOKEN_LIMIT to 4096. Also updated the relevant parts in the config file and other files to use OCO_TOKEN_LIMIT.
  • Loading branch information
takuya-o committed Jul 22, 2023
1 parent 42a34ca commit 1eeced8
Show file tree
Hide file tree
Showing 7 changed files with 86 additions and 13 deletions.
20 changes: 20 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,24 @@

All the commits in this repo are authored by OpenCommit — look at [the commits](https://github.com/di-sukharev/opencommit/commit/eae7618d575ee8d2e9fff5de56da79d40c4bc5fc) to see how OpenCommit works. Emojis and long commit descriptions are configurable.

## Enhanced from the original upstream [di-sukharev/opencommit](https://github.com/di-sukharev/opencommit)

This repository is forked from the original [di-sukharev/opencommit](https://github.com/di-sukharev/opencommit).
It enhances functionality by adopting, ahead of upstream, some pull requests I need.

Added features
* Support [Azure OpenAI Service #167](https://github.com/di-sukharev/opencommit/pull/167)
* Support [push config #220](https://github.com/di-sukharev/opencommit/pull/220)
* Add translations to English commit messages
* ~~Add [prefix on commit messages #160](https://github.com/di-sukharev/opencommit/pull/160)~~
* The fixed token limit of 4096 can now be changed with OCO_TOKEN_LIMIT to support gpt-3.5-turbo-16k.
etc.

You can install it with the following command.
``` shell
$ npm install github:takuya-o/opencommit
```

## Setup OpenCommit as a CLI tool

You can use OpenCommit by simply running it via the CLI like this `oco`. 2 seconds and your staged changes are committed with a meaningful message.
Expand Down Expand Up @@ -172,12 +190,14 @@ You may switch to GPT-4 which performs better, but costs ~x15 times more 🤠

```sh
oco config set OCO_MODEL=gpt-4
oco config set OCO_TOKEN_LIMIT=32768
```

or for as a cheaper option:

```sh
oco config set OCO_MODEL=gpt-3.5-turbo
oco config set OCO_TOKEN_LIMIT=16384
```

Make sure that you spell it `gpt-4` (lowercase) and that you have API access to the 4th model. Even if you have ChatGPT+, that doesn't necessarily mean that you have API access to GPT-4.
Expand Down
23 changes: 20 additions & 3 deletions out/cli.cjs
Original file line number Diff line number Diff line change
Expand Up @@ -18882,7 +18882,6 @@ function getI18nLocal(value) {
var dotenv = __toESM(require_main(), 1);
dotenv.config();
var configCache = null;
var DEFAULT_MODEL_TOKEN_LIMIT = 4096;
var validateConfig = (key, condition, validationMessage) => {
if (!condition) {
ce(
Expand Down Expand Up @@ -18941,6 +18940,22 @@ var configValidators = {
);
return value;
},
["OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */](value) {
if (typeof value === "string") {
value = parseInt(value);
validateConfig(
"OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */,
!isNaN(value),
"Must be a number"
);
}
validateConfig(
"OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */,
value ? typeof value === "number" : void 0,
"Must be a number"
);
return value;
},
["OCO_LANGUAGE" /* OCO_LANGUAGE */](value) {
validateConfig(
"OCO_LANGUAGE" /* OCO_LANGUAGE */,
Expand Down Expand Up @@ -19013,6 +19028,7 @@ var getConfig = () => {
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === "true" ? true : false,
OCO_EMOJI: process.env.OCO_EMOJI === "true" ? true : false,
OCO_MODEL: process.env.OCO_MODEL || "gpt-3.5-turbo-16k",
OCO_TOKEN_LIMIT: 4096,
OCO_LANGUAGE: process.env.OCO_LANGUAGE || "en",
OCO_DISABLE_GIT_PUSH: Boolean(process.env.OCO_DISABLE_GIT_PUSH),
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER
Expand Down Expand Up @@ -23358,6 +23374,7 @@ var maxTokens = config2?.OCO_OPENAI_MAX_TOKENS;
var basePath = config2?.OCO_OPENAI_BASE_PATH;
var apiKey = config2?.OCO_OPENAI_API_KEY;
var apiType = config2?.OCO_OPENAI_API_TYPE || "openai";
var tokenLimit = config2?.OCO_TOKEN_LIMIT || 4096;
var [command, mode] = process.argv.slice(2);
if (!apiKey && command !== "config" && mode !== "set" /* set */) {
ae("opencommit");
Expand Down Expand Up @@ -23410,7 +23427,7 @@ var OpenAi = class {
};
try {
const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content || "") + 4).reduce((a2, b5) => a2 + b5, 0);
if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens) {
if (REQUEST_TOKENS > tokenLimit - maxTokens) {
throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
}
const { data } = await this.openAI.createChatCompletion(params);
Expand Down Expand Up @@ -23525,7 +23542,7 @@ var INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
var ADJUSTMENT_FACTOR = 20;
var generateCommitMessageByDiff = async (diff) => {
try {
const MAX_REQUEST_TOKENS = DEFAULT_MODEL_TOKEN_LIMIT - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config3?.OCO_OPENAI_MAX_TOKENS;
const MAX_REQUEST_TOKENS = (config3?.OCO_TOKEN_LIMIT || 4096) - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config3?.OCO_OPENAI_MAX_TOKENS;
if (tokenCount(diff) >= MAX_REQUEST_TOKENS) {
const commitMessagePromises = getCommitMsgsPromisesFromFileDiffs(
diff,
Expand Down
23 changes: 20 additions & 3 deletions out/github-action.cjs
Original file line number Diff line number Diff line change
Expand Up @@ -29081,7 +29081,6 @@ function getI18nLocal(value) {
var dotenv = __toESM(require_main(), 1);
dotenv.config();
var configCache = null;
var DEFAULT_MODEL_TOKEN_LIMIT = 4096;
var validateConfig = (key, condition, validationMessage) => {
if (!condition) {
ce(
Expand Down Expand Up @@ -29140,6 +29139,22 @@ var configValidators = {
);
return value;
},
["OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */](value) {
if (typeof value === "string") {
value = parseInt(value);
validateConfig(
"OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */,
!isNaN(value),
"Must be a number"
);
}
validateConfig(
"OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */,
value ? typeof value === "number" : void 0,
"Must be a number"
);
return value;
},
["OCO_LANGUAGE" /* OCO_LANGUAGE */](value) {
validateConfig(
"OCO_LANGUAGE" /* OCO_LANGUAGE */,
Expand Down Expand Up @@ -29212,6 +29227,7 @@ var getConfig = () => {
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === "true" ? true : false,
OCO_EMOJI: process.env.OCO_EMOJI === "true" ? true : false,
OCO_MODEL: process.env.OCO_MODEL || "gpt-3.5-turbo-16k",
OCO_TOKEN_LIMIT: 4096,
OCO_LANGUAGE: process.env.OCO_LANGUAGE || "en",
OCO_DISABLE_GIT_PUSH: Boolean(process.env.OCO_DISABLE_GIT_PUSH),
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER
Expand Down Expand Up @@ -30445,6 +30461,7 @@ var maxTokens = config2?.OCO_OPENAI_MAX_TOKENS;
var basePath = config2?.OCO_OPENAI_BASE_PATH;
var apiKey = config2?.OCO_OPENAI_API_KEY;
var apiType = config2?.OCO_OPENAI_API_TYPE || "openai";
var tokenLimit = config2?.OCO_TOKEN_LIMIT || 4096;
var [command, mode] = process.argv.slice(2);
if (!apiKey && command !== "config" && mode !== "set" /* set */) {
ae("opencommit");
Expand Down Expand Up @@ -30497,7 +30514,7 @@ var OpenAi = class {
};
try {
const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content || "") + 4).reduce((a2, b) => a2 + b, 0);
if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens) {
if (REQUEST_TOKENS > tokenLimit - maxTokens) {
throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
}
const { data } = await this.openAI.createChatCompletion(params);
Expand Down Expand Up @@ -30603,7 +30620,7 @@ var INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
var ADJUSTMENT_FACTOR = 20;
var generateCommitMessageByDiff = async (diff) => {
try {
const MAX_REQUEST_TOKENS = DEFAULT_MODEL_TOKEN_LIMIT - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config3?.OCO_OPENAI_MAX_TOKENS;
const MAX_REQUEST_TOKENS = (config3?.OCO_TOKEN_LIMIT || 4096) - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config3?.OCO_OPENAI_MAX_TOKENS;
if (tokenCount(diff) >= MAX_REQUEST_TOKENS) {
const commitMessagePromises = getCommitMsgsPromisesFromFileDiffs(
diff,
Expand Down
6 changes: 3 additions & 3 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions src/api.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ import {

import {
CONFIG_MODES,
DEFAULT_MODEL_TOKEN_LIMIT,
getConfig
} from './commands/config';
import { tokenCount } from './utils/tokenCount';
Expand All @@ -22,6 +21,7 @@ let maxTokens = config?.OCO_OPENAI_MAX_TOKENS;
let basePath = config?.OCO_OPENAI_BASE_PATH;
let apiKey = config?.OCO_OPENAI_API_KEY;
let apiType = config?.OCO_OPENAI_API_TYPE || 'openai';
let tokenLimit = config?.OCO_TOKEN_LIMIT || 4096;

const [command, mode] = process.argv.slice(2);

Expand Down Expand Up @@ -87,7 +87,7 @@ class OpenAi {
.map((msg) => tokenCount(msg.content || '') + 4)
.reduce((a, b) => a + b, 0);

if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens) {
if (REQUEST_TOKENS > tokenLimit - maxTokens) {
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
}

Expand Down
19 changes: 19 additions & 0 deletions src/commands/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ export enum CONFIG_KEYS {
OCO_DESCRIPTION = 'OCO_DESCRIPTION',
OCO_EMOJI = 'OCO_EMOJI',
OCO_MODEL = 'OCO_MODEL',
OCO_TOKEN_LIMIT = 'OCO_TOKEN_LIMIT',
OCO_LANGUAGE = 'OCO_LANGUAGE',
OCO_DISABLE_GIT_PUSH = 'OCO_DISABLE_GIT_PUSH',
OCO_MESSAGE_TEMPLATE_PLACEHOLDER = 'OCO_MESSAGE_TEMPLATE_PLACEHOLDER'
Expand Down Expand Up @@ -111,6 +112,23 @@ export const configValidators = {
);
return value;
},
[CONFIG_KEYS.OCO_TOKEN_LIMIT](value: any) {
  // Validator for OCO_TOKEN_LIMIT: accepts a number or a numeric string
  // (values from `oco config set KEY=VAL` arrive as strings) and returns
  // the value coerced to a number. Reports via validateConfig otherwise.
  if (typeof value === 'string') {
    value = parseInt(value, 10); // explicit radix avoids legacy octal parsing
    validateConfig(
      CONFIG_KEYS.OCO_TOKEN_LIMIT,
      !isNaN(value),
      'Must be a number'
    );
  }
  // Single direct check. The previous `value ? typeof value === 'number' : undefined`
  // wrongly rejected the falsy-but-valid number 0 with 'Must be a number',
  // and let a bare NaN through to the truthiness test instead of naming it.
  validateConfig(
    CONFIG_KEYS.OCO_TOKEN_LIMIT,
    typeof value === 'number' && !isNaN(value),
    'Must be a number'
  );
  return value;
},
[CONFIG_KEYS.OCO_LANGUAGE](value: any) {
validateConfig(
CONFIG_KEYS.OCO_LANGUAGE,
Expand Down Expand Up @@ -197,6 +215,7 @@ export const getConfig = (): ConfigType | null => {
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === 'true' ? true : false,
OCO_EMOJI: process.env.OCO_EMOJI === 'true' ? true : false,
OCO_MODEL: process.env.OCO_MODEL || 'gpt-3.5-turbo-16k',
OCO_TOKEN_LIMIT:4096,
OCO_LANGUAGE: process.env.OCO_LANGUAGE || 'en',
OCO_DISABLE_GIT_PUSH: Boolean(process.env.OCO_DISABLE_GIT_PUSH),
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER
Expand Down
4 changes: 2 additions & 2 deletions src/generateCommitMessageFromGitDiff.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ import {
ChatCompletionRequestMessageRoleEnum
} from 'openai';
import { api } from './api';
import { DEFAULT_MODEL_TOKEN_LIMIT, getConfig } from './commands/config';
import { getConfig } from './commands/config';
import { mergeDiffs } from './utils/mergeDiffs';
import { i18n, I18nLocals } from './i18n';
import { tokenCount } from './utils/tokenCount';
Expand Down Expand Up @@ -95,7 +95,7 @@ export const generateCommitMessageByDiff = async (
): Promise<string> => {
try {
const MAX_REQUEST_TOKENS =
DEFAULT_MODEL_TOKEN_LIMIT -
(config?.OCO_TOKEN_LIMIT || 4096) -
ADJUSTMENT_FACTOR -
INIT_MESSAGES_PROMPT_LENGTH -
config?.OCO_OPENAI_MAX_TOKENS;
Expand Down

0 comments on commit 1eeced8

Please sign in to comment.