token_counter.py
"""Functions for counting the number of tokens in a message or string."""
from __future__ import annotations
from typing import List
import json
import tiktoken_async


async def count_message_tokens(
    messages: List[dict], model: str = "gpt-3.5-turbo-0301"
) -> int:
    """
    Returns the number of tokens used by a list of messages.

    Args:
        messages (list): A list of messages, each of which is a dictionary
            containing the role and content of the message.
        model (str): The name of the model to use for tokenization.
            Defaults to "gpt-3.5-turbo-0301".

    Returns:
        int: The number of tokens used by the list of messages.
    """
    try:
        encoding = await tiktoken_async.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = await tiktoken_async.get_encoding("cl100k_base")
    if model == "gpt-3.5-turbo":
        # !Note: gpt-3.5-turbo may change over time.
        # Returning num tokens assuming gpt-3.5-turbo-0301.
        return await count_message_tokens(messages, model="gpt-3.5-turbo-0301")
    elif model == "gpt-4":
        # !Note: gpt-4 may change over time.
        # Returning num tokens assuming gpt-4-0314.
        return await count_message_tokens(messages, model="gpt-4-0314")
    # TODO: OpenAI has not documented how to count tokens for the 0613 models,
    # so we fall back to the same scheme as gpt-3.5-turbo-0301.
    elif model in (
        "gpt-3.5-turbo-0301",
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k-0613",
    ):
        tokens_per_message = (
            4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        )
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif model == "gpt-4-0314":
        tokens_per_message = 3
        tokens_per_name = 1
    else:
        raise NotImplementedError(
            f"count_message_tokens() is not implemented for model {model}.\n"
            " See https://github.com/openai/openai-python/blob/main/chatml.md for"
            " information on how messages are converted to tokens."
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            if not isinstance(value, str):
                # OpenAI has not documented how to count tokens for a `function_call`,
                # and only strings can be encoded, so non-`str` values (e.g. the
                # `function_call` field of a message) are serialized to JSON first.
                value = json.dumps(value)
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


async def count_string_tokens(string: str, model_name: str) -> int:
    """
    Returns the number of tokens in a text string.

    Args:
        string (str): The text string.
        model_name (str): The name of the model whose encoding to use
            (e.g., "gpt-3.5-turbo").

    Returns:
        int: The number of tokens in the text string.
    """
    encoding = await tiktoken_async.encoding_for_model(model_name)
    return len(encoding.encode(string))
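

# Minimal usage sketch (not part of the original module): shows how the two async
# helpers above can be driven with asyncio. Assumes the tiktoken_async package is
# installed; the messages and model names below are illustrative only.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello, how many tokens is this?"},
        ]
        print(await count_message_tokens(messages, model="gpt-3.5-turbo-0301"))
        print(await count_string_tokens("Hello, world!", model_name="gpt-3.5-turbo"))

    asyncio.run(_demo())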