support glm-4-9b-chat
Signed-off-by: XingXing Qiao <[email protected]>
xingxingqiao committed Jun 20, 2024
1 parent 35c6887 commit 3dbeba4
Showing 6 changed files with 151 additions and 2 deletions.
94 changes: 93 additions & 1 deletion convert-hf-to-gguf.py
@@ -483,6 +483,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"

        if res is None:
            logger.warning("\n")
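
The chkhsh values above fingerprint a tokenizer's pre-tokenization behavior: the converter encodes a fixed probe string and hashes the resulting token ids. A minimal sketch of the idea (the actual probe text is the chktxt constant defined earlier in this script, elided here):

    from hashlib import sha256
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True)
    chktxt = "..."  # the fixed probe string from convert-hf-to-gguf.py, elided here
    chktok = tokenizer.encode(chktxt)
    print(sha256(str(chktok).encode()).hexdigest())  # should print b6e8e151... for glm-4-9b-chat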
@@ -2729,7 +2732,7 @@ def write_tensors(self):
class ChatGLMModel(Model):
    model_arch = gguf.MODEL_ARCH.CHATGLM

    def set_vocab_chatglm3(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[bytearray] = []
@@ -2789,6 +2792,95 @@ def set_vocab(self):
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
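
The bpe() helper replays byte-pair merges: starting from single bytes, it repeatedly joins the adjacent pair with the lowest rank, stopping once only merges at or above max_rank remain. Calling it on a token with max_rank set to that token's own rank therefore recovers the pieces that the merge table would join to form it, which is how the merge list is reconstructed in set_vocab() below. A toy illustration (the ranks here are made up, not real GLM-4 values):

    ranks = {b"a": 0, b"b": 1, b"ab": 2, b"abab": 3}
    # stop just before rank 3 fires, leaving the two halves it would have joined
    assert ChatGLMModel.bpe(ranks, b"abab", max_rank=3) == [b"ab", b"ab"]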

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["padded_vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[ChatGLMModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) >= 2 and len(merged) <= 7
            merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.get_added_vocab()
        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.USER_DEFINED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                if tokenizer.added_tokens_decoder[i].special:
                    toktypes.append(gguf.TokenType.CONTROL)
                else:
                    toktypes.append(gguf.TokenType.USER_DEFINED)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.chat_template = "ChatGLM4"
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|user|>"])
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|observation|>"])
        special_vocab._set_special_token("eot", 151336)
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_name(self.dir_model.name)
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
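With the vocabulary handling above in place, a GLM-4 checkpoint converts along the usual lines (the path here is an example, and flags may differ slightly between revisions of the script):

    # assuming the HF checkpoint was downloaded to ./glm-4-9b-chat
    python convert-hf-to-gguf.py ./glm-4-9b-chat --outfile glm-4-9b-chat-f16.gguf --outtype f16
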
2 changes: 2 additions & 0 deletions examples/server/public/index-new.html
@@ -717,6 +717,8 @@
            <option value="vicuna">Tess</option>
            <option value="yi34b">Yi-6/9/34B-Chat</option>
            <option value="zephyr">Zephyr</option>
            <option value="chatglm3">ChatGLM3-6B</option>
            <option value="chatglm4">ChatGLM4-9B</option>
            <option value=""></option>
          </optgroup>
        </select>
38 changes: 37 additions & 1 deletion examples/server/public/prompt-formats.js
@@ -327,5 +327,41 @@ export const promptFormats = {
    userMsgSuffix: "",

    stops: ""
  },

  // ----------------------------

  "chatglm3": {
    template: `[gMASK]sop<|system|>\n {{prompt}}{{history}}<|{{char}}|> `,

    historyTemplate: `<|{{name}}|>\n {{message}}`,

    char: "assistant",
    charMsgPrefix: "",
    charMsgSuffix: "",

    user: "user",
    userMsgPrefix: "",
    userMsgSuffix: "",

    stops: ""
  },

  // ----------------------------

  "chatglm4": {
    template: `[gMASK]<sop><|system|>\n{{prompt}}{{history}}<|{{char}}|>`,

    historyTemplate: `<|{{name}}|>\n{{message}}`,

    char: "assistant",
    charMsgPrefix: "",
    charMsgSuffix: "",

    user: "user",
    userMsgPrefix: "",
    userMsgSuffix: "",

    stops: ""
  }
};
14 changes: 14 additions & 0 deletions llama.cpp
@@ -4730,6 +4730,7 @@ static void llm_load_hparams(
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 28: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = e_model::MODEL_8B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
@@ -4922,6 +4923,9 @@ static void llm_load_vocab(
            } else if (
                tokenizer_pre == "poro-chat") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
            } else if (
                tokenizer_pre == "chatglm-bpe") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4;
            } else {
                throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
            }
@@ -13369,6 +13373,7 @@ struct llm_tokenizer_bpe {
                break;
            case LLAMA_VOCAB_PRE_TYPE_DBRX:
            case LLAMA_VOCAB_PRE_TYPE_SMAUG:
            case LLAMA_VOCAB_PRE_TYPE_CHATGLM4:
                regex_exprs = {
                    // same as llama3
                    "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
@@ -18914,6 +18919,15 @@ static int32_t llama_chat_apply_template_internal(
        if (add_ass) {
            ss << "<|assistant|>";
        }
    } else if (tmpl.find("ChatGLM4") != std::string::npos) {
        ss << "[gMASK]" << "<sop>";
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|" << role << "|>" << "\n" << message->content;
        }
        if (add_ass) {
            ss << "<|assistant|>";
        }
    } else {
        // template not supported
        return -1;
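
llama_chat_apply_template_internal dispatches on the literal template string "ChatGLM4" that the converter stored in the GGUF metadata (special_vocab.chat_template above). The branch is equivalent to this Python sketch:

    def chatglm4_format(messages, add_generation_prompt=True):
        out = "[gMASK]<sop>"
        for m in messages:
            out += f"<|{m['role']}|>\n{m['content']}"
        if add_generation_prompt:
            out += "<|assistant|>"
        return out

    chatglm4_format([{"role": "system", "content": "You are a helpful assistant"},
                     {"role": "user", "content": "Hello"}])
    # -> '[gMASK]<sop><|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>'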
1 change: 1 addition & 0 deletions llama.h
@@ -87,6 +87,7 @@ extern "C" {
        LLAMA_VOCAB_PRE_TYPE_DBRX      = 13,
        LLAMA_VOCAB_PRE_TYPE_SMAUG     = 14,
        LLAMA_VOCAB_PRE_TYPE_PORO      = 15,
        LLAMA_VOCAB_PRE_TYPE_CHATGLM4  = 16,
    };

    // note: these values should be synchronized with ggml_rope
4 changes: 4 additions & 0 deletions tests/test-chat-template.cpp
@@ -59,6 +59,8 @@ int main(void) {
        "{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}",
        // ChatGLM3
        "{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|>\n {{ message['content'] }}{% else %}<|{{ message['role'] }}|>\n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
        // ChatGLM4
        "ChatGLM4",
    };
    std::vector<std::string> expected_output = {
        // teknium/OpenHermes-2.5-Mistral-7B
@@ -97,6 +99,8 @@
        "<|system|>\nYou are a helpful assistant<|end|>\n<|user|>\nHello<|end|>\n<|assistant|>\nHi there<|end|>\n<|user|>\nWho are you<|end|>\n<|assistant|>\n I am an assistant <|end|>\n<|user|>\nAnother question<|end|>\n<|assistant|>\n",
        // ChatGLM3
        "[gMASK]sop<|system|>\n You are a helpful assistant<|user|>\n Hello<|assistant|>\n Hi there<|user|>\n Who are you<|assistant|>\n I am an assistant <|user|>\n Another question<|assistant|>",
        // ChatGLM4
        "[gMASK]<sop><|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>",
    };
    std::vector<char> formatted_chat(1024);
    int32_t res;