diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index c18400a9503a7e..a7193094ad7949 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -483,6 +483,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
res = "jina-v2-code"
+ if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
+ # ref: https://huggingface.co/THUDM/glm-4-9b-chat
+ res = "chatglm-bpe"
if res is None:
logger.warning("\n")
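The checksum above comes from get_vocab_base_pre(), which fingerprints a tokenizer by hashing how it encodes a fixed multilingual test string defined in convert-hf-to-gguf.py. A minimal sketch of how a new entry such as "chatglm-bpe" is obtained; CHKTXT is a placeholder for that test string and is not reproduced here:

```python
from hashlib import sha256
from transformers import AutoTokenizer

CHKTXT = "..."  # placeholder: use the exact test string from convert-hf-to-gguf.py

tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True)
chkhsh = sha256(str(tokenizer.encode(CHKTXT)).encode()).hexdigest()
print(chkhsh)  # equals the b6e8e151... value above only when CHKTXT is the real test string
```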
@@ -2729,7 +2732,7 @@ def write_tensors(self):
class ChatGLMModel(Model):
model_arch = gguf.MODEL_ARCH.CHATGLM
- def set_vocab(self):
+ def set_vocab_chatglm3(self):
dir_model = self.dir_model
hparams = self.hparams
tokens: list[bytearray] = []
@@ -2789,6 +2792,95 @@ def set_vocab(self):
special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
special_vocab.add_to_gguf(self.gguf_writer)
+ @staticmethod
+ def token_bytes_to_string(b):
+ from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
+ byte_encoder = bytes_to_unicode()
+ return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])
+
+ @staticmethod
+ def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
+ parts = [bytes([b]) for b in token]
+ while True:
+ min_idx = None
+ min_rank = None
+ for i, pair in enumerate(zip(parts[:-1], parts[1:])):
+ rank = mergeable_ranks.get(pair[0] + pair[1])
+ if rank is not None and (min_rank is None or rank < min_rank):
+ min_idx = i
+ min_rank = rank
+ if min_rank is None or (max_rank is not None and min_rank >= max_rank):
+ break
+ assert min_idx is not None
+ parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
+ return parts
+
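A toy illustration of how the two helpers above recover BPE merges from a tiktoken-style rank table, assuming the patched ChatGLMModel class is importable; the ranks are made up for the example and are not the real GLM-4 table:

```python
toy_ranks = {b"a": 0, b"b": 1, b"c": 2, b"ab": 3, b"abc": 4}

for token, rank in toy_ranks.items():
    if len(token) == 1:
        continue
    # bpe() returns the pieces that remain just before the merge with this rank fires,
    # i.e. the merge rule that creates the token
    pieces = ChatGLMModel.bpe(toy_ranks, token, max_rank=rank)
    print(' '.join(map(ChatGLMModel.token_bytes_to_string, pieces)))
# prints:
#   a b    <- the merge that builds b"ab"
#   ab c   <- the merge that builds b"abc"
```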
+ def set_vocab(self):
+ if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
+ self.set_vocab_chatglm3()
+ return
+
+ dir_model = self.dir_model
+ hparams = self.hparams
+ tokens: list[str] = []
+ toktypes: list[int] = []
+
+ from transformers import AutoTokenizer
+ tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
+ vocab_size = hparams["padded_vocab_size"]
+ assert max(tokenizer.get_vocab().values()) < vocab_size
+
+ tokpre = self.get_vocab_base_pre(tokenizer)
+
+ merges = []
+ vocab = {}
+ mergeable_ranks = tokenizer.mergeable_ranks
+ for token, rank in mergeable_ranks.items():
+ vocab[ChatGLMModel.token_bytes_to_string(token)] = rank
+ if len(token) == 1:
+ continue
+ merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank)
+ assert len(merged) >= 2 and len(merged) <= 7
+ merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged)))
+
+ # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
+ added_vocab = tokenizer.get_added_vocab()
+ reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}
+
+ for i in range(vocab_size):
+ if i not in reverse_vocab:
+ tokens.append(f"[PAD{i}]")
+ toktypes.append(gguf.TokenType.USER_DEFINED)
+ elif reverse_vocab[i] in added_vocab:
+ tokens.append(reverse_vocab[i])
+ if tokenizer.added_tokens_decoder[i].special:
+ toktypes.append(gguf.TokenType.CONTROL)
+ else:
+ toktypes.append(gguf.TokenType.USER_DEFINED)
+ else:
+ tokens.append(reverse_vocab[i])
+ toktypes.append(gguf.TokenType.NORMAL)
+
+ self.gguf_writer.add_tokenizer_model("gpt2")
+ self.gguf_writer.add_tokenizer_pre(tokpre)
+ self.gguf_writer.add_token_list(tokens)
+ self.gguf_writer.add_token_types(toktypes)
+
+ special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
+ special_vocab.chat_template = "ChatGLM4"  # sentinel matched in llama_chat_apply_template_internal(), not a Jinja template
+ special_vocab.merges = merges
+ # only add special tokens when they were not already loaded from config.json;
+ # _set_special_token() stores a single id per type, so assigning "eos" several
+ # times (<|endoftext|>, <|user|>, <|observation|>) would only keep the last one
+ special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
+ special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
+ special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])  # id 151336
+ # this one is usually not in config.json anyway
+ special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
+ special_vocab.add_to_gguf(self.gguf_writer)
+
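A quick sanity check for the special-token lookups used above; the ids in the comment are what glm-4-9b-chat is expected to report, but verify against the local tokenizer:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True)
added = tokenizer.get_added_vocab()
for name in ("<|endoftext|>", "<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"):
    # e.g. <|endoftext|> -> 151329, <|user|> -> 151336 (the id used for "eot" above)
    print(name, added.get(name))
```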
def set_gguf_parameters(self):
self.gguf_writer.add_name(self.dir_model.name)
n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
diff --git a/examples/server/public/index-new.html b/examples/server/public/index-new.html
index 19c9f643d3027b..beadc26ba5c2bc 100644
--- a/examples/server/public/index-new.html
+++ b/examples/server/public/index-new.html
@@ -717,6 +717,8 @@
+
+
diff --git a/examples/server/public/prompt-formats.js b/examples/server/public/prompt-formats.js
index 73ddb7187eb7ab..55e1aa621ff8c0 100644
--- a/examples/server/public/prompt-formats.js
+++ b/examples/server/public/prompt-formats.js
@@ -327,5 +327,41 @@ export const promptFormats = {
userMsgSuffix: "",
stops: ""
+ },
+
+ // ----------------------------
+
+ "chatglm3": {
+ template: `[gMASK]sop<|system|>\n {{prompt}}{{history}}<|{{char}}|> `,
+
+ historyTemplate: `<|{{name}}|>\n {{message}}`,
+
+ char: "assistant",
+ charMsgPrefix: "",
+ charMsgSuffix: "",
+
+ user: "user",
+ userMsgPrefix: "",
+ userMsgSuffix: "",
+
+ stops: ""
+ },
+
+ // ----------------------------
+
+ "chatglm4": {
+ template: `[gMASK]<|system|>\n{{prompt}}{{history}}<|{{char}}|>`,
+
+ historyTemplate: `<|{{name}}|>\n{{message}}`,
+
+ char: "assistant",
+ charMsgPrefix: "",
+ charMsgSuffix: "",
+
+ user: "user",
+ userMsgPrefix: "",
+ userMsgSuffix: "",
+
+ stops: ""
}
- };
+};
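For reference, the "chatglm4" entry above expands as shown below. This is an illustrative hand-expansion of the placeholder fields ({{prompt}}, {{history}}, {{name}}, {{message}}, {{char}}), not code from the web UI; note that "chatglm3" additionally inserts "sop" after [gMASK] and a space after each newline.

```python
def render(template: str, history_template: str, system: str, turns, char="assistant"):
    # substitute the placeholders the same way for every promptFormats entry
    history = "".join(
        history_template.replace("{{name}}", name).replace("{{message}}", msg)
        for name, msg in turns
    )
    return (template.replace("{{prompt}}", system)
                    .replace("{{history}}", history)
                    .replace("{{char}}", char))

print(render("[gMASK]<|system|>\n{{prompt}}{{history}}<|{{char}}|>",
             "<|{{name}}|>\n{{message}}",
             "You are a helpful assistant", [("user", "Hello")]))
# [gMASK]<|system|>
# You are a helpful assistant<|user|>
# Hello<|assistant|>
```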
diff --git a/llama.cpp b/llama.cpp
index 7b1090102887e1..a818eff8dc4925 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4730,6 +4730,7 @@ static void llm_load_hparams(
ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
switch (hparams.n_layer) {
case 28: model.type = e_model::MODEL_7B; break;
+ case 40: model.type = e_model::MODEL_8B; break;
default: model.type = e_model::MODEL_UNKNOWN;
}
} break;
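The new case 40 keys on the block count written by the converter, which for ChatGLM models comes from num_layers in the HF config. A hedged check; the attribute name is taken from the ChatGLM config class and should be confirmed against the local config.json:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True)
print(cfg.num_layers)  # expected 40, which the switch above maps to MODEL_8B
```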
@@ -4922,6 +4923,9 @@ static void llm_load_vocab(
} else if (
tokenizer_pre == "poro-chat") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
+ } else if (
+ tokenizer_pre == "chatglm-bpe") {
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4;
} else {
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
}
@@ -13369,6 +13373,7 @@ struct llm_tokenizer_bpe {
break;
case LLAMA_VOCAB_PRE_TYPE_DBRX:
case LLAMA_VOCAB_PRE_TYPE_SMAUG:
+ case LLAMA_VOCAB_PRE_TYPE_CHATGLM4:
regex_exprs = {
// same as llama3
"(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
@@ -18914,6 +18919,15 @@ static int32_t llama_chat_apply_template_internal(
if (add_ass) {
ss << "<|assistant|>";
}
+ } else if (tmpl.find("ChatGLM4") != std::string::npos) {
+ ss << "[gMASK]" << "";
+ for (auto message : chat) {
+ std::string role(message->role);
+ ss << "<|" << role << "|>" << "\n" << message->content;
+ }
+ if (add_ass) {
+ ss << "<|assistant|>";
+ }
} else {
// template not supported
return -1;
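A small Python mirror of the ChatGLM4 branch added to llama_chat_apply_template_internal(), handy for cross-checking the expected string added to tests/test-chat-template.cpp below; it is an illustration, not part of llama.cpp:

```python
def apply_chatglm4_template(chat: list[dict[str, str]], add_ass: bool = True) -> str:
    # same formatting as the C++ branch: [gMASK], then <|role|>\ncontent per message
    out = "[gMASK]"
    for message in chat:
        out += f"<|{message['role']}|>\n{message['content']}"
    if add_ass:
        out += "<|assistant|>"
    return out

print(apply_chatglm4_template([
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "Hello"},
]))
# [gMASK]<|system|>
# You are a helpful assistant<|user|>
# Hello<|assistant|>
```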
diff --git a/llama.h b/llama.h
index da310ffaf9ad99..b1ff05bd719bee 100644
--- a/llama.h
+++ b/llama.h
@@ -87,6 +87,7 @@ extern "C" {
LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
LLAMA_VOCAB_PRE_TYPE_PORO = 15,
+ LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 16,
};
// note: these values should be synchronized with ggml_rope
diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp
index 87f39f10394417..0fe4d29674269a 100644
--- a/tests/test-chat-template.cpp
+++ b/tests/test-chat-template.cpp
@@ -59,6 +59,8 @@ int main(void) {
"{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}",
// ChatGLM3
"{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|>\n {{ message['content'] }}{% else %}<|{{ message['role'] }}|>\n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
+ // ChatGLM4
+ "ChatGLM4",
};
std::vector<std::string> expected_output = {
// teknium/OpenHermes-2.5-Mistral-7B
@@ -97,6 +99,8 @@ int main(void) {
"<|system|>\nYou are a helpful assistant<|end|>\n<|user|>\nHello<|end|>\n<|assistant|>\nHi there<|end|>\n<|user|>\nWho are you<|end|>\n<|assistant|>\n I am an assistant <|end|>\n<|user|>\nAnother question<|end|>\n<|assistant|>\n",
// ChatGLM3
"[gMASK]sop<|system|>\n You are a helpful assistant<|user|>\n Hello<|assistant|>\n Hi there<|user|>\n Who are you<|assistant|>\n I am an assistant <|user|>\n Another question<|assistant|>",
+ // ChatGLM4
+ "[gMASK]<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>",
};
std::vector<char> formatted_chat(1024);
int32_t res;