forked from princeton-nlp/SimPO
-
Notifications
You must be signed in to change notification settings - Fork 0
/
generate.py
28 lines (24 loc) · 897 Bytes
/
generate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
"""Generate one response from a SimPO-tuned chat model, applying a
chat template loaded from a local ``chat_templates.json`` file."""
import json
import warnings

import torch
from transformers import pipeline

model_id = "princeton-nlp/Llama-3-Instruct-8B-SimPO"

# chat_templates.json is expected to map template keys (e.g. "llama3")
# to Jinja chat-template strings understood by the tokenizer.
with open('chat_templates.json', 'r', encoding='utf-8') as f:
    chat_templates = json.load(f)

# Pick the template matching the model family. Stays None when no family
# matches — previously `template` was left unbound in that case, so the
# assignment below raised NameError right after the warning.
template = None
model_key = model_id.lower()
if "llama-3" in model_key:
    template = chat_templates["llama3"]
elif "mistral-7b-base" in model_key:
    template = chat_templates["mistral-base"]
elif "mistral-7b-instruct" in model_key:
    template = chat_templates["mistral-instruct"]
else:
    warnings.warn("No template set for the given model_id.")

generator = pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    # Fall back to CPU so the script still runs on machines without a GPU;
    # on CUDA machines behavior is unchanged.
    device="cuda" if torch.cuda.is_available() else "cpu",
)

# Only override the tokenizer's template when one was actually selected;
# otherwise keep whatever default template ships with the model.
if template is not None:
    generator.tokenizer.chat_template = template

outputs = generator(
    [{"role": "user", "content": "What's the difference between llamas and alpacas?"}],
    do_sample=False,       # greedy decoding for a reproducible answer
    max_new_tokens=200,
)
print(outputs[0]['generated_text'])