forked from EleutherAI/gpt-neox
-
Notifications
You must be signed in to change notification settings - Fork 7
/
test_hf_model.py
executable file
·32 lines (26 loc) · 931 Bytes
/
test_hf_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import transformers

# Smoke-test script: load a GPT-NeoX checkpoint (converted to Hugging Face
# format) and sample completions for a small batch of prompts.

# Path to the converted checkpoint directory. Left empty as a placeholder —
# fill in before running, or from_pretrained("") will fail.
model_path = ""
model = transformers.AutoModelForCausalLM.from_pretrained(model_path)

# Reuse the public 20B tokenizer; the vocab matches any GPT-NeoX conversion.
tokenizer_path = "EleutherAI/gpt-neox-20b"
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_path)
# GPT-NeoX defines no pad token, so alias it to EOS for batch padding.
tokenizer.pad_token = tokenizer.eos_token
# Decoder-only models must be LEFT-padded so generation continues from the
# last real prompt token, not from padding.
# BUG FIX: original wrote "paddding_side" (typo), which silently set a dead
# attribute and left the tokenizer right-padding — corrupting output for
# every prompt shorter than the longest in the batch.
tokenizer.padding_side = "left"

prompts = [
    "The Statue of Liberty was a gift from",
    "What is the difference between a list and a tuple in Python?",
    "Write a function that checks whether a string is a palindrome or not.\ndef is_palindrome(s):",
    # Empty prompts produce unconditional samples (generation from padding/EOS).
    "",
    "",
]

# Pads the batch to the longest prompt and returns input_ids + attention_mask.
batch_encoding = tokenizer(prompts, return_tensors="pt", padding=True)

print(f"Generating {len(prompts)} prompts...")
samples = model.generate(
    **batch_encoding,
    max_new_tokens=64,
    temperature=0.4,  # mildly sharpened sampling distribution
    do_sample=True,
)

for i, sample in enumerate(samples):
    print(f"Prompt: {prompts[i]}")
    # skip_special_tokens drops the pad/EOS tokens introduced by batching.
    print(f"⇥ {tokenizer.decode(sample, skip_special_tokens=True)}")
    print()