Add chat template #1873

Merged Jun 3, 2024 · 30 commits

Commits:
- 62df55d: initial chat template (KonradSzafer, May 8, 2024)
- f4902e0: tokenizer attribute check (KonradSzafer, May 8, 2024)
- 4b790fa: variable rename (KonradSzafer, May 8, 2024)
- cd9e454: interface update (KonradSzafer, May 8, 2024)
- 9dfb58a: system instruction (KonradSzafer, May 12, 2024)
- 3369f88: system inst default update (KonradSzafer, May 14, 2024)
- 921c4d6: fewshot as multiturn (KonradSzafer, May 14, 2024)
- a4bc484: typing update (KonradSzafer, May 14, 2024)
- d01032d: indent update (KonradSzafer, May 14, 2024)
- 8a0ce59: added comments (KonradSzafer, May 14, 2024)
- 9bd948d: Merge branch 'main' into chat_template (KonradSzafer, May 22, 2024)
- 691e0c0: Adding a fewshot in a more readable way (KonradSzafer, May 22, 2024)
- 1162e34: linting (KonradSzafer, May 22, 2024)
- c370665: Moved apply chat template to LM (KonradSzafer, May 29, 2024)
- 899a544: multiturn alternation fix (KonradSzafer, May 30, 2024)
- f8771d2: cache key update (KonradSzafer, May 30, 2024)
- 52df595: apply chat template method fix (KonradSzafer, May 30, 2024)
- 615352c: add system prompt hash to cache_key (KonradSzafer, May 30, 2024)
- d7b8fd9: tokenizer name property for cache_key (KonradSzafer, May 30, 2024)
- 6f76522: property name fix (KonradSzafer, May 30, 2024)
- 4b0c49a: linting backward compatibility fix (KonradSzafer, May 31, 2024)
- dca730a: docs and errors update (KonradSzafer, May 31, 2024)
- a6d3c05: add documentation on adding chat template compatibility to model_guide (haileyschoelkopf, May 31, 2024)
- 16715f2: Merge pull request #9 from EleutherAI/chat_template (KonradSzafer, May 31, 2024)
- 8ed9d77: saving system inst and chat template in results (KonradSzafer, Jun 3, 2024)
- 222dae3: eval tracker update (KonradSzafer, Jun 3, 2024)
- 2db5209: docs update (KonradSzafer, Jun 3, 2024)
- 54ef077: merge main (KonradSzafer, Jun 3, 2024)
- 4bcd0ae: Apply suggestions from code review (clefourrier, Jun 3, 2024)
Changes from 1 commit: 62df55d12e7016e085de616b7726b5d8dbcc8fc7 "initial chat template" (KonradSzafer committed May 8, 2024)
7 changes: 7 additions & 0 deletions lm_eval/__main__.py
@@ -162,6 +162,12 @@ def setup_parser() -> argparse.ArgumentParser:
default=False,
help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis. Use with --output_path.",
)
parser.add_argument(
"--apply_chat_template",
action="store_true",
default=False,
help="If True, applies the chat template to the prompt",
)
parser.add_argument(
"--show_config",
action="store_true",
@@ -357,6 +363,7 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
check_integrity=args.check_integrity,
write_out=args.write_out,
log_samples=args.log_samples,
apply_chat_template=args.apply_chat_template,
gen_kwargs=args.gen_kwargs,
task_manager=task_manager,
verbosity=args.verbosity,
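The new `--apply_chat_template` flag is a plain boolean switch; below is a minimal standalone sketch of how such a `store_true` argument behaves (the parser here is illustrative, not the harness's full parser):

```python
import argparse

# Minimal sketch: a store_true flag defaults to False and flips to True
# only when the flag is passed on the command line.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--apply_chat_template",
    action="store_true",
    default=False,
    help="If True, applies the chat template to the prompt",
)

args = parser.parse_args(["--apply_chat_template"])
assert args.apply_chat_template is True

args = parser.parse_args([])
assert args.apply_chat_template is False
```

As the second hunk above shows, `cli_evaluate` then forwards the parsed value to `simple_evaluate` as the `apply_chat_template` keyword.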
47 changes: 46 additions & 1 deletion lm_eval/api/samplers.py
@@ -63,9 +63,54 @@ def get_context(self, doc, num_fewshot):
)
+ self.fewshot_delimiter
)

return labeled_examples

def get_chat_context(
self,
doc,
num_fewshot,
chat_history: list = [],
):
# draw an extra fewshot sample if using same split as evaluating on
n_samples = (
num_fewshot + 1
if self.config.fewshot_split == self.config.test_split
else num_fewshot
)
# draw `n_samples` docs from fewshot_docs
fewshotex = self.sample(n_samples)

# get rid of the doc that's the one we're evaluating, if it's in the fewshot
# TODO: should we just stop people from using fewshot from same split as evaluating?
selected_docs = [x for x in fewshotex if x != doc][:num_fewshot]

for doc in selected_docs:
chat_history.append(
{
"role": "user",
"content": self.doc_to_text(doc)
if (
self.config.doc_to_choice is None
or isinstance(self.doc_to_text(doc), str)
)
else self.doc_to_choice(doc)[self.doc_to_text(doc)],
}
)
chat_history.append(
{
"role": "assistant",
"content": str(self.doc_to_target(doc)[0])
if isinstance(self.doc_to_target(doc), list)
else self.doc_to_target(doc)
if (
self.config.doc_to_choice is None
or isinstance(self.doc_to_target(doc), str)
)
else str(self.doc_to_choice(doc)[self.doc_to_target(doc)]),
}
)
return chat_history

def sample(self, n):
"""
Draw `n` samples from our fewshot docs. This method should be overridden by subclasses.
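For reference, a sketch of the message list that `get_chat_context` accumulates for a 2-shot request (the questions and answers are invented for illustration; real contents come from `doc_to_text`/`doc_to_target`):

```python
# Hypothetical result of get_chat_context(doc, num_fewshot=2):
# alternating user/assistant turns, one pair per fewshot example.
chat_history = [
    {"role": "user", "content": "Question: What is 2 + 2?"},
    {"role": "assistant", "content": "4"},
    {"role": "user", "content": "Question: What is 3 + 5?"},
    {"role": "assistant", "content": "8"},
]
```

The final user turn for the document under evaluation is appended later, in `fewshot_context` (see the task.py changes below).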
96 changes: 81 additions & 15 deletions lm_eval/api/task.py
@@ -373,6 +373,8 @@ def build_all_requests(
world_size=None,
cache_requests=False,
rewrite_requests_cache=False,
apply_chat_template=False,
tokenizer=None,
) -> None:
"""Build a set of Instances for a task, and store them in task.instances"""

@@ -421,6 +423,8 @@
fewshot_ctx = self.fewshot_context(
doc,
0 if self.config.num_fewshot is None else self.config.num_fewshot,
apply_chat_template,
tokenizer,
)

# TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
@@ -957,41 +961,103 @@ def fewshot_docs(self):
)
return super().fewshot_docs()

def convert_chat_history_to_string(self, chat_history: list, tokenizer=None) -> str:
"""Returns chat history tokenized or concatenated as a string.

:param chat_history: list
The chat history to convert to a string.
:param tokenizer:
Optional tokenizer used to apply the chat template; if None, turns are joined with the sampler's fewshot_delimiter.
"""
if tokenizer:
return tokenizer.apply_chat_template(
chat_history, tokenize=False, add_generation_prompt=True
)
else:
return self.sampler.fewshot_delimiter + "".join(
f"{s['role']}: {s['content']}" + self.sampler.fewshot_delimiter
for s in chat_history
)

@utils.positional_deprecated
def fewshot_context(self, doc: str, num_fewshot: int) -> str:
def fewshot_context(
self,
doc: str,
num_fewshot: int,
apply_chat_template: bool = False,
tokenizer=None,
) -> str:
"""Returns a fewshot context string that is made up of a prepended description
(if provided), the `num_fewshot` number of examples, and an appended prompt example.

:param doc: str
The document as returned from training_docs, validation_docs, or test_docs.
:param num_fewshot: int
The number of fewshot examples to provide in the returned context string.
:param apply_chat_template: bool
Whether to apply the chat template to the fewshot context.
:param tokenizer:
The tokenizer to use for applying the chat template.
:returns: str
The fewshot context.
"""
if description := self.config.description:
description = utils.apply_template(self.config.description, doc)

chat_history = []
if num_fewshot == 0:
# always prepend the (possibly empty) task description
labeled_examples = description
if apply_chat_template:
chat_history.append({"role": "system", "content": description})
else:
labeled_examples = description
else:
labeled_examples = description + self.sampler.get_context(doc, num_fewshot)
if apply_chat_template:
chat_history = self.sampler.get_chat_context(
doc, num_fewshot, chat_history
)
else:
labeled_examples = description + self.sampler.get_context(
doc, num_fewshot
)

example = self.doc_to_text(doc)
if self.multiple_input:
return labeled_examples
if apply_chat_template:
if not self.multiple_input:
if isinstance(example, str):
chat_history.append({"role": "user", "content": example})
elif isinstance(example, list):
chat_histories_list = []
for ex in example:
chat = deepcopy(chat_history)
chat.append({"role": "user", "content": ex})
chat_histories_list.append(
self.convert_chat_history_to_string(chat, tokenizer)
)
return chat_histories_list
elif isinstance(example, int):
if self.config.doc_to_choice is not None:
choices = self.doc_to_choice(doc)
chat_history.append(
{"role": "user", "content": choices[example]}
)
else:
chat_history.append({"role": "user", "content": str(example)})
return self.convert_chat_history_to_string(chat_history, tokenizer)
else:
if isinstance(example, str):
return labeled_examples + example
elif isinstance(example, list):
return [labeled_examples + ex for ex in example]
elif isinstance(example, int):
if self.config.doc_to_choice is not None:
choices = self.doc_to_choice(doc)
return labeled_examples + choices[example]
else:
return labeled_examples + str(example)
if self.multiple_input:
return labeled_examples
else:
if isinstance(example, str):
return labeled_examples + example
elif isinstance(example, list):
return [labeled_examples + ex for ex in example]
elif isinstance(example, int):
if self.config.doc_to_choice is not None:
choices = self.doc_to_choice(doc)
return labeled_examples + choices[example]
else:
return labeled_examples + str(example)

def apply_filters(self):
"""Iterates over FilterEnsembles and applies them to instances"""
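To make the two rendering paths in `convert_chat_history_to_string` concrete, a small sketch (the delimiter and messages are invented for the example; the tokenizer call follows the Hugging Face `apply_chat_template` API used in the diff):

```python
chat_history = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4"},
]

# Path 1: with a tokenizer, the model's own template is applied, i.e.
#   tokenizer.apply_chat_template(chat_history, tokenize=False,
#                                 add_generation_prompt=True)

# Path 2: without a tokenizer, the fallback concatenation yields:
fewshot_delimiter = "\n\n"  # assumed value for illustration
fallback = fewshot_delimiter + "".join(
    f"{s['role']}: {s['content']}" + fewshot_delimiter for s in chat_history
)
assert fallback == "\n\nuser: What is 2 + 2?\n\nassistant: 4\n\n"
```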
9 changes: 9 additions & 0 deletions lm_eval/evaluator.py
@@ -55,6 +55,7 @@ def simple_evaluate(
check_integrity: bool = False,
write_out: bool = False,
log_samples: bool = True,
apply_chat_template: bool = False,
gen_kwargs: Optional[str] = None,
task_manager: Optional[TaskManager] = None,
verbosity: str = "INFO",
@@ -99,6 +100,8 @@
If True, write out an example document and model input for checking task integrity
:param log_samples: bool
If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
:param apply_chat_template: bool
If True, apply chat template to the prompt
:param gen_kwargs: str
String arguments for model generation
Ignored for all tasks with loglikelihood output_type
@@ -262,6 +265,7 @@
bootstrap_iters=bootstrap_iters,
write_out=write_out,
log_samples=log_samples,
apply_chat_template=apply_chat_template,
verbosity=verbosity,
)

@@ -317,6 +321,7 @@
bootstrap_iters: Optional[int] = 100000,
write_out: bool = False,
log_samples: bool = True,
apply_chat_template: bool = False,
verbosity: str = "INFO",
):
"""Instantiate and evaluate a model on a list of tasks.
@@ -333,6 +338,8 @@
If True, write out an example document and model input for checking task integrity
:param log_samples: bool
If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
:param apply_chat_template: bool
If True, apply chat template to the prompt
:return
Dictionary of results
"""
@@ -362,6 +369,8 @@
world_size=lm.world_size,
cache_requests=cache_requests,
rewrite_requests_cache=rewrite_requests_cache,
apply_chat_template=apply_chat_template,
tokenizer=lm.tokenizer,
)
eval_logger.debug(
f"Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}"
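Taken together, the programmatic entry point now accepts the flag as well; a hedged usage sketch (the model and task names below are placeholders, not from this PR):

```python
from lm_eval.evaluator import simple_evaluate

# Sketch: enabling chat templating from Python. The tokenizer attached to
# the instantiated LM is passed down through build_all_requests, so each
# request's prompt is rendered with that model's chat template.
results = simple_evaluate(
    model="hf",
    model_args="pretrained=some-org/some-chat-model",  # placeholder
    tasks=["some_task"],                               # placeholder
    num_fewshot=2,
    apply_chat_template=True,
)
```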