Skip to content

Commit

Permalink
✅ Update test fixtures for no empty train streams
Browse files Browse the repository at this point in the history
Signed-off-by: Evaline Ju <[email protected]>
  • Loading branch information
evaline-ju committed Nov 2, 2023
1 parent 414fa1e commit 04dd2a1
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 2 deletions.
12 changes: 10 additions & 2 deletions tests/fixtures/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,11 @@ def causal_lm_train_kwargs():
"base_model": HFAutoCausalLM.bootstrap(
model_name=CAUSAL_LM_MODEL, tokenizer_name=CAUSAL_LM_MODEL
),
"train_stream": caikit.core.data_model.DataStream.from_iterable([]),
"train_stream": caikit.core.data_model.DataStream.from_iterable([
caikit_nlp.data_model.GenerationTrainRecord(
input="@foo what a cute dog!", output="no complaint"
),
]),
"num_epochs": 0,
"tuning_config": caikit_nlp.data_model.TuningConfig(
num_virtual_tokens=8, prompt_tuning_init_text="hello world"
Expand Down Expand Up @@ -149,7 +153,11 @@ def seq2seq_lm_train_kwargs():
"base_model": HFAutoSeq2SeqLM.bootstrap(
model_name=SEQ2SEQ_LM_MODEL, tokenizer_name=SEQ2SEQ_LM_MODEL
),
"train_stream": caikit.core.data_model.DataStream.from_iterable([]),
"train_stream": caikit.core.data_model.DataStream.from_iterable([
caikit_nlp.data_model.GenerationTrainRecord(
input="@foo what a cute dog!", output="no complaint"
),
]),
"num_epochs": 0,
"tuning_config": caikit_nlp.data_model.TuningConfig(
num_virtual_tokens=16, prompt_tuning_init_text="hello world"
Expand Down
13 changes: 13 additions & 0 deletions tests/modules/text_generation/test_peft_prompt_tuning.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,19 @@ def test_prompt_output_types(causal_lm_train_kwargs):
assert model


def test_error_empty_stream(causal_lm_train_kwargs):
    """Training on an empty train stream must fail with a ValueError."""
    overrides = {
        "num_epochs": 1,
        "verbalizer": "Tweet text : {{input}} Label : ",
        "train_stream": caikit.core.data_model.DataStream.from_iterable([]),
    }
    causal_lm_train_kwargs.update(overrides)
    # An empty stream gives the tuner nothing to train on, so .train() rejects it.
    with pytest.raises(ValueError):
        caikit_nlp.modules.text_generation.PeftPromptTuning.train(
            **causal_lm_train_kwargs
        )


### Implementation details
# These tests can probably be removed and tested directly through .save() once
# full seq2seq support is completed and verified.
Expand Down

0 comments on commit 04dd2a1

Please sign in to comment.