[evals] Refactor evals package to expose completion_fn. #515

Status: Merged (Apr 11, 2023) · 23 commits
Changes from 1 commit
d87a056  [evals] Refactor evals package to expose `completion_fn`. (hwchung27, Mar 29, 2023)
d9c1395  Add `record_raw_samples` (hwchung27, Apr 2, 2023)
a1c6207  Andrew/evals refactor (#579) (andrew-openai, Apr 5, 2023)
deb29d3  update manifest and pyproject to support fetching data on pip install… (andrew-openai, Apr 5, 2023)
9b1c350  we need to still use the interop for string/list[dicts] for modelgrad… (andrew-openai, Apr 5, 2023)
c470d52  refactor simple evals to not use result.prompt (#593) (andrew-openai, Apr 5, 2023)
b691cfa  Clean up duplicate recordings (hwchung27, Apr 6, 2023)
7266049  Replace ModelSpecs with CompletionFn (#594) (jwang47, Apr 6, 2023)
b2a45cf  Add --registry_path CLI arg (#601) (jwang47, Apr 6, 2023)
924d2d4  Andrew/langchain llms (#602) (andrew-openai, Apr 7, 2023)
4401cce  rm sample freeform, some docs (#603) (andrew-openai, Apr 7, 2023)
013d636  Update completion-fn-protocol.md (andrew-openai, Apr 7, 2023)
08062bc  some documentation cleanup (joe-at-openai, Apr 10, 2023)
3367006  some documentation cleanup (joe-at-openai, Apr 10, 2023)
5e71a76  some documentation cleanup (joe-at-openai, Apr 10, 2023)
e621b6f  inner monologue example (#610) (andrew-openai, Apr 10, 2023)
49d17ed  Update README.md (andrew-openai, Apr 10, 2023)
1bfba77  Update run-evals.md (andrew-openai, Apr 10, 2023)
b018aff  cleanup (andrew-openai, Apr 10, 2023)
5222f2c  Merge branch 'main' into evals_refactor_merge_main (andrew-openai, Apr 10, 2023)
9db703d  get oaieval to run (andrew-openai, Apr 10, 2023)
02bc2cb  address comments (andrew-openai, Apr 11, 2023)
50114a5  bump version (andrew-openai, Apr 11, 2023)
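
As the commit titles indicate, the refactor replaces ModelSpec-based model resolution with completion functions, documented in completion-fn-protocol.md. Below is a minimal sketch of that protocol for orientation; the names follow the evals codebase, but treat the exact signatures as approximate and defer to completion-fn-protocol.md.

    # Minimal sketch of the completion function protocol (approximate;
    # see completion-fn-protocol.md in this PR for the authoritative version).
    from typing import Any, Protocol, Union

    class CompletionResult(Protocol):
        def get_completions(self) -> list[str]:
            ...

    class CompletionFn(Protocol):
        def __call__(
            self,
            prompt: Union[str, list[dict[str, str]]],  # plain string or chat-style messages
            **kwargs: Any,
        ) -> CompletionResult:
            ...

Any object satisfying this protocol can stand in for a model: the eval passes it a prompt and reads completions back, without knowing whether an OpenAI model, a LangChain LLM, or a dummy implementation is behind it.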
Commit 9db703df6b70521a469197988843514eff1337e9: "get oaieval to run" (andrew-openai, Apr 10, 2023)
evals/cli/oaieval.py (22 changes: 20 additions & 2 deletions)

@@ -31,7 +31,6 @@ def get_parser() -> argparse.ArgumentParser:
     )
     parser.add_argument("eval", type=str, help="Name of an eval. See registry.")
     parser.add_argument("--extra_eval_params", type=str, default="")
-    parser.add_argument("--modelspec_extra_options", type=str, default="")
     parser.add_argument("--max_samples", type=int, default=None)
     parser.add_argument("--cache", action=argparse.BooleanOptionalAction, default=True)
     parser.add_argument("--visible", action=argparse.BooleanOptionalAction, default=None)
@@ -110,6 +109,25 @@ def run(args, registry: Optional[Registry] = None):
     run_url = f"{run_spec.run_id}"
     logger.info(_purple(f"Run started: {run_url}"))

+    def parse_extra_eval_params(param_str: Optional[str]) -> Mapping[str, Any]:
+        """Parse a string of the form "key1=value1,key2=value2" into a dict."""
+        if not param_str:
+            return {}
+
+        def to_number(x):
+            try:
+                return int(x)
+            except:
+                pass
+            try:
+                return float(x)
+            except:
+                pass
+            return x
+
+        str_dict = dict(kv.split("=") for kv in param_str.split(","))
+        return {k: to_number(v) for k, v in str_dict.items()}

+    extra_eval_params = parse_extra_eval_params(args.extra_eval_params)

     eval_class = registry.get_class(eval_spec)
@@ -143,7 +161,7 @@ def main():
     logging.getLogger("openai").setLevel(logging.WARN)
     if hasattr(openai.error, "set_display_cause"):
         openai.error.set_display_cause()
-    run(args, model_resolver=ModelResolver())
+    run(args)
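
For illustration, here is how the new parse_extra_eval_params helper behaves on a typical --extra_eval_params value. This is a sketch assuming the function above is in scope; the parameter values are hypothetical.

    # Hypothetical CLI value: ints parse first, then floats;
    # anything that is neither stays a string.
    params = parse_extra_eval_params("max_tokens=256,temperature=0.7,stop=END")
    assert params == {"max_tokens": 256, "temperature": 0.7, "stop": "END"}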
evals/elsuite/modelgraded/classify.py (4 changes: 1 addition & 3 deletions)

@@ -11,8 +11,6 @@
 import evals
 import evals.record
 from evals import CompletionFn, DummyCompletionFn, OpenAIChatCompletionFn
 from evals.elsuite.utils import PromptFn, format_necessary, scrub_formatting_from_prompt
-
-from evals.elsuite.modelgraded.base import ModelGradedSpec
 from evals.elsuite.modelgraded.classify_utils import (
     CHOICE_KEY,
@@ -151,7 +149,7 @@ def eval_sample(self, test_sample: dict, rng: Random) -> None:
         args = {k: v[1] for k, v in args.items()}
         prompt = self.mg.format(**args, **completions, **test_sample)
         evaluate = PromptFn(
-            self.prompt,
+            prompt,
             completion_fn=self.eval_completion_fn,
             max_tokens=self.max_tokens,
         )
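
The second hunk fixes the grader to use the per-sample formatted prompt (prompt) rather than the eval-level template (self.prompt), and binds it to the eval's grading completion function via PromptFn. Roughly, PromptFn wraps a prompt and a completion_fn so the grader can be invoked as a plain callable. A simplified sketch of that pattern follows; the real class lives in evals/elsuite/utils.py and handles template formatting and extra kwargs, so treat the details here as approximate.

    # Simplified sketch of the PromptFn pattern (details approximate).
    class PromptFn:
        def __init__(self, prompt, completion_fn, max_tokens):
            self.prompt = prompt
            self.completion_fn = completion_fn
            self.max_tokens = max_tokens

        def __call__(self, **kwargs):
            # Delegate to any CompletionFn and return the first completion
            # together with the prompt that produced it.
            result = self.completion_fn(
                prompt=self.prompt, max_tokens=self.max_tokens, **kwargs
            )
            return result.get_completions()[0], self.prompt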
evals/registry.py (12 changes: 5 additions & 7 deletions)

@@ -20,6 +20,7 @@
 from evals import OpenAIChatCompletionFn, OpenAICompletionFn
 from evals.api import CompletionFn, DummyCompletionFn
 from evals.base import BaseEvalSpec, CompletionFnSpec, EvalSetSpec, EvalSpec
+from evals.elsuite.modelgraded.base import ModelGradedSpec
 from evals.utils.misc import make_object

 logger = logging.getLogger(__name__)
@@ -147,10 +148,11 @@ def get_alias():
         except TypeError as e:
             raise TypeError(f"Error while processing {object} '{name}': {e}")

-    def get_model(self, name: str) -> ModelSpec:
-        return self._dereference(name, self._models, "model", ModelSpec)
-
     def get_modelgraded_spec(self, name: str, **kwargs: dict) -> dict[str, Any]:
         assert name in self._modelgraded_specs, (
             f"Modelgraded spec {name} not found. "
             f"Closest matches: {difflib.get_close_matches(name, self._modelgraded_specs.keys(), n=5)}"
         )
+        return self._dereference(
+            name, self._modelgraded_specs, "modelgraded spec", ModelGradedSpec, **kwargs
+        )
@@ -266,9 +268,5 @@ def _evals(self):
     def _modelgraded_specs(self):
         return self._load_registry([p / "modelgraded" for p in self._registry_paths])

-    @functools.cached_property
-    def _models(self):
-        return self._load_registry([p / "models" for p in self._registry_paths])
-

 registry = Registry()
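
With get_model and the _models registry removed, model lookup goes through completion functions instead, and modelgraded specs now dereference into ModelGradedSpec objects rather than raw dicts. A hedged usage sketch: "fact" is one of the built-in modelgraded specs shipped with evals, but treat the exact call shape as approximate.

    # Sketch: fetching a modelgraded spec after this refactor.
    from evals.registry import Registry

    registry = Registry()
    spec = registry.get_modelgraded_spec("fact")  # ModelGradedSpec, not a plain dict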