fix: Revert "Allow passing in required project_name (deepset-ai#445)" (deepset-ai#467)

* Revert "Allow passing in required project_name (deepset-ai#445)"

This reverts commit ffe86a9.

* fix: Fail early when API params are not correctly passed to the evaluator (see the usage sketch after this list)
doc: Update docstring to mention required API parameters

* Add back dependencies required for integration testing
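
For context, here is a minimal usage sketch of the evaluator after this change. It is an illustration only: the import paths are assumed (they are not shown in this diff), and only constructor arguments visible in the diff are used.

    # Minimal sketch; import paths are assumed, not taken from this diff.
    from haystack.utils import Secret
    from haystack_integrations.components.evaluators.uptrain import UpTrainEvaluator, UpTrainMetric

    # UpTrain API: `project_name` is now passed via `api_params` instead of a
    # dedicated `project_name` constructor argument.
    uptrain_eval = UpTrainEvaluator(
        metric=UpTrainMetric.RESPONSE_COMPLETENESS,
        api="uptrain",
        api_key=Secret.from_env_var("UPTRAIN_API_KEY"),
        api_params={"project_name": "my_project"},
    )

    # OpenAI API: `api_params` must be left unset; the evaluator now raises a
    # ValueError early if additional parameters are passed, or if `project_name`
    # is missing when api="uptrain".
    openai_eval = UpTrainEvaluator(
        metric=UpTrainMetric.CONTEXT_RELEVANCE,
        api="openai",
        api_key=Secret.from_env_var("OPENAI_API_KEY"),
    )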
shadeMe committed Feb 22, 2024
1 parent 66fb26e commit 9996295
Showing 2 changed files with 27 additions and 20 deletions.
@@ -36,7 +36,6 @@ def __init__(
api: str = "openai",
api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
api_params: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
):
"""
Construct a new UpTrain evaluator.
@@ -53,16 +52,15 @@
The API key to use.
:param api_params:
Additional parameters to pass to the API client.
:param project_name:
Name of the project required when using UpTrain API.
Required parameters for the UpTrain API: `project_name`.
"""
self.metric = metric if isinstance(metric, UpTrainMetric) else UpTrainMetric.from_str(metric)
self.metric_params = metric_params
self.descriptor = METRIC_DESCRIPTORS[self.metric]
self.api = api
self.api_key = api_key
self.api_params = api_params
self.project_name = project_name

self._init_backend()
expected_inputs = self.descriptor.input_parameters
@@ -94,7 +92,7 @@ def run(self, **inputs) -> Dict[str, Any]:
:param inputs:
The inputs to evaluate. These are determined by the
metric being calculated. See :class:`UpTrainMetric` for more
metric being calculated. See `UpTrainMetric` for more
information.
:returns:
A nested list of metric results. Each input can have one or more
@@ -116,7 +114,7 @@ def run(self, **inputs) -> Dict[str, Any]:
if isinstance(self._backend_client, EvalLLM):
results = self._backend_client.evaluate(**eval_args)
else:
results = self._backend_client.log_and_evaluate(**eval_args, project_name=self.project_name)
results = self._backend_client.log_and_evaluate(**eval_args)

OutputConverters.validate_outputs(results)
converted_results = [
@@ -148,7 +146,6 @@ def check_serializable(obj: Any):
api=self.api,
api_key=self.api_key.to_dict(),
api_params=self.api_params,
project_name=self.project_name,
)

@classmethod
@@ -197,9 +194,12 @@ def _init_backend(self):
assert api_key is not None
if self.api == "openai":
backend_client = EvalLLM(openai_api_key=api_key)
if self.api_params is not None:
msg = "OpenAI API does not support additional parameters"
raise ValueError(msg)
elif self.api == "uptrain":
if not self.project_name:
msg = "project_name not provided. UpTrain API requires a project name."
if self.api_params is None or "project_name" not in self.api_params:
msg = "UpTrain API requires a 'project_name' API parameter"
raise ValueError(msg)
backend_client = APIClient(uptrain_api_key=api_key)

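To complement the run() docstring above, a call might look like the sketch below. The keyword-argument names are hypothetical: the accepted inputs are defined per metric by METRIC_DESCRIPTORS, which is not part of this diff.

    # Hypothetical input names, for illustration only; the real ones depend on
    # the metric's descriptor (not shown in this diff).
    output = uptrain_eval.run(
        questions=["What is Haystack?"],
        responses=["Haystack is an open source framework for building LLM applications."],
    )
    # Per the signature and docstring, `output` is a dictionary wrapping a nested
    # list of metric results, with one or more results per input.
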
29 changes: 18 additions & 11 deletions integrations/uptrain/tests/test_evaluator.py
@@ -115,17 +115,32 @@ def test_evaluator_api(monkeypatch):
UpTrainMetric.RESPONSE_COMPLETENESS,
api="uptrain",
api_key=Secret.from_env_var("UPTRAIN_API_KEY"),
project_name="test",
api_params={"project_name": "test"},
)
assert eval.api == "uptrain"
assert eval.api_key == Secret.from_env_var("UPTRAIN_API_KEY")
assert eval.api_params == {"project_name": "test"}

with pytest.raises(ValueError, match="Unsupported API"):
UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="cohere")

with pytest.raises(ValueError, match="None of the following authentication environment variables are set"):
UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="uptrain", api_key=Secret.from_env_var("asd39920qqq"))

with pytest.raises(ValueError, match="does not support additional parameters"):
UpTrainEvaluator(
UpTrainMetric.CONTEXT_RELEVANCE,
api_params={"project_name": "test"},
api="openai",
)

with pytest.raises(ValueError, match="requires .* API parameter"):
UpTrainEvaluator(
UpTrainMetric.CONTEXT_RELEVANCE,
api_params=None,
api="uptrain",
)


def test_evaluator_metric_init_params():
eval = UpTrainEvaluator(
@@ -158,8 +173,7 @@ def test_evaluator_serde(os_environ_get):
"metric_params": {"method": "rouge"},
"api": "uptrain",
"api_key": Secret.from_env_var("ENV_VAR", strict=False),
"api_params": {"eval_name": "test"},
"project_name": "test",
"api_params": {"project_name": "test"},
}
eval = UpTrainEvaluator(**init_params)
serde_data = eval.to_dict()
@@ -170,13 +184,12 @@
assert eval.api_key == new_eval.api_key
assert eval.metric_params == new_eval.metric_params
assert eval.api_params == new_eval.api_params
assert eval.project_name == new_eval.project_name
assert type(new_eval._backend_client) == type(eval._backend_client)
assert type(new_eval._backend_metric) == type(eval._backend_metric)

with pytest.raises(DeserializationError, match=r"cannot serialize the API/metric parameters"):
init_params3 = copy.deepcopy(init_params)
init_params3["api_params"] = {"arg": Unserializable("")}
init_params3["api_params"] = {"arg": Unserializable(""), "project_name": "test"}
eval = UpTrainEvaluator(**init_params3)
eval.to_dict()

@@ -205,10 +218,8 @@ def test_evaluator_valid_inputs(metric, inputs, params):
init_params = {
"metric": metric,
"metric_params": params,
"api": "uptrain",
"api_key": Secret.from_token("Aaa"),
"api_params": None,
"project_name": "test",
}
eval = UpTrainEvaluator(**init_params)
eval._backend_client = MockBackend([metric])
@@ -234,10 +245,8 @@ def test_evaluator_invalid_inputs(metric, inputs, error_string, params):
init_params = {
"metric": metric,
"metric_params": params,
"api": "uptrain",
"api_key": Secret.from_token("Aaa"),
"api_params": None,
"project_name": "test",
}
eval = UpTrainEvaluator(**init_params)
eval._backend_client = MockBackend([metric])
@@ -311,10 +320,8 @@ def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params):
init_params = {
"metric": metric,
"metric_params": metric_params,
"api": "uptrain",
"api_key": Secret.from_token("Aaa"),
"api_params": None,
"project_name": "test",
}
eval = UpTrainEvaluator(**init_params)
eval._backend_client = MockBackend([metric])