From 13fb7c5b5f0c45e0ad44517da0bf4872a24180d6 Mon Sep 17 00:00:00 2001
From: David Berenstein
Date: Mon, 9 Oct 2023 17:03:47 +0100
Subject: [PATCH] feat: added on_agent_final_answer-support to Agent
 callback_manager (#5736)

* chore: added on_agent_final_answer-support to Agent callback_manager

* chore: format black

* run pre-commit to format file

* updated release notes

* reverted sorted imports

---------

Co-authored-by: Stefano Fiorucci <44616784+anakin87@users.noreply.github.com>
---
 haystack/agents/base.py                              | 12 ++++++++++--
 ..._final_answer_to_agent_base-7798ea8de2f43af0.yaml |  4 ++++
 2 files changed, 14 insertions(+), 2 deletions(-)
 create mode 100644 releasenotes/notes/add-on_agent_final_answer_to_agent_base-7798ea8de2f43af0.yaml

diff --git a/haystack/agents/base.py b/haystack/agents/base.py
index 53585a9267..da6d1d61ba 100644
--- a/haystack/agents/base.py
+++ b/haystack/agents/base.py
@@ -244,7 +244,9 @@ def __init__(
         self.max_steps = max_steps
         self.tm = tools_manager or ToolsManager()
         self.memory = memory or NoMemory()
-        self.callback_manager = Events(("on_agent_start", "on_agent_step", "on_agent_finish", "on_new_token"))
+        self.callback_manager = Events(
+            ("on_agent_start", "on_agent_step", "on_agent_finish", "on_agent_final_answer", "on_new_token")
+        )
         self.prompt_node = prompt_node
         prompt_template = prompt_template or prompt_node.default_prompt_template or "zero-shot-react"
         resolved_prompt_template = prompt_node.get_prompt_template(prompt_template)
@@ -290,8 +292,12 @@ def on_agent_start(**kwargs: Any) -> None:
             agent_name = kwargs.pop("name", "react")
             print_text(f"\nAgent {agent_name} started with {kwargs}\n")
 
+        def on_agent_final_answer(final_answer: Dict[str, Any], **kwargs: Any) -> None:
+            pass
+
         self.tm.callback_manager.on_tool_finish += on_tool_finish
         self.callback_manager.on_agent_start += on_agent_start
+        self.callback_manager.on_agent_final_answer += on_agent_final_answer
 
         if streaming:
             self.callback_manager.on_new_token += lambda token, **kwargs: print_text(token, color=agent_color)
@@ -359,7 +365,9 @@ def run(
                 agent_step = self._step(query, agent_step, params)
         finally:
             self.callback_manager.on_agent_finish(agent_step)
-        return agent_step.final_answer(query=query)
+        final_answer = agent_step.final_answer(query=query)
+        self.callback_manager.on_agent_final_answer(final_answer)
+        return final_answer
 
     def _step(self, query: str, current_step: AgentStep, params: Optional[dict] = None):
         # plan next step using the LLM
diff --git a/releasenotes/notes/add-on_agent_final_answer_to_agent_base-7798ea8de2f43af0.yaml b/releasenotes/notes/add-on_agent_final_answer_to_agent_base-7798ea8de2f43af0.yaml
new file mode 100644
index 0000000000..347fe8a0be
--- /dev/null
+++ b/releasenotes/notes/add-on_agent_final_answer_to_agent_base-7798ea8de2f43af0.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+  - |
+    added support for using `on_final_answer` trough `Agent` `callback_manager`
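
Usage note (not part of the patch): a minimal sketch of how a caller could subscribe to the new on_agent_final_answer event once this change is applied, assuming the Haystack 1.x Agent and PromptNode API. The model name, API key placeholder, and the keys read from final_answer are illustrative assumptions, not taken from the patch.

# Sketch only: assumes Haystack 1.x with this patch applied.
from haystack.agents import Agent
from haystack.nodes import PromptNode

# Illustrative PromptNode setup; model name and API key are placeholders.
prompt_node = PromptNode(model_name_or_path="gpt-3.5-turbo", api_key="YOUR_OPENAI_API_KEY")
agent = Agent(prompt_node=prompt_node)
# Tools would normally be registered via agent.add_tool(...) before running.

def log_final_answer(final_answer, **kwargs):
    # final_answer is the dict returned by AgentStep.final_answer(query=query);
    # the "query" and "answers" keys assumed here may differ by version.
    print(f"Query: {final_answer.get('query')}")
    print(f"Answers: {final_answer.get('answers')}")

# The new event introduced by this patch; callback_manager is an
# events.Events instance, so handlers are attached with +=.
agent.callback_manager.on_agent_final_answer += log_final_answer

result = agent.run(query="What is the capital of France?")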