From 2897eb5ec8023b0122e20d8712897f8e7d9d2a6d Mon Sep 17 00:00:00 2001 From: Tuana Celik Date: Tue, 18 Apr 2023 16:43:42 -0600 Subject: [PATCH 1/2] Adding the Basic Agent Memory tool --- integrations/basic-agent-memory.md | 116 +++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 integrations/basic-agent-memory.md diff --git a/integrations/basic-agent-memory.md b/integrations/basic-agent-memory.md new file mode 100644 index 0000000..1b05f3f --- /dev/null +++ b/integrations/basic-agent-memory.md @@ -0,0 +1,116 @@ +--- +layout: integration +name: Basic Agent Memory Tool +description: A working memory that stores the Agent's conversation memory +authors: + - name: Roland Tannous + socials: + github: rolandtannous + twitter: rolandtannous + - name: Xceron + socials: + github: Xceron +pypi: https://pypi.org/project/haystack-memory/ +repo: https://github.com/rolandtannous/haystack-memory +type: Agent Tool +report_issue: https://github.com/rolandtannous/haystack-memory/issues +--- + +# Basic Haystack Memory Tool + +This library implements a working memory that stores the Agent's conversation memory +and a sensory memory that stores the agent's short-term sensory memory. The working memory can be utilized in-memory or through Redis, with the + +Redis implementation featuring a sliding window. On the other hand, the sensory memory is an in-memory implementation that mimics +a human's brief sensory memory, lasting only for the duration of one interaction. + +## Installation + +- Python pip: ```pip install --upgrade haystack-memory``` . 
This method will attempt to install the dependencies (farm-haystack>=1.15.0, redis) +- Python pip (skip dependency installation): Use ```pip install --upgrade haystack-memory --no-deps``` +- Using git: ```pip install git+https://github.com/rolandtannous/haystack-memory.git@main#egg=haystack-memory``` + + +## Usage + +To use memory in your agent, you need three components: +- `MemoryRecallNode`: This node is added to the agent as a tool. It will allow the agent to remember the conversation and make query-memory associations. +- `MemoryUtils`: This class should be used to save the queries and the final agent answers to the conversation memory. +- `chat`: This is a method of the MemoryUtils class. It is used to chat with the agent. It will save the query and the answer to the memory. It also returns the full result for further usage. + +```py +from haystack.agents import Agent, Tool +from haystack.nodes import PromptNode +from haystack_memory.prompt_templates import memory_template +from haystack_memory.memory import MemoryRecallNode +from haystack_memory.utils import MemoryUtils + +# Initialize the memory and the memory tool so the agent can retrieve the memory +working_memory = [] +sensory_memory = [] +memory_node = MemoryRecallNode(memory=working_memory) +memory_tool = Tool(name="Memory", + pipeline_or_node=memory_node, + description="Your memory. 
Always access this tool first to remember what you have learned.") + +prompt_node = PromptNode(model_name_or_path="text-davinci-003", + api_key="", + max_length=1024, + stop_words=["Observation:"]) +memory_agent = Agent(prompt_node=prompt_node, prompt_template=memory_template) +memory_agent.add_tool(memory_tool) + +# Initialize the utils to save the query and the answers to the memory +memory_utils = MemoryUtils(working_memory=working_memory,sensory_memory=sensory_memory, agent=memory_agent) +result = memory_utils.chat("") +print(working_memory) +``` + +### Redis + +The working memory can also be stored in a redis database which makes it possible to use different memories at the same time to be used with multiple agents. Additionally, it supports a sliding window to only utilize the last k messages. + +```py +from haystack.agents import Agent, Tool +from haystack.nodes import PromptNode +from haystack_memory.memory import RedisMemoryRecallNode +from haystack_memory.prompt_templates import memory_template +from haystack_memory.utils import RedisUtils + +sensory_memory = [] +# Initialize the memory and the memory tool so the agent can retrieve the memory +redis_memory_node = RedisMemoryRecallNode(memory_id="working_memory", + host="localhost", + port=6379, + db=0) +memory_tool = Tool(name="Memory", + pipeline_or_node=redis_memory_node, + description="Your memory. Always access this tool first to remember what you have learned.") +prompt_node = PromptNode(model_name_or_path="text-davinci-003", + api_key="", + max_length=1024, + stop_words=["Observation:"]) +memory_agent = Agent(prompt_node=prompt_node, prompt_template=memory_template) +# Initialize the utils to save the query and the answers to the memory +redis_utils = RedisUtils(agent=memory_agent, + sensory_memory=sensory_memory, + memory_id="working_memory", + host="localhost", + port=6379, + db=0) +result = redis_utils.chat("") +``` + + +## Examples + +Examples can be found in the `examples/` folder. 
They contain usage examples for both in-memory and Redis memory types. +To open the examples in Colab, click on the following links: +- Basic Memory: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rolandtannous/HaystackAgentBasicMemory/blob/main/examples/example_basic_memory.ipynb) +- Redis Memory: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rolandtannous/HaystackAgentBasicMemory/blob/main/examples/example_redis_memory.ipynb) + + + + + + From cd28165bbb6128a68ee29b0de8795c46137c12ca Mon Sep 17 00:00:00 2001 From: Tuana Celik Date: Tue, 18 Apr 2023 17:02:54 -0600 Subject: [PATCH 2/2] standardizing the integrations descriptions --- integrations/azure-translator.md | 7 ++++++- integrations/fastrag.md | 4 +++- integrations/lemmatize.md | 10 +++++++--- integrations/qdrant-document-store.md | 2 ++ integrations/veracity.md | 8 ++++++-- 5 files changed, 24 insertions(+), 7 deletions(-) diff --git a/integrations/azure-translator.md b/integrations/azure-translator.md index 55b2af8..c5c2114 100644 --- a/integrations/azure-translator.md +++ b/integrations/azure-translator.md @@ -13,9 +13,14 @@ type: Custom Node report_issue: https://github.com/recrudesce/haystack_translate_node/issues --- -## Include in your pipeline as follows: +# Azure Translate Nodes + +This package allows you to use the Azure translation endpoints to separately translate the query and the answer. It's good for scenarios where your dataset is in a different language to what you expect the user query to be in. This way, you will be able to translate the user query to your dataset's language, and translate the answer back to the user's language. 
+ +## Installation git clone the repo somewhere, change to the directory, then `pip install '.'` +## Usage Include in your pipeline as follows: ```python diff --git a/integrations/fastrag.md b/integrations/fastrag.md index d3ab8dd..4374c49 100644 --- a/integrations/fastrag.md +++ b/integrations/fastrag.md @@ -12,6 +12,8 @@ type: Custom Node report_issue: https://github.com/IntelLabs/fastRAG/issues --- +# fastRAG + fast**RAG** is a research framework designed to facilitate the building of retrieval augmented generative pipelines. Its main goal is to make retrieval augmented generation as efficient as possible through the use of state-of-the-art and efficient retrieval and generative models. The framework includes a variety of sparse and dense retrieval models, as well as different extractive and generative information processing models. fastRAG aims to provide researchers and developers with a comprehensive tool-set for exploring and advancing the field of retrieval augmented generation. It includes custom nodes such as: @@ -22,7 +24,7 @@ It includes custom nodes such as: - Efficient document vector store (PLAID) - Benchmarking scripts -## 📍 Installation +## Installation Preliminary requirements: diff --git a/integrations/lemmatize.md b/integrations/lemmatize.md index d14f7e2..d942cdd 100644 --- a/integrations/lemmatize.md +++ b/integrations/lemmatize.md @@ -14,7 +14,9 @@ repo: https://github.com/recrudesce/haystack_lemmatize_node type: Custom Node report_issue: https://github.com/recrudesce/haystack_lemmatize_node/issues --- - -## What is Lemmatization + +## Lemmatization + Lemmatization is a text pre-processing technique used in natural language processing (NLP) models to break a word down to its root meaning to identify similarities. For example, a lemmatization algorithm would reduce the word better to its root word, or lemma, good. 
This node can be placed within a pipeline to lemmatize documents returned by a Retriever, prior to adding them as context to a prompt (for a PromptNode or similar). @@ -28,11 +30,13 @@ The process of lemmatizing the document content can potentially reduce the amoun ### After Lemmatization: ![image](https://user-images.githubusercontent.com/6450799/230404246-a8488a57-73bd-4420-9f1b-8a080b84121b.png) -## How to Use +## Installation Clone the repo to a directory, change to that directory, then perform a `pip install '.'`. This will install the package to your Python libraries. -Then, include it in your pipeline - example as follows: +## Usage + +Include it in your pipeline - example as follows: ```python import logging diff --git a/integrations/qdrant-document-store.md b/integrations/qdrant-document-store.md index 062131a..1688c71 100644 --- a/integrations/qdrant-document-store.md +++ b/integrations/qdrant-document-store.md @@ -13,6 +13,8 @@ type: Document Store report_issue: https://github.com/qdrant/qdrant-haystack/issues --- +# Qdrant DocumentStore + An integration of [Qdrant](https://qdrant.tech) vector database with [Haystack](https://haystack.deepset.ai/) by [deepset](https://www.deepset.ai). diff --git a/integrations/veracity.md b/integrations/veracity.md index 2e41fe2..ee38552 100644 --- a/integrations/veracity.md +++ b/integrations/veracity.md @@ -10,15 +10,19 @@ repo: https://github.com/Xceron/haystack_veracity_node type: Custom Node report_issue: https://github.com/Xceron/haystack_veracity_node/issues --- +# Veracity Node + This Node checks whether the given input is correctly answered by the given context (as judged by the given LLM). One example usage is together with [Haystack Memory](https://github.com/rolandtannous/haystack-memory): After the memory is retrieved, the given model checks whether the output is satisfying the question. **Important**: The Node expects the context to be passed into `results`. 
If the previous node in the pipeline is putting the text somewhere else, use a [Shaper](https://docs.haystack.deepset.ai/docs/shaper) to `rename` the argument to `results`. -## How to Use +## Installation + Clone the repo to a directory, change to that directory, then perform a `pip install '.'`. This will install the package to your Python libraries. -## Example Usage with Haystack Memory +## Usage +### Example Usage with Haystack Memory ```py from haystack_veracity_node.node import VeracityNode from haystack_memory.memory import RedisMemoryRecallNode