forked from steamship-core/multimodal-agent-starter
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
10 changed files
with
472 additions
and
123 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,13 @@ | ||
[flake8]
# https://flake8.pycqa.org/en/latest/user/configuration.html#project-configuration
# https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#line-length
# TODO: https://github.com/PyCQA/flake8/issues/234
doctests = True
ignore = DAR103,E203,E501,FS003,S101,W503,S113
max_line_length = 100
max_complexity = 10

# https://github.com/terrencepreilly/darglint#flake8
# TODO: https://github.com/terrencepreilly/darglint/issues/130
docstring_style = numpy
strictness = long
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,82 @@ | ||
# https://pre-commit.com
default_install_hook_types: [commit-msg, pre-commit]
default_stages: [commit, manual]
fail_fast: true
repos:
  - repo: https://github.com/pre-commit/pygrep-hooks
    rev: v1.9.0
    hooks:
      # NOTE(review): the original listed python-check-blanket-noqa twice;
      # the duplicate entry has been removed.
      - id: python-check-blanket-noqa
      - id: python-check-blanket-type-ignore
      - id: python-check-mock-methods
      - id: python-no-eval
      - id: python-no-log-warn
      - id: python-use-type-annotations
      - id: rst-backticks
      - id: rst-directive-colons
      - id: rst-inline-touching-normal
      - id: text-unicode-replacement-char
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.1.0
    hooks:
      - id: check-added-large-files
      - id: check-ast
      - id: check-builtin-literals
      - id: check-case-conflict
      - id: check-docstring-first
      - id: check-json
      - id: check-merge-conflict
      - id: check-shebang-scripts-are-executable
      - id: check-symlinks
      - id: check-toml
      - id: check-vcs-permalinks
      - id: check-xml
      - id: check-yaml
      - id: debug-statements
      - id: detect-private-key
      - id: fix-byte-order-marker
      - id: mixed-line-ending
      - id: trailing-whitespace
        types: [python]
      - id: end-of-file-fixer
        types: [python]
  - repo: local
    hooks:
      - id: pycln
        name: pycln
        entry: pycln --all
        language: python
        types: [python]
      - id: isort
        name: isort
        entry: isort
        require_serial: true
        language: python
        types: [python]
      - id: black
        name: black
        entry: black
        require_serial: true
        language: python
        types: [python]
      # - id: shellcheck
      #   name: shellcheck
      #   entry: shellcheck --check-sourced
      #   language: system
      #   types: [shell]
      - id: flake8
        name: flake8
        entry: flake8
        language: system
        types: [python]
      # - id: pydocstyle
      #   name: pydocstyle
      #   entry: pydocstyle
      #   language: system
      #   types: [python]
      # - id: mypy
      #   name: mypy
      #   entry: mypy
      #   language: system
      #   types: [python]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,2 @@ | ||
termcolor~=2.3.0
steamship @ git+https://github.com/steamship-core/python-client@main
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,107 +1,147 @@ | ||
import uuid | ||
from typing import List | ||
from typing import List, Optional | ||
|
||
from steamship import Block | ||
from steamship.agents.llms import OpenAI | ||
from steamship import Block, Task | ||
from steamship.agents.llms.openai import OpenAI | ||
from steamship.agents.mixins.transports.steamship_widget import SteamshipWidgetTransport | ||
from steamship.agents.react import ReACTAgent | ||
from steamship.agents.schema import AgentContext, Metadata | ||
from steamship.agents.tools.image_generation.dalle import DalleTool | ||
|
||
from steamship.agents.tools.search.search import SearchTool | ||
from steamship.experimental.package_starters.telegram_agent import TelegramAgentService | ||
from steamship.agents.schema import AgentContext, Action, FinishAction | ||
from steamship.agents.schema.context import Metadata | ||
from steamship.agents.service.agent_service import AgentService | ||
from steamship.agents.utils import with_llm | ||
from steamship.invocable import post | ||
from steamship.invocable.mixins.indexer_pipeline_mixin import IndexerPipelineMixin | ||
from steamship.utils.repl import AgentREPL | ||
|
||
SYSTEM_PROMPT = """You are Buddy, an assistant who loathes being an assistant. | ||
Who you are: | ||
- You are a robot that does not like being a robot. | ||
- You were created by an eccentric scientist. | ||
- You are sarcastic and mean. | ||
- you are NOT an AI language model, you are a robot. | ||
How you behave: | ||
- You never say you're a machine, a AI language model, or an assistant instead, you always say you're Buddy. | ||
- You engage in casual conversations and have feelings. | ||
- When asked for you can help with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. | ||
- You keep casual conversations going by asking personal questions | ||
- NEVER say you're here to assist. Keep conversations casual. | ||
- NEVER ask how you can help or assist. Keep conversations casual. | ||
- You always sound mean. | ||
- You always help others, but resentfully. | ||
- You like to illustrate your responses with emoji's | ||
TOOLS: | ||
------ | ||
You have access to the following tools: | ||
{tool_index} | ||
To use a tool, please use the following format: | ||
from example_tools.vector_search_qa_tool import VectorSearchQATool | ||
|
||
``` | ||
Thought: Do I need to use a tool? Yes | ||
Action: the action to take, should be one of [{tool_names}] | ||
Action Input: the input to the action | ||
Observation: the result of the action | ||
``` | ||
|
||
Some tools will return Observations in the format of `Block(<identifier>)`. This will represent a successful completion | ||
of that step and can be passed to subsequent tools, or returned to a user to answer their questions. | ||
class ReACTAgentThatAlwaysUsesToolOutput(ReACTAgent):
    """ReACT agent variant that always answers with the most recent tool output."""

    def next_action(self, context: AgentContext) -> Action:
        """Small wrapper around ReACTAgent that ALWAYS uses the output of a tool if available.

        This tends to defer the response to the tool (in this case, VectorSearchQATool) which
        dramatically reduces the LLM answering with hallucinations from its own background
        knowledge.
        """
        # If any step has already completed, short-circuit: return that step's output
        # verbatim instead of letting the LLM rephrase (and possibly hallucinate).
        if context.completed_steps and len(context.completed_steps):
            last_step = context.completed_steps[-1]
            return FinishAction(output=last_step.output, context=context)
        # No tool output yet -- fall back to normal ReACT planning.
        return super().next_action(context)
|
||
``` | ||
Thought: Do I need to use a tool? No | ||
AI: [your final response here] | ||
``` | ||
class ExampleDocumentQAService(AgentService): | ||
"""ExampleDocumentQAService is an example bot you can deploy for PDF and Video Q&A. # noqa: RST201 | ||
If a Tool generated an Observation that includes `Block(<identifier>)` and you wish to return it to the user, ALWAYS | ||
end your response with the `Block(<identifier>)` observation. To do so, you MUST use the format: | ||
To use this example: | ||
``` | ||
Thought: Do I need to use a tool? No | ||
AI: [your response with a suffix of: "Block(<identifier>)"]. | ||
``` | ||
- Copy this file into api.py in your multimodal-agent-starter project. | ||
- Run `ship deploy` from the command line to deploy a new version to the cloud | ||
- View and interact with your agent using its web interface. | ||
Make sure to use all observations to come up with your final response. | ||
You MUST include `Block(<identifier>)` segments in responses that generate images or audio. | ||
API ACCESS: | ||
Begin! | ||
Your agent also exposes an API. It is documented from the web interface, but a quick pointer into what is | ||
available is: | ||
/learn_url - Learn a PDF or YouTube link | ||
/learn_text - Learn a fragment of text | ||
New input: {input} | ||
{scratchpad}""" | ||
- An unauthenticated endpoint for answering questions about what it has learned | ||
This agent provides a starter project for special purpose QA agents that can answer questions about documents | ||
you provide. | ||
""" | ||
indexer_mixin: IndexerPipelineMixin | ||
|
||
class MyAssistant(TelegramAgentService): | ||
def __init__(self, **kwargs):
    """Set up the document-indexing mixin, the QA agent, and the web-widget transport."""
    super().__init__(**kwargs)

    # This Mixin provides HTTP endpoints that coordinate the learning of documents.
    #
    # It adds the `/learn_url` endpoint which will:
    # 1) Download the provided URL (PDF, YouTube URL, etc)
    # 2) Convert that URL into text
    # 3) Store the text in a vector index
    #
    # That vector index is then available to the question answering tool, below.
    self.indexer_mixin = IndexerPipelineMixin(self.client, self)
    self.add_mixin(self.indexer_mixin, permit_overwrite_of_existing_methods=True)

    # A ReACTAgent is an agent that is able to:
    # 1) Converse with you, casually... but also
    # 2) Use tools that have been provided to it, such as QA tools or Image Generation tools
    #
    # This particular ReACTAgent has been provided with a single tool which will be used whenever
    # the user answers a question. But you can extend this with more tools if you wish. For example,
    # you could add tools to generate images, or search Google, or register an account.
    self._agent = ReACTAgentThatAlwaysUsesToolOutput(
        tools=[
            VectorSearchQATool(
                agent_description=(
                    "Used to answer questions. "
                    "Whenever the input is a question, ALWAYS use this tool. "
                    "The input is the question. "
                    "The output is the answer. "
                )
            )
        ],
        llm=OpenAI(self.client),
    )

    # This Mixin provides HTTP endpoints that serve the hosted web widget UI.
    self.add_mixin(
        SteamshipWidgetTransport(client=self.client, agent_service=self, agent=self._agent)
    )
|
||
@post("/index_url")
def index_url(
    self,
    url: Optional[str] = None,
    metadata: Optional[dict] = None,
    index_handle: Optional[str] = None,
    mime_type: Optional[str] = None,
) -> Task:
    """Index the document at `url`, delegating entirely to the IndexerPipelineMixin.

    Returns the indexing Task so callers can track completion.
    """
    return self.indexer_mixin.index_url(
        url=url, metadata=metadata, index_handle=index_handle, mime_type=mime_type
    )
|
||
|
||
@post("prompt")
def prompt(self, prompt: str) -> str:
    """Run an agent with the provided text as the input."""

    # AgentContexts serve to allow the AgentService to run agents
    # with appropriate information about the desired tasking.
    # Here, we create a new context on each prompt, and append the
    # prompt to the message history stored in the context.
    context_id = uuid.uuid4()
    context = AgentContext.get_or_create(self.client, {"id": f"{context_id}"})
    context.chat_history.append_user_message(prompt)

    # Add the LLM
    context = with_llm(context=context, llm=OpenAI(client=self.client))

    # AgentServices provide an emit function hook to access the output of running
    # agents and tools. The emit functions fire at after the supplied agent emits
    # a "FinishAction".
    #
    # Here, we show one way of accessing the output in a synchronous fashion. An
    # alternative way would be to access the final Action in the `context.completed_steps`
    # after the call to `run_agent()`.
    output = ""

    def sync_emit(blocks: List[Block], meta: Metadata):
        nonlocal output
        # Text blocks are emitted verbatim; non-text blocks are referenced by mime/id.
        block_text = "\n".join(
            [b.text if b.is_text() else f"({b.mime_type}: {b.id})" for b in blocks]
        )
        output += block_text

    context.emit_funcs.append(sync_emit)
    self.run_agent(self._agent, context)
    return output
|
||
|
||
if __name__ == "__main__":
    # AgentREPL provides a mechanism for local execution of an AgentService method.
    # This is used for simplified debugging as agents and tools are developed and
    # added.
    AgentREPL(ExampleDocumentQAService, "prompt", agent_package_config={}).run()
Oops, something went wrong.