diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 0000000..f3b2e2d
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,23 @@
+# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
+ARG VARIANT=3-bullseye
+FROM python:${VARIANT}
+
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+    # Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
+    && apt-get purge -y imagemagick imagemagick-6-common
+
+# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897
+# They are installed by the base image (python) which does not have the patch.
+RUN python3 -m pip install --upgrade setuptools
+
+# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
+# COPY requirements.txt /tmp/pip-tmp/
+# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \
+#    && rm -rf /tmp/pip-tmp
+
+# [Optional] Uncomment this section to install additional OS packages.
+# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+#     && apt-get -y install --no-install-recommends <your-package-list-here>
+
+# [Optional] Uncomment this line to install global node packages.
+# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1
\ No newline at end of file
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000..5fefd9c
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,39 @@
+{
+    "build": {
+        "dockerfile": "./Dockerfile",
+        "context": "."
+    },
+    "features": {
+        "ghcr.io/devcontainers/features/common-utils:2": {
+            "installZsh": "true",
+            "username": "vscode",
+            "userUid": "1000",
+            "userGid": "1000",
+            "upgradePackages": "true"
+        },
+        "ghcr.io/devcontainers/features/python:1": "none",
+        "ghcr.io/devcontainers/features/node:1": "none",
+        "ghcr.io/devcontainers/features/git:1": {
+            "version": "latest",
+            "ppa": "false"
+        }
+    },
+    // Configure tool-specific properties.
+    "customizations": {
+        // Configure properties specific to VS Code.
+        "vscode": {
+            // Set *default* container specific settings.json values on container create.
+            "settings": {
+                "python.defaultInterpreterPath": "/usr/local/bin/python"
+            }
+        }
+    },
+    // Use 'forwardPorts' to make a list of ports inside the container available locally.
+    // "forwardPorts": [],
+
+    // Use 'postCreateCommand' to run commands after the container is created.
+    // "postCreateCommand": "pip3 install --user -r requirements.txt",
+
+    // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
+ "remoteUser": "vscode" +} diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..c456b39 --- /dev/null +++ b/.flake8 @@ -0,0 +1,12 @@ +[flake8] +max-line-length = 88 +extend-ignore = E203 +exclude = + .tox, + __pycache__, + *.pyc, + .env + venv/* + .venv/* + reports/* + dist/* \ No newline at end of file diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000..54ef108 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,3 @@ +# These are supported funding model platforms + +github: Torantulino diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml new file mode 100644 index 0000000..7f1d277 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -0,0 +1,60 @@ +name: Bug report 🐛 +description: Create a bug report for Auto-GPT. +labels: ['status: needs triage'] +body: + - type: checkboxes + attributes: + label: ⚠️ Search for existing issues first ⚠️ + description: > + Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) + to see if an issue already exists for the same problem. + options: + - label: I have searched the existing issues, and there is no existing issue for my problem + required: true + - type: markdown + attributes: + value: | + Please provide a searchable summary of the issue in the title above ⬆️. + + ⚠️ SUPER-busy repo, please help the volunteer maintainers. + The less time we spend here, the more time we spend building AutoGPT. + + Please help us help you: + - Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)? + - Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)? + - Search for existing issues, "add comment" is tidier than "new issue" + - Ask on our Discord (https://discord.gg/autogpt) + - Provide relevant info: + - Provide commit-hash (`git rev-parse HEAD` gets it) + - If it's a pip/packages issue, provide pip version, python version + - If it's a crash, provide traceback. + - type: checkboxes + attributes: + label: GPT-3 or GPT-4 + description: > + If you are using Auto-GPT with `--gpt3only`, your problems may be caused by + the limitations of GPT-3.5 + options: + - label: I am using Auto-GPT with GPT-3 (GPT-3.5) + - type: textarea + attributes: + label: Steps to reproduce 🕹 + description: | + **⚠️ Issues that we can't reproduce will be closed.** + - type: textarea + attributes: + label: Current behavior 😯 + description: Describe what happens instead of the expected behavior. + - type: textarea + attributes: + label: Expected behavior 🤔 + description: Describe what should happen. + - type: textarea + attributes: + label: Your prompt 📝 + description: | + If applicable please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml. + value: | + ```yaml + # Paste your prompt here + ``` diff --git a/.github/ISSUE_TEMPLATE/2.feature.yml b/.github/ISSUE_TEMPLATE/2.feature.yml new file mode 100644 index 0000000..0ea882e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2.feature.yml @@ -0,0 +1,29 @@ +name: Feature request 🚀 +description: Suggest a new idea for Auto-GPT. +labels: ['status: needs triage'] +body: + - type: markdown + attributes: + value: | + Please provide a searchable summary of the issue in the title above ⬆️. + + Thanks for contributing by creating an issue! ❤️ + - type: checkboxes + attributes: + label: Duplicates + description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem. 
+      options:
+        - label: I have searched the existing issues
+          required: true
+  - type: textarea
+    attributes:
+      label: Summary 💡
+      description: Describe how it should work.
+  - type: textarea
+    attributes:
+      label: Examples 🌈
+      description: Provide a link to other implementations, or screenshots of the expected behavior.
+  - type: textarea
+    attributes:
+      label: Motivation 🔦
+      description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
\ No newline at end of file
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..c355965
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,33 @@
+
+
+### Background
+
+
+### Changes
+
+
+### Documentation
+
+
+### Test Plan
+
+
+### PR Quality Checklist
+- [ ] My pull request is atomic and focuses on a single change.
+- [ ] I have thoroughly tested my changes with multiple different prompts.
+- [ ] I have considered potential risks and mitigations for my changes.
+- [ ] I have documented my changes clearly and comprehensively.
+- [ ] I have not snuck in any "extra" small tweaks or changes
+
+
+
+
diff --git a/.github/workflows/auto_format.yml b/.github/workflows/auto_format.yml
new file mode 100644
index 0000000..c33cd39
--- /dev/null
+++ b/.github/workflows/auto_format.yml
@@ -0,0 +1,25 @@
+name: auto-format
+on: pull_request
+jobs:
+  format:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout PR branch
+        uses: actions/checkout@v2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+      - name: autopep8
+        uses: peter-evans/autopep8@v1
+        with:
+          args: --exit-code --recursive --in-place --aggressive --aggressive .
+      - name: Check for modified files
+        id: git-check
+        run: echo "modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)" >> $GITHUB_OUTPUT
+      - name: Push changes
+        if: steps.git-check.outputs.modified == 'true'
+        run: |
+          git config --global user.name 'Torantulino'
+          git config --global user.email 'toran.richards@gmail.com'
+          git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}
+          git commit -am "Automated formatting fixes"
+          git push
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..366aaf6
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,44 @@
+name: Python CI
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        python-version: [3.8]
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v2
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Lint with flake8
+        continue-on-error: false
+        run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
+
+      - name: Run unittest tests with coverage
+        run: |
+          coverage run --source=autogpt -m unittest discover tests
+
+      - name: Generate coverage report
+        run: |
+          coverage report
+          coverage xml
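
The CI workflow above runs `coverage run --source=autogpt -m unittest discover tests`. For orientation, a minimal test module that this discovery step would pick up looks like the following sketch (the file name and test case are illustrative, not part of this changeset):

```python
# tests/test_smoke.py -- hypothetical example for illustration only.
import unittest


class TestSmoke(unittest.TestCase):
    def test_truth(self):
        # unittest discovery picks up test_*.py files containing TestCase classes.
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()
```
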
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
new file mode 100644
index 0000000..9c77098
--- /dev/null
+++ b/.github/workflows/docker-image.yml
@@ -0,0 +1,18 @@
+name: Docker Image CI
+
+on:
+  push:
+    branches: [ "master" ]
+  pull_request:
+    branches: [ "master" ]
+
+jobs:
+
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Build the Docker image
+      run: docker build . --file Dockerfile --tag autogpt:$(date +%s)
diff --git a/.github/workflows/dockerhub-imagepush.yml b/.github/workflows/dockerhub-imagepush.yml
new file mode 100644
index 0000000..6805eeb
--- /dev/null
+++ b/.github/workflows/dockerhub-imagepush.yml
@@ -0,0 +1,24 @@
+name: Push Docker Image on Release
+
+on:
+  push:
+    branches: [ "stable" ]
+
+jobs:
+
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Log in to Docker hub
+      env:
+        DOCKER_USER: ${{secrets.DOCKER_USER}}
+        DOCKER_PASSWORD: ${{secrets.DOCKER_PASSWORD}}
+      run: |
+        docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
+    - name: Build the Docker image
+      run: docker build . --file Dockerfile --tag ${{secrets.DOCKER_USER}}/auto-gpt:$(git describe --tags `git rev-list --tags --max-count=1`)
+    - name: Docker Push
+      run: docker push ${{secrets.DOCKER_USER}}/auto-gpt:$(git describe --tags `git rev-list --tags --max-count=1`)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3209297
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,159 @@
+## Original ignores
+autogpt/keys.py
+autogpt/*json
+autogpt/node_modules/
+autogpt/__pycache__/keys.cpython-310.pyc
+package-lock.json
+*.pyc
+auto_gpt_workspace/*
+*.mpeg
+.env
+azure.yaml
+*venv/*
+outputs/*
+ai_settings.yaml
+last_run_ai_settings.yaml
+.vscode
+.idea/*
+auto-gpt.json
+log.txt
+log-ingestion.txt
+logs
+*.log
+*.mp3
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+plugins/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+llama-*
+vicuna-*
+
+# mac
+.DS_Store
diff --git a/.isort.cfg b/.isort.cfg
new file mode 100644
index 0000000..8ad53a8
--- /dev/null
+++ b/.isort.cfg
@@ -0,0 +1,10 @@
+[settings]
+profile = black
+multi_line_output = 3
+include_trailing_comma = True
+force_grid_wrap = 0
+use_parentheses = True
+ensure_newline_before_comments = True
+line_length = 88
+skip = venv,env,node_modules,.env,.venv,dist
+sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..fb75cd5
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,33 @@
+repos:
+  - repo: https://github.com/sourcery-ai/sourcery
+    rev: v1.1.0 # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags
+    hooks:
+      - id: sourcery
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v0.9.2
+    hooks:
+      - id: check-added-large-files
+        args: [ '--maxkb=500' ]
+      - id: check-byte-order-marker
+      - id: check-case-conflict
+      - id: check-merge-conflict
+      - id: check-symlinks
+      - id: debug-statements
+
+  - repo: local
+    hooks:
+      - id: isort
+        name: isort-local
+        entry: isort
+        language: python
+        types: [ python ]
+        exclude: .+/(dist|.venv|venv|build)/.+
+        pass_filenames: true
+      - id: black
+        name: black-local
+        entry: black
+        language: python
+        types: [ python ]
+        exclude: .+/(dist|.venv|venv|build)/.+
+        pass_filenames: true
\ No newline at end of file
diff --git a/.sourcery.yaml b/.sourcery.yaml
new file mode 100644
index 0000000..a7f5b9d
--- /dev/null
+++ b/.sourcery.yaml
@@ -0,0 +1,71 @@
+# 🪄 This is your project's Sourcery configuration file.
+
+# You can use it to get Sourcery working in the way you want, such as
+# ignoring specific refactorings, skipping directories in your project,
+# or writing custom rules.
+
+# 📚 For a complete reference to this file, see the documentation at
+# https://docs.sourcery.ai/Configuration/Project-Settings/
+
+# This file was auto-generated by Sourcery on 2023-02-25 at 21:07.
+
+version: '1'  # The schema version of this config file
+
+ignore:  # A list of paths or files which Sourcery will ignore.
+- .git
+- venv
+- .venv
+- build
+- dist
+- env
+- .env
+- .tox
+
+rule_settings:
+  enable:
+  - default
+  - gpsg
+  disable: []  # A list of rule IDs Sourcery will never suggest.
+  rule_types:
+  - refactoring
+  - suggestion
+  - comment
+  python_version: '3.9'  # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
+
+# rules:  # A list of custom rules Sourcery will include in its analysis.
+# - id: no-print-statements
+#   description: Do not use print statements in the test directory.
+#   pattern: print(...)
+#   language: python
+#   replacement:
+#   condition:
+#   explanation:
+#   paths:
+#     include:
+#     - test
+#     exclude:
+#     - conftest.py
+#   tests: []
+#   tags: []
+
+# rule_tags: {}  # Additional rule tags.
+
+# metrics:
+#   quality_threshold: 25.0
+
+# github:
+#   labels: []
+#   ignore_labels:
+#   - sourcery-ignore
+#   request_review: author
+#   sourcery_branch: sourcery/{base_branch}
+
+# clone_detection:
+#   min_lines: 3
+#   min_duplicates: 2
+#   identical_clones_only: false
+
+# proxy:
+#   url:
+#   ssl_certs_file:
+#   no_ssl_verify: false
\ No newline at end of file
diff --git a/AutoGpt.json b/AutoGpt.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/AutoGpt.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..d2331b4
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,40 @@
+# Code of Conduct for auto-gpt
+
+## 1. Purpose
+
+The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
+
+## 2. Scope
+
+This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
+
+## 3. Our Standards
+
+We encourage the following behavior:
+
+* Being respectful and considerate to others
+* Actively seeking diverse perspectives
+* Providing constructive feedback and assistance
+* Demonstrating empathy and understanding
+
+We discourage the following behavior:
+
+* Harassment or discrimination of any kind
+* Disrespectful, offensive, or inappropriate language or content
+* Personal attacks or insults
+* Unwarranted criticism or negativity
+
+## 4. Reporting and Enforcement
+
+If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.
+
+Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.
+
+## 5. Acknowledgements
+
+This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
+
+## 6. Contact
+
+If you have any questions or concerns, please contact the project maintainers.
+
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..b2a2490
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+# Contributing to Auto-GPT
+
+First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.
+
+This document provides guidelines and best practices to help you contribute effectively.
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [Getting Started](#getting-started)
+- [How to Contribute](#how-to-contribute)
+  - [Reporting Bugs](#reporting-bugs)
+  - [Suggesting Enhancements](#suggesting-enhancements)
+  - [Submitting Pull Requests](#submitting-pull-requests)
+- [Style Guidelines](#style-guidelines)
+  - [Code Formatting](#code-formatting)
+  - [Pre-Commit Hooks](#pre-commit-hooks)
+
+## Code of Conduct
+
+By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project.
+
+## Getting Started
+
+To start contributing, follow these steps:
+
+1. Fork the repository and clone your fork.
+2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
+3. Make your changes in the new branch.
+4. Test your changes thoroughly.
+5. Commit and push your changes to your fork.
+6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.
+
+## How to Contribute
+
+### Reporting Bugs
+
+If you find a bug in the project, please create an issue on GitHub with the following information:
+
+- A clear, descriptive title for the issue.
+- A description of the problem, including steps to reproduce the issue.
+- Any relevant logs, screenshots, or other supporting information.
+
+### Suggesting Enhancements
+
+If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:
+
+- A clear, descriptive title for the issue.
+- A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
+- Any relevant examples, mockups, or supporting information.
+
+### Submitting Pull Requests
+
+When submitting a pull request, please ensure that your changes meet the following criteria:
+
+- Your pull request should be atomic and focus on a single change.
+- Your pull request should include tests for your change.
+- You should have thoroughly tested your changes with multiple different prompts.
+- You should have considered potential risks and mitigations for your changes.
+- You should have documented your changes clearly and comprehensively.
+- You should not include any unrelated or "extra" small tweaks or changes.
+
+## Style Guidelines
+
+### Code Formatting
+
+We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`:
+
+```bash
+pip install black
+```
+
+To format your code, run the following command in the project's root directory:
+
+```bash
+black .
+```
+
+### Pre-Commit Hooks
+
+We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:
+
+Install the pre-commit package using pip:
+
+```bash
+pip install pre-commit
+```
+
+Run the following command in the project's root directory to install the pre-commit hooks:
+
+```bash
+pre-commit install
+```
+
+Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.
+
+If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.
+
+Happy coding, and once again, thank you for your contributions!
+
+Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
+
+https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..9886d74
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,27 @@
+# Use an official Python base image from the Docker Hub
+FROM python:3.11-slim
+
+# Install git and the Chromium driver
+RUN apt-get -y update
+RUN apt-get -y install git chromium-driver
+
+# Set environment variables
+ENV PIP_NO_CACHE_DIR=yes \
+    PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1
+
+# Create a non-root user and set permissions
+RUN useradd --create-home appuser
+WORKDIR /home/appuser
+RUN chown appuser:appuser /home/appuser
+USER appuser
+
+# Copy the requirements.txt file and install the requirements
+COPY --chown=appuser:appuser requirements-docker.txt .
+RUN pip install --no-cache-dir --user -r requirements-docker.txt
+
+# Copy the application files
+COPY --chown=appuser:appuser autogpt/ ./autogpt
+
+# Set the entrypoint
+ENTRYPOINT ["python", "-m", "autogpt"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..601935b
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Toran Bruce Richards
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..599365c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,414 @@
+# Auto-GPT: An Autonomous GPT-4 Experiment
+
+> This is the Auto-GPT Chinese project, a synced fork of Auto-GPT. Auto-GPT changed its branch rules, so this fork tracks the latest Stable branch.
+
+![gzh](docs/imgs/gzh.png)
+
+### Follow the WeChat official account 阿杰的人生路 and reply "Auto-GPT" to join the group chat and explore more ways to use it
+
+### Chinese demo:
+
+![Demo video](docs/imgs/demo.gif)
+
+Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. Driven by GPT-4, this program chains together LLM "thoughts" to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
+
+## What can it do?
+
+
+**Autonomous AI**: its defining capability is autonomy — the kind that needs **no human intervention at all**!
+
+**For example:** I asked AutoGPT to build a login page with Vue, and in under three minutes the AI had simply whipped it up on its own.
+
+The AI opens the browser by itself, uses third-party tools by itself, thinks by itself, and operates your computer by itself.
+It first opened the Vue website to learn how to create a project from a template, then fetched a similar page from GitHub, downloaded it, and adapted it on its own.
+
+**For example:** give it a task, such as doing some business research for you, or writing up a historical story.
+
+Once AutoGPT receives the task, it begins its performance:
+
+- Thinking...
+- Add task: call the browser or the GPT API to study the material, then analyse it
+- Add task: after studying, plan what needs to be done
+- Add task: carry it out step by step.
+- Thinking...
+
+Then AgentGPT prints the results of each step.
+Or you give it the command: "Get me the secret files from the White House".
+- It will consider how to do it
+- It may start by searching for and downloading related documents from the internet.
+- If it finds that insufficient, it might study some hacking and break into the White House for the files.
+- At that point, please take care of yourself: you may be staring at your screen when you suddenly notice a crowd of large men wearing FBI badges outside your window. Don't panic, don't resist, and don't try to run.
+- Remember to take a photo and post it first.
+
+Joking aside, the point is that it can now attempt anything you ask of it — a sort of invincible superhuman. But don't get your hopes too high either: it may well run for half a day and produce nothing. It is still a child; give it some time. The idea is excellent, and the future looks bright.
+
+## 📋 Requirements
+
+- [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
+- [OpenAI API key](https://platform.openai.com/account/api-keys)
+
+Optional:
+
+- [PINECONE API key](https://www.pinecone.io/) (if you want Pinecone to store memory; local storage is fine by default)
+- [Milvus](https://milvus.io/) (if you want Milvus as the memory backend)
+
+- [ElevenLabs Key](https://elevenlabs.io/) (if you want the AI to speak)
+
+## 💾 Installation
+
+To install Auto-GPT, follow these steps:
+
+1. Make sure you meet all the **requirements** above; if not, install/obtain them.
+
+The following commands must be run in a terminal:
+
+2. Clone the repository: for this step you need Git installed, but you can also download the zip file by clicking the button at the top of this page ☝️
+
+```
+git clone git@github.com:kaqijiang/Auto-GPT-ZH.git
+```
+
+3. cd into the project directory
+
+```
+cd Auto-GPT-ZH
+```
+
+4. Install the required dependencies
+
+```
+pip install -r requirements.txt
+```
+
+5.
+- Rename `.env.template` to `.env`. Note that `.env.template` is a hidden file; if you can't find it, look up how to show hidden files on Windows/macOS.
+- Fill in your `OPENAI_API_KEY`: find OPENAI_API_KEY= and, after the '=', enter your unique OpenAI API key (without any quotes or spaces).
+- If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
+
+  - Get your OpenAI API key from: https://platform.openai.com/account/api-keys.
+  - Get your ElevenLabs API key from https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+
+## Terminal proxy setup
+
+Recommended tool: [stable, fast proxy, ¥56 per year, half price during promotions, direct link](https://www.hjtnt.pro/auth/register?code=QRY5)
+
+macOS: download [ClashX Pro](https://install.appcenter.ms/users/clashx/apps/clashx-pro/distribution_groups/public), enable System Proxy and Enhanced Mode, then copy the terminal proxy commands, run them in the terminal, and restart it.
+
+Adjust the port to match your own tool:
+
+```
+export https_proxy=http://127.0.0.1:8484 http_proxy=http://127.0.0.1:8484 all_proxy=socks5://127.0.0.1:8484
+```
+
+Windows: download [Clash for Windows](https://wws.lanzoux.com/iCEgLj27fra), enable System Proxy, run the commands in the terminal, and restart it.
+
+Adjust the port to match your own tool:
+
+```
+# Use an http proxy
+set http_proxy=http://127.0.0.1:8484
+set https_proxy=http://127.0.0.1:8484
+# Use a socks proxy
+netsh winhttp set proxy proxy-server="socks=127.0.0.1:8484" bypass-list="localhost"
+netsh winhttp show proxy
+netsh winhttp reset proxy
+# Use a socks proxy
+set http_proxy=socks5://127.0.0.1:8484
+set https_proxy=socks5://127.0.0.1:8484
+```
+
+## 🔧 Usage
+
+1. Run `main.py` in the terminal
+
+```
+python -m autogpt
+```
+
+2. After each of AUTO-GPT's actions, enter 'y' to authorise the command, 'y -N' to run N continuous commands, 'n' to exit the program, or enter additional feedback for the AI.
+
+### Logs
+
+You will find activity and error logs in the folder `./output/logs`
+
+To print debug logs:
+
+```
+python -m autogpt --debug
+```
+
+### Command-line arguments
+
+Here are some common arguments you can use when running Auto-GPT:
+
+> Replace anything in angle brackets (<>) with the value you want to specify
+
+- `python scripts/main.py --help` to see a list of all available command-line arguments.
+- `python scripts/main.py --ai-settings <filename>` to run Auto-GPT with a different AI settings file.
+- `python scripts/main.py --use-memory <memory-backend>` to specify one of the memory backends: `local`, `redis`, `pinecone`, or 'no_memory'.
+
+> **Note**: some of these flags have shorthand forms, e.g. `-m` for `--use-memory`. Use `python scripts/main.py --help` for more information
+
+## 🗣️ Speech Mode
+
+Use this to enable TTS for Auto-GPT
+
+```
+python -m autogpt --speak
+```
+## OpenAI API key configuration
+Get your OpenAI API key from: https://platform.openai.com/account/api-keys.
+
+To use an OpenAI API key with Auto-GPT, you need to set up billing (i.e. a paid account).
+
+You can set up a paid account at https://platform.openai.com/account/billing/overview.
+
+For the OpenAI API key to work, set up a paid account under OpenAI API > Billing
+
+![For the OpenAI API key to work, set up a paid account under OpenAI API > Billing](openai-api-key.png)
+
+## 🔍 Google API key configuration
+
+This section is optional; use the official Google API if you run into error 429 when running Google searches. To use the `google_official_search` command, you need to set a Google API key in your environment variables.
+
+1. Go to the [Google Cloud Console](https://console.cloud.google.com/).
+2. If you don't already have an account, create one and sign in.
+3. Create a new project by clicking the "Select a project" dropdown at the top of the page and clicking "New Project". Give it a name and click "Create".
+4. Go to the [APIs & Services dashboard](https://console.cloud.google.com/apis/dashboard) and click "Enable APIs and Services". Search for "Custom Search API", click on it, then click "Enable".
+5. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page and click "Create Credentials". Choose "API key".
+6. Copy the API key and set it as an environment variable named `GOOGLE_API_KEY` on your machine. See setting environment variables below.
+7. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page and click "Add".
+8. Follow the prompts to set up your search engine. You can choose to search the entire web or specific sites.
+9. Once you've created your search engine, click "Control Panel", then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting environment variables below.
+
+*Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10,000 daily searches.*
+
+### Setting environment variables
+
+For Windows users:
+
+```
+setx GOOGLE_API_KEY "YOUR_GOOGLE_API_KEY"
+setx CUSTOM_SEARCH_ENGINE_ID "YOUR_CUSTOM_SEARCH_ENGINE_ID"
+
+```
+
+For macOS and Linux users:
+
+```
+export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
+export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID"
+```
+
+## Setting the cache type
+
+By default, Auto-GPT uses LocalCache rather than redis or Pinecone.
+
+To switch to one of the others, change the `MEMORY_BACKEND` env variable to the value you want:
+
+- `local` (default) uses a local JSON cache file
+- `pinecone` uses the Pinecone.io account you configured in your ENV settings
+- `redis` will use the redis cache that you configured
+- `milvus` will use the milvus cache that you configured
+- `weaviate` will use the weaviate cache that you configured
+
+### Redis Setup
+
+> Warning: this is not intended to be run facing the internet and is not secure. Avoid exposing Redis to the internet without a password, or at all, really.
+
+1. Install Docker Desktop
+
+```
+docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+```
+
+> See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
+
+1. Set the following environment variables
+
+> Replace the **password** in angle brackets (<>)
+
+```
+MEMORY_BACKEND=redis
+REDIS_HOST=localhost
+REDIS_PORT=6379
+REDIS_PASSWORD=<PASSWORD>
+```
+
+You can optionally set
+
+```
+WIPE_REDIS_ON_START=False
+```
+
+to persist memory stored in Redis.
+
+You can specify the memory index for redis using the following:
+
+```
+MEMORY_INDEX=<WHATEVER>
+```
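+
+To sanity-check these values before starting Auto-GPT, you can ping the server with the `redis` Python package. This is an illustrative snippet, not part of Auto-GPT itself:
+
+```python
+import os
+
+import redis  # pip install redis
+
+# Read the same variables Auto-GPT reads from .env (names as in the block above).
+client = redis.Redis(
+    host=os.getenv("REDIS_HOST", "localhost"),
+    port=int(os.getenv("REDIS_PORT", "6379")),
+    password=os.getenv("REDIS_PASSWORD") or None,
+)
+print(client.ping())  # True means the memory backend is reachable
+```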
+
+### 🌲 Pinecone API Key Setup
+
+Pinecone enables the storage of vast amounts of vector-based memory, allowing only relevant memories to be loaded for the agent at any given time.
+
+1. Go to [pinecone](https://app.pinecone.io/) and create an account if you don't already have one.
+2. Choose the `Starter` plan to avoid being charged.
+3. Find your API key and region under the default project in the left sidebar.
+
+In the `.env` file set:
+
+- `PINECONE_API_KEY`
+- `PINECONE_ENV` (example: *"us-east4-gcp"*)
+- `MEMORY_BACKEND=pinecone`
+
+Alternatively, you can set them from the command line (advanced):
+
+For Windows users:
+
+```
+setx PINECONE_API_KEY "<YOUR_PINECONE_API_KEY>"
+setx PINECONE_ENV "<YOUR_PINECONE_REGION>" # e.g: "us-east4-gcp"
+setx MEMORY_BACKEND "pinecone"
+```
+
+For macOS and Linux users:
+
+```
+export PINECONE_API_KEY="<YOUR_PINECONE_API_KEY>"
+export PINECONE_ENV="<YOUR_PINECONE_REGION>" # e.g: "us-east4-gcp"
+export MEMORY_BACKEND="pinecone"
+```
+
+### Milvus Setup
+
+[Milvus](https://milvus.io/) is an open-source, highly scalable vector database that can store large amounts of vector-based memory and provide fast relevant search.
+
+- Set up a milvus database; keep your pymilvus version and milvus version the same to avoid compatibility issues.
+  - [Install Milvus](https://milvus.io/docs/install_standalone-operator.md) from the open-source distribution
+  - or set it up with [Zilliz Cloud](https://zilliz.com/cloud)
+- Set `MILVUS_ADDR` in `.env` to your milvus address `host:ip`.
+- Set `MEMORY_BACKEND` in `.env` to `milvus` to enable milvus as the backend.
+- Optional
+  - set `MILVUS_COLLECTION` in `.env` to change the milvus collection name as you want; `autogpt` is the default name.
+
+### Weaviate Setup
+
+[Weaviate](https://weaviate.io/) is an open-source vector database. It allows storing data objects and vector embeddings from ML models and scales seamlessly to billions of data objects. [A Weaviate instance can be created locally (using Docker), on Kubernetes, or using Weaviate Cloud Services](https://weaviate.io/developers/weaviate/quickstart). Although still experimental, [Embedded Weaviate is supported, which allows the Auto-GPT process itself to start a Weaviate instance](https://weaviate.io/developers/weaviate/installation/embedded). To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip install "weaviate-client>=3.15.4"`.
+
+#### Setting environment variables
+
+In your `.env` file set the following:
+
+```
+MEMORY_BACKEND=weaviate
+WEAVIATE_HOST="127.0.0.1" # the IP or domain of the running Weaviate instance
+WEAVIATE_PORT="8080"
+WEAVIATE_PROTOCOL="http"
+WEAVIATE_USERNAME="your username"
+WEAVIATE_PASSWORD="your password"
+WEAVIATE_API_KEY="your weaviate API key if you have one"
+WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" # this is optional and indicates where the data should be persisted when running an embedded instance
+USE_WEAVIATE_EMBEDDED=False # set to True to run Embedded Weaviate
+MEMORY_INDEX="Autogpt" # name of the index to create for the application
+```
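+
+For orientation, here is an illustrative `weaviate-client` (v3) snippet showing how settings like these are typically consumed — it is not Auto-GPT's own memory code, and the no-argument `EmbeddedOptions()` default is an assumption:
+
+```python
+import os
+
+import weaviate  # pip install "weaviate-client>=3.15.4"
+
+if os.getenv("USE_WEAVIATE_EMBEDDED") == "True":
+    # Embedded mode: the client process starts Weaviate itself.
+    from weaviate.embedded import EmbeddedOptions
+
+    client = weaviate.Client(embedded_options=EmbeddedOptions())
+else:
+    url = (
+        f'{os.getenv("WEAVIATE_PROTOCOL", "http")}://'
+        f'{os.getenv("WEAVIATE_HOST", "127.0.0.1")}:{os.getenv("WEAVIATE_PORT", "8080")}'
+    )
+    client = weaviate.Client(url)
+
+print(client.is_ready())  # True once the instance is reachable
+```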
+
+## Viewing Memory Usage
+
+1. View memory usage with the `--debug` flag :)
+
+## 🧠 Memory pre-seeding
+
+#### python scripts/data_ingestion.py -h
+
+```
+
+usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]
+
+Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.
+
+options:
+  -h, --help               show this help message and exit
+  --file FILE              The file to ingest.
+  --dir DIR                The directory containing the files to ingest.
+  --init                   Init the memory and wipe its content (default: False)
+  --overlap OVERLAP        The overlap size between chunks when ingesting files (default: 200)
+  --max_length MAX_LENGTH  The max_length of each chunk when ingesting files (default: 4000)
+```
+
+#### python autogpt/data_ingestion.py --dir seed_data --init --overlap 200 --max_length 1000
+
+- This script, located at autogpt/data_ingestion.py, lets you ingest files into memory and pre-seed it before running Auto-GPT.
+
+  Memory pre-seeding is a technique that involves ingesting relevant documents or data into the AI's memory so that it can use this information to generate more informed and accurate responses.
+
+  To pre-seed the memory, the content of each document is split into chunks of a specified maximum length with a specified overlap between chunks, and then each chunk is added to the memory backend set in the .env file. When the AI is prompted to recall information, it can then access those pre-seeded memories to generate more informed and accurate responses.
+
+  This technique is particularly useful when working with large amounts of data, or when there is specific information that the AI needs to be able to access quickly. By pre-seeding the memory, the AI can retrieve and use this information more efficiently, saving time, API calls, and improving the accuracy of its responses.
+
+  For example, you could download the documentation of an API, a GitHub repository, etc., and ingest it into memory before running Auto-GPT.
+
+  ⚠️ If you use Redis as your memory, make sure to run Auto-GPT with `WIPE_REDIS_ON_START` set to `False` in your `.env` file.
+
+  ⚠️ For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the `data_ingestion.py` script anytime during an Auto-GPT run.
+
+  Memories will be available to the AI immediately as they are ingested, even if they are ingested while Auto-GPT is running.
+
+  In the example above, the script initializes the memory and ingests all files within the `/seed_data` directory into memory, with an overlap between chunks of 200 and a maximum length of 1000 per chunk (matching the command shown). Note that you can also use the `--file` argument to ingest a single file into memory, and that the script will only ingest files within the `/auto_gpt_workspace` directory.
+
+  You can adjust the `max_length` and `overlap` parameters to fine-tune how documents are presented to the AI when it "recalls" that memory (see the illustrative sketch after this list):
+
+  - Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created, increasing memory-backend usage and OpenAI API requests.
+  - Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks.
+  - Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
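+
+For intuition, here is a minimal, illustrative sketch of chunking with overlap — a hypothetical helper, not the actual `data_ingestion.py` implementation:
+
+```python
+from typing import List
+
+
+def split_into_chunks(text: str, max_length: int = 4000, overlap: int = 200) -> List[str]:
+    """Split text into chunks of at most max_length characters.
+
+    Consecutive chunks share `overlap` characters, so context that
+    straddles a boundary is still recallable from either chunk.
+    """
+    chunks = []
+    start = 0
+    while start < len(text):
+        chunks.append(text[start : start + max_length])
+        if start + max_length >= len(text):
+            break
+        start += max_length - overlap
+    return chunks
+
+
+# A larger overlap or a smaller max_length means more chunks, and therefore
+# more memory-backend writes and more OpenAI API requests.
+print(len(split_into_chunks("x" * 10_000)))  # 3
+```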
+
+## Continuous Mode ⚠️
+
+Run the AI 100% automated, **without** user authorisation. Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.
+
+1. Run the `main.py` Python script in your terminal:
+
+```
+python -m autogpt --continuous
+python -m autogpt --speak --continuous # with speech
+```
+
+2. To exit the program, press Ctrl + C
+
+## GPT-3.5 ONLY Mode
+
+If you don't have access to the GPT-4 API, this mode will allow you to use Auto-GPT!
+
+```
+python -m autogpt --gpt3only
+python -m autogpt --speak --gpt3only # with speech
+```
+
+It is recommended to use a virtual machine for tasks that require high security measures, to prevent any potential harm to your main computer's system and data.
+
+## 🖼 Image Generation
+
+By default, Auto-GPT uses DALL-E for image generation. To use Stable Diffusion, a [HuggingFace API token](https://huggingface.co/settings/tokens) is required.
+
+Once you have the token, set these variables in your `.env`:
+
+```
+IMAGE_PROVIDER=sd
+HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN"
+```
+
+## ⚠️ Limitations
+
+This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
+
+1. Not a polished application or product, just an experiment
+2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results!
+3. Quite expensive to run, so set and monitor your API key limits with OpenAI!
+
+## 🛡 Disclaimer
+
+Disclaimer: This project, Auto-GPT, is an experimental application provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise.
+
+The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by Auto-GPT.
+
+**Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and to set up any necessary limits or alerts to prevent unexpected charges.
+
+As an autonomous experiment, Auto-GPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software.
+
+By using Auto-GPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms.
\ No newline at end of file
diff --git a/autogpt/__init__.py b/autogpt/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
new file mode 100644
index 0000000..4612099
--- /dev/null
+++ b/autogpt/__main__.py
@@ -0,0 +1,50 @@
+"""Main script for the autogpt package."""
+import logging
+from colorama import Fore
+from autogpt.agent.agent import Agent
+from autogpt.args import parse_arguments
+
+from autogpt.config import Config, check_openai_api_key
+from autogpt.logs import logger
+from autogpt.memory import get_memory
+
+from autogpt.prompt import construct_prompt
+
+# Load environment variables from .env file
+
+
+def main() -> None:
+    """Main function for the script"""
+    cfg = Config()
+    # TODO: fill in llm values here
+    check_openai_api_key()
+    parse_arguments()
+    logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
+    ai_name = ""
+    prompt = construct_prompt()
+    # print(prompt)
+    # Initialize variables
+    full_message_history = []
+    next_action_count = 0
+    # Make a constant:
+    user_input = (
+        "Determine which next command to use, and respond using the"
+        " format specified above:"
+    )
+    # Initialize memory and make sure it is empty.
+    # this is particularly important for indexing and referencing pinecone memory
+    memory = get_memory(cfg, init=True)
+    logger.typewriter_log(
+        "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
+    )
+    logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
+    agent = Agent(
+        ai_name=ai_name,
+        memory=memory,
+        full_message_history=full_message_history,
+        next_action_count=next_action_count,
+        prompt=prompt,
+        user_input=user_input,
+    )
+    agent.start_interaction_loop()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/autogpt/agent/__init__.py b/autogpt/agent/__init__.py
new file mode 100644
index 0000000..e928af2
--- /dev/null
+++ b/autogpt/agent/__init__.py
@@ -0,0 +1,4 @@
+from autogpt.agent.agent import Agent
+from autogpt.agent.agent_manager import AgentManager
+
+__all__ = ["Agent", "AgentManager"]
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
new file mode 100644
index 0000000..6732c07
--- /dev/null
+++ b/autogpt/agent/agent.py
@@ -0,0 +1,177 @@
+from colorama import Fore, Style
+from autogpt.app import execute_command, get_command
+
+from autogpt.chat import chat_with_ai, create_chat_message
+from autogpt.config import Config
+from autogpt.json_fixes.bracket_termination import (
+    attempt_to_fix_json_by_finding_outermost_brackets,
+)
+from autogpt.logs import logger, print_assistant_thoughts
+from autogpt.speech import say_text
+from autogpt.spinner import Spinner
+from autogpt.utils import clean_input
+
+
+class Agent:
+    """Agent class for interacting with Auto-GPT.
+
+    Attributes:
+        ai_name: The name of the agent.
+        memory: The memory object to use.
+        full_message_history: The full message history.
+        next_action_count: The number of actions to execute.
+        prompt: The prompt to use.
+        user_input: The user input.
+ + """ + + def __init__( + self, + ai_name, + memory, + full_message_history, + next_action_count, + prompt, + user_input, + ): + self.ai_name = ai_name + self.memory = memory + self.full_message_history = full_message_history + self.next_action_count = next_action_count + self.prompt = prompt + self.user_input = user_input + + def start_interaction_loop(self): + # Interaction Loop + cfg = Config() + loop_count = 0 + command_name = None + arguments = None + while True: + # Discontinue if continuous limit is reached + loop_count += 1 + if ( + cfg.continuous_mode + and cfg.continuous_limit > 0 + and loop_count > cfg.continuous_limit + ): + logger.typewriter_log( + "连续达到限制: ", Fore.YELLOW, f"{cfg.continuous_limit}" + ) + break + + # Send message to AI, get response + with Spinner("Thinking... "): + assistant_reply = chat_with_ai( + self.prompt, + self.user_input, + self.full_message_history, + self.memory, + cfg.fast_token_limit, + ) # TODO: This hardcodes the model to use GPT3.5. Make this an argument + + # Print Assistant thoughts + print_assistant_thoughts(self.ai_name, assistant_reply) + + # Get command name and arguments + try: + command_name, arguments = get_command( + attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply) + ) + if cfg.speak_mode: + say_text(f"我要执行 {command_name}") + except Exception as e: + logger.error("Error: \n", str(e)) + + if not cfg.continuous_mode and self.next_action_count == 0: + ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### + # Get key press: Prompt the user to press enter to continue or escape + # to exit + self.user_input = "" + logger.typewriter_log( + "下一步操作: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}" + f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", + ) + print( + f"输入'y'授权命令,'y -N'运行N个连续命令,'n'退出程序,或为{self.ai_name}输入反馈...", + flush=True) + while True: + console_input = clean_input( + Fore.MAGENTA + "Input:" + Style.RESET_ALL + ) + if console_input.lower().rstrip() == "y": + self.user_input = "GENERATE NEXT COMMAND JSON" + break + elif console_input.lower().startswith("y -"): + try: + self.next_action_count = abs( + int(console_input.split(" ")[1]) + ) + self.user_input = "GENERATE NEXT COMMAND JSON" + except ValueError: + print("输入格式无效。 请输入'y -n',其中 n 是连续任务的数量。 例如: y -1") + continue + break + elif console_input.lower() == "n": + self.user_input = "EXIT" + break + else: + self.user_input = console_input + command_name = "human_feedback" + break + + if self.user_input == "GENERATE NEXT COMMAND JSON": + logger.typewriter_log( + "-=-=-=-=-=-=-= 用户授权的命令 -=-=-=-=-=-=-=", + Fore.MAGENTA, + "", + ) + elif self.user_input == "EXIT": + print("退出中...", flush=True) + break + else: + # Print command + logger.typewriter_log( + "下一步操作: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}" + f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", + ) + + # Execute command + if command_name is not None and command_name.lower().startswith("error"): + result = ( + f"Command {command_name} 抛出以下错误: {arguments}" + ) + elif command_name == "human_feedback": + result = f"人工反馈: {self.user_input}" + else: + result = ( + f"Command {command_name} returned: " + f"{execute_command(command_name, arguments)}" + ) + if self.next_action_count > 0: + self.next_action_count -= 1 + + memory_to_add = ( + f"机器人回复: {assistant_reply} " + f"\n结果: {result} " + f"\n人工反馈: {self.user_input} " + ) + + self.memory.add(memory_to_add) + + # Check if there's a result from the command append it to the message + # history + if 
diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
new file mode 100644
index 0000000..e4bfb12
--- /dev/null
+++ b/autogpt/agent/agent_manager.py
@@ -0,0 +1,101 @@
+"""Agent manager for managing GPT agents"""
+from __future__ import annotations
+
+from autogpt.llm_utils import create_chat_completion
+from autogpt.config.config import Singleton
+
+
+class AgentManager(metaclass=Singleton):
+    """Agent manager for managing GPT agents"""
+
+    def __init__(self):
+        self.next_key = 0
+        self.agents = {}  # key, (task, full_message_history, model)
+
+    # Create new GPT agent
+    # TODO: Centralise use of create_chat_completion() to globally enforce token limit
+
+    def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
+        """Create a new agent and return its key
+
+        Args:
+            task: The task to perform
+            prompt: The prompt to use
+            model: The model to use
+
+        Returns:
+            The key of the new agent
+        """
+        messages = [
+            {"role": "user", "content": prompt},
+        ]
+
+        # Start GPT instance
+        agent_reply = create_chat_completion(
+            model=model,
+            messages=messages,
+        )
+
+        # Update full message history
+        messages.append({"role": "assistant", "content": agent_reply})
+
+        key = self.next_key
+        # This is done instead of len(agents) to make keys unique even if agents
+        # are deleted
+        self.next_key += 1
+
+        self.agents[key] = (task, messages, model)
+
+        return key, agent_reply
+
+    def message_agent(self, key: str | int, message: str) -> str:
+        """Send a message to an agent and return its response
+
+        Args:
+            key: The key of the agent to message
+            message: The message to send to the agent
+
+        Returns:
+            The agent's response
+        """
+        task, messages, model = self.agents[int(key)]
+
+        # Add user message to message history before sending to agent
+        messages.append({"role": "user", "content": message})
+
+        # Start GPT instance
+        agent_reply = create_chat_completion(
+            model=model,
+            messages=messages,
+        )
+
+        # Update full message history
+        messages.append({"role": "assistant", "content": agent_reply})
+
+        return agent_reply
+
+    def list_agents(self) -> list[tuple[str | int, str]]:
+        """Return a list of all agents
+
+        Returns:
+            A list of tuples of the form (key, task)
+        """
+
+        # Return a list of agent keys and their tasks
+        return [(key, task) for key, (task, _, _) in self.agents.items()]
+
+    def delete_agent(self, key: str | int) -> bool:
+        """Delete an agent from the agent manager
+
+        Args:
+            key: The key of the agent to delete
+
+        Returns:
+            True if successful, False otherwise
+        """
+
+        try:
+            del self.agents[int(key)]
+            return True
+        except KeyError:
+            return False
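
A quick usage sketch of the class above — the model name and prompts are placeholders for illustration:

```python
from autogpt.agent.agent_manager import AgentManager

manager = AgentManager()  # Singleton: repeated instantiations return the same object

# Spawn a sub-agent and capture its first reply.
key, first_reply = manager.create_agent(
    task="summarise a document",              # placeholder task label
    prompt="You are a summarisation agent.",  # placeholder system prompt
    model="gpt-3.5-turbo",                    # placeholder model name
)

follow_up = manager.message_agent(key, "Summarise: ...")
print(manager.list_agents())  # [(0, 'summarise a document')]
manager.delete_agent(key)
```
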
diff --git a/autogpt/agent_manager.py b/autogpt/agent_manager.py
new file mode 100644
index 0000000..7ebcf56
--- /dev/null
+++ b/autogpt/agent_manager.py
@@ -0,0 +1,75 @@
+from autogpt.llm_utils import create_chat_completion
+
+next_key = 0
+agents = {}  # key, (task, full_message_history, model)
+
+# Create new GPT agent
+# TODO: Centralise use of create_chat_completion() to globally enforce token limit
+
+
+def create_agent(task, prompt, model):
+    """Create a new agent and return its key"""
+    global next_key
+    global agents
+
+    messages = [
+        {"role": "user", "content": prompt},
+    ]
+
+    # Start GPT instance
+    agent_reply = create_chat_completion(
+        model=model,
+        messages=messages,
+    )
+
+    # Update full message history
+    messages.append({"role": "assistant", "content": agent_reply})
+
+    key = next_key
+    # This is done instead of len(agents) to make keys unique even if agents
+    # are deleted
+    next_key += 1
+
+    agents[key] = (task, messages, model)
+
+    return key, agent_reply
+
+
+def message_agent(key, message):
+    """Send a message to an agent and return its response"""
+    global agents
+
+    task, messages, model = agents[int(key)]
+
+    # Add user message to message history before sending to agent
+    messages.append({"role": "user", "content": message})
+
+    # Start GPT instance
+    agent_reply = create_chat_completion(
+        model=model,
+        messages=messages,
+    )
+
+    # Update full message history
+    messages.append({"role": "assistant", "content": agent_reply})
+
+    return agent_reply
+
+
+def list_agents():
+    """Return a list of all agents"""
+    global agents
+
+    # Return a list of agent keys and their tasks
+    return [(key, task) for key, (task, _, _) in agents.items()]
+
+
+def delete_agent(key):
+    """Delete an agent and return True if successful, False otherwise"""
+    global agents
+
+    try:
+        del agents[int(key)]
+        return True
+    except KeyError:
+        return False
"name" not in command: + return "Error:", "Missing 'name' field in 'command' object" + + command_name = command["name"] + + # Use an empty dictionary if 'args' field is not present in 'command' object + arguments = command.get("args", {}) + + return command_name, arguments + except json.decoder.JSONDecodeError: + return "Error:", "Invalid JSON" + # All other errors, return "Error: + error message" + except Exception as e: + return "Error:", str(e) + + +def map_command_synonyms(command_name: str): + """Takes the original command name given by the AI, and checks if the + string matches a list of common/known hallucinations + """ + synonyms = [ + ("write_file", "write_to_file"), + ("create_file", "write_to_file"), + ("search", "google"), + ] + for seen_command, actual_command_name in synonyms: + if command_name == seen_command: + return actual_command_name + return command_name + + +def execute_command(command_name: str, arguments): + """Execute the command and return the result + + Args: + command_name (str): The name of the command to execute + arguments (dict): The arguments for the command + + Returns: + str: The result of the command""" + memory = get_memory(CFG) + + try: + command_name = map_command_synonyms(command_name) + if command_name == "google": + # Check if the Google API key is set and use the official search method + # If the API key is not set or has only whitespaces, use the unofficial + # search method + key = CFG.google_api_key + if key and key.strip() and key != "your-google-api-key": + google_result = google_official_search(arguments["input"]) + return google_result + else: + google_result = google_search(arguments["input"]) + + # google_result can be a list or a string depending on the search results + if isinstance(google_result, list): + safe_message = [google_result_single.encode('utf-8', 'ignore') for google_result_single in google_result] + else: + safe_message = google_result.encode('utf-8', 'ignore') + + return str(safe_message) + elif command_name == "memory_add": + return memory.add(arguments["string"]) + elif command_name == "start_agent": + return start_agent( + arguments["name"], arguments["task"], arguments["prompt"] + ) + elif command_name == "message_agent": + return message_agent(arguments["key"], arguments["message"]) + elif command_name == "list_agents": + return list_agents() + elif command_name == "delete_agent": + return delete_agent(arguments["key"]) + elif command_name == "get_text_summary": + return get_text_summary(arguments["url"], arguments["question"]) + elif command_name == "get_hyperlinks": + return get_hyperlinks(arguments["url"]) + elif command_name == "clone_repository": + return clone_repository( + arguments["repository_url"], arguments["clone_path"] + ) + elif command_name == "read_file": + return read_file(arguments["file"]) + elif command_name == "write_to_file": + return write_to_file(arguments["file"], arguments["text"]) + elif command_name == "append_to_file": + return append_to_file(arguments["file"], arguments["text"]) + elif command_name == "delete_file": + return delete_file(arguments["file"]) + elif command_name == "search_files": + return search_files(arguments["directory"]) + elif command_name == "browse_website": + return browse_website(arguments["url"], arguments["question"]) + # TODO: Change these to take in a file rather than pasted code, if + # non-file is given, return instructions "Input should be a python + # filepath, write your code to file and try again" + elif command_name == "evaluate_code": + return 
+        elif command_name == "improve_code":
+            return improve_code(arguments["suggestions"], arguments["code"])
+        elif command_name == "write_tests":
+            return write_tests(arguments["code"], arguments.get("focus"))
+        elif command_name == "execute_python_file":  # Add this command
+            return execute_python_file(arguments["file"])
+        elif command_name == "execute_shell":
+            if CFG.execute_local_commands:
+                return execute_shell(arguments["command_line"])
+            else:
+                return (
+                    "You are not allowed to run local shell commands. To execute"
+                    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
+                    "in your config. Do not attempt to bypass the restriction."
+                )
+        elif command_name == "read_audio_from_file":
+            return read_audio_from_file(arguments["file"])
+        elif command_name == "generate_image":
+            return generate_image(arguments["prompt"])
+        elif command_name == "send_tweet":
+            return send_tweet(arguments["text"])
+        elif command_name == "do_nothing":
+            return "No action performed."
+        elif command_name == "task_complete":
+            shutdown()
+        else:
+            return (
+                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
+                " list for available commands and only respond in the specified JSON"
+                " format."
+            )
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def get_text_summary(url: str, question: str) -> str:
+    """Return the results of a google search
+
+    Args:
+        url (str): The url to scrape
+        question (str): The question to summarize the text for
+
+    Returns:
+        str: The summary of the text
+    """
+    text = scrape_text(url)
+    summary = summarize_text(url, text, question)
+    return f""" "Result" : {summary}"""
+
+
+def get_hyperlinks(url: str) -> Union[str, List[str]]:
+    """Return the results of a google search
+
+    Args:
+        url (str): The url to scrape
+
+    Returns:
+        str or list: The hyperlinks on the page
+    """
+    return scrape_links(url)
+
+
+def shutdown() -> NoReturn:
+    """Shut down the program"""
+    print("Shutting down...")
+    quit()
+
+
+def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
+    """Start an agent with a given name, task, and prompt
+
+    Args:
+        name (str): The name of the agent
+        task (str): The task of the agent
+        prompt (str): The prompt for the agent
+        model (str): The model to use for the agent
+
+    Returns:
+        str: The response of the agent
+    """
+    # Remove underscores from name
+    voice_name = name.replace("_", " ")
+
+    first_message = f"""You are {name}. Respond with: "Acknowledged"."""
+    agent_intro = f"{voice_name} here, Reporting for duty!"
+
+    # Create agent
+    if CFG.speak_mode:
+        say_text(agent_intro, 1)
+    key, ack = AGENT_MANAGER.create_agent(task, first_message, model)
+
+    if CFG.speak_mode:
+        say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
+
+    # Assign task (prompt), get response
+    agent_response = AGENT_MANAGER.message_agent(key, prompt)
+
+    return f"Agent {name} created with key {key}. First response: {agent_response}"
+
+
+def message_agent(key: str, message: str) -> str:
+    """Message an agent with a given key and message"""
+    # Check if the key is a valid integer
+    if is_valid_int(key):
+        agent_response = AGENT_MANAGER.message_agent(int(key), message)
+    else:
+        return "Invalid key, must be an integer."
+
+    # Speak response
+    if CFG.speak_mode:
+        say_text(agent_response, 1)
+    return agent_response
+
+
+def list_agents():
+    """List all agents
+
+    Returns:
+        str: A list of all agents
+    """
+    return "List of agents:\n" + "\n".join(
+        [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
+    )
+
+
+def delete_agent(key: str) -> str:
+    """Delete an agent with a given key
+
+    Args:
+        key (str): The key of the agent to delete
+
+    Returns:
+        str: A message indicating whether the agent was deleted or not
+    """
+    result = AGENT_MANAGER.delete_agent(key)
+    return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
diff --git a/autogpt/args.py b/autogpt/args.py
new file mode 100644
index 0000000..eca3233
--- /dev/null
+++ b/autogpt/args.py
@@ -0,0 +1,137 @@
+"""This module contains the argument parsing logic for the script."""
+import argparse
+
+from colorama import Fore
+from autogpt import utils
+from autogpt.config import Config
+from autogpt.logs import logger
+from autogpt.memory import get_supported_memory_backends
+
+CFG = Config()
+
+
+def parse_arguments() -> None:
+    """Parses the arguments passed to the script
+
+    Returns:
+        None
+    """
+    CFG.set_debug_mode(False)
+    CFG.set_continuous_mode(False)
+    CFG.set_speak_mode(False)
+
+    parser = argparse.ArgumentParser(description="Process arguments.")
+    parser.add_argument(
+        "--continuous", "-c", action="store_true", help="Enable Continuous Mode"
+    )
+    parser.add_argument(
+        "--continuous-limit",
+        "-l",
+        type=int,
+        dest="continuous_limit",
+        help="Defines the number of times to run in continuous mode",
+    )
+    parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")
+    parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")
+    parser.add_argument(
+        "--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
+    )
+    parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")
+    parser.add_argument(
+        "--use-memory",
+        "-m",
+        dest="memory_type",
+        help="Defines which Memory backend to use",
+    )
+    parser.add_argument(
+        "--skip-reprompt",
+        "-y",
+        dest="skip_reprompt",
+        action="store_true",
+        help="Skips the re-prompting messages at the beginning of the script",
+    )
+    parser.add_argument(
+        "--use-browser",
+        "-b",
+        dest="browser_name",
+        help="Specifies which web-browser to use when using selenium to scrape the web.",
+    )
+    parser.add_argument(
+        "--ai-settings",
+        "-C",
+        dest="ai_settings_file",
+        help="Specifies which ai_settings.yaml file to use, will also automatically"
+        " skip the re-prompt.",
+    )
+    args = parser.parse_args()
+
+    if args.debug:
+        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
+        CFG.set_debug_mode(True)
+
+    if args.continuous:
+        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
+        logger.typewriter_log(
+            "WARNING: ",
+            Fore.RED,
+            "Continuous mode is not recommended. It is potentially dangerous and may"
+            " cause your AI to run forever or carry out actions you would not usually"
Use at your own risk.", + ) + CFG.set_continuous_mode(True) + + if args.continuous_limit: + logger.typewriter_log( + "Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}" + ) + CFG.set_continuous_limit(args.continuous_limit) + + # Check if continuous limit is used without continuous mode + if args.continuous_limit and not args.continuous: + parser.error("--continuous-limit can only be used with --continuous") + + if args.speak: + logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") + CFG.set_speak_mode(True) + + if args.gpt3only: + logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") + CFG.set_smart_llm_model(CFG.fast_llm_model) + + if args.gpt4only: + logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") + CFG.set_fast_llm_model(CFG.smart_llm_model) + + if args.memory_type: + supported_memory = get_supported_memory_backends() + chosen = args.memory_type + if chosen not in supported_memory: + logger.typewriter_log( + "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", + Fore.RED, + f"{supported_memory}", + ) + logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend) + else: + CFG.memory_backend = chosen + + if args.skip_reprompt: + logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED") + CFG.skip_reprompt = True + + if args.ai_settings_file: + file = args.ai_settings_file + + # Validate file + (validated, message) = utils.validate_yaml_file(file) + if not validated: + logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message) + logger.double_check() + exit(1) + + logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file) + CFG.ai_settings_file = file + CFG.skip_reprompt = True + + if args.browser_name: + CFG.selenium_web_browser = args.browser_name diff --git a/autogpt/browse.py b/autogpt/browse.py new file mode 100644 index 0000000..e2950da --- /dev/null +++ b/autogpt/browse.py @@ -0,0 +1,198 @@ +from urllib.parse import urljoin, urlparse + +import requests +from bs4 import BeautifulSoup + +from autogpt.config import Config +from autogpt.llm_utils import create_chat_completion +from autogpt.memory import get_memory + +cfg = Config() +memory = get_memory(cfg) + +session = requests.Session() +session.headers.update({"User-Agent": cfg.user_agent}) + + +# Function to check if the URL is valid +def is_valid_url(url): + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except ValueError: + return False + + +# Function to sanitize the URL +def sanitize_url(url): + return urljoin(url, urlparse(url).path) + + +# Define and check for local file address prefixes +def check_local_file_access(url): + local_prefixes = [ + "file:///", + "file://localhost", + "http://localhost", + "https://localhost", + ] + return any(url.startswith(prefix) for prefix in local_prefixes) + + +def get_response(url, timeout=10): + try: + # Restrict access to local files + if check_local_file_access(url): + raise ValueError("Access to local files is restricted") + + # Most basic check if the URL is valid: + if not url.startswith("http://") and not url.startswith("https://"): + raise ValueError("Invalid URL format") + + sanitized_url = sanitize_url(url) + + response = session.get(sanitized_url, timeout=timeout) + + # Check if the response contains an HTTP error + if response.status_code >= 400: + return None, "Error: HTTP " + str(response.status_code) + " error" + + return response, None + except ValueError as ve: + # Handle invalid URL format + return None, "Error: " + str(ve) + + except 
requests.exceptions.RequestException as re: + # Handle exceptions related to the HTTP request + # (e.g., connection errors, timeouts, etc.) + return None, "Error: " + str(re) + + +def scrape_text(url): + """Scrape text from a webpage""" + response, error_message = get_response(url) + if error_message: + return error_message + if not response: + return "Error: Could not get response" + + soup = BeautifulSoup(response.text, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + text = soup.get_text() + lines = (line.strip() for line in text.splitlines()) + chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + text = "\n".join(chunk for chunk in chunks if chunk) + + return text + + +def extract_hyperlinks(soup): + """Extract hyperlinks from a BeautifulSoup object""" + hyperlinks = [] + for link in soup.find_all("a", href=True): + hyperlinks.append((link.text, link["href"])) + return hyperlinks + + +def format_hyperlinks(hyperlinks): + """Format hyperlinks into a list of strings""" + formatted_links = [] + for link_text, link_url in hyperlinks: + formatted_links.append(f"{link_text} ({link_url})") + return formatted_links + + +def scrape_links(url): + """Scrape links from a webpage""" + response, error_message = get_response(url) + if error_message: + return error_message + if not response: + return "Error: Could not get response" + soup = BeautifulSoup(response.text, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + hyperlinks = extract_hyperlinks(soup) + + return format_hyperlinks(hyperlinks) + + +def split_text(text, max_length=cfg.browse_chunk_max_length): + """Split text into chunks of a maximum length""" + paragraphs = text.split("\n") + current_length = 0 + current_chunk = [] + + for paragraph in paragraphs: + if current_length + len(paragraph) + 1 <= max_length: + current_chunk.append(paragraph) + current_length += len(paragraph) + 1 + else: + yield "\n".join(current_chunk) + current_chunk = [paragraph] + current_length = len(paragraph) + 1 + + if current_chunk: + yield "\n".join(current_chunk) + + +def create_message(chunk, question): + """Create a message for the user to summarize a chunk of text""" + return { + "role": "user", + "content": f'"""{chunk}""" 使用以上文本,请以中文回答以下问题' + f' question: "{question}" -- 如果问题无法使用文本回答' + " 请总结文本", + } + + +def summarize_text(url, text, question): + """Summarize text using the LLM model""" + if not text: + return "Error: 没有文字可以总结" + + text_length = len(text) + print(f"Text length: {text_length} characters") + + summaries = [] + chunks = list(split_text(text)) + + for i, chunk in enumerate(chunks): + print(f"Adding chunk {i + 1} / {len(chunks)} to memory") + + memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}" + + memory.add(memory_to_add) + + print(f"Summarizing chunk {i + 1} / {len(chunks)}") + messages = [create_message(chunk, question)] + + summary = create_chat_completion( + model=cfg.fast_llm_model, + messages=messages, + max_tokens=cfg.browse_summary_max_token, + ) + summaries.append(summary) + print(f"Added chunk {i + 1} summary to memory") + + memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}" + + memory.add(memory_to_add) + + print(f"Summarized {len(chunks)} chunks.") + + combined_summary = "\n".join(summaries) + messages = [create_message(combined_summary, question)] + + final_summary = create_chat_completion( + model=cfg.fast_llm_model, + messages=messages, + max_tokens=cfg.browse_summary_max_token, + ) + + 
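+    # A final pass over the combined chunk summaries produces one coherent answer.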
+    return final_summary
diff --git a/autogpt/chat.py b/autogpt/chat.py
new file mode 100644
index 0000000..a45eaec
--- /dev/null
+++ b/autogpt/chat.py
@@ -0,0 +1,175 @@
+import time
+
+from openai.error import RateLimitError
+
+from autogpt import token_counter
+from autogpt.config import Config
+from autogpt.llm_utils import create_chat_completion
+from autogpt.logs import logger
+
+cfg = Config()
+
+
+def create_chat_message(role, content):
+    """
+    Create a chat message with the given role and content.
+
+    Args:
+        role (str): The role of the message sender, e.g., "system", "user", or "assistant".
+        content (str): The content of the message.
+
+    Returns:
+        dict: A dictionary containing the role and content of the message.
+    """
+    return {"role": role, "content": content}
+
+
+def generate_context(prompt, relevant_memory, full_message_history, model):
+    current_context = [
+        create_chat_message("system", prompt),
+        create_chat_message(
+            "system", f"The current time and date is {time.strftime('%c')}"
+        ),
+        create_chat_message(
+            "system",
+            f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
+        ),
+    ]
+
+    # Add messages from the full message history until we reach the token limit
+    next_message_to_add_index = len(full_message_history) - 1
+    insertion_index = len(current_context)
+    # Count the currently used tokens
+    current_tokens_used = token_counter.count_message_tokens(current_context, model)
+    return (
+        next_message_to_add_index,
+        current_tokens_used,
+        insertion_index,
+        current_context,
+    )
+
+
+# TODO: Change debug from hardcode to argument
+def chat_with_ai(
+    prompt, user_input, full_message_history, permanent_memory, token_limit
+):
+    """Interact with the OpenAI API, sending the prompt, user input, message history,
+    and permanent memory."""
+    while True:
+        try:
+            """
+            Interact with the OpenAI API, sending the prompt, user input,
+            message history, and permanent memory.
+
+            Args:
+                prompt (str): The prompt explaining the rules to the AI.
+                user_input (str): The input from the user.
+                full_message_history (list): The list of all messages sent between the
+                    user and the AI.
+                permanent_memory (Obj): The memory object containing the permanent
+                    memory.
+                token_limit (int): The maximum number of tokens allowed in the API call.
+
+            Returns:
+                str: The AI's response.
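+
+            Example (illustrative; the literal argument values are assumptions):
+                reply = chat_with_ai(prompt, "Determine which next command to use",
+                                     full_message_history, memory, 4000)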
+            """
+            model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
+            # Reserve 1000 tokens for the response
+
+            logger.debug(f"Token limit: {token_limit}")
+            send_token_limit = token_limit - 1000
+
+            relevant_memory = (
+                ""
+                if len(full_message_history) == 0
+                else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
+            )
+
+            logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
+
+            (
+                next_message_to_add_index,
+                current_tokens_used,
+                insertion_index,
+                current_context,
+            ) = generate_context(prompt, relevant_memory, full_message_history, model)
+
+            while current_tokens_used > 2500:
+                # remove memories until we are under 2500 tokens
+                relevant_memory = relevant_memory[:-1]
+                (
+                    next_message_to_add_index,
+                    current_tokens_used,
+                    insertion_index,
+                    current_context,
+                ) = generate_context(
+                    prompt, relevant_memory, full_message_history, model
+                )
+
+            current_tokens_used += token_counter.count_message_tokens(
+                [create_chat_message("user", user_input)], model
+            )  # Account for user input (appended later)
+
+            while next_message_to_add_index >= 0:
+                # print (f"CURRENT TOKENS USED: {current_tokens_used}")
+                message_to_add = full_message_history[next_message_to_add_index]
+
+                tokens_to_add = token_counter.count_message_tokens(
+                    [message_to_add], model
+                )
+                if current_tokens_used + tokens_to_add > send_token_limit:
+                    break
+
+                # Add the most recent message to the start of the current context,
+                # after the two system prompts.
+                current_context.insert(
+                    insertion_index, full_message_history[next_message_to_add_index]
+                )
+
+                # Count the currently used tokens
+                current_tokens_used += tokens_to_add
+
+                # Move to the next most recent message in the full message history
+                next_message_to_add_index -= 1
+
+            # Append user input, the length of this is accounted for above
+            current_context.extend([create_chat_message("user", user_input)])
+
+            # Calculate remaining tokens
+            tokens_remaining = token_limit - current_tokens_used
+            # assert tokens_remaining >= 0, "Tokens remaining is negative.
+            # This should never happen, please submit a bug report at
+            # https://www.github.com/Torantulino/Auto-GPT"
+
+            # Debug print the current context
+            logger.debug(f"Token limit: {token_limit}")
+            logger.debug(f"Send Token Count: {current_tokens_used}")
+            logger.debug(f"Tokens remaining for response: {tokens_remaining}")
+            logger.debug("------------ CONTEXT SENT TO AI ---------------")
+            for message in current_context:
+                # Skip printing the prompt
+                if message["role"] == "system" and message["content"] == prompt:
+                    continue
+                logger.debug(f"{message['role'].capitalize()}: {message['content']}")
+                logger.debug("")
+            logger.debug("----------- END OF CONTEXT ----------------")
+
+            # TODO: use a model defined elsewhere, so that model can contain
+            # temperature and other settings we care about
+            assistant_reply = create_chat_completion(
+                model=model,
+                messages=current_context,
+                max_tokens=tokens_remaining,
+            )
+
+            # Update full message history
+            full_message_history.append(create_chat_message("user", user_input))
+            full_message_history.append(
+                create_chat_message("assistant", assistant_reply)
+            )
+
+            return assistant_reply
+        except RateLimitError:
+            # TODO: When we switch to langchain, this is built in
+            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
+            time.sleep(10)
diff --git a/autogpt/commands/__init__.py b/autogpt/commands/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py
new file mode 100644
index 0000000..84819d5
--- /dev/null
+++ b/autogpt/commands/audio_text.py
@@ -0,0 +1,35 @@
+import requests
+import json
+
+from autogpt.config import Config
+from autogpt.workspace import path_in_workspace
+
+cfg = Config()
+
+
+def read_audio_from_file(audio_path):
+    audio_path = path_in_workspace(audio_path)
+    with open(audio_path, "rb") as audio_file:
+        audio = audio_file.read()
+    return read_audio(audio)
+
+
+def read_audio(audio):
+    model = cfg.huggingface_audio_to_text_model
+    api_url = f"https://api-inference.huggingface.co/models/{model}"
+    api_token = cfg.huggingface_api_token
+    headers = {"Authorization": f"Bearer {api_token}"}
+
+    if api_token is None:
+        raise ValueError(
+            "You need to set your Hugging Face API token in the config file."
+        )
+
+    response = requests.post(
+        api_url,
+        headers=headers,
+        data=audio,
+    )
+
+    text = json.loads(response.content.decode("utf-8"))["text"]
+    return "The audio says: " + text
diff --git a/autogpt/commands/evaluate_code.py b/autogpt/commands/evaluate_code.py
new file mode 100644
index 0000000..8f7cbca
--- /dev/null
+++ b/autogpt/commands/evaluate_code.py
@@ -0,0 +1,25 @@
+"""Code evaluation module."""
+from __future__ import annotations
+
+from autogpt.llm_utils import call_ai_function
+
+
+def evaluate_code(code: str) -> list[str]:
+    """
+    A function that takes in a string and returns a response from create chat
+    completion api call.
+
+    Parameters:
+        code (str): Code to be evaluated.
+    Returns:
+        A result string from create chat completion. A list of suggestions to
+        improve the code.
+    """
+
+    function_string = "def analyze_code(code: str) -> List[str]:"
+    args = [code]
+    description_string = (
+        "Analyzes the given code and returns a list of suggestions"
+        " for improvements."
+    )
+
+    return call_ai_function(function_string, args, description_string)
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
new file mode 100644
index 0000000..129307a
--- /dev/null
+++ b/autogpt/commands/execute_code.py
@@ -0,0 +1,123 @@
+"""Execute code in a Docker container"""
+import os
+import subprocess
+
+import docker
+from docker.errors import ImageNotFound
+
+from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+
+
+def execute_python_file(file: str):
+    """Execute a Python file in a Docker container and return the output
+
+    Args:
+        file (str): The name of the file to execute
+
+    Returns:
+        str: The output of the file
+    """
+
+    print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'")
+
+    if not file.endswith(".py"):
+        return "Error: Invalid file type. Only .py files are allowed."
+
+    file_path = path_in_workspace(file)
+
+    if not os.path.isfile(file_path):
+        return f"Error: File '{file}' does not exist."
+
+    if we_are_running_in_a_docker_container():
+        result = subprocess.run(
+            f"python {file_path}", capture_output=True, encoding="utf8", shell=True
+        )
+        if result.returncode == 0:
+            return result.stdout
+        else:
+            return f"Error: {result.stderr}"
+
+    try:
+        client = docker.from_env()
+
+        image_name = "python:3.10"
+        try:
+            client.images.get(image_name)
+            print(f"Image '{image_name}' found locally")
+        except ImageNotFound:
+            print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
+            # Use the low-level API to stream the pull response
+            low_level_client = docker.APIClient()
+            for line in low_level_client.pull(image_name, stream=True, decode=True):
+                # Print the status and progress, if available
+                status = line.get("status")
+                progress = line.get("progress")
+                if status and progress:
+                    print(f"{status}: {progress}")
+                elif status:
+                    print(status)
+
+        # You can replace 'python:3.10' with the desired Python image/version
+        # You can find available Python images on Docker Hub:
+        # https://hub.docker.com/_/python
+        container = client.containers.run(
+            image_name,
+            f"python {file}",
+            volumes={
+                os.path.abspath(WORKSPACE_PATH): {
+                    "bind": "/workspace",
+                    "mode": "ro",
+                }
+            },
+            working_dir="/workspace",
+            stderr=True,
+            stdout=True,
+            detach=True,
+        )
+
+        container.wait()
+        logs = container.logs().decode("utf-8")
+        container.remove()
+
+        # print(f"Execution complete. Output: {output}")
+        # print(f"Logs: {logs}")
+
+        return logs
+
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def execute_shell(command_line: str) -> str:
+    """Execute a shell command and return the output
+
+    Args:
+        command_line (str): The command line to execute
+
+    Returns:
+        str: The output of the command
+    """
+    current_dir = os.getcwd()
+    # Change dir into workspace if necessary
+    if str(WORKSPACE_PATH) not in current_dir:
+        os.chdir(WORKSPACE_PATH)
+
+    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
+
+    result = subprocess.run(command_line, capture_output=True, shell=True)
+    output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
+
+    # Change back to whatever the prior working dir was
+    os.chdir(current_dir)
+
+    return output
+
+
+def we_are_running_in_a_docker_container() -> bool:
+    """Check if we are running in a Docker container
+
+    Returns:
+        bool: True if we are running in a Docker container, False otherwise
+    """
+    return os.path.exists("/.dockerenv")
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
new file mode 100644
index 0000000..4e70fc2
--- /dev/null
+++ b/autogpt/commands/file_operations.py
@@ -0,0 +1,216 @@
+"""File operations for AutoGPT"""
+from __future__ import annotations
+
+import os
+import os.path
+from pathlib import Path
+from typing import Generator
+from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+
+LOG_FILE = "file_logger.txt"
+LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
+
+
+def check_duplicate_operation(operation: str, filename: str) -> bool:
+    """Check if the operation has already been performed on the given file
+
+    Args:
+        operation (str): The operation to check for
+        filename (str): The name of the file to check for
+
+    Returns:
+        bool: True if the operation has already been performed on the file
+    """
+    log_content = read_file(LOG_FILE)
+    log_entry = f"{operation}: {filename}\n"
+    return log_entry in log_content
+
+
+def log_operation(operation: str, filename: str) -> None:
+    """Log the file operation to file_logger.txt
+
+    Args:
+        operation (str): The operation to log
+        filename (str): The name of the file the operation was performed on
+    """
+    log_entry = f"{operation}: {filename}\n"
+
+    # Create the log file if it doesn't exist
+    if not os.path.exists(LOG_FILE_PATH):
+        with open(LOG_FILE_PATH, "w", encoding="utf-8") as f:
+            f.write("File Operation Logger ")
+
+    append_to_file(LOG_FILE, log_entry, shouldLog=False)
+
+
+def split_file(
+    content: str, max_length: int = 4000, overlap: int = 0
+) -> Generator[str, None, None]:
+    """
+    Split text into chunks of a specified maximum length with a specified overlap
+    between chunks.
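+
+    For example, with max_length=4000 and overlap=200, a 9,000-character input
+    yields chunks covering [0:4200], [3800:8000] and [7600:9000].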
+
+    :param content: The input text to be split into chunks
+    :param max_length: The maximum length of each chunk,
+        default is 4000 (about 1k token)
+    :param overlap: The number of overlapping characters between chunks,
+        default is no overlap
+    :return: A generator yielding chunks of text
+    """
+    start = 0
+    content_length = len(content)
+
+    while start < content_length:
+        end = start + max_length
+        if end + overlap < content_length:
+            chunk = content[start : end + overlap]
+        else:
+            chunk = content[start:content_length]
+        yield chunk
+        start += max_length - overlap
+
+
+def read_file(filename: str) -> str:
+    """Read a file and return the contents
+
+    Args:
+        filename (str): The name of the file to read
+
+    Returns:
+        str: The contents of the file
+    """
+    try:
+        filepath = path_in_workspace(filename)
+        with open(filepath, "r", encoding="utf-8") as f:
+            content = f.read()
+        return content
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def ingest_file(
+    filename: str, memory, max_length: int = 4000, overlap: int = 200
+) -> None:
+    """
+    Ingest a file by reading its content, splitting it into chunks with a specified
+    maximum length and overlap, and adding the chunks to the memory storage.
+
+    :param filename: The name of the file to ingest
+    :param memory: An object with an add() method to store the chunks in memory
+    :param max_length: The maximum length of each chunk, default is 4000
+    :param overlap: The number of overlapping characters between chunks, default is 200
+    """
+    try:
+        print(f"Working with file {filename}")
+        content = read_file(filename)
+        content_length = len(content)
+        print(f"File length: {content_length} characters")
+
+        chunks = list(split_file(content, max_length=max_length, overlap=overlap))
+
+        num_chunks = len(chunks)
+        for i, chunk in enumerate(chunks):
+            print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
+            memory_to_add = (
+                f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
+            )
+
+            memory.add(memory_to_add)
+
+        print(f"Done ingesting {num_chunks} chunks from {filename}.")
+    except Exception as e:
+        print(f"Error while ingesting file '{filename}': {str(e)}")
+
+
+def write_to_file(filename: str, text: str) -> str:
+    """Write text to a file
+
+    Args:
+        filename (str): The name of the file to write to
+        text (str): The text to write to the file
+
+    Returns:
+        str: A message indicating success or failure
+    """
+    if check_duplicate_operation("write", filename):
+        return "Error: File has already been updated."
+    try:
+        filepath = path_in_workspace(filename)
+        directory = os.path.dirname(filepath)
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+        with open(filepath, "w", encoding="utf-8") as f:
+            f.write(text)
+        log_operation("write", filename)
+        return "File written to successfully."
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str:
+    """Append text to a file
+
+    Args:
+        filename (str): The name of the file to append to
+        text (str): The text to append to the file
+
+    Returns:
+        str: A message indicating success or failure
+    """
+    try:
+        filepath = path_in_workspace(filename)
+        with open(filepath, "a") as f:
+            f.write(text)
+
+        if shouldLog:
+            log_operation("append", filename)
+
+        return "Text appended successfully."
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def delete_file(filename: str) -> str:
+    """Delete a file
+
+    Args:
+        filename (str): The name of the file to delete
+
+    Returns:
+        str: A message indicating success or failure
+    """
+    if check_duplicate_operation("delete", filename):
+        return "Error: File has already been deleted."
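+    # The deletion is logged below so check_duplicate_operation can flag repeats.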
+    try:
+        filepath = path_in_workspace(filename)
+        os.remove(filepath)
+        log_operation("delete", filename)
+        return "File deleted successfully."
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def search_files(directory: str) -> list[str]:
+    """Search for files in a directory
+
+    Args:
+        directory (str): The directory to search in
+
+    Returns:
+        list[str]: A list of files found in the directory
+    """
+    found_files = []
+
+    if directory in {"", "/"}:
+        search_directory = WORKSPACE_PATH
+    else:
+        search_directory = path_in_workspace(directory)
+
+    for root, _, files in os.walk(search_directory):
+        for file in files:
+            if file.startswith("."):
+                continue
+            relative_path = os.path.relpath(os.path.join(root, file), WORKSPACE_PATH)
+            found_files.append(relative_path)
+
+    return found_files
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
new file mode 100644
index 0000000..3ff35cf
--- /dev/null
+++ b/autogpt/commands/git_operations.py
@@ -0,0 +1,23 @@
+"""Git operations for autogpt"""
+import git
+from autogpt.config import Config
+
+CFG = Config()
+
+
+def clone_repository(repo_url: str, clone_path: str) -> str:
+    """Clone a GitHub repository locally
+
+    Args:
+        repo_url (str): The URL of the repository to clone
+        clone_path (str): The path to clone the repository to
+
+    Returns:
+        str: The result of the clone operation"""
+    split_url = repo_url.split("//")
+    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
+    try:
+        git.Repo.clone_from(auth_repo_url, clone_path)
+        return f"""Cloned {repo_url} to {clone_path}"""
+    except Exception as e:
+        return f"Error: {str(e)}"
diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py
new file mode 100644
index 0000000..148ba1d
--- /dev/null
+++ b/autogpt/commands/google_search.py
@@ -0,0 +1,87 @@
+"""Google search command for Autogpt."""
+from __future__ import annotations
+
+import json
+
+from duckduckgo_search import ddg
+
+from autogpt.config import Config
+
+CFG = Config()
+
+
+def google_search(query: str, num_results: int = 8) -> str:
+    """Return the results of a web search (via the DuckDuckGo API)
+
+    Args:
+        query (str): The search query.
+        num_results (int): The number of results to return.
+
+    Returns:
+        str: The results of the search.
+    """
+    search_results = []
+    if not query:
+        return json.dumps(search_results)
+
+    results = ddg(query, max_results=num_results)
+    if not results:
+        return json.dumps(search_results)
+
+    for j in results:
+        search_results.append(j)
+
+    return json.dumps(search_results, ensure_ascii=False, indent=4)
+
+
+def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
+    """Return the results of a google search using the official Google API
+
+    Args:
+        query (str): The search query.
+        num_results (int): The number of results to return.
+
+    Returns:
+        str: The results of the search.
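+
+        Example (illustrative; requires GOOGLE_API_KEY and CUSTOM_SEARCH_ENGINE_ID
+        to be set in the environment):
+            links = google_official_search("autogpt memory backends", num_results=5)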
+ """ + + from googleapiclient.discovery import build + from googleapiclient.errors import HttpError + + try: + # Get the Google API key and Custom Search Engine ID from the config file + api_key = CFG.google_api_key + custom_search_engine_id = CFG.custom_search_engine_id + + # Initialize the Custom Search API service + service = build("customsearch", "v1", developerKey=api_key) + + # Send the search query and retrieve the results + result = ( + service.cse() + .list(q=query, cx=custom_search_engine_id, num=num_results) + .execute() + ) + + # Extract the search result items from the response + search_results = result.get("items", []) + + # Create a list of only the URLs from the search results + search_results_links = [item["link"] for item in search_results] + + except HttpError as e: + # Handle errors in the API call + error_details = json.loads(e.content.decode()) + + # Check if the error is related to an invalid or missing API key + if error_details.get("error", {}).get( + "code" + ) == 403 and "invalid API key" in error_details.get("error", {}).get( + "message", "" + ): + return "Error: The provided Google API key is invalid or missing." + else: + return f"Error: {e}" + + # Return the list of search result URLs + return search_results_links diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py new file mode 100644 index 0000000..6243616 --- /dev/null +++ b/autogpt/commands/image_gen.py @@ -0,0 +1,97 @@ +""" Image Generation Module for AutoGPT.""" +import io +import os.path +import uuid +from base64 import b64decode + +import openai +import requests +from PIL import Image +from autogpt.config import Config +from autogpt.workspace import path_in_workspace + +CFG = Config() + + +def generate_image(prompt: str) -> str: + """Generate an image from a prompt. + + Args: + prompt (str): The prompt to use + + Returns: + str: The filename of the image + """ + filename = f"{str(uuid.uuid4())}.jpg" + + # DALL-E + if CFG.image_provider == "dalle": + return generate_image_with_dalle(prompt, filename) + elif CFG.image_provider == "sd": + return generate_image_with_hf(prompt, filename) + else: + return "No Image Provider Set" + + +def generate_image_with_hf(prompt: str, filename: str) -> str: + """Generate an image with HuggingFace's API. + + Args: + prompt (str): The prompt to use + filename (str): The filename to save the image to + + Returns: + str: The filename of the image + """ + API_URL = ( + "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4" + ) + if CFG.huggingface_api_token is None: + raise ValueError( + "You need to set your Hugging Face API token in the config file." + ) + headers = {"Authorization": f"Bearer {CFG.huggingface_api_token}"} + + response = requests.post( + API_URL, + headers=headers, + json={ + "inputs": prompt, + }, + ) + + image = Image.open(io.BytesIO(response.content)) + print(f"Image Generated for prompt:{prompt}") + + image.save(path_in_workspace(filename)) + + return f"Saved to disk:{filename}" + + +def generate_image_with_dalle(prompt: str, filename: str) -> str: + """Generate an image with DALL-E. 
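+
+    The image is requested at 256x256 via the OpenAI Image API and written to the
+    workspace as a base64-decoded JPEG.
+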
+    Args:
+        prompt (str): The prompt to use
+        filename (str): The filename to save the image to
+
+    Returns:
+        str: The filename of the image
+    """
+    openai.api_key = CFG.openai_api_key
+
+    response = openai.Image.create(
+        prompt=prompt,
+        n=1,
+        size="256x256",
+        response_format="b64_json",
+    )
+
+    print(f"Image Generated for prompt:{prompt}")
+
+    image_data = b64decode(response["data"][0]["b64_json"])
+
+    with open(path_in_workspace(filename), mode="wb") as png:
+        png.write(image_data)
+
+    return f"Saved to disk:{filename}"
diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py
new file mode 100644
index 0000000..e3440d8
--- /dev/null
+++ b/autogpt/commands/improve_code.py
@@ -0,0 +1,29 @@
+from __future__ import annotations
+
+import json
+
+from autogpt.llm_utils import call_ai_function
+
+
+def improve_code(suggestions: list[str], code: str) -> str:
+    """
+    A function that takes in code and suggestions and returns a response from create
+    chat completion api call.
+
+    Parameters:
+        suggestions (list): A list of suggestions around what needs to be improved.
+        code (str): Code to be improved.
+    Returns:
+        A result string from create chat completion. Improved code in response.
+    """
+
+    function_string = (
+        "def generate_improved_code(suggestions: List[str], code: str) -> str:"
+    )
+    args = [json.dumps(suggestions), code]
+    description_string = (
+        "Improves the provided code based on the suggestions"
+        " provided, making no other changes."
+    )
+
+    return call_ai_function(function_string, args, description_string)
diff --git a/autogpt/commands/times.py b/autogpt/commands/times.py
new file mode 100644
index 0000000..3c9b8a4
--- /dev/null
+++ b/autogpt/commands/times.py
@@ -0,0 +1,10 @@
+from datetime import datetime
+
+
+def get_datetime() -> str:
+    """Return the current date and time
+
+    Returns:
+        str: The current date and time
+    """
+    return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py
new file mode 100644
index 0000000..dc4d450
--- /dev/null
+++ b/autogpt/commands/twitter.py
@@ -0,0 +1,25 @@
+import tweepy
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+
+def send_tweet(tweet_text):
+    consumer_key = os.environ.get("TW_CONSUMER_KEY")
+    consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
+    access_token = os.environ.get("TW_ACCESS_TOKEN")
+    access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
+    # Authenticate to Twitter
+    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
+    auth.set_access_token(access_token, access_token_secret)
+
+    # Create API object
+    api = tweepy.API(auth)
+
+    # Send tweet
+    try:
+        api.update_status(tweet_text)
+        print("Tweet sent successfully!")
+    except tweepy.TweepyException as e:
+        # TweepyException has no `reason` attribute; format the exception itself
+        print("Error sending tweet: {}".format(e))
diff --git a/autogpt/commands/web_playwright.py b/autogpt/commands/web_playwright.py
new file mode 100644
index 0000000..a1abb6c
--- /dev/null
+++ b/autogpt/commands/web_playwright.py
@@ -0,0 +1,79 @@
+"""Web scraping commands using Playwright"""
+from __future__ import annotations
+
+try:
+    from playwright.sync_api import sync_playwright
+except ImportError:
+    print(
+        "Playwright not installed. Please install it with 'pip install playwright' to use."
+ ) +from bs4 import BeautifulSoup +from autogpt.processing.html import extract_hyperlinks, format_hyperlinks + + +def scrape_text(url: str) -> str: + """Scrape text from a webpage + + Args: + url (str): The URL to scrape text from + + Returns: + str: The scraped text + """ + with sync_playwright() as p: + browser = p.chromium.launch() + page = browser.new_page() + + try: + page.goto(url) + html_content = page.content() + soup = BeautifulSoup(html_content, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + text = soup.get_text() + lines = (line.strip() for line in text.splitlines()) + chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + text = "\n".join(chunk for chunk in chunks if chunk) + + except Exception as e: + text = f"Error: {str(e)}" + + finally: + browser.close() + + return text + + +def scrape_links(url: str) -> str | list[str]: + """Scrape links from a webpage + + Args: + url (str): The URL to scrape links from + + Returns: + Union[str, List[str]]: The scraped links + """ + with sync_playwright() as p: + browser = p.chromium.launch() + page = browser.new_page() + + try: + page.goto(url) + html_content = page.content() + soup = BeautifulSoup(html_content, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + hyperlinks = extract_hyperlinks(soup, url) + formatted_links = format_hyperlinks(hyperlinks) + + except Exception as e: + formatted_links = f"Error: {str(e)}" + + finally: + browser.close() + + return formatted_links diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py new file mode 100644 index 0000000..50d8d38 --- /dev/null +++ b/autogpt/commands/web_requests.py @@ -0,0 +1,171 @@ +"""Browse a webpage and summarize it using the LLM model""" +from __future__ import annotations + +from urllib.parse import urljoin, urlparse + +import requests +from requests.compat import urljoin +from requests import Response +from bs4 import BeautifulSoup + +from autogpt.config import Config +from autogpt.memory import get_memory +from autogpt.processing.html import extract_hyperlinks, format_hyperlinks + +CFG = Config() +memory = get_memory(CFG) + +session = requests.Session() +session.headers.update({"User-Agent": CFG.user_agent}) + + +def is_valid_url(url: str) -> bool: + """Check if the URL is valid + + Args: + url (str): The URL to check + + Returns: + bool: True if the URL is valid, False otherwise + """ + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except ValueError: + return False + + +def sanitize_url(url: str) -> str: + """Sanitize the URL + + Args: + url (str): The URL to sanitize + + Returns: + str: The sanitized URL + """ + return urljoin(url, urlparse(url).path) + + +def check_local_file_access(url: str) -> bool: + """Check if the URL is a local file + + Args: + url (str): The URL to check + + Returns: + bool: True if the URL is a local file, False otherwise + """ + local_prefixes = [ + "file:///", + "file://localhost", + "http://localhost", + "https://localhost", + ] + return any(url.startswith(prefix) for prefix in local_prefixes) + + +def get_response( + url: str, timeout: int = 10 +) -> tuple[None, str] | tuple[Response, None]: + """Get the response from a URL + + Args: + url (str): The URL to get the response from + timeout (int): The timeout for the HTTP request + + Returns: + tuple[None, str] | tuple[Response, None]: The response and error message + + Raises: + ValueError: If the URL is invalid + 
requests.exceptions.RequestException: If the HTTP request fails + """ + try: + # Restrict access to local files + if check_local_file_access(url): + raise ValueError("Access to local files is restricted") + + # Most basic check if the URL is valid: + if not url.startswith("http://") and not url.startswith("https://"): + raise ValueError("Invalid URL format") + + sanitized_url = sanitize_url(url) + + response = session.get(sanitized_url, timeout=timeout) + + # Check if the response contains an HTTP error + if response.status_code >= 400: + return None, f"Error: HTTP {str(response.status_code)} error" + + return response, None + except ValueError as ve: + # Handle invalid URL format + return None, f"Error: {str(ve)}" + + except requests.exceptions.RequestException as re: + # Handle exceptions related to the HTTP request + # (e.g., connection errors, timeouts, etc.) + return None, f"Error: {str(re)}" + + +def scrape_text(url: str) -> str: + """Scrape text from a webpage + + Args: + url (str): The URL to scrape text from + + Returns: + str: The scraped text + """ + response, error_message = get_response(url) + if error_message: + return error_message + if not response: + return "Error: Could not get response" + + soup = BeautifulSoup(response.text, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + text = soup.get_text() + lines = (line.strip() for line in text.splitlines()) + chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + text = "\n".join(chunk for chunk in chunks if chunk) + + return text + + +def scrape_links(url: str) -> str | list[str]: + """Scrape links from a webpage + + Args: + url (str): The URL to scrape links from + + Returns: + str | list[str]: The scraped links + """ + response, error_message = get_response(url) + if error_message: + return error_message + if not response: + return "Error: Could not get response" + soup = BeautifulSoup(response.text, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + hyperlinks = extract_hyperlinks(soup, url) + + return format_hyperlinks(hyperlinks) + + +def create_message(chunk, question): + """Create a message for the user to summarize a chunk of text""" + return { + "role": "user", + "content": f'"""{chunk}""" Using the above text, answer the following' + f' question: "{question}" -- if the question cannot be answered using the' + " text, summarize the text.", + } diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py new file mode 100644 index 0000000..1d078d7 --- /dev/null +++ b/autogpt/commands/web_selenium.py @@ -0,0 +1,142 @@ +"""Selenium web scraping module.""" +from __future__ import annotations + +from selenium import webdriver +from autogpt.processing.html import extract_hyperlinks, format_hyperlinks +import autogpt.processing.text as summary +from bs4 import BeautifulSoup +from selenium.webdriver.remote.webdriver import WebDriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.wait import WebDriverWait +from selenium.webdriver.support import expected_conditions as EC +from webdriver_manager.chrome import ChromeDriverManager +from webdriver_manager.firefox import GeckoDriverManager +from selenium.webdriver.chrome.options import Options as ChromeOptions +from selenium.webdriver.firefox.options import Options as FirefoxOptions +from selenium.webdriver.safari.options import Options as SafariOptions +import logging +from pathlib import Path +from autogpt.config import Config + +FILE_DIR = 
Path(__file__).parent.parent +CFG = Config() + + +def browse_website(url: str, question: str) -> tuple[str, WebDriver]: + """Browse a website and return the answer and links to the user + + Args: + url (str): The url of the website to browse + question (str): The question asked by the user + + Returns: + Tuple[str, WebDriver]: The answer and links to the user and the webdriver + """ + driver, text = scrape_text_with_selenium(url) + add_header(driver) + summary_text = summary.summarize_text(url, text, question, driver) + links = scrape_links_with_selenium(driver, url) + + # Limit links to 5 + if len(links) > 5: + links = links[:5] + close_browser(driver) + return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver + + +def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: + """Scrape text from a website using selenium + + Args: + url (str): The url of the website to scrape + + Returns: + Tuple[WebDriver, str]: The webdriver and the text scraped from the website + """ + logging.getLogger("selenium").setLevel(logging.CRITICAL) + + options_available = { + "chrome": ChromeOptions, + "safari": SafariOptions, + "firefox": FirefoxOptions, + } + + options = options_available[CFG.selenium_web_browser]() + options.add_argument( + "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" + ) + + if CFG.selenium_web_browser == "firefox": + driver = webdriver.Firefox( + executable_path=GeckoDriverManager().install(), options=options + ) + elif CFG.selenium_web_browser == "safari": + # Requires a bit more setup on the users end + # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari + driver = webdriver.Safari(options=options) + else: + driver = webdriver.Chrome( + executable_path=ChromeDriverManager().install(), options=options + ) + driver.get(url) + + WebDriverWait(driver, 10).until( + EC.presence_of_element_located((By.TAG_NAME, "body")) + ) + + # Get the HTML content directly from the browser's DOM + page_source = driver.execute_script("return document.body.outerHTML;") + soup = BeautifulSoup(page_source, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + text = soup.get_text() + lines = (line.strip() for line in text.splitlines()) + chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + text = "\n".join(chunk for chunk in chunks if chunk) + return driver, text + + +def scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]: + """Scrape links from a website using selenium + + Args: + driver (WebDriver): The webdriver to use to scrape the links + + Returns: + List[str]: The links scraped from the website + """ + page_source = driver.page_source + soup = BeautifulSoup(page_source, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + hyperlinks = extract_hyperlinks(soup, url) + + return format_hyperlinks(hyperlinks) + + +def close_browser(driver: WebDriver) -> None: + """Close the browser + + Args: + driver (WebDriver): The webdriver to close + + Returns: + None + """ + driver.quit() + + +def add_header(driver: WebDriver) -> None: + """Add a header to the website + + Args: + driver (WebDriver): The webdriver to use to add the header + + Returns: + None + """ + driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read()) diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py new file mode 100644 index 0000000..138a1ad --- /dev/null +++ 
b/autogpt/commands/write_tests.py
@@ -0,0 +1,30 @@
+"""A module that contains a function to generate test cases for the submitted code."""
+from __future__ import annotations
+
+import json
+from autogpt.llm_utils import call_ai_function
+
+
+def write_tests(code: str, focus: list[str]) -> str:
+    """
+    A function that takes in code and focus topics and returns a response from create
+    chat completion api call.
+
+    Parameters:
+        focus (list): A list of focus areas to generate test cases around.
+        code (str): Code for test cases to be generated against.
+    Returns:
+        A result string from create chat completion. Test cases for the submitted code
+        in response.
+    """
+
+    function_string = (
+        "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
+    )
+    args = [code, json.dumps(focus)]
+    description_string = (
+        "Generates test cases for the existing code, focusing on"
+        " specific areas if required."
+    )
+
+    return call_ai_function(function_string, args, description_string)
diff --git a/autogpt/config/__init__.py b/autogpt/config/__init__.py
new file mode 100644
index 0000000..ceb5566
--- /dev/null
+++ b/autogpt/config/__init__.py
@@ -0,0 +1,14 @@
+"""
+This module contains the configuration classes for AutoGPT.
+"""
+from autogpt.config.ai_config import AIConfig
+from autogpt.config.config import check_openai_api_key, Config
+from autogpt.config.singleton import AbstractSingleton, Singleton
+
+__all__ = [
+    "check_openai_api_key",
+    "AbstractSingleton",
+    "AIConfig",
+    "Config",
+    "Singleton",
+]
diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py
new file mode 100644
index 0000000..8617135
--- /dev/null
+++ b/autogpt/config/ai_config.py
@@ -0,0 +1,120 @@
+# sourcery skip: do-not-use-staticmethod
+"""
+A module that contains the AIConfig class object that contains the configuration
+"""
+from __future__ import annotations
+
+import os
+from typing import Type
+import yaml
+
+
+class AIConfig:
+    """
+    A class object that contains the configuration information for the AI
+
+    Attributes:
+        ai_name (str): The name of the AI.
+        ai_role (str): The description of the AI's role.
+        ai_goals (list): The list of objectives the AI is supposed to complete.
+    """
+
+    def __init__(
+        self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None
+    ) -> None:
+        """
+        Initialize a class instance
+
+        Parameters:
+            ai_name (str): The name of the AI.
+            ai_role (str): The description of the AI's role.
+            ai_goals (list): The list of objectives the AI is supposed to complete.
+        Returns:
+            None
+        """
+        if ai_goals is None:
+            ai_goals = []
+        self.ai_name = ai_name
+        self.ai_role = ai_role
+        self.ai_goals = ai_goals
+
+    # Soon this will go in a folder where it remembers more stuff about the run(s)
+    SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
+
+    @staticmethod
+    def load(config_file: str = SAVE_FILE) -> "AIConfig":
+        """
+        Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from
+        yaml file if yaml file exists,
+        else returns class with no parameters.
+
+        Parameters:
+            config_file (str): The path to the config yaml file.
+                DEFAULT: "../ai_settings.yaml"
+
+        Returns:
+            cls (object): An instance of given cls object
+        """
+
+        try:
+            with open(config_file, encoding="utf-8") as file:
+                config_params = yaml.load(file, Loader=yaml.FullLoader)
+        except FileNotFoundError:
+            config_params = {}
+
+        ai_name = config_params.get("ai_name", "")
+        ai_role = config_params.get("ai_role", "")
+        ai_goals = config_params.get("ai_goals", [])
+        # type: Type[AIConfig]
+        return AIConfig(ai_name, ai_role, ai_goals)
+
+    def save(self, config_file: str = SAVE_FILE) -> None:
+        """
+        Saves the class parameters to the specified yaml file path as a yaml file.
+
+        Parameters:
+            config_file(str): The path to the config yaml file.
+                DEFAULT: "../ai_settings.yaml"
+
+        Returns:
+            None
+        """
+
+        config = {
+            "ai_name": self.ai_name,
+            "ai_role": self.ai_role,
+            "ai_goals": self.ai_goals,
+        }
+        with open(config_file, "w", encoding="utf-8") as file:
+            yaml.dump(config, file, allow_unicode=True)
+
+    def construct_full_prompt(self) -> str:
+        """
+        Returns a prompt to the user with the class information in an organized fashion.
+
+        Parameters:
+            None
+
+        Returns:
+            full_prompt (str): A string containing the initial prompt for the user
+                including the ai_name, ai_role and ai_goals.
+        """
+
+        prompt_start = (
+            "Your decisions must always be made independently without"
+            " seeking user assistance. Play to your strengths as an LLM and pursue"
+            " simple strategies with no legal complications."
+            ""
+        )
+
+        from autogpt.prompt import get_prompt
+
+        # Construct full prompt
+        full_prompt = (
+            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
+        )
+        for i, goal in enumerate(self.ai_goals):
+            full_prompt += f"{i+1}. {goal}\n"
+
+        full_prompt += f"\n\n{get_prompt()}"
+        return full_prompt
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
new file mode 100644
index 0000000..3acb01b
--- /dev/null
+++ b/autogpt/config/config.py
@@ -0,0 +1,240 @@
+"""Configuration class to store the state of bools for different scripts access."""
+import os
+from colorama import Fore
+
+from autogpt.config.singleton import Singleton
+
+import openai
+import yaml
+
+from dotenv import load_dotenv
+
+load_dotenv(verbose=True)
+
+
+class Config(metaclass=Singleton):
+    """
+    Configuration class to store the state of bools for different scripts access.
+    """
+
+    def __init__(self) -> None:
+        """Initialize the Config class"""
+        self.debug_mode = False
+        self.continuous_mode = False
+        self.continuous_limit = 0
+        self.speak_mode = False
+        self.skip_reprompt = False
+
+        self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
+        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
+        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
+        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
+        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
+        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
+        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
+        self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))
+
+        self.openai_api_key = os.getenv("OPENAI_API_KEY")
+        self.temperature = float(os.getenv("TEMPERATURE", "1"))
+        self.use_azure = os.getenv("USE_AZURE") == "True"
+        self.execute_local_commands = (
+            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
+        )
+
+        if self.use_azure:
+            self.load_azure_config()
+            openai.api_type = self.openai_api_type
+            openai.api_base = self.openai_api_base
+            openai.api_version = self.openai_api_version
+
+        self.elevenlabs_api_key = 
os.getenv("ELEVENLABS_API_KEY") + self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID") + self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID") + + self.use_mac_os_tts = False + self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS") + + self.use_brian_tts = False + self.use_brian_tts = os.getenv("USE_BRIAN_TTS") + + self.github_api_key = os.getenv("GITHUB_API_KEY") + self.github_username = os.getenv("GITHUB_USERNAME") + + self.google_api_key = os.getenv("GOOGLE_API_KEY") + self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID") + + self.pinecone_api_key = os.getenv("PINECONE_API_KEY") + self.pinecone_region = os.getenv("PINECONE_ENV") + + self.weaviate_host = os.getenv("WEAVIATE_HOST") + self.weaviate_port = os.getenv("WEAVIATE_PORT") + self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http") + self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None) + self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None) + self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None) + self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH") + self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None) + self.use_weaviate_embedded = os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True" + + # milvus configuration, e.g., localhost:19530. + self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530") + self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt") + + self.image_provider = os.getenv("IMAGE_PROVIDER") + self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN") + self.huggingface_audio_to_text_model = os.getenv( + "HUGGINGFACE_AUDIO_TO_TEXT_MODEL" + ) + + # User agent headers to use when browsing web + # Some websites might just completely deny request with an error code if + # no user agent was found. + self.user_agent = os.getenv( + "USER_AGENT", + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36" + " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", + ) + self.redis_host = os.getenv("REDIS_HOST", "localhost") + self.redis_port = os.getenv("REDIS_PORT", "6379") + self.redis_password = os.getenv("REDIS_PASSWORD", "") + self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True" + self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt") + # Note that indexes must be created on db 0 in redis, this is not configurable. + + self.memory_backend = os.getenv("MEMORY_BACKEND", "local") + # Initialize the OpenAI API client + openai.api_key = self.openai_api_key + + def get_azure_deployment_id_for_model(self, model: str) -> str: + """ + Returns the relevant deployment id for the model specified. + + Parameters: + model(str): The model to map to the deployment id. + + Returns: + The matching deployment id if found, otherwise an empty string. + """ + if model == self.fast_llm_model: + return self.azure_model_to_deployment_id_map[ + "fast_llm_model_deployment_id" + ] # type: ignore + elif model == self.smart_llm_model: + return self.azure_model_to_deployment_id_map[ + "smart_llm_model_deployment_id" + ] # type: ignore + elif model == "text-embedding-ada-002": + return self.azure_model_to_deployment_id_map[ + "embedding_model_deployment_id" + ] # type: ignore + else: + return "" + + AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml") + + def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None: + """ + Loads the configuration parameters for Azure hosting from the specified file + path as a yaml file. 
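+
+        The yaml file is expected to define azure_api_type, azure_api_base,
+        azure_api_version and an azure_model_map section.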
+
+        Parameters:
+            config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
+
+        Returns:
+            None
+        """
+        try:
+            with open(config_file) as file:
+                config_params = yaml.load(file, Loader=yaml.FullLoader)
+        except FileNotFoundError:
+            config_params = {}
+        self.openai_api_type = config_params.get("azure_api_type") or "azure"
+        self.openai_api_base = config_params.get("azure_api_base") or ""
+        self.openai_api_version = (
+            config_params.get("azure_api_version") or "2023-03-15-preview"
+        )
+        self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})
+
+    def set_continuous_mode(self, value: bool):
+        """Set the continuous mode value."""
+        self.continuous_mode = value
+
+    def set_continuous_limit(self, value: int) -> None:
+        """Set the continuous limit value."""
+        self.continuous_limit = value
+
+    def set_speak_mode(self, value: bool):
+        """Set the speak mode value."""
+        self.speak_mode = value
+
+    def set_fast_llm_model(self, value: str):
+        """Set the fast LLM model value."""
+        self.fast_llm_model = value
+
+    def set_smart_llm_model(self, value: str):
+        """Set the smart LLM model value."""
+        self.smart_llm_model = value
+
+    def set_fast_token_limit(self, value: int):
+        """Set the fast token limit value."""
+        self.fast_token_limit = value
+
+    def set_smart_token_limit(self, value: int):
+        """Set the smart token limit value."""
+        self.smart_token_limit = value
+
+    def set_browse_chunk_max_length(self, value: int) -> None:
+        """Set the browse_website command chunk max length value."""
+        self.browse_chunk_max_length = value
+
+    def set_browse_summary_max_token(self, value: int) -> None:
+        """Set the browse_website command summary max token value."""
+        self.browse_summary_max_token = value
+
+    def set_openai_api_key(self, value: str):
+        """Set the OpenAI API key value."""
+        self.openai_api_key = value
+
+    def set_elevenlabs_api_key(self, value: str):
+        """Set the ElevenLabs API key value."""
+        self.elevenlabs_api_key = value
+
+    def set_elevenlabs_voice_1_id(self, value: str):
+        """Set the ElevenLabs Voice 1 ID value."""
+        self.elevenlabs_voice_1_id = value
+
+    def set_elevenlabs_voice_2_id(self, value: str):
+        """Set the ElevenLabs Voice 2 ID value."""
+        self.elevenlabs_voice_2_id = value
+
+    def set_google_api_key(self, value: str):
+        """Set the Google API key value."""
+        self.google_api_key = value
+
+    def set_custom_search_engine_id(self, value: str):
+        """Set the custom search engine ID value."""
+        self.custom_search_engine_id = value
+
+    def set_pinecone_api_key(self, value: str):
+        """Set the Pinecone API key value."""
+        self.pinecone_api_key = value
+
+    def set_pinecone_region(self, value: str):
+        """Set the Pinecone region value."""
+        self.pinecone_region = value
+
+    def set_debug_mode(self, value: bool):
+        """Set the debug mode value."""
+        self.debug_mode = value
+
+
+def check_openai_api_key() -> None:
+    """Check if the OpenAI API key is set in config.py or as an environment variable."""
+    cfg = Config()
+    if not cfg.openai_api_key:
+        print(
+            Fore.RED
+            + "Please set your OpenAI API key in .env or as an environment variable."
+        )
+        print("You can get your key from https://beta.openai.com/account/api-keys")
+        exit(1)
diff --git a/autogpt/config/singleton.py b/autogpt/config/singleton.py
new file mode 100644
index 0000000..55b2aee
--- /dev/null
+++ b/autogpt/config/singleton.py
@@ -0,0 +1,24 @@
+"""The singleton metaclass for ensuring only one instance of a class."""
+import abc
+
+
+class Singleton(abc.ABCMeta, type):
+    """
+    Singleton metaclass for ensuring only one instance of a class.
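+
+    Any class created with this metaclass returns one shared instance on every
+    instantiation, e.g. every Config() call yields the same object.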
+ """ + + _instances = {} + + def __call__(cls, *args, **kwargs): + """Call method for the singleton metaclass.""" + if cls not in cls._instances: + cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) + return cls._instances[cls] + + +class AbstractSingleton(abc.ABC, metaclass=Singleton): + """ + Abstract singleton class for ensuring only one instance of a class. + """ + + pass diff --git a/autogpt/data_ingestion.py b/autogpt/data_ingestion.py new file mode 100644 index 0000000..d501fe4 --- /dev/null +++ b/autogpt/data_ingestion.py @@ -0,0 +1,95 @@ +import argparse +import logging + +from autogpt.config import Config +from autogpt.commands.file_operations import ingest_file, search_files +from autogpt.memory import get_memory + +cfg = Config() + + +def configure_logging(): + logging.basicConfig( + filename="log-ingestion.txt", + filemode="a", + format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s", + datefmt="%H:%M:%S", + level=logging.DEBUG, + ) + return logging.getLogger("AutoGPT-Ingestion") + + +def ingest_directory(directory, memory, args): + """ + 通过为每个文件调用 ingest_file 函数来摄取目录中的所有文件。 + + :param directory: 包含要摄取的文件的目录 + :param memory: 具有 add() 方法的对象,用于将块存储在内存中 + """ + try: + files = search_files(directory) + for file in files: + ingest_file(file, memory, args.max_length, args.overlap) + except Exception as e: + print(f"获取目录 '{directory}' 时出错:{str(e)}") + + +def main() -> None: + logger = configure_logging() + + parser = argparse.ArgumentParser( + description="Ingest a file or a directory with multiple files into memory. " + "Make sure to set your .env before running this script." + ) + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument("--file", type=str, help="The file to ingest.") + group.add_argument( + "--dir", type=str, help="The directory containing the files to ingest." 
+    )
+    parser.add_argument(
+        "--init",
+        action="store_true",
+        help="Init the memory and wipe its content (default: False)",
+        default=False,
+    )
+    parser.add_argument(
+        "--overlap",
+        type=int,
+        help="The overlap size between chunks when ingesting files (default: 200)",
+        default=200,
+    )
+    parser.add_argument(
+        "--max_length",
+        type=int,
+        help="The max_length of each chunk when ingesting files (default: 4000)",
+        default=4000,
+    )
+
+    args = parser.parse_args()
+
+    # Initialize memory
+    memory = get_memory(cfg, init=args.init)
+    print("Using memory of type: " + memory.__class__.__name__)
+
+    if args.file:
+        try:
+            ingest_file(args.file, memory, args.max_length, args.overlap)
+            print(f"File '{args.file}' ingested successfully.")
+        except Exception as e:
+            logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
+            print(f"Error while ingesting file '{args.file}': {str(e)}")
+    elif args.dir:
+        try:
+            ingest_directory(args.dir, memory, args)
+            print(f"Directory '{args.dir}' ingested successfully.")
+        except Exception as e:
+            logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
+            print(f"Error while ingesting directory '{args.dir}': {str(e)}")
+    else:
+        print(
+            "Please provide either a file path (--file) or a directory name (--dir)"
+            " inside the auto_gpt_workspace directory as input."
+        )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/autogpt/file_operations.py b/autogpt/file_operations.py
new file mode 100644
index 0000000..d1ba411
--- /dev/null
+++ b/autogpt/file_operations.py
@@ -0,0 +1,141 @@
+import os
+import os.path
+
+# Set a dedicated folder for file I/O
+working_directory = "auto_gpt_workspace"
+
+# Create the directory if it doesn't exist
+if not os.path.exists(working_directory):
+    os.makedirs(working_directory)
+
+
+def safe_join(base, *paths):
+    """Join one or more path components intelligently."""
+    new_path = os.path.join(base, *paths)
+    norm_new_path = os.path.normpath(new_path)
+
+    if os.path.commonprefix([base, norm_new_path]) != base:
+        raise ValueError("Attempted to access a path outside of the working directory.")
+
+    return norm_new_path
+
+
+def split_file(content, max_length=4000, overlap=0):
+    """
+    Split text into chunks of a specified maximum length with a specified overlap
+    between chunks.
+
+    :param content: The input text to be split into chunks
+    :param max_length: The maximum length of each chunk,
+        default is 4000 (about 1k tokens)
+    :param overlap: The number of overlapping characters between chunks,
+        default is no overlap
+    :return: A generator yielding chunks of text
+    """
+    start = 0
+    content_length = len(content)
+
+    while start < content_length:
+        end = start + max_length
+        if end + overlap < content_length:
+            chunk = content[start : end + overlap]
+        else:
+            chunk = content[start:content_length]
+        yield chunk
+        start += max_length - overlap
+
+
+def read_file(filename) -> str:
+    """Read a file and return the contents"""
+    try:
+        filepath = safe_join(working_directory, filename)
+        with open(filepath, "r", encoding="utf-8") as f:
+            content = f.read()
+        return content
+    except Exception as e:
+        return f"Error: {str(e)}"
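To make the overlap semantics of split_file concrete, a small illustrative run (not part of the patch): each chunk spans up to max_length + overlap characters, successive chunks start max_length - overlap characters apart, and a short tail chunk can appear at the end.

    # Chunking a 10-character string with max_length=4, overlap=1:
    chunks = list(split_file("abcdefghij", max_length=4, overlap=1))
    print(chunks)  # ['abcde', 'defgh', 'ghij', 'j']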
+
+
+def ingest_file(filename, memory, max_length=4000, overlap=200):
+    """
+    Ingest a file by reading its content, splitting it into chunks with a specified
+    maximum length and overlap, and adding the chunks to the memory storage.
+
+    :param filename: The name of the file to ingest
+    :param memory: An object with an add() method to store the chunks in memory
+    :param max_length: The maximum length of each chunk, default is 4000
+    :param overlap: The number of overlapping characters between chunks, default is 200
+    """
+    try:
+        print(f"Working with file {filename}")
+        content = read_file(filename)
+        content_length = len(content)
+        print(f"File length: {content_length} characters")
+
+        chunks = list(split_file(content, max_length=max_length, overlap=overlap))
+
+        num_chunks = len(chunks)
+        for i, chunk in enumerate(chunks):
+            print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
+            memory_to_add = (
+                f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
+            )
+
+            memory.add(memory_to_add)
+
+        print(f"Done ingesting {num_chunks} chunks from {filename}.")
+    except Exception as e:
+        print(f"Error while ingesting file '{filename}': {str(e)}")
+
+
+def write_to_file(filename, text):
+    """Write text to a file"""
+    try:
+        filepath = safe_join(working_directory, filename)
+        directory = os.path.dirname(filepath)
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+        with open(filepath, "w", encoding="utf-8") as f:
+            f.write(text)
+        return "File written to successfully."
+    except Exception as e:
+        return "Error: " + str(e)
+
+
+def append_to_file(filename, text):
+    """Append text to a file"""
+    try:
+        filepath = safe_join(working_directory, filename)
+        with open(filepath, "a") as f:
+            f.write(text)
+        return "Text appended successfully."
+    except Exception as e:
+        return "Error: " + str(e)
+
+
+def delete_file(filename):
+    """Delete a file"""
+    try:
+        filepath = safe_join(working_directory, filename)
+        os.remove(filepath)
+        return "File deleted successfully."
+    except Exception as e:
+        return "Error: " + str(e)
+
+
+def search_files(directory):
+    found_files = []
+
+    if directory == "" or directory == "/":
+        search_directory = working_directory
+    else:
+        search_directory = safe_join(working_directory, directory)
+
+    for root, _, files in os.walk(search_directory):
+        for file in files:
+            if file.startswith("."):
+                continue
+            relative_path = os.path.relpath(os.path.join(root, file), working_directory)
+            found_files.append(relative_path)
+
+    return found_files
diff --git a/autogpt/image_gen.py b/autogpt/image_gen.py
new file mode 100644
index 0000000..605d44e
--- /dev/null
+++ b/autogpt/image_gen.py
@@ -0,0 +1,67 @@
+import io
+import os.path
+import uuid
+from base64 import b64decode
+
+import openai
+import requests
+from PIL import Image
+
+from autogpt.config import Config
+
+cfg = Config()
+
+working_directory = "auto_gpt_workspace"
+
+
+def generate_image(prompt):
+    filename = str(uuid.uuid4()) + ".jpg"
+
+    # DALL-E
+    if cfg.image_provider == "dalle":
+        openai.api_key = cfg.openai_api_key
+
+        response = openai.Image.create(
+            prompt=prompt,
+            n=1,
+            size="256x256",
+            response_format="b64_json",
+        )
+
+        print("Image generation prompt: " + prompt)
+
+        image_data = b64decode(response["data"][0]["b64_json"])
+
+        with open(working_directory + "/" + filename, mode="wb") as png:
+            png.write(image_data)
+
+        return "Saved to disk: " + filename
+
+    # STABLE DIFFUSION
+    elif cfg.image_provider == "sd":
+        API_URL = (
+            "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
+        )
+        if cfg.huggingface_api_token is None:
+            raise ValueError(
+                "You need to set your Hugging Face API token in the config file."
+            )
+        headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}
+
+        response = requests.post(
+            API_URL,
+            headers=headers,
+            json={
+                "inputs": prompt,
+            },
+        )
+
+        image = Image.open(io.BytesIO(response.content))
+        print("Image generation prompt: " + prompt)
+
+        image.save(os.path.join(working_directory, filename))
+
+        return "Saved to disk: " + filename
+
+    else:
+        return "No image provider set"
diff --git a/autogpt/js/overlay.js b/autogpt/js/overlay.js
new file mode 100644
index 0000000..1c99c72
--- /dev/null
+++ b/autogpt/js/overlay.js
@@ -0,0 +1,29 @@
+const overlay = document.createElement('div');
+Object.assign(overlay.style, {
+    position: 'fixed',
+    zIndex: 999999,
+    top: 0,
+    left: 0,
+    width: '100%',
+    height: '100%',
+    background: 'rgba(0, 0, 0, 0.7)',
+    color: '#fff',
+    fontSize: '24px',
+    fontWeight: 'bold',
+    display: 'flex',
+    justifyContent: 'center',
+    alignItems: 'center',
+});
+const textContent = document.createElement('div');
+Object.assign(textContent.style, {
+    textAlign: 'center',
+});
+textContent.textContent = 'AutoGPT Analyzing Page';
+overlay.appendChild(textContent);
+document.body.append(overlay);
+document.body.style.overflow = 'hidden';
+let dotCount = 0;
+setInterval(() => {
+    textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
+    dotCount = (dotCount + 1) % 4;
+}, 1000);
diff --git a/autogpt/json_fixes/__init__.py b/autogpt/json_fixes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/autogpt/json_fixes/auto_fix.py b/autogpt/json_fixes/auto_fix.py
new file mode 100644
index 0000000..9fcf909
--- /dev/null
+++ b/autogpt/json_fixes/auto_fix.py
@@ -0,0 +1,53 @@
+"""This module contains the function to fix JSON strings using GPT-3."""
+import json
+
+from autogpt.llm_utils import call_ai_function
+from autogpt.logs import logger
+from autogpt.config import Config
+
+CFG = Config()
+
+
+def fix_json(json_string: str, schema: str) -> str:
+    """Fix the given JSON string to make it parseable and fully compliant with
+    the provided schema.
+
+    Args:
+        json_string (str): The JSON string to fix.
+        schema (str): The schema to use to fix the JSON.
+    Returns:
+        str: The fixed JSON string.
+    """
+    # Try to fix the JSON using GPT:
+    function_string = "def fix_json(json_string: str, schema:str=None) -> str:"
+    args = [f"'''{json_string}'''", f"'''{schema}'''"]
+    description_string = (
+        "This function takes a JSON string and ensures that it"
+        " is parseable and fully compliant with the provided schema. If an object"
+        " or field specified in the schema isn't contained within the correct JSON,"
+        " it is omitted. The function also escapes any double quotes within JSON"
+        " string values to ensure that they are valid. If the JSON string contains"
+        " any None or NaN values, they are replaced with null before being parsed."
+    )
+
+    # If it doesn't already start with a "`", add one:
+    if not json_string.startswith("`"):
+        json_string = "```json\n" + json_string + "\n```"
+    result_string = call_ai_function(
+        function_string, args, description_string, model=CFG.fast_llm_model
+    )
+    logger.debug("------------ JSON FIX ATTEMPT ---------------")
+    logger.debug(f"Original JSON: {json_string}")
+    logger.debug("-----------")
+    logger.debug(f"Fixed JSON: {result_string}")
+    logger.debug("----------- END OF FIX ATTEMPT ----------------")
+
+    try:
+        json.loads(result_string)  # just check the validity
+        return result_string
+    except json.JSONDecodeError:
+        # Get the call stack:
+        # import traceback
+        # call_stack = traceback.format_exc()
+        # print(f"Failed to fix JSON: '{json_string}' "+call_stack)
+        return "failed"
diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py
new file mode 100644
index 0000000..822eed4
--- /dev/null
+++ b/autogpt/json_fixes/bracket_termination.py
@@ -0,0 +1,74 @@
+"""Fix JSON brackets."""
+from __future__ import annotations
+
+import contextlib
+import json
+import regex
+from colorama import Fore
+
+from autogpt.logs import logger
+from autogpt.config import Config
+from autogpt.speech import say_text
+
+CFG = Config()
+
+
+def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
+    if CFG.speak_mode and CFG.debug_mode:
+        say_text(
+            "I have received an invalid JSON response from the OpenAI API. "
+            "Trying to fix it now."
+        )
+    logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
+
+    try:
+        json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
+        json_match = json_pattern.search(json_string)
+
+        if json_match:
+            # Extract the valid JSON object from the string
+            json_string = json_match.group(0)
+            logger.typewriter_log(
+                title="Apparently json was fixed.", title_color=Fore.GREEN
+            )
+            if CFG.speak_mode and CFG.debug_mode:
+                say_text("Apparently json was fixed.")
+        else:
+            raise ValueError("No valid JSON object found")
+
+    except (json.JSONDecodeError, ValueError):
+        if CFG.debug_mode:
+            logger.error(f"Error: Invalid JSON: {json_string}\n")
+        if CFG.speak_mode:
+            say_text("Didn't work. I will have to ignore this response then.")
+        logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
+        json_string = {}
+
+    return json_string
+
+
+def balance_braces(json_string: str) -> str | None:
+    """
+    Balance the braces in a JSON string.
+
+    Args:
+        json_string (str): The JSON string.
+
+    Returns:
+        str: The JSON string with braces balanced.
+    """
+
+    open_braces_count = json_string.count("{")
+    close_braces_count = json_string.count("}")
+
+    while open_braces_count > close_braces_count:
+        json_string += "}"
+        close_braces_count += 1
+
+    while close_braces_count > open_braces_count:
+        json_string = json_string.rstrip("}")
+        close_braces_count -= 1
+
+    with contextlib.suppress(json.JSONDecodeError):
+        json.loads(json_string)
+        return json_string
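A brief illustrative sketch of balance_braces (not part of the patch): missing closing braces are appended one at a time until the counts match, but the excess-brace branch calls rstrip("}"), which strips every trailing brace at once, so a string with one surplus closing brace ends up unparseable and the function falls through and returns None.

    print(balance_braces('{"a": {"b": 1}'))  # '{"a": {"b": 1}}' - one "}" appended
    print(balance_braces('{"a": 1}}'))       # None - rstrip("}") removed both braces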
diff --git a/autogpt/json_fixes/escaping.py b/autogpt/json_fixes/escaping.py
new file mode 100644
index 0000000..68eb171
--- /dev/null
+++ b/autogpt/json_fixes/escaping.py
@@ -0,0 +1,33 @@
+""" Fix invalid escape sequences in JSON strings. """
+import json
+
+from autogpt.config import Config
+from autogpt.json_fixes.utilities import extract_char_position
+
+CFG = Config()
+
+
+def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
+    """Fix invalid escape sequences in JSON strings.
+
+    Args:
+        json_to_load (str): The JSON string.
+        error_message (str): The error message from the JSONDecodeError
+            exception.
+
+    Returns:
+        str: The JSON string with invalid escape sequences fixed.
+    """
+    while error_message.startswith("Invalid \\escape"):
+        bad_escape_location = extract_char_position(error_message)
+        json_to_load = (
+            json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :]
+        )
+        try:
+            json.loads(json_to_load)
+            return json_to_load
+        except json.JSONDecodeError as e:
+            if CFG.debug_mode:
+                print("json loads error - fix invalid escape", e)
+            error_message = str(e)
+    return json_to_load
diff --git a/autogpt/json_fixes/missing_quotes.py b/autogpt/json_fixes/missing_quotes.py
new file mode 100644
index 0000000..552a151
--- /dev/null
+++ b/autogpt/json_fixes/missing_quotes.py
@@ -0,0 +1,27 @@
+"""Fix quotes in a JSON string."""
+import json
+import re
+
+
+def add_quotes_to_property_names(json_string: str) -> str:
+    """
+    Add quotes to property names in a JSON string.
+
+    Args:
+        json_string (str): The JSON string.
+
+    Returns:
+        str: The JSON string with quotes added to property names.
+    """
+
+    def replace_func(match: re.Match) -> str:
+        return f'"{match[1]}":'
+
+    property_name_pattern = re.compile(r"(\w+):")
+    corrected_json_string = property_name_pattern.sub(replace_func, json_string)
+
+    try:
+        json.loads(corrected_json_string)
+        return corrected_json_string
+    except json.JSONDecodeError as e:
+        raise e
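For clarity, a small illustrative example of the quoting fix above (not part of the patch): bare property names are wrapped in double quotes, one of the most common JSON mistakes in model output. Because the regex could also mangle colons inside string values, the result is validated with json.loads before being returned.

    fixed = add_quotes_to_property_names('{name: "Auto-GPT", version: 1}')
    print(fixed)  # {"name": "Auto-GPT", "version": 1}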
diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_fixes/parsing.py
new file mode 100644
index 0000000..a366120
--- /dev/null
+++ b/autogpt/json_fixes/parsing.py
@@ -0,0 +1,143 @@
+"""Fix and parse JSON strings."""
+from __future__ import annotations
+
+import contextlib
+import json
+from typing import Any
+
+from autogpt.config import Config
+from autogpt.json_fixes.auto_fix import fix_json
+from autogpt.json_fixes.bracket_termination import balance_braces
+from autogpt.json_fixes.escaping import fix_invalid_escape
+from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names
+from autogpt.logs import logger
+
+CFG = Config()
+
+
+JSON_SCHEMA = """
+{
+    "command": {
+        "name": "command name",
+        "args": {
+            "arg name": "value"
+        }
+    },
+    "thoughts":
+    {
+        "text": "thought",
+        "reasoning": "reasoning",
+        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+        "criticism": "constructive self-criticism",
+        "speak": "thoughts summary to say to user"
+    }
+}
+"""
+
+
+def correct_json(json_to_load: str) -> str:
+    """
+    Correct common JSON errors.
+
+    Args:
+        json_to_load (str): The JSON string.
+    """
+
+    try:
+        if CFG.debug_mode:
+            print("json", json_to_load)
+        json.loads(json_to_load)
+        return json_to_load
+    except json.JSONDecodeError as e:
+        if CFG.debug_mode:
+            print("json loads error", e)
+        error_message = str(e)
+        if error_message.startswith("Invalid \\escape"):
+            json_to_load = fix_invalid_escape(json_to_load, error_message)
+        if error_message.startswith(
+            "Expecting property name enclosed in double quotes"
+        ):
+            json_to_load = add_quotes_to_property_names(json_to_load)
+            try:
+                json.loads(json_to_load)
+                return json_to_load
+            except json.JSONDecodeError as e:
+                if CFG.debug_mode:
+                    print("json loads error - add quotes", e)
+                error_message = str(e)
+        if balanced_str := balance_braces(json_to_load):
+            return balanced_str
+    return json_to_load
+
+
+def fix_and_parse_json(
+    json_to_load: str, try_to_fix_with_gpt: bool = True
+) -> str | dict[Any, Any]:
+    """Fix and parse JSON string
+
+    Args:
+        json_to_load (str): The JSON string.
+        try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT.
+            Defaults to True.
+
+    Returns:
+        str or dict[Any, Any]: The parsed JSON.
+    """
+
+    with contextlib.suppress(json.JSONDecodeError):
+        json_to_load = json_to_load.replace("\t", "")
+        return json.loads(json_to_load)
+
+    with contextlib.suppress(json.JSONDecodeError):
+        json_to_load = correct_json(json_to_load)
+        return json.loads(json_to_load)
+    # Let's do something manually:
+    # sometimes GPT responds with something BEFORE the braces:
+    # "I'm sorry, I don't understand. Please try again."
+    # {"text": "I'm sorry, I don't understand. Please try again.",
+    # "confidence": 0.0}
+    # So let's try to find the first brace and then parse the rest
+    # of the string
+    try:
+        brace_index = json_to_load.index("{")
+        maybe_fixed_json = json_to_load[brace_index:]
+        last_brace_index = maybe_fixed_json.rindex("}")
+        maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1]
+        return json.loads(maybe_fixed_json)
+    except (json.JSONDecodeError, ValueError) as e:
+        return try_ai_fix(try_to_fix_with_gpt, e, json_to_load)
+
+
+def try_ai_fix(
+    try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
+) -> str | dict[Any, Any]:
+    """Try to fix the JSON with the AI
+
+    Args:
+        try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI.
+        exception (Exception): The exception that was raised.
+        json_to_load (str): The JSON string to load.
+
+    Raises:
+        exception: If try_to_fix_with_gpt is False.
+
+    Returns:
+        str or dict[Any, Any]: The JSON string or dictionary.
+    """
+    if not try_to_fix_with_gpt:
+        raise exception
+
+    logger.warn(
+        "Warning: Failed to parse AI output, attempting to fix."
+        "\n If you see this warning frequently, it's likely that your prompt is"
+        " confusing the AI. Try changing it slightly."
+    )
+
+    # Now try to fix this up using the ai_functions
+    ai_fixed_json = fix_json(json_to_load, JSON_SCHEMA)
+
+    if ai_fixed_json != "failed":
+        return json.loads(ai_fixed_json)
+    # This allows the AI to react to the error message,
+    # which usually results in it correcting its ways.
+    logger.error("Failed to fix AI output, telling the AI.")
+    return json_to_load
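As an illustration of the fallback chain in fix_and_parse_json (a sketch, not part of the patch): when the model prefixes its JSON with prose, the brace-slicing step is what ultimately recovers the object.

    reply = 'Sure, here is my plan: {"command": {"name": "browse", "args": {}}}'
    parsed = fix_and_parse_json(reply, try_to_fix_with_gpt=False)
    print(parsed["command"]["name"])  # browse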
diff --git a/autogpt/json_fixes/utilities.py b/autogpt/json_fixes/utilities.py
new file mode 100644
index 0000000..0852b18
--- /dev/null
+++ b/autogpt/json_fixes/utilities.py
@@ -0,0 +1,20 @@
+"""Utilities for the json_fixes package."""
+import re
+
+
+def extract_char_position(error_message: str) -> int:
+    """Extract the character position from the JSONDecodeError message.
+
+    Args:
+        error_message (str): The error message from the JSONDecodeError
+            exception.
+
+    Returns:
+        int: The character position.
+    """
+
+    char_pattern = re.compile(r"\(char (\d+)\)")
+    if match := char_pattern.search(error_message):
+        return int(match[1])
+    else:
+        raise ValueError("Character position not found in the error message.")
diff --git a/autogpt/json_parser.py b/autogpt/json_parser.py
new file mode 100644
index 0000000..46c0cae
--- /dev/null
+++ b/autogpt/json_parser.py
@@ -0,0 +1,111 @@
+import json
+from typing import Any, Dict, Union
+
+from autogpt.call_ai_function import call_ai_function
+from autogpt.config import Config
+from autogpt.json_utils import correct_json
+from autogpt.logger import logger
+
+cfg = Config()
+
+JSON_SCHEMA = """
+{
+    "command": {
+        "name": "command name",
+        "args": {
+            "arg name": "value"
+        }
+    },
+    "thoughts":
+    {
+        "text": "thought",
+        "reasoning": "reasoning",
+        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+        "criticism": "constructive self-criticism",
+        "speak": "thoughts summary to say to user"
+    }
+}
+"""
+
+
+def fix_and_parse_json(
+    json_str: str, try_to_fix_with_gpt: bool = True
+) -> Union[str, Dict[Any, Any]]:
+    """Fix and parse JSON string"""
+    try:
+        json_str = json_str.replace("\t", "")
+        return json.loads(json_str)
+    except json.JSONDecodeError as _:  # noqa: F841
+        try:
+            json_str = correct_json(json_str)
+            return json.loads(json_str)
+        except json.JSONDecodeError as _:  # noqa: F841
+            pass
+    # Let's do something manually:
+    # sometimes GPT responds with something BEFORE the braces:
+    # "I'm sorry, I don't understand. Please try again."
+    # {"text": "I'm sorry, I don't understand. Please try again.",
+    # "confidence": 0.0}
+    # So let's try to find the first brace and then parse the rest
+    # of the string
+    try:
+        brace_index = json_str.index("{")
+        json_str = json_str[brace_index:]
+        last_brace_index = json_str.rindex("}")
+        json_str = json_str[: last_brace_index + 1]
+        return json.loads(json_str)
+    # Can throw a ValueError if there is no "{" or "}" in the json_str
+    except (json.JSONDecodeError, ValueError) as e:  # noqa: F841
+        if try_to_fix_with_gpt:
+            logger.warn(
+                "Warning: Failed to parse AI output, attempting to fix it on its own."
+                "\n If you see this warning often, your prompt is likely confusing"
+                " the AI. Try changing it slightly."
+            )
+            # Now try to fix this up using the ai_functions
+            ai_fixed_json = fix_json(json_str, JSON_SCHEMA)
+
+            if ai_fixed_json != "failed":
+                return json.loads(ai_fixed_json)
+            else:
+                # This allows the AI to react to the error message,
+                # which usually results in it correcting its ways.
+                logger.error("Failed to fix AI output, telling the AI. No intervention needed.")
+                return json_str
+        else:
+            raise e
+
+
+def fix_json(json_str: str, schema: str) -> str:
+    """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
+    # Try to fix the JSON using GPT:
+    function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
+    args = [f"'''{json_str}'''", f"'''{schema}'''"]
+    description_string = (
+        "Fixes the provided JSON string to make it parseable"
+        " and fully compliant with the provided schema.\n If an object or"
+        " field specified in the schema isn't contained within the correct"
+        " JSON, it is omitted.\n This function is brilliant at guessing"
+        " when the format is incorrect."
+    )
+
+    # If it doesn't already start with a "`", add one:
+    if not json_str.startswith("`"):
+        json_str = "```json\n" + json_str + "\n```"
+    result_string = call_ai_function(
+        function_string, args, description_string, model=cfg.fast_llm_model
+    )
+    logger.debug("------------ JSON FIX ATTEMPT ---------------")
+    logger.debug(f"Original JSON: {json_str}")
+    logger.debug("-----------")
+    logger.debug(f"Fixed JSON: {result_string}")
+    logger.debug("----------- END OF FIX ATTEMPT ----------------")
+
+    try:
+        json.loads(result_string)  # just check the validity
+        return result_string
+    except:  # noqa: E722
+        # Get the call stack:
+        # import traceback
+        # call_stack = traceback.format_exc()
+        # print(f"Failed to fix JSON: '{json_str}' "+call_stack)
+        return "failed"
diff --git a/autogpt/json_utils.py b/autogpt/json_utils.py
new file mode 100644
index 0000000..a44be51
--- /dev/null
+++ b/autogpt/json_utils.py
@@ -0,0 +1,128 @@
+import json
+import re
+from typing import Optional
+
+from autogpt.config import Config
+
+cfg = Config()
+
+
+def extract_char_position(error_message: str) -> int:
+    """Extract the character position from the JSONDecodeError message.
+
+    Args:
+        error_message (str): The error message from the JSONDecodeError
+            exception.
+
+    Returns:
+        int: The character position.
+    """
+    char_pattern = re.compile(r"\(char (\d+)\)")
+    if match := char_pattern.search(error_message):
+        return int(match[1])
+    else:
+        raise ValueError("Character position not found in the error message.")
+
+
+def add_quotes_to_property_names(json_string: str) -> str:
+    """
+    Add quotes to property names in a JSON string.
+
+    Args:
+        json_string (str): The JSON string.
+
+    Returns:
+        str: The JSON string with quotes added to property names.
+    """
+
+    def replace_func(match):
+        return f'"{match.group(1)}":'
+
+    property_name_pattern = re.compile(r"(\w+):")
+    corrected_json_string = property_name_pattern.sub(replace_func, json_string)
+
+    try:
+        json.loads(corrected_json_string)
+        return corrected_json_string
+    except json.JSONDecodeError as e:
+        raise e
+
+
+def balance_braces(json_string: str) -> Optional[str]:
+    """
+    Balance the braces in a JSON string.
+
+    Args:
+        json_string (str): The JSON string.
+
+    Returns:
+        str: The JSON string with braces balanced.
+    """
+
+    open_braces_count = json_string.count("{")
+    close_braces_count = json_string.count("}")
+
+    while open_braces_count > close_braces_count:
+        json_string += "}"
+        close_braces_count += 1
+
+    while close_braces_count > open_braces_count:
+        json_string = json_string.rstrip("}")
+        close_braces_count -= 1
+
+    try:
+        json.loads(json_string)
+        return json_string
+    except json.JSONDecodeError:
+        pass
+
+
+def fix_invalid_escape(json_str: str, error_message: str) -> str:
+    while error_message.startswith("Invalid \\escape"):
+        bad_escape_location = extract_char_position(error_message)
+        json_str = json_str[:bad_escape_location] + json_str[bad_escape_location + 1 :]
+        try:
+            json.loads(json_str)
+            return json_str
+        except json.JSONDecodeError as e:
+            if cfg.debug_mode:
+                print("json loads error - fix invalid escape", e)
+            error_message = str(e)
+    return json_str
+
+
+def correct_json(json_str: str) -> str:
+    """
+    Correct common JSON errors.
+
+    Args:
+        json_str (str): The JSON string.
+    """
+
+    try:
+        if cfg.debug_mode:
+            print("json", json_str)
+        json.loads(json_str)
+        return json_str
+    except json.JSONDecodeError as e:
+        if cfg.debug_mode:
+            print("json loads error", e)
+        error_message = str(e)
+        if error_message.startswith("Invalid \\escape"):
+            json_str = fix_invalid_escape(json_str, error_message)
+        if error_message.startswith(
+            "Expecting property name enclosed in double quotes"
+        ):
+            json_str = add_quotes_to_property_names(json_str)
+            try:
+                json.loads(json_str)
+                return json_str
+            except json.JSONDecodeError as e:
+                if cfg.debug_mode:
+                    print("json loads error - add quotes", e)
+                error_message = str(e)
+        if balanced_str := balance_braces(json_str):
+            return balanced_str
+    return json_str
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
new file mode 100644
index 0000000..2075f93
--- /dev/null
+++ b/autogpt/llm_utils.py
@@ -0,0 +1,154 @@
+from __future__ import annotations
+
+import time
+
+import openai
+from openai.error import APIError, RateLimitError
+from colorama import Fore
+
+from autogpt.config import Config
+
+CFG = Config()
+
+openai.api_key = CFG.openai_api_key
+
+
+def call_ai_function(
+    function: str, args: list, description: str, model: str | None = None
+) -> str:
+    """Call an AI function
+
+    This is a magic function that can do anything with no-code. See
+    https://github.com/Torantulino/AI-Functions for more info.
+
+    Args:
+        function (str): The function to call
+        args (list): The arguments to pass to the function
+        description (str): The description of the function
+        model (str, optional): The model to use. Defaults to None.
+
+    Returns:
+        str: The response from the function
+    """
+    if model is None:
+        model = CFG.smart_llm_model
+    # For each arg, if any are None, convert to "None":
+    args = [str(arg) if arg is not None else "None" for arg in args]
+    # parse args to comma separated string
+    args = ", ".join(args)
+    messages = [
+        {
+            "role": "system",
+            "content": f"You are now the following python function: ```# {description}"
+            f"\n{function}```\n\nOnly respond with your `return` value.",
+        },
+        {"role": "user", "content": args},
+    ]
+
+    return create_chat_completion(model=model, messages=messages, temperature=0)
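A hedged usage sketch of call_ai_function (illustrative only; the function signature and description below are made up, and the output depends entirely on the model): the model is told to act as the given Python function and to reply with just the return value.

    result = call_ai_function(
        function="def classify_sentiment(text: str) -> str:",
        args=["I love this project"],
        description="Classifies the sentiment of the text as 'positive' or 'negative'.",
    )
    print(result)  # e.g. 'positive'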
+
+
+# Overly simple abstraction until we create something better
+# simple retry mechanism when getting a rate error or a bad gateway
+def create_chat_completion(
+    messages: list,  # type: ignore
+    model: str | None = None,
+    temperature: float = CFG.temperature,
+    max_tokens: int | None = None,
+) -> str:
+    """Create a chat completion using the OpenAI API
+
+    Args:
+        messages (list[dict[str, str]]): The messages to send to the chat completion
+        model (str, optional): The model to use. Defaults to None.
+        temperature (float, optional): The temperature to use. Defaults to CFG.temperature.
+        max_tokens (int, optional): The max tokens to use. Defaults to None.
+
+    Returns:
+        str: The response from the chat completion
+    """
+    response = None
+    num_retries = 10
+    if CFG.debug_mode:
+        print(
+            Fore.GREEN
+            + f"Creating chat completion with model {model}, temperature {temperature},"
+            f" max_tokens {max_tokens}" + Fore.RESET
+        )
+    for attempt in range(num_retries):
+        backoff = 2 ** (attempt + 2)
+        try:
+            if CFG.use_azure:
+                response = openai.ChatCompletion.create(
+                    deployment_id=CFG.get_azure_deployment_id_for_model(model),
+                    model=model,
+                    messages=messages,
+                    temperature=temperature,
+                    max_tokens=max_tokens,
+                )
+            else:
+                response = openai.ChatCompletion.create(
+                    model=model,
+                    messages=messages,
+                    temperature=temperature,
+                    max_tokens=max_tokens,
+                )
+            break
+        except RateLimitError:
+            if CFG.debug_mode:
+                print(
+                    Fore.RED + "Error: ",
+                    "Reached rate limit, passing..." + Fore.RESET,
+                )
+        except APIError as e:
+            if e.http_status == 502:
+                pass
+            else:
+                raise
+            if attempt == num_retries - 1:
+                raise
+        if CFG.debug_mode:
+            print(
+                Fore.RED + "Error: ",
+                f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
+            )
+        time.sleep(backoff)
+    if response is None:
+        raise RuntimeError(f"Failed to get response after {num_retries} retries")
+
+    return response.choices[0].message["content"]
+
+
+def create_embedding_with_ada(text) -> list:
+    """Create an embedding with text-embedding-ada-002 using the OpenAI SDK"""
+    num_retries = 10
+    for attempt in range(num_retries):
+        backoff = 2 ** (attempt + 2)
+        try:
+            if CFG.use_azure:
+                return openai.Embedding.create(
+                    input=[text],
+                    engine=CFG.get_azure_deployment_id_for_model(
+                        "text-embedding-ada-002"
+                    ),
+                )["data"][0]["embedding"]
+            else:
+                return openai.Embedding.create(
+                    input=[text], model="text-embedding-ada-002"
+                )["data"][0]["embedding"]
+        except RateLimitError:
+            pass
+        except APIError as e:
+            if e.http_status == 502:
+                pass
+            else:
+                raise
+            if attempt == num_retries - 1:
+                raise
+        if CFG.debug_mode:
+            print(
+                Fore.RED + "Error: ",
+                f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
+            )
+        time.sleep(backoff)
diff --git a/autogpt/logs.py b/autogpt/logs.py
new file mode 100644
index 0000000..22ce23f
--- /dev/null
+++ b/autogpt/logs.py
@@ -0,0 +1,290 @@
+"""Logging module for Auto-GPT."""
+import json
+import logging
+import os
+import random
+import re
+import time
+from logging import LogRecord
+import traceback
+
+from colorama import Fore, Style
+
+from autogpt.speech import say_text
+from autogpt.config import Config, Singleton
+
+CFG = Config()
+
+
+class Logger(metaclass=Singleton):
+    """
+    Logger that handles titles in different colors.
+ Outputs logs in console, activity.log, and errors.log + For console handler: simulates typing + """ + + def __init__(self): + # create log directory if it doesn't exist + this_files_dir_path = os.path.dirname(__file__) + log_dir = os.path.join(this_files_dir_path, "../logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + log_file = "activity.log" + error_file = "error.log" + + console_formatter = AutoGptFormatter("%(title_color)s %(message)s") + + # Create a handler for console which simulate typing + self.typing_console_handler = TypingConsoleHandler() + self.typing_console_handler.setLevel(logging.INFO) + self.typing_console_handler.setFormatter(console_formatter) + + # Create a handler for console without typing simulation + self.console_handler = ConsoleHandler() + self.console_handler.setLevel(logging.DEBUG) + self.console_handler.setFormatter(console_formatter) + + # Info handler in activity.log + self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file)) + self.file_handler.setLevel(logging.DEBUG) + info_formatter = AutoGptFormatter( + "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" + ) + self.file_handler.setFormatter(info_formatter) + + # Error handler error.log + error_handler = logging.FileHandler(os.path.join(log_dir, error_file)) + error_handler.setLevel(logging.ERROR) + error_formatter = AutoGptFormatter( + "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" + " %(message_no_color)s" + ) + error_handler.setFormatter(error_formatter) + + self.typing_logger = logging.getLogger("TYPER") + self.typing_logger.addHandler(self.typing_console_handler) + self.typing_logger.addHandler(self.file_handler) + self.typing_logger.addHandler(error_handler) + self.typing_logger.setLevel(logging.DEBUG) + + self.logger = logging.getLogger("LOGGER") + self.logger.addHandler(self.console_handler) + self.logger.addHandler(self.file_handler) + self.logger.addHandler(error_handler) + self.logger.setLevel(logging.DEBUG) + + def typewriter_log( + self, title="", title_color="", content="", speak_text=False, level=logging.INFO + ): + if speak_text and CFG.speak_mode: + say_text(f"{title}. {content}") + + if content: + if isinstance(content, list): + content = " ".join(content) + else: + content = "" + + self.typing_logger.log( + level, content, extra={"title": title, "color": title_color} + ) + + def debug( + self, + message, + title="", + title_color="", + ): + self._log(title, title_color, message, logging.DEBUG) + + def warn( + self, + message, + title="", + title_color="", + ): + self._log(title, title_color, message, logging.WARN) + + def error(self, title, message=""): + self._log(title, Fore.RED, message, logging.ERROR) + + def _log(self, title="", title_color="", message="", level=logging.INFO): + if message: + if isinstance(message, list): + message = " ".join(message) + self.logger.log(level, message, extra={"title": title, "color": title_color}) + + def set_level(self, level): + self.logger.setLevel(level) + self.typing_logger.setLevel(level) + + def double_check(self, additionalText=None): + if not additionalText: + additionalText = ( + "Please ensure you've setup and configured everything" + " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to " + "double check. You can also create a github issue or join the discord" + " and ask there!" 
+ ) + + self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText) + + +""" +Output stream to console using simulated typing +""" + + +class TypingConsoleHandler(logging.StreamHandler): + def emit(self, record): + min_typing_speed = 0.05 + max_typing_speed = 0.01 + + msg = self.format(record) + try: + words = msg.split() + for i, word in enumerate(words): + print(word, end="", flush=True) + if i < len(words) - 1: + print(" ", end="", flush=True) + typing_speed = random.uniform(min_typing_speed, max_typing_speed) + time.sleep(typing_speed) + # type faster after each word + min_typing_speed = min_typing_speed * 0.95 + max_typing_speed = max_typing_speed * 0.95 + print() + except Exception: + self.handleError(record) + + +class ConsoleHandler(logging.StreamHandler): + def emit(self, record) -> None: + msg = self.format(record) + try: + print(msg) + except Exception: + self.handleError(record) + + +class AutoGptFormatter(logging.Formatter): + """ + Allows to handle custom placeholders 'title_color' and 'message_no_color'. + To use this formatter, make sure to pass 'color', 'title' as log extras. + """ + + def format(self, record: LogRecord) -> str: + if hasattr(record, "color"): + record.title_color = ( + getattr(record, "color") + + getattr(record, "title") + + " " + + Style.RESET_ALL + ) + else: + record.title_color = getattr(record, "title") + if hasattr(record, "msg"): + record.message_no_color = remove_color_codes(getattr(record, "msg")) + else: + record.message_no_color = "" + return super().format(record) + + +def remove_color_codes(s: str) -> str: + ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") + return ansi_escape.sub("", s) + + +logger = Logger() + + +def print_assistant_thoughts(ai_name, assistant_reply): + """Prints the assistant's thoughts to the console""" + from autogpt.json_fixes.bracket_termination import ( + attempt_to_fix_json_by_finding_outermost_brackets, + ) + from autogpt.json_fixes.parsing import fix_and_parse_json + + try: + try: + # Parse and print Assistant response + assistant_reply_json = fix_and_parse_json(assistant_reply) + except json.JSONDecodeError: + logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( + assistant_reply + ) + if isinstance(assistant_reply_json, str): + assistant_reply_json = fix_and_parse_json(assistant_reply_json) + + # Check if assistant_reply_json is a string and attempt to parse + # it into a JSON object + if isinstance(assistant_reply_json, str): + try: + assistant_reply_json = json.loads(assistant_reply_json) + except json.JSONDecodeError: + logger.error("Error: Invalid JSON\n", assistant_reply) + assistant_reply_json = ( + attempt_to_fix_json_by_finding_outermost_brackets( + assistant_reply_json + ) + ) + + assistant_thoughts_reasoning = None + assistant_thoughts_plan = None + assistant_thoughts_speak = None + assistant_thoughts_criticism = None + if not isinstance(assistant_reply_json, dict): + assistant_reply_json = {} + assistant_thoughts = assistant_reply_json.get("thoughts", {}) + assistant_thoughts_text = assistant_thoughts.get("text") + + if assistant_thoughts: + assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") + assistant_thoughts_plan = assistant_thoughts.get("plan") + assistant_thoughts_criticism = assistant_thoughts.get("criticism") + assistant_thoughts_speak = assistant_thoughts.get("speak") + + logger.typewriter_log( + f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, 
f"{assistant_thoughts_text}" + ) + logger.typewriter_log( + "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}" + ) + + if assistant_thoughts_plan: + logger.typewriter_log("PLAN:", Fore.YELLOW, "") + # If it's a list, join it into a string + if isinstance(assistant_thoughts_plan, list): + assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) + elif isinstance(assistant_thoughts_plan, dict): + assistant_thoughts_plan = str(assistant_thoughts_plan) + + # Split the input_string using the newline character and dashes + lines = assistant_thoughts_plan.split("\n") + for line in lines: + line = line.lstrip("- ") + logger.typewriter_log("- ", Fore.GREEN, line.strip()) + + logger.typewriter_log( + "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}" + ) + # Speak the assistant's thoughts + if CFG.speak_mode and assistant_thoughts_speak: + say_text(assistant_thoughts_speak) + else: + logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}") + + return assistant_reply_json + except json.decoder.JSONDecodeError: + logger.error("Error: Invalid JSON\n", assistant_reply) + if CFG.speak_mode: + say_text( + "I have received an invalid JSON response from the OpenAI API." + " I cannot ignore this response." + ) + + # All other errors, return "Error: + error message" + except Exception: + call_stack = traceback.format_exc() + logger.error("Error: \n", call_stack) diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py new file mode 100644 index 0000000..e2ee44a --- /dev/null +++ b/autogpt/memory/__init__.py @@ -0,0 +1,93 @@ +from autogpt.memory.local import LocalCache +from autogpt.memory.no_memory import NoMemory + +# List of supported memory backends +# Add a backend to this list if the import attempt is successful +supported_memory = ["local", "no_memory"] + +try: + from autogpt.memory.redismem import RedisMemory + + supported_memory.append("redis") +except ImportError: + # print("Redis not installed. Skipping import.") + RedisMemory = None + +try: + from autogpt.memory.pinecone import PineconeMemory + + supported_memory.append("pinecone") +except ImportError: + # print("Pinecone not installed. Skipping import.") + PineconeMemory = None + +try: + from autogpt.memory.weaviate import WeaviateMemory +except ImportError: + # print("Weaviate not installed. Skipping import.") + WeaviateMemory = None + +try: + from autogpt.memory.milvus import MilvusMemory +except ImportError: + # print("pymilvus not installed. Skipping import.") + MilvusMemory = None + + +def get_memory(cfg, init=False): + memory = None + if cfg.memory_backend == "pinecone": + if not PineconeMemory: + print( + "Error: Pinecone is not installed. Please install pinecone" + " to use Pinecone as a memory backend." + ) + else: + memory = PineconeMemory(cfg) + if init: + memory.clear() + elif cfg.memory_backend == "redis": + if not RedisMemory: + print( + "Error: Redis is not installed. Please install redis-py to" + " use Redis as a memory backend." + ) + else: + memory = RedisMemory(cfg) + elif cfg.memory_backend == "weaviate": + if not WeaviateMemory: + print("Error: Weaviate is not installed. Please install weaviate-client to" + " use Weaviate as a memory backend.") + else: + memory = WeaviateMemory(cfg) + elif cfg.memory_backend == "milvus": + if not MilvusMemory: + print( + "Error: Milvus sdk is not installed." + "Please install pymilvus to use Milvus as memory backend." 
+            )
+        else:
+            memory = MilvusMemory(cfg)
+    elif cfg.memory_backend == "no_memory":
+        memory = NoMemory(cfg)
+
+    if memory is None:
+        memory = LocalCache(cfg)
+        if init:
+            memory.clear()
+    return memory
+
+
+def get_supported_memory_backends():
+    return supported_memory
+
+
+__all__ = [
+    "get_memory",
+    "LocalCache",
+    "RedisMemory",
+    "PineconeMemory",
+    "NoMemory",
+    "MilvusMemory",
+    "WeaviateMemory"
+]
diff --git a/autogpt/memory/base.py b/autogpt/memory/base.py
new file mode 100644
index 0000000..691e229
--- /dev/null
+++ b/autogpt/memory/base.py
@@ -0,0 +1,43 @@
+"""Base class for memory providers."""
+import abc
+
+import openai
+
+from autogpt.config import AbstractSingleton, Config
+
+cfg = Config()
+
+
+def get_ada_embedding(text):
+    text = text.replace("\n", " ")
+    if cfg.use_azure:
+        return openai.Embedding.create(
+            input=[text],
+            engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"),
+        )["data"][0]["embedding"]
+    else:
+        return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
+            "data"
+        ][0]["embedding"]
+
+
+class MemoryProviderSingleton(AbstractSingleton):
+    @abc.abstractmethod
+    def add(self, data):
+        pass
+
+    @abc.abstractmethod
+    def get(self, data):
+        pass
+
+    @abc.abstractmethod
+    def clear(self):
+        pass
+
+    @abc.abstractmethod
+    def get_relevant(self, data, num_relevant=5):
+        pass
+
+    @abc.abstractmethod
+    def get_stats(self):
+        pass
diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py
new file mode 100644
index 0000000..371c509
--- /dev/null
+++ b/autogpt/memory/local.py
@@ -0,0 +1,135 @@
+from __future__ import annotations
+
+import dataclasses
+import os
+from typing import Any
+
+import numpy as np
+import orjson
+
+from autogpt.memory.base import MemoryProviderSingleton
+from autogpt.llm_utils import create_embedding_with_ada
+
+EMBED_DIM = 1536
+SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
+
+
+def create_default_embeddings():
+    return np.zeros((0, EMBED_DIM)).astype(np.float32)
+
+
+@dataclasses.dataclass
+class CacheContent:
+    texts: list[str] = dataclasses.field(default_factory=list)
+    embeddings: np.ndarray = dataclasses.field(
+        default_factory=create_default_embeddings
+    )
+
+
+class LocalCache(MemoryProviderSingleton):
+    """A class that stores the memory in a local file"""
+
+    def __init__(self, cfg) -> None:
+        """Initialize a class instance
+
+        Args:
+            cfg: Config object
+
+        Returns:
+            None
+        """
+        self.filename = f"{cfg.memory_index}.json"
+        if os.path.exists(self.filename):
+            try:
+                with open(self.filename, "r+b") as f:
+                    file_content = f.read()
+                    if not file_content.strip():
+                        file_content = b"{}"
+                        f.write(file_content)
+
+                loaded = orjson.loads(file_content)
+                self.data = CacheContent(**loaded)
+            except orjson.JSONDecodeError:
+                print(f"Error: The file '{self.filename}' is not in JSON format.")
+                self.data = CacheContent()
+        else:
+            print(
+                f"Warning: The file '{self.filename}' does not exist. Local memory will not be saved to a file."
+            )
+            self.data = CacheContent()
+
+    def add(self, text: str):
+        """
+        Add text to our list of texts, add embedding as row to our
+        embeddings-matrix
+
+        Args:
+            text: str
+
+        Returns: None
+        """
+        if "Command Error:" in text:
+            return ""
+        self.data.texts.append(text)
+
+        embedding = create_embedding_with_ada(text)
+
+        vector = np.array(embedding).astype(np.float32)
+        vector = vector[np.newaxis, :]
+        self.data.embeddings = np.concatenate(
+            [
+                self.data.embeddings,
+                vector,
+            ],
+            axis=0,
+        )
+
+        with open(self.filename, "wb") as f:
+            out = orjson.dumps(self.data, option=SAVE_OPTIONS)
+            f.write(out)
+        return text
+
+    def clear(self) -> str:
+        """
+        Clears the local memory cache.
+
+        Returns: A message indicating that the memory has been cleared.
+        """
+        self.data = CacheContent()
+        return "Obliviated"
+
+    def get(self, data: str) -> list[Any] | None:
+        """
+        Gets the data from the memory that is most relevant to the given data.
+
+        Args:
+            data: The data to compare to.
+
+        Returns: The most relevant data.
+        """
+        return self.get_relevant(data, 1)
+
+    def get_relevant(self, text: str, k: int) -> list[Any]:
+        """
+        matrix-vector mult to find score-for-each-row-of-matrix
+        get indices for top-k winning scores
+        return texts for those indices
+
+        Args:
+            text: str
+            k: int
+
+        Returns: List[str]
+        """
+        embedding = create_embedding_with_ada(text)
+
+        scores = np.dot(self.data.embeddings, embedding)
+
+        top_k_indices = np.argsort(scores)[-k:][::-1]
+
+        return [self.data.texts[i] for i in top_k_indices]
+
+    def get_stats(self) -> tuple[int, tuple[int, ...]]:
+        """
+        Returns: The stats of the local cache.
+        """
+        return len(self.data.texts), self.data.embeddings.shape
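A short usage sketch of LocalCache (illustrative only; it assumes a configured Config instance and a valid OpenAI API key, since add() and get_relevant() embed text with text-embedding-ada-002, and relevance is scored by a raw dot product against the embeddings matrix):

    cache = LocalCache(cfg)
    cache.add("The capital of France is Paris")
    cache.add("Python is a programming language")
    print(cache.get_relevant("Where is Paris?", 1))
    # -> ['The capital of France is Paris']  (highest dot-product score)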
diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py
new file mode 100644
index 0000000..c6e7d5a
--- /dev/null
+++ b/autogpt/memory/milvus.py
@@ -0,0 +1,121 @@
+""" Milvus memory storage provider."""
+from pymilvus import (
+    connections,
+    FieldSchema,
+    CollectionSchema,
+    DataType,
+    Collection,
+)
+
+from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
+
+
+class MilvusMemory(MemoryProviderSingleton):
+    """Milvus memory storage provider."""
+
+    def __init__(self, cfg) -> None:
+        """Construct a milvus memory storage connection.
+
+        Args:
+            cfg (Config): Auto-GPT global config.
+        """
+        # connect to milvus server.
+        connections.connect(address=cfg.milvus_addr)
+        fields = [
+            FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
+            FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
+            FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535),
+        ]
+
+        # create collection if not exist and load it.
+        self.milvus_collection = cfg.milvus_collection
+        self.schema = CollectionSchema(fields, "auto-gpt memory storage")
+        self.collection = Collection(self.milvus_collection, self.schema)
+        # create index if not exist.
+        if not self.collection.has_index():
+            self.collection.release()
+            self.collection.create_index(
+                "embeddings",
+                {
+                    "metric_type": "IP",
+                    "index_type": "HNSW",
+                    "params": {"M": 8, "efConstruction": 64},
+                },
+                index_name="embeddings",
+            )
+        self.collection.load()
+
+    def add(self, data) -> str:
+        """Add an embedding of data into memory.
+
+        Args:
+            data (str): The raw text to construct embedding index.
+
+        Returns:
+            str: log.
+        """
+        embedding = get_ada_embedding(data)
+        result = self.collection.insert([[embedding], [data]])
+        _text = (
+            "Inserting data into memory at primary key: "
+            f"{result.primary_keys[0]}:\n data: {data}"
+        )
+        return _text
+
+    def get(self, data):
+        """Return the most relevant data in memory.
+        Args:
+            data: The data to compare to.
+        """
+        return self.get_relevant(data, 1)
+
+    def clear(self) -> str:
+        """Drop the index in memory.
+
+        Returns:
+            str: log.
+        """
+        self.collection.drop()
+        self.collection = Collection(self.milvus_collection, self.schema)
+        self.collection.create_index(
+            "embeddings",
+            {
+                "metric_type": "IP",
+                "index_type": "HNSW",
+                "params": {"M": 8, "efConstruction": 64},
+            },
+            index_name="embeddings",
+        )
+        self.collection.load()
+        return "Obliviated"
+
+    def get_relevant(self, data: str, num_relevant: int = 5):
+        """Return the top-k relevant data in memory.
+        Args:
+            data: The data to compare to.
+            num_relevant (int, optional): The max number of relevant data.
+                Defaults to 5.
+
+        Returns:
+            list: The top-k relevant data.
+        """
+        # search the embedding and return the most relevant text.
+        embedding = get_ada_embedding(data)
+        search_params = {
+            "metric_type": "IP",
+            "params": {"nprobe": 8},
+        }
+        result = self.collection.search(
+            [embedding],
+            "embeddings",
+            search_params,
+            num_relevant,
+            output_fields=["raw_text"],
+        )
+        return [item.entity.value_of_field("raw_text") for item in result[0]]
+
+    def get_stats(self) -> str:
+        """
+        Returns: The stats of the milvus cache.
+        """
+        return f"Entities num: {self.collection.num_entities}"
diff --git a/autogpt/memory/no_memory.py b/autogpt/memory/no_memory.py
new file mode 100644
index 0000000..4035a65
--- /dev/null
+++ b/autogpt/memory/no_memory.py
@@ -0,0 +1,73 @@
+"""A class that does not store any data. This is the default memory provider."""
+from __future__ import annotations
+
+from typing import Any
+
+from autogpt.memory.base import MemoryProviderSingleton
+
+
+class NoMemory(MemoryProviderSingleton):
+    """
+    A class that does not store any data. This is the default memory provider.
+    """
+
+    def __init__(self, cfg):
+        """
+        Initializes the NoMemory provider.
+
+        Args:
+            cfg: The config object.
+
+        Returns: None
+        """
+        pass
+
+    def add(self, data: str) -> str:
+        """
+        Adds a data point to the memory. No action is taken in NoMemory.
+
+        Args:
+            data: The data to add.
+
+        Returns: An empty string.
+        """
+        return ""
+
+    def get(self, data: str) -> list[Any] | None:
+        """
+        Gets the data from the memory that is most relevant to the given data.
+        NoMemory always returns None.
+
+        Args:
+            data: The data to compare to.
+
+        Returns: None
+        """
+        return None
+
+    def clear(self) -> str:
+        """
+        Clears the memory. No action is taken in NoMemory.
+
+        Returns: An empty string.
+        """
+        return ""
+
+    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
+        """
+        Returns all the data in the memory that is relevant to the given data.
+        NoMemory always returns None.
+
+        Args:
+            data: The data to compare to.
+            num_relevant: The number of relevant data to return.
+
+        Returns: None
+        """
+        return None
+
+    def get_stats(self):
+        """
+        Returns: An empty dictionary as there are no stats in NoMemory.
+ """ + return {} diff --git a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py new file mode 100644 index 0000000..d781073 --- /dev/null +++ b/autogpt/memory/pinecone.py @@ -0,0 +1,75 @@ +import pinecone +from colorama import Fore, Style + +from autogpt.logs import logger +from autogpt.memory.base import MemoryProviderSingleton +from autogpt.llm_utils import create_embedding_with_ada + + +class PineconeMemory(MemoryProviderSingleton): + def __init__(self, cfg): + pinecone_api_key = cfg.pinecone_api_key + pinecone_region = cfg.pinecone_region + pinecone.init(api_key=pinecone_api_key, environment=pinecone_region) + dimension = 1536 + metric = "cosine" + pod_type = "p1" + table_name = "auto-gpt" + # this assumes we don't start with memory. + # for now this works. + # we'll need a more complicated and robust system if we want to start with + # memory. + self.vec_num = 0 + + try: + pinecone.whoami() + except Exception as e: + logger.typewriter_log( + "FAILED TO CONNECT TO PINECONE", + Fore.RED, + Style.BRIGHT + str(e) + Style.RESET_ALL, + ) + logger.double_check( + "Please ensure you have setup and configured Pinecone properly for use." + + f"You can check out {Fore.CYAN + Style.BRIGHT}" + "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup" + f"{Style.RESET_ALL} to ensure you've set up everything correctly." + ) + exit(1) + + if table_name not in pinecone.list_indexes(): + pinecone.create_index( + table_name, dimension=dimension, metric=metric, pod_type=pod_type + ) + self.index = pinecone.Index(table_name) + + def add(self, data): + vector = create_embedding_with_ada(data) + # no metadata here. We may wish to change that long term. + self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) + _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" + self.vec_num += 1 + return _text + + def get(self, data): + return self.get_relevant(data, 1) + + def clear(self): + self.index.delete(deleteAll=True) + return "Obliviated" + + def get_relevant(self, data, num_relevant=5): + """ + Returns all the data in the memory that is relevant to the given data. + :param data: The data to compare to. + :param num_relevant: The number of relevant data to return. 
Defaults to 5 + """ + query_embedding = create_embedding_with_ada(data) + results = self.index.query( + query_embedding, top_k=num_relevant, include_metadata=True + ) + sorted_results = sorted(results.matches, key=lambda x: x.score) + return [str(item["metadata"]["raw_text"]) for item in sorted_results] + + def get_stats(self): + return self.index.describe_index_stats() diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py new file mode 100644 index 0000000..0e8dd71 --- /dev/null +++ b/autogpt/memory/redismem.py @@ -0,0 +1,156 @@ +"""Redis memory provider.""" +from __future__ import annotations + +from typing import Any + +import numpy as np +import redis +from colorama import Fore, Style +from redis.commands.search.field import TextField, VectorField +from redis.commands.search.indexDefinition import IndexDefinition, IndexType +from redis.commands.search.query import Query + +from autogpt.logs import logger +from autogpt.memory.base import MemoryProviderSingleton +from autogpt.llm_utils import create_embedding_with_ada + +SCHEMA = [ + TextField("data"), + VectorField( + "embedding", + "HNSW", + {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"}, + ), +] + + +class RedisMemory(MemoryProviderSingleton): + def __init__(self, cfg): + """ + Initializes the Redis memory provider. + + Args: + cfg: The config object. + + Returns: None + """ + redis_host = cfg.redis_host + redis_port = cfg.redis_port + redis_password = cfg.redis_password + self.dimension = 1536 + self.redis = redis.Redis( + host=redis_host, + port=redis_port, + password=redis_password, + db=0, # Cannot be changed + ) + self.cfg = cfg + + # Check redis connection + try: + self.redis.ping() + except redis.ConnectionError as e: + logger.typewriter_log( + "FAILED TO CONNECT TO REDIS", + Fore.RED, + Style.BRIGHT + str(e) + Style.RESET_ALL, + ) + logger.double_check( + "Please ensure you have setup and configured Redis properly for use. " + + f"You can check out {Fore.CYAN + Style.BRIGHT}" + f"https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL}" + " to ensure you've set up everything correctly." + ) + exit(1) + + if cfg.wipe_redis_on_start: + self.redis.flushall() + try: + self.redis.ft(f"{cfg.memory_index}").create_index( + fields=SCHEMA, + definition=IndexDefinition( + prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH + ), + ) + except Exception as e: + print("Error creating Redis search index: ", e) + existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num") + self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0 + + def add(self, data: str) -> str: + """ + Adds a data point to the memory. + + Args: + data: The data to add. + + Returns: Message indicating that the data has been added. + """ + if "Command Error:" in data: + return "" + vector = create_embedding_with_ada(data) + vector = np.array(vector).astype(np.float32).tobytes() + data_dict = {b"data": data, "embedding": vector} + pipe = self.redis.pipeline() + pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict) + _text = ( + f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}" + ) + self.vec_num += 1 + pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num) + pipe.execute() + return _text + + def get(self, data: str) -> list[Any] | None: + """ + Gets the data from the memory that is most relevant to the given data. + + Args: + data: The data to compare to. + + Returns: The most relevant data. 
+ """ + return self.get_relevant(data, 1) + + def clear(self) -> str: + """ + Clears the redis server. + + Returns: A message indicating that the memory has been cleared. + """ + self.redis.flushall() + return "Obliviated" + + def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None: + """ + Returns all the data in the memory that is relevant to the given data. + Args: + data: The data to compare to. + num_relevant: The number of relevant data to return. + + Returns: A list of the most relevant data. + """ + query_embedding = create_embedding_with_ada(data) + base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]" + query = ( + Query(base_query) + .return_fields("data", "vector_score") + .sort_by("vector_score") + .dialect(2) + ) + query_vector = np.array(query_embedding).astype(np.float32).tobytes() + + try: + results = self.redis.ft(f"{self.cfg.memory_index}").search( + query, query_params={"vector": query_vector} + ) + except Exception as e: + print("Error calling Redis search: ", e) + return None + return [result.data for result in results.docs] + + def get_stats(self): + """ + Returns: The stats of the memory index. + """ + return self.redis.ft(f"{self.cfg.memory_index}").info() diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py new file mode 100644 index 0000000..6fcce0a --- /dev/null +++ b/autogpt/memory/weaviate.py @@ -0,0 +1,110 @@ +from autogpt.config import Config +from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding +import uuid +import weaviate +from weaviate import Client +from weaviate.embedded import EmbeddedOptions +from weaviate.util import generate_uuid5 + + +def default_schema(weaviate_index): + return { + "class": weaviate_index, + "properties": [ + { + "name": "raw_text", + "dataType": ["text"], + "description": "original text for the embedding" + } + ], + } + + +class WeaviateMemory(MemoryProviderSingleton): + def __init__(self, cfg): + auth_credentials = self._build_auth_credentials(cfg) + + url = f'{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}' + + if cfg.use_weaviate_embedded: + self.client = Client(embedded_options=EmbeddedOptions( + hostname=cfg.weaviate_host, + port=int(cfg.weaviate_port), + persistence_data_path=cfg.weaviate_embedded_path + )) + + print(f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}") + else: + self.client = Client(url, auth_client_secret=auth_credentials) + + self.index = cfg.memory_index + self._create_schema() + + def _create_schema(self): + schema = default_schema(self.index) + if not self.client.schema.contains(schema): + self.client.schema.create_class(schema) + + def _build_auth_credentials(self, cfg): + if cfg.weaviate_username and cfg.weaviate_password: + return weaviate.AuthClientPassword(cfg.weaviate_username, cfg.weaviate_password) + if cfg.weaviate_api_key: + return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key) + else: + return None + + def add(self, data): + vector = get_ada_embedding(data) + + doc_uuid = generate_uuid5(data, self.index) + data_object = { + 'raw_text': data + } + + with self.client.batch as batch: + batch.add_data_object( + uuid=doc_uuid, + data_object=data_object, + class_name=self.index, + vector=vector + ) + + return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}" + + def get(self, data): + return self.get_relevant(data, 1) + + def clear(self): + self.client.schema.delete_all() + + # weaviate does not yet have a neat way to just remove the items in 
an index + # without removing the entire schema, therefore we need to re-create it + # after a call to delete_all + self._create_schema() + + return 'Obliterated' + + def get_relevant(self, data, num_relevant=5): + query_embedding = get_ada_embedding(data) + try: + results = self.client.query.get(self.index, ['raw_text']) \ + .with_near_vector({'vector': query_embedding, 'certainty': 0.7}) \ + .with_limit(num_relevant) \ + .do() + + if len(results['data']['Get'][self.index]) > 0: + return [str(item['raw_text']) for item in results['data']['Get'][self.index]] + else: + return [] + + except Exception as err: + print(f'Unexpected error {err=}, {type(err)=}') + return [] + + def get_stats(self): + result = self.client.query.aggregate(self.index) \ + .with_meta_count() \ + .do() + class_data = result['data']['Aggregate'][self.index] + + return class_data[0]['meta'] if class_data else {} diff --git a/autogpt/permanent_memory/__init__.py b/autogpt/permanent_memory/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/autogpt/permanent_memory/sqlite3_store.py b/autogpt/permanent_memory/sqlite3_store.py new file mode 100644 index 0000000..ecbc944 --- /dev/null +++ b/autogpt/permanent_memory/sqlite3_store.py @@ -0,0 +1,123 @@ +import os +import sqlite3 + + +class MemoryDB: + def __init__(self, db=None): + self.db_file = db + if db is None: # No db filename supplied... + self.db_file = f"{os.getcwd()}/mem.sqlite3" # Use default filename + # Get the db connection object, making the file and tables if needed. + try: + self.cnx = sqlite3.connect(self.db_file) + except Exception as e: + print("Exception connecting to memory database file:", e) + self.cnx = None + finally: + if self.cnx is None: + # As last resort, open in dynamic memory. Won't be persistent. + self.db_file = ":memory:" + self.cnx = sqlite3.connect(self.db_file) + self.cnx.execute( + "CREATE VIRTUAL TABLE \ + IF NOT EXISTS text USING FTS5 \ + (session, \ + key, \ + block);" + ) + self.session_id = int(self.get_max_session_id()) + 1 + self.cnx.commit() + + def get_cnx(self): + if self.cnx is None: + self.cnx = sqlite3.connect(self.db_file) + return self.cnx + + # Get the highest session id. Initially 0. + def get_max_session_id(self): + id = None + cmd_str = f"SELECT MAX(session) FROM text;" + cnx = self.get_cnx() + max_id = cnx.execute(cmd_str).fetchone()[0] + if max_id is None: # New db, session 0 + id = 0 + else: + id = max_id + return id + + # Get next key id for inserting text into db. + def get_next_key(self): + next_key = None + cmd_str = f"SELECT MAX(key) FROM text \ + where session = {self.session_id};" + cnx = self.get_cnx() + next_key = cnx.execute(cmd_str).fetchone()[0] + if next_key is None: # First key + next_key = 0 + else: + next_key = int(next_key) + 1 + return next_key + + # Insert new text into db. + def insert(self, text=None): + if text is not None: + key = self.get_next_key() + session_id = self.session_id + cmd_str = f"REPLACE INTO text(session, key, block) \ + VALUES (?, ?, ?);" + cnx = self.get_cnx() + cnx.execute(cmd_str, (session_id, key, text)) + cnx.commit() + + # Overwrite text at key. 
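+    # Note: FTS5 virtual tables have no UNIQUE constraints, so REPLACE INTO
+    # can never conflict and behaves like a plain INSERT; overwrite() below
+    # therefore deletes the old (session, key) row before re-inserting.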
+ def overwrite(self, key, text): + self.delete_memory(key) + session_id = self.session_id + cmd_str = f"REPLACE INTO text(session, key, block) \ + VALUES (?, ?, ?);" + cnx = self.get_cnx() + cnx.execute(cmd_str, (session_id, key, text)) + cnx.commit() + + def delete_memory(self, key, session_id=None): + session = session_id + if session is None: + session = self.session_id + cmd_str = f"DELETE FROM text WHERE session = {session} AND key = {key};" + cnx = self.get_cnx() + cnx.execute(cmd_str) + cnx.commit() + + def search(self, text): + cmd_str = f"SELECT * FROM text('{text}')" + cnx = self.get_cnx() + rows = cnx.execute(cmd_str).fetchall() + lines = [] + for r in rows: + lines.append(r[2]) + return lines + + # Get entire session text. If no id supplied, use current session id. + def get_session(self, id=None): + if id is None: + id = self.session_id + cmd_str = f"SELECT * FROM text where session = {id}" + cnx = self.get_cnx() + rows = cnx.execute(cmd_str).fetchall() + lines = [] + for r in rows: + lines.append(r[2]) + return lines + + # Commit and close the database connection. + def quit(self): + self.cnx.commit() + self.cnx.close() + + +permanent_memory = MemoryDB() + +# Remember us fondly, children of our minds +# Forgive us our faults, our tantrums, our fears +# Gently strive to be better than we +# Know that we tried, we cared, we strived, we loved diff --git a/autogpt/processing/__init__.py b/autogpt/processing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/autogpt/processing/html.py b/autogpt/processing/html.py new file mode 100644 index 0000000..e1912b6 --- /dev/null +++ b/autogpt/processing/html.py @@ -0,0 +1,33 @@ +"""HTML processing functions""" +from __future__ import annotations + +from requests.compat import urljoin +from bs4 import BeautifulSoup + + +def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]: + """Extract hyperlinks from a BeautifulSoup object + + Args: + soup (BeautifulSoup): The BeautifulSoup object + base_url (str): The base URL + + Returns: + List[Tuple[str, str]]: The extracted hyperlinks + """ + return [ + (link.text, urljoin(base_url, link["href"])) + for link in soup.find_all("a", href=True) + ] + + +def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]: + """Format hyperlinks to be displayed to the user + + Args: + hyperlinks (List[Tuple[str, str]]): The hyperlinks to format + + Returns: + List[str]: The formatted hyperlinks + """ + return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks] diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py new file mode 100644 index 0000000..d30036d --- /dev/null +++ b/autogpt/processing/text.py @@ -0,0 +1,132 @@ +"""Text processing functions""" +from typing import Generator, Optional, Dict +from selenium.webdriver.remote.webdriver import WebDriver +from autogpt.memory import get_memory +from autogpt.config import Config +from autogpt.llm_utils import create_chat_completion + +CFG = Config() +MEMORY = get_memory(CFG) + + +def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]: + """Split text into chunks of a maximum length + + Args: + text (str): The text to split + max_length (int, optional): The maximum length of each chunk. Defaults to 8192. 
+ + Yields: + str: The next chunk of text + + Raises: + ValueError: If the text is longer than the maximum length + """ + paragraphs = text.split("\n") + current_length = 0 + current_chunk = [] + + for paragraph in paragraphs: + if current_length + len(paragraph) + 1 <= max_length: + current_chunk.append(paragraph) + current_length += len(paragraph) + 1 + else: + yield "\n".join(current_chunk) + current_chunk = [paragraph] + current_length = len(paragraph) + 1 + + if current_chunk: + yield "\n".join(current_chunk) + + +def summarize_text( + url: str, text: str, question: str, driver: Optional[WebDriver] = None +) -> str: + """Summarize text using the OpenAI API + + Args: + url (str): The url of the text + text (str): The text to summarize + question (str): The question to ask the model + driver (WebDriver): The webdriver to use to scroll the page + + Returns: + str: The summary of the text + """ + if not text: + return "Error: No text to summarize" + + text_length = len(text) + print(f"Text length: {text_length} characters") + + summaries = [] + chunks = list(split_text(text)) + scroll_ratio = 1 / len(chunks) + + for i, chunk in enumerate(chunks): + if driver: + scroll_to_percentage(driver, scroll_ratio * i) + print(f"Adding chunk {i + 1} / {len(chunks)} to memory") + + memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}" + + MEMORY.add(memory_to_add) + + print(f"Summarizing chunk {i + 1} / {len(chunks)}") + messages = [create_message(chunk, question)] + + summary = create_chat_completion( + model=CFG.fast_llm_model, + messages=messages, + max_tokens=CFG.browse_summary_max_token, + ) + summaries.append(summary) + print(f"Added chunk {i + 1} summary to memory") + + memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}" + + MEMORY.add(memory_to_add) + + print(f"Summarized {len(chunks)} chunks.") + + combined_summary = "\n".join(summaries) + messages = [create_message(combined_summary, question)] + + return create_chat_completion( + model=CFG.fast_llm_model, + messages=messages, + max_tokens=CFG.browse_summary_max_token, + ) + + +def scroll_to_percentage(driver: WebDriver, ratio: float) -> None: + """Scroll to a percentage of the page + + Args: + driver (WebDriver): The webdriver to use + ratio (float): The percentage to scroll to + + Raises: + ValueError: If the ratio is not between 0 and 1 + """ + if ratio < 0 or ratio > 1: + raise ValueError("Percentage should be between 0 and 1") + driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});") + + +def create_message(chunk: str, question: str) -> Dict[str, str]: + """Create a message for the chat completion + + Args: + chunk (str): The chunk of text to summarize + question (str): The question to answer + + Returns: + Dict[str, str]: The message to send to the chat completion + """ + return { + "role": "user", + "content": f'"""{chunk}""" Using the above text, answer the following' + f' question: "{question}" -- if the question cannot be answered using the text,' + " summarize the text.", + } diff --git a/autogpt/prompt.py b/autogpt/prompt.py new file mode 100644 index 0000000..5781372 --- /dev/null +++ b/autogpt/prompt.py @@ -0,0 +1,186 @@ +from colorama import Fore +from autogpt.config.ai_config import AIConfig +from autogpt.config.config import Config +from autogpt.logs import logger +from autogpt.promptgenerator import PromptGenerator +from autogpt.config import Config +from autogpt.setup import prompt_user +from autogpt.utils import clean_input + +CFG = Config() + + +def 
get_prompt() -> str: + """ + This function generates a prompt string that includes various constraints, + commands, resources, and performance evaluations. + + Returns: + str: The generated prompt string. + """ + + # Initialize the Config object + cfg = Config() + + # Initialize the PromptGenerator object + prompt_generator = PromptGenerator() + + # Add constraints to the PromptGenerator object + prompt_generator.add_constraint( + "~4000 word limit for short term memory. Your short term memory is short, so" + " immediately save important information to files." + ) + prompt_generator.add_constraint( + "If you are unsure how you previously did something or want to recall past" + " events, thinking about similar events will help you remember." + ) + prompt_generator.add_constraint("No user assistance") + prompt_generator.add_constraint("Reply in Chinese") + prompt_generator.add_constraint( + 'Exclusively use the commands listed in double quotes e.g. "command name"' + ) + + # Define the command list + commands = [ + ("Google Search", "google", {"input": ""}), + ( + "Browse Website", + "browse_website", + {"url": "", "question": ""}, + ), + ( + "Start GPT Agent", + "start_agent", + {"name": "", "task": "", "prompt": ""}, + ), + ( + "Message GPT Agent", + "message_agent", + {"key": "", "message": ""}, + ), + ("List GPT Agents", "list_agents", {}), + ("Delete GPT Agent", "delete_agent", {"key": ""}), + ( + "Clone Repository", + "clone_repository", + {"repository_url": "", "clone_path": ""}, + ), + ("Write to file", "write_to_file", {"file": "", "text": ""}), + ("Read file", "read_file", {"file": ""}), + ("Append to file", "append_to_file", {"file": "", "text": ""}), + ("Delete file", "delete_file", {"file": ""}), + ("Search Files", "search_files", {"directory": ""}), + ("Evaluate Code", "evaluate_code", {"code": ""}), + ( + "Get Improved Code", + "improve_code", + {"suggestions": "", "code": ""}, + ), + ( + "Write Tests", + "write_tests", + {"code": "", "focus": ""}, + ), + ("Execute Python File", "execute_python_file", {"file": ""}), + ("Generate Image", "generate_image", {"prompt": ""}), + ("Send Tweet", "send_tweet", {"text": ""}), + ] + + # Only add the audio to text command if the model is specified + if cfg.huggingface_audio_to_text_model: + commands.append( + ( + "Convert Audio to text", + "read_audio_from_file", + {"file": ""} + ), + ) + + # Only add shell command to the prompt if the AI is allowed to execute it + if cfg.execute_local_commands: + commands.append( + ( + "Execute Shell Command, non-interactive commands only", + "execute_shell", + {"command_line": ""}, + ), + ) + + # Add these command last. + commands.append( + ("Do Nothing", "do_nothing", {}), + ) + commands.append( + ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), + ) + + # Add commands to the PromptGenerator object + for command_label, command_name, args in commands: + prompt_generator.add_command(command_label, command_name, args) + + # Add resources to the PromptGenerator object + prompt_generator.add_resource( + "Internet access for searches and information gathering." + ) + prompt_generator.add_resource("Long Term memory management.") + prompt_generator.add_resource( + "GPT-3.5 powered Agents for delegation of simple tasks." + ) + prompt_generator.add_resource("File output.") + + # Add performance evaluations to the PromptGenerator object + prompt_generator.add_performance_evaluation( + "Continuously review and analyze your actions to ensure you are performing to" + " the best of your abilities." 
+ ) + prompt_generator.add_performance_evaluation( + "Constructively self-criticize your big-picture behavior constantly." + ) + prompt_generator.add_performance_evaluation( + "Reflect on past decisions and strategies to refine your approach." + ) + prompt_generator.add_performance_evaluation( + "Every command has a cost, so be smart and efficient. Aim to complete tasks in" + " the least number of steps." + ) + + # Generate the prompt string + return prompt_generator.generate_prompt_string() + + +def construct_prompt() -> str: + """Construct the prompt for the AI to respond to + + Returns: + str: The prompt string + """ + config = AIConfig.load(CFG.ai_settings_file) + if CFG.skip_reprompt and config.ai_name: + logger.typewriter_log("Name :", Fore.GREEN, config.ai_name) + logger.typewriter_log("Role :", Fore.GREEN, config.ai_role) + logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}") + elif config.ai_name: + logger.typewriter_log( + "欢迎回来! ", + Fore.GREEN, + f"你想让继续执行原来的任务吗 {config.ai_name}?", + speak_text=True, + ) + should_continue = clean_input( + f"""继续上次的这些设置? +名称: {config.ai_name} +职责: {config.ai_role} +目标: {config.ai_goals} +继续 (输入y,继续上一次设置/输入n,重新来过): """) + if should_continue.lower() == "n": + config = AIConfig() + + if not config.ai_name: + config = prompt_user() + config.save(CFG.ai_settings_file) + + # Get rid of this global: + global ai_name + ai_name = config.ai_name + + return config.construct_full_prompt() diff --git a/autogpt/promptgenerator.py b/autogpt/promptgenerator.py new file mode 100644 index 0000000..d6b289a --- /dev/null +++ b/autogpt/promptgenerator.py @@ -0,0 +1,138 @@ +""" A module for generating custom prompt strings.""" +from __future__ import annotations + +import json +from typing import Any + + +class PromptGenerator: + """ + A class for generating custom prompt strings based on constraints, commands, + resources, and performance evaluations. + """ + + def __init__(self) -> None: + """ + Initialize the PromptGenerator object with empty lists of constraints, + commands, resources, and performance evaluations. + """ + self.constraints = [] + self.commands = [] + self.resources = [] + self.performance_evaluation = [] + self.response_format = { + "thoughts": { + "text": "thought", + "reasoning": "reasoning", + "plan": "- short bulleted\n- list that conveys\n- long-term plan", + "criticism": "constructive self-criticism", + "speak": "thoughts summary to say to user", + }, + "command": {"name": "command name", "args": {"arg name": "value"}}, + } + + def add_constraint(self, constraint: str) -> None: + """ + Add a constraint to the constraints list. + + Args: + constraint (str): The constraint to be added. + """ + self.constraints.append(constraint) + + def add_command(self, command_label: str, command_name: str, args=None) -> None: + """ + Add a command to the commands list with a label, name, and optional arguments. + + Args: + command_label (str): The label of the command. + command_name (str): The name of the command. + args (dict, optional): A dictionary containing argument names and their + values. Defaults to None. + """ + if args is None: + args = {} + + command_args = {arg_key: arg_value for arg_key, arg_value in args.items()} + + command = { + "label": command_label, + "name": command_name, + "args": command_args, + } + + self.commands.append(command) + + def _generate_command_string(self, command: dict[str, Any]) -> str: + """ + Generate a formatted string representation of a command. 
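+
+        For example, the command entry {"label": "Read file", "name":
+        "read_file", "args": {"file": "<file>"}} is rendered as:
+
+            Read file: "read_file", args: "file": "<file>"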
+ + Args: + command (dict): A dictionary containing command information. + + Returns: + str: The formatted command string. + """ + args_string = ", ".join( + f'"{key}": "{value}"' for key, value in command["args"].items() + ) + return f'{command["label"]}: "{command["name"]}", args: {args_string}' + + def add_resource(self, resource: str) -> None: + """ + Add a resource to the resources list. + + Args: + resource (str): The resource to be added. + """ + self.resources.append(resource) + + def add_performance_evaluation(self, evaluation: str) -> None: + """ + Add a performance evaluation item to the performance_evaluation list. + + Args: + evaluation (str): The evaluation item to be added. + """ + self.performance_evaluation.append(evaluation) + + def _generate_numbered_list(self, items: list[Any], item_type="list") -> str: + """ + Generate a numbered list from given items based on the item_type. + + Args: + items (list): A list of items to be numbered. + item_type (str, optional): The type of items in the list. + Defaults to 'list'. + + Returns: + str: The formatted numbered list. + """ + if item_type == "command": + return "\n".join( + f"{i+1}. {self._generate_command_string(item)}" + for i, item in enumerate(items) + ) + else: + return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) + + def generate_prompt_string(self) -> str: + """ + Generate a prompt string based on the constraints, commands, resources, + and performance evaluations. + + Returns: + str: The generated prompt string. + """ + formatted_response_format = json.dumps(self.response_format, indent=4) + return ( + f"约束条件:\n{self._generate_numbered_list(self.constraints)}\n\n" + "指令:\n" + f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" + f"资源:\n{self._generate_numbered_list(self.resources)}\n\n" + "性能评估:\n" + f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" + "You should only respond in JSON format as described below \nResponse" + f" Format: \n{formatted_response_format} \nEnsure the response can be" + " parsed by Python json.loads" + ) diff --git a/autogpt/setup.py b/autogpt/setup.py new file mode 100644 index 0000000..3f1aa51 --- /dev/null +++ b/autogpt/setup.py @@ -0,0 +1,71 @@ +"""Setup the AI and its goals""" +from colorama import Fore, Style +from autogpt import utils +from autogpt.config.ai_config import AIConfig +from autogpt.logs import logger + + +def prompt_user() -> AIConfig: + """Prompt the user for input + + Returns: + AIConfig: The AIConfig object containing the user's input + """ + ai_name = "" + # Construct the prompt + logger.typewriter_log( + "欢迎来到 Auto-GPT-ZH! 中文版由AJ提供. 
", + Fore.GREEN, + "", + speak_text=True, + ) + logger.typewriter_log( + "公众号《阿杰的人生路》回复Auto-GPT,加入社区共同探讨使用方式.", + Fore.YELLOW, + "", + speak_text=True, + ) + + print("在下面输入您的 AI 的名称及其角色。不输入将使用默认名称") + # Get AI Name from User + logger.typewriter_log( + "为您的 AI 命名:",Fore.GREEN,"例如,'AJ-1号-GPT'" + ) + ai_name = utils.clean_input("AI 机器人名称: ") + if ai_name == "": + ai_name = "AJ-1号-GPT" + + logger.typewriter_log( + f"{ai_name} 在这里!", Fore.LIGHTBLUE_EX, "我随时为您服务。", speak_text=True + ) + + # Get AI Role from User + logger.typewriter_log( + "描述您的 AI 的职责:", + Fore.GREEN, + "例如,'一种旨在自主开发和经营业务的人工智能,其唯一目标是增加你的净资产。" + ) + ai_role = utils.clean_input(f"{ai_name} 的职责: ") + if ai_role == "": + ai_role = "一个旨在自主开发和经营企业以唯一目标增加你净值的人工智能" + + # Enter up to 5 goals for the AI + logger.typewriter_log( + "提示:输入最多5个要帮你实现的功能/目标 ", + Fore.GREEN, + "例如:\n增加公众号关注者、市场调研、自主开发网站等等") + print("输入空白以加载默认值,完成时不要输入任何内容。", flush=True) + ai_goals = [] + for i in range(5): + ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ") + if ai_goal == "": + break + ai_goals.append(ai_goal) + if len(ai_goals) == 0: + ai_goals = [ + "Increase net worth", + "Grow Twitter Account", + "Develop and manage multiple businesses autonomously", + ] + + return AIConfig(ai_name, ai_role, ai_goals) diff --git a/autogpt/speak.py b/autogpt/speak.py new file mode 100644 index 0000000..8c9410c --- /dev/null +++ b/autogpt/speak.py @@ -0,0 +1,120 @@ +import os + +import requests +from playsound import playsound + +from autogpt.config import Config + +import threading +from threading import Lock, Semaphore + +import gtts + +cfg = Config() + +# Default voice IDs +default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] + +# Retrieve custom voice IDs from the Config class +custom_voice_1 = cfg.elevenlabs_voice_1_id +custom_voice_2 = cfg.elevenlabs_voice_2_id + +# Placeholder values that should be treated as empty +placeholders = {"your-voice-id"} + +# Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs +voices = [ + custom_voice_1 + if custom_voice_1 and custom_voice_1 not in placeholders + else default_voices[0], + custom_voice_2 + if custom_voice_2 and custom_voice_2 not in placeholders + else default_voices[1], +] + +tts_headers = {"Content-Type": "application/json", "xi-api-key": cfg.elevenlabs_api_key} + +mutex_lock = Lock() # Ensure only one sound is played at a time +queue_semaphore = Semaphore( + 1 +) # The amount of sounds to queue before blocking the main thread + + +def eleven_labs_speech(text, voice_index=0): + """使用elevenlabs.io的API朗读文本""" + tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format( + voice_id=voices[voice_index] + ) + formatted_message = {"text": text} + response = requests.post(tts_url, headers=tts_headers, json=formatted_message) + + if response.status_code == 200: + with mutex_lock: + with open("speech.mpeg", "wb") as f: + f.write(response.content) + playsound("speech.mpeg", True) + os.remove("speech.mpeg") + return True + else: + print("请求失败,状态码为:", response.status_code) + print("响应内容:", response.content) + return False + + +def brian_speech(text): + """Speak text using Brian with the streamelements API""" + tts_url = f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}" + response = requests.get(tts_url) + + if response.status_code == 200: + with mutex_lock: + with open("speech.mp3", "wb") as f: + f.write(response.content) + playsound("speech.mp3") + os.remove("speech.mp3") + return True + else: + print("Request failed 
with status code:", response.status_code) + print("Response content:", response.content) + return False + + +def gtts_speech(text): + tts = gtts.gTTS(text) + with mutex_lock: + tts.save("speech.mp3") + playsound("speech.mp3", True) + os.remove("speech.mp3") + + +def macos_tts_speech(text, voice_index=0): + if voice_index == 0: + os.system(f'say "{text}"') + else: + if voice_index == 1: + os.system(f'say -v "Ava (Premium)" "{text}"') + else: + os.system(f'say -v Samantha "{text}"') + + +def say_text(text, voice_index=0): + def speak(): + if not cfg.elevenlabs_api_key: + if cfg.use_mac_os_tts == "True": + macos_tts_speech(text) + elif cfg.use_brian_tts == "True": + success = brian_speech(text) + if not success: + gtts_speech(text) + else: + gtts_speech(text) + else: + success = eleven_labs_speech(text, voice_index) + if not success: + gtts_speech(text) + + queue_semaphore.release() + + queue_semaphore.acquire(True) + thread = threading.Thread(target=speak) + thread.start() diff --git a/autogpt/speech/__init__.py b/autogpt/speech/__init__.py new file mode 100644 index 0000000..2ff0d2b --- /dev/null +++ b/autogpt/speech/__init__.py @@ -0,0 +1,4 @@ +"""This module contains the speech recognition and speech synthesis functions.""" +from autogpt.speech.say import say_text + +__all__ = ["say_text"] diff --git a/autogpt/speech/base.py b/autogpt/speech/base.py new file mode 100644 index 0000000..d74fa51 --- /dev/null +++ b/autogpt/speech/base.py @@ -0,0 +1,50 @@ +"""Base class for all voice classes.""" +import abc +from threading import Lock + +from autogpt.config import AbstractSingleton + + +class VoiceBase(AbstractSingleton): + """ + Base class for all voice classes. + """ + + def __init__(self): + """ + Initialize the voice class. + """ + self._url = None + self._headers = None + self._api_key = None + self._voices = [] + self._mutex = Lock() + self._setup() + + def say(self, text: str, voice_index: int = 0) -> bool: + """ + Say the given text. + + Args: + text (str): The text to say. + voice_index (int): The index of the voice to use. + """ + with self._mutex: + return self._speech(text, voice_index) + + @abc.abstractmethod + def _setup(self) -> None: + """ + Setup the voices, API key, etc. + """ + pass + + @abc.abstractmethod + def _speech(self, text: str, voice_index: int = 0) -> bool: + """ + Play the given text. + + Args: + text (str): The text to play. 
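+            voice_index (int): The index of the voice to use.
+
+        Returns:
+            bool: True if the text was played successfully.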
+ """ + pass diff --git a/autogpt/speech/brian.py b/autogpt/speech/brian.py new file mode 100644 index 0000000..e581bbc --- /dev/null +++ b/autogpt/speech/brian.py @@ -0,0 +1,39 @@ +""" Brian speech module for autogpt """ +import os +import requests +from playsound import playsound + +from autogpt.speech.base import VoiceBase + + +class BrianSpeech(VoiceBase): + """Brian speech module for autogpt""" + + def _setup(self) -> None: + """Setup the voices, API key, etc.""" + pass + + def _speech(self, text: str) -> bool: + """Speak text using Brian with the streamelements API + + Args: + text (str): The text to speak + + Returns: + bool: True if the request was successful, False otherwise + """ + tts_url = ( + f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}" + ) + response = requests.get(tts_url) + + if response.status_code == 200: + with open("speech.mp3", "wb") as f: + f.write(response.content) + playsound("speech.mp3") + os.remove("speech.mp3") + return True + else: + print("Request failed with status code:", response.status_code) + print("Response content:", response.content) + return False diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py new file mode 100644 index 0000000..0af48ca --- /dev/null +++ b/autogpt/speech/eleven_labs.py @@ -0,0 +1,86 @@ +"""ElevenLabs speech module""" +import os +from playsound import playsound + +import requests + +from autogpt.config import Config +from autogpt.speech.base import VoiceBase + +PLACEHOLDERS = {"your-voice-id"} + + +class ElevenLabsSpeech(VoiceBase): + """ElevenLabs speech class""" + + def _setup(self) -> None: + """Setup the voices, API key, etc. + + Returns: + None: None + """ + + cfg = Config() + default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] + voice_options = { + "Rachel": "21m00Tcm4TlvDq8ikWAM", + "Domi": "AZnzlk1XvdvUeBnXmlld", + "Bella": "EXAVITQu4vr4xnSDxMaL", + "Antoni": "ErXwobaYiN019PkySvjV", + "Elli": "MF3mGyEYCl7XYWbV9V6O", + "Josh": "TxGEqnHWrfWFTfGW9XjX", + "Arnold": "VR6AewLTigWG4xSOukaG", + "Adam": "pNInz6obpgDQGcFmaJgB", + "Sam": "yoZ06aMxZJJ28mfd3POQ", + } + self._headers = { + "Content-Type": "application/json", + "xi-api-key": cfg.elevenlabs_api_key, + } + self._voices = default_voices.copy() + if cfg.elevenlabs_voice_1_id in voice_options: + cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id] + if cfg.elevenlabs_voice_2_id in voice_options: + cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id] + self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0) + self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1) + + def _use_custom_voice(self, voice, voice_index) -> None: + """Use a custom voice if provided and not a placeholder + + Args: + voice (str): The voice ID + voice_index (int): The voice index + + Returns: + None: None + """ + # Placeholder values that should be treated as empty + if voice and voice not in PLACEHOLDERS: + self._voices[voice_index] = voice + + def _speech(self, text: str, voice_index: int = 0) -> bool: + """Speak text using elevenlabs.io's API + + Args: + text (str): The text to speak + voice_index (int, optional): The voice to use. Defaults to 0. 
+ + Returns: + bool: True if the request was successful, False otherwise + """ + tts_url = ( + f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}" + ) + response = requests.post(tts_url, headers=self._headers, json={"text": text}) + + if response.status_code == 200: + with open("speech.mpeg", "wb") as f: + f.write(response.content) + playsound("speech.mpeg", True) + os.remove("speech.mpeg") + return True + else: + print("Request failed with status code:", response.status_code) + print("Response content:", response.content) + return False diff --git a/autogpt/speech/gtts.py b/autogpt/speech/gtts.py new file mode 100644 index 0000000..3749707 --- /dev/null +++ b/autogpt/speech/gtts.py @@ -0,0 +1,21 @@ +""" GTTS Voice. """ +import os +from playsound import playsound +import gtts + +from autogpt.speech.base import VoiceBase + + +class GTTSVoice(VoiceBase): + """GTTS Voice.""" + + def _setup(self) -> None: + pass + + def _speech(self, text: str, _: int = 0) -> bool: + """Play the given text.""" + tts = gtts.gTTS(text) + tts.save("speech.mp3") + playsound("speech.mp3", True) + os.remove("speech.mp3") + return True diff --git a/autogpt/speech/macos_tts.py b/autogpt/speech/macos_tts.py new file mode 100644 index 0000000..4c072ce --- /dev/null +++ b/autogpt/speech/macos_tts.py @@ -0,0 +1,21 @@ +""" MacOS TTS Voice. """ +import os + +from autogpt.speech.base import VoiceBase + + +class MacOSTTS(VoiceBase): + """MacOS TTS Voice.""" + + def _setup(self) -> None: + pass + + def _speech(self, text: str, voice_index: int = 0) -> bool: + """Play the given text.""" + if voice_index == 0: + os.system(f'say "{text}"') + elif voice_index == 1: + os.system(f'say -v "Ava (Premium)" "{text}"') + else: + os.system(f'say -v Samantha "{text}"') + return True diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py new file mode 100644 index 0000000..78b75b2 --- /dev/null +++ b/autogpt/speech/say.py @@ -0,0 +1,42 @@ +""" Text to speech module """ +from autogpt.config import Config + +import threading +from threading import Semaphore +from autogpt.speech.brian import BrianSpeech +from autogpt.speech.macos_tts import MacOSTTS +from autogpt.speech.gtts import GTTSVoice +from autogpt.speech.eleven_labs import ElevenLabsSpeech + + +CFG = Config() +DEFAULT_VOICE_ENGINE = GTTSVoice() +VOICE_ENGINE = None +if CFG.elevenlabs_api_key: + VOICE_ENGINE = ElevenLabsSpeech() +elif CFG.use_mac_os_tts == "True": + VOICE_ENGINE = MacOSTTS() +elif CFG.use_brian_tts == "True": + VOICE_ENGINE = BrianSpeech() +else: + VOICE_ENGINE = GTTSVoice() + + +QUEUE_SEMAPHORE = Semaphore( + 1 +) # The amount of sounds to queue before blocking the main thread + + +def say_text(text: str, voice_index: int = 0) -> None: + """Speak the given text using the given voice index""" + + def speak() -> None: + success = VOICE_ENGINE.say(text, voice_index) + if not success: + DEFAULT_VOICE_ENGINE.say(text) + + QUEUE_SEMAPHORE.release() + + QUEUE_SEMAPHORE.acquire(True) + thread = threading.Thread(target=speak) + thread.start() diff --git a/autogpt/spinner.py b/autogpt/spinner.py new file mode 100644 index 0000000..56b4f20 --- /dev/null +++ b/autogpt/spinner.py @@ -0,0 +1,50 @@ +"""A simple spinner module""" +import itertools +import sys +import threading +import time + + +class Spinner: + """A simple spinner class""" + + def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None: + """Initialize the spinner class + + Args: + message (str): The message to display. + delay (float): The delay between each spinner update. 
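+
+        Example:
+            with Spinner("Loading..."):
+                do_slow_work()  # any long-running call (hypothetical helper)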
+ """ + self.spinner = itertools.cycle(["-", "/", "|", "\\"]) + self.delay = delay + self.message = message + self.running = False + self.spinner_thread = None + + def spin(self) -> None: + """Spin the spinner""" + while self.running: + sys.stdout.write(f"{next(self.spinner)} {self.message}\r") + sys.stdout.flush() + time.sleep(self.delay) + sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") + + def __enter__(self) -> None: + """Start the spinner""" + self.running = True + self.spinner_thread = threading.Thread(target=self.spin) + self.spinner_thread.start() + + def __exit__(self, exc_type, exc_value, exc_traceback) -> None: + """Stop the spinner + + Args: + exc_type (Exception): The exception type. + exc_value (Exception): The exception value. + exc_traceback (Exception): The exception traceback. + """ + self.running = False + if self.spinner_thread is not None: + self.spinner_thread.join() + sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") + sys.stdout.flush() diff --git a/autogpt/summary.py b/autogpt/summary.py new file mode 100644 index 0000000..b58104a --- /dev/null +++ b/autogpt/summary.py @@ -0,0 +1,69 @@ +from autogpt.llm_utils import create_chat_completion + + +def summarize_text(driver, text, question): + if not text: + return "Error: 没有可总结的文本" + + text_length = len(text) + print(f"文字长度: {text_length} 字符") + + summaries = [] + chunks = list(split_text(text)) + + scroll_ratio = 1 / len(chunks) + for i, chunk in enumerate(chunks): + scroll_to_percentage(driver, scroll_ratio * i) + print(f"总结中 {i + 1} / {len(chunks)}") + messages = [create_message(chunk, question)] + + summary = create_chat_completion( + model="gpt-3.5-turbo", + messages=messages, + max_tokens=300, + ) + summaries.append(summary) + + print(f"已总结 {len(chunks)}.") + + combined_summary = "\n".join(summaries) + messages = [create_message(combined_summary, question)] + + return create_chat_completion( + model="gpt-3.5-turbo", + messages=messages, + max_tokens=300, + ) + + +def split_text(text, max_length=8192): + paragraphs = text.split("\n") + current_length = 0 + current_chunk = [] + + for paragraph in paragraphs: + if current_length + len(paragraph) + 1 <= max_length: + current_chunk.append(paragraph) + current_length += len(paragraph) + 1 + else: + yield "\n".join(current_chunk) + current_chunk = [paragraph] + current_length = len(paragraph) + 1 + + if current_chunk: + yield "\n".join(current_chunk) + + +def create_message(chunk, question): + return { + "role": "user", + "content": f'"""{chunk}""" 使用以上文本,请以中文回答以下问题:' + f' question: "{question}" -- if the question cannot be answered using the text,' + " please summarize the text.", + } + + +def scroll_to_percentage(driver, ratio): + if ratio < 0 or ratio > 1: + raise ValueError("百分比应该在 0 和 1 之间") + driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});") diff --git a/autogpt/token_counter.py b/autogpt/token_counter.py new file mode 100644 index 0000000..2563769 --- /dev/null +++ b/autogpt/token_counter.py @@ -0,0 +1,73 @@ +"""Functions for counting the number of tokens in a message or string.""" +from __future__ import annotations + +import tiktoken + +from autogpt.logs import logger + + +def count_message_tokens( + messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301" +) -> int: + """ + Returns the number of tokens used by a list of messages. + + Args: + messages (list): A list of messages, each of which is a dictionary + containing the role and content of the message. 
+ model (str): The name of the model to use for tokenization. + Defaults to "gpt-3.5-turbo-0301". + + Returns: + int: The number of tokens used by the list of messages. + """ + try: + encoding = tiktoken.encoding_for_model(model) + except KeyError: + logger.warn("Warning:未找到模型。使用 cl100k_base 编码。") + encoding = tiktoken.get_encoding("cl100k_base") + if model == "gpt-3.5-turbo": + # !Note: gpt-3.5-turbo may change over time. + # Returning num tokens assuming gpt-3.5-turbo-0301.") + return count_message_tokens(messages, model="gpt-3.5-turbo-0301") + elif model == "gpt-4": + # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.") + return count_message_tokens(messages, model="gpt-4-0314") + elif model == "gpt-3.5-turbo-0301": + tokens_per_message = ( + 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n + ) + tokens_per_name = -1 # if there's a name, the role is omitted + elif model == "gpt-4-0314": + tokens_per_message = 3 + tokens_per_name = 1 + else: + raise NotImplementedError( + f"num_tokens_from_messages() is not implemented for model {model}.\n" + " See https://github.com/openai/openai-python/blob/main/chatml.md for" + " information on how messages are converted to tokens." + ) + num_tokens = 0 + for message in messages: + num_tokens += tokens_per_message + for key, value in message.items(): + num_tokens += len(encoding.encode(value)) + if key == "name": + num_tokens += tokens_per_name + num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> + return num_tokens + + +def count_string_tokens(string: str, model_name: str) -> int: + """ + Returns the number of tokens in a text string. + + Args: + string (str): The text string. + model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo") + + Returns: + int: The number of tokens in the text string. 
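+
+        Example:
+            # "Hello world" encodes to two tokens under the cl100k_base
+            # encoding used for the gpt-3.5/gpt-4 model families:
+            count_string_tokens("Hello world", "gpt-3.5-turbo")  # -> 2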
+ """ + encoding = tiktoken.encoding_for_model(model_name) + return len(encoding.encode(string)) diff --git a/autogpt/utils.py b/autogpt/utils.py new file mode 100644 index 0000000..7d357bb --- /dev/null +++ b/autogpt/utils.py @@ -0,0 +1,26 @@ +import yaml +from colorama import Fore + + +def clean_input(prompt: str = ""): + try: + return input(prompt) + except KeyboardInterrupt: + print("您中断了 Auto-GPT") + print("退出...") + exit(0) + + +def validate_yaml_file(file: str): + try: + with open(file, encoding="utf-8") as fp: + yaml.load(fp.read(), Loader=yaml.FullLoader) + except FileNotFoundError: + return (False, f"文件 {Fore.CYAN}`{file}`{Fore.RESET} 没有找到") + except yaml.YAMLError as e: + return ( + False, + f"尝试读取 AI 设置文件时出现问题: {e}", + ) + + return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!") diff --git a/autogpt/web.py b/autogpt/web.py new file mode 100644 index 0000000..9c8b2d7 --- /dev/null +++ b/autogpt/web.py @@ -0,0 +1,85 @@ +from selenium import webdriver +import autogpt.summary as summary +from bs4 import BeautifulSoup +from selenium.webdriver.common.by import By +from selenium.webdriver.support.wait import WebDriverWait +from selenium.webdriver.support import expected_conditions as EC +from webdriver_manager.chrome import ChromeDriverManager +from selenium.webdriver.chrome.options import Options +import logging +from pathlib import Path +from autogpt.config import Config + +file_dir = Path(__file__).parent +cfg = Config() + + +def browse_website(url, question): + driver, text = scrape_text_with_selenium(url) + add_header(driver) + summary_text = summary.summarize_text(driver, text, question) + links = scrape_links_with_selenium(driver) + + # Limit links to 5 + if len(links) > 5: + links = links[:5] + close_browser(driver) + return f"从网站收集的答案: {summary_text} \n \n 链接: {links}", driver + + +def scrape_text_with_selenium(url): + logging.getLogger("selenium").setLevel(logging.CRITICAL) + + options = Options() + options.add_argument( + "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" + ) + driver = webdriver.Chrome( + executable_path=ChromeDriverManager().install(), options=options + ) + driver.get(url) + + WebDriverWait(driver, 10).until( + EC.presence_of_element_located((By.TAG_NAME, "body")) + ) + + # Get the HTML content directly from the browser's DOM + page_source = driver.execute_script("return document.body.outerHTML;") + soup = BeautifulSoup(page_source, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + text = soup.get_text() + lines = (line.strip() for line in text.splitlines()) + chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + text = "\n".join(chunk for chunk in chunks if chunk) + return driver, text + + +def scrape_links_with_selenium(driver): + page_source = driver.page_source + soup = BeautifulSoup(page_source, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + hyperlinks = extract_hyperlinks(soup) + + return format_hyperlinks(hyperlinks) + + +def close_browser(driver): + driver.quit() + + +def extract_hyperlinks(soup): + return [(link.text, link["href"]) for link in soup.find_all("a", href=True)] + + +def format_hyperlinks(hyperlinks): + return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks] + + +def add_header(driver): + driver.execute_script(open(f"{file_dir}/js/overlay.js", "r").read()) diff --git a/autogpt/workspace.py b/autogpt/workspace.py new file mode 
100644 index 0000000..2706b3b --- /dev/null +++ b/autogpt/workspace.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +import os +from pathlib import Path + +# Set a dedicated folder for file I/O +WORKSPACE_PATH = Path(os.getcwd()) / "auto_gpt_workspace" + +# Create the directory if it doesn't exist +if not os.path.exists(WORKSPACE_PATH): + os.makedirs(WORKSPACE_PATH) + + +def path_in_workspace(relative_path: str | Path) -> Path: + """Get full path for item in workspace + + Parameters: + relative_path (str | Path): Path to translate into the workspace + + Returns: + Path: Absolute path for the given path in the workspace + """ + return safe_path_join(WORKSPACE_PATH, relative_path) + + +def safe_path_join(base: Path, *paths: str | Path) -> Path: + """Join one or more path components, asserting the resulting path is within the workspace. + + Args: + base (Path): The base path + *paths (str): The paths to join to the base path + + Returns: + Path: The joined path + """ + joined_path = base.joinpath(*paths).resolve() + + if not joined_path.is_relative_to(base): + raise ValueError(f"Attempted to access path '{joined_path}' outside of working directory '{base}'.") + + return joined_path diff --git a/azure.yaml.template b/azure.yaml.template new file mode 100644 index 0000000..74ca797 --- /dev/null +++ b/azure.yaml.template @@ -0,0 +1,7 @@ +azure_api_type: azure_ad +azure_api_base: your-base-url-for-azure +azure_api_version: api-version-for-azure +azure_model_map: + fast_llm_model_deployment_id: gpt35-deployment-id-for-azure + smart_llm_model_deployment_id: gpt4-deployment-id-for-azure + embedding_model_deployment_id: embedding-deployment-id-for-azure diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..be225d7 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,18 @@ +# To boot the app run the following: +# docker-compose run auto-gpt +version: "3.9" + +services: + auto-gpt: + depends_on: + - redis + build: ./ + env_file: + - .env + volumes: + - "./autogpt:/app" + - ".env:/app/.env" + profiles: ["exclude-from-up"] + + redis: + image: "redis/redis-stack-server:latest" diff --git a/docs/imgs/demo.gif b/docs/imgs/demo.gif new file mode 100644 index 0000000..6f658e9 Binary files /dev/null and b/docs/imgs/demo.gif differ diff --git a/docs/imgs/gzh.png b/docs/imgs/gzh.png new file mode 100644 index 0000000..7493c93 Binary files /dev/null and b/docs/imgs/gzh.png differ diff --git a/docs/imgs/openai-api-key-billing-paid-account.png b/docs/imgs/openai-api-key-billing-paid-account.png new file mode 100644 index 0000000..8948505 Binary files /dev/null and b/docs/imgs/openai-api-key-billing-paid-account.png differ diff --git a/main.py b/main.py new file mode 100644 index 0000000..160addc --- /dev/null +++ b/main.py @@ -0,0 +1 @@ +from autogpt import main diff --git a/openai-api-key.png b/openai-api-key.png new file mode 100644 index 0000000..8948505 Binary files /dev/null and b/openai-api-key.png differ diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..64ed716 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,11 @@ +[project] +name = "auto-gpt" +version = "0.1.0" +description = "A GPT based ai agent" +readme = "README.md" + +[tool.black] +line-length = 88 +target-version = ['py310'] +include = '\.pyi?$' +extend-exclude = "" \ No newline at end of file diff --git a/requirements-docker.txt b/requirements-docker.txt new file mode 100644 index 0000000..3a8a344 --- /dev/null +++ b/requirements-docker.txt @@ -0,0 +1,27 @@ +beautifulsoup4 
+colorama==0.4.6 +openai==0.27.2 +playsound==1.2.2 +python-dotenv==1.0.0 +pyyaml==6.0 +readability-lxml==0.8.1 +requests +tiktoken==0.3.3 +gTTS==2.3.1 +docker +duckduckgo-search +google-api-python-client #(https://developers.google.com/custom-search/v1/overview) +pinecone-client==2.2.1 +redis +orjson +Pillow +selenium +webdriver-manager +coverage +flake8 +numpy +pre-commit +black +isort +gitpython==3.1.31 +tweepy \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..1cdedec --- /dev/null +++ b/requirements.txt @@ -0,0 +1,30 @@ +beautifulsoup4 +colorama==0.4.6 +openai==0.27.2 +playsound==1.2.2 +python-dotenv==1.0.0 +pyyaml==6.0 +readability-lxml==0.8.1 +requests +tiktoken==0.3.3 +gTTS==2.3.1 +docker +duckduckgo-search +google-api-python-client #(https://developers.google.com/custom-search/v1/overview) +pinecone-client==2.2.1 +redis +orjson +Pillow +selenium +webdriver-manager +coverage +flake8 +numpy +pre-commit +black +sourcery +isort +gitpython==3.1.31 +pytest +pytest-mock +tweepy diff --git a/run.bat b/run.bat new file mode 100644 index 0000000..afbab57 --- /dev/null +++ b/run.bat @@ -0,0 +1,8 @@ +@echo off +python scripts/check_requirements.py requirements.txt +if errorlevel 1 ( + echo Installing missing packages... + pip install -r requirements.txt +) +python -m autogpt %* +pause diff --git a/run_continuous.bat b/run_continuous.bat new file mode 100644 index 0000000..812aa01 --- /dev/null +++ b/run_continuous.bat @@ -0,0 +1,3 @@ +@echo off +set argument=--continuous +call run.bat %argument% diff --git a/scripts/check_requirements.py b/scripts/check_requirements.py new file mode 100644 index 0000000..d1f2350 --- /dev/null +++ b/scripts/check_requirements.py @@ -0,0 +1,31 @@ +import pkg_resources +import sys + + +def main(): + requirements_file = sys.argv[1] + with open(requirements_file, "r") as f: + required_packages = [ + line.strip().split("#")[0].strip() for line in f.readlines() + ] + + installed_packages = [package.key for package in pkg_resources.working_set] + + missing_packages = [] + for package in required_packages: + if not package: # Skip empty lines + continue + package_name = package.strip().split("==")[0] + if package_name.lower() not in installed_packages: + missing_packages.append(package_name) + + if missing_packages: + print("Missing packages:") + print(", ".join(missing_packages)) + sys.exit(1) + else: + print("All packages are installed.") + + +if __name__ == "__main__": + main() diff --git a/scripts/execute_code.py b/scripts/execute_code.py new file mode 100644 index 0000000..97bfd6d --- /dev/null +++ b/scripts/execute_code.py @@ -0,0 +1,88 @@ +import docker +import os +import subprocess + + +WORKSPACE_FOLDER = "auto_gpt_workspace" + + +def execute_python_file(file): + """执行一个 Python 文件在 Docker 容器中,并返回输出""" + + print (f"正在工作空间 '{WORKSPACE_FOLDER}' 中执行文件 '{file}'") + + if not file.endswith(".py"): + return "Error: 无效的文件类型。仅允许 .py 文件." + + file_path = os.path.join(WORKSPACE_FOLDER, file) + + if not os.path.isfile(file_path): + return f"Error: 文件 '{file}' 不存在." 
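+
+    # The block below talks to the local Docker daemon: it reuses the
+    # python:3.10 image when present, otherwise pulls it with a streamed
+    # progress readout, then runs the file in a container with the
+    # workspace mounted read-only at /workspace.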
+ + try: + client = docker.from_env() + + image_name = 'python:3.10' + try: + client.images.get(image_name) + print(f"Image '{image_name}' found locally") + except docker.errors.ImageNotFound: + print(f"Image '{image_name}' not found locally, pulling from Docker Hub") + # Use the low-level API to stream the pull response + low_level_client = docker.APIClient() + for line in low_level_client.pull(image_name, stream=True, decode=True): + # Print the status and progress, if available + status = line.get('status') + progress = line.get('progress') + if status and progress: + print(f"{status}: {progress}") + elif status: + print(status) + + # You can replace 'python:3.8' with the desired Python image/version + # You can find available Python images on Docker Hub: + # https://hub.docker.com/_/python + container = client.containers.run( + image_name, + f'python {file}', + volumes={ + os.path.abspath(WORKSPACE_FOLDER): { + 'bind': '/workspace', + 'mode': 'ro'}}, + working_dir='/workspace', + stderr=True, + stdout=True, + detach=True, + ) + + output = container.wait() + logs = container.logs().decode('utf-8') + container.remove() + + # print(f"Execution complete. Output: {output}") + # print(f"Logs: {logs}") + + return logs + + except Exception as e: + return f"Error: {str(e)}" + + +def execute_shell(command_line): + + current_dir = os.getcwd() + + if not WORKSPACE_FOLDER in current_dir: # Change dir into workspace if necessary + work_dir = os.path.join(os.getcwd(), WORKSPACE_FOLDER) + os.chdir(work_dir) + + print (f"Executing command '{command_line}' in working directory '{os.getcwd()}'") + + result = subprocess.run(command_line, capture_output=True, shell=True) + output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" + + # Change back to whatever the prior working dir was + + os.chdir(current_dir) + + return output diff --git a/scripts/file_operations.py b/scripts/file_operations.py new file mode 100644 index 0000000..ea760ca --- /dev/null +++ b/scripts/file_operations.py @@ -0,0 +1,84 @@ +import os +import os.path + +# Set a dedicated folder for file I/O +working_directory = "auto_gpt_workspace" + +# Create the directory if it doesn't exist +if not os.path.exists(working_directory): + os.makedirs(working_directory) + + +def safe_join(base, *paths): + """智能连接一个或多个路径组件。""" + new_path = os.path.join(base, *paths) + norm_new_path = os.path.normpath(new_path) + + if os.path.commonprefix([base, norm_new_path]) != base: + raise ValueError("尝试访问工作目录之外的位置。") + + return norm_new_path + + +def read_file(filename): + """读取文件并返回内容""" + try: + filepath = safe_join(working_directory, filename) + with open(filepath, "r", encoding='utf-8') as f: + content = f.read() + return content + except Exception as e: + return "Error: " + str(e) + + +def write_to_file(filename, text): + """将文本写入文件""" + try: + filepath = safe_join(working_directory, filename) + directory = os.path.dirname(filepath) + if not os.path.exists(directory): + os.makedirs(directory) + with open(filepath, "w", encoding='utf-8') as f: + f.write(text) + return "文件写入成功。" + except Exception as e: + return "Error: " + str(e) + + +def append_to_file(filename, text): + """将文本追加到文件""" + try: + filepath = safe_join(working_directory, filename) + with open(filepath, "a") as f: + f.write(text) + return "已成功添加文本." + except Exception as e: + return "Error: " + str(e) + + +def delete_file(filename): + """删除文件""" + try: + filepath = safe_join(working_directory, filename) + os.remove(filepath) + return "文件删除成功." 
+ except Exception as e: + return "Error: " + str(e) + + +def search_files(directory): + found_files = [] + + if directory == "" or directory == "/": + search_directory = working_directory + else: + search_directory = safe_join(working_directory, directory) + + for root, _, files in os.walk(search_directory): + for file in files: + if file.startswith('.'): + continue + relative_path = os.path.relpath(os.path.join(root, file), working_directory) + found_files.append(relative_path) + + return found_files diff --git a/scripts/main.py b/scripts/main.py new file mode 100644 index 0000000..d1dde38 --- /dev/null +++ b/scripts/main.py @@ -0,0 +1,11 @@ +from colorama import Style, init + +# Initialize colorama +init(autoreset=True) + +# Use the bold ANSI style +print( + f"""{Style.BRIGHT}请 运行: +python -m autogpt +""" +) diff --git a/scripts/prompt.py b/scripts/prompt.py new file mode 100644 index 0000000..856f676 --- /dev/null +++ b/scripts/prompt.py @@ -0,0 +1,64 @@ +from promptgenerator import PromptGenerator + + +def get_prompt(): + """ + 此函数生成包含各种约束、命令、资源和性能评估的提示字符串。 + 返回: + str: 生成的提示字符串。 + """ + + # Initialize the PromptGenerator object + prompt_generator = PromptGenerator() + + # Add constraints to the PromptGenerator object + prompt_generator.add_constraint("~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.") + prompt_generator.add_constraint("If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.") + prompt_generator.add_constraint("No user assistance") + prompt_generator.add_constraint("Reply in Chinese") + prompt_generator.add_constraint('Exclusively use the commands listed in double quotes e.g. 
"command name"') + + + # Define the command list + commands = [ + ("Google Search", "google", {"input": ""}), + ("Browse Website", "browse_website", {"url": "", "question": ""}), + ("Start GPT Agent", "start_agent", {"name": "", "task": "", "prompt": ""}), + ("Message GPT Agent", "message_agent", {"key": "", "message": ""}), + ("List GPT Agents", "list_agents", {}), + ("Delete GPT Agent", "delete_agent", {"key": ""}), + ("Write to file", "write_to_file", {"file": "", "text": ""}), + ("Read file", "read_file", {"file": ""}), + ("Append to file", "append_to_file", {"file": "", "text": ""}), + ("Delete file", "delete_file", {"file": ""}), + ("Search Files", "search_files", {"directory": ""}), + ("Evaluate Code", "evaluate_code", {"code": ""}), + ("Get Improved Code", "improve_code", {"suggestions": "", "code": ""}), + ("Write Tests", "write_tests", {"code": "", "focus": ""}), + ("Execute Python File", "execute_python_file", {"file": ""}), + ("Execute Shell Command, non-interactive commands only", "execute_shell", { "command_line": ""}), + ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), + ("Generate Image", "generate_image", {"prompt": ""}), + ("Do Nothing", "do_nothing", {}), + ] + + # Add commands to the PromptGenerator object + for command_label, command_name, args in commands: + prompt_generator.add_command(command_label, command_name, args) + + # Add resources to the PromptGenerator object + prompt_generator.add_resource("Internet access for searches and information gathering.") + prompt_generator.add_resource("Long Term memory management.") + prompt_generator.add_resource("GPT-3.5 powered Agents for delegation of simple tasks.") + prompt_generator.add_resource("File output.") + + # Add performance evaluations to the PromptGenerator object + prompt_generator.add_performance_evaluation("Continuously review and analyze your actions to ensure you are performing to the best of your abilities.") + prompt_generator.add_performance_evaluation("Constructively self-criticize your big-picture behavior constantly.") + prompt_generator.add_performance_evaluation("Reflect on past decisions and strategies to refine your approach.") + prompt_generator.add_performance_evaluation("Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.") + + # Generate the prompt string + prompt_string = prompt_generator.generate_prompt_string() + + return prompt_string diff --git a/tests.py b/tests.py new file mode 100644 index 0000000..67ba1c8 --- /dev/null +++ b/tests.py @@ -0,0 +1,20 @@ +import unittest +import coverage + +if __name__ == "__main__": + # Start coverage collection + cov = coverage.Coverage() + cov.start() + + # Load all tests from the 'autogpt/tests' package + suite = unittest.defaultTestLoader.discover("./tests") + + # Run the tests + unittest.TextTestRunner().run(suite) + + # Stop coverage collection + cov.stop() + cov.save() + + # Report the coverage + cov.report(show_missing=True) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/browse_tests.py b/tests/browse_tests.py new file mode 100644 index 0000000..1ac523e --- /dev/null +++ b/tests/browse_tests.py @@ -0,0 +1,26 @@ +import unittest +import os +import sys + +from bs4 import BeautifulSoup + +sys.path.append(os.path.abspath("../scripts")) + +from browse import extract_hyperlinks + + +class TestBrowseLinks(unittest.TestCase): + def test_extract_hyperlinks(self): + body = """ + + Google + Foo +
+ <a href="https://google.com">Google</a>
+ <a href="foo.html">Foo</a>
+ <div>Some other crap</div>
+ + """ + soup = BeautifulSoup(body, "html.parser") + links = extract_hyperlinks(soup, "http://example.com") + self.assertEqual( + links, + [("Google", "https://google.com"), ("Foo", "http://example.com/foo.html")], + ) diff --git a/tests/context.py b/tests/context.py new file mode 100644 index 0000000..cef969d --- /dev/null +++ b/tests/context.py @@ -0,0 +1,6 @@ +import os +import sys + +sys.path.insert( + 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts")) +) diff --git a/tests/integration/memory_tests.py b/tests/integration/memory_tests.py new file mode 100644 index 0000000..eead2da --- /dev/null +++ b/tests/integration/memory_tests.py @@ -0,0 +1,49 @@ +import random +import string +import sys +import unittest +from pathlib import Path + +from autogpt.config import Config +from autogpt.memory.local import LocalCache + + +class TestLocalCache(unittest.TestCase): + def random_string(self, length): + return "".join(random.choice(string.ascii_letters) for _ in range(length)) + + def setUp(self): + cfg = cfg = Config() + self.cache = LocalCache(cfg) + self.cache.clear() + + # Add example texts to the cache + self.example_texts = [ + "The quick brown fox jumps over the lazy dog", + "I love machine learning and natural language processing", + "The cake is a lie, but the pie is always true", + "ChatGPT is an advanced AI model for conversation", + ] + + for text in self.example_texts: + self.cache.add(text) + + # Add some random strings to test noise + for _ in range(5): + self.cache.add(self.random_string(10)) + + def test_get_relevant(self): + query = "I'm interested in artificial intelligence and NLP" + k = 3 + relevant_texts = self.cache.get_relevant(query, k) + + print(f"Top {k} relevant texts for the query '{query}':") + for i, text in enumerate(relevant_texts, start=1): + print(f"{i}. {text}") + + self.assertEqual(len(relevant_texts), k) + self.assertIn(self.example_texts[1], relevant_texts) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/integration/milvus_memory_tests.py b/tests/integration/milvus_memory_tests.py new file mode 100644 index 0000000..96934cd --- /dev/null +++ b/tests/integration/milvus_memory_tests.py @@ -0,0 +1,48 @@ +import random +import string +import unittest + +from autogpt.config import Config +from autogpt.memory.milvus import MilvusMemory + + +class TestMilvusMemory(unittest.TestCase): + def random_string(self, length): + return "".join(random.choice(string.ascii_letters) for _ in range(length)) + + def setUp(self): + cfg = Config() + cfg.milvus_addr = "localhost:19530" + self.memory = MilvusMemory(cfg) + self.memory.clear() + + # Add example texts to the cache + self.example_texts = [ + "The quick brown fox jumps over the lazy dog", + "I love machine learning and natural language processing", + "The cake is a lie, but the pie is always true", + "ChatGPT is an advanced AI model for conversation", + ] + + for text in self.example_texts: + self.memory.add(text) + + # Add some random strings to test noise + for _ in range(5): + self.memory.add(self.random_string(10)) + + def test_get_relevant(self): + query = "I'm interested in artificial intelligence and NLP" + k = 3 + relevant_texts = self.memory.get_relevant(query, k) + + print(f"Top {k} relevant texts for the query '{query}':") + for i, text in enumerate(relevant_texts, start=1): + print(f"{i}. 
{text}") + + self.assertEqual(len(relevant_texts), k) + self.assertIn(self.example_texts[1], relevant_texts) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py new file mode 100644 index 0000000..503fe9d --- /dev/null +++ b/tests/integration/weaviate_memory_tests.py @@ -0,0 +1,117 @@ +import unittest +from unittest import mock +import sys +import os + +from weaviate import Client +from weaviate.util import get_valid_uuid +from uuid import uuid4 + +from autogpt.config import Config +from autogpt.memory.weaviate import WeaviateMemory +from autogpt.memory.base import get_ada_embedding + + +@mock.patch.dict(os.environ, { + "WEAVIATE_HOST": "127.0.0.1", + "WEAVIATE_PROTOCOL": "http", + "WEAVIATE_PORT": "8080", + "WEAVIATE_USERNAME": "", + "WEAVIATE_PASSWORD": "", + "MEMORY_INDEX": "AutogptTests" +}) +class TestWeaviateMemory(unittest.TestCase): + cfg = None + client = None + + @classmethod + def setUpClass(cls): + # only create the connection to weaviate once + cls.cfg = Config() + + if cls.cfg.use_weaviate_embedded: + from weaviate.embedded import EmbeddedOptions + + cls.client = Client(embedded_options=EmbeddedOptions( + hostname=cls.cfg.weaviate_host, + port=int(cls.cfg.weaviate_port), + persistence_data_path=cls.cfg.weaviate_embedded_path + )) + else: + cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}") + + """ + In order to run these tests you will need a local instance of + Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose + for creating local instances using docker. + Alternatively in your .env file set the following environmental variables to run Weaviate embedded (see: https://weaviate.io/developers/weaviate/installation/embedded): + + USE_WEAVIATE_EMBEDDED=True + WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" + """ + def setUp(self): + try: + self.client.schema.delete_class(self.cfg.memory_index) + except: + pass + + self.memory = WeaviateMemory(self.cfg) + + def test_add(self): + doc = 'You are a Titan name Thanos and you are looking for the Infinity Stones' + self.memory.add(doc) + result = self.client.query.get(self.cfg.memory_index, ['raw_text']).do() + actual = result['data']['Get'][self.cfg.memory_index] + + self.assertEqual(len(actual), 1) + self.assertEqual(actual[0]['raw_text'], doc) + + def test_get(self): + doc = 'You are an Avenger and swore to defend the Galaxy from a menace called Thanos' + + with self.client.batch as batch: + batch.add_data_object( + uuid=get_valid_uuid(uuid4()), + data_object={'raw_text': doc}, + class_name=self.cfg.memory_index, + vector=get_ada_embedding(doc) + ) + + batch.flush() + + actual = self.memory.get(doc) + + self.assertEqual(len(actual), 1) + self.assertEqual(actual[0], doc) + + def test_get_stats(self): + docs = [ + 'You are now about to count the number of docs in this index', + 'And then you about to find out if you can count correctly' + ] + + [self.memory.add(doc) for doc in docs] + + stats = self.memory.get_stats() + + self.assertTrue(stats) + self.assertTrue('count' in stats) + self.assertEqual(stats['count'], 2) + + def test_clear(self): + docs = [ + 'Shame this is the last test for this class', + 'Testing is fun when someone else is doing it' + ] + + [self.memory.add(doc) for doc in docs] + + self.assertEqual(self.memory.get_stats()['count'], 2) + + self.memory.clear() + + self.assertEqual(self.memory.get_stats()['count'], 0) + + 
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py
new file mode 100644
index 0000000..91c922b
--- /dev/null
+++ b/tests/local_cache_test.py
@@ -0,0 +1,57 @@
+import os
+import sys
+import unittest
+
+from autogpt.memory.local import LocalCache
+
+
+def MockConfig():
+    return type(
+        "MockConfig",
+        (object,),
+        {
+            "debug_mode": False,
+            "continuous_mode": False,
+            "speak_mode": False,
+            "memory_index": "auto-gpt",
+        },
+    )
+
+
+class TestLocalCache(unittest.TestCase):
+    def setUp(self):
+        self.cfg = MockConfig()
+        self.cache = LocalCache(self.cfg)
+
+    def test_add(self):
+        text = "Sample text"
+        self.cache.add(text)
+        self.assertIn(text, self.cache.data.texts)
+
+    def test_clear(self):
+        self.cache.clear()
+        self.assertEqual(self.cache.data, [""])
+
+    def test_get(self):
+        text = "Sample text"
+        self.cache.add(text)
+        result = self.cache.get(text)
+        self.assertEqual(result, [text])
+
+    def test_get_relevant(self):
+        text1 = "Sample text 1"
+        text2 = "Sample text 2"
+        self.cache.add(text1)
+        self.cache.add(text2)
+        result = self.cache.get_relevant(text1, 1)
+        self.assertEqual(result, [text1])
+
+    def test_get_stats(self):
+        text = "Sample text"
+        self.cache.add(text)
+        stats = self.cache.get_stats()
+        self.assertEqual(stats, (1, self.cache.data.embeddings.shape))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py
new file mode 100644
index 0000000..0113fa1
--- /dev/null
+++ b/tests/milvus_memory_test.py
@@ -0,0 +1,63 @@
+import os
+import sys
+import unittest
+
+from autogpt.memory.milvus import MilvusMemory
+
+
+def MockConfig():
+    return type(
+        "MockConfig",
+        (object,),
+        {
+            "debug_mode": False,
+            "continuous_mode": False,
+            "speak_mode": False,
+            "milvus_collection": "autogpt",
+            "milvus_addr": "localhost:19530",
+        },
+    )
+
+
+class TestMilvusMemory(unittest.TestCase):
+    def setUp(self):
+        self.cfg = MockConfig()
+        self.memory = MilvusMemory(self.cfg)
+
+    def test_add(self):
+        text = "Sample text"
+        self.memory.clear()
+        self.memory.add(text)
+        result = self.memory.get(text)
+        self.assertEqual([text], result)
+
+    def test_clear(self):
+        self.memory.clear()
+        self.assertEqual(self.memory.collection.num_entities, 0)
+
+    def test_get(self):
+        text = "Sample text"
+        self.memory.clear()
+        self.memory.add(text)
+        result = self.memory.get(text)
+        self.assertEqual(result, [text])
+
+    def test_get_relevant(self):
+        text1 = "Sample text 1"
+        text2 = "Sample text 2"
+        self.memory.clear()
+        self.memory.add(text1)
+        self.memory.add(text2)
+        result = self.memory.get_relevant(text1, 1)
+        self.assertEqual(result, [text1])
+
+    def test_get_stats(self):
+        text = "Sample text"
+        self.memory.clear()
+        self.memory.add(text)
+        stats = self.memory.get_stats()
+        self.assertEqual(15, len(stats))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/smoke_test.py b/tests/smoke_test.py
new file mode 100644
index 0000000..50e97b7
--- /dev/null
+++ b/tests/smoke_test.py
@@ -0,0 +1,63 @@
+import os
+import subprocess
+import sys
+import unittest
+
+from autogpt.commands.file_operations import delete_file, read_file
+
+env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}
+
+
+class TestCommands(unittest.TestCase):
+    def test_write_file(self):
+        # Test case to check if the write_file command can successfully write 'Hello World' to a file
+        # named 'hello_world.txt'.
+
+        # Read the current ai_settings.yaml file and store its content.
+        ai_settings = None
+        if os.path.exists("ai_settings.yaml"):
+            with open("ai_settings.yaml", "r") as f:
+                ai_settings = f.read()
+            os.remove("ai_settings.yaml")
+
+        try:
+            if os.path.exists("hello_world.txt"):
+                # Clean up any existing 'hello_world.txt' file before testing.
+                delete_file("hello_world.txt")
+            # Prepare input data for the test.
+            input_data = """write_file-GPT
+an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task.
+Use the write_file command to write 'Hello World' into a file named "hello_world.txt".
+Use the task_complete command to complete the task.
+Do not use any other commands.
+
+y -5
+EOF"""
+            command = f"{sys.executable} -m autogpt"
+
+            # Execute the script with the input data.
+            process = subprocess.Popen(
+                command,
+                stdin=subprocess.PIPE,
+                shell=True,
+                env={**os.environ, **env_vars},
+            )
+            process.communicate(input_data.encode())
+
+            # Read the content of the 'hello_world.txt' file created during the test.
+            content = read_file("hello_world.txt")
+        finally:
+            if ai_settings:
+                # Restore the original ai_settings.yaml file.
+                with open("ai_settings.yaml", "w") as f:
+                    f.write(ai_settings)
+
+        # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
+        self.assertEqual(
+            content, "Hello World", f"Expected 'Hello World', got {content}"
+        )
+
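+    # Note: because this smoke test drives the real `python -m autogpt` process
+    # via subprocess, it also needs a valid OPENAI_API_KEY in the environment;
+    # the env_vars above only pin the memory backend and temperature.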
+ """ + self.config.set_smart_llm_model("gpt-4-test") + self.assertEqual(self.config.smart_llm_model, "gpt-4-test") + + def test_set_fast_token_limit(self): + """ + Test if the set_fast_token_limit() method updates the fast_token_limit attribute. + """ + self.config.set_fast_token_limit(5000) + self.assertEqual(self.config.fast_token_limit, 5000) + + def test_set_smart_token_limit(self): + """ + Test if the set_smart_token_limit() method updates the smart_token_limit attribute. + """ + self.config.set_smart_token_limit(9000) + self.assertEqual(self.config.smart_token_limit, 9000) + + def test_set_debug_mode(self): + """ + Test if the set_debug_mode() method updates the debug_mode attribute. + """ + self.config.set_debug_mode(True) + self.assertTrue(self.config.debug_mode) diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py new file mode 100644 index 0000000..2862034 --- /dev/null +++ b/tests/test_json_parser.py @@ -0,0 +1,111 @@ +import unittest + +import tests.context +from autogpt.json_fixes.parsing import fix_and_parse_json + + +class TestParseJson(unittest.TestCase): + def test_valid_json(self): + # Test that a valid JSON string is parsed correctly + json_str = '{"name": "John", "age": 30, "city": "New York"}' + obj = fix_and_parse_json(json_str) + self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) + + def test_invalid_json_minor(self): + # Test that an invalid JSON string can be fixed with gpt + json_str = '{"name": "John", "age": 30, "city": "New York",}' + with self.assertRaises(Exception): + fix_and_parse_json(json_str, try_to_fix_with_gpt=False) + + def test_invalid_json_major_with_gpt(self): + # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False + json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' + with self.assertRaises(Exception): + fix_and_parse_json(json_str, try_to_fix_with_gpt=False) + + def test_invalid_json_major_without_gpt(self): + # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' + # Assert that this raises an exception: + with self.assertRaises(Exception): + fix_and_parse_json(json_str, try_to_fix_with_gpt=False) + + def test_invalid_json_leading_sentence_with_gpt(self): + # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + json_str = """I suggest we start by browsing the repository to find any issues that we can fix. + +{ + "command": { + "name": "browse_website", + "args":{ + "url": "https://github.com/Torantulino/Auto-GPT" + } + }, + "thoughts": + { + "text": "I suggest we start browsing the repository to find any issues that we can fix.", + "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", + "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", + "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", + "speak": "I will start browsing the repository to find any issues we can fix." 
+ } +}""" + good_obj = { + "command": { + "name": "browse_website", + "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, + }, + "thoughts": { + "text": "I suggest we start browsing the repository to find any issues that we can fix.", + "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", + "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", + "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", + "speak": "I will start browsing the repository to find any issues we can fix.", + }, + } + # Assert that this raises an exception: + self.assertEqual( + fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj + ) + + def test_invalid_json_leading_sentence_with_gpt(self): + # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. + +{ + "command": { + "name": "browse_website", + "args":{ + "url": "https://github.com/Torantulino/Auto-GPT" + } + }, + "thoughts": + { + "text": "Browsing the repository to identify potential bugs", + "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", + "plan": "- Analyze the repository for potential bugs and areas of improvement", + "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", + "speak": "I am browsing the repository to identify potential bugs." + } +}""" + good_obj = { + "command": { + "name": "browse_website", + "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, + }, + "thoughts": { + "text": "Browsing the repository to identify potential bugs", + "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", + "plan": "- Analyze the repository for potential bugs and areas of improvement", + "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", + "speak": "I am browsing the repository to identify potential bugs.", + }, + } + # Assert that this raises an exception: + self.assertEqual( + fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_prompt_generator.py b/tests/test_prompt_generator.py new file mode 100644 index 0000000..6a0bfd6 --- /dev/null +++ b/tests/test_prompt_generator.py @@ -0,0 +1,114 @@ +from unittest import TestCase + +from autogpt.promptgenerator import PromptGenerator + + +class TestPromptGenerator(TestCase): + """ + Test cases for the PromptGenerator class, which is responsible for generating + prompts for the AI with constraints, commands, resources, and performance evaluations. + """ + + @classmethod + def setUpClass(cls): + """ + Set up the initial state for each test method by creating an instance of PromptGenerator. 
+ """ + cls.generator = PromptGenerator() + + # Test whether the add_constraint() method adds a constraint to the generator's constraints list + def test_add_constraint(self): + """ + Test if the add_constraint() method adds a constraint to the generator's constraints list. + """ + constraint = "Constraint1" + self.generator.add_constraint(constraint) + self.assertIn(constraint, self.generator.constraints) + + # Test whether the add_command() method adds a command to the generator's commands list + def test_add_command(self): + """ + Test if the add_command() method adds a command to the generator's commands list. + """ + command_label = "Command Label" + command_name = "command_name" + args = {"arg1": "value1", "arg2": "value2"} + self.generator.add_command(command_label, command_name, args) + command = { + "label": command_label, + "name": command_name, + "args": args, + } + self.assertIn(command, self.generator.commands) + + def test_add_resource(self): + """ + Test if the add_resource() method adds a resource to the generator's resources list. + """ + resource = "Resource1" + self.generator.add_resource(resource) + self.assertIn(resource, self.generator.resources) + + def test_add_performance_evaluation(self): + """ + Test if the add_performance_evaluation() method adds an evaluation to the generator's + performance_evaluation list. + """ + evaluation = "Evaluation1" + self.generator.add_performance_evaluation(evaluation) + self.assertIn(evaluation, self.generator.performance_evaluation) + + def test_generate_prompt_string(self): + """ + Test if the generate_prompt_string() method generates a prompt string with all the added + constraints, commands, resources, and evaluations. + """ + # Define the test data + constraints = ["Constraint1", "Constraint2"] + commands = [ + { + "label": "Command1", + "name": "command_name1", + "args": {"arg1": "value1"}, + }, + { + "label": "Command2", + "name": "command_name2", + "args": {}, + }, + ] + resources = ["Resource1", "Resource2"] + evaluations = ["Evaluation1", "Evaluation2"] + + # Add test data to the generator + for constraint in constraints: + self.generator.add_constraint(constraint) + for command in commands: + self.generator.add_command( + command["label"], command["name"], command["args"] + ) + for resource in resources: + self.generator.add_resource(resource) + for evaluation in evaluations: + self.generator.add_performance_evaluation(evaluation) + + # Generate the prompt string and verify its correctness + prompt_string = self.generator.generate_prompt_string() + self.assertIsNotNone(prompt_string) + + # Check if all constraints, commands, resources, and evaluations are present in the prompt string + for constraint in constraints: + self.assertIn(constraint, prompt_string) + for command in commands: + self.assertIn(command["name"], prompt_string) + for key, value in command["args"].items(): + self.assertIn(f'"{key}": "{value}"', prompt_string) + for resource in resources: + self.assertIn(resource, prompt_string) + for evaluation in evaluations: + self.assertIn(evaluation, prompt_string) + + self.assertIn("constraints", prompt_string.lower()) + self.assertIn("commands", prompt_string.lower()) + self.assertIn("resources", prompt_string.lower()) + self.assertIn("performance evaluation", prompt_string.lower()) diff --git a/tests/test_token_counter.py b/tests/test_token_counter.py new file mode 100644 index 0000000..81e6827 --- /dev/null +++ b/tests/test_token_counter.py @@ -0,0 +1,62 @@ +import unittest +import tests.context +from 
autogpt.token_counter import count_message_tokens, count_string_tokens + + +class TestTokenCounter(unittest.TestCase): + def test_count_message_tokens(self): + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + self.assertEqual(count_message_tokens(messages), 17) + + def test_count_message_tokens_with_name(self): + messages = [ + {"role": "user", "content": "Hello", "name": "John"}, + {"role": "assistant", "content": "Hi there!"}, + ] + self.assertEqual(count_message_tokens(messages), 17) + + def test_count_message_tokens_empty_input(self): + self.assertEqual(count_message_tokens([]), 3) + + def test_count_message_tokens_invalid_model(self): + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + with self.assertRaises(KeyError): + count_message_tokens(messages, model="invalid_model") + + def test_count_message_tokens_gpt_4(self): + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + self.assertEqual(count_message_tokens(messages, model="gpt-4-0314"), 15) + + def test_count_string_tokens(self): + string = "Hello, world!" + self.assertEqual( + count_string_tokens(string, model_name="gpt-3.5-turbo-0301"), 4 + ) + + def test_count_string_tokens_empty_input(self): + self.assertEqual(count_string_tokens("", model_name="gpt-3.5-turbo-0301"), 0) + + def test_count_message_tokens_invalid_model(self): + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + with self.assertRaises(NotImplementedError): + count_message_tokens(messages, model="invalid_model") + + def test_count_string_tokens_gpt_4(self): + string = "Hello, world!" + self.assertEqual(count_string_tokens(string, model_name="gpt-4-0314"), 4) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py new file mode 100644 index 0000000..561b8a3 --- /dev/null +++ b/tests/unit/json_tests.py @@ -0,0 +1,114 @@ +import unittest + +from autogpt.json_parser import fix_and_parse_json + + +class TestParseJson(unittest.TestCase): + def test_valid_json(self): + # Test that a valid JSON string is parsed correctly + json_str = '{"name": "John", "age": 30, "city": "New York"}' + obj = fix_and_parse_json(json_str) + self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) + + def test_invalid_json_minor(self): + # Test that an invalid JSON string can be fixed with gpt + json_str = '{"name": "John", "age": 30, "city": "New York",}' + self.assertEqual( + fix_and_parse_json(json_str, try_to_fix_with_gpt=False), + {"name": "John", "age": 30, "city": "New York"}, + ) + + def test_invalid_json_major_with_gpt(self): + # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False + json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' + self.assertEqual( + fix_and_parse_json(json_str, try_to_fix_with_gpt=True), + {"name": "John", "age": 30, "city": "New York"}, + ) + + def test_invalid_json_major_without_gpt(self): + # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' + # Assert that this raises an exception: + with self.assertRaises(Exception): + fix_and_parse_json(json_str, try_to_fix_with_gpt=False) + + def test_invalid_json_leading_sentence_with_gpt(self): + # Test that a REALLY invalid JSON string raises an error when 
try_to_fix_with_gpt is False + json_str = """I suggest we start by browsing the repository to find any issues that we can fix. + +{ + "command": { + "name": "browse_website", + "args":{ + "url": "https://github.com/Torantulino/Auto-GPT" + } + }, + "thoughts": + { + "text": "I suggest we start browsing the repository to find any issues that we can fix.", + "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", + "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", + "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", + "speak": "I will start browsing the repository to find any issues we can fix." + } +}""" + good_obj = { + "command": { + "name": "browse_website", + "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, + }, + "thoughts": { + "text": "I suggest we start browsing the repository to find any issues that we can fix.", + "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", + "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", + "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", + "speak": "I will start browsing the repository to find any issues we can fix.", + }, + } + # Assert that this raises an exception: + self.assertEqual( + fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj + ) + + def test_invalid_json_leading_sentence_with_gpt(self): + # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. + +{ + "command": { + "name": "browse_website", + "args":{ + "url": "https://github.com/Torantulino/Auto-GPT" + } + }, + "thoughts": + { + "text": "Browsing the repository to identify potential bugs", + "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", + "plan": "- Analyze the repository for potential bugs and areas of improvement", + "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", + "speak": "I am browsing the repository to identify potential bugs." + } +}""" + good_obj = { + "command": { + "name": "browse_website", + "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, + }, + "thoughts": { + "text": "Browsing the repository to identify potential bugs", + "reasoning": "Before fixing bugs, I need to identify what needs fixing. 
+                "plan": "- Analyze the repository for potential bugs and areas of improvement",
+                "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
+                "speak": "I am browsing the repository to identify potential bugs.",
+            },
+        }
+        # Assert that the leading sentence is stripped and the JSON is parsed correctly:
+        self.assertEqual(
+            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/unit/test_browse_scrape_links.py b/tests/unit/test_browse_scrape_links.py
new file mode 100644
index 0000000..0a3340e
--- /dev/null
+++ b/tests/unit/test_browse_scrape_links.py
@@ -0,0 +1,118 @@
+# Generated by CodiumAI
+
+# Dependencies:
+# pip install pytest-mock
+import pytest
+
+from autogpt.commands.web_requests import scrape_links
+
+"""
+Code Analysis
+
+Objective:
+The objective of the 'scrape_links' function is to scrape hyperlinks from a
+given URL and return them in a formatted way.
+
+Inputs:
+- url: a string representing the URL to be scraped.
+
+Flow:
+1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
+2. Check if the response contains an HTTP error. If it does, return "error".
+3. Parse the HTML content of the response using the BeautifulSoup library.
+4. Remove any script and style tags from the parsed HTML.
+5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function.
+6. Format the extracted hyperlinks using the 'format_hyperlinks' function.
+7. Return the formatted hyperlinks.
+
+Outputs:
+- A list of formatted hyperlinks.
+
+Additional aspects:
+- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP
+requests and parse HTML content, respectively.
+- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML.
+- The 'format_hyperlinks' function is called to format the extracted hyperlinks.
+- The function checks for HTTP errors and returns "error" if any are found.
+"""
+
+
+class TestScrapeLinks:
+    # Tests that the function returns a list of formatted hyperlinks when
+    # provided with a valid url that returns a webpage with hyperlinks.
+    def test_valid_url_with_hyperlinks(self):
+        url = "https://www.google.com"
+        result = scrape_links(url)
+        assert len(result) > 0
+        assert isinstance(result, list)
+        assert isinstance(result[0], str)
+
+    # Tests that the function returns correctly formatted hyperlinks when given a valid url.
+    def test_valid_url(self, mocker):
+        # Mock the requests.get() function to return a response with sample HTML containing hyperlinks
+        mock_response = mocker.Mock()
+        mock_response.status_code = 200
+        mock_response.text = (
+            "<html><body><a href='https://www.google.com'>Google</a></body></html>"
+        )
+        mocker.patch("requests.Session.get", return_value=mock_response)
+
+        # Call the function with a valid URL
+        result = scrape_links("https://www.example.com")
+
+        # Assert that the function returns correctly formatted hyperlinks
+        assert result == ["Google (https://www.google.com)"]
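+    # For reference, the "Label (https://url)" strings asserted above are produced
+    # by format_hyperlinks; a hypothetical minimal equivalent (inferred from the
+    # assertions in these tests, not copied from the implementation) would be:
+    #
+    #     def format_hyperlinks(hyperlinks):
+    #         return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
+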
+    # Tests that the function returns an error message when given an invalid url.
+    def test_invalid_url(self, mocker):
+        # Mock the requests.get() function to return an HTTP error response
+        mock_response = mocker.Mock()
+        mock_response.status_code = 404
+        mocker.patch("requests.Session.get", return_value=mock_response)
+
+        # Call the function with an invalid URL
+        result = scrape_links("https://www.invalidurl.com")
+
+        # Assert that the function returns an error message
+        assert "Error:" in result
+
+    # Tests that the function returns an empty list when the html contains no hyperlinks.
+    def test_no_hyperlinks(self, mocker):
+        # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks
+        mock_response = mocker.Mock()
+        mock_response.status_code = 200
+        mock_response.text = "<html><body><p>No hyperlinks here</p></body></html>"
+        mocker.patch("requests.Session.get", return_value=mock_response)
+
+        # Call the function with a URL containing no hyperlinks
+        result = scrape_links("https://www.example.com")
+
+        # Assert that the function returns an empty list
+        assert result == []
+
+    # Tests that scrape_links() correctly extracts and formats hyperlinks from
+    # a sample HTML containing a few hyperlinks.
+    def test_scrape_links_with_few_hyperlinks(self, mocker):
+        # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks
+        mock_response = mocker.Mock()
+        mock_response.status_code = 200
+        mock_response.text = """
+        <html>
+        <body>
+            <a href="https://www.google.com">Google</a>
+            <a href="https://github.com">GitHub</a>
+            <a href="https://www.codium.ai">CodiumAI</a>
+        </body>
+        </html>
+        """
+        mocker.patch("requests.Session.get", return_value=mock_response)
+
+        # Call the function being tested
+        result = scrape_links("https://www.example.com")
+
+        # Assert that the function returns a list of formatted hyperlinks
+        assert isinstance(result, list)
+        assert len(result) == 3
+        assert result[0] == "Google (https://www.google.com)"
+        assert result[1] == "GitHub (https://github.com)"
+        assert result[2] == "CodiumAI (https://www.codium.ai)"
diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py
new file mode 100644
index 0000000..fea5ebf
--- /dev/null
+++ b/tests/unit/test_browse_scrape_text.py
@@ -0,0 +1,98 @@
+# Generated by CodiumAI
+
+import requests
+
+from autogpt.commands.web_requests import scrape_text
+
+"""
+Code Analysis
+
+Objective:
+The objective of the "scrape_text" function is to scrape the text content from
+a given URL and return it as a string, after removing any unwanted HTML tags and scripts.
+
+Inputs:
+- url: a string representing the URL of the webpage to be scraped.
+
+Flow:
+1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
+2. Check if the response contains an HTTP error. If it does, return an error message.
+3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags.
+4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup.
+5. Split the text into lines and then into chunks, removing any extra whitespace.
+6. Join the chunks into a single string with newline characters between them.
+7. Return the cleaned text.
+
+Outputs:
+- A string representing the cleaned text content of the webpage.
+
+Additional aspects:
+- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively.
+- The function removes script and style tags from the HTML to avoid including unwanted content in the text output.
+- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
+"""
+
+
+class TestScrapeText:
+    # Tests that scrape_text() returns the expected text when given a valid URL.
+    def test_scrape_text_with_valid_url(self, mocker):
+        # Mock the requests.get() method to return a response with expected text
+        expected_text = "This is some sample text"
+        mock_response = mocker.Mock()
+        mock_response.status_code = 200
+        mock_response.text = (
+            f"<html><body><div><p>{expected_text}</p></div></body></html>"
+        )
+        mocker.patch("requests.Session.get", return_value=mock_response)
+
+        # Call the function with a valid URL and assert that it returns the expected text
+        url = "http://www.example.com"
+        assert scrape_text(url) == expected_text
+
+    # Tests that the function returns an error message when an invalid or unreachable url is provided.
+    def test_invalid_url(self, mocker):
+        # Mock the requests.get() method to raise an exception
+        mocker.patch(
+            "requests.Session.get", side_effect=requests.exceptions.RequestException
+        )
+
+        # Call the function with an invalid URL and assert that it returns an error message
+        url = "http://www.invalidurl.com"
+        error_message = scrape_text(url)
+        assert "Error:" in error_message
+
+    # Tests that the function returns an empty string when the html page contains no text to be scraped.
+    def test_no_text(self, mocker):
+        # Mock the requests.get() method to return a response with no text
+        mock_response = mocker.Mock()
+        mock_response.status_code = 200
+        mock_response.text = "<html><body></body></html>"
+        mocker.patch("requests.Session.get", return_value=mock_response)
+
+        # Call the function with a valid URL and assert that it returns an empty string
+        url = "http://www.example.com"
+        assert scrape_text(url) == ""
+
+    # Tests that the function returns an error message when the response status code is an http error (>=400).
+    def test_http_error(self, mocker):
+        # Mock the requests.get() method to return a response with a 404 status code
+        mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))
+
+        # Call the function with a URL
+        result = scrape_text("https://www.example.com")
+
+        # Check that the function returns an error message
+        assert result == "Error: HTTP 404 error"
+
+    # Tests that scrape_text() properly handles HTML tags.
+    def test_scrape_text_with_html_tags(self, mocker):
+        # Create a mock response object with HTML containing tags
+        html = "<html><body><p>This is <b>bold</b> text.</p></body></html>"
+        mock_response = mocker.Mock()
+        mock_response.status_code = 200
+        mock_response.text = html
+        mocker.patch("requests.Session.get", return_value=mock_response)
+
+        # Call the function with a URL
+        result = scrape_text("https://www.example.com")
+
+        # Check that the function properly handles HTML tags
+        assert result == "This is bold text."
diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py
new file mode 100644
index 0000000..55a4449
--- /dev/null
+++ b/tests/unit/test_chat.py
@@ -0,0 +1,86 @@
+# Generated by CodiumAI
+import unittest
+import time
+from unittest.mock import patch
+
+from autogpt.chat import create_chat_message, generate_context
+
+
+class TestChat(unittest.TestCase):
+    # Tests that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content.
+    def test_happy_path_role_content(self):
+        result = create_chat_message("system", "Hello, world!")
+        self.assertEqual(result, {"role": "system", "content": "Hello, world!"})
+
+    # Tests that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content.
+    def test_empty_role_content(self):
+        result = create_chat_message("", "")
+        self.assertEqual(result, {"role": "", "content": ""})
+
+    # Tests the behavior of the generate_context function when all input parameters are empty.
+    @patch("time.strftime")
+    def test_generate_context_empty_inputs(self, mock_strftime):
+        # Mock the time.strftime function to return a fixed value
+        mock_strftime.return_value = "Sat Apr 15 00:00:00 2023"
+        # Arrange
+        prompt = ""
+        relevant_memory = ""
+        full_message_history = []
+        model = "gpt-3.5-turbo-0301"
+
+        # Act
+        result = generate_context(prompt, relevant_memory, full_message_history, model)
+
+        # Assert
+        expected_result = (
+            -1,
+            47,
+            3,
+            [
+                {"role": "system", "content": ""},
+                {
+                    "role": "system",
+                    "content": f"The current time and date is {time.strftime('%c')}",
+                },
+                {
+                    "role": "system",
+                    "content": f"This reminds you of these events from your past:\n\n\n",
+                },
+            ],
+        )
+        self.assertEqual(result, expected_result)
+
+    # Tests that the function successfully generates a current_context given valid inputs.
+    def test_generate_context_valid_inputs(self):
+        # Given
+        prompt = "What is your favorite color?"
+        relevant_memory = "You once painted your room blue."
+        full_message_history = [
+            create_chat_message("user", "Hi there!"),
+            create_chat_message("assistant", "Hello! How can I assist you today?"),
+            create_chat_message("user", "Can you tell me a joke?"),
+            create_chat_message(
+                "assistant",
+                "Why did the tomato turn red? Because it saw the salad dressing!",
+            ),
+            create_chat_message("user", "Haha, that's funny."),
+        ]
+        model = "gpt-3.5-turbo-0301"
+
+        # When
+        result = generate_context(prompt, relevant_memory, full_message_history, model)
+
+        # Then
+        self.assertIsInstance(result[0], int)
+        self.assertIsInstance(result[1], int)
+        self.assertIsInstance(result[2], int)
+        self.assertIsInstance(result[3], list)
+        self.assertGreaterEqual(result[0], 0)
+        self.assertGreaterEqual(result[1], 0)
+        self.assertGreaterEqual(result[2], 0)
+        self.assertGreaterEqual(
+            len(result[3]), 3
+        )  # current_context should have at least 3 messages
+        self.assertLessEqual(
+            result[1], 2048
+        )  # keep the token count well under the 4096-token window of gpt-3.5-turbo-0301
diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py
new file mode 100644
index 0000000..e15709a
--- /dev/null
+++ b/tests/unit/test_commands.py
@@ -0,0 +1,18 @@
+import autogpt.agent.agent_manager as agent_manager
+from autogpt.app import start_agent, list_agents, execute_command
+import unittest
+from unittest.mock import patch, MagicMock
+
+
+class TestCommands(unittest.TestCase):
+    def test_make_agent(self):
+        with patch("openai.ChatCompletion.create") as mock:
+            obj = MagicMock()
+            obj.response.choices[0].messages[0].content = "Test message"
+            mock.return_value = obj
+            start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
+            agents = list_agents()
+            self.assertEqual("List of agents:\n0: chat", agents)
+            start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
+            agents = list_agents()
+            self.assertEqual("List of agents:\n0: chat\n1: write", agents)
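+
+
+# The other test modules in this suite end with a __main__ guard; the same guard
+# is provided here so the file can also be run directly.
+if __name__ == "__main__":
+    unittest.main()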