Refactor alignment command into separate module
The alignment command was previously defined in cli.py. This commit moves it into its own module, commands/alignment.py, improving the organization of the codebase. The command is now imported in cli.py and commands/__init__.py. This makes the code more modular and easier to maintain and extend.
TechNickAI committed Aug 1, 2023
1 parent f9c5966 commit 42055d1
Showing 3 changed files with 43 additions and 33 deletions.
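
For context, the pattern being applied here is Click's standard one: define the command in its own module and register it on the CLI group with add_command. Below is a minimal, self-contained sketch of that pattern (hypothetical names such as `hello`; this is not AICodeBot's actual code).

import click


# In the real repo the command body lives in its own module
# (aicodebot/commands/alignment.py); here a hypothetical `hello`
# command stands in for it.
@click.command()
@click.option("-n", "--name", default="world")
def hello(name):
    """A command that would live in its own module, e.g. commands/hello.py."""
    click.echo(f"Hello, {name}!")


# The entry-point module (cli.py in this repo) defines the group and
# only wires command objects in, rather than defining them inline.
@click.group()
def cli():
    """Top-level CLI group."""


cli.add_command(hello)

if __name__ == "__main__":
    cli()

Keeping each command in its own module means the entry point stays a thin registry, which is the modularity the commit message refers to.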
36 changes: 4 additions & 32 deletions aicodebot/cli.py
@@ -1,6 +1,6 @@
 from aicodebot import version as aicodebot_version
-from aicodebot.coder import CREATIVE_TEMPERATURE, DEFAULT_MAX_TOKENS, Coder
-from aicodebot.commands import configure, debug, learn, sidekick, sidekick_agent
+from aicodebot.coder import DEFAULT_MAX_TOKENS, Coder
+from aicodebot.commands import alignment, configure, debug, learn, sidekick, sidekick_agent
 from aicodebot.config import read_config
 from aicodebot.helpers import exec_and_get_output, logger
 from aicodebot.output import OurMarkdown as Markdown, RichLiveCallbackHandler, get_console
@@ -35,8 +35,9 @@ def cli(ctx, debug_output):
     langchain.debug = debug_output
 
 
-cli.add_command(debug)
+cli.add_command(alignment)
 cli.add_command(configure)
+cli.add_command(debug)
 cli.add_command(sidekick)
 if os.getenv("AICODEBOT_ENABLE_EXPERIMENTAL_FEATURES"):
     cli.add_command(learn)
@@ -51,35 +52,6 @@ def cli(ctx, debug_output):
 # Keep the commands in alphabetical order.
 
 
-@cli.command()
-@click.option("-t", "--response-token-size", type=int, default=350)
-@click.option("-v", "--verbose", count=True)
-def alignment(response_token_size, verbose):
-    """A message from AICodeBot about AI Alignment ❤ + 🤖."""
-
-    # Load the prompt
-    prompt = get_prompt("alignment")
-    logger.trace(f"Prompt: {prompt}")
-
-    # Set up the language model
-    model_name = Coder.get_llm_model_name(Coder.get_token_length(prompt.template) + response_token_size)
-
-    with Live(Markdown(""), auto_refresh=True) as live:
-        llm = Coder.get_llm(
-            model_name,
-            verbose,
-            response_token_size,
-            temperature=CREATIVE_TEMPERATURE,
-            streaming=True,
-            callbacks=[RichLiveCallbackHandler(live, console.bot_style)],
-        )
-
-        # Set up the chain
-        chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
-
-        chain.run({})
-
-
 @cli.command()
 @click.option("-v", "--verbose", count=True)
 @click.option("-t", "--response-token-size", type=int, default=250)
3 changes: 2 additions & 1 deletion aicodebot/commands/__init__.py
@@ -1,6 +1,7 @@
+from .alignment import alignment
 from .configure import configure
 from .debug import debug
 from .learn import learn
 from .sidekick import sidekick, sidekick_agent
 
-__all__ = ["configure", "debug", "learn", "sidekick", "sidekick_agent"]
+__all__ = ["alignment", "configure", "debug", "learn", "sidekick", "sidekick_agent"]
37 changes: 37 additions & 0 deletions aicodebot/commands/alignment.py
@@ -0,0 +1,37 @@
+from aicodebot.coder import CREATIVE_TEMPERATURE, Coder
+from aicodebot.helpers import logger
+from aicodebot.output import OurMarkdown, RichLiveCallbackHandler, get_console
+from aicodebot.prompts import get_prompt
+from langchain.chains import LLMChain
+from rich.live import Live
+import click
+
+
+@click.command()
+@click.option("-t", "--response-token-size", type=int, default=350)
+@click.option("-v", "--verbose", count=True)
+def alignment(response_token_size, verbose):
+    """A message from AICodeBot about AI Alignment ❤ + 🤖."""
+    console = get_console()
+
+    # Load the prompt
+    prompt = get_prompt("alignment")
+    logger.trace(f"Prompt: {prompt}")
+
+    # Set up the language model
+    model_name = Coder.get_llm_model_name(Coder.get_token_length(prompt.template) + response_token_size)
+
+    with Live(OurMarkdown(""), auto_refresh=True) as live:
+        llm = Coder.get_llm(
+            model_name,
+            verbose,
+            response_token_size,
+            temperature=CREATIVE_TEMPERATURE,
+            streaming=True,
+            callbacks=[RichLiveCallbackHandler(live, console.bot_style)],
+        )
+
+        # Set up the chain
+        chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
+
+        chain.run({})

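One practical benefit of extracting the command into its own module is that it can be exercised in isolation, without going through the full CLI group. A hedged sketch using Click's test runner follows; note the assumption that invoking the real command would call the configured LLM, so Coder.get_llm would normally be stubbed or patched first.

from click.testing import CliRunner

from aicodebot.commands import alignment

# Invoke the standalone command directly, bypassing the CLI group.
# This would make a real LLM call unless Coder.get_llm is patched.
runner = CliRunner()
result = runner.invoke(alignment, ["--response-token-size", "100"])
print(result.exit_code)
print(result.output)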