Support LLM model name links. The following mode names can now be used in place of a concrete model name:
1. default
2. plan_llm
3. outline_llm
4. swift_llm
waterflier committed Apr 23, 2024 · 1 parent 51998d8 · commit 7b128c0
Showing 7 changed files with 31 additions and 56 deletions.
src/aios/__init__.py (2 changes: 1 addition & 1 deletion)

@@ -6,7 +6,7 @@
 from .agent.agent_base import *
 from .agent.chatsession import AIChatSession
-from .agent.agent import AIAgent,AIAgentTemplete, BaseAIAgent
+from .agent.agent import AIAgent, BaseAIAgent
 from .agent.role import AIRole,AIRoleGroup
 from .agent.workflow import Workflow
 from .agent.agent_memory import AgentMemory
src/aios/agent/agent.py (39 changes: 0 additions & 39 deletions)

@@ -31,29 +31,6 @@

 logger = logging.getLogger(__name__)

-class AIAgentTemplete:
-    def __init__(self) -> None:
-        self.llm_model_name:str = "gpt-4-turbo-preview"
-        self.max_token_size:int = 0
-        self.template_id:str = None
-        self.introduce:str = None
-        self.author:str = None
-        self.prompt:LLMPrompt = None
-
-    def load_from_config(self,config:dict) -> bool:
-        if config.get("llm_model_name") is not None:
-            self.llm_model_name = config["llm_model_name"]
-        if config.get("max_token_size") is not None:
-            self.max_token_size = config["max_token_size"]
-        if config.get("template_id") is not None:
-            self.template_id = config["template_id"]
-        if config.get("prompt") is not None:
-            self.prompt = LLMPrompt()
-            if self.prompt.load_from_config(config["prompt"]) is False:
-                logger.error("load prompt from config failed!")
-                return False
-
 class AIAgent(BaseAIAgent):
     def __init__(self) -> None:

@@ -81,19 +58,6 @@ def __init__(self) -> None:
         self.history_len = 10
         self.read_report_prompt = None

-        todo_prompts = {}
-        todo_prompts[TodoListType.TO_WORK] = {
-            "do": None,
-            "check": None,
-            "review": None,
-        }
-        todo_prompts[TodoListType.TO_LEARN] = {
-            "do": None,
-            "check": None,
-            "review": None,
-        }
-        self.todo_prompts = todo_prompts
-
         self.base_dir = None
         #self.memory_db = None
         self.unread_msg = Queue() # msg from other agent

@@ -178,9 +142,6 @@ def get_template_id(self) -> str:
         return self.template_id

     def get_llm_model_name(self) -> str:
-        if self.llm_model_name is None:
-            return AIStorage.get_instance().get_user_config().get_value("llm_model_name")
-
         return self.llm_model_name

     def get_max_token_size(self) -> int:
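With the per-agent fallback removed, get_llm_model_name() returns the configured name verbatim, which may now be a symbolic mode name rather than a concrete model; resolution is deferred to task creation (see compute_task.py below). A minimal sketch of the new division of labor; setting the attribute directly here is for illustration only, not how agents are normally configured:

    from aios import AIAgent

    agent = AIAgent()
    agent.llm_model_name = "swift_llm"  # a symbolic mode name, not a concrete model

    # No more fallback to the global "llm_model_name" user config:
    # the agent reports its configured (possibly symbolic) name as-is.
    print(agent.get_llm_model_name())   # -> "swift_llm"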
src/aios/frame/compute_kernel.py (6 changes: 4 additions & 2 deletions)

@@ -8,6 +8,7 @@

 from ..proto.compute_task import *
 from ..knowledge import ObjectID
+from ..storage.storage import AIStorage

 from .compute_node import ComputeNode

@@ -122,12 +123,13 @@ def llm_num_tokens_from_text(text:str,model:str = None) -> int:
     def llm_num_tokens(prompt: LLMPrompt, model_name: str = None) -> int:
         return ComputeKernel.llm_num_tokens_from_text(prompt.as_str(), model_name)

+
     # friendly interface for use:
-    def llm_completion(self, prompt: LLMPrompt, resp_mode:str="text",mode_name: Optional[str] = None, max_token: int = 0,inner_functions = None):
+    def llm_completion(self, prompt: LLMPrompt, resp_mode:str="text",model_name: Optional[str] = None, max_token: int = 0,inner_functions = None):
         # create a llm_work_task and push it on the end of the queue;
         # task_schedule will then run this task (it might schedule some work_task to another host)
         task_req = ComputeTask()
-        task_req.set_llm_params(prompt,resp_mode,mode_name, max_token,inner_functions)
+        task_req.set_llm_params(prompt,resp_mode,model_name, max_token,inner_functions)
         self.run(task_req)
         return task_req
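With the corrected parameter name (mode_name was a typo for model_name), callers can pass either a concrete model or one of the symbolic mode names through the friendly interface. A hedged usage sketch; how the kernel instance is obtained (get_instance()) and how the prompt is populated are assumptions, not shown in this diff:

    from aios import ComputeKernel, LLMPrompt  # import paths assumed

    prompt = LLMPrompt()  # prompt construction details omitted

    # "outline_llm" is resolved to the configured llm_outline_model
    # inside set_llm_params() (see compute_task.py below).
    task = ComputeKernel.get_instance().llm_completion(
        prompt,
        resp_mode="text",
        model_name="outline_llm",
    )
    # llm_completion() enqueues a ComputeTask and returns it immediately;
    # the task scheduler runs it, possibly on another host.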
src/aios/proto/compute_task.py (6 changes: 3 additions & 3 deletions)

@@ -287,9 +287,9 @@ def set_llm_params(self, prompts, resp_mode,model_name, max_token_size, inner_fu
         self.callchain_id = callchain_id
         self.params["prompts"] = prompts.to_message_list()
         self.params["resp_mode"] = resp_mode
-        if model_name is None:
-            model_name = AIStorage.get_instance().get_user_config().get_value("llm_model_name")
-        self.params["model_name"] = model_name
+
+        self.params["model_name"] = AIStorage.get_instance().get_user_config().llm_get_real_model_name(model_name)
+
         if max_token_size is None:
             self.params["max_token_size"] = 4000
         else:
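The effect is that params["model_name"] always holds a resolved, concrete model name at task-creation time, whatever the caller passed. A sketch of the expected behavior, assuming the default configuration registered in storage.py below and a populated LLMPrompt named prompt:

    task = ComputeTask()
    task.set_llm_params(prompt, "text", None, 0, None)
    # None resolves to llm_default_model:
    # task.params["model_name"] == "gpt-4-turbo"

    task = ComputeTask()
    task.set_llm_params(prompt, "text", "plan_llm", 0, None)
    # the "plan_llm" mode name resolves to llm_plan_model:
    # task.params["model_name"] == "gpt-4-turbo"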
src/aios/storage/storage.py (22 changes: 21 additions & 1 deletion)

@@ -40,7 +40,10 @@ def __init__(self) -> None:
         self.config_table = {}
         self.user_config_path:str = None

-        self._init_default_value("llm_model_name","gpt-4-turbo-preview")
+        self._init_default_value("llm_default_model","gpt-4-turbo")
+        self._init_default_value("llm_plan_model","gpt-4-turbo")
+        self._init_default_value("llm_outline_model","gpt-3.5-turbo")
+        self._init_default_value("llm_swift_model","gpt-3.5-turbo")

     def _init_default_value(self,key:str,value:Any) -> None:
         if self.config_table.get(key) is not None:

@@ -52,6 +55,23 @@ def _init_default_value(self,key:str,value:Any) -> None:
         self.config_table[key] = new_config_item

+    def llm_get_real_model_name(self,mode_name:str) -> str:
+        default_model_name = self.get_value("llm_default_model")
+        plan_llm_model_name = self.get_value("llm_plan_model")
+        outline_model_name = self.get_value("llm_outline_model")
+        swift_model_name = self.get_value("llm_swift_model")
+        if mode_name is None:
+            return default_model_name
+        if mode_name == "default":
+            return default_model_name
+        if mode_name == "plan_llm":
+            return plan_llm_model_name
+        if mode_name == "outline_llm":
+            return outline_model_name
+        if mode_name == "swift_llm":
+            return swift_model_name
+
+        return mode_name
+
     def add_user_config(self,key:str,desc:str,is_optional:bool,default_value:Any=None,item_type="str") -> None:
         if self.config_table.get(key) is not None:
             logger.warning("user config key %s already exist, will be overrided",key)
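llm_get_real_model_name() is a plain indirection table: the four mode names resolve to user-configurable entries, None falls back to the default, and any other string is assumed to already be a concrete model name and passes through unchanged. Expected resolutions with the default values above (a sketch; actual values depend on the user's config, and "gpt-4o" is just an illustrative pass-through string):

    config = AIStorage.get_instance().get_user_config()

    config.llm_get_real_model_name(None)           # -> "gpt-4-turbo"   (llm_default_model)
    config.llm_get_real_model_name("default")      # -> "gpt-4-turbo"   (llm_default_model)
    config.llm_get_real_model_name("plan_llm")     # -> "gpt-4-turbo"   (llm_plan_model)
    config.llm_get_real_model_name("outline_llm")  # -> "gpt-3.5-turbo" (llm_outline_model)
    config.llm_get_real_model_name("swift_llm")    # -> "gpt-3.5-turbo" (llm_swift_model)
    config.llm_get_real_model_name("gpt-4o")       # -> "gpt-4o"        (pass-through)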
src/component/agent_manager/agent_manager.py (11 changes: 2 additions & 9 deletions)

@@ -6,7 +6,7 @@

 import runpy
 from typing import Any, Callable, Dict, List, Optional, Union

-from aios import AIAgent,AIAgentTemplete,AIStorage,BaseAIAgent,PackageEnv,PackageEnvManager,PackageMediaInfo,PackageInstallTask,WorkspaceEnvironment
+from aios import AIAgent,AIStorage,BaseAIAgent,PackageEnv,PackageEnvManager,PackageMediaInfo,PackageInstallTask,WorkspaceEnvironment

 logger = logging.getLogger(__name__)

@@ -82,21 +82,14 @@ async def get(self,agent_id:str) -> AIAgent:
     def remove(self,agent_id:str)->int:
         pass

-    async def get_templete(self,templete_id) -> AIAgentTemplete:
-        template_media_info = self.agent_templete_env.get(templete_id)
-        if template_media_info is None:
-            return None
-        return self._load_templete_from_media(template_media_info)
-
     def install(self,templete_id) -> PackageInstallTask:
         installer = self.agent_templete_env.get_installer()
         return installer.install(templete_id)

     def uninstall(self,templete_id) -> int:
         pass

-    async def _load_templete_from_media(self,templete_media:PackageMediaInfo) -> AIAgentTemplete:
-        pass
-
     async def _load_agent_from_media(self,agent_media:PackageMediaInfo) -> BaseAIAgent:
         reader = self.agent_env._create_media_loader(agent_media)
src/node_daemon/__init__.py (1 change: 0 additions & 1 deletion)

This file was deleted.