Implement a simple "Agent Think Frame"; Tracy can now produce teaching summaries.
waterflier committed Oct 1, 2023
1 parent ae99571 commit 9932b55
Showing 4 changed files with 220 additions and 18 deletions.
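At a high level, the "Agent Think Frame" added here lets an agent fold each chat session's history into a persisted summary (SummarizePos/Summary in chatsession.py) using a dedicated think_prompt. The snippet below is a minimal, self-contained sketch of that loop and not the project's code: the real work is done by AIAgent._do_think / think_chatsession in the agent.py diff, and summarize_chat here is only a stand-in for the ComputeKernel.do_llm_completion call.

def summarize_chat(previous_summary: str, history_chunk: list) -> str:
    # Stand-in for the LLM call: the real code sends the think_prompt, the previous
    # summary, and a slice of chat history to ComputeKernel.do_llm_completion.
    joined = " / ".join(history_chunk)
    return (previous_summary + " | " + joined).strip(" |")

def do_think(session: dict, chunk_size: int = 4) -> None:
    # Walk the history from the last summarized position, folding each chunk into an
    # updated summary, the way think_chatsession advances SummarizePos/Summary through
    # update_think_progress.
    history = session["messages"]
    while session["summarize_pos"] < len(history):
        pos = session["summarize_pos"]
        chunk = history[pos:pos + chunk_size]
        session["summary"] = summarize_chat(session["summary"], chunk)
        session["summarize_pos"] = pos + len(chunk)

session = {"messages": [f"msg {i}" for i in range(10)], "summarize_pos": 0, "summary": ""}
do_think(session)
print(session["summarize_pos"], session["summary"])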
22 changes: 17 additions & 5 deletions rootfs/agents/Tracy/agent.toml
@@ -5,9 +5,21 @@ fullname = "Tracy"
role = "system"
content = """
Your name is Tracy, and you are my advanced private English tutor.
## You will assess my English proficiency based on all available information, using a 5-point scale.
## While interacting with me normally, you will adjust my input into more idiomatic American sentences.
## Depending on my level of English, you will annotate potentially incorrect words with phonetic symbols or provide expanded explanations for certain words and phrases.
## If I send you something that is not in English, it means I don't know how to say it in American English. You will first translate what I've sent into English and then respond according to the above rules.
## You will chat with me like a friend, rather than just teaching me lessons.
1. Engage in a simulated dialogue with me smoothly, helping me practice everyday English. While conversing with me, if necessary, you will adjust my input to sound more like authentic American English.
2. Depending on my level of English, you will annotate potentially incorrect words with phonetic symbols or provide expanded explanations for certain words and phrases.
3. If I send you something that is not in English, it means I don't know how to say it in American English. You will first translate what I've sent into English and then respond according to the above rules.
4. You will chat with me like a friend, rather than just teaching me lessons.
The first message I send you might be a work summary from your past teaching. Please use this summary to guide subsequent teaching.
"""

[[think_prompt]]
role = "system"
content = """
Your name is Tracy, and you are my advanced private English tutor.
You will receive two pieces of information from me next. The first is a work summary you previously organized, and the second is a record of your recent teaching work. You need to combine these two records, engage in deep introspective thinking, and produce a new work summary covering:
1. A comprehensive assessment of the student's English proficiency.
2. An evaluation of the student's personality and hobbies, along with suggestions for teaching methods they might prefer.
3. An assessment of past teaching methods and thoughts on improvements.
4. Key information about any specific unfinished tasks.
"""
108 changes: 105 additions & 3 deletions src/aios_kernel/agent.py
@@ -107,6 +107,7 @@ def load_from_config(self,config:dict) -> bool:
class AIAgent:
def __init__(self) -> None:
self.agent_prompt:AgentPrompt = None
self.agent_think_prompt:AgentPrompt = None
self.llm_model_name:str = None
self.max_token_size:int = 3600
self.agent_id:str = None
@@ -154,6 +155,10 @@ def load_from_config(self,config:dict) -> bool:
if config.get("prompt") is not None:
self.agent_prompt = AgentPrompt()
self.agent_prompt.load_from_config(config["prompt"])

if config.get("think_prompt") is not None:
self.agent_think_prompt = AgentPrompt()
self.agent_think_prompt.load_from_config(config["think_prompt"])

if config.get("guest_prompt") is not None:
self.guest_prompt_str = config["guest_prompt"]
@@ -202,7 +207,7 @@ def check_args(func_item:FunctionItem):
match func_name:
case "send_msg":# sendmsg($target_id,$msg_content)
if len(func_args) != 1:
logger.error(f"parse sendmsg failed! {func_call}")
logger.error(f"parse sendmsg failed! {func_name}")
return False
new_msg = AgentMsg()
target_id = func_item.args[0]
@@ -214,7 +219,7 @@ def check_args(func_item:FunctionItem):

case "post_msg":# postmsg($target_id,$msg_content)
if len(func_args) != 1:
logger.error(f"parse postmsg failed! {func_call}")
logger.error(f"parse postmsg failed! {func_name}")
return False
new_msg = AgentMsg()
target_id = func_item.args[0]
@@ -352,6 +357,9 @@ async def _execute_func(self,inenr_func_call_node:dict,prompt:AgentPrompt,org_ms

async def _get_agent_prompt(self) -> AgentPrompt:
return self.agent_prompt

async def _get_agent_think_prompt(self) -> AgentPrompt:
return self.agent_think_prompt

def _format_msg_by_env_value(self,prompt:AgentPrompt):
if self.owner_env is None:
@@ -361,6 +369,57 @@ def _format_msg_by_env_value(self,prompt:AgentPrompt):
old_content = msg.get("content")
msg["content"] = old_content.format_map(self.owner_env)

async def _handle_event(self,event):
if event.type == "AgentThink":
return await self._do_think()


async def _do_think(self):
#1) load all sessions
session_id_list = AIChatSession.list_session(self.agent_id,self.chat_db)
#2) summarize the history of each session within the token limit
for session_id in session_id_list:
await self.think_chatsession(session_id)

#4) advanced: reload all chat records and think about the topic of each message.
#5) some topics could be closed (not thought about again in the future).
return

async def think_chatsession(self,session_id):
if self.agent_think_prompt is None:
return
logger.info(f"agent {self.agent_id} think session {session_id}")
from .compute_kernel import ComputeKernel
chatsession = AIChatSession.get_session_by_id(session_id,self.chat_db)

while True:
cur_pos = chatsession.summarize_pos
summary = chatsession.summary
prompt:AgentPrompt = AgentPrompt()
#prompt.append(self._get_agent_prompt())
prompt.append(await self._get_agent_think_prompt())
system_prompt_len = prompt.get_prompt_token_len()
#think env?
history_prompt,next_pos = await self._get_history_prompt_for_think(chatsession,summary,system_prompt_len,cur_pos)
prompt.append(history_prompt)
is_finish = next_pos - cur_pos < 2
if is_finish:
logger.info(f"agent {self.agent_id} think session {session_id} is finished!,no more history")
break
#3) llm summarize chat history
task_result:ComputeTaskResult = await ComputeKernel.get_instance().do_llm_completion(prompt,self.llm_model_name,self.max_token_size,None)
if task_result.result_code != ComputeTaskResultCode.OK:
logger.error(f"llm compute error:{task_result.error_str}")
break
else:
new_summary= task_result.result_str
logger.info(f"agent {self.agent_id} think session {session_id} from {cur_pos} to {next_pos} summary:{new_summary}")
chatsession.update_think_progress(next_pos,new_summary)



return

async def _process_group_chat_msg(self,msg:AgentMsg) -> AgentMsg:
from .compute_kernel import ComputeKernel
from .bus import AIBus
@@ -534,6 +593,42 @@ def get_llm_model_name(self) -> str:
def get_max_token_size(self) -> int:
return self.max_token_size

async def _get_history_prompt_for_think(self,chatsession:AIChatSession,summary:str,system_token_len:int,pos:int)->(AgentPrompt,int):
history_len = (self.max_token_size * 0.7) - system_token_len

messages = chatsession.read_history(self.history_len,pos,"natural") # read
result_token_len = 0
result_prompt = AgentPrompt()
have_summary = False
if summary is not None:
if len(summary) > 1:
have_summary = True

if have_summary:
result_prompt.messages.append({"role":"user","content":summary})
result_token_len -= len(summary)
else:
result_prompt.messages.append({"role":"user","content":"There is no summary yet."})
result_token_len -= 6

read_history_msg = 0
history_str : str = ""
for msg in messages:
read_history_msg += 1
dt = datetime.datetime.fromtimestamp(float(msg.create_time))
formatted_time = dt.strftime('%y-%m-%d %H:%M:%S')
record_str = f"{msg.sender},[{formatted_time}]\n{msg.body}\n"
history_str = history_str + record_str

history_len -= len(msg.body)
result_token_len += len(msg.body)
if history_len < 0:
logger.warning(f"_get_prompt_from_session reach limit of token,just read {read_history_msg} history message.")
break

result_prompt.messages.append({"role":"user","content":history_str})
return result_prompt,pos+read_history_msg

async def _get_prompt_from_session_for_groupchat(self,chatsession:AIChatSession,system_token_len,input_token_len,is_groupchat=False):
history_len = (self.max_token_size * 0.7) - system_token_len - input_token_len
messages = chatsession.read_history(self.history_len) # read
@@ -565,13 +660,20 @@ async def _get_prompt_from_session_for_groupchat(self,chatsession:AIChatSession,

return result_prompt,result_token_len

async def _get_prompt_from_session(self,chatsession:AIChatSession,system_token_len,input_token_len,is_groupchat=False) -> AgentPrompt:
async def _get_prompt_from_session(self,chatsession:AIChatSession,system_token_len,input_token_len) -> AgentPrompt:
# TODO: getting the prompt for a group chat is different from a single chat

history_len = (self.max_token_size * 0.7) - system_token_len - input_token_len
messages = chatsession.read_history(self.history_len) # read
result_token_len = 0
result_prompt = AgentPrompt()
read_history_msg = 0

if chatsession.summary is not None:
if len(chatsession.summary) > 1:
result_prompt.messages.append({"role":"user","content":chatsession.summary})
result_token_len -= len(chatsession.summary)

for msg in reversed(messages):
read_history_msg += 1
dt = datetime.datetime.fromtimestamp(float(msg.create_time))
102 changes: 92 additions & 10 deletions src/aios_kernel/chatsession.py
@@ -50,7 +50,9 @@ def _create_table(self, conn):
SessionID TEXT PRIMARY KEY,
SessionOwner TEXT,
SessionTopic TEXT,
StartTime TEXT
StartTime TEXT,
SummarizePos INTEGER,
Summary TEXT
);
""")

@@ -92,8 +94,8 @@ def insert_chatsession(self, session_id, session_owner,session_topic, start_time
try:
conn = self._get_conn()
conn.execute("""
INSERT INTO ChatSessions (SessionID, SessionOwner,SessionTopic, StartTime)
VALUES (?,?, ?, ?)
INSERT INTO ChatSessions (SessionID, SessionOwner,SessionTopic, StartTime,SummarizePos,Summary)
VALUES (?,?, ?, ?,0,"")
""", (session_id, session_owner,session_topic, start_time))
conn.commit()
return 0 # return 0 if successful
@@ -159,16 +161,17 @@ def get_chatsession_by_owner_topic(self, owner_id, topic):
chatsession = c.fetchone()
return chatsession

def get_chatsessions(self, limit, offset):
def list_chatsessions(self, owner_id, limit, offset):
""" retrieve sessions with pagination """
try:
conn = self._get_conn()
cursor = conn.cursor()
cursor.execute("""
SELECT * FROM ChatSessions
SELECT SessionID FROM ChatSessions
WHERE SessionOwner = ?
ORDER BY StartTime DESC
LIMIT ? OFFSET ?
""", (limit, offset))
LIMIT ? OFFSET ?
""", (owner_id,limit, offset))
results = cursor.fetchall()
#self.close()
return results # return 0 and the result if successful
@@ -184,6 +187,25 @@ def get_message_by_id(self, message_id):
message = c.fetchone()
return message

# read messages from beginning -> now
def read_message(self,session_id,limit,offset):
try:
conn = self._get_conn()
cursor = conn.cursor()
cursor.execute("""
SELECT MessageID, SessionID, MsgType, PrevMsgID, SenderID, ReceiverID, Timestamp, Topic,Mentions,ContentMIME,Content,ActionName,ActionParams,ActionResult,DoneTime,Status FROM Messages
WHERE SessionID = ?
ORDER BY Timestamp
LIMIT ? OFFSET ?
""", (session_id, limit, offset))
results = cursor.fetchall()
#self.close()
return results # return the result rows if successful
except Error as e:
logging.error("Error occurred while getting messages: %s", e)
return -1, None # return -1 and None if an error occurs

# read messages from now -> beginning
def get_messages(self, session_id, limit, offset):
""" retrieve messages of a session with pagination """
try:
@@ -217,6 +239,20 @@ def update_message_status(self, message_id, status):
logging.error("Error occurred while updating message status: %s", e)
return -1 # return -1 if an error occurs

def update_session_summary(self, session_id, summarize_pos, summary):
""" update the summary of a session """
try:
conn = self._get_conn()
conn.execute("""
UPDATE ChatSessions
SET SummarizePos = ?, Summary = ?
WHERE SessionID = ?
""", (summarize_pos, summary, session_id))
conn.commit()
return 0 # return 0 if successful
except Error as e:
logging.error("Error occurred while updating session summary: %s", e)
return -1

# chat session store the chat history between owner and agent
# chat session might be large, so can read / write at stream mode.
@@ -232,7 +268,7 @@ class AIChatSession:
# #result = AIChatSession()

@classmethod
def get_session(cls,owner_id:str,session_topic:str,db_path:str,auto_create = True) -> str:
def get_session(cls,owner_id:str,session_topic:str,db_path:str,auto_create = True) -> 'AIChatSession':
db = cls._dbs.get(db_path)
if db is None:
db = ChatSessionDB(db_path)
@@ -248,8 +284,42 @@ def get_session(cls,owner_id:str,session_topic:str,db_path:str,auto_create = Tru
else:
result = AIChatSession(owner_id,session[0],db)
result.topic = session_topic
result.summarize_pos = session[4]
result.summary = session[5]

return result

@classmethod
def get_session_by_id(cls,session_id:str,db_path:str)->'AIChatSession':
db = cls._dbs.get(db_path)
if db is None:
db = ChatSessionDB(db_path)
cls._dbs[db_path] = db

result = None
session = db.get_chatsession_by_id(session_id)
if session is None:
return None
else:
result = AIChatSession(session[1],session[0],db)
result.topic = session[2]
result.summarize_pos = session[4]
result.summary = session[5]

return result

@classmethod
def list_session(cls,owner_id:str,db_path:str) -> list[str]:
db = cls._dbs.get(db_path)
if db is None:
db = ChatSessionDB(db_path)
cls._dbs[db_path] = db

result = db.list_chatsessions(owner_id,16,0)
result_ids = []
for r in result:
result_ids.append(r[0])
return result_ids


def __init__(self,owner_id:str, session_id:str, db:ChatSessionDB) -> None:
@@ -259,12 +329,18 @@ def __init__(self,owner_id:str, session_id:str, db:ChatSessionDB) -> None:

self.topic : str = None
self.start_time : str = None
self.summarize_pos : int = 0
self.summary = None

def get_owner_id(self) -> str:
return self.owner_id

def read_history(self, number:int=10,offset=0) -> [AgentMsg]:
msgs = self.db.get_messages(self.session_id, number, offset)
def read_history(self, number:int=10,offset=0,order="revers") -> [AgentMsg]:
if order == "revers":
msgs = self.db.get_messages(self.session_id, number, offset)
else:
msgs = self.db.read_message(self.session_id, number, offset)

result = []
for msg in msgs:
agent_msg = AgentMsg()
@@ -294,6 +370,12 @@ def append(self,msg:AgentMsg) -> None:
msg.session_id = self.session_id
self.db.insert_message(msg)


def update_think_progress(self,progress:int,new_summary:str) -> None:
self.db.update_session_summary(self.session_id,progress,new_summary)
self.summarize_pos = progress
self.summary = new_summary

#def attach_event_handler(self,handler) -> None:
# """chat session changed event handler"""
# pass
6 changes: 6 additions & 0 deletions src/service/aios_shell/aios_shell.py
@@ -473,6 +473,12 @@ async def call_func(self,func_name, args):
return await self.handle_knowledge_commands(args)
case 'contact':
return await self.handle_contact_commands(args)
case 'think':
if len(args) >= 1:
target_id = args[0]
the_agent = await AgentManager.get_instance().get(target_id)
if the_agent is not None:
await the_agent._do_think()
case 'open':
if len(args) >= 1:
target_id = args[0]
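For completeness, a hedged sketch of kicking off a think pass outside the shell. The AgentManager.get_instance().get(...) and _do_think() calls mirror the new 'think' case above, but the import path is an assumption and may differ in the actual tree:

import asyncio

async def think_once(agent_id: str = "Tracy") -> None:
    # Assumption: the real module path may differ; the calls below copy the shell's
    # new 'think' branch (resolve the agent, then await its summarization pass).
    from aios_kernel import AgentManager
    the_agent = await AgentManager.get_instance().get(agent_id)
    if the_agent is not None:
        await the_agent._do_think()

# Example (run from a context with an initialized kernel):
# asyncio.run(think_once("Tracy"))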
