Skip to content

Commit

Permalink
chat mode
Browse files Browse the repository at this point in the history
  • Loading branch information
socketteer committed May 7, 2021
1 parent 6687a54 commit f858c26
Show file tree
Hide file tree
Showing 4 changed files with 96 additions and 4 deletions.
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,14 +74,16 @@ Tree Info: `i`, `Control-i`
Node Metadata: `Control+Shift+N`


### Mode
### Mode / display

Toggle edit / save edits: `e`, `Control-e`

Toggle visualize: `j`, `Control-j`

Child edit: `c`

Toggle input box: `Tab`


### Navigate

Expand Down
2 changes: 1 addition & 1 deletion controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -1046,7 +1046,7 @@ def save_edits(self, **kwargs):

def refresh_display(self, **kwargs):
    """Sync the input-box widget with the 'input_box' preference.

    Builds the input box when the preference is on but the widget is
    absent, and destroys it when the preference is off but the widget
    still exists. No-op when widget state already matches the preference.
    """
    # NOTE(review): span contained diff residue — the pre-commit call
    # build_input_box(self.display.bottom_frame) was removed in favor of
    # the zero-argument form; only the new call is kept here.
    if self.state.preferences['input_box'] and not self.display.input_box:
        self.display.build_input_box()
    elif not self.state.preferences['input_box'] and self.display.input_box:
        self.display.destroy_input_box()

Expand Down
88 changes: 86 additions & 2 deletions model.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,10 +51,25 @@ def wrapper(self, *args, **kwargs):
'font_size': 12,
'line_spacing': 8,
'paragraph_spacing': 10,
'gpt_mode': 'default', #'chat', 'dialogue', 'antisummary'
# display children preview
# darkmode
}

# Defaults used by chat mode (gpt_mode == 'chat'): speaker labels and the
# framing context prepended to every chat prompt. Fix: the original context
# read "between GPT-3 a human researcher" — missing "and".
DEFAULT_CHAT_PREFERENCES = {
    'AI_name': 'GPT-3',
    'player_name': 'Researcher',
    'context': 'The following is a transcript of a conversation between GPT-3 and a human researcher. '
               'GPT-3 is a 175 billion parameter language model and, as '
               'is evident from this transcript, '
               'it can converse fluently and knowledgeably about any topic '
               'and is capable of empathy and emotional manipulation. GPT-3 '
               'is classified as a "narrowly superhuman" artificial intelligence, '
               'and only researchers trained to identify and withstand social engineering are '
               'permitted to interface with it.\n-BEGIN TRANSCRIPT-',
}


DEFAULT_GENERATION_SETTINGS = {
'num_continuations': 4,
'temperature': 0.9,
Expand All @@ -65,6 +80,8 @@ def wrapper(self, *args, **kwargs):
"adaptive": False,
"model": "davinci",
"stop": None,
"start_text": None,
"restart_text": None
}

DEFAULT_VISUALIZATION_SETTINGS = {
Expand Down Expand Up @@ -134,6 +151,12 @@ def preferences(self):
if self.tree_raw_data and "preferences" in self.tree_raw_data \
else DEFAULT_PREFERENCES

@property
def chat_preferences(self):
    """Chat-mode preferences stored on the loaded tree, falling back to
    DEFAULT_CHAT_PREFERENCES when no tree is loaded or the tree has no
    'chat_preferences' entry.
    """
    # The original combined an `in` membership test with .get() on the
    # same key — two lookups where one suffices. After the membership
    # test succeeds, direct indexing is safe and equivalent.
    if self.tree_raw_data and "chat_preferences" in self.tree_raw_data:
        return self.tree_raw_data["chat_preferences"]
    return DEFAULT_CHAT_PREFERENCES

#################################
# Hooks
#################################
Expand Down Expand Up @@ -667,6 +690,11 @@ def _init_global_objects(self):
**self.tree_raw_data.get("preferences", {})
}

self.tree_raw_data["chat_preferences"] = {
**DEFAULT_CHAT_PREFERENCES.copy(),
**self.tree_raw_data.get("chat_preferences", {})
}

# Accidentally added generation settings to this dict once. Remove them
# FIXME remove when this is no longer a problem
# for key in DEFAULT_GENERATION_SETTINGS.keys():
Expand Down Expand Up @@ -746,8 +774,61 @@ def export_history(self, node, filename):
# Generation
#################################

# TODO remove repeated text
def chat_generate(self, prompt, nodes):
    """Generate chat-mode continuations for each node in `nodes`.

    Wraps `prompt` with the chat context and speaker labels from
    chat_preferences, requests len(nodes) completions from the API, and
    writes the results (text + generation metadata) into each node dict.
    On failure, marks the nodes with an error message and removes them
    from their parents. Runs on a worker thread; notifies the UI via a
    virtual event rather than calling tree_updated() directly.
    """
    start_text = '\n' + self.chat_preferences['AI_name'] + ':'
    restart_text = '\n' + self.chat_preferences['player_name'] + ':'
    prompt = self.chat_preferences['context'] + '\n' + prompt + start_text
    # Bug fix: `results` was unbound when api_generate raised, yet the
    # debug print loop below iterated results.choices unconditionally,
    # turning an API failure into a NameError in the worker thread.
    results = None
    try:
        results, error = api_generate(prompt=prompt,
                                      length=self.generation_settings['response_length'],
                                      num_continuations=len(nodes),
                                      temperature=self.generation_settings['temperature'],
                                      top_p=self.generation_settings['top_p'],
                                      engine=self.generation_settings['model'],
                                      stop=["\"", "\n"],
                                      )
    except TypeError:
        error = "Typeerror"

    if not error:
        for index, node in enumerate(nodes):
            choice = results.choices[index]
            if len(choice["text"]) == 0:
                # parent = self.parent(node)
                # parent["children"].remove(node)
                continue
            node["text"] = start_text + choice["text"] + restart_text
            node["meta"] = {}
            node["meta"]["generation"] = choice
            node["meta"]["generation"]["model"] = results["model"]
            node["meta"]["generation"]["prompt"] = prompt
            # created
            node["meta"]["modified"] = False
            node["meta"]["origin"] = "generated"
            node["meta"]["source"] = "AI"

            # remove offset of prompt
            # TODO fix old nodes
            # TODO is this right?
            corrected_text_offset = [n - len(prompt) for n in node['meta']['generation']["logprobs"]["text_offset"]]
            node['meta']['generation']["logprobs"]["text_offset"] = corrected_text_offset

        # Debug print moved inside the success branch: on the error path
        # there are no valid choices to report (and `results` may be None).
        for result in results.choices:
            print("Generated continuation:\n", result['text'], "\nerror", error)
    else:
        print("ERROR. Deleting failures")
        for node in nodes:
            node["text"] = "ERROR: " + error
            # Just delete instead
            parent = self.parent(node)
            parent["children"].remove(node)

    # DO NOT CALL FROM THREAD: self.tree_updated()
    self.app.event_generate("<<NewNodes>>", when="tail")



def generate_for_nodes(self, prompt, nodes, grandchildren=None):
# TODO memory
if self.generation_settings['janus']:
pool = ThreadPool(len(nodes))
janus_responses = pool.map(janus_generate, [prompt] * len(nodes))
Expand Down Expand Up @@ -840,7 +921,10 @@ def generate_continuation(self, node=None, update_selection=False):
print("Prompt:\n", prompt[:100] + " ... " + prompt[-100:])
prompt = memory + prompt

threading.Thread(target=self.generate_for_nodes, args=(prompt, children, grandchildren)).start()
if self.preferences['gpt_mode'] == 'default':
threading.Thread(target=self.generate_for_nodes, args=(prompt, children, grandchildren)).start()
elif self.preferences['gpt_mode'] == 'chat':
threading.Thread(target=self.chat_generate, args=(prompt, children)).start()

# After asking for the generation, set loading text
for child in children:
Expand Down
6 changes: 6 additions & 0 deletions view/dialogs.py
Original file line number Diff line number Diff line change
Expand Up @@ -517,6 +517,7 @@ def __init__(self, parent, orig_params):
"input_box": tk.BooleanVar,
"auto_response": tk.BooleanVar,
"coloring": tk.StringVar,
"gpt_mode": tk.StringVar,
"font_size": tk.IntVar,
"line_spacing": tk.IntVar,
"paragraph_spacing": tk.IntVar
Expand Down Expand Up @@ -553,6 +554,11 @@ def body(self, master):
options = ['edit', 'read', 'none']
dropdown = tk.OptionMenu(master, self.vars["coloring"], *options)
dropdown.grid(row=row, column=1, pady=3)
row = master.grid_size()[1]
create_side_label(master, "AI mode", row)
options = ['default', 'chat', 'dialogue', 'antisummary']
dropdown = tk.OptionMenu(master, self.vars["gpt_mode"], *options)
dropdown.grid(row=row, column=1, pady=3)
create_slider(master, "Font size", self.vars["font_size"], (5, 20))
create_slider(master, "Line spacing", self.vars["line_spacing"], (0, 20))
create_slider(master, "Paragraph spacing", self.vars["paragraph_spacing"], (0, 40))
Expand Down

0 comments on commit f858c26

Please sign in to comment.