Skip to content

Commit

Permalink
v0.7201 - perplexity handling improvements & fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
FlyingFathead committed May 28, 2024
1 parent 42fbc7d commit 0b25e90
Show file tree
Hide file tree
Showing 5 changed files with 87 additions and 31 deletions.
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ yt-dlp>=2024.3.10
- Use the `configmerger.py` to update old configuration files into a newer version's `config.ini`. You can do this by creating a copy of your existing config to e.g. a file named `myconfig.txt` and including in it the lines you want to keep for the newer version. Then, just run `python configmerger.py config.ini myconfig.txt` and all your existing config lines will be migrated to the new one. Works in most cases, but remember to be careful and double-check any migration issues with e.g. `diff`!

# Changelog
- v0.7201 - added Perplexity API model configuration to `config.ini`
- v0.72 - improved error catching + messaging with Perplexity's API
- v0.71 - holiday mentions via Python's `holidays` module (can be extended)
- v0.708 - improved astronomy data combining via WeatherAPI
Expand Down
51 changes: 34 additions & 17 deletions api_perplexity_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,22 +11,41 @@
import os
import httpx
import asyncio
import configparser

from langdetect import detect
from telegram import constants

# Load the configuration file.
# NOTE(review): relative path — assumes the bot is launched from the project
# root. A missing file or [Perplexity] section is harmless: every option
# below falls back to the DEFAULT_* values.
config = configparser.ConfigParser()
config.read('config.ini')

# ~~~~~~~~~
# variables
# ~~~~~~~~~

# Define fallback default values (used when config.ini omits an option)
DEFAULT_PERPLEXITY_MODEL = "llama-3-sonar-large-32k-online"
DEFAULT_PERPLEXITY_MAX_TOKENS = 1024
DEFAULT_PERPLEXITY_TEMPERATURE = 0.0
DEFAULT_PERPLEXITY_MAX_RETRIES = 3
DEFAULT_PERPLEXITY_RETRY_DELAY = 25   # seconds between retry attempts
DEFAULT_PERPLEXITY_TIMEOUT = 30       # HTTP client timeout, seconds
# chunk sizes are for translations (size of each chunk handed to the translator)
DEFAULT_CHUNK_SIZE = 500

# Perplexity API settings from config with fallback defaults
PERPLEXITY_MODEL = config.get('Perplexity', 'Model', fallback=DEFAULT_PERPLEXITY_MODEL)
PERPLEXITY_MAX_TOKENS = config.getint('Perplexity', 'MaxTokens', fallback=DEFAULT_PERPLEXITY_MAX_TOKENS)
PERPLEXITY_TEMPERATURE = config.getfloat('Perplexity', 'Temperature', fallback=DEFAULT_PERPLEXITY_TEMPERATURE)
PERPLEXITY_MAX_RETRIES = config.getint('Perplexity', 'MaxRetries', fallback=DEFAULT_PERPLEXITY_MAX_RETRIES)
PERPLEXITY_RETRY_DELAY = config.getint('Perplexity', 'RetryDelay', fallback=DEFAULT_PERPLEXITY_RETRY_DELAY)
PERPLEXITY_TIMEOUT = config.getint('Perplexity', 'Timeout', fallback=DEFAULT_PERPLEXITY_TIMEOUT)
CHUNK_SIZE = config.getint('Perplexity', 'ChunkSize', fallback=DEFAULT_CHUNK_SIZE)

# API key is read from the environment, never from config.ini.
# Set PERPLEXITY_API_KEY before starting the bot; os.getenv returns None if unset.
PERPLEXITY_API_KEY = os.getenv("PERPLEXITY_API_KEY")

# main perplexity function
# Main Perplexity function
async def fact_check_with_perplexity(question: str):
url = "https://api.perplexity.ai/chat/completions"
headers = {
Expand All @@ -35,10 +54,10 @@ async def fact_check_with_perplexity(question: str):
"Accept": "application/json",
}
data = {
"model": "sonar-small-online", # Specifying the model
"model": PERPLEXITY_MODEL, # Specifying the model
"stream": False,
"max_tokens": 1024,
"temperature": 0.0, # Adjust based on how deterministic you want the responses to be
"max_tokens": PERPLEXITY_MAX_TOKENS,
"temperature": PERPLEXITY_TEMPERATURE, # Adjust based on how deterministic you want the responses to be
"messages": [
{
"role": "user",
Expand All @@ -47,8 +66,8 @@ async def fact_check_with_perplexity(question: str):
]
}

async with httpx.AsyncClient(timeout=30) as client: # Increased timeout
for attempt in range(3): # Retry mechanism
async with httpx.AsyncClient(timeout=PERPLEXITY_TIMEOUT) as client: # Increased timeout
for attempt in range(PERPLEXITY_MAX_RETRIES): # Retry mechanism
try:
response = await client.post(url, json=data, headers=headers)
if response.status_code == 200:
Expand All @@ -69,7 +88,7 @@ async def fact_check_with_perplexity(question: str):

return None

# queries perplexity
# Queries Perplexity
async def query_perplexity(bot, chat_id, question: str):
logging.info(f"Querying Perplexity with question: {question}")
response_data = await fact_check_with_perplexity(question)
Expand Down Expand Up @@ -108,26 +127,24 @@ async def translate_response(bot, user_message, perplexity_response):
# Directly convert and return if language detection fails; assuming English or Markdown needs HTML conversion
formatted_response = format_headers_for_telegram(perplexity_response)
return markdown_to_html(formatted_response)

# Check if the detected language is English, skip translation if it is
if user_lang == 'en':
logging.info("User's question is in English, converting Markdown to HTML.")
formatted_response = format_headers_for_telegram(perplexity_response)
return markdown_to_html(formatted_response)
else:
# await context.bot.send_message(chat_id=update.effective_chat.id, text="<i>Translating, please wait...</i>", parse_mode=telegram.ParseMode.HTML)
logging.info(f"User's question is in {user_lang}, proceeding with translation.")

# System message to guide the model for translating
system_message = {
"role": "system",
"content": f"Translate the message to: {user_lang}."
}

# Prepare the chat history with only the Perplexity's response as the assistant's message to be translated
chat_history = [
system_message,
# {"role": "user", "content": user_message},
{"role": "user", "content": perplexity_response}
]

Expand Down Expand Up @@ -158,7 +175,7 @@ async def translate_response(bot, user_message, perplexity_response):
return translated_reply
except Exception as e:
logging.error(f"Error processing translation response: {e}")
return f"Translation failed due to an error: {e}"
return f"Translation failed due to an error: {e}"
else:
logging.error(f"Error in translating response: {response.text}")
return f"Failed to translate, API returned status code {response.status_code}: {response.text}"
Expand Down
21 changes: 21 additions & 0 deletions config.ini
Original file line number Diff line number Diff line change
Expand Up @@ -119,3 +119,24 @@ ElasticsearchEnabled = False
# ~~~~~~~~~~~~~~~~~~~~~
[HolidaySettings]
EnableHolidayNotification = true

# ~~~~~~~~~~~~~~~
# Perplexity API
# ~~~~~~~~~~~~~~~
[Perplexity]
# Model name to use with Perplexity API
Model = llama-3-sonar-large-32k-online

# Maximum tokens for Perplexity API response
MaxTokens = 1024

# Temperature for Perplexity API response
Temperature = 0.0

# Retry settings for Perplexity API
MaxRetries = 3
RetryDelay = 25
Timeout = 30

# Chunk size for translation
ChunkSize = 500
2 changes: 1 addition & 1 deletion main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
# https://github.com/FlyingFathead/TelegramBot-OpenAI-API
#
# version of this program
version_number = "0.72"
version_number = "0.7201"

# experimental modules
import requests
Expand Down
43 changes: 30 additions & 13 deletions text_message_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -474,14 +474,12 @@ async def handle_message(bot, update: Update, context: CallbackContext, logger)
# ~~~~~~~~~~~~~~
# Perplexity API
# ~~~~~~~~~~~~~~

# Handling the Perplexity API call with automatic translation
# Handling the Perplexity API call with automatic translation
elif function_name == 'query_perplexity':
arguments = json.loads(function_call.get('arguments', '{}'))
question = arguments.get('question', '')

if question:

# Make the asynchronous API call to query Perplexity
perplexity_response = await query_perplexity(context.bot, chat_id, question)

Expand All @@ -494,18 +492,32 @@ async def handle_message(bot, update: Update, context: CallbackContext, logger)
chat_history.append({"role": "system", "content": "Perplexity API is currently unavailable due to server issues. Please inform the user about this issue."})
else:
if perplexity_response is not None:
# Append the bot's reply to the chat history before sending it
chat_history.append({"role": "system", "content": f"[Response from Perplexity API, translate to user's language as needed] {perplexity_response}"})
context.chat_data['chat_history'] = chat_history # Update the chat data with the new history
# Flag for translation in progress
context.user_data['active_translation'] = True

# Translate or process the response as necessary
bot_reply_formatted = await translate_response_chunked(bot, user_message, perplexity_response, context, update)

# After translation or processing is completed, clear the active translation flag
context.user_data.pop('active_translation', None)

await context.bot.send_message(
chat_id=update.effective_chat.id,
text=perplexity_response,
parse_mode=ParseMode.HTML
)
if bot_reply_formatted and not bot_reply_formatted.startswith("Error"): # Check for a valid, non-error response
# Append the bot's reply to the chat history before sending it
chat_history.append({"role": "assistant", "content": bot_reply_formatted})
context.chat_data['chat_history'] = chat_history # Update the chat data with the new history

response_sent = True # Indicate that a response has been sent
break # Exit the loop since response has been handled
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=bot_reply_formatted,
parse_mode=ParseMode.HTML
)

response_sent = True # Indicate that a response has been sent
break # Exit the loop since response has been handled
else:
# Log the error and maybe send a different message or handle the error differently
logging.error("Error processing or translating the Perplexity response.")
chat_history.append({"role": "system", "content": "Fallback to base model due to processing error in Perplexity response."})

else:
logging.error("No valid response from Perplexity, Perplexity response was None or empty.")
Expand All @@ -517,6 +529,11 @@ async def handle_message(bot, update: Update, context: CallbackContext, logger)

context.chat_data['chat_history'] = chat_history


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# old code; to be nuked and paved ...
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

# # Handling the Perplexity API call with automatic translation
# elif function_name == 'query_perplexity':
# arguments = json.loads(function_call.get('arguments', '{}'))
Expand Down

0 comments on commit 0b25e90

Please sign in to comment.