v0.11
FlyingFathead committed Apr 24, 2024
1 parent 3502035 commit 03214eb
Showing 3 changed files with 31 additions and 14 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -53,6 +53,7 @@ After launching the bot, you can interact with it via Telegram:

## Changes

+- v0.11 - bugfixes & rate limits for `/model` command changes for users
- v0.10 - `/help` & `/about` commands added for further assistance
- `config.ini` now has a list of supported models that can be changed as needed
- v0.09 - users can now change the Whisper model with the `/model` command
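For context on the `config.ini` entries the changelog refers to, here is a minimal sketch of the two relevant sections, inferred from the fallback values read in `src/main.py` below; the actual file shipped with the bot may contain more settings:

```ini
[WhisperSettings]
# Default transcription model used until changed with /model
Model = medium.en

[ModelSettings]
# Comma-separated list accepted by the /model command
ValidModels = tiny, base, small, medium, large
```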
35 changes: 23 additions & 12 deletions src/main.py
@@ -3,7 +3,7 @@
# openai-whisper transcriber-bot for Telegram

# version of this program
version_number = "0.10"
version_number = "0.11"

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# https://github.com/FlyingFathead/whisper-transcriber-telegram-bot/
@@ -54,6 +54,10 @@ def __init__(self):
        self.model = self.config.get('WhisperSettings', 'Model', fallback='medium.en')
        self.valid_models = self.config.get('ModelSettings', 'ValidModels', fallback='tiny, base, small, medium, large').split(', ')

+        self.model_change_limits = {} # Dictionary to track user rate limits
+        self.model_change_cooldown = 60 # Cooldown period in seconds
+        self.user_models = {} # Use a dictionary to manage models per user.
+
    async def handle_message(self, update: Update, context: CallbackContext) -> None:
        logger.info("Received a message.")
        if update.message and update.message.text:
@@ -114,20 +118,27 @@ async def help_command(self, update: Update, context: CallbackContext) -> None:
        await update.message.reply_text(help_text)

    async def model_command(self, update: Update, context: CallbackContext) -> None:
        # If no specific model is specified, just report the current model
+        user_id = update.effective_user.id
+        current_time = time.time()
+
        if not context.args:
            await update.message.reply_text(f"The current transcription model is set to: {self.model}")
            return

-        new_model = context.args[0].strip()
-        if new_model in self.valid_models:
-            self.model = new_model
-            self.config.set('WhisperSettings', 'Model', new_model)
-            with open('config/config.ini', 'w') as configfile:
-                self.config.write(configfile)
-            await update.message.reply_text(f"Model updated to {new_model}.")
-        else:
-            models_list = ', '.join(self.valid_models)
-            await update.message.reply_text(f"Invalid model specified. Available models: {models_list}.")
+        # Cooldown check
+        if user_id in self.model_change_limits and current_time - self.model_change_limits[user_id] < self.model_change_cooldown:
+            cooldown_remaining = self.model_change_cooldown - (current_time - self.model_change_limits[user_id])
+            await update.message.reply_text(f"Please wait {cooldown_remaining:.0f} more seconds before changing the model again. Current model is '{self.model}'.")
+            return
+
+        new_model = context.args[0]
+        if new_model in self.valid_models:
+            self.model = new_model
+            self.model_change_limits[user_id] = current_time # Record the change time
+            await update.message.reply_text(f"Model updated to: {new_model}")
+        else:
+            models_list = ', '.join(self.valid_models)
+            await update.message.reply_text(f"Invalid model specified.\n\nAvailable models: {models_list}")

    def run(self):
        loop = asyncio.get_event_loop()
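The rate limiting added to `model_command` above reduces to a per-user timestamp check: remember when each user last changed the model and refuse further changes until the cooldown has elapsed. A minimal standalone sketch of that pattern, with illustrative names (the bot itself keeps `model_change_limits` and `model_change_cooldown` on the class instance and records the timestamp only after a successful model change):

```python
import time

MODEL_CHANGE_COOLDOWN = 60   # seconds, matching the default in the diff above
last_change = {}             # user_id -> timestamp of the last accepted change

def seconds_until_allowed(user_id: int) -> float:
    """Return 0 if the user may change the model now, otherwise the seconds remaining."""
    now = time.time()
    last = last_change.get(user_id)
    if last is not None and now - last < MODEL_CHANGE_COOLDOWN:
        return MODEL_CHANGE_COOLDOWN - (now - last)
    last_change[user_id] = now   # accept the change and start a new cooldown window
    return 0.0

# A second call within 60 seconds reports roughly how long the user still has to wait.
print(seconds_until_allowed(12345))  # 0.0 -> allowed
print(seconds_until_allowed(12345))  # ~60.0 -> still cooling down
```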
9 changes: 7 additions & 2 deletions src/transcription_handler.py
@@ -254,7 +254,7 @@ async def transcribe_audio(audio_path, output_dir, youtube_url, video_info_messa
async def process_url_message(message_text, bot, update):

    try:

+        model = get_whisper_model() # Ensure the latest model is fetched dynamically

        # Get general settings right at the beginning of the function
@@ -367,7 +367,12 @@ async def process_url_message(message_text, bot, update):
        await bot.send_message(chat_id=update.effective_chat.id, text=detailed_message)

        # Transcribe the audio and handle transcription output
-        transcription_paths = await transcribe_audio(audio_path, output_dir, normalized_url, video_info_message, include_header)
+        model = get_whisper_model() # Ensure you fetch the current model setting
+        if not model:
+            logger.error("Failed to retrieve the transcription model.")
+            return
+        transcription_paths = await transcribe_audio(audio_path, output_dir, normalized_url, video_info_message, include_header, model)
+
        if not transcription_paths:
            # Notify if transcription fails
            await bot.send_message(chat_id=update.effective_chat.id, text="Failed to transcribe audio.")
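The `transcription_handler.py` change makes the model an explicit argument: the current setting is fetched right before transcription, validated, and passed into `transcribe_audio` instead of being resolved inside it, so that a recent `/model` change can be picked up at transcription time. A minimal sketch of that fetch-guard-pass shape with placeholder helpers (the real `get_whisper_model` and `transcribe_audio` implementations are not shown in this commit):

```python
import asyncio
import logging

logger = logging.getLogger(__name__)

def get_current_model() -> str | None:
    """Placeholder for get_whisper_model(); the real lookup lives elsewhere in the bot."""
    return "medium.en"

async def run_transcription(audio_path: str, model: str) -> str:
    """Placeholder for transcribe_audio(..., model); returns a path to the transcript."""
    return f"{audio_path}.{model}.txt"

async def handle(audio_path: str) -> None:
    model = get_current_model()      # fetch the setting at call time, not at import time
    if not model:                    # guard: bail out instead of transcribing with an unknown model
        logger.error("Failed to retrieve the transcription model.")
        return
    transcript = await run_transcription(audio_path, model)  # model passed explicitly
    print(f"Transcript written to {transcript}")

asyncio.run(handle("clip.wav"))
```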
