Commit

Only check Ollama if it's selected
luandro committed Mar 25, 2024
1 parent bf87893 commit fa54e4a
Showing 1 changed file with 3 additions and 1 deletion.
app/rag/llm.py: 3 additions & 1 deletion
@@ -47,7 +47,7 @@ def setup_llm(local_mode):
         Settings.llm = TogetherLLM(model=togetherai_model, api_key=togetherai_api_key)
     elif used_llm == groq_model:
         Settings.llm = Groq(model=groq_model, api_key=groq_api_key)
-    else:
+    elif used_llm == ollama_model:
         # Check if the OLLAMA_MODEL is available
         try:
             logging.info(f"Checking if {ollama_model} is available...")
@@ -62,4 +62,6 @@ def setup_llm(local_mode):
         Settings.llm = Ollama(
             model=ollama_model, request_timeout=ollama_timeout, base_url=ollama_base_url
         )
+    else:
+        raise ValueError(f"No LLM configured for model: {used_llm}")
     return Settings.llm
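
For context, below is a minimal sketch of what setup_llm plausibly looks like after this commit. It is reconstructed from the two visible hunks only: the configuration values, the derivation of used_llm, and the body of the Ollama availability check (collapsed between the hunks) are assumptions, with the check stood in by a query to Ollama's GET /api/tags endpoint, which lists locally pulled models.

# A sketch only, not the repository's actual code: reconstructed from the
# two visible diff hunks. Config values, the used_llm selection, and the
# body of the availability check are hypothetical stand-ins.
import logging

import requests
from llama_index.core import Settings
from llama_index.llms.groq import Groq
from llama_index.llms.ollama import Ollama
from llama_index.llms.together import TogetherLLM

# Hypothetical configuration; in the real file these presumably come from env vars.
togetherai_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
togetherai_api_key = "TOGETHER_API_KEY"
groq_model = "mixtral-8x7b-32768"
groq_api_key = "GROQ_API_KEY"
ollama_model = "llama2"
ollama_base_url = "http://localhost:11434"
ollama_timeout = 120.0


def setup_llm(local_mode):
    # Assumption: local_mode selects Ollama, otherwise a hosted provider.
    used_llm = ollama_model if local_mode else togetherai_model

    if used_llm == togetherai_model:
        Settings.llm = TogetherLLM(model=togetherai_model, api_key=togetherai_api_key)
    elif used_llm == groq_model:
        Settings.llm = Groq(model=groq_model, api_key=groq_api_key)
    elif used_llm == ollama_model:  # was a bare `else:` before this commit
        # Check if the OLLAMA_MODEL is available.
        try:
            logging.info(f"Checking if {ollama_model} is available...")
            # The real check is collapsed in the diff; this stand-in asks the
            # Ollama server for its local model list via GET /api/tags.
            tags = requests.get(f"{ollama_base_url}/api/tags", timeout=5).json()
            names = [model["name"] for model in tags.get("models", [])]
            if not any(name.startswith(ollama_model) for name in names):
                raise RuntimeError(f"{ollama_model} is not pulled on the Ollama server")
        except Exception as exc:
            logging.error(f"Ollama availability check failed: {exc}")
            raise
        Settings.llm = Ollama(
            model=ollama_model, request_timeout=ollama_timeout, base_url=ollama_base_url
        )
    else:
        # Added in this commit: fail loudly on an unrecognized model name
        # instead of silently falling through to the Ollama branch.
        raise ValueError(f"No LLM configured for model: {used_llm}")
    return Settings.llm

The net effect of the commit: the Ollama availability check (and its network round trip) runs only when Ollama is actually the selected backend, and an unrecognized model name now raises a ValueError immediately instead of being treated as Ollama.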
