fix: Correct model selection logic in Teto AI response function

Slipstream 2025-05-16 21:24:01 -06:00
parent 168ddf1ba4
commit 62014d00be
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD


@@ -107,7 +107,7 @@ async def _teto_reply_ai_with_messages(messages, system_mode="reply", model_over
     # Determine the model to use
     # Use the model_override if provided, otherwise use the default
-    model_to_use = model_override if model_to_use else DEFAULT_AI_MODEL
+    model_to_use = model_override if model_override is not None else DEFAULT_AI_MODEL
     payload = {
         "model": model_to_use,