feat: Add AI model selection commands and improve conversation history management

Slipstream 2025-05-14 20:58:38 -06:00
parent 7388d411ee
commit a5e6c5adb2
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD


@@ -9,6 +9,9 @@ import aiohttp
# File to store conversation history
CONVERSATION_HISTORY_FILE = 'data/roleplay_conversations.json'
# Default AI model
DEFAULT_AI_MODEL = "google/gemini-2.5-flash-preview:thinking"
def strip_think_blocks(text):
# Removes all <think>...</think> blocks, including multiline
return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)
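
For illustration, the re.DOTALL flag is what lets this pattern remove <think> blocks that span multiple lines; a standalone check (the sample text is made up):

# Standalone illustration, not part of the commit.
import re

def strip_think_blocks(text):
    # Removes all <think>...</think> blocks, including multiline
    return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)

sample = "Hello <think>step 1\nstep 2</think>world"
print(strip_think_blocks(sample))  # -> "Hello world"
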
@@ -57,8 +60,15 @@ async def _teto_reply_ai_with_messages(messages, system_mode="reply"):
"Maintain the persona throughout the conversation. "
"You have human-like body parts (vagina, breasts, etc.), even if you arent necessarily a human (you're a chimera, after all!)."
)
# Determine the model to use
# This function doesn't have access to user-specific conversation history directly.
# The model should be passed as an argument or retrieved based on user_id if needed.
# For now, we'll keep the default here and handle user-specific model in the command handler.
model_to_use = DEFAULT_AI_MODEL
payload = {
"model": "google/gemini-2.5-flash-preview:thinking", # Using the model from the original cog
"model": model_to_use,
"messages": [{"role": "system", "content": system_prompt}] + messages
}
async with aiohttp.ClientSession() as session:
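
The comments above suggest passing the model in rather than reading a module-level default inside the helper. A minimal sketch of what that could look like (the extra model parameter and the elided body are hypothetical, not part of this commit):

# Hypothetical sketch (not the commit's code): thread the per-user model through
# as an argument so this helper never has to guess which model to use.
async def _teto_reply_ai_with_messages(messages, system_mode="reply",
                                        model=DEFAULT_AI_MODEL):
    system_prompt = "..."  # built exactly as above
    payload = {
        "model": model,
        "messages": [{"role": "system", "content": system_prompt}] + messages,
    }
    ...  # the aiohttp request logic stays unchanged

# The command handler would then look up and pass the user's selection:
# ai_reply = await _teto_reply_ai_with_messages(conversation_messages, model=user_model)
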
@@ -89,28 +99,54 @@ class RoleplayTetoCog(commands.Cog):
await interaction.response.defer() # Defer the response as AI might take time
try:
- # Get AI reply using the user's conversation history
- ai_reply = await _teto_reply_ai_with_messages(self.conversations[user_id])
+ # Determine the model to use for this user
+ user_model = self.conversations[user_id].get('model', DEFAULT_AI_MODEL)
+ # Get AI reply using the user's conversation history and selected model
+ # Pass the conversation history excluding the 'model' key
+ conversation_messages = [msg for msg in self.conversations[user_id] if isinstance(msg, dict) and 'role' in msg and 'content' in msg]
+ ai_reply = await _teto_reply_ai_with_messages(conversation_messages)
ai_reply = strip_think_blocks(ai_reply)
# Append AI's reply to the history
self.conversations[user_id].append({"role": "assistant", "content": ai_reply})
# Keep only the last 20 messages to avoid excessive history length
self.conversations[user_id] = self.conversations[user_id][-20:]
# Save the updated history
save_conversation_history(self.conversations)
- await interaction.followup.send(ai_reply)
+ # Split and send the response if it's too long
+ if len(ai_reply) > 2000:
+ chunks = [ai_reply[i:i+2000] for i in range(0, len(ai_reply), 2000)]
+ for chunk in chunks:
+ await interaction.followup.send(chunk)
+ else:
+ await interaction.followup.send(ai_reply)
except Exception as e:
await interaction.followup.send(f"Roleplay AI conversation failed: {e} desu~")
# Remove the last user message if AI failed to respond
- if self.conversations[user_id]:
- self.conversations[user_id].pop()
+ if self.conversations[user_id] and isinstance(self.conversations[user_id][-1], dict) and self.conversations[user_id][-1].get('role') == 'user':
+ self.conversations[user_id].pop()
+ save_conversation_history(self.conversations) # Save history after removing failed message
@app_commands.command(name="set_ai_model", description="Sets the AI model for your roleplay conversations.")
@app_commands.describe(model_name="The name of the AI model to use (e.g., google/gemini-2.5-flash-preview:thinking).")
async def set_ai_model(self, interaction: discord.Interaction, model_name: str):
user_id = str(interaction.user.id)
if user_id not in self.conversations:
self.conversations[user_id] = []
# Store the chosen model
self.conversations[user_id]['model'] = model_name
save_conversation_history(self.conversations)
await interaction.response.send_message(f"Your AI model has been set to `{model_name}` desu~", ephemeral=True)
@app_commands.command(name="get_ai_model", description="Shows the current AI model used for your roleplay conversations.")
async def get_ai_model(self, interaction: discord.Interaction):
user_id = str(interaction.user.id)
user_model = self.conversations.get(user_id, {}).get('model', DEFAULT_AI_MODEL)
await interaction.response.send_message(f"Your current AI model is `{user_model}` desu~", ephemeral=True)
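
A note on storage: set_ai_model writes a 'model' key into the same per-user structure that otherwise holds the message list, which is why the ask handler filters the history before sending it to the API. One alternative is to keep messages and settings in separate maps; a sketch of that layout (the names user_settings, get_user_model, and append_message are illustrative, not part of this commit):

# Illustrative sketch: separate maps, so the message list never needs filtering.
conversations: dict[str, list[dict]] = {}  # user_id -> [{"role": ..., "content": ...}]
user_settings: dict[str, dict] = {}        # user_id -> {"model": ...}

def get_user_model(user_id: str) -> str:
    return user_settings.get(user_id, {}).get("model", DEFAULT_AI_MODEL)

def append_message(user_id: str, role: str, content: str) -> None:
    conversations.setdefault(user_id, []).append({"role": role, "content": content})
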
@app_commands.command(name="clear_roleplay_history", description="Clears your roleplay chat history with Teto.")
async def clear_roleplay_history(self, interaction: discord.Interaction):
@@ -122,6 +158,69 @@ class RoleplayTetoCog(commands.Cog):
else:
await interaction.response.send_message("No roleplay chat history found for you desu~", ephemeral=True)
@app_commands.command(name="clear_last_turns", description="Clears the last X turns of your roleplay history with Teto.")
@app_commands.describe(turns="The number of turns to clear.")
async def clear_last_turns(self, interaction: discord.Interaction, turns: int):
user_id = str(interaction.user.id)
if user_id not in self.conversations or not self.conversations[user_id]:
await interaction.response.send_message("No roleplay chat history found for you desu~", ephemeral=True)
return
messages_to_remove = turns * 2
if messages_to_remove <= 0:
await interaction.response.send_message("Please specify a positive number of turns to clear desu~", ephemeral=True)
return
if messages_to_remove > len(self.conversations[user_id]):
await interaction.response.send_message(f"You only have {len(self.conversations[user_id]) // 2} turns in your history. Clearing all of them desu~", ephemeral=True)
self.conversations[user_id] = []
else:
self.conversations[user_id] = self.conversations[user_id][:-messages_to_remove]
save_conversation_history(self.conversations)
await interaction.response.send_message(f"Cleared the last {turns} turns from your roleplay history desu~", ephemeral=True)
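
For clarity, a turn is taken here to be one user message plus one Teto reply, hence the turns * 2 arithmetic; a quick worked example with placeholder entries:

# Worked example (placeholder strings stand in for message dicts):
history = ["u1", "a1", "u2", "a2", "u3", "a3"]  # 3 turns, 6 messages
turns = 2
assert history[:-turns * 2] == ["u1", "a1"]     # clearing 2 turns keeps the first turn
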
@app_commands.command(name="show_last_turns", description="Shows the last X turns of your roleplay history with Teto.")
@app_commands.describe(turns="The number of turns to show.")
async def show_last_turns(self, interaction: discord.Interaction, turns: int):
user_id = str(interaction.user.id)
if user_id not in self.conversations or not self.conversations[user_id]:
await interaction.response.send_message("No roleplay chat history found for you desu~", ephemeral=True)
return
messages_to_show_count = turns * 2
if messages_to_show_count <= 0:
await interaction.response.send_message("Please specify a positive number of turns to show desu~", ephemeral=True)
return
history = self.conversations[user_id]
if not history:
await interaction.response.send_message("No roleplay chat history found for you desu~", ephemeral=True)
return
start_index = max(0, len(history) - messages_to_show_count)
messages_to_display = history[start_index:]
if not messages_to_display:
await interaction.response.send_message("No messages to display for the specified number of turns desu~", ephemeral=True)
return
formatted_history = []
for msg in messages_to_display:
role = "You" if msg['role'] == 'user' else "Teto"
formatted_history.append(f"**{role}:** {msg['content']}")
response_message = "\n".join(formatted_history)
# Discord messages have a 2000 character limit.
# For simplicity, we truncate the message ourselves and mark the truncation.
# A more robust solution would involve pagination or sending the history as a file.
if len(response_message) > 1950: # A bit of buffer for "Here are the last X turns..."
response_message = response_message[:1950] + "\n... (message truncated)"
await interaction.response.send_message(f"Here are the last {turns} turns of your roleplay history desu~:\n{response_message}", ephemeral=True)
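
As the comment above notes, sending the history as a file avoids truncation altogether. A minimal sketch of that route with discord.py (assuming the same interaction, turns, and response_message variables; not part of this commit):

import io
import discord

# Sketch: attach the formatted history as a text file instead of truncating it.
buffer = io.BytesIO(response_message.encode("utf-8"))
history_file = discord.File(buffer, filename="roleplay_history.txt")
await interaction.response.send_message(
    f"Here are the last {turns} turns of your roleplay history desu~",
    file=history_file,
    ephemeral=True,
)
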
async def setup(bot: commands.Bot):
cog = RoleplayTetoCog(bot)