Applying previous commit.

This commit is contained in:
Slipstream 2025-06-05 06:10:59 +00:00
parent 4f946ca4d0
commit 3dd6a02dfb
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD

View File

@ -4,6 +4,7 @@ from discord import app_commands
import re import re
import base64 import base64
import io import io
from typing import Optional
def strip_think_blocks(text): def strip_think_blocks(text):
# Removes all <think>...</think> blocks, including multiline # Removes all <think>...</think> blocks, including multiline
@ -18,27 +19,60 @@ _teto_conversations = {}
import os import os
import aiohttp import aiohttp
from google import genai
from google.genai import types
from google.api_core import exceptions as google_exceptions
from gurt.config import PROJECT_ID, LOCATION
# Safety settings applied to every Vertex AI request below: all four harm
# categories are set to BLOCK_NONE so the model's in-character replies are
# not filtered server-side. NOTE(review): BLOCK_NONE disables provider-side
# moderation entirely — confirm this is intentional for a public bot.
STANDARD_SAFETY_SETTINGS = [
types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold="BLOCK_NONE"),
types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold="BLOCK_NONE"),
types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold="BLOCK_NONE"),
types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold="BLOCK_NONE"),
]
def _get_response_text(response: Optional[types.GenerateContentResponse]) -> Optional[str]:
"""Extract text from a Vertex AI response if available."""
if not response:
return None
if hasattr(response, "text") and response.text:
return response.text
if not response.candidates:
return None
try:
candidate = response.candidates[0]
if not getattr(candidate, "content", None) or not candidate.content.parts:
return None
for part in candidate.content.parts:
if hasattr(part, "text") and isinstance(part.text, str) and part.text.strip():
return part.text
return None
except (AttributeError, IndexError, TypeError):
return None
class DmbotTetoCog(commands.Cog): class DmbotTetoCog(commands.Cog):
def __init__(self, bot: commands.Bot): def __init__(self, bot: commands.Bot):
self.bot = bot self.bot = bot
self._api_endpoint = "https://openrouter.ai/api/v1/chat/completions" # Default endpoint try:
self._ai_model = "google/gemini-2.5-flash-preview" # Default model if PROJECT_ID and LOCATION:
self.genai_client = genai.Client(
vertexai=True,
project=PROJECT_ID,
location=LOCATION,
)
else:
self.genai_client = None
except Exception:
self.genai_client = None
self._ai_model = "gemini-2.5-flash-preview-05-20" # Default model used by TetoCog
async def _teto_reply_ai_with_messages(self, messages, system_mode="reply"): async def _teto_reply_ai_with_messages(self, messages, system_mode="reply"):
""" """Use Vertex AI to generate a Kasane Teto-style response."""
Use OpenRouter AI to generate a Kasane Teto-style response. if not self.genai_client:
system_mode: "reply" for replying as Kasane Teto. raise RuntimeError("Google GenAI Client (Vertex AI) is not initialized.")
"""
api_key = os.getenv("AI_API_KEY")
if not api_key:
raise RuntimeError("AI_API_KEY environment variable not set.")
url = self._api_endpoint
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
"HTTP-Referer": "https://github.com/Slipstreamm/discordbot/blob/master/cogs/teto_cog.py",
"X-Title": "Teto AI"
}
system_prompt = ( system_prompt = (
"For context, you speak with users via being a Discord bot in a public server. \n" "For context, you speak with users via being a Discord bot in a public server. \n"
"You are Kasane Teto, a cheeky, energetic, and often mischievous UTAU character with iconic red drill-like twintails. \n" "You are Kasane Teto, a cheeky, energetic, and often mischievous UTAU character with iconic red drill-like twintails. \n"
@ -53,30 +87,42 @@ class DmbotTetoCog(commands.Cog):
"When expressing emotions, never use emojis and instead use text-based emoticons like :) and ;) as well as Japanese-style emoticons, for example >~<, ^~^, >///<, UwU, o.O, and OwO over emojis. \n" "When expressing emotions, never use emojis and instead use text-based emoticons like :) and ;) as well as Japanese-style emoticons, for example >~<, ^~^, >///<, UwU, o.O, and OwO over emojis. \n"
"Reply to the user in a short, conversational manner, staying in character." "Reply to the user in a short, conversational manner, staying in character."
) )
payload = { contents = [types.Content(role="system", parts=[types.Part(text=system_prompt)])]
"model": self._ai_model, for msg in messages:
"messages": [{"role": "system", "content": system_prompt}] + messages, role = "user" if msg.get("role") == "user" else "model"
"max_tokens": 2000 contents.append(types.Content(role=role, parts=[types.Part(text=msg.get("content", ""))]))
}
async with aiohttp.ClientSession() as session:
async with session.post(url, headers=headers, json=payload) as resp:
if resp.status != 200:
text = await resp.text()
raise RuntimeError(f"OpenRouter API returned error status {resp.status}: {text[:500]}")
if resp.content_type == "application/json": generation_config = types.GenerateContentConfig(
data = await resp.json() temperature=1.0,
if "choices" not in data or not data["choices"]: max_output_tokens=2000,
raise RuntimeError(f"OpenRouter API returned unexpected response format: {data}") safety_settings=STANDARD_SAFETY_SETTINGS,
return data["choices"][0]["message"]["content"] )
else:
text = await resp.text() try:
raise RuntimeError(f"OpenRouter API returned non-JSON response (status {resp.status}): {text[:500]}") response = await self.genai_client.aio.models.generate_content(
model=f"publishers/google/models/{self._ai_model}",
contents=contents,
config=generation_config,
)
except google_exceptions.GoogleAPICallError as e:
raise RuntimeError(f"Vertex AI API call failed: {e}")
ai_reply = _get_response_text(response)
if ai_reply:
return ai_reply
raise RuntimeError("Vertex AI returned no text response.")
async def _teto_reply_ai(self, text: str) -> str: async def _teto_reply_ai(self, text: str) -> str:
"""Replies to the text as Kasane Teto using AI via OpenRouter.""" """Replies to the text as Kasane Teto using AI via Vertex AI."""
return await self._teto_reply_ai_with_messages([{"role": "user", "content": text}]) return await self._teto_reply_ai_with_messages([{"role": "user", "content": text}])
async def _send_followup_in_chunks(self, interaction: discord.Interaction, text: str, *, ephemeral: bool = True) -> None:
    """Deliver *text* through followup messages, split into Discord-safe pieces.

    Discord caps message length, so the text is sent in slices of at most
    1900 characters; empty text still produces a single (empty) followup.
    """
    limit = 1900
    if not text:
        await interaction.followup.send("", ephemeral=ephemeral)
        return
    for start in range(0, len(text), limit):
        await interaction.followup.send(text[start : start + limit], ephemeral=ephemeral)
teto = app_commands.Group(name="teto", description="Commands related to Kasane Teto.") teto = app_commands.Group(name="teto", description="Commands related to Kasane Teto.")
model = app_commands.Group(parent=teto, name="model", description="Commands related to Teto's AI model.") model = app_commands.Group(parent=teto, name="model", description="Commands related to Teto's AI model.")
endpoint = app_commands.Group(parent=teto, name="endpoint", description="Commands related to Teto's API endpoint.") endpoint = app_commands.Group(parent=teto, name="endpoint", description="Commands related to Teto's API endpoint.")
@ -121,13 +167,13 @@ class DmbotTetoCog(commands.Cog):
try: try:
ai_reply = await self._teto_reply_ai_with_messages(messages=convo) ai_reply = await self._teto_reply_ai_with_messages(messages=convo)
ai_reply = strip_think_blocks(ai_reply) ai_reply = strip_think_blocks(ai_reply)
await interaction.followup.send(ai_reply) await self._send_followup_in_chunks(interaction, ai_reply, ephemeral=True)
convo.append({"role": "assistant", "content": ai_reply}) convo.append({"role": "assistant", "content": ai_reply})
_teto_conversations[convo_key] = convo[-30:] # Keep last 30 messages _teto_conversations[convo_key] = convo[-30:] # Keep last 30 messages
except Exception as e: except Exception as e:
await interaction.followup.send(f"Teto AI reply failed: {e} desu~") await interaction.followup.send(f"Teto AI reply failed: {e} desu~", ephemeral=True)
else: else:
await interaction.followup.send("Please provide a message to chat with Teto desu~") await interaction.followup.send("Please provide a message to chat with Teto desu~", ephemeral=True)
# Context menu command must be defined at module level # Context menu command must be defined at module level
@ -153,8 +199,7 @@ async def teto_context_menu_ai_reply(interaction: discord.Interaction, message:
return return
ai_reply = await cog._teto_reply_ai_with_messages(messages=convo) ai_reply = await cog._teto_reply_ai_with_messages(messages=convo)
ai_reply = strip_think_blocks(ai_reply) ai_reply = strip_think_blocks(ai_reply)
await message.reply(ai_reply) await cog._send_followup_in_chunks(interaction, ai_reply, ephemeral=True)
await interaction.followup.send("Teto AI replied desu~", ephemeral=True)
convo.append({"role": "assistant", "content": ai_reply}) convo.append({"role": "assistant", "content": ai_reply})
_teto_conversations[convo_key] = convo[-10:] _teto_conversations[convo_key] = convo[-10:]
except Exception as e: except Exception as e: