This commit is contained in:
Slipstream 2025-04-27 18:01:05 -06:00
parent cbb0c6d607
commit aa85fdca18
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD
3 changed files with 174 additions and 136 deletions

View File

@@ -10,9 +10,9 @@ import base64
import time
from typing import Optional, Dict, Any, List
# Assuming GurtCog is in the same directory level or accessible
# from .gurt_cog import GurtCog # This might cause circular import issues if GurtCog imports this.
# It's safer to get the cog instance via self.bot.get_cog('GurtCog')
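# A minimal sketch of that lookup pattern (assumes the cog is registered under the name 'GurtCog'):
#     gurt_cog = self.bot.get_cog('GurtCog')
#     if gurt_cog is None:
#         raise RuntimeError("GurtCog is not loaded")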
# Gurt imports for AI calls and config
from ..gurt.api import get_internal_ai_json_response
from ..gurt.config import PROFILE_UPDATE_SCHEMA, ROLE_SELECTION_SCHEMA, DEFAULT_MODEL
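# Assumed signature of the imported helper, inferred from the call sites in this diff (illustrative, not authoritative):
#     async def get_internal_ai_json_response(
#         cog, prompt_messages, task_description, response_schema_dict,
#         model_name, temperature, max_tokens,
#     ) -> Optional[Dict[str, Any]]
# It sends the prompt to Vertex AI with the schema enforced server-side and returns the parsed JSON dict, or None on failure.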
class ProfileUpdaterCog(commands.Cog):
"""Cog for automatically updating Gurt's profile elements based on AI decisions."""
@@ -249,67 +249,19 @@ Current State:
image_prompt_part = "\n(Current avatar image data is provided below)" # Text hint for the AI
# Define the JSON schema for the AI's response content
response_schema_json = {
"type": "object",
"properties": {
"should_update": {
"type": "boolean",
"description": "True if you want to change anything, false otherwise"
},
"reasoning": {
"type": "string",
"description": "Your reasoning for the decision and chosen updates (or lack thereof)."
},
"updates": {
"type": "object",
"properties": {
"avatar_query": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"description": "Search query for a new avatar. Try to be specific, rather than something very broad like 'Kasane Teto' or 'anime.'"
},
"new_bio": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"description": "The new bio text, or null"
},
"role_theme": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"description": "A theme for role selection, could be a specific name color role, interest, or theme, or null"
},
"new_activity": {
"type": "object",
"description": "Object containing the new activity details. Set type and text to null if no change is desired.",
"properties": {
"type": {
"anyOf": [{"type": "string", "enum": ["playing", "watching", "listening", "competing"]}, {"type": "null"}],
"description": "Activity type: 'playing', 'watching', 'listening', 'competing', or null."
},
"text": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"description": "The activity text, or null."
}
},
"required": ["type", "text"],
"additionalProperties": False
}
},
"required": ["avatar_query", "new_bio", "role_theme", "new_activity"],
"additionalProperties": False
}
},
"required": ["should_update", "reasoning", "updates"],
"additionalProperties": False # Enforce strictness at schema level too
}
json_format_instruction = json.dumps(response_schema_json, indent=2) # For the prompt
# Use the schema imported from config.py
response_schema_dict = PROFILE_UPDATE_SCHEMA['schema']
# json_format_instruction = json.dumps(response_schema_dict, indent=2) # No longer needed for prompt
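# PROFILE_UPDATE_SCHEMA (see config.py below) is a wrapper dict of the form
#     {"name": "profile_update_decision", "description": "...", "schema": {...}}
# so ['schema'] extracts just the JSON-schema body the API helper expects.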
# Define the payload for the response_format parameter
response_format_payload = {
"type": "json_schema",
"json_schema": {
"name": "profile_update_decision",
"strict": True, # Enforce strict adherence to the schema
"schema": response_schema_json
}
}
# Define the payload for the response_format parameter - REMOVED for Vertex AI
# response_format_payload = {
# "type": "json_schema",
# "json_schema": {
# "name": "profile_update_decision",
# "strict": True, # Enforce strict adherence to the schema
# "schema": response_schema_json
# }
# }
# Construct the full prompt message list for the AI
# Updated system prompt to include dynamic traits, mood, and interests
@@ -319,41 +271,36 @@ Your current mood is: {current_mood}.
Your current interests include: {interests_str}.
Review your current profile state (provided below) and decide if you want to make any changes based on your personality, mood, and interests. Be creative and in-character.
**IMPORTANT: Your *entire* response MUST be a single JSON object, with no other text before or after it.**"""
**IMPORTANT: Your *entire* response MUST be a single JSON object matching the required schema, with no other text before or after it.**""" # Simplified instruction
prompt_messages = [
{"role": "system", "content": system_prompt_content}, # Use the updated system prompt
{"role": "user", "content": [
# Added emphasis at start and end of the text prompt
{"type": "text", "text": f"**Your entire response MUST be ONLY the JSON object described below. No introductory text, no explanations, just the JSON.**\n\n{state_summary}{image_prompt_part}\n\nReview your current profile state. Decide if you want to change your avatar, bio, roles, or activity status based on your personality, mood, and interests. If yes, specify the changes in the JSON. If not, set 'should_update' to false.\n\n**CRITICAL: Respond ONLY with a valid JSON object matching this exact structure:**\n```json\n{json_format_instruction}\n```\n**ABSOLUTELY NO TEXT BEFORE OR AFTER THE JSON OBJECT.**"}
# Simplified user prompt instruction
{"type": "text", "text": f"{state_summary}{image_prompt_part}\n\nReview your current profile state. Decide if you want to change your avatar, bio, roles, or activity status based on your personality, mood, and interests. If yes, specify the changes in the JSON. If not, set 'should_update' to false.\n\n**CRITICAL: Respond ONLY with a valid JSON object matching the required schema.**"}
]}
]
# Add image data if available and model supports it
if current_state.get('avatar_image_data'):
# Assuming the user message content is a list when multimodal
prompt_messages[-1]["content"].append({ # Add to the list in the last user message
"type": "image_url",
"image_url": {"url": current_state["avatar_image_data"]}
})
# Convert to Vertex AI format if needed (get_internal_ai_json_response handles this)
# prompt_messages[-1]["content"].append(Part.from_data(...)) # Example
print("ProfileUpdaterTask: Added current avatar image to AI prompt.")
try:
# Need a way to call GurtCog's core AI logic directly
# This might require refactoring GurtCog or adding a dedicated method
# Call the internal AI method from GurtCog, specifying the model and structured output format
result_json = await self.gurt_cog._get_internal_ai_json_response(
# Use the imported get_internal_ai_json_response function
result_json = await get_internal_ai_json_response(
cog=self.gurt_cog, # Pass the GurtCog instance
prompt_messages=prompt_messages,
model="openai/o4-mini-high", # Use the specified OpenAI model
response_format=response_format_payload, # Enforce structured output
task_description="Profile Update Decision",
response_schema_dict=response_schema_dict, # Pass the schema dict
model_name=DEFAULT_MODEL, # Use model from config
temperature=0.5, # Keep temperature for some creativity
max_tokens=5000
max_tokens=500 # Adjust max tokens if needed
)
if result_json and isinstance(result_json, dict):
# Basic validation of the received structure (now includes reasoning)
# Basic validation of the received structure
if "should_update" in result_json and "updates" in result_json and "reasoning" in result_json:
print(f"ProfileUpdaterTask: AI Reasoning: {result_json.get('reasoning', 'N/A')}") # Log the reasoning
return result_json
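# Example of a decision payload conforming to PROFILE_UPDATE_SCHEMA (illustrative values):
#     {"should_update": true,
#      "reasoning": "Mood is 'mischievous', so refresh the activity text only.",
#      "updates": {"avatar_query": null, "new_bio": null, "role_theme": null,
#                  "new_activity": {"type": "playing", "text": "with fire"}}}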
@@ -515,48 +462,36 @@ Review your current profile state (provided below) and decide if you want to mak
current_role_names = [role.name for role in member.roles if role.name != "@everyone"]
# Define the JSON schema for the role selection AI response
role_selection_schema = {
"type": "object",
"properties": {
"roles_to_add": {
"type": "array",
"items": {"type": "string"},
"description": "List of role names to add (max 2)"
},
"roles_to_remove": {
"type": "array",
"items": {"type": "string"},
"description": "List of role names to remove (max 2, only from current roles)"
}
},
"required": ["roles_to_add", "roles_to_remove"],
"additionalProperties": False
}
role_selection_format = json.dumps(role_selection_schema, indent=2)
# Use the schema imported from config.py
role_selection_schema_dict = ROLE_SELECTION_SCHEMA['schema']
# role_selection_format = json.dumps(role_selection_schema_dict, indent=2) # No longer needed for prompt
# Prepare prompt for the second AI call
role_prompt_messages = [
{"role": "system", "content": f"You are Gurt. Based on the theme '{role_theme}', select roles to add or remove from the available list for this server. Prioritize adding roles that fit the theme and removing roles that don't or conflict. You can add/remove up to 2 roles total."},
{"role": "user", "content": f"Available assignable roles: {assignable_role_names}\nYour current roles: {current_role_names}\nTheme: '{role_theme}'\n\nSelect roles to add/remove based on the theme.\n\n**CRITICAL: Respond ONLY with a valid JSON object matching this structure:**\n```json\n{role_selection_format}\n```\n**Ensure nothing precedes or follows the JSON.**"}
# Simplified user prompt instruction
{"role": "user", "content": f"Available assignable roles: {assignable_role_names}\nYour current roles: {current_role_names}\nTheme: '{role_theme}'\n\nSelect roles to add/remove based on the theme.\n\n**CRITICAL: Respond ONLY with a valid JSON object matching the required schema.**"}
]
try:
# Make the AI call to select roles
# Define the payload for the response_format parameter
role_selection_format_payload = {
"type": "json_schema",
"json_schema": {
"name": "role_selection_decision",
"strict": True,
"schema": role_selection_schema
}
}
# Define the payload for the response_format parameter - REMOVED for Vertex AI
# role_selection_format_payload = {
# "type": "json_schema",
# "json_schema": {
# "name": "role_selection_decision",
# "strict": True,
# "schema": role_selection_schema
# }
# }
role_decision = await self.gurt_cog._get_internal_ai_json_response(
# Use the imported get_internal_ai_json_response function
role_decision = await get_internal_ai_json_response(
cog=self.gurt_cog, # Pass the GurtCog instance
prompt_messages=role_prompt_messages,
model="openai/o4-mini-high", # Use the specified OpenAI model
response_format=role_selection_format_payload, # Enforce structured output
task_description=f"Role Selection for Guild {guild.id}",
response_schema_dict=role_selection_schema_dict, # Pass the schema dict
model_name=DEFAULT_MODEL, # Use model from config
temperature=0.5 # Moderate temperature; role selection favors consistency over creativity
)
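# Example of a conforming role decision (illustrative role names):
#     {"roles_to_add": ["Night Owl"], "roles_to_remove": ["Early Bird"]}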

View File

@@ -30,7 +30,7 @@ PISTON_API_KEY = os.getenv("PISTON_API_KEY") # Optional key for Piston
# --- Model Configuration ---
DEFAULT_MODEL = os.getenv("GURT_DEFAULT_MODEL", "gemini-2.5-pro-preview-03-25")
FALLBACK_MODEL = os.getenv("GURT_FALLBACK_MODEL", "gemini-2.5-pro-preview-03-25")
SAFETY_CHECK_MODEL = os.getenv("GURT_SAFETY_CHECK_MODEL", "openai/gpt-4.1-nano") # For terminal command safety
SAFETY_CHECK_MODEL = os.getenv("GURT_SAFETY_CHECK_MODEL", "gemini-2.5-flash-preview-04-17") # Use a Vertex AI model for safety checks
# --- Database Paths ---
DB_PATH = os.getenv("GURT_DB_PATH", "data/gurt_memory.db")
@@ -165,6 +165,98 @@ RESPONSE_SCHEMA = {
}
}
# --- Summary Response Schema ---
SUMMARY_RESPONSE_SCHEMA = {
"name": "conversation_summary",
"description": "A concise summary of a conversation.",
"schema": {
"type": "object",
"properties": {
"summary": {
"type": "string",
"description": "The generated summary of the conversation."
}
},
"required": ["summary"]
}
}
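# Example response conforming to SUMMARY_RESPONSE_SCHEMA (illustrative):
#     {"summary": "Users are comparing text editors; no consensus has been reached yet."}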
# --- Profile Update Schema ---
PROFILE_UPDATE_SCHEMA = {
"name": "profile_update_decision",
"description": "Decision on whether and how to update the bot's profile.",
"schema": {
"type": "object",
"properties": {
"should_update": {
"type": "boolean",
"description": "True if any profile element should be changed, false otherwise."
},
"reasoning": {
"type": "string",
"description": "Brief reasoning for the decision and chosen updates (or lack thereof)."
},
"updates": {
"type": "object",
"properties": {
"avatar_query": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"description": "Search query for a new avatar image, or null if no change."
},
"new_bio": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"description": "The new bio text (max 190 chars), or null if no change."
},
"role_theme": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"description": "A theme for role selection (e.g., color, interest), or null if no role changes."
},
"new_activity": {
"type": "object",
"description": "Object containing the new activity details. Set type and text to null if no change.",
"properties": {
"type": {
"anyOf": [{"type": "string", "enum": ["playing", "watching", "listening", "competing"]}, {"type": "null"}],
"description": "Activity type: 'playing', 'watching', 'listening', 'competing', or null."
},
"text": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"description": "The activity text, or null."
}
},
"required": ["type", "text"]
}
},
"required": ["avatar_query", "new_bio", "role_theme", "new_activity"]
}
},
"required": ["should_update", "reasoning", "updates"]
}
}
# --- Role Selection Schema ---
ROLE_SELECTION_SCHEMA = {
"name": "role_selection_decision",
"description": "Decision on which roles to add or remove based on a theme.",
"schema": {
"type": "object",
"properties": {
"roles_to_add": {
"type": "array",
"items": {"type": "string"},
"description": "List of role names to add (max 2)."
},
"roles_to_remove": {
"type": "array",
"items": {"type": "string"},
"description": "List of role names to remove (max 2, only from current roles)."
}
},
"required": ["roles_to_add", "roles_to_remove"]
}
}
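# All three schema dicts above share the {"name", "description", "schema"} wrapper
# convention; the inner "schema" body is plain JSON Schema, so a candidate response
# can be sanity-checked locally. A minimal sketch, assuming the third-party
# 'jsonschema' package is available (not a confirmed dependency of this repo):
#     from jsonschema import validate
#     validate(instance={"roles_to_add": [], "roles_to_remove": []},
#              schema=ROLE_SELECTION_SCHEMA["schema"])  # raises ValidationError on mismatch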
# --- Tools Definition ---
def create_tools_list():
# This function creates the list of FunctionDeclaration objects.

View File

@@ -28,7 +28,7 @@ from .config import (
# Assume these helpers will be moved or are accessible via cog
# We might need to pass 'cog' to these tool functions if they rely on cog state heavily
# from .utils import format_message # This will be needed by context tools
# from .api import call_llm_api_with_retry, get_internal_ai_json_response # Needed for summary, safety check
from .api import get_internal_ai_json_response # Needed for summary, safety check
# --- Tool Implementations ---
# Note: Most of these functions will need the 'cog' instance passed to them
@@ -242,7 +242,7 @@ async def get_user_interaction_history(cog: commands.Cog, user_id_1: str, limit:
async def get_conversation_summary(cog: commands.Cog, channel_id: str = None, message_limit: int = 25) -> Dict[str, Any]:
"""Generates and returns a summary of the recent conversation in a channel using an LLM call."""
from .api import call_llm_api_with_retry # Import here
from .config import SUMMARY_RESPONSE_SCHEMA, DEFAULT_MODEL # Import schema and model
try:
target_channel_id_str = channel_id or (str(cog.current_channel.id) if cog.current_channel else None)
if not target_channel_id_str: return {"error": "No channel context"}
@@ -260,7 +260,7 @@ async def get_conversation_summary(cog: commands.Cog, channel_id: str = None, me
}
print(f"Generating new summary for channel {target_channel_id}")
if not API_KEY or not cog.session: return {"error": "API key or session not available"}
# No need to check API_KEY or cog.session for Vertex AI calls via get_internal_ai_json_response
recent_messages_text = []
try:
@@ -277,25 +277,31 @@ async def get_conversation_summary(cog: commands.Cog, channel_id: str = None, me
conversation_context = "\n".join(recent_messages_text)
summarization_prompt = f"Summarize the main points and current topic of this Discord chat snippet:\n\n---\n{conversation_context}\n---\n\nSummary:"
summary_payload = {
"model": DEFAULT_MODEL, # Consider cheaper model
"messages": [{"role": "system", "content": "Summarize concisely."}, {"role": "user", "content": summarization_prompt}],
"temperature": 0.3, "max_tokens": 150,
}
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {API_KEY}", "HTTP-Referer": "gurt", "X-Title": "Gurt Summarizer"}
# Use get_internal_ai_json_response
prompt_messages = [
{"role": "system", "content": "You are an expert summarizer. Provide a concise summary of the following conversation."},
{"role": "user", "content": summarization_prompt}
]
summary_data = await get_internal_ai_json_response(
cog=cog,
prompt_messages=prompt_messages,
task_description=f"Summarization for channel {target_channel_id}",
response_schema_dict=SUMMARY_RESPONSE_SCHEMA['schema'], # Pass the schema dict
model_name=DEFAULT_MODEL, # Consider a cheaper/faster model if needed
temperature=0.3,
max_tokens=200 # Adjust as needed
)
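# Per SUMMARY_RESPONSE_SCHEMA, summary_data should look like {"summary": "..."} on
# success, or be None/invalid on failure, which the fallback below handles.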
summary = "Error generating summary."
try:
data = await call_llm_api_with_retry(cog, summary_payload, headers, SUMMARY_API_TIMEOUT, f"Summarization for {target_channel_id}")
if data.get("choices") and data["choices"][0].get("message"):
summary = data["choices"][0]["message"].get("content", "Failed content extraction.").strip()
print(f"Summary generated for {target_channel_id}: {summary[:100]}...")
else:
summary = f"Unexpected summary API format: {str(data)[:200]}"
print(f"Summarization Error (Channel {target_channel_id}): {summary}")
except Exception as e:
summary = f"Failed summary for {target_channel_id}. Error: {str(e)}"
print(summary) # Error already printed in helper
if summary_data and isinstance(summary_data.get("summary"), str):
summary = summary_data["summary"].strip()
print(f"Summary generated for {target_channel_id}: {summary[:100]}...")
else:
error_detail = f"Invalid format or missing 'summary' key. Response: {summary_data}"
summary = f"Failed summary for {target_channel_id}. Error: {error_detail}"
print(summary)
cog.conversation_summaries[target_channel_id] = {"summary": summary, "timestamp": time.time()}
return {"channel_id": target_channel_id_str, "summary": summary, "source": "generated", "timestamp": datetime.datetime.now().isoformat()}
@@ -555,7 +561,7 @@ def parse_mem_limit(mem_limit_str: str) -> Optional[int]:
async def _check_command_safety(cog: commands.Cog, command: str) -> Dict[str, Any]:
"""Uses a secondary AI call to check if a command is potentially harmful."""
from .api import get_internal_ai_json_response # Import here
# from .api import get_internal_ai_json_response # Already imported at top level
print(f"Performing AI safety check for command: '{command}' using model {SAFETY_CHECK_MODEL}")
safety_schema = {
"type": "object",
@@ -565,12 +571,17 @@ async def _check_command_safety(cog: commands.Cog, command: str) -> Dict[str, An
}, "required": ["is_safe", "reason"]
}
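# Example of a conforming verdict (illustrative):
#     {"is_safe": false, "reason": "rm -rf can destroy mounted data even inside the container"}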
prompt_messages = [
{"role": "system", "content": f"Analyze shell command safety for execution in isolated, network-disabled Docker ({DOCKER_EXEC_IMAGE}) with CPU/Mem limits. Focus on data destruction, resource exhaustion, container escape, network attacks (disabled), env var leaks. Simple echo/ls/pwd safe. rm/mkfs/shutdown/wget/curl/install/fork bombs unsafe. Respond ONLY with JSON matching schema: {{{{json.dumps(safety_schema)}}}}"},
{"role": "system", "content": f"Analyze shell command safety for execution in isolated, network-disabled Docker ({DOCKER_EXEC_IMAGE}) with CPU/Mem limits. Focus on data destruction, resource exhaustion, container escape, network attacks (disabled), env var leaks. Simple echo/ls/pwd safe. rm/mkfs/shutdown/wget/curl/install/fork bombs unsafe. Respond ONLY with JSON matching the provided schema."},
{"role": "user", "content": f"Analyze safety: ```{command}```"}
]
safety_response = await get_internal_ai_json_response(
cog, prompt_messages, "Command Safety Check", SAFETY_CHECK_MODEL, 0.1, 150,
{"type": "json_schema", "json_schema": {"name": "safety_check", "schema": safety_schema}}
cog=cog,
prompt_messages=prompt_messages,
task_description="Command Safety Check",
response_schema_dict=safety_schema, # Pass the schema dict directly
model_name=SAFETY_CHECK_MODEL,
temperature=0.1,
max_tokens=150
)
if safety_response and isinstance(safety_response.get("is_safe"), bool):
is_safe = safety_response["is_safe"]