f
Slipstream 2025-04-27 00:03:14 -06:00
parent 2e2036b13f
commit b7d4b6bfbb
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD
8 changed files with 697 additions and 169 deletions


@@ -5,14 +5,22 @@ import datetime
from typing import Dict, List, Optional, Any, Union
from fastapi import FastAPI, HTTPException, Depends, Header, Request, Response
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles # Added for static files
from fastapi.responses import FileResponse # Added for serving HTML
from pydantic import BaseModel, Field
import discord
from discord.ext import commands
import aiohttp
import threading
from typing import Optional # Added for GurtCog type hint
# This file contains the API endpoints for syncing conversations between
# the Flutter app and the Discord bot.
# the Flutter app and the Discord bot, AND the Gurt stats endpoint.
# --- Placeholder for GurtCog instance ---
# This needs to be set by the script that starts the bot and API server
from discordbot.gurt.cog import GurtCog # Import GurtCog for type hint and access
gurt_cog_instance: Optional[GurtCog] = None
# ============= Models =============
@@ -390,6 +398,44 @@ async def delete_conversation(
return {"success": True, "message": "Conversation deleted"}
# --- Gurt Stats Endpoint ---
@api_app.get("/gurt/stats")
async def get_gurt_stats_api():
"""Get internal statistics for the Gurt bot."""
if not gurt_cog_instance:
raise HTTPException(status_code=503, detail="Gurt cog not available")
try:
stats_data = await gurt_cog_instance.get_gurt_stats()
# get_gurt_stats should already return JSON-serializable types;
# add conversion here later if that changes (e.g. for datetime objects).
return stats_data
except Exception as e:
print(f"Error retrieving Gurt stats via API: {e}")
import traceback
traceback.print_exc()
raise HTTPException(status_code=500, detail=f"Error retrieving Gurt stats: {e}")
# --- Gurt Dashboard Static Files ---
# Mount static files directory (adjust path if needed, assuming dashboard files are in discordbot/gurt_dashboard)
# Check if the directory exists before mounting
dashboard_dir = "discordbot/gurt_dashboard"
if os.path.exists(dashboard_dir) and os.path.isdir(dashboard_dir):
api_app.mount("/gurt/static", StaticFiles(directory=dashboard_dir), name="gurt_static")
print(f"Mounted Gurt dashboard static files from: {dashboard_dir}")
# Route for the main dashboard HTML
@api_app.get("/gurt/dashboard", response_class=FileResponse)
async def get_gurt_dashboard():
dashboard_html_path = os.path.join(dashboard_dir, "index.html")
if os.path.exists(dashboard_html_path):
return dashboard_html_path
else:
raise HTTPException(status_code=404, detail="Dashboard index.html not found")
else:
print(f"Warning: Gurt dashboard directory '{dashboard_dir}' not found. Dashboard endpoints will not be available.")
@api_app.get("/settings")
async def get_user_settings(user_id: str = Depends(verify_discord_token)):
"""Get user settings"""


@@ -46,13 +46,16 @@ async def call_llm_api_with_retry(
"""
last_exception = None
original_model = payload.get("model")
current_model_key = original_model # Track the model used in the current attempt
using_fallback = False
start_time = time.monotonic() # Start timer before the loop
if not cog.session:
raise Exception(f"aiohttp session not initialized in GurtCog for {request_desc}")
for attempt in range(API_RETRY_ATTEMPTS + 1): # Corrected range
try:
current_model_key = payload["model"] # Get model for this attempt
model_desc = f"fallback model {FALLBACK_MODEL}" if using_fallback else f"primary model {original_model}"
print(f"Sending API request for {request_desc} using {model_desc} (Attempt {attempt + 1}/{API_RETRY_ATTEMPTS + 1})...")
@@ -70,7 +73,13 @@ async def call_llm_api_with_retry(
print(error_msg)
last_exception = ValueError(error_msg) # Treat as non-retryable format error
break # Exit retry loop
print(f"API request successful for {request_desc}.")
# --- Success Logging ---
elapsed_time = time.monotonic() - start_time
cog.api_stats[current_model_key]['success'] += 1
cog.api_stats[current_model_key]['total_time'] += elapsed_time
cog.api_stats[current_model_key]['count'] += 1
print(f"API request successful for {request_desc} ({current_model_key}) in {elapsed_time:.2f}s.")
return data # Success
elif response.status == 429: # Rate limit error
@@ -80,6 +89,7 @@ async def call_llm_api_with_retry(
if using_fallback or original_model != DEFAULT_MODEL:
if attempt < API_RETRY_ATTEMPTS:
cog.api_stats[current_model_key]['retries'] += 1 # Log retry
wait_time = API_RETRY_DELAY * (attempt + 2)
print(f"Waiting {wait_time} seconds before retrying...")
await asyncio.sleep(wait_time)
@@ -92,7 +102,7 @@ async def call_llm_api_with_retry(
payload["model"] = FALLBACK_MODEL
using_fallback = True
await asyncio.sleep(1)
continue
continue # Retry immediately with fallback
elif response.status >= 500: # Retry on server errors
error_text = await response.text()
@@ -100,6 +110,7 @@ async def call_llm_api_with_retry(
print(f"{error_msg} (Attempt {attempt + 1})")
last_exception = Exception(error_msg)
if attempt < API_RETRY_ATTEMPTS:
cog.api_stats[current_model_key]['retries'] += 1 # Log retry
await asyncio.sleep(API_RETRY_DELAY * (attempt + 1))
continue
else:
@@ -114,7 +125,7 @@ async def call_llm_api_with_retry(
payload["model"] = FALLBACK_MODEL
using_fallback = True
await asyncio.sleep(1)
continue
continue # Retry immediately with fallback
last_exception = Exception(error_msg)
break
@@ -124,6 +135,7 @@ async def call_llm_api_with_retry(
print(error_msg)
last_exception = asyncio.TimeoutError(error_msg)
if attempt < API_RETRY_ATTEMPTS:
cog.api_stats[current_model_key]['retries'] += 1 # Log retry
await asyncio.sleep(API_RETRY_DELAY * (attempt + 1))
continue
else:
@@ -133,11 +145,20 @@ async def call_llm_api_with_retry(
print(error_msg)
last_exception = e
if attempt < API_RETRY_ATTEMPTS:
cog.api_stats[current_model_key]['retries'] += 1 # Log retry
await asyncio.sleep(API_RETRY_DELAY * (attempt + 1))
continue
else:
break
# --- Failure Logging ---
elapsed_time = time.monotonic() - start_time
final_model_key = payload["model"] # Model used in the last failed attempt
cog.api_stats[final_model_key]['failure'] += 1
cog.api_stats[final_model_key]['total_time'] += elapsed_time
cog.api_stats[final_model_key]['count'] += 1
print(f"API request failed for {request_desc} ({final_model_key}) after {attempt + 1} attempts in {elapsed_time:.2f}s.")
raise last_exception or Exception(f"API request failed for {request_desc} after {API_RETRY_ATTEMPTS + 1} attempts.")
# --- JSON Parsing Helper ---
@@ -251,6 +272,7 @@ async def process_requested_tools(cog: 'GurtCog', tool_requests: List[Dict[str,
continue
print(f"Executing tool: {function_name} with args: {function_args}")
tool_start_time = time.monotonic() # Start timer for this tool
if function_name in TOOL_MAPPING:
try:
# Get the actual function implementation from the mapping
@@ -263,16 +285,28 @@ async def process_requested_tools(cog: 'GurtCog', tool_requests: List[Dict[str,
# and don't directly need the `cog` instance passed here.
# If they *are* methods of GurtCog, they'll have `self` automatically.
result = await tool_func(cog, **function_args) # Pass cog if needed by tool impl
# --- Tool Success Logging ---
tool_elapsed_time = time.monotonic() - tool_start_time
cog.tool_stats[function_name]['success'] += 1
cog.tool_stats[function_name]['total_time'] += tool_elapsed_time
cog.tool_stats[function_name]['count'] += 1
print(f"Tool '{function_name}' executed successfully in {tool_elapsed_time:.2f}s.")
tool_results_for_api.append({
"role": "tool",
"content": json.dumps(result),
"name": function_name
})
print(f"Tool '{function_name}' executed successfully.")
except Exception as e:
# --- Tool Failure Logging ---
tool_elapsed_time = time.monotonic() - tool_start_time
cog.tool_stats[function_name]['failure'] += 1
cog.tool_stats[function_name]['total_time'] += tool_elapsed_time
cog.tool_stats[function_name]['count'] += 1
error_message = f"Error executing tool {function_name}: {str(e)}"
print(error_message)
import traceback
print(f"{error_message} (Took {tool_elapsed_time:.2f}s)")
import traceback # Keep traceback for debugging
traceback.print_exc()
tool_results_for_api.append({
"role": "tool",
@@ -280,8 +314,13 @@ async def process_requested_tools(cog: 'GurtCog', tool_requests: List[Dict[str,
"name": function_name
})
else:
# --- Tool Not Found Logging ---
tool_elapsed_time = time.monotonic() - tool_start_time # Still record time even if not found
cog.tool_stats[function_name]['failure'] += 1 # Count as failure
cog.tool_stats[function_name]['total_time'] += tool_elapsed_time
cog.tool_stats[function_name]['count'] += 1
error_message = f"Tool '{function_name}' not found or implemented."
print(error_message)
print(f"{error_message} (Took {tool_elapsed_time:.2f}s)")
tool_results_for_api.append({
"role": "tool",
"content": json.dumps({"error": error_message}),


@@ -33,6 +33,7 @@ from .memory import MemoryManager # Import from local memory.py
from .background import background_processing_task
from .commands import setup_commands # Import the setup helper
from .listeners import on_ready_listener, on_message_listener, on_reaction_add_listener, on_reaction_remove_listener # Import listener functions
from . import config as GurtConfig # Import config module for get_gurt_stats
# Tool mapping is used internally by api.py/process_requested_tools; no need to import it here unless cog methods call tools directly (they shouldn't).
# Analysis, context, prompt, api, and utils functions are called by listeners/commands/the background task, not usually by cog methods directly.
@@ -122,6 +123,10 @@ class GurtCog(commands.Cog, name="Gurt"): # Added explicit Cog name
self.background_task: Optional[asyncio.Task] = None
self.last_evolution_update = time.time() # Used in background task
# --- Stats Tracking ---
self.api_stats = defaultdict(lambda: {"success": 0, "failure": 0, "retries": 0, "total_time": 0.0, "count": 0}) # Keyed by model name
self.tool_stats = defaultdict(lambda: {"success": 0, "failure": 0, "total_time": 0.0, "count": 0}) # Keyed by tool name
# --- Setup Commands and Listeners ---
# Add commands defined in commands.py
setup_commands(self)
@@ -206,6 +211,107 @@ class GurtCog(commands.Cog, name="Gurt"): # Added explicit Cog name
self.user_relationships[user_id_1][user_id_2] = new_score
# print(f"Updated relationship {user_id_1}-{user_id_2}: {current_score:.1f} -> {new_score:.1f} ({change:+.1f})") # Debug log
async def get_gurt_stats(self) -> Dict[str, Any]:
"""Collects various internal stats for Gurt."""
stats = {"config": {}, "runtime": {}, "memory": {}, "api_stats": {}, "tool_stats": {}}
# --- Config ---
# Selectively pull relevant config values, avoid exposing secrets
stats["config"]["default_model"] = GurtConfig.DEFAULT_MODEL
stats["config"]["fallback_model"] = GurtConfig.FALLBACK_MODEL
stats["config"]["safety_check_model"] = GurtConfig.SAFETY_CHECK_MODEL
stats["config"]["db_path"] = GurtConfig.DB_PATH
stats["config"]["chroma_path"] = GurtConfig.CHROMA_PATH
stats["config"]["semantic_model_name"] = GurtConfig.SEMANTIC_MODEL_NAME
stats["config"]["max_user_facts"] = GurtConfig.MAX_USER_FACTS
stats["config"]["max_general_facts"] = GurtConfig.MAX_GENERAL_FACTS
stats["config"]["mood_change_interval_min"] = GurtConfig.MOOD_CHANGE_INTERVAL_MIN
stats["config"]["mood_change_interval_max"] = GurtConfig.MOOD_CHANGE_INTERVAL_MAX
stats["config"]["evolution_update_interval"] = GurtConfig.EVOLUTION_UPDATE_INTERVAL
stats["config"]["context_window_size"] = GurtConfig.CONTEXT_WINDOW_SIZE
stats["config"]["api_timeout"] = GurtConfig.API_TIMEOUT
stats["config"]["summary_api_timeout"] = GurtConfig.SUMMARY_API_TIMEOUT
stats["config"]["proactive_lull_threshold"] = GurtConfig.PROACTIVE_LULL_THRESHOLD
stats["config"]["proactive_bot_silence_threshold"] = GurtConfig.PROACTIVE_BOT_SILENCE_THRESHOLD
stats["config"]["interest_update_interval"] = GurtConfig.INTEREST_UPDATE_INTERVAL
stats["config"]["interest_decay_interval_hours"] = GurtConfig.INTEREST_DECAY_INTERVAL_HOURS
stats["config"]["learning_update_interval"] = GurtConfig.LEARNING_UPDATE_INTERVAL
stats["config"]["topic_update_interval"] = GurtConfig.TOPIC_UPDATE_INTERVAL
stats["config"]["sentiment_update_interval"] = GurtConfig.SENTIMENT_UPDATE_INTERVAL
stats["config"]["docker_command_timeout"] = GurtConfig.DOCKER_COMMAND_TIMEOUT
stats["config"]["api_key_set"] = bool(GurtConfig.API_KEY) # Don't expose key itself
stats["config"]["tavily_api_key_set"] = bool(GurtConfig.TAVILY_API_KEY)
stats["config"]["piston_api_url_set"] = bool(GurtConfig.PISTON_API_URL)
# --- Runtime ---
stats["runtime"]["current_mood"] = self.current_mood
stats["runtime"]["last_mood_change_timestamp"] = self.last_mood_change
stats["runtime"]["needs_json_reminder"] = self.needs_json_reminder
stats["runtime"]["last_learning_update_timestamp"] = self.last_learning_update
stats["runtime"]["last_interest_update_timestamp"] = self.last_interest_update
stats["runtime"]["last_evolution_update_timestamp"] = self.last_evolution_update
stats["runtime"]["background_task_running"] = bool(self.background_task and not self.background_task.done())
stats["runtime"]["active_topics_channels"] = len(self.active_topics)
stats["runtime"]["conversation_history_channels"] = len(self.conversation_history)
stats["runtime"]["thread_history_threads"] = len(self.thread_history)
stats["runtime"]["user_conversation_mappings"] = len(self.user_conversation_mapping)
stats["runtime"]["channel_activity_tracked"] = len(self.channel_activity)
stats["runtime"]["conversation_topics_tracked"] = len(self.conversation_topics)
stats["runtime"]["user_relationships_pairs"] = sum(len(v) for v in self.user_relationships.values())
stats["runtime"]["conversation_summaries_cached"] = len(self.conversation_summaries)
stats["runtime"]["channel_topics_cached"] = len(self.channel_topics_cache)
stats["runtime"]["message_cache_global_count"] = len(self.message_cache['global_recent'])
stats["runtime"]["message_cache_mentioned_count"] = len(self.message_cache['mentioned'])
stats["runtime"]["active_conversations_count"] = len(self.active_conversations)
stats["runtime"]["bot_last_spoke_channels"] = len(self.bot_last_spoke)
stats["runtime"]["message_reply_map_size"] = len(self.message_reply_map)
stats["runtime"]["conversation_sentiment_channels"] = len(self.conversation_sentiment)
stats["runtime"]["gurt_participation_topics_count"] = len(self.gurt_participation_topics)
stats["runtime"]["gurt_message_reactions_tracked"] = len(self.gurt_message_reactions)
# --- Memory (via MemoryManager) ---
try:
# Personality
personality = await self.memory_manager.get_all_personality_traits()
stats["memory"]["personality_traits"] = personality
# Interests
interests = await self.memory_manager.get_interests(limit=20, min_level=0.01) # Get top 20
stats["memory"]["top_interests"] = interests
# Fact counts via direct DB query (uses MemoryManager's private _db_fetchone helper;
# a dedicated public count method on MemoryManager would be cleaner)
user_fact_count = await self.memory_manager._db_fetchone("SELECT COUNT(*) FROM user_facts")
general_fact_count = await self.memory_manager._db_fetchone("SELECT COUNT(*) FROM general_facts")
stats["memory"]["user_facts_count"] = user_fact_count[0] if user_fact_count else 0
stats["memory"]["general_facts_count"] = general_fact_count[0] if general_fact_count else 0
# ChromaDB collection counts (count() is synchronous, so run it in a thread)
stats["memory"]["chromadb_message_collection_count"] = await asyncio.to_thread(self.memory_manager.semantic_collection.count) if self.memory_manager.semantic_collection else "N/A"
stats["memory"]["chromadb_fact_collection_count"] = await asyncio.to_thread(self.memory_manager.fact_collection.count) if self.memory_manager.fact_collection else "N/A"
except Exception as e:
stats["memory"]["error"] = f"Failed to retrieve memory stats: {e}"
# --- API & Tool Stats ---
# Convert defaultdicts to regular dicts for JSON serialization
stats["api_stats"] = dict(self.api_stats)
stats["tool_stats"] = dict(self.tool_stats)
# Calculate average times where count > 0
for model, data in stats["api_stats"].items():
if data["count"] > 0:
data["average_time_ms"] = round((data["total_time"] / data["count"]) * 1000, 2)
else:
data["average_time_ms"] = 0
for tool, data in stats["tool_stats"].items():
if data["count"] > 0:
data["average_time_ms"] = round((data["total_time"] / data["count"]) * 1000, 2)
else:
data["average_time_ms"] = 0
return stats
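The average_time_ms fields computed above are a plain mean over all recorded calls, successes and failures alike, converted to milliseconds. A standalone sketch of the same arithmetic, with invented numbers:

data = {"success": 3, "failure": 1, "retries": 2, "total_time": 12.8, "count": 4}
if data["count"] > 0:
    data["average_time_ms"] = round((data["total_time"] / data["count"]) * 1000, 2)
else:
    data["average_time_ms"] = 0
# 12.8 s over 4 calls -> average_time_ms == 3200.0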
# Setup function for loading the cog
async def setup(bot):


@@ -1,8 +1,12 @@
import discord
from discord import app_commands # Import app_commands
from discord.ext import commands
import random
import os
from typing import TYPE_CHECKING
import time # Import time for timestamps
import json # Import json for formatting
import datetime # Import datetime for formatting
from typing import TYPE_CHECKING, Optional, Dict, Any, List, Tuple # Add more types
# Relative imports (assuming API functions are in api.py)
# We need access to the cog instance for state and methods like get_ai_response
@@ -10,176 +14,204 @@ from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .cog import GurtCog # For type hinting
from .config import MOOD_OPTIONS # Import for choices
# --- Command Implementations ---
# Note: These functions assume they will be registered as commands associated with a GurtCog instance.
# The 'cog' parameter will be implicitly passed by discord.py when registered correctly.
# --- Helper Function for Embeds ---
def create_gurt_embed(title: str, description: str = "", color=discord.Color.blue()) -> discord.Embed:
"""Creates a standard Gurt-themed embed."""
embed = discord.Embed(title=title, description=description, color=color)
# Placeholder icon URL, replace if Gurt has one
# embed.set_footer(text="Gurt", icon_url="https://example.com/gurt_icon.png")
embed.set_footer(text="Gurt")
return embed
@commands.command(name="gurt")
async def gurt_command(cog: 'GurtCog', ctx: commands.Context):
"""The main gurt command"""
from .config import GURT_RESPONSES # Import here
response = random.choice(GURT_RESPONSES)
await ctx.send(response)
# --- Helper Function for Stats Embeds ---
def format_stats_embeds(stats: Dict[str, Any]) -> List[discord.Embed]:
"""Formats the collected stats into multiple embeds."""
embeds = []
main_embed = create_gurt_embed("Gurt Internal Stats", color=discord.Color.green())
ts_format = "<t:{ts}:R>" # Relative timestamp
@commands.command(name="gurtai")
async def gurt_ai_command(cog: 'GurtCog', ctx: commands.Context, *, prompt: str):
"""Get a response from the AI"""
from .api import get_ai_response # Import API function
# Runtime Stats
runtime = stats.get("runtime", {})
main_embed.add_field(name="Current Mood", value=f"{runtime.get('current_mood', 'N/A')} (Changed {ts_format.format(ts=int(runtime.get('last_mood_change_timestamp', 0)))})", inline=False)
main_embed.add_field(name="Background Task", value="Running" if runtime.get('background_task_running') else "Stopped", inline=True)
main_embed.add_field(name="Needs JSON Reminder", value=str(runtime.get('needs_json_reminder', 'N/A')), inline=True)
main_embed.add_field(name="Last Evolution", value=ts_format.format(ts=int(runtime.get('last_evolution_update_timestamp', 0))), inline=True)
main_embed.add_field(name="Active Topics Channels", value=str(runtime.get('active_topics_channels', 'N/A')), inline=True)
main_embed.add_field(name="Conv History Channels", value=str(runtime.get('conversation_history_channels', 'N/A')), inline=True)
main_embed.add_field(name="Thread History Threads", value=str(runtime.get('thread_history_threads', 'N/A')), inline=True)
main_embed.add_field(name="User Relationships Pairs", value=str(runtime.get('user_relationships_pairs', 'N/A')), inline=True)
main_embed.add_field(name="Cached Summaries", value=str(runtime.get('conversation_summaries_cached', 'N/A')), inline=True)
main_embed.add_field(name="Cached Channel Topics", value=str(runtime.get('channel_topics_cached', 'N/A')), inline=True)
main_embed.add_field(name="Global Msg Cache", value=str(runtime.get('message_cache_global_count', 'N/A')), inline=True)
main_embed.add_field(name="Mention Msg Cache", value=str(runtime.get('message_cache_mentioned_count', 'N/A')), inline=True)
main_embed.add_field(name="Active Convos", value=str(runtime.get('active_conversations_count', 'N/A')), inline=True)
main_embed.add_field(name="Sentiment Channels", value=str(runtime.get('conversation_sentiment_channels', 'N/A')), inline=True)
main_embed.add_field(name="Gurt Participation Topics", value=str(runtime.get('gurt_participation_topics_count', 'N/A')), inline=True)
main_embed.add_field(name="Tracked Reactions", value=str(runtime.get('gurt_message_reactions_tracked', 'N/A')), inline=True)
embeds.append(main_embed)
# Create a pseudo-message object or pass necessary info
# For simplicity, we'll pass the context's message object,
# but modify its content for the AI call.
ai_message = ctx.message
ai_message.content = prompt # Override content with the prompt argument
try:
# Show typing indicator
async with ctx.typing():
# Get AI response bundle
response_bundle = await get_ai_response(cog, ai_message) # Pass cog and message
# Check for errors or no response
error_msg = response_bundle.get("error")
initial_response = response_bundle.get("initial_response")
final_response = response_bundle.get("final_response")
response_to_use = final_response if final_response else initial_response
if error_msg:
print(f"Error in gurtai command: {error_msg}")
await ctx.reply(f"Sorry, I'm having trouble thinking right now. Details: {error_msg}")
return
if not response_to_use or not response_to_use.get("should_respond", False):
await ctx.reply("I don't have anything to say about that right now.")
return
response_text = response_to_use.get("content", "")
if not response_text:
await ctx.reply("I decided not to respond with text.")
return
# Handle long responses
if len(response_text) > 1900:
filepath = f'gurt_response_{ctx.author.id}.txt'
try:
with open(filepath, 'w', encoding='utf-8') as f: f.write(response_text)
await ctx.send("Response too long, sending as file:", file=discord.File(filepath))
except Exception as file_e: print(f"Error writing/sending long response file: {file_e}")
finally:
try: os.remove(filepath)
except OSError as os_e: print(f"Error removing temp file {filepath}: {os_e}")
# Memory Stats
memory_embed = create_gurt_embed("Gurt Memory Stats", color=discord.Color.orange())
memory = stats.get("memory", {})
if memory.get("error"):
memory_embed.description = f"⚠️ Error retrieving memory stats: {memory['error']}"
else:
await ctx.reply(response_text)
memory_embed.add_field(name="User Facts", value=str(memory.get('user_facts_count', 'N/A')), inline=True)
memory_embed.add_field(name="General Facts", value=str(memory.get('general_facts_count', 'N/A')), inline=True)
memory_embed.add_field(name="Chroma Messages", value=str(memory.get('chromadb_message_collection_count', 'N/A')), inline=True)
memory_embed.add_field(name="Chroma Facts", value=str(memory.get('chromadb_fact_collection_count', 'N/A')), inline=True)
except Exception as e:
error_message = f"Error processing gurtai request: {str(e)}"
print(f"Exception in gurt_ai_command: {error_message}")
import traceback
traceback.print_exc()
await ctx.reply("Sorry, an unexpected error occurred.")
personality = memory.get("personality_traits", {})
if personality:
p_items = [f"`{k}`: {v}" for k, v in personality.items()]
memory_embed.add_field(name="Personality Traits", value="\n".join(p_items) if p_items else "None", inline=False)
@commands.command(name="gurtmodel")
@commands.is_owner() # Keep owner check for sensitive commands
async def set_model_command(cog: 'GurtCog', ctx: commands.Context, *, model: str):
"""Set the AI model to use (Owner only)"""
# Model setting might need to update config or cog state directly
# For now, let's assume it updates a cog attribute.
# Validation might be better handled in config loading or a dedicated setter.
# if not model.endswith(":free"): # Example validation
# await ctx.reply("Error: Model name must end with `:free`. Setting not updated.")
# return
interests = memory.get("top_interests", [])
if interests:
i_items = [f"`{t}`: {l:.2f}" for t, l in interests]
memory_embed.add_field(name="Top Interests", value="\n".join(i_items) if i_items else "None", inline=False)
embeds.append(memory_embed)
cog.default_model = model # Update the cog's default model attribute
# TODO: Consider if this needs to persist somewhere or update config dynamically.
await ctx.reply(f"AI model temporarily set to: `{model}` for this session.")
print(f"Gurt model changed to {model} by {ctx.author.name}")
# API Stats
api_stats = stats.get("api_stats", {})
if api_stats:
api_embed = create_gurt_embed("Gurt API Stats", color=discord.Color.red())
for model, data in api_stats.items():
avg_time = data.get('average_time_ms', 0)
value = (f"✅ Success: {data.get('success', 0)}\n"
f"❌ Failure: {data.get('failure', 0)}\n"
f"🔁 Retries: {data.get('retries', 0)}\n"
f"⏱️ Avg Time: {avg_time} ms\n"
f"📊 Count: {data.get('count', 0)}")
api_embed.add_field(name=f"Model: `{model}`", value=value, inline=True)
embeds.append(api_embed)
@commands.command(name="gurtstatus")
async def gurt_status_command(cog: 'GurtCog', ctx: commands.Context):
"""Display the current status of Gurt Bot"""
embed = discord.Embed(
title="Gurt Bot Status",
description="Current configuration and status",
color=discord.Color.green()
)
embed.add_field(name="Current Model", value=f"`{cog.default_model}`", inline=False)
embed.add_field(name="API Session", value="Active" if cog.session and not cog.session.closed else "Inactive", inline=True)
# Add other relevant status info from the cog if needed
# embed.add_field(name="Current Mood", value=cog.current_mood, inline=True)
await ctx.send(embed=embed)
# Tool Stats
tool_stats = stats.get("tool_stats", {})
if tool_stats:
tool_embed = create_gurt_embed("Gurt Tool Stats", color=discord.Color.purple())
for tool, data in tool_stats.items():
avg_time = data.get('average_time_ms', 0)
value = (f"✅ Success: {data.get('success', 0)}\n"
f"❌ Failure: {data.get('failure', 0)}\n"
f"⏱️ Avg Time: {avg_time} ms\n"
f"📊 Count: {data.get('count', 0)}")
tool_embed.add_field(name=f"Tool: `{tool}`", value=value, inline=True)
embeds.append(tool_embed)
@commands.command(name="gurthelp")
async def gurt_help_command(cog: 'GurtCog', ctx: commands.Context):
"""Display help information for Gurt Bot"""
from .config import TOOLS # Import TOOLS definition
# Config Stats (Less critical, maybe separate embed if needed)
config_embed = create_gurt_embed("Gurt Config Overview", color=discord.Color.greyple())
config = stats.get("config", {})
config_embed.add_field(name="Default Model", value=f"`{config.get('default_model', 'N/A')}`", inline=True)
config_embed.add_field(name="Fallback Model", value=f"`{config.get('fallback_model', 'N/A')}`", inline=True)
config_embed.add_field(name="Semantic Model", value=f"`{config.get('semantic_model_name', 'N/A')}`", inline=True)
config_embed.add_field(name="Max User Facts", value=str(config.get('max_user_facts', 'N/A')), inline=True)
config_embed.add_field(name="Max General Facts", value=str(config.get('max_general_facts', 'N/A')), inline=True)
config_embed.add_field(name="Context Window", value=str(config.get('context_window_size', 'N/A')), inline=True)
config_embed.add_field(name="API Key Set", value=str(config.get('api_key_set', 'N/A')), inline=True)
config_embed.add_field(name="Tavily Key Set", value=str(config.get('tavily_api_key_set', 'N/A')), inline=True)
config_embed.add_field(name="Piston URL Set", value=str(config.get('piston_api_url_set', 'N/A')), inline=True)
embeds.append(config_embed)
embed = discord.Embed(
title="Gurt Bot Help",
description="Gurt is an autonomous AI participant.",
color=discord.Color.purple()
)
embed.add_field(
name="Commands",
value=f"`{cog.bot.command_prefix}gurt` - Gurt!\n"
f"`{cog.bot.command_prefix}gurtai <prompt>` - Ask Gurt AI directly\n"
f"`{cog.bot.command_prefix}gurtstatus` - Show current status\n"
f"`{cog.bot.command_prefix}gurthelp` - This help message\n"
f"`{cog.bot.command_prefix}gurtmodel <model>` - Set AI model (Owner)\n"
f"`{cog.bot.command_prefix}force_profile_update` - Trigger profile update (Owner)",
inline=False
)
embed.add_field(
name="Autonomous Behavior",
value="Gurt listens and responds naturally based on conversation, mentions, and interests.",
inline=False
)
# Dynamically list available tools from config
tool_list = "\n".join([f"- `{tool['function']['name']}`: {tool['function']['description']}" for tool in TOOLS])
embed.add_field(name="Available AI Tools", value=tool_list, inline=False)
await ctx.send(embed=embed)
# Limit to 10 embeds max for Discord API
return embeds[:10]
@commands.command(name="force_profile_update")
@commands.is_owner()
async def force_profile_update_command(cog: 'GurtCog', ctx: commands.Context):
"""Manually triggers the profile update cycle (Owner only)."""
# This command interacts with another cog, which is complex after refactoring.
# Option 1: Keep this command in a separate 'owner' cog that knows about other cogs.
# Option 2: Use bot events/listeners for inter-cog communication.
# Option 3: Access the other cog directly via self.bot.get_cog (simplest for now).
profile_updater_cog = cog.bot.get_cog('ProfileUpdaterCog')
if not profile_updater_cog:
await ctx.reply("Error: ProfileUpdaterCog not found.")
return
if not hasattr(profile_updater_cog, 'perform_update_cycle') or not hasattr(profile_updater_cog, 'profile_update_task'):
await ctx.reply("Error: ProfileUpdaterCog is missing required methods/tasks.")
return
try:
await ctx.reply("Manually triggering profile update cycle...")
await profile_updater_cog.perform_update_cycle()
# Restarting the loop might be internal to that cog now
if hasattr(profile_updater_cog.profile_update_task, 'restart'):
profile_updater_cog.profile_update_task.restart()
await ctx.reply("Profile update cycle triggered and timer reset.")
else:
await ctx.reply("Profile update cycle triggered (task restart mechanism not found).")
print(f"Profile update cycle manually triggered by {ctx.author.name}.")
except Exception as e:
await ctx.reply(f"An error occurred while triggering the profile update: {e}")
print(f"Error during manual profile update trigger: {e}")
import traceback
traceback.print_exc()
# Helper function to add these commands to the cog instance
# --- Command Setup Function ---
# This function will be called from GurtCog's setup method
def setup_commands(cog: 'GurtCog'):
"""Adds the commands defined in this file to the GurtCog."""
# Add commands directly to the bot instance, associated with the cog
cog.bot.add_command(gurt_command)
cog.bot.add_command(gurt_ai_command)
cog.bot.add_command(set_model_command)
cog.bot.add_command(gurt_status_command)
cog.bot.add_command(gurt_help_command)
cog.bot.add_command(force_profile_update_command)
"""Adds Gurt-specific commands to the cog."""
# Example using app_commands - adapt existing commands if needed
@cog.bot.tree.command(name="gurtmood", description="Check or set Gurt's current mood.")
@app_commands.describe(mood="Optional: Set Gurt's mood to one of the available options.")
@app_commands.choices(mood=[
app_commands.Choice(name=m, value=m) for m in cog.MOOD_OPTIONS # Use cog's MOOD_OPTIONS
])
async def gurtmood(interaction: discord.Interaction, mood: Optional[app_commands.Choice[str]] = None):
"""Handles the /gurtmood command."""
if mood:
cog.current_mood = mood.value
cog.last_mood_change = time.time()
await interaction.response.send_message(f"Gurt's mood set to: {mood.value}", ephemeral=True)
else:
time_since_change = time.time() - cog.last_mood_change
await interaction.response.send_message(f"Gurt's current mood is: {cog.current_mood} (Set {int(time_since_change // 60)} minutes ago)", ephemeral=True)
@cog.bot.tree.command(name="gurtmemory", description="Interact with Gurt's memory.")
@app_commands.describe(
action="Choose an action: add_user, add_general, get_user, get_general",
user="The user for user-specific actions (mention or ID).",
fact="The fact to add (for add actions).",
query="A keyword to search for (for get_general)."
)
@app_commands.choices(action=[
app_commands.Choice(name="Add User Fact", value="add_user"),
app_commands.Choice(name="Add General Fact", value="add_general"),
app_commands.Choice(name="Get User Facts", value="get_user"),
app_commands.Choice(name="Get General Facts", value="get_general"),
])
async def gurtmemory(interaction: discord.Interaction, action: app_commands.Choice[str], user: Optional[discord.User] = None, fact: Optional[str] = None, query: Optional[str] = None):
"""Handles the /gurtmemory command."""
await interaction.response.defer(ephemeral=True) # Defer for potentially slow DB operations
target_user_id = str(user.id) if user else None
action_value = action.value
if action_value == "add_user":
if not target_user_id or not fact:
await interaction.followup.send("Please provide both a user and a fact to add.", ephemeral=True)
return
result = await cog.memory_manager.add_user_fact(target_user_id, fact)
await interaction.followup.send(f"Add User Fact Result: `{json.dumps(result)}`", ephemeral=True)
elif action_value == "add_general":
if not fact:
await interaction.followup.send("Please provide a fact to add.", ephemeral=True)
return
result = await cog.memory_manager.add_general_fact(fact)
await interaction.followup.send(f"Add General Fact Result: `{json.dumps(result)}`", ephemeral=True)
elif action_value == "get_user":
if not target_user_id:
await interaction.followup.send("Please provide a user to get facts for.", ephemeral=True)
return
facts = await cog.memory_manager.get_user_facts(target_user_id) # Get newest by default
if facts:
facts_str = "\n- ".join(facts)
await interaction.followup.send(f"**Facts for {user.display_name}:**\n- {facts_str}", ephemeral=True)
else:
await interaction.followup.send(f"No facts found for {user.display_name}.", ephemeral=True)
elif action_value == "get_general":
facts = await cog.memory_manager.get_general_facts(query=query, limit=10) # Get newest/filtered
if facts:
facts_str = "\n- ".join(facts)
title = f"**General Facts{f' matching "{query}"' if query else ''}:**"
await interaction.followup.send(f"{title}\n- {facts_str}", ephemeral=True)
else:
await interaction.followup.send(f"No general facts found{f' matching "{query}"' if query else ''}.", ephemeral=True)
else:
await interaction.followup.send("Invalid action specified.", ephemeral=True)
# --- Gurt Stats Command ---
@cog.bot.tree.command(name="gurtstats", description="Display Gurt's internal statistics.")
async def gurtstats(interaction: discord.Interaction):
"""Handles the /gurtstats command."""
await interaction.response.defer(ephemeral=True) # Defer as stats collection might take time
try:
stats_data = await cog.get_gurt_stats()
embeds = format_stats_embeds(stats_data)
await interaction.followup.send(embeds=embeds, ephemeral=True)
except Exception as e:
print(f"Error in /gurtstats command: {e}")
import traceback
traceback.print_exc()
await interaction.followup.send("An error occurred while fetching Gurt's stats.", ephemeral=True)
print("Gurt commands setup in cog.")

gurt_dashboard/index.html (new file, 36 lines)

@@ -0,0 +1,36 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Gurt Stats Dashboard</title>
<link rel="stylesheet" href="style.css">
</head>
<body>
<h1>Gurt Internal Stats</h1>
<p>Last Updated: <span id="last-updated">Never</span></p>
<div id="stats-container">
<div class="stats-section" id="runtime-stats">
<h2>Runtime</h2>
<!-- Runtime stats will be populated here -->
</div>
<div class="stats-section" id="memory-stats">
<h2>Memory</h2>
<!-- Memory stats will be populated here -->
</div>
<div class="stats-section" id="api-stats">
<h2>API Stats</h2>
<!-- API stats will be populated here -->
</div>
<div class="stats-section" id="tool-stats">
<h2>Tool Stats</h2>
<!-- Tool stats will be populated here -->
</div>
<div class="stats-section" id="config-stats">
<h2>Config Overview</h2>
<!-- Config stats will be populated here -->
</div>
</div>
<script src="script.js"></script>
</body>
</html>

gurt_dashboard/script.js (new file, 176 lines)

@@ -0,0 +1,176 @@
const API_ENDPOINT = '/discordapi/gurt/stats'; // Path to the stats endpoint (the /discordapi prefix depends on how the API app is mounted)
const REFRESH_INTERVAL = 15000; // Refresh every 15 seconds (in milliseconds)
const lastUpdatedElement = document.getElementById('last-updated');
const runtimeStatsContainer = document.getElementById('runtime-stats');
const memoryStatsContainer = document.getElementById('memory-stats');
const apiStatsContainer = document.getElementById('api-stats');
const toolStatsContainer = document.getElementById('tool-stats');
const configStatsContainer = document.getElementById('config-stats');
function formatTimestamp(unixTimestamp) {
if (!unixTimestamp || unixTimestamp === 0) return 'N/A';
const date = new Date(unixTimestamp * 1000);
return date.toLocaleString(); // Adjust format as needed
}
function createStatItem(label, value, isCode = false) {
const item = document.createElement('div');
item.classList.add('stat-item');
const labelSpan = document.createElement('span');
labelSpan.classList.add('stat-label');
labelSpan.textContent = label + ':';
item.appendChild(labelSpan);
const valueSpan = document.createElement('span');
valueSpan.classList.add('stat-value');
if (isCode) {
const code = document.createElement('code');
code.textContent = value;
valueSpan.appendChild(code);
} else {
valueSpan.textContent = value;
}
item.appendChild(valueSpan);
return item;
}
function createListStatItem(label, items) {
const item = document.createElement('div');
item.classList.add('stat-item');
const labelSpan = document.createElement('span');
labelSpan.classList.add('stat-label');
labelSpan.textContent = label + ':';
item.appendChild(labelSpan);
if (items && items.length > 0) {
const list = document.createElement('ul');
list.classList.add('stat-list');
items.forEach(content => {
const li = document.createElement('li');
li.textContent = content;
list.appendChild(li);
});
item.appendChild(list);
} else {
const valueSpan = document.createElement('span');
valueSpan.classList.add('stat-value');
valueSpan.textContent = 'None';
item.appendChild(valueSpan);
}
return item;
}
function renderStats(stats) {
// Clear previous stats
runtimeStatsContainer.innerHTML = '<h2>Runtime</h2>';
memoryStatsContainer.innerHTML = '<h2>Memory</h2>';
apiStatsContainer.innerHTML = '<h2>API Stats</h2>';
toolStatsContainer.innerHTML = '<h2>Tool Stats</h2>';
configStatsContainer.innerHTML = '<h2>Config Overview</h2>';
// Runtime Stats
const runtime = stats.runtime || {};
runtimeStatsContainer.appendChild(createStatItem('Current Mood', runtime.current_mood || 'N/A'));
runtimeStatsContainer.appendChild(createStatItem('Mood Changed', formatTimestamp(runtime.last_mood_change_timestamp)));
runtimeStatsContainer.appendChild(createStatItem('Background Task Running', runtime.background_task_running ? 'Yes' : 'No'));
runtimeStatsContainer.appendChild(createStatItem('Needs JSON Reminder', runtime.needs_json_reminder ? 'Yes' : 'No'));
runtimeStatsContainer.appendChild(createStatItem('Last Evolution', formatTimestamp(runtime.last_evolution_update_timestamp)));
runtimeStatsContainer.appendChild(createStatItem('Active Topics Channels', runtime.active_topics_channels || 0));
runtimeStatsContainer.appendChild(createStatItem('Conv History Channels', runtime.conversation_history_channels || 0));
runtimeStatsContainer.appendChild(createStatItem('Thread History Threads', runtime.thread_history_threads || 0));
runtimeStatsContainer.appendChild(createStatItem('User Relationships Pairs', runtime.user_relationships_pairs || 0));
runtimeStatsContainer.appendChild(createStatItem('Cached Summaries', runtime.conversation_summaries_cached || 0));
runtimeStatsContainer.appendChild(createStatItem('Cached Channel Topics', runtime.channel_topics_cached || 0));
runtimeStatsContainer.appendChild(createStatItem('Global Msg Cache', runtime.message_cache_global_count || 0));
runtimeStatsContainer.appendChild(createStatItem('Mention Msg Cache', runtime.message_cache_mentioned_count || 0));
runtimeStatsContainer.appendChild(createStatItem('Active Convos', runtime.active_conversations_count || 0));
runtimeStatsContainer.appendChild(createStatItem('Sentiment Channels', runtime.conversation_sentiment_channels || 0));
runtimeStatsContainer.appendChild(createStatItem('Gurt Participation Topics', runtime.gurt_participation_topics_count || 0));
runtimeStatsContainer.appendChild(createStatItem('Tracked Reactions', runtime.gurt_message_reactions_tracked || 0));
// Memory Stats
const memory = stats.memory || {};
if (memory.error) {
const errorItem = document.createElement('div');
errorItem.classList.add('stat-item', 'error');
errorItem.textContent = `Error: ${memory.error}`;
memoryStatsContainer.appendChild(errorItem);
} else {
memoryStatsContainer.appendChild(createStatItem('User Facts', memory.user_facts_count || 0));
memoryStatsContainer.appendChild(createStatItem('General Facts', memory.general_facts_count || 0));
memoryStatsContainer.appendChild(createStatItem('Chroma Messages', memory.chromadb_message_collection_count || 'N/A'));
memoryStatsContainer.appendChild(createStatItem('Chroma Facts', memory.chromadb_fact_collection_count || 'N/A'));
const personality = memory.personality_traits || {};
const pItems = Object.entries(personality).map(([k, v]) => `${k}: ${v}`);
memoryStatsContainer.appendChild(createListStatItem('Personality Traits', pItems));
const interests = memory.top_interests || [];
const iItems = interests.map(([t, l]) => `${t}: ${l.toFixed(2)}`);
memoryStatsContainer.appendChild(createListStatItem('Top Interests', iItems));
}
// API Stats
const apiStats = stats.api_stats || {};
if (Object.keys(apiStats).length === 0) {
apiStatsContainer.appendChild(createStatItem('No API calls recorded yet.', ''));
} else {
for (const [model, data] of Object.entries(apiStats)) {
const value = `Success: ${data.success || 0}, Failure: ${data.failure || 0}, Retries: ${data.retries || 0}, Avg Time: ${data.average_time_ms || 0} ms, Count: ${data.count || 0}`;
apiStatsContainer.appendChild(createStatItem(model, value, true));
}
}
// Tool Stats
const toolStats = stats.tool_stats || {};
if (Object.keys(toolStats).length === 0) {
toolStatsContainer.appendChild(createStatItem('No tool calls recorded yet.', ''));
} else {
for (const [tool, data] of Object.entries(toolStats)) {
const value = `Success: ${data.success || 0}, Failure: ${data.failure || 0}, Avg Time: ${data.average_time_ms || 0} ms, Count: ${data.count || 0}`;
toolStatsContainer.appendChild(createStatItem(tool, value, true));
}
}
// Config Stats
const config = stats.config || {};
configStatsContainer.appendChild(createStatItem('Default Model', config.default_model || 'N/A', true));
configStatsContainer.appendChild(createStatItem('Fallback Model', config.fallback_model || 'N/A', true));
configStatsContainer.appendChild(createStatItem('Semantic Model', config.semantic_model_name || 'N/A', true));
configStatsContainer.appendChild(createStatItem('Max User Facts', config.max_user_facts || 'N/A'));
configStatsContainer.appendChild(createStatItem('Max General Facts', config.max_general_facts || 'N/A'));
configStatsContainer.appendChild(createStatItem('Context Window', config.context_window_size || 'N/A'));
configStatsContainer.appendChild(createStatItem('API Key Set', config.api_key_set ? 'Yes' : 'No'));
configStatsContainer.appendChild(createStatItem('Tavily Key Set', config.tavily_api_key_set ? 'Yes' : 'No'));
configStatsContainer.appendChild(createStatItem('Piston URL Set', config.piston_api_url_set ? 'Yes' : 'No'));
}
async function fetchStats() {
try {
const response = await fetch(API_ENDPOINT);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const stats = await response.json();
renderStats(stats);
lastUpdatedElement.textContent = new Date().toLocaleTimeString();
} catch (error) {
console.error('Error fetching stats:', error);
lastUpdatedElement.textContent = `Error fetching stats at ${new Date().toLocaleTimeString()}`;
// Optionally display an error message in the UI
runtimeStatsContainer.innerHTML = '<h2>Runtime</h2><p class="error">Could not load stats.</p>';
memoryStatsContainer.innerHTML = '<h2>Memory</h2>';
apiStatsContainer.innerHTML = '<h2>API Stats</h2>';
toolStatsContainer.innerHTML = '<h2>Tool Stats</h2>';
configStatsContainer.innerHTML = '<h2>Config Overview</h2>';
}
}
// Initial fetch and set interval
fetchStats();
setInterval(fetchStats, REFRESH_INTERVAL);
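The API_ENDPOINT above includes a /discordapi prefix that does not appear in the FastAPI routes defined in discord_bot_sync_api, so it presumably comes from how api_app is mounted inside the unified API server (run_unified_api, not shown in this commit). A hedged sketch of that kind of mounting, with assumed import paths:

from fastapi import FastAPI
from discord_bot_sync_api import api_app  # the app that defines /gurt/stats above (import path assumed)

unified_app = FastAPI()
unified_app.mount("/discordapi", api_app)  # /discordapi/gurt/stats -> api_app's /gurt/stats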

gurt_dashboard/style.css (new file, 79 lines)

@@ -0,0 +1,79 @@
body {
font-family: sans-serif;
line-height: 1.6;
margin: 20px;
background-color: #f4f4f4;
color: #333;
}
h1, h2 {
color: #333;
border-bottom: 1px solid #ccc;
padding-bottom: 5px;
}
#stats-container {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
gap: 20px;
}
.stats-section {
background-color: #fff;
padding: 15px;
border-radius: 5px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.stats-section h2 {
margin-top: 0;
font-size: 1.2em;
}
.stat-item {
margin-bottom: 10px;
padding-bottom: 10px;
border-bottom: 1px dotted #eee;
}
.stat-item:last-child {
border-bottom: none;
margin-bottom: 0;
padding-bottom: 0;
}
.stat-label {
font-weight: bold;
display: block;
margin-bottom: 3px;
}
.stat-value {
font-family: monospace;
word-wrap: break-word;
}
.stat-value code {
background-color: #eee;
padding: 2px 4px;
border-radius: 3px;
}
.stat-list {
list-style: none;
padding-left: 0;
}
.stat-list li {
margin-bottom: 5px;
}
.error {
color: red;
font-weight: bold;
}
#last-updated {
font-style: italic;
color: #555;
}

main.py (16 lines changed)

@@ -11,10 +11,11 @@ from commands import load_all_cogs, reload_all_cogs
from error_handler import handle_error, patch_discord_methods, store_interaction_content
from utils import reload_script
# Import the unified API service runner
# Import the unified API service runner and the sync API module
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from discordbot.run_unified_api import start_api_in_thread
import discord_bot_sync_api # Import the module to set the cog instance
# Check if API dependencies are available
try:
@@ -234,6 +235,19 @@ async def main():
async with bot:
# Load all cogs from the 'cogs' directory
await load_all_cogs(bot)
# --- Share GurtCog instance with the sync API ---
try:
gurt_cog = bot.get_cog("Gurt") # Get the loaded GurtCog instance
if gurt_cog:
discord_bot_sync_api.gurt_cog_instance = gurt_cog
print("Successfully shared GurtCog instance with discord_bot_sync_api.")
else:
print("Warning: GurtCog not found after loading cogs. Stats API might not work.")
except Exception as e:
print(f"Error sharing GurtCog instance: {e}")
# ------------------------------------------------
# Start the bot using start() for async context
await bot.start(TOKEN)
finally: