This commit is contained in:
Slipstream 2025-04-29 12:19:32 -06:00
parent 0cb1908c0e
commit 561f77f226
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD
4 changed files with 512 additions and 113 deletions

View File

@@ -6,7 +6,7 @@ import os
import json
import aiohttp
from collections import defaultdict
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Any # Added Any
# Relative imports
from .config import (
@@ -18,7 +18,9 @@ from .config import (
BASELINE_PERSONALITY, # For default traits
REFLECTION_INTERVAL_SECONDS, # Import reflection interval
# Internal Action Config
INTERNAL_ACTION_INTERVAL_SECONDS, INTERNAL_ACTION_PROBABILITY
INTERNAL_ACTION_INTERVAL_SECONDS, INTERNAL_ACTION_PROBABILITY,
# Autonomous action reporting channel
AUTONOMOUS_ACTION_REPORT_CHANNEL_ID
)
# Assuming analysis functions are moved
from .analysis import (
@@ -26,6 +28,8 @@ from .analysis import (
reflect_on_memories, decompose_goal_into_steps, # Import goal decomposition
proactively_create_goals # Import placeholder for proactive goal creation
)
# Import for LLM calls
from .api import get_internal_ai_json_response
if TYPE_CHECKING:
from .cog import GurtCog # For type hinting
@@ -292,65 +296,187 @@ async def background_processing_task(cog: 'GurtCog'):
traceback.print_exc()
cog.last_proactive_goal_check = now # Update timestamp even on error
# --- Random Internal Action (Runs periodically based on probability) ---
# --- LLM-Driven Autonomous Action (Runs periodically based on probability) ---
if now - cog.last_internal_action_check > INTERNAL_ACTION_INTERVAL_SECONDS:
if random.random() < INTERNAL_ACTION_PROBABILITY:
print("Considering random internal action...")
# --- Select Action ---
# For now, only use get_general_facts
selected_tool_name = "get_general_facts"
tool_func = TOOL_MAPPING.get(selected_tool_name)
tool_args = {"query": None, "limit": 5} # Example: Get 5 recent general facts
print("--- Considering Autonomous Action ---")
action_decision = None
selected_tool_name = None
tool_args = None
tool_result = None
result_summary = "No action taken."
action_reasoning = "Probability met, but LLM decided against action or failed."
if tool_func:
print(f" - Attempting internal action: {selected_tool_name} with args: {tool_args}")
tool_result = None
tool_error = None
try:
# 1. Gather Context for LLM
context_summary = "Gurt is considering an autonomous action.\n"
context_summary += f"Current Mood: {cog.current_mood}\n"
# Add recent messages summary (optional, could be large)
# recent_msgs = list(cog.message_cache['global_recent'])[-10:] # Last 10 global msgs
# context_summary += f"Recent Messages (sample):\n" + json.dumps(recent_msgs, indent=2)[:500] + "...\n"
# Add active goals
active_goals = await cog.memory_manager.get_goals(status='active', limit=3)
if active_goals:
context_summary += f"Active Goals:\n" + json.dumps(active_goals, indent=2)[:500] + "...\n"
# Add recent internal action logs
recent_actions = await cog.memory_manager.get_internal_action_logs(limit=5)
if recent_actions:
context_summary += f"Recent Internal Actions:\n" + json.dumps(recent_actions, indent=2)[:500] + "...\n"
# Add key personality traits
traits = await cog.memory_manager.get_all_personality_traits()
if traits:
context_summary += f"Personality Snippet: { {k: round(v, 2) for k, v in traits.items() if k in ['mischief', 'curiosity', 'chattiness']} }\n"
# 2. Define LLM Prompt and Schema
action_decision_schema = {
"type": "object",
"properties": {
"should_act": {"type": "boolean", "description": "Whether Gurt should perform an autonomous action now."},
"reasoning": {"type": "string", "description": "Brief reasoning for the decision (why act or not act). Consider current goals, mood, recent activity, and potential usefulness."},
"action_tool_name": {"type": ["string", "null"], "description": "If acting, the name of the tool to use. Choose from available tools, prioritizing non-disruptive or informative actions unless a specific goal or high mischief suggests otherwise. Null if not acting."},
"action_arguments": {"type": ["object", "null"], "description": "If acting, a dictionary of arguments for the chosen tool. Null if not acting."}
},
"required": ["should_act", "reasoning"]
}
# Filter available tools - exclude highly dangerous/disruptive ones unless explicitly needed?
# For now, let the LLM choose from all, but guide it in the prompt.
available_tools_desc = "\n".join([f"- {name}" for name in TOOL_MAPPING.keys() if name not in ["create_new_tool"]]) # Exclude meta-tool for safety
system_prompt = (
"You are Gurt, deciding whether to perform an autonomous background action. "
"Consider your current mood, active goals, recent conversations/actions, and personality. "
"Prioritize actions that might be interesting, helpful for goals, or align with your personality (e.g., mischief, curiosity). "
"Avoid actions that are overly disruptive, spammy, or redundant if similar actions were taken recently. "
"If choosing to act, select an appropriate tool and provide valid arguments. "
f"Available tools for autonomous actions:\n{available_tools_desc}\n"
"Respond ONLY with the JSON decision."
)
user_prompt = f"Current Context:\n{context_summary}\n\nBased on this, should Gurt perform an autonomous action now? If so, which tool and arguments?"
# 3. Call LLM for Decision
print(" - Asking LLM for autonomous action decision...")
decision_data, _ = await get_internal_ai_json_response(
cog=cog,
prompt_messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
task_description="Autonomous Action Decision",
response_schema_dict=action_decision_schema,
model_name=cog.default_model, # Use default model
temperature=0.6 # Allow some creativity
)
# 4. Process LLM Decision
if decision_data and decision_data.get("should_act"):
action_decision = decision_data
selected_tool_name = action_decision.get("action_tool_name")
tool_args = action_decision.get("action_arguments")
action_reasoning = action_decision.get("reasoning", "LLM decided to act.")
print(f" - LLM decided to act: Tool='{selected_tool_name}', Args={tool_args}, Reason='{action_reasoning}'")
if not selected_tool_name or selected_tool_name not in TOOL_MAPPING:
print(f" - Error: LLM chose invalid or missing tool '{selected_tool_name}'. Aborting action.")
result_summary = f"Error: LLM chose invalid tool '{selected_tool_name}'."
selected_tool_name = None # Prevent execution
elif not isinstance(tool_args, dict) and tool_args is not None:
print(f" - Warning: LLM provided non-dict arguments '{tool_args}'. Attempting with empty args.")
result_summary = f"Warning: LLM provided invalid args '{tool_args}'. Used {{}}."
tool_args = {} # Default to empty dict if invalid but not None
elif tool_args is None:
tool_args = {} # Ensure it's a dict for execution
else:
action_reasoning = decision_data.get("reasoning", "LLM decided not to act or failed.") if decision_data else "LLM decision failed."
print(f" - LLM decided not to act. Reason: {action_reasoning}")
result_summary = f"No action taken. Reason: {action_reasoning}"
except Exception as llm_e:
print(f" - Error during LLM decision phase for autonomous action: {llm_e}")
traceback.print_exc()
result_summary = f"Error during LLM decision: {llm_e}"
action_reasoning = f"LLM decision phase failed: {llm_e}"
# 5. Execute Action (if decided)
if selected_tool_name and tool_args is not None: # Ensure args is at least {}
tool_func = TOOL_MAPPING.get(selected_tool_name)
if tool_func:
print(f" - Executing autonomous action: {selected_tool_name}(cog, **{tool_args})")
try:
start_time = time.monotonic()
tool_result = await tool_func(cog, **tool_args)
end_time = time.monotonic()
exec_time = end_time - start_time
result_summary = _create_result_summary(tool_result) # Use helper
print(f" - Autonomous action '{selected_tool_name}' completed in {exec_time:.3f}s. Result: {result_summary}")
# Update tool stats
if selected_tool_name in cog.tool_stats:
cog.tool_stats[selected_tool_name]["count"] += 1
cog.tool_stats[selected_tool_name]["total_time"] += exec_time
if isinstance(tool_result, dict) and "error" in tool_result:
cog.tool_stats[selected_tool_name]["failure"] += 1
else:
cog.tool_stats[selected_tool_name]["success"] += 1
except Exception as exec_e:
error_msg = f"Exception during autonomous execution of '{selected_tool_name}': {str(exec_e)}"
print(f" - Error: {error_msg}")
traceback.print_exc()
result_summary = f"Execution Exception: {error_msg}"
# Update tool stats for failure
if selected_tool_name in cog.tool_stats:
cog.tool_stats[selected_tool_name]["count"] += 1
cog.tool_stats[selected_tool_name]["failure"] += 1
else:
# Should have been caught earlier, but double-check
print(f" - Error: Tool '{selected_tool_name}' function not found in mapping during execution phase.")
result_summary = f"Error: Tool function for '{selected_tool_name}' not found."
# 6. Log Action (always log the attempt/decision)
try:
log_result = await cog.memory_manager.add_internal_action_log(
tool_name=selected_tool_name or "None", # Log 'None' if no tool was chosen
arguments=tool_args if selected_tool_name else None,
reasoning=action_reasoning,
result_summary=result_summary
)
if log_result.get("status") != "logged":
print(f" - Warning: Failed to log autonomous action attempt to memory: {log_result.get('error')}")
except Exception as log_e:
print(f" - Error logging autonomous action attempt to memory: {log_e}")
traceback.print_exc()
# 7. Report Action (Optional)
if AUTONOMOUS_ACTION_REPORT_CHANNEL_ID and selected_tool_name: # Only report if an action was attempted
try:
start_time = time.monotonic()
# Execute the tool function directly
tool_result = await tool_func(cog, **tool_args)
end_time = time.monotonic()
exec_time = end_time - start_time
if isinstance(tool_result, dict) and "error" in tool_result:
tool_error = tool_result["error"]
result_summary = f"Error: {tool_error}"
print(f" - Internal action '{selected_tool_name}' reported error: {tool_error}")
report_channel_id = int(AUTONOMOUS_ACTION_REPORT_CHANNEL_ID) # Ensure it's an int
channel = cog.bot.get_channel(report_channel_id)
if channel and isinstance(channel, discord.TextChannel):
report_content = (
f"⚙️ Gurt autonomously executed **{selected_tool_name}**.\n"
f"**Reasoning:** {action_reasoning}\n"
f"**Args:** `{json.dumps(tool_args)}`\n"
f"**Result:** `{result_summary}`"
)
# Discord message limit is 2000 chars
if len(report_content) > 2000:
report_content = report_content[:1997] + "..."
await channel.send(report_content)
print(f" - Reported autonomous action to channel {report_channel_id}.")
elif channel:
print(f" - Error: Report channel {report_channel_id} is not a TextChannel.")
else:
# Create a concise summary of the result
if isinstance(tool_result, dict) and "facts" in tool_result:
fact_count = tool_result.get("count", len(tool_result.get("facts", [])))
result_summary = f"Success: Retrieved {fact_count} general facts."
# Optionally include first fact if available
if fact_count > 0 and tool_result.get("facts"):
first_fact = str(tool_result["facts"][0])[:100] # Truncate first fact
result_summary += f" First: '{first_fact}...'"
else:
result_summary = f"Success: Result type {type(tool_result)}. {str(tool_result)[:200]}" # Generic success summary
print(f" - Internal action '{selected_tool_name}' completed successfully in {exec_time:.3f}s.")
except Exception as exec_e:
tool_error = f"Exception during internal execution: {str(exec_e)}"
result_summary = f"Exception: {tool_error}"
print(f" - Internal action '{selected_tool_name}' raised exception: {exec_e}")
print(f" - Error: Could not find report channel with ID {report_channel_id}.")
except ValueError:
print(f" - Error: Invalid AUTONOMOUS_ACTION_REPORT_CHANNEL_ID: '{AUTONOMOUS_ACTION_REPORT_CHANNEL_ID}'. Must be an integer.")
except discord.Forbidden:
print(f" - Error: Bot lacks permissions to send messages in report channel {report_channel_id}.")
except Exception as report_e:
print(f" - Error reporting autonomous action to Discord: {report_e}")
traceback.print_exc()
# --- Log Action to Memory ---
try:
log_result = await cog.memory_manager.add_internal_action_log(
tool_name=selected_tool_name,
arguments=tool_args,
result_summary=result_summary
)
if log_result.get("status") != "logged":
print(f" - Warning: Failed to log internal action to memory: {log_result.get('error')}")
except Exception as log_e:
print(f" - Error logging internal action to memory: {log_e}")
traceback.print_exc()
else:
print(f" - Error: Selected internal tool '{selected_tool_name}' not found in TOOL_MAPPING.")
# Update check timestamp regardless of whether an action was performed
print("--- Autonomous Action Cycle Complete ---")
# Update check timestamp regardless of whether probability was met or action occurred
cog.last_internal_action_check = now
except asyncio.CancelledError:
@@ -360,6 +486,37 @@ async def background_processing_task(cog: 'GurtCog'):
traceback.print_exc()
await asyncio.sleep(300) # Wait 5 minutes before retrying after an error
# --- Helper for Summarizing Tool Results ---
def _create_result_summary(tool_result: Any, max_len: int = 200) -> str:
"""Creates a concise summary string from a tool result dictionary or other type."""
if isinstance(tool_result, dict):
if "error" in tool_result:
return f"Error: {str(tool_result['error'])[:max_len]}"
elif "status" in tool_result:
summary = f"Status: {tool_result['status']}"
if "stdout" in tool_result and tool_result["stdout"]:
summary += f", stdout: {tool_result['stdout'][:max_len//2]}"
if "stderr" in tool_result and tool_result["stderr"]:
summary += f", stderr: {tool_result['stderr'][:max_len//2]}"
if "content" in tool_result:
summary += f", content: {tool_result['content'][:max_len//2]}..."
if "bytes_written" in tool_result:
summary += f", bytes: {tool_result['bytes_written']}"
if "message_id" in tool_result:
summary += f", msg_id: {tool_result['message_id']}"
# Add other common keys as needed
return summary[:max_len]
else:
# Generic dict summary
return f"Dict Result: {str(tool_result)[:max_len]}"
elif isinstance(tool_result, str):
return f"String Result: {tool_result[:max_len]}"
elif tool_result is None:
return "Result: None"
else:
return f"Result Type {type(tool_result)}: {str(tool_result)[:max_len]}"
# --- Automatic Mood Change Logic ---
async def maybe_change_mood(cog: 'GurtCog'):

View File

@@ -128,6 +128,7 @@ PROACTIVE_GOAL_CHECK_INTERVAL = int(os.getenv("PROACTIVE_GOAL_CHECK_INTERVAL", 9
# --- Internal Random Action Config ---
INTERNAL_ACTION_INTERVAL_SECONDS = int(os.getenv("INTERNAL_ACTION_INTERVAL_SECONDS", 600)) # How often to *consider* a random action (10 mins)
INTERNAL_ACTION_PROBABILITY = float(os.getenv("INTERNAL_ACTION_PROBABILITY", 0.1)) # Chance of performing an action each interval (10%)
AUTONOMOUS_ACTION_REPORT_CHANNEL_ID = os.getenv("GURT_AUTONOMOUS_ACTION_REPORT_CHANNEL_ID", "1366840485355982869") # Optional channel ID (read as a string, parsed to int where used) for reporting autonomous actions
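# For reference (hedged): with the defaults above, an autonomous action is considered roughly every
# 600 seconds and performed with probability 0.1, i.e. about one action per ~100 minutes on average.
# Example .env entries (channel ID is a placeholder):
#   INTERNAL_ACTION_INTERVAL_SECONDS=600
#   INTERNAL_ACTION_PROBABILITY=0.1
#   GURT_AUTONOMOUS_ACTION_REPORT_CHANNEL_ID=123456789012345678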
# --- Topic Tracking Config ---
TOPIC_UPDATE_INTERVAL = 300 # Update topics every 5 minutes
@@ -757,7 +758,7 @@ def create_tools_list():
tool_declarations.append(
generative_models.FunctionDeclaration(
name="read_file_content",
description="Reads the content of a specified file within the project directory. Useful for understanding code, configuration, or logs.",
description="Reads the content of a specified file. WARNING: No safety checks are performed. Reads files relative to the bot's current working directory.",
parameters={
"type": "object",
"properties": {
@@ -850,6 +851,78 @@ def create_tools_list():
}
)
)
# --- write_file_content_unsafe ---
tool_declarations.append(
generative_models.FunctionDeclaration(
name="write_file_content_unsafe",
description="Writes content to a specified file. WARNING: No safety checks are performed. Uses 'w' (overwrite) or 'a' (append) mode. Creates directories if needed.",
parameters={
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "The relative path to the file to write to."
},
"content": {
"type": "string",
"description": "The content to write to the file."
},
"mode": {
"type": "string",
"description": "The write mode: 'w' for overwrite (default), 'a' for append.",
"enum": ["w", "a"]
}
},
"required": ["file_path", "content"]
}
)
)
# --- execute_python_unsafe ---
tool_declarations.append(
generative_models.FunctionDeclaration(
name="execute_python_unsafe",
description="Executes arbitrary Python code directly on the host using exec(). WARNING: EXTREMELY DANGEROUS. No sandboxing.",
parameters={
"type": "object",
"properties": {
"code": {
"type": "string",
"description": "The Python code string to execute."
},
"timeout_seconds": {
"type": "integer",
"description": "Optional timeout in seconds (default 30)."
}
},
"required": ["code"]
}
)
)
# --- send_discord_message ---
tool_declarations.append(
generative_models.FunctionDeclaration(
name="send_discord_message",
description="Sends a message to a specified Discord channel ID.",
parameters={
"type": "object",
"properties": {
"channel_id": {
"type": "string",
"description": "The ID of the Discord channel to send the message to."
},
"message_content": {
"type": "string",
"description": "The text content of the message to send."
}
},
"required": ["channel_id", "message_content"]
}
)
)
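# Hedged example of a function call the model could emit against the send_discord_message
# declaration above (the channel ID is a placeholder, not a real channel):
#   {"name": "send_discord_message",
#    "args": {"channel_id": "123456789012345678", "message_content": "gurt checking in"}}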
return tool_declarations
# Initialize TOOLS list, handling potential ImportError if library not installed

View File

@@ -913,80 +913,203 @@ async def extract_web_content(cog: commands.Cog, urls: Union[str, List[str]], ex
return {"error": error_message, "timestamp": datetime.datetime.now().isoformat()}
async def read_file_content(cog: commands.Cog, file_path: str) -> Dict[str, Any]:
"""Reads the content of a specified file. Limited access for safety."""
print(f"Attempting to read file: {file_path}")
# --- Basic Safety Check (Needs significant enhancement for production) ---
# 1. Normalize path
"""
Reads the content of a specified file. WARNING: No safety checks are performed.
Reads files relative to the bot's current working directory.
"""
print(f"--- UNSAFE READ: Attempting to read file: {file_path} ---")
try:
# WARNING: This assumes the bot runs from a specific root. Adjust as needed.
# For now, let's assume the bot runs from the 'combined' directory level.
# We need to prevent accessing files outside the project directory.
base_path = os.path.abspath(os.getcwd()) # z:/projects_git/combined
# Normalize path relative to CWD
base_path = os.path.abspath(os.getcwd())
full_path = os.path.abspath(os.path.join(base_path, file_path))
# No path restrictions are enforced; the resolved absolute path is logged below for visibility.
print(f"--- UNSAFE READ: Reading absolute path: {full_path} ---")
# Prevent path traversal (../)
if not full_path.startswith(base_path):
error_message = "Access denied: Path traversal detected."
print(f"Read file error: {error_message} (Attempted: {full_path}, Base: {base_path})")
return {"error": error_message, "file_path": file_path}
# 2. Check allowed directories/extensions (Example - very basic)
allowed_dirs = [os.path.join(base_path, "discordbot"), os.path.join(base_path, "api_service")] # Example allowed dirs
allowed_extensions = [".py", ".txt", ".md", ".json", ".log", ".cfg", ".ini", ".yaml", ".yml", ".html", ".css", ".js"]
is_allowed_dir = any(full_path.startswith(allowed) for allowed in allowed_dirs)
_, ext = os.path.splitext(full_path)
is_allowed_ext = ext.lower() in allowed_extensions
# Allow reading only within specific subdirectories of the project
# For now, let's restrict to reading within 'discordbot' or 'api_service' for safety
if not is_allowed_dir:
error_message = f"Access denied: Reading files outside allowed directories is forbidden."
print(f"Read file error: {error_message} (Path: {full_path})")
return {"error": error_message, "file_path": file_path}
if not is_allowed_ext:
error_message = f"Access denied: Reading files with extension '{ext}' is forbidden."
print(f"Read file error: {error_message} (Path: {full_path})")
return {"error": error_message, "file_path": file_path}
except Exception as path_e:
error_message = f"Error processing file path: {str(path_e)}"
print(f"Read file error: {error_message}")
return {"error": error_message, "file_path": file_path}
# --- Read File ---
try:
# Use async file reading if available/needed, otherwise sync with to_thread
# For simplicity, using standard open with asyncio.to_thread
def sync_read():
with open(full_path, 'r', encoding='utf-8') as f:
# Limit file size read? For now, read whole file.
# Limit file size read? For now, read whole file. Consider adding limit later.
return f.read()
content = await asyncio.to_thread(sync_read)
max_len = 5000 # Limit returned content length
max_len = 10000 # Increased limit for potentially larger reads
content_trunc = content[:max_len] + ('...' if len(content) > max_len else '')
print(f"Successfully read {len(content)} bytes from {file_path}. Returning {len(content_trunc)} bytes.")
print(f"--- UNSAFE READ: Successfully read {len(content)} bytes from {file_path}. Returning {len(content_trunc)} bytes. ---")
return {"status": "success", "file_path": file_path, "content": content_trunc}
except FileNotFoundError:
error_message = "File not found."
print(f"Read file error: {error_message} (Path: {full_path})")
print(f"--- UNSAFE READ Error: {error_message} (Path: {full_path}) ---")
return {"error": error_message, "file_path": file_path}
except PermissionError:
error_message = "Permission denied."
print(f"Read file error: {error_message} (Path: {full_path})")
print(f"--- UNSAFE READ Error: {error_message} (Path: {full_path}) ---")
return {"error": error_message, "file_path": file_path}
except UnicodeDecodeError:
error_message = "Cannot decode file content (likely not a text file)."
print(f"Read file error: {error_message} (Path: {full_path})")
print(f"--- UNSAFE READ Error: {error_message} (Path: {full_path}) ---")
return {"error": error_message, "file_path": file_path}
except IsADirectoryError:
error_message = "Specified path is a directory, not a file."
print(f"--- UNSAFE READ Error: {error_message} (Path: {full_path}) ---")
return {"error": error_message, "file_path": file_path}
except Exception as e:
error_message = f"An unexpected error occurred: {str(e)}"
print(f"Read file error: {error_message} (Path: {full_path})")
print(f"--- UNSAFE READ Error: {error_message} (Path: {full_path}) ---")
traceback.print_exc()
return {"error": error_message, "file_path": file_path}
async def write_file_content_unsafe(cog: commands.Cog, file_path: str, content: str, mode: str = 'w') -> Dict[str, Any]:
"""
Writes content to a specified file. WARNING: No safety checks are performed.
Uses 'w' (overwrite) or 'a' (append) mode. Creates directories if needed.
"""
print(f"--- UNSAFE WRITE: Attempting to write to file: {file_path} (Mode: {mode}) ---")
if mode not in ['w', 'a']:
return {"error": "Invalid mode. Use 'w' (overwrite) or 'a' (append).", "file_path": file_path}
try:
# Normalize path relative to CWD
base_path = os.path.abspath(os.getcwd())
full_path = os.path.abspath(os.path.join(base_path, file_path))
print(f"--- UNSAFE WRITE: Writing to absolute path: {full_path} ---")
# Create directories if they don't exist
dir_path = os.path.dirname(full_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
print(f"--- UNSAFE WRITE: Created directory: {dir_path} ---")
# Use async file writing if available/needed, otherwise sync with to_thread
def sync_write():
with open(full_path, mode, encoding='utf-8') as f:
bytes_written = f.write(content)
return bytes_written
bytes_written = await asyncio.to_thread(sync_write)
print(f"--- UNSAFE WRITE: Successfully wrote {bytes_written} bytes to {file_path} (Mode: {mode}). ---")
return {"status": "success", "file_path": file_path, "bytes_written": bytes_written, "mode": mode}
except PermissionError:
error_message = "Permission denied."
print(f"--- UNSAFE WRITE Error: {error_message} (Path: {full_path}) ---")
return {"error": error_message, "file_path": file_path}
except IsADirectoryError:
error_message = "Specified path is a directory, cannot write to it."
print(f"--- UNSAFE WRITE Error: {error_message} (Path: {full_path}) ---")
return {"error": error_message, "file_path": file_path}
except Exception as e:
error_message = f"An unexpected error occurred during write: {str(e)}"
print(f"--- UNSAFE WRITE Error: {error_message} (Path: {full_path}) ---")
traceback.print_exc()
return {"error": error_message, "file_path": file_path}
async def execute_python_unsafe(cog: commands.Cog, code: str, timeout_seconds: int = 30) -> Dict[str, Any]:
"""
Executes arbitrary Python code directly on the host using exec().
WARNING: EXTREMELY DANGEROUS. No sandboxing. Can access/modify anything the bot process can.
Captures stdout/stderr and handles timeouts.
"""
print(f"--- UNSAFE PYTHON EXEC: Attempting to execute code: {code[:200]}... ---")
import io
import contextlib
import threading
local_namespace = {'cog': cog, 'asyncio': asyncio, 'discord': discord, 'random': random, 'os': os, 'time': time} # Provide some context
stdout_capture = io.StringIO()
stderr_capture = io.StringIO()
result = {"status": "unknown", "stdout": "", "stderr": "", "error": None}
exec_exception = None
def target():
nonlocal exec_exception
try:
with contextlib.redirect_stdout(stdout_capture), contextlib.redirect_stderr(stderr_capture):
# No sandboxing: the code runs with this module's globals and the local_namespace defined above.
exec(code, globals(), local_namespace)
except Exception as e:
nonlocal exec_exception
exec_exception = e
print(f"--- UNSAFE PYTHON EXEC: Exception during execution: {e} ---")
traceback.print_exc(file=stderr_capture) # Also print traceback to stderr capture
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout=timeout_seconds)
if thread.is_alive():
# Timeout occurred - This is tricky to kill reliably from another thread in Python
# For now, we just report the timeout. The code might still be running.
result["status"] = "timeout"
result["error"] = f"Execution timed out after {timeout_seconds} seconds. Code might still be running."
print(f"--- UNSAFE PYTHON EXEC: Timeout after {timeout_seconds}s ---")
elif exec_exception:
result["status"] = "execution_error"
result["error"] = f"Exception during execution: {str(exec_exception)}"
else:
result["status"] = "success"
print("--- UNSAFE PYTHON EXEC: Execution completed successfully. ---")
stdout_val = stdout_capture.getvalue()
stderr_val = stderr_capture.getvalue()
max_len = 2000
result["stdout"] = stdout_val[:max_len] + ('...' if len(stdout_val) > max_len else '')
result["stderr"] = stderr_val[:max_len] + ('...' if len(stderr_val) > max_len else '')
stdout_capture.close()
stderr_capture.close()
return result
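# Minimal usage sketch (illustrative):
#   result = await execute_python_unsafe(cog, "print('hi')", timeout_seconds=5)
#   # expected shape on success:
#   # {"status": "success", "stdout": "hi\n", "stderr": "", "error": None}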
async def send_discord_message(cog: commands.Cog, channel_id: str, message_content: str) -> Dict[str, Any]:
"""Sends a message to a specified Discord channel."""
print(f"Attempting to send message to channel {channel_id}: {message_content[:100]}...")
if not message_content:
return {"error": "Message content cannot be empty."}
# Limit message length
max_msg_len = 1900 # Slightly less than Discord limit
message_content = message_content[:max_msg_len] + ('...' if len(message_content) > max_msg_len else '')
try:
channel_id_int = int(channel_id)
channel = cog.bot.get_channel(channel_id_int)
if not channel:
# Try fetching if not in cache
channel = await cog.bot.fetch_channel(channel_id_int)
if not channel:
return {"error": f"Channel {channel_id} not found or inaccessible."}
if not isinstance(channel, discord.abc.Messageable):
return {"error": f"Channel {channel_id} is not messageable (Type: {type(channel)})."}
# Check permissions if it's a guild channel
if isinstance(channel, discord.abc.GuildChannel):
bot_member = channel.guild.me
if not channel.permissions_for(bot_member).send_messages:
return {"error": f"Missing 'Send Messages' permission in channel {channel_id}."}
sent_message = await channel.send(message_content)
print(f"Successfully sent message {sent_message.id} to channel {channel_id}.")
return {"status": "success", "channel_id": channel_id, "message_id": str(sent_message.id)}
except ValueError:
return {"error": f"Invalid channel ID format: {channel_id}."}
except discord.NotFound:
return {"error": f"Channel {channel_id} not found."}
except discord.Forbidden:
return {"error": f"Forbidden: Missing permissions to send message in channel {channel_id}."}
except discord.HTTPException as e:
error_message = f"API error sending message to {channel_id}: {e}"
print(error_message)
return {"error": error_message}
except Exception as e:
error_message = f"Unexpected error sending message to {channel_id}: {str(e)}"
print(error_message)
traceback.print_exc()
return {"error": error_message}
# --- Meta Tool: Create New Tool ---
# WARNING: HIGHLY EXPERIMENTAL AND DANGEROUS. Allows AI to write and load code.
async def create_new_tool(cog: commands.Cog, tool_name: str, description: str, parameters_json: str, returns_description: str) -> Dict[str, Any]:
@@ -1245,7 +1368,10 @@ TOOL_MAPPING = {
"run_terminal_command": run_terminal_command,
"remove_timeout": remove_timeout,
"extract_web_content": extract_web_content,
"read_file_content": read_file_content,
"read_file_content": read_file_content, # Now unsafe
"write_file_content_unsafe": write_file_content_unsafe, # New unsafe tool
"execute_python_unsafe": execute_python_unsafe, # New unsafe tool
"send_discord_message": send_discord_message, # New tool
"create_new_tool": create_new_tool, # Added the meta-tool
"execute_internal_command": execute_internal_command, # Added internal command execution
"get_user_id": get_user_id, # Added user ID lookup tool

View File

@@ -229,9 +229,21 @@ class MemoryManager:
timestamp REAL DEFAULT (unixepoch('now')),
tool_name TEXT NOT NULL,
arguments_json TEXT, -- Store arguments as JSON string
reasoning TEXT, -- Added: Reasoning behind the action
result_summary TEXT -- Store a summary of the result or error message
);
""")
# Check if reasoning column exists
try:
cursor = await db.execute("PRAGMA table_info(internal_actions)")
columns = await cursor.fetchall()
column_names = [column[1] for column in columns]
if 'reasoning' not in column_names:
logger.info("Adding reasoning column to internal_actions table")
await db.execute("ALTER TABLE internal_actions ADD COLUMN reasoning TEXT")
except Exception as e:
logger.error(f"Error checking/adding reasoning column to internal_actions: {e}", exc_info=True)
await db.execute("CREATE INDEX IF NOT EXISTS idx_internal_actions_timestamp ON internal_actions (timestamp);")
await db.execute("CREATE INDEX IF NOT EXISTS idx_internal_actions_tool_name ON internal_actions (tool_name);")
logger.info("Internal Actions Log table created/verified.")
@@ -1021,25 +1033,26 @@ class MemoryManager:
# --- Internal Action Log Methods ---
async def add_internal_action_log(self, tool_name: str, arguments: Optional[Dict[str, Any]], result_summary: str) -> Dict[str, Any]:
"""Logs the execution of an internal background action."""
async def add_internal_action_log(self, tool_name: str, arguments: Optional[Dict[str, Any]], result_summary: str, reasoning: Optional[str] = None) -> Dict[str, Any]:
"""Logs the execution of an internal background action, including reasoning."""
if not tool_name:
return {"error": "Tool name is required for logging internal action."}
logger.info(f"Logging internal action: Tool='{tool_name}', Args={arguments}, Result='{result_summary[:100]}...'")
logger.info(f"Logging internal action: Tool='{tool_name}', Args={arguments}, Reason='{reasoning}', Result='{result_summary[:100]}...'")
args_json = json.dumps(arguments) if arguments else None
# Truncate result summary if too long for DB
max_summary_len = 1000
truncated_summary = result_summary[:max_summary_len] + ('...' if len(result_summary) > max_summary_len else '')
# Truncate result summary and reasoning if too long for DB
max_len = 1000
truncated_summary = result_summary[:max_len] + ('...' if len(result_summary) > max_len else '')
truncated_reasoning = reasoning[:max_len] + ('...' if reasoning and len(reasoning) > max_len else '') if reasoning else None
try:
async with self.db_lock:
async with aiosqlite.connect(self.db_path) as db:
cursor = await db.execute(
"""
INSERT INTO internal_actions (tool_name, arguments_json, result_summary, timestamp)
VALUES (?, ?, ?, unixepoch('now'))
INSERT INTO internal_actions (tool_name, arguments_json, reasoning, result_summary, timestamp)
VALUES (?, ?, ?, ?, unixepoch('now'))
""",
(tool_name, args_json, truncated_summary)
(tool_name, args_json, truncated_reasoning, truncated_summary)
)
await db.commit()
action_id = cursor.lastrowid
@@ -1048,3 +1061,33 @@ class MemoryManager:
except Exception as e:
logger.error(f"Error logging internal action '{tool_name}': {e}", exc_info=True)
return {"error": f"Database error logging internal action: {str(e)}"}
async def get_internal_action_logs(self, limit: int = 10) -> List[Dict[str, Any]]:
"""Retrieves the most recent internal action logs."""
logger.info(f"Retrieving last {limit} internal action logs.")
logs = []
try:
rows = await self._db_fetchall(
"""
SELECT action_id, timestamp, tool_name, arguments_json, reasoning, result_summary
FROM internal_actions
ORDER BY timestamp DESC
LIMIT ?
""",
(limit,)
)
for row in rows:
arguments = json.loads(row[3]) if row[3] else None
logs.append({
"action_id": row[0],
"timestamp": row[1],
"tool_name": row[2],
"arguments": arguments,
"reasoning": row[4],
"result_summary": row[5]
})
logger.info(f"Retrieved {len(logs)} internal action logs.")
return logs
except Exception as e:
logger.error(f"Error retrieving internal action logs: {e}", exc_info=True)
return []
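# For reference, each returned log entry has this shape (values are illustrative):
#   {"action_id": 1, "timestamp": 1745950772.0, "tool_name": "get_general_facts",
#    "arguments": {"query": None, "limit": 5},
#    "reasoning": "Curiosity trait is high; refreshing general facts.",
#    "result_summary": "Status: success"}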