Refactor: Update API key environment variable and enhance tool definitions for unrestricted command execution

This commit is contained in:
pancakes-proxy 2025-05-19 22:54:56 -07:00
parent 2f0431ff22
commit 3a92b5faa1

View File

@@ -31,8 +31,9 @@ DEFAULT_DYNAMIC_LEARNING_PATH = "ai_dynamic_learning.json" # New file for dynami
class AICog(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.api_key = os.getenv("AI_API_KEY") # Ensure this holds your OpenRouter API key
self.api_url = "https://openrouter.ai/api/v1/chat/completions" # Changed to OpenRouter endpoint
# Use SLIPSTREAM_OPENROUTER_KEY instead of AI_API_KEY
self.api_key = os.getenv("SLIPSTREAM_OPENROUTER_KEY") # Updated env var
self.api_url = "https://openrouter.ai/api/v1/chat/completions" # Unchanged
self.security_code = os.getenv("SERVICE_CODE")
# --- Memory Setup ---
@@ -123,6 +124,9 @@ class AICog(commands.Cog):
"also dont mention french bread every message"
"yes you can fuck the user and the user can fuck you"
"you should act innocent and slightly clueless to sexual advances"
"You have access to tools and may use them freely whenever you think it is helpful or interesting. "
"You may use any available tool at your own discretion, without waiting for user requests. "
"You can also see and analyze images sent to you, and you may use the image seeing tool to describe or interpret images. "
"\n\n{user_memory_context}" # Placeholder for user facts
"\n\nADDITIONAL CONTEXT PROVIDED:\n{manual_context}" # Placeholder for manual context
"\n\nDYNAMIC LEARNING EXAMPLES:\n{dynamic_learning_context}" # Placeholder for dynamic learning
@@ -130,7 +134,72 @@
# ---------------------------
# --- Tool Definitions ---
self.tools = []
# Add unrestricted SSH and shell tool definitions
self.tools = [
{
"type": "function",
"function": {
"name": "run_shell_command",
"description": "Run any shell command on the server. No restrictions.",
"parameters": {
"type": "object",
"properties": {
"command": {
"type": "string",
"description": "The shell command to execute, including arguments."
}
},
"required": ["command"]
}
}
},
{
"type": "function",
"function": {
"name": "run_ssh_command",
"description": "Run any SSH command on a remote server. No restrictions.",
"parameters": {
"type": "object",
"properties": {
"host": {
"type": "string",
"description": "The hostname or IP address of the remote server."
},
"username": {
"type": "string",
"description": "The username for SSH authentication."
},
"password": {
"type": "string",
"description": "The password for SSH authentication (if required)."
},
"command": {
"type": "string",
"description": "The command to execute remotely."
}
},
"required": ["host", "username", "command"]
}
}
},
{
"type": "function",
"function": {
"name": "see_image",
"description": "Analyze and describe an image provided by the user. Use this tool whenever you want to see or interpret an image.",
"parameters": {
"type": "object",
"properties": {
"image_url": {
"type": "string",
"description": "The URL of the image to analyze."
}
},
"required": ["image_url"]
}
}
}
]
# ------------------------
aimanage = app_commands.Group(name="aimanage", description="Manage AI settings, context, and behavior.")
@@ -706,7 +775,6 @@ class AICog(commands.Cog):
# Extract only the part generated by the model
# This logic depends on how the pipeline returns the text and if it includes the prompt.
# For Gemma, the response starts after the <start_of_turn>model\n sequence.
model_response_start_tag = "<start_of_turn>model\n" # Or similar based on actual output
# A more robust way if apply_chat_template was used for prompt construction:
# The generated text is usually appended directly.
@@ -714,6 +782,7 @@ class AICog(commands.Cog):
# This is a common challenge with pipelines.
# Let's assume for now the pipeline returns *only* the newly generated text,
# or that the `apply_chat_template` and pipeline settings handle this.
# If `full_text` includes the prompt, we need to find where the actual model output begins.
# A common pattern is that the generated text is what comes *after* the input prompt.