From b525a4caaafe571e67bd7bae95defe970c920123 Mon Sep 17 00:00:00 2001 From: Slipstream Date: Wed, 30 Apr 2025 08:11:48 -0600 Subject: [PATCH] Rename model_name kwarg to model_name_override in internal AI JSON response calls --- cogs/profile_updater_cog.py | 4 ++-- gurt/analysis.py | 4 ++-- gurt/api.py | 2 +- gurt/background.py | 2 +- gurt/tools.py | 6 +++--- wheatley/api.py | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cogs/profile_updater_cog.py b/cogs/profile_updater_cog.py index 277e1d4..b197d9f 100644 --- a/cogs/profile_updater_cog.py +++ b/cogs/profile_updater_cog.py @@ -309,7 +309,7 @@ Review your current profile state (provided below) and decide if you want to mak prompt_messages=prompt_messages, task_description="Profile Update Decision", response_schema_dict=response_schema_dict, # Pass the schema dict - model_name=DEFAULT_MODEL, # Use model from config + model_name_override=DEFAULT_MODEL, # Use model from config temperature=0.5, # Keep temperature for some creativity max_tokens=500 # Adjust max tokens if needed ) @@ -506,7 +506,7 @@ Review your current profile state (provided below) and decide if you want to mak prompt_messages=role_prompt_messages, task_description=f"Role Selection for Guild {guild.id}", response_schema_dict=role_selection_schema_dict, # Pass the schema dict - model_name=DEFAULT_MODEL, # Use model from config + model_name_override=DEFAULT_MODEL, # Use model from config temperature=0.5 # More deterministic for role selection ) diff --git a/gurt/analysis.py b/gurt/analysis.py index 2404059..54df69a 100644 --- a/gurt/analysis.py +++ b/gurt/analysis.py @@ -497,7 +497,7 @@ async def reflect_on_memories(cog: 'GurtCog'): prompt_messages=reflection_prompt, task_description=f"User Fact Reflection ({user_name})", response_schema_dict=synthesis_schema, - model_name=SYNTHESIS_MODEL, + model_name_override=SYNTHESIS_MODEL, temperature=0.4, max_tokens=SYNTHESIS_MAX_TOKENS ) @@ -559,7 +559,7 @@ async def decompose_goal_into_steps(cog: 'GurtCog', goal_description: str) -> Op
prompt_messages=decomposition_prompt_messages, task_description=f"Goal Decomposition ({goal_description[:30]}...)", response_schema_dict=GOAL_DECOMPOSITION_SCHEMA['schema'], - model_name=cog.fallback_model, # Use fallback model for planning potentially + model_name_override=cog.fallback_model, # Use fallback model for planning potentially temperature=0.3, max_tokens=1000 # Allow more tokens for potentially complex plans ) diff --git a/gurt/api.py b/gurt/api.py index 864a304..fef66df 100644 --- a/gurt/api.py +++ b/gurt/api.py @@ -1092,7 +1092,7 @@ async def get_proactive_ai_response(cog: 'GurtCog', message: discord.Message, tr prompt_messages=planning_prompt_messages, task_description=f"Proactive Planning ({trigger_reason})", response_schema_dict=PROACTIVE_PLAN_SCHEMA['schema'], - model_name=FALLBACK_MODEL, # Use a potentially faster/cheaper model for planning + model_name_override=FALLBACK_MODEL, # Use a potentially faster/cheaper model for planning temperature=0.5, max_tokens=2000 ) diff --git a/gurt/background.py b/gurt/background.py index 4e7cd27..aebd13d 100644 --- a/gurt/background.py +++ b/gurt/background.py @@ -487,7 +487,7 @@ async def background_processing_task(cog: 'GurtCog'): prompt_messages=[{"role": "system", "content": follow_up_system_prompt}, {"role": "user", "content": follow_up_user_prompt}], task_description="Autonomous Follow-up Action Decision", response_schema_dict=follow_up_schema, - model_name=cog.default_model, + model_name_override=cog.default_model, temperature=0.5 ) diff --git a/gurt/tools.py b/gurt/tools.py index 68bb6b4..9e49d5b 100644 --- a/gurt/tools.py +++ b/gurt/tools.py @@ -292,7 +292,7 @@ async def get_conversation_summary(cog: commands.Cog, channel_id: str = None, me prompt_messages=prompt_messages, task_description=f"Summarization for channel {target_channel_id}", response_schema_dict=SUMMARY_RESPONSE_SCHEMA['schema'], # Pass the schema dict - model_name=DEFAULT_MODEL, # Consider a cheaper/faster model if needed + 
model_name_override=DEFAULT_MODEL, # Consider a cheaper/faster model if needed temperature=0.3, max_tokens=200 # Adjust as needed ) @@ -648,7 +648,7 @@ async def _check_command_safety(cog: commands.Cog, command: str) -> Dict[str, An prompt_messages=prompt_messages, task_description="Command Safety Check", response_schema_dict=safety_schema, # Pass the schema dict directly - model_name=SAFETY_CHECK_MODEL, + model_name_override=SAFETY_CHECK_MODEL, temperature=0.1, max_tokens=1000 # Increased token limit ) @@ -1176,7 +1176,7 @@ async def create_new_tool(cog: commands.Cog, tool_name: str, description: str, p prompt_messages=generation_prompt_messages, task_description=f"Generate code for new tool '{tool_name}'", response_schema_dict=generation_schema, - model_name=cog.default_model, # Use default model for generation + model_name_override=cog.default_model, # Use default model for generation temperature=0.3, # Lower temperature for more predictable code max_tokens=5000 # Allow ample space for code generation ) diff --git a/wheatley/api.py b/wheatley/api.py index 2a1669b..988ba95 100644 --- a/wheatley/api.py +++ b/wheatley/api.py @@ -817,7 +817,7 @@ async def get_proactive_ai_response(cog: 'WheatleyCog', message: discord.Message prompt_messages=planning_prompt_messages, task_description=f"Proactive Planning ({trigger_reason})", response_schema_dict=PROACTIVE_PLAN_SCHEMA['schema'], - model_name=FALLBACK_MODEL, # Use a potentially faster/cheaper model for planning + model_name_override=FALLBACK_MODEL, # Use a potentially faster/cheaper model for planning temperature=0.5, max_tokens=300 )