Rename model_name to model_name_override at internal AI JSON response call sites
parent 46138a8823
commit b525a4caaa
@@ -309,7 +309,7 @@ Review your current profile state (provided below) and decide if you want to mak
     prompt_messages=prompt_messages,
     task_description="Profile Update Decision",
     response_schema_dict=response_schema_dict, # Pass the schema dict
-    model_name=DEFAULT_MODEL, # Use model from config
+    model_name_override=DEFAULT_MODEL, # Use model from config
     temperature=0.5, # Keep temperature for some creativity
     max_tokens=500 # Adjust max tokens if needed
 )
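For orientation, here is roughly how the first call site reads once the hunk above is applied. The diff never shows the name of the awaited helper, so generate_internal_json and the surrounding stub below are hypothetical; only the keyword arguments come from the hunk:

from typing import Any, Dict, List

DEFAULT_MODEL = "configured-default-model"  # assumption: stands in for the real config value

async def generate_internal_json(**kwargs: Any) -> Dict[str, Any]:
    """Hypothetical stand-in for the helper these keyword arguments target."""
    return {"task": kwargs["task_description"]}

async def decide_profile_update(prompt_messages: List[Dict[str, str]],
                                response_schema_dict: Dict[str, Any]) -> Dict[str, Any]:
    return await generate_internal_json(
        prompt_messages=prompt_messages,
        task_description="Profile Update Decision",
        response_schema_dict=response_schema_dict,  # Pass the schema dict
        model_name_override=DEFAULT_MODEL,          # Use model from config
        temperature=0.5,                            # Keep temperature for some creativity
        max_tokens=500,                             # Adjust max tokens if needed
    )

The remaining hunks apply the same rename at nine other call sites.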
@@ -506,7 +506,7 @@ Review your current profile state (provided below) and decide if you want to mak
     prompt_messages=role_prompt_messages,
     task_description=f"Role Selection for Guild {guild.id}",
     response_schema_dict=role_selection_schema_dict, # Pass the schema dict
-    model_name=DEFAULT_MODEL, # Use model from config
+    model_name_override=DEFAULT_MODEL, # Use model from config
     temperature=0.5 # More deterministic for role selection
 )
@@ -497,7 +497,7 @@ async def reflect_on_memories(cog: 'GurtCog'):
     prompt_messages=reflection_prompt,
     task_description=f"User Fact Reflection ({user_name})",
     response_schema_dict=synthesis_schema,
-    model_name=SYNTHESIS_MODEL,
+    model_name_override=SYNTHESIS_MODEL,
     temperature=0.4,
     max_tokens=SYNTHESIS_MAX_TOKENS
 )
@@ -559,7 +559,7 @@ async def decompose_goal_into_steps(cog: 'GurtCog', goal_description: str) -> Op
     prompt_messages=decomposition_prompt_messages,
     task_description=f"Goal Decomposition ({goal_description[:30]}...)",
     response_schema_dict=GOAL_DECOMPOSITION_SCHEMA['schema'],
-    model_name=cog.fallback_model, # Use fallback model for planning potentially
+    model_name_override=cog.fallback_model, # Use fallback model for planning potentially
     temperature=0.3,
     max_tokens=1000 # Allow more tokens for potentially complex plans
 )
@@ -1092,7 +1092,7 @@ async def get_proactive_ai_response(cog: 'GurtCog', message: discord.Message, tr
     prompt_messages=planning_prompt_messages,
     task_description=f"Proactive Planning ({trigger_reason})",
     response_schema_dict=PROACTIVE_PLAN_SCHEMA['schema'],
-    model_name=FALLBACK_MODEL, # Use a potentially faster/cheaper model for planning
+    model_name_override=FALLBACK_MODEL, # Use a potentially faster/cheaper model for planning
     temperature=0.5,
     max_tokens=2000
 )
@@ -487,7 +487,7 @@ async def background_processing_task(cog: 'GurtCog'):
     prompt_messages=[{"role": "system", "content": follow_up_system_prompt}, {"role": "user", "content": follow_up_user_prompt}],
     task_description="Autonomous Follow-up Action Decision",
     response_schema_dict=follow_up_schema,
-    model_name=cog.default_model,
+    model_name_override=cog.default_model,
     temperature=0.5
 )
@@ -292,7 +292,7 @@ async def get_conversation_summary(cog: commands.Cog, channel_id: str = None, me
     prompt_messages=prompt_messages,
     task_description=f"Summarization for channel {target_channel_id}",
     response_schema_dict=SUMMARY_RESPONSE_SCHEMA['schema'], # Pass the schema dict
-    model_name=DEFAULT_MODEL, # Consider a cheaper/faster model if needed
+    model_name_override=DEFAULT_MODEL, # Consider a cheaper/faster model if needed
     temperature=0.3,
     max_tokens=200 # Adjust as needed
 )
@@ -648,7 +648,7 @@ async def _check_command_safety(cog: commands.Cog, command: str) -> Dict[str, An
     prompt_messages=prompt_messages,
     task_description="Command Safety Check",
     response_schema_dict=safety_schema, # Pass the schema dict directly
-    model_name=SAFETY_CHECK_MODEL,
+    model_name_override=SAFETY_CHECK_MODEL,
     temperature=0.1,
     max_tokens=1000 # Increased token limit
 )
@@ -1176,7 +1176,7 @@ async def create_new_tool(cog: commands.Cog, tool_name: str, description: str, p
     prompt_messages=generation_prompt_messages,
     task_description=f"Generate code for new tool '{tool_name}'",
     response_schema_dict=generation_schema,
-    model_name=cog.default_model, # Use default model for generation
+    model_name_override=cog.default_model, # Use default model for generation
     temperature=0.3, # Lower temperature for more predictable code
     max_tokens=5000 # Allow ample space for code generation
 )
@@ -817,7 +817,7 @@ async def get_proactive_ai_response(cog: 'WheatleyCog', message: discord.Message
     prompt_messages=planning_prompt_messages,
     task_description=f"Proactive Planning ({trigger_reason})",
     response_schema_dict=PROACTIVE_PLAN_SCHEMA['schema'],
-    model_name=FALLBACK_MODEL, # Use a potentially faster/cheaper model for planning
+    model_name_override=FALLBACK_MODEL, # Use a potentially faster/cheaper model for planning
     temperature=0.5,
     max_tokens=300
 )
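Every hunk in this commit is the same one-line change: the keyword argument model_name is renamed to model_name_override, while the value passed through (DEFAULT_MODEL, SYNTHESIS_MODEL, FALLBACK_MODEL, SAFETY_CHECK_MODEL, cog.default_model, or cog.fallback_model) stays the same. Below is a minimal sketch of the callee-side convention such a rename usually implies, assuming the override is optional and resolved against a configured default; the function name and the resolution logic are assumptions, since the callee itself is not part of this diff:

from typing import Any, Dict, List, Optional

DEFAULT_MODEL = "configured-default-model"  # assumption: stands in for the real config value

async def get_internal_json_response(  # hypothetical name; the callee is not shown in the diff
    prompt_messages: List[Dict[str, str]],
    task_description: str,
    response_schema_dict: Dict[str, Any],
    model_name_override: Optional[str] = None,  # the parameter this commit renames to
    temperature: float = 0.7,
    max_tokens: int = 1000,
) -> Dict[str, Any]:
    # Resolve the per-call override against the configured default so that
    # call sites omitting the argument keep working after the rename.
    model_name = model_name_override or DEFAULT_MODEL
    # ... build and send the actual model request here ...
    return {"model_used": model_name, "task": task_description}

Making the parameter Optional with a None default keeps the signature backward compatible for callers that never choose a model explicitly; the ten call sites above all do, each passing its task-appropriate model through the new keyword.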