Slipstream 2025-04-29 11:41:21 -06:00
parent 4141473acd
commit 4c9c0f25cc
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD
2 changed files with 10 additions and 10 deletions

View File

@@ -658,7 +658,7 @@ async def get_ai_response(cog: 'GurtCog', message: discord.Message, model_name:
     # --- Config for checking tool calls within the loop (using model_with_tools) ---
     # No schema enforcement needed here, just checking for function calls
     generation_config_tool_check = GenerationConfig(
-        temperature=0.75, # Or desired temp for tool reasoning
+        temperature=0.5, # Or desired temp for tool reasoning
         max_output_tokens=10000 # Allow ample tokens for reasoning + function call
     )
     # Force *any* tool use if the model deems it necessary
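
Note: the hunk above lowers the temperature of the tool-check pass. For reference, a minimal sketch (not the repository's code; the ToolConfig is an assumption inferred from the "Force *any* tool use" comment) of how such a pass is configured with the Vertex AI Python SDK:

# Sketch only; assumes the vertexai SDK (google-cloud-aiplatform).
from vertexai.generative_models import GenerationConfig, ToolConfig

generation_config_tool_check = GenerationConfig(
    temperature=0.5,          # lower temperature -> steadier tool reasoning
    max_output_tokens=10000,  # room for reasoning text plus the function call
)

# Mode.ANY forces the model to emit a function call whenever tools are bound;
# Mode.AUTO would instead let the model decide, which is closer to the
# "if the model deems it necessary" wording in the comment above.
tool_config = ToolConfig(
    function_calling_config=ToolConfig.FunctionCallingConfig(
        mode=ToolConfig.FunctionCallingConfig.Mode.ANY,
    )
)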
@@ -802,7 +802,7 @@ async def get_ai_response(cog: 'GurtCog', message: discord.Message, model_name:
     # Use the model_without_tools instance
     processed_response_schema = _preprocess_schema_for_vertex(RESPONSE_SCHEMA['schema'])
     generation_config_final_json = GenerationConfig(
-        temperature=0.75, # Or desired final temp
+        temperature=0.6, # Or desired final temp
         max_output_tokens=10000, # Or desired final max tokens
         response_mime_type="application/json",
         response_schema=processed_response_schema
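
Note: this hunk tunes the final JSON pass. A hedged sketch of the underlying mechanism, assuming the vertexai SDK; the model name and inline schema below are placeholders standing in for _preprocess_schema_for_vertex(RESPONSE_SCHEMA['schema']):

# Sketch only; "gemini-1.5-pro-002" and the schema are placeholders.
from vertexai.generative_models import GenerationConfig, GenerativeModel

processed_schema = {
    "type": "object",
    "properties": {"content": {"type": "string"}},
    "required": ["content"],
}

model = GenerativeModel("gemini-1.5-pro-002")
config = GenerationConfig(
    temperature=0.6,
    max_output_tokens=10000,
    response_mime_type="application/json",  # constrain decoding to JSON
    response_schema=processed_schema,       # ...conforming to this schema
)
response = model.generate_content("Reply as JSON.", generation_config=config)
print(response.text)  # a JSON string matching the schema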
@@ -985,8 +985,8 @@ async def get_proactive_ai_response(cog: 'GurtCog', message: discord.Message, tr
     # Preprocess the schema before passing it to GenerationConfig
     processed_response_schema_proactive = _preprocess_schema_for_vertex(RESPONSE_SCHEMA['schema'])
     generation_config_final = GenerationConfig(
-        temperature=0.8, # Use original proactive temp
-        max_output_tokens=200,
+        temperature=0.6, # Use original proactive temp
+        max_output_tokens=2000,
         response_mime_type="application/json",
         response_schema=processed_response_schema_proactive # Use preprocessed schema
     )
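
Note: alongside the temperature change, this hunk raises max_output_tokens from 200 to 2000. That matters because a cap that small can cut schema-constrained JSON off mid-object, after which parsing fails. A defensive check, sketched under the assumption that the vertexai SDK's FinishReason enum and GenerationResponse type are available:

# Sketch: guard against truncated JSON before parsing.
import json
from typing import Any, Optional

from vertexai.generative_models import FinishReason, GenerationResponse

def parse_json_response(response: GenerationResponse) -> Optional[Any]:
    candidate = response.candidates[0]
    if candidate.finish_reason == FinishReason.MAX_TOKENS:
        # Output hit max_output_tokens; the JSON is cut off and will
        # not parse, so fail fast instead of raising on json.loads.
        return None
    return json.loads(response.text)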

View File

@@ -647,11 +647,11 @@ async def _check_command_safety(cog: commands.Cog, command: str) -> Dict[str, An
         cog=cog,
         prompt_messages=prompt_messages,
         task_description="Command Safety Check",
-        response_schema_dict=safety_schema, # Pass the schema dict directly
-        model_name=SAFETY_CHECK_MODEL,
-        temperature=0.1,
-        max_tokens=150
-    )
+        response_schema_dict=safety_schema, # Pass the schema dict directly
+        model_name=SAFETY_CHECK_MODEL,
+        temperature=0.1,
+        max_tokens=1000 # Increased token limit
+    )
     # --- Log the raw response text ---
     print(f"--- Raw AI Safety Check Response Text ---\n{safety_response_raw}\n---------------------------------------")
@@ -1055,7 +1055,7 @@ async def create_new_tool(cog: commands.Cog, tool_name: str, description: str, p
         response_schema_dict=generation_schema,
         model_name=cog.default_model, # Use default model for generation
         temperature=0.3, # Lower temperature for more predictable code
-        max_tokens=1500 # Allow ample space for code generation
+        max_tokens=5000 # Allow ample space for code generation
     )
     # Unpack the tuple, we only need the parsed data here
     generated_parsed_data, _ = generated_data if generated_data else (None, None)
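
Note: a rough sense of why 1500 output tokens is tight for code generation, assuming the common (approximate) 4-characters-per-token heuristic for source code:

# Back-of-the-envelope output budget; ~4 chars/token is a heuristic,
# not an exact tokenizer figure.
CHARS_PER_TOKEN = 4
for budget in (1500, 5000):
    print(f"{budget} tokens ~= {budget * CHARS_PER_TOKEN} characters of generated code")
# 1500 tokens ~= 6000 characters: a modest tool plus its JSON wrapper can
# exceed this; 5000 tokens ~= 20000 characters leaves comfortable headroom.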