commit 26d5c54d77 (parent c3bc8121f9)
@@ -4682,11 +4682,12 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
     async def _get_internal_ai_json_response(
         self,
         prompt_messages: List[Dict[str, Any]],
-        json_schema: Dict[str, Any],
         task_description: str,
         model: Optional[str] = None,
         temperature: float = 0.7,
-        max_tokens: int = 500
+        max_tokens: int = 500,
+        response_format: Optional[Dict[str, Any]] = None, # Structured output spec (e.g., a json_schema block) passed through to the API
+        json_schema: Optional[Dict[str, Any]] = None # Now optional; kept for backward compatibility with callers that only pass a schema
     ) -> Optional[Dict[str, Any]]:
         """
         Makes an AI call expecting a specific JSON response format for internal tasks,
@@ -4694,11 +4695,12 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.

        Args:
            prompt_messages: The list of messages forming the prompt.
            json_schema: The JSON schema the AI response should conform to.
            task_description: A description of the task for logging/headers.
            model: The specific model to use (defaults to GurtCog's default).
            temperature: The temperature for the AI call.
-            max_tokens: The maximum tokens for the response.
+            response_format: Optional dictionary defining the structured output format (e.g., for JSON schema).
+            max_tokens: The maximum tokens for the response.

        Returns:
            The parsed JSON dictionary if successful, None otherwise.
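
Note: with the new signature, an internal caller builds an OpenAI-style response_format and passes it alongside the prompt. A minimal sketch of such a call (the prompt, schema, and task name are illustrative inventions, not from this commit; only the parameter names come from the diff above):

    example_schema = {
        "type": "object",
        "properties": {"summary": {"type": "string"}},
        "required": ["summary"],
        "additionalProperties": False
    }
    result = await self._get_internal_ai_json_response(
        prompt_messages=[{"role": "user", "content": "Summarize the channel mood."}],
        task_description="Example Internal Task",
        response_format={
            "type": "json_schema",
            "json_schema": {"name": "example_summary", "strict": True, "schema": example_schema}
        }
    )
    # result is the parsed dict ({"summary": "..."}) on success, or None on failure.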
@@ -4712,25 +4714,35 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
         payload = {} # Initialize payload in outer scope for finally block

         try:
-            # Add final instruction for JSON format
-            json_format_instruction = json.dumps(json_schema, indent=2)
+            # Add a final instruction pinning the response to raw JSON (still important for the AI).
+            # If response_format carries a json_schema, embed that schema in the instruction;
+            # otherwise fall back to a generic "raw JSON only" note, since this function
+            # always expects a JSON reply.
+            json_instruction_content = "**CRITICAL: Your response MUST consist *only* of the raw JSON object itself.**"
+            if response_format and response_format.get("type") == "json_schema":
+                schema_for_prompt = response_format.get("json_schema", {}).get("schema", {})
+                if schema_for_prompt:
+                    json_format_instruction = json.dumps(schema_for_prompt, indent=2)
+                    json_instruction_content = f"**CRITICAL: Your response MUST consist *only* of the raw JSON object itself, matching this schema:**\n```json\n{json_format_instruction}\n```\n**Ensure nothing precedes or follows the JSON.**"

             prompt_messages.append({
                 "role": "user",
-                "content": f"**CRITICAL: Your response MUST consist *only* of the raw JSON object itself, matching this schema:**\n```json\n{json_format_instruction}\n```\n**Ensure nothing precedes or follows the JSON.**"
+                "content": json_instruction_content
             })

             payload = {
                 "model": model or self.default_model,
                 "messages": prompt_messages,
                 "temperature": temperature,
-                "max_tokens": max_tokens,
+                "max_tokens": max_tokens
                 # No tools needed for this specific internal call type usually
-                # "response_format": { # Potentially use if model supports schema enforcement without tools
-                #     "type": "json_object", # Or "json_schema" if supported
-                #     # "json_schema": json_schema # If using json_schema type
-                # }
             }

+            # Add response_format to payload if provided
+            if response_format:
+                payload["response_format"] = response_format

             headers = {
                 "Content-Type": "application/json",
                 "Authorization": f"Bearer {self.api_key}",
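
For reference, when response_format is supplied, the payload assembled above ends up shaped roughly as follows (a sketch reusing the illustrative example_schema from the earlier note; which fields the endpoint honors depends on the OpenAI-compatible API being called):

    payload = {
        "model": "openai/o4-mini-high",  # whatever `model or self.default_model` resolves to
        "messages": [
            # ... the original prompt messages ...
            {"role": "user", "content": "**CRITICAL: ... matching this schema:** ..."},  # appended instruction
        ],
        "temperature": 0.7,
        "max_tokens": 500,
        "response_format": {  # only present when the caller passed one
            "type": "json_schema",
            "json_schema": {"name": "example_summary", "strict": True, "schema": example_schema},
        },
    }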
@@ -216,20 +216,68 @@ Current State:
         if current_state.get('avatar_image_data'):
             image_prompt_part = "\n(Current avatar image data is provided below)" # Text hint for the AI

-        # Define the expected JSON structure for the AI's response
+        # Define the JSON schema for the AI's response content
         response_schema_json = {
-            "should_update": "boolean (true if you want to change anything, false otherwise)",
-            "updates": {
-                "avatar_query": "string | null (Search query for a new avatar, e.g., 'Kasane Teto fanart', or null)",
-                "new_bio": "string | null (The new bio text, or null)",
-                "role_theme": "string | null (A theme for role selection, e.g., 'cool color roles', 'anime fan roles', or null)",
-                "new_activity": {
-                    "type": "string | null (Activity type: 'playing', 'watching', 'listening', 'competing')",
-                    "text": "string | null (The activity text)"
-                } # Can be null if no activity change
-            }
-        }
-        json_format_instruction = json.dumps(response_schema_json, indent=2)
+            "type": "object",
+            "properties": {
+                "should_update": {
+                    "type": "boolean",
+                    "description": "True if you want to change anything, false otherwise"
+                },
+                "reasoning": {
+                    "type": "string",
+                    "description": "Your reasoning for the decision and chosen updates (or lack thereof)."
+                },
+                "updates": {
+                    "type": "object",
+                    "properties": {
+                        "avatar_query": {
+                            "type": ["string", "null"],
+                            "description": "Search query for a new avatar, e.g., 'Kasane Teto fanart', or null"
+                        },
+                        "new_bio": {
+                            "type": ["string", "null"],
+                            "description": "The new bio text, or null"
+                        },
+                        "role_theme": {
+                            "type": ["string", "null"],
+                            "description": "A theme for role selection, e.g., 'cool color roles', 'anime fan roles', or null"
+                        },
+                        "new_activity": {
+                            "type": ["object", "null"],
+                            "properties": {
+                                "type": {
+                                    "type": ["string", "null"],
+                                    "enum": ["playing", "watching", "listening", "competing", None],
+                                    "description": "Activity type: 'playing', 'watching', 'listening', 'competing', or null"
+                                },
+                                "text": {
+                                    "type": ["string", "null"],
+                                    "description": "The activity text, or null"
+                                }
+                            },
+                            # If new_activity is not null, both type and text should ideally be present,
+                            # but allow nulls within the object for flexibility if the AI omits one.
+                            # The handling logic in _update_activity already checks for nulls.
+                        }
+                    },
+                    # No required fields within 'updates' itself, as any part can be null.
+                }
+            },
+            "required": ["should_update", "reasoning", "updates"],
+            "additionalProperties": False # Enforce strictness at the schema level too
+        }
+        json_format_instruction = json.dumps(response_schema_json, indent=2) # For the prompt
+
+        # Define the payload for the response_format parameter
+        response_format_payload = {
+            "type": "json_schema",
+            "json_schema": {
+                "name": "profile_update_decision",
+                "strict": True, # Enforce strict adherence to the schema
+                "schema": response_schema_json
+            }
+        }

         # Construct the full prompt message list for the AI
         prompt_messages = [
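
The commit leans on the API's strict mode to enforce this schema; nothing in the diff validates it client-side. If local checking were wanted, the same dict works with the third-party jsonschema package, as in this sketch (the sample response is invented, and response_schema_json is assumed in scope from the hunk above):

    from jsonschema import validate, ValidationError

    sample = {
        "should_update": True,
        "reasoning": "The bio feels stale; refresh it and advertise a listening activity.",
        "updates": {
            "avatar_query": None,
            "new_bio": "certified gurt moment",
            "role_theme": None,
            "new_activity": {"type": "listening", "text": "vocaloid"},
        },
    }

    try:
        validate(instance=sample, schema=response_schema_json)  # raises on mismatch
        print("sample conforms to response_schema_json")
    except ValidationError as e:
        print(f"schema violation: {e.message}")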
@@ -253,20 +301,22 @@ Current State:
         try:
-            # Need a way to call GurtCog's core AI logic directly
-            # This might require refactoring GurtCog or adding a dedicated method
-            # Call the internal AI method from GurtCog
+            # Call the internal AI method from GurtCog, specifying the model and structured output format
             result_json = await self.gurt_cog._get_internal_ai_json_response(
                 prompt_messages=prompt_messages,
                 json_schema=response_schema_json, # Use the schema defined earlier
+                model="openai/o4-mini-high", # Use the specified OpenAI model
+                response_format=response_format_payload, # Enforce structured output
                 task_description="Profile Update Decision",
-                temperature=0.5 # Lowered temperature for better instruction following
+                temperature=0.5 # Keep temperature for some creativity
             )

             if result_json and isinstance(result_json, dict):
-                # Basic validation of the received structure
-                if "should_update" in result_json and "updates" in result_json:
+                # Basic validation of the received structure (now includes reasoning)
+                if "should_update" in result_json and "updates" in result_json and "reasoning" in result_json:
+                    print(f"ProfileUpdaterTask: AI Reasoning: {result_json.get('reasoning', 'N/A')}") # Log the reasoning
                     return result_json
                 else:
-                    print(f"ProfileUpdaterTask: AI response missing required keys. Response: {result_json}")
+                    print(f"ProfileUpdaterTask: AI response missing required keys (should_update, updates, reasoning). Response: {result_json}")
                     return None
             else:
                 print(f"ProfileUpdaterTask: AI response was not a dictionary. Response: {result_json}")
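
How the validated dictionary is applied is outside this diff. A rough sketch of a consumer (only _update_activity is referenced in the diff's comments; _update_bio and the decision variable are hypothetical names):

    # `decision` is the dict returned by the call above, already validated.
    if decision and decision.get("should_update"):
        updates = decision.get("updates", {})
        if updates.get("new_bio") is not None:
            await self._update_bio(updates["new_bio"])  # hypothetical helper
        if updates.get("new_activity") is not None:
            await self._update_activity(updates["new_activity"])  # helper named in the diff's comments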