refactor: Rename ModerationCog to AIModerationCog for clarity and consistency

This commit is contained in:
Slipstream 2025-06-01 17:43:02 -06:00
parent c0896a691d
commit 5442175679
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD

View File

@@ -179,7 +179,7 @@ Please reach out to one of these. We've also alerted our server's support team s
You matter, and help is available.
"""
class ModerationCog(commands.Cog):
class AIModerationCog(commands.Cog):
"""
A Discord Cog that uses Google Vertex AI to moderate messages based on server rules.
"""
@@ -193,11 +193,11 @@ class ModerationCog(commands.Cog):
project=PROJECT_ID,
location=LOCATION,
)
print(f"ModerationCog: Google GenAI Client initialized for Vertex AI project '{PROJECT_ID}' in location '{LOCATION}'.")
print(f"AIModerationCog: Google GenAI Client initialized for Vertex AI project '{PROJECT_ID}' in location '{LOCATION}'.")
else:
print("ModerationCog: PROJECT_ID or LOCATION not found in config. Google GenAI Client not initialized.")
print("AIModerationCog: PROJECT_ID or LOCATION not found in config. Google GenAI Client not initialized.")
except Exception as e:
print(f"ModerationCog: Error initializing Google GenAI Client for Vertex AI: {e}")
print(f"AIModerationCog: Error initializing Google GenAI Client for Vertex AI: {e}")
self.last_ai_decisions = collections.deque(maxlen=5) # Store last 5 AI decisions
# Supported image file extensions
@@ -206,20 +206,20 @@ class ModerationCog(commands.Cog):
self.gif_extensions = ['.gif']
# Supported video file extensions (Vertex AI typically processes first frame of videos as image)
self.video_extensions = ['.mp4', '.webm', '.mov', '.avi', '.mkv', '.flv'] # Expanded list
print("ModerationCog Initialized.")
print("AIModerationCog Initialized.")
async def cog_load(self):
"""Called when the cog is loaded."""
print("ModerationCog cog_load started.")
print("AIModerationCog cog_load started.")
if not self.genai_client:
print("\n" + "="*60)
print("=== WARNING: ModerationCog - Vertex AI Client not initialized! ===")
print("=== WARNING: AIModerationCog - Vertex AI Client not initialized! ===")
print("=== The Moderation Cog requires a valid Vertex AI setup. ===")
print(f"=== Check PROJECT_ID and LOCATION in gurt.config and GCP authentication. ===")
print("="*60 + "\n")
else:
print("ModerationCog: Vertex AI Client seems to be initialized.")
print("ModerationCog cog_load finished.")
print("AIModerationCog: Vertex AI Client seems to be initialized.")
print("AIModerationCog cog_load finished.")
# _load_openrouter_models is no longer needed.
@@ -227,7 +227,7 @@ class ModerationCog(commands.Cog):
"""Clean up when the cog is unloaded."""
# The genai.Client doesn't have an explicit close method in the same way aiohttp.ClientSession does.
# It typically manages its own resources.
print("ModerationCog Unloaded.")
print("AIModerationCog Unloaded.")
async def process_image(self, attachment: discord.Attachment) -> tuple[str, bytes]:
"""
@@ -584,42 +584,42 @@ class ModerationCog(commands.Cog):
(Adapted from teto_cog.py)
"""
if not response:
print("[ModerationCog._get_response_text] Received None response object.")
print("[AIModerationCog._get_response_text] Received None response object.")
return None
if hasattr(response, 'text') and response.text: # Some simpler responses might have .text directly
print("[ModerationCog._get_response_text] Found text directly in response.text attribute.")
print("[AIModerationCog._get_response_text] Found text directly in response.text attribute.")
return response.text
if not response.candidates:
print(f"[ModerationCog._get_response_text] Response object has no candidates. Response: {response}")
print(f"[AIModerationCog._get_response_text] Response object has no candidates. Response: {response}")
return None
try:
candidate = response.candidates[0]
if not hasattr(candidate, 'content') or not candidate.content:
print(f"[ModerationCog._get_response_text] Candidate 0 has no 'content'. Candidate: {candidate}")
print(f"[AIModerationCog._get_response_text] Candidate 0 has no 'content'. Candidate: {candidate}")
return None
if not hasattr(candidate.content, 'parts') or not candidate.content.parts:
print(f"[ModerationCog._get_response_text] Candidate 0 content has no 'parts' or parts list is empty. types.Content: {candidate.content}")
print(f"[AIModerationCog._get_response_text] Candidate 0 content has no 'parts' or parts list is empty. types.Content: {candidate.content}")
return None
for i, part in enumerate(candidate.content.parts):
if hasattr(part, 'text') and part.text is not None:
if isinstance(part.text, str) and part.text.strip():
print(f"[ModerationCog._get_response_text] Found non-empty text in part {i}.")
print(f"[AIModerationCog._get_response_text] Found non-empty text in part {i}.")
return part.text
else:
print(f"[ModerationCog._get_response_text] types.Part {i} has 'text' attribute, but it's empty or not a string: {part.text!r}")
print(f"[ModerationCog._get_response_text] No usable text part found in candidate 0 after iterating through all parts.")
print(f"[AIModerationCog._get_response_text] types.Part {i} has 'text' attribute, but it's empty or not a string: {part.text!r}")
print(f"[AIModerationCog._get_response_text] No usable text part found in candidate 0 after iterating through all parts.")
return None
except (AttributeError, IndexError, TypeError) as e:
print(f"[ModerationCog._get_response_text] Error accessing response structure: {type(e).__name__}: {e}")
print(f"[AIModerationCog._get_response_text] Error accessing response structure: {type(e).__name__}: {e}")
print(f"Problematic response object: {response}")
return None
except Exception as e:
print(f"[ModerationCog._get_response_text] Unexpected error extracting text: {e}")
print(f"[AIModerationCog._get_response_text] Unexpected error extracting text: {e}")
print(f"Response object during error: {response}")
return None
@@ -1614,10 +1614,10 @@ CRITICAL: Do NOT output anything other than the required JSON response.
# Setup function required by discord.py to load the cog
async def setup(bot: commands.Bot):
"""Loads the ModerationCog."""
"""Loads the AIModerationCog."""
# The API key is now fetched in cog_load, so we don't need to check here.
await bot.add_cog(ModerationCog(bot))
print("ModerationCog has been loaded.")
await bot.add_cog(AIModerationCog(bot))
print("AIModerationCog has been loaded.")
if __name__ == "__main__":
# Server rules to provide context to the AI