From 285e44a7c637509aa23d7b930dbedd630cba7634 Mon Sep 17 00:00:00 2001
From: Codex
Date: Fri, 6 Jun 2025 18:45:22 +0000
Subject: [PATCH] Fix moderation timeout button name

---
 cogs/aimod.py | 818 +++++++++++++-------------------------------------
 1 file changed, 203 insertions(+), 615 deletions(-)

diff --git a/cogs/aimod.py b/cogs/aimod.py
index 0813184..7f4305e 100644
--- a/cogs/aimod.py
+++ b/cogs/aimod.py
@@ -36,9 +36,7 @@ DEFAULT_VERTEX_AI_MODEL = "gemini-2.5-flash-preview-05-20"  # Example Vertex AI
 
 # Define standard safety settings using google.generativeai types
 STANDARD_SAFETY_SETTINGS = [
-    types.SafetySetting(
-        category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold="BLOCK_NONE"
-    ),
+    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold="BLOCK_NONE"),
     types.SafetySetting(
         category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
         threshold="BLOCK_NONE",
@@ -47,9 +45,7 @@ STANDARD_SAFETY_SETTINGS = [
         category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
         threshold="BLOCK_NONE",
     ),
-    types.SafetySetting(
-        category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold="BLOCK_NONE"
-    ),
+    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold="BLOCK_NONE"),
 ]
 
 # Environment variable for the authorization secret (still used for other API calls)
@@ -79,9 +75,7 @@ except Exception as e:
 # Initialize User Infractions
 if not os.path.exists(USER_INFRACTIONS_PATH):
     with open(USER_INFRACTIONS_PATH, "w", encoding="utf-8") as f:
-        json.dump(
-            {}, f
-        )  # Stores infractions as { "guild_id_user_id": [infraction_list] }
+        json.dump({}, f)  # Stores infractions as { "guild_id_user_id": [infraction_list] }
 try:
     with open(USER_INFRACTIONS_PATH, "r", encoding="utf-8") as f:
         USER_INFRACTIONS = json.load(f)
@@ -236,13 +230,9 @@ class AIModerationCog(commands.Cog):
                     "AIModerationCog: PROJECT_ID or LOCATION not found in config. Google GenAI Client not initialized."
) except Exception as e: - print( - f"AIModerationCog: Error initializing Google GenAI Client for Vertex AI: {e}" - ) + print(f"AIModerationCog: Error initializing Google GenAI Client for Vertex AI: {e}") - self.last_ai_decisions = collections.deque( - maxlen=5 - ) # Store last 5 AI decisions + self.last_ai_decisions = collections.deque(maxlen=5) # Store last 5 AI decisions self.config_lock = CONFIG_LOCK # Supported image file extensions self.image_extensions = [ @@ -265,9 +255,7 @@ class AIModerationCog(commands.Cog): ".mkv", ".flv", ] # Expanded list - self.backup_task = self.bot.loop.create_task( - self.backup_infractions_periodically() - ) + self.backup_task = self.bot.loop.create_task(self.backup_infractions_periodically()) print("AIModerationCog Initialized.") class QuickActionView(discord.ui.View): @@ -295,21 +283,13 @@ class AIModerationCog(commands.Cog): async def on_submit(self, interaction: discord.Interaction): if not interaction.user.guild_permissions.ban_members: - await interaction.response.send_message( - "You lack permission to ban members.", ephemeral=True - ) + await interaction.response.send_message("You lack permission to ban members.", ephemeral=True) return try: - await self.view.target.ban( - reason=self.reason.value or "Escalated via mod panel" - ) - await interaction.response.send_message( - f"Banned {self.view.target.mention}.", ephemeral=True - ) + await self.view.target.ban(reason=self.reason.value or "Escalated via mod panel") + await interaction.response.send_message(f"Banned {self.view.target.mention}.", ephemeral=True) except Exception as e: # noqa: BLE001 - await interaction.response.send_message( - f"Failed to ban: {e}", ephemeral=True - ) + await interaction.response.send_message(f"Failed to ban: {e}", ephemeral=True) self.view.disable_all_items() if self.view.message: await self.view.message.edit(view=self.view) @@ -329,21 +309,13 @@ class AIModerationCog(commands.Cog): async def on_submit(self, interaction: discord.Interaction): if not interaction.user.guild_permissions.kick_members: - await interaction.response.send_message( - "You lack permission to kick members.", ephemeral=True - ) + await interaction.response.send_message("You lack permission to kick members.", ephemeral=True) return try: - await self.view.target.kick( - reason=self.reason.value or "Escalated via mod panel" - ) - await interaction.response.send_message( - f"Kicked {self.view.target.mention}.", ephemeral=True - ) + await self.view.target.kick(reason=self.reason.value or "Escalated via mod panel") + await interaction.response.send_message(f"Kicked {self.view.target.mention}.", ephemeral=True) except Exception as e: # noqa: BLE001 - await interaction.response.send_message( - f"Failed to kick: {e}", ephemeral=True - ) + await interaction.response.send_message(f"Failed to kick: {e}", ephemeral=True) self.view.disable_all_items() if self.view.message: await self.view.message.edit(view=self.view) @@ -388,9 +360,7 @@ class AIModerationCog(commands.Cog): async def on_submit(self, interaction: discord.Interaction): if not interaction.user.guild_permissions.moderate_members: - await interaction.response.send_message( - "You lack permission to timeout members.", ephemeral=True - ) + await interaction.response.send_message("You lack permission to timeout members.", ephemeral=True) return delta = self.parse_duration(self.duration.value) if not delta or delta > datetime.timedelta(days=28): @@ -401,83 +371,57 @@ class AIModerationCog(commands.Cog): return try: until = discord.utils.utcnow() + 
delta - await self.view.target.timeout( - until, reason=self.reason.value or "Escalated via mod panel" - ) + await self.view.target.timeout(until, reason=self.reason.value or "Escalated via mod panel") await interaction.response.send_message( f"Timed out {self.view.target.mention} for {self.duration.value}.", ephemeral=True, ) except Exception as e: # noqa: BLE001 - await interaction.response.send_message( - f"Failed to timeout: {e}", ephemeral=True - ) + await interaction.response.send_message(f"Failed to timeout: {e}", ephemeral=True) self.view.disable_all_items() if self.view.message: await self.view.message.edit(view=self.view) @discord.ui.button(label="Escalate Ban", style=discord.ButtonStyle.danger) - async def escalate( - self, interaction: discord.Interaction, button: discord.ui.Button - ): + async def escalate(self, interaction: discord.Interaction, button: discord.ui.Button): if not interaction.user.guild_permissions.ban_members: - await interaction.response.send_message( - "You lack permission to ban members.", ephemeral=True - ) + await interaction.response.send_message("You lack permission to ban members.", ephemeral=True) return self.message = interaction.message await interaction.response.send_modal(self.BanModal(self)) @discord.ui.button(label="Kick", style=discord.ButtonStyle.primary) - async def kick( - self, interaction: discord.Interaction, button: discord.ui.Button - ): + async def kick(self, interaction: discord.Interaction, button: discord.ui.Button): if not interaction.user.guild_permissions.kick_members: - await interaction.response.send_message( - "You lack permission to kick members.", ephemeral=True - ) + await interaction.response.send_message("You lack permission to kick members.", ephemeral=True) return self.message = interaction.message await interaction.response.send_modal(self.KickModal(self)) @discord.ui.button(label="Timeout", style=discord.ButtonStyle.secondary) - async def timeout( - self, interaction: discord.Interaction, button: discord.ui.Button - ): + async def timeout_action(self, interaction: discord.Interaction, button: discord.ui.Button): if not interaction.user.guild_permissions.moderate_members: - await interaction.response.send_message( - "You lack permission to timeout members.", ephemeral=True - ) + await interaction.response.send_message("You lack permission to timeout members.", ephemeral=True) return self.message = interaction.message await interaction.response.send_modal(self.TimeoutModal(self)) @discord.ui.button(label="Ignore", style=discord.ButtonStyle.secondary) - async def ignore( - self, interaction: discord.Interaction, button: discord.ui.Button - ): + async def ignore(self, interaction: discord.Interaction, button: discord.ui.Button): if interaction.user.guild_permissions.manage_messages: await interaction.message.delete() - await interaction.response.send_message( - "Notification dismissed.", ephemeral=True - ) + await interaction.response.send_message("Notification dismissed.", ephemeral=True) else: - await interaction.response.send_message( - "No permission to manage messages.", ephemeral=True - ) + await interaction.response.send_message("No permission to manage messages.", ephemeral=True) async def cog_load(self): """Called when the cog is loaded.""" print("AIModerationCog cog_load started.") if not self.genai_client: print("\n" + "=" * 60) - print( - "=== WARNING: AIModerationCog - Vertex AI Client not initialized! ===" - ) + print("=== WARNING: AIModerationCog - Vertex AI Client not initialized! 
===") print("=== The Moderation Cog requires a valid Vertex AI setup. ===") - print( - f"=== Check PROJECT_ID and LOCATION in gurt.config and GCP authentication. ===" - ) + print(f"=== Check PROJECT_ID and LOCATION in gurt.config and GCP authentication. ===") print("=" * 60 + "\n") else: print("AIModerationCog: Vertex AI Client seems to be initialized.") @@ -498,9 +442,7 @@ class AIModerationCog(commands.Cog): await self.bot.wait_until_ready() while not self.bot.is_closed(): timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S") - backup_path = os.path.join( - INFRACTION_BACKUP_DIR, f"user_infractions_{timestamp}.json" - ) + backup_path = os.path.join(INFRACTION_BACKUP_DIR, f"user_infractions_{timestamp}.json") try: shutil.copy(USER_INFRACTIONS_PATH, backup_path) except Exception as e: # noqa: BLE001 @@ -520,9 +462,7 @@ class AIModerationCog(commands.Cog): try: # Download the image image_bytes = await attachment.read() - mime_type = ( - attachment.content_type or "image/jpeg" - ) # Default to jpeg if not specified + mime_type = attachment.content_type or "image/jpeg" # Default to jpeg if not specified # Return the image bytes and mime type return mime_type, image_bytes @@ -562,9 +502,7 @@ class AIModerationCog(commands.Cog): print(f"Error processing GIF: {e}") return None, None - async def process_attachment( - self, attachment: discord.Attachment - ) -> tuple[str, bytes, str]: + async def process_attachment(self, attachment: discord.Attachment) -> tuple[str, bytes, str]: """ Process any attachment and return the appropriate image data. @@ -650,9 +588,7 @@ class AIModerationCog(commands.Cog): return None, None # --- AI Moderation Command Group --- - aimod_group = app_commands.Group( - name="aimod", description="AI Moderation commands." - ) + aimod_group = app_commands.Group(name="aimod", description="AI Moderation commands.") config_subgroup = app_commands.Group( name="config", description="Configure AI moderation settings.", @@ -685,61 +621,35 @@ class AIModerationCog(commands.Cog): async with CONFIG_LOCK: global GUILD_CONFIG GUILD_CONFIG = json.loads(data) - async with aiofiles.open( - USER_INFRACTIONS_PATH, "r", encoding="utf-8" - ) as f2: + async with aiofiles.open(USER_INFRACTIONS_PATH, "r", encoding="utf-8") as f2: data2 = await f2.read() async with CONFIG_LOCK: global USER_INFRACTIONS USER_INFRACTIONS = json.loads(data2) - await interaction.response.send_message( - "Configuration synced from disk.", ephemeral=True - ) + await interaction.response.send_message("Configuration synced from disk.", ephemeral=True) except Exception as e: # noqa: BLE001 - await interaction.response.send_message( - f"Failed to reload configuration: {e}", ephemeral=True - ) + await interaction.response.send_message(f"Failed to reload configuration: {e}", ephemeral=True) - @config_subgroup.command( - name="log_channel", description="Set the moderation log channel." 
- ) + @config_subgroup.command(name="log_channel", description="Set the moderation log channel.") @app_commands.describe(channel="The text channel to use for moderation logs.") @app_commands.checks.has_permissions(administrator=True) - async def modset_log_channel( - self, interaction: discord.Interaction, channel: discord.TextChannel - ): + async def modset_log_channel(self, interaction: discord.Interaction, channel: discord.TextChannel): await set_guild_config(interaction.guild.id, "MOD_LOG_CHANNEL_ID", channel.id) - await interaction.response.send_message( - f"Moderation log channel set to {channel.mention}.", ephemeral=False - ) + await interaction.response.send_message(f"Moderation log channel set to {channel.mention}.", ephemeral=False) - @config_subgroup.command( - name="suggestions_channel", description="Set the suggestions channel." - ) + @config_subgroup.command(name="suggestions_channel", description="Set the suggestions channel.") @app_commands.describe(channel="The text channel to use for suggestions.") @app_commands.checks.has_permissions(administrator=True) - async def modset_suggestions_channel( - self, interaction: discord.Interaction, channel: discord.TextChannel - ): - await set_guild_config( - interaction.guild.id, "SUGGESTIONS_CHANNEL_ID", channel.id - ) - await interaction.response.send_message( - f"Suggestions channel set to {channel.mention}.", ephemeral=False - ) + async def modset_suggestions_channel(self, interaction: discord.Interaction, channel: discord.TextChannel): + await set_guild_config(interaction.guild.id, "SUGGESTIONS_CHANNEL_ID", channel.id) + await interaction.response.send_message(f"Suggestions channel set to {channel.mention}.", ephemeral=False) - @config_subgroup.command( - name="moderator_role", description="Set the moderator role." 
- ) + @config_subgroup.command(name="moderator_role", description="Set the moderator role.") @app_commands.describe(role="The role that identifies moderators.") @app_commands.checks.has_permissions(administrator=True) - async def modset_moderator_role( - self, interaction: discord.Interaction, role: discord.Role - ): + async def modset_moderator_role(self, interaction: discord.Interaction, role: discord.Role): await set_guild_config(interaction.guild.id, "MODERATOR_ROLE_ID", role.id) - await interaction.response.send_message( - f"Moderator role set to {role.mention}.", ephemeral=False - ) + await interaction.response.send_message(f"Moderator role set to {role.mention}.", ephemeral=False) @config_subgroup.command( name="suicidal_ping_role", @@ -747,13 +657,9 @@ class AIModerationCog(commands.Cog): ) @app_commands.describe(role="The role to ping for urgent suicidal content alerts.") @app_commands.checks.has_permissions(administrator=True) - async def modset_suicidal_ping_role( - self, interaction: discord.Interaction, role: discord.Role - ): + async def modset_suicidal_ping_role(self, interaction: discord.Interaction, role: discord.Role): await set_guild_config(interaction.guild.id, "SUICIDAL_PING_ROLE_ID", role.id) - await interaction.response.send_message( - f"Suicidal content ping role set to {role.mention}.", ephemeral=False - ) + await interaction.response.send_message(f"Suicidal content ping role set to {role.mention}.", ephemeral=False) @config_subgroup.command( name="add_nsfw_channel", @@ -761,17 +667,13 @@ class AIModerationCog(commands.Cog): ) @app_commands.describe(channel="The text channel to mark as NSFW for the bot.") @app_commands.checks.has_permissions(administrator=True) - async def modset_add_nsfw_channel( - self, interaction: discord.Interaction, channel: discord.TextChannel - ): + async def modset_add_nsfw_channel(self, interaction: discord.Interaction, channel: discord.TextChannel): guild_id = interaction.guild.id nsfw_channels: list[int] = get_guild_config(guild_id, "NSFW_CHANNEL_IDS", []) if channel.id not in nsfw_channels: nsfw_channels.append(channel.id) await set_guild_config(guild_id, "NSFW_CHANNEL_IDS", nsfw_channels) - await interaction.response.send_message( - f"{channel.mention} added to NSFW channels list.", ephemeral=False - ) + await interaction.response.send_message(f"{channel.mention} added to NSFW channels list.", ephemeral=False) else: await interaction.response.send_message( f"{channel.mention} is already in the NSFW channels list.", @@ -784,9 +686,7 @@ class AIModerationCog(commands.Cog): ) @app_commands.describe(channel="The text channel to remove from the NSFW list.") @app_commands.checks.has_permissions(administrator=True) - async def modset_remove_nsfw_channel( - self, interaction: discord.Interaction, channel: discord.TextChannel - ): + async def modset_remove_nsfw_channel(self, interaction: discord.Interaction, channel: discord.TextChannel): guild_id = interaction.guild.id nsfw_channels: list[int] = get_guild_config(guild_id, "NSFW_CHANNEL_IDS", []) if channel.id in nsfw_channels: @@ -809,9 +709,7 @@ class AIModerationCog(commands.Cog): guild_id = interaction.guild.id nsfw_channel_ids: list[int] = get_guild_config(guild_id, "NSFW_CHANNEL_IDS", []) if not nsfw_channel_ids: - await interaction.response.send_message( - "No NSFW channels are currently configured.", ephemeral=False - ) + await interaction.response.send_message("No NSFW channels are currently configured.", ephemeral=False) return channel_mentions = [] @@ -889,14 +787,10 @@ class 
AIModerationCog(commands.Cog): description="View a user's AI moderation infraction history (mod/admin only).", ) @app_commands.describe(user="The user to view infractions for") - async def viewinfractions( - self, interaction: discord.Interaction, user: discord.Member - ): + async def viewinfractions(self, interaction: discord.Interaction, user: discord.Member): # Check if user has permission (admin or moderator role) moderator_role_id = get_guild_config(interaction.guild.id, "MODERATOR_ROLE_ID") - moderator_role = ( - interaction.guild.get_role(moderator_role_id) if moderator_role_id else None - ) + moderator_role = interaction.guild.get_role(moderator_role_id) if moderator_role_id else None has_permission = interaction.user.guild_permissions.administrator or ( moderator_role and moderator_role in interaction.user.roles @@ -913,9 +807,7 @@ class AIModerationCog(commands.Cog): infractions = get_user_infraction_history(interaction.guild.id, user.id) if not infractions: - await interaction.response.send_message( - f"{user.mention} has no recorded infractions.", ephemeral=False - ) + await interaction.response.send_message(f"{user.mention} has no recorded infractions.", ephemeral=False) return # Create an embed to display the infractions @@ -927,9 +819,7 @@ class AIModerationCog(commands.Cog): # Add each infraction to the embed for i, infraction in enumerate(infractions, 1): - timestamp = infraction.get("timestamp", "Unknown date")[:19].replace( - "T", " " - ) # Format ISO timestamp + timestamp = infraction.get("timestamp", "Unknown date")[:19].replace("T", " ") # Format ISO timestamp rule = infraction.get("rule_violated", "Unknown rule") action = infraction.get("action_taken", "Unknown action") reason = infraction.get("reasoning", "No reason provided") @@ -954,14 +844,10 @@ class AIModerationCog(commands.Cog): description="Clear a user's AI moderation infraction history (admin only).", ) @app_commands.describe(user="The user to clear infractions for") - async def clearinfractions( - self, interaction: discord.Interaction, user: discord.Member - ): + async def clearinfractions(self, interaction: discord.Interaction, user: discord.Member): # Check if user has administrator permission if not interaction.user.guild_permissions.administrator: - await interaction.response.send_message( - "You must be an administrator to use this command.", ephemeral=True - ) + await interaction.response.send_message("You must be an administrator to use this command.", ephemeral=True) return # Get the user's infraction history @@ -995,9 +881,7 @@ class AIModerationCog(commands.Cog): uid = int(key.split("_", 1)[1]) counts[uid] = len(infractions) if not counts: - await interaction.response.send_message( - "No infractions recorded for this guild.", ephemeral=True - ) + await interaction.response.send_message("No infractions recorded for this guild.", ephemeral=True) return sorted_users = sorted(counts.items(), key=lambda x: x[1])[:5] lines = [] @@ -1030,26 +914,16 @@ class AIModerationCog(commands.Cog): async with CONFIG_LOCK: global USER_INFRACTIONS USER_INFRACTIONS = json.loads(data) - await interaction.response.send_message( - f"Infractions restored from {backups[-1]}", ephemeral=False - ) + await interaction.response.send_message(f"Infractions restored from {backups[-1]}", ephemeral=False) except Exception as e: # noqa: BLE001 - await interaction.response.send_message( - f"Failed to restore infractions: {e}", ephemeral=True - ) + await interaction.response.send_message(f"Failed to restore infractions: {e}", 
ephemeral=True) - @model_subgroup.command( - name="set", description="Change the AI model used for moderation (admin only)." - ) - @app_commands.describe( - model="The Vertex AI model to use (e.g., 'gemini-1.5-flash-001', 'gemini-1.0-pro')" - ) + @model_subgroup.command(name="set", description="Change the AI model used for moderation (admin only).") + @app_commands.describe(model="The Vertex AI model to use (e.g., 'gemini-1.5-flash-001', 'gemini-1.0-pro')") async def modsetmodel(self, interaction: discord.Interaction, model: str): # Check if user has administrator permission if not interaction.user.guild_permissions.administrator: - await interaction.response.send_message( - "You must be an administrator to use this command.", ephemeral=True - ) + await interaction.response.send_message("You must be an administrator to use this command.", ephemeral=True) return # Validate the model name (basic validation for Vertex AI) @@ -1076,9 +950,7 @@ class AIModerationCog(commands.Cog): # @modsetmodel.autocomplete('model') # Autocomplete removed as OpenRouter models are not used. # async def modsetmodel_autocomplete(...): # This function is now removed. - @model_subgroup.command( - name="get", description="View the current AI model used for moderation." - ) + @model_subgroup.command(name="get", description="View the current AI model used for moderation.") async def modgetmodel(self, interaction: discord.Interaction): # Get the model from guild config, fall back to global default guild_id = interaction.guild.id @@ -1091,17 +963,13 @@ class AIModerationCog(commands.Cog): color=discord.Color.blue(), ) embed.add_field(name="Model In Use", value=f"`{model_used}`", inline=False) - embed.add_field( - name="Default Model", value=f"`{DEFAULT_VERTEX_AI_MODEL}`", inline=False - ) + embed.add_field(name="Default Model", value=f"`{DEFAULT_VERTEX_AI_MODEL}`", inline=False) embed.set_footer(text="Use /aimod model set to change the model") await interaction.response.send_message(embed=embed, ephemeral=False) # --- Helper Function to Safely Extract Text from Vertex AI Response --- - def _get_response_text( - self, response: Optional[types.GenerateContentResponse] - ) -> Optional[str]: + def _get_response_text(self, response: Optional[types.GenerateContentResponse]) -> Optional[str]: """ Safely extracts the text content from the first text part of a GenerateContentResponse. Handles potential errors and lack of text parts gracefully. @@ -1111,26 +979,18 @@ class AIModerationCog(commands.Cog): print("[AIModerationCog._get_response_text] Received None response object.") return None - if ( - hasattr(response, "text") and response.text - ): # Some simpler responses might have .text directly - print( - "[AIModerationCog._get_response_text] Found text directly in response.text attribute." - ) + if hasattr(response, "text") and response.text: # Some simpler responses might have .text directly + print("[AIModerationCog._get_response_text] Found text directly in response.text attribute.") return response.text if not response.candidates: - print( - f"[AIModerationCog._get_response_text] Response object has no candidates. Response: {response}" - ) + print(f"[AIModerationCog._get_response_text] Response object has no candidates. Response: {response}") return None try: candidate = response.candidates[0] if not hasattr(candidate, "content") or not candidate.content: - print( - f"[AIModerationCog._get_response_text] Candidate 0 has no 'content'. 
Candidate: {candidate}" - ) + print(f"[AIModerationCog._get_response_text] Candidate 0 has no 'content'. Candidate: {candidate}") return None if not hasattr(candidate.content, "parts") or not candidate.content.parts: print( @@ -1141,9 +1001,7 @@ class AIModerationCog(commands.Cog): for i, part in enumerate(candidate.content.parts): if hasattr(part, "text") and part.text is not None: if isinstance(part.text, str) and part.text.strip(): - print( - f"[AIModerationCog._get_response_text] Found non-empty text in part {i}." - ) + print(f"[AIModerationCog._get_response_text] Found non-empty text in part {i}.") return part.text else: print( @@ -1155,15 +1013,11 @@ class AIModerationCog(commands.Cog): return None except (AttributeError, IndexError, TypeError) as e: - print( - f"[AIModerationCog._get_response_text] Error accessing response structure: {type(e).__name__}: {e}" - ) + print(f"[AIModerationCog._get_response_text] Error accessing response structure: {type(e).__name__}: {e}") print(f"Problematic response object: {response}") return None except Exception as e: - print( - f"[AIModerationCog._get_response_text] Unexpected error extracting text: {e}" - ) + print(f"[AIModerationCog._get_response_text] Unexpected error extracting text: {e}") print(f"Response object during error: {response}") return None @@ -1187,9 +1041,7 @@ class AIModerationCog(commands.Cog): Returns: A dictionary containing the AI's decision, or None if an error occurs. """ - print( - f"query_vertex_ai called. Vertex AI client available: {self.genai_client is not None}" - ) + print(f"query_vertex_ai called. Vertex AI client available: {self.genai_client is not None}") if not self.genai_client: print("Error: Vertex AI Client is not available. Cannot query API.") return None @@ -1316,12 +1168,7 @@ Example Response (Notify Mods): server_role_str = "Admin" else: perms = member.guild_permissions - if ( - perms.manage_messages - or perms.kick_members - or perms.ban_members - or perms.moderate_members - ): + if perms.manage_messages or perms.kick_members or perms.ban_members or perms.moderate_members: server_role_str = "Moderator" print(f"role: {server_role_str}") @@ -1330,22 +1177,18 @@ Example Response (Notify Mods): replied_to_message_content = "N/A (Not a reply)" if message.reference and message.reference.message_id: try: - replied_to_msg = await message.channel.fetch_message( - message.reference.message_id + replied_to_msg = await message.channel.fetch_message(message.reference.message_id) + replied_to_message_content = ( + f"User '{replied_to_msg.author.name}' said: \"{replied_to_msg.content[:200]}\"" ) - replied_to_message_content = f"User '{replied_to_msg.author.name}' said: \"{replied_to_msg.content[:200]}\"" if len(replied_to_msg.content) > 200: replied_to_message_content += "..." except discord.NotFound: replied_to_message_content = "N/A (Replied-to message not found)" except discord.Forbidden: - replied_to_message_content = ( - "N/A (Cannot fetch replied-to message - permissions)" - ) + replied_to_message_content = "N/A (Cannot fetch replied-to message - permissions)" except Exception as e: - replied_to_message_content = ( - f"N/A (Error fetching replied-to message: {e})" - ) + replied_to_message_content = f"N/A (Error fetching replied-to message: {e})" # --- Fetch Recent Channel History --- recent_channel_history_str = "N/A (Could not fetch history)" @@ -1353,30 +1196,18 @@ Example Response (Notify Mods): history_messages = [] # Fetch last 11 messages (current + 10 previous). 
We'll filter out the current one async for prev_msg in message.channel.history(limit=11, before=message): - if ( - prev_msg.id != message.id - ): # Ensure we don't include the current message itself - author_name = ( - prev_msg.author.name + " (BOT)" - if prev_msg.author.bot - else prev_msg.author.name - ) + if prev_msg.id != message.id: # Ensure we don't include the current message itself + author_name = prev_msg.author.name + " (BOT)" if prev_msg.author.bot else prev_msg.author.name history_messages.append( f"- {author_name}: \"{prev_msg.content[:150]}{'...' if len(prev_msg.content) > 150 else ''}\" (ID: {prev_msg.id})" ) if history_messages: # Reverse to show oldest first in the snippet, then take the last 10. - recent_channel_history_str = "\n".join( - list(reversed(history_messages))[:10] - ) + recent_channel_history_str = "\n".join(list(reversed(history_messages))[:10]) else: - recent_channel_history_str = ( - "No recent messages before this one in the channel." - ) + recent_channel_history_str = "No recent messages before this one in the channel." except discord.Forbidden: - recent_channel_history_str = ( - "N/A (Cannot fetch channel history - permissions)" - ) + recent_channel_history_str = "N/A (Cannot fetch channel history - permissions)" except Exception as e: recent_channel_history_str = f"N/A (Error fetching channel history: {e})" @@ -1415,9 +1246,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. # Add images in the proper OpenRouter format if image_data_list and len(image_data_list) > 0: try: - for i, (mime_type, image_bytes, attachment_type, filename) in enumerate( - image_data_list - ): + for i, (mime_type, image_bytes, attachment_type, filename) in enumerate(image_data_list): try: # Encode image to base64 base64_image = base64.b64encode(image_bytes).decode("utf-8") @@ -1425,17 +1254,11 @@ CRITICAL: Do NOT output anything other than the required JSON response. image_data_url = f"data:{mime_type};base64,{base64_image}" # Add image in OpenRouter format - user_prompt_content_list.append( - {"type": "image_url", "image_url": {"url": image_data_url}} - ) + user_prompt_content_list.append({"type": "image_url", "image_url": {"url": image_data_url}}) - print( - f"Added attachment #{i+1}: {filename} ({attachment_type}) to the prompt" - ) + print(f"Added attachment #{i+1}: {filename} ({attachment_type}) to the prompt") except Exception as e: - print( - f"Error encoding image data for attachment {filename}: {e}" - ) + print(f"Error encoding image data for attachment {filename}: {e}") except Exception as e: print(f"Error processing image data: {e}") # Add a text note about the error @@ -1455,20 +1278,13 @@ CRITICAL: Do NOT output anything other than the required JSON response. server_role_str = "Admin" else: perms = member.guild_permissions - if ( - perms.manage_messages - or perms.kick_members - or perms.ban_members - or perms.moderate_members - ): + if perms.manage_messages or perms.kick_members or perms.ban_members or perms.moderate_members: server_role_str = "Moderator" replied_to_message_content = "N/A (Not a reply)" if message.reference and message.reference.message_id: try: - replied_to_msg = await message.channel.fetch_message( - message.reference.message_id - ) + replied_to_msg = await message.channel.fetch_message(message.reference.message_id) replied_to_message_content = f"User '{replied_to_msg.author.name}' said: \"{replied_to_msg.content[:200]}{'...' 
if len(replied_to_msg.content) > 200 else ''}\"" except Exception as e: replied_to_message_content = f"N/A (Error fetching replied-to: {e})" @@ -1481,9 +1297,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. if prev_msg.id != message.id ] recent_channel_history_str = ( - "\n".join(list(reversed(history_messages))[:10]) - if history_messages - else "No recent messages." + "\n".join(list(reversed(history_messages))[:10]) if history_messages else "No recent messages." ) except Exception as e: recent_channel_history_str = f"N/A (Error fetching history: {e})" @@ -1533,18 +1347,13 @@ CRITICAL: Do NOT output anything other than the required JSON response. ] clean_mime_type = mime_type.split(";")[0].lower() - if ( - clean_mime_type in supported_image_mimes - or attachment_type == "video" - ): # Video frame is jpeg + if clean_mime_type in supported_image_mimes or attachment_type == "video": # Video frame is jpeg vertex_parts.append( types.Part( inline_data=types.Blob( data=image_bytes, mime_type=( - clean_mime_type - if clean_mime_type in supported_image_mimes - else "image/jpeg" + clean_mime_type if clean_mime_type in supported_image_mimes else "image/jpeg" ), ) ) @@ -1553,9 +1362,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. f"Added attachment {filename} ({attachment_type} as {clean_mime_type if clean_mime_type in supported_image_mimes else 'image/jpeg'}) to Vertex prompt" ) else: - print( - f"Skipping attachment {filename} due to unsupported MIME type for Vertex: {mime_type}" - ) + print(f"Skipping attachment {filename} due to unsupported MIME type for Vertex: {mime_type}") vertex_parts.append( types.Part( text=f"[System Note: Attachment '{filename}' of type '{mime_type}' was not processed as it's not directly supported for vision by the current model configuration.]" @@ -1563,17 +1370,11 @@ CRITICAL: Do NOT output anything other than the required JSON response. ) except Exception as e: print(f"Error processing attachment {filename} for Vertex AI: {e}") - vertex_parts.append( - types.Part( - text=f"[System Note: Error processing attachment '{filename}'.]" - ) - ) + vertex_parts.append(types.Part(text=f"[System Note: Error processing attachment '{filename}'.]")) # Get guild-specific model if configured, otherwise use default guild_id = message.guild.id - model_id_to_use = get_guild_config( - guild_id, "AI_MODEL", DEFAULT_VERTEX_AI_MODEL - ) + model_id_to_use = get_guild_config(guild_id, "AI_MODEL", DEFAULT_VERTEX_AI_MODEL) # Vertex model path is usually like "publishers/google/models/gemini-1.5-flash-001" # If model_id_to_use is just "gemini-1.5-flash-001", prepend "publishers/google/models/" if not model_id_to_use.startswith("publishers/google/models/"): @@ -1581,9 +1382,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. else: model_path = model_id_to_use - thinking_config = types.ThinkingConfig( - thinking_budget=0 # Example manual thinking budget - ) + thinking_config = types.ThinkingConfig(thinking_budget=0) # Example manual thinking budget generation_config = types.GenerateContentConfig( temperature=0.2, @@ -1623,9 +1422,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. 
temperature=generation_config.temperature, # from existing config max_output_tokens=generation_config.max_output_tokens, # from existing config safety_settings=generation_config.safety_settings, # from existing config - system_instruction=types.Content( - role="system", parts=[types.Part(text=system_prompt_text)] - ), + system_instruction=types.Content(role="system", parts=[types.Part(text=system_prompt_text)]), thinking_config=generation_config.thinking_config, # from existing config # response_mime_type="application/json", # Consider if model supports this for forcing JSON ) @@ -1643,23 +1440,12 @@ CRITICAL: Do NOT output anything other than the required JSON response. if not ai_response_content: print("Error: AI response content is empty or could not be extracted.") # Log safety ratings if available - if ( - response - and response.candidates - and response.candidates[0].safety_ratings - ): + if response and response.candidates and response.candidates[0].safety_ratings: ratings = ", ".join( - [ - f"{r.category.name}: {r.probability.name}" - for r in response.candidates[0].safety_ratings - ] + [f"{r.category.name}: {r.probability.name}" for r in response.candidates[0].safety_ratings] ) print(f"Safety Ratings: {ratings}") - if ( - response - and response.candidates - and response.candidates[0].finish_reason - ): + if response and response.candidates and response.candidates[0].finish_reason: print(f"Finish Reason: {response.candidates[0].finish_reason.name}") return None @@ -1667,23 +1453,16 @@ CRITICAL: Do NOT output anything other than the required JSON response. try: # Clean potential markdown code blocks if ai_response_content.startswith("```json"): - ai_response_content = ai_response_content.strip("```json\n").strip( - "`\n " - ) + ai_response_content = ai_response_content.strip("```json\n").strip("`\n ") elif ai_response_content.startswith("```"): - ai_response_content = ai_response_content.strip("```\n").strip( - "`\n " - ) + ai_response_content = ai_response_content.strip("```\n").strip("`\n ") ai_decision = json.loads(ai_response_content) # Basic validation of the parsed JSON structure if ( not isinstance(ai_decision, dict) - or not all( - k in ai_decision - for k in ["violation", "rule_violated", "reasoning", "action"] - ) + or not all(k in ai_decision for k in ["violation", "rule_violated", "reasoning", "action"]) or not isinstance(ai_decision.get("violation"), bool) ): print( @@ -1695,23 +1474,17 @@ CRITICAL: Do NOT output anything other than the required JSON response. return ai_decision except json.JSONDecodeError as e: - print( - f"Error: Could not decode JSON response from AI: {e}. Response: {ai_response_content}" - ) + print(f"Error: Could not decode JSON response from AI: {e}. Response: {ai_response_content}") return None except Exception as e: # Catch other parsing errors - print( - f"Error parsing AI response structure: {e}. Response: {ai_response_content}" - ) + print(f"Error parsing AI response structure: {e}. Response: {ai_response_content}") return None except google_exceptions.GoogleAPICallError as e: print(f"Error calling Vertex AI API: {e}") return None except Exception as e: - print( - f"An unexpected error occurred during Vertex AI query for message {message.id}: {e}" - ) + print(f"An unexpected error occurred during Vertex AI query for message {message.id}: {e}") return None async def handle_violation( @@ -1729,21 +1502,13 @@ CRITICAL: Do NOT output anything other than the required JSON response. 
rule_violated = ai_decision.get("rule_violated", "Unknown") reasoning = ai_decision.get("reasoning", "No reasoning provided.") - action = ai_decision.get( - "action", "NOTIFY_MODS" - ).upper() # Default to notify mods + action = ai_decision.get("action", "NOTIFY_MODS").upper() # Default to notify mods guild_id = message.guild.id # Get guild_id once user_id = message.author.id # Get user_id once moderator_role_id = get_guild_config(guild_id, "MODERATOR_ROLE_ID") - moderator_role = ( - message.guild.get_role(moderator_role_id) if moderator_role_id else None - ) - mod_ping = ( - moderator_role.mention - if moderator_role - else f"Moderators (Role ID {moderator_role_id} not found)" - ) + moderator_role = message.guild.get_role(moderator_role_id) if moderator_role_id else None + mod_ping = moderator_role.mention if moderator_role else f"Moderators (Role ID {moderator_role_id} not found)" current_timestamp_iso = datetime.datetime.now(datetime.timezone.utc).isoformat() @@ -1769,9 +1534,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. "rule_violated": rule_violated, "reasoning": reasoning, "violation": ai_decision.get("violation", False), - "message_content": ( - message.content[:1024] if message.content else "" - ), + "message_content": (message.content[:1024] if message.content else ""), "full_message_content": message.content if message.content else "", "ai_model": model_used, "result": "pending_system_action", # Indicates AI decision received, system action pending @@ -1781,15 +1544,11 @@ CRITICAL: Do NOT output anything other than the required JSON response. "Content-Type": "application/json", } async with aiohttp.ClientSession() as http_session: # Renamed session to avoid conflict - async with http_session.post( - post_url, headers=headers, json=payload, timeout=10 - ) as resp: + async with http_session.post(post_url, headers=headers, json=payload, timeout=10) as resp: # This payload is just for the initial AI decision log # The actual outcome will be logged after the action is performed if resp.status >= 400: - print( - f"Failed to POST initial AI decision log: {resp.status}" - ) + print(f"Failed to POST initial AI decision log: {resp.status}") else: print("MOD_LOG_API_SECRET not set; skipping initial action POST.") except Exception as e: @@ -1806,18 +1565,10 @@ CRITICAL: Do NOT output anything other than the required JSON response. value=f"{message.author.mention} (`{message.author.id}`)", inline=False, ) - notification_embed.add_field( - name="Channel", value=message.channel.mention, inline=False - ) - notification_embed.add_field( - name="Rule Violated", value=f"**Rule {rule_violated}**", inline=True - ) - notification_embed.add_field( - name="AI Suggested Action", value=f"`{action}`", inline=True - ) - notification_embed.add_field( - name="AI Reasoning", value=f"_{reasoning}_", inline=False - ) + notification_embed.add_field(name="Channel", value=message.channel.mention, inline=False) + notification_embed.add_field(name="Rule Violated", value=f"**Rule {rule_violated}**", inline=True) + notification_embed.add_field(name="AI Suggested Action", value=f"`{action}`", inline=True) + notification_embed.add_field(name="AI Reasoning", value=f"_{reasoning}_", inline=False) notification_embed.add_field( name="Message Link", value=f"[Jump to Message]({message.jump_url})", @@ -1825,9 +1576,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. 
) # Log message content and attachments for audit purposes msg_content = message.content if message.content else "*No text content*" - notification_embed.add_field( - name="Message Content", value=msg_content[:1024], inline=False - ) + notification_embed.add_field(name="Message Content", value=msg_content[:1024], inline=False) # Add attachment information if present if message.attachments: @@ -1837,67 +1586,39 @@ CRITICAL: Do NOT output anything other than the required JSON response. f"{i+1}. {attachment.filename} ({attachment.content_type}) - [Link]({attachment.url})" ) attachment_text = "\n".join(attachment_info) - notification_embed.add_field( - name="Attachments", value=attachment_text[:1024], inline=False - ) + notification_embed.add_field(name="Attachments", value=attachment_text[:1024], inline=False) # Add the first image as a thumbnail if it's an image type for attachment in message.attachments: if any( attachment.filename.lower().endswith(ext) - for ext in self.image_extensions - + self.gif_extensions - + self.video_extensions + for ext in self.image_extensions + self.gif_extensions + self.video_extensions ): notification_embed.set_thumbnail(url=attachment.url) break # Use the model_used variable that was defined earlier - notification_embed.set_footer( - text=f"AI Model: {model_used}. Learnhelp AI Moderation." - ) - notification_embed.timestamp = ( - discord.utils.utcnow() - ) # Using discord.utils.utcnow() which is still supported + notification_embed.set_footer(text=f"AI Model: {model_used}. Learnhelp AI Moderation.") + notification_embed.timestamp = discord.utils.utcnow() # Using discord.utils.utcnow() which is still supported action_taken_message = "" # To append to the notification testing_mode = get_guild_config(guild_id, "TESTING_MODE", False) if testing_mode: - action_taken_message = ( - f"[TEST MODE] Would have taken action `{action}`. No changes made." - ) + action_taken_message = f"[TEST MODE] Would have taken action `{action}`. No changes made." notification_embed.color = discord.Color.greyple() log_channel_id = get_guild_config(message.guild.id, "MOD_LOG_CHANNEL_ID") - log_channel = ( - self.bot.get_channel(log_channel_id) - if log_channel_id - else message.channel - ) + log_channel = self.bot.get_channel(log_channel_id) if log_channel_id else message.channel if action == "SUICIDAL": - suicidal_role_id = get_guild_config( - message.guild.id, "SUICIDAL_PING_ROLE_ID" - ) - suicidal_role = ( - message.guild.get_role(suicidal_role_id) - if suicidal_role_id - else None - ) + suicidal_role_id = get_guild_config(message.guild.id, "SUICIDAL_PING_ROLE_ID") + suicidal_role = message.guild.get_role(suicidal_role_id) if suicidal_role_id else None ping_target = ( - suicidal_role.mention - if suicidal_role - else f"Role ID {suicidal_role_id} (Suicidal Content)" + suicidal_role.mention if suicidal_role else f"Role ID {suicidal_role_id} (Suicidal Content)" ) if not suicidal_role: print(f"ERROR: Suicidal ping role ID {suicidal_role_id} not found.") final_message = f"{ping_target}\n{action_taken_message}" else: - suggestions_id = get_guild_config( - message.guild.id, "SUGGESTIONS_CHANNEL_ID" - ) - suggestion_note = ( - f"\nPlease review <#{suggestions_id}> for rule updates." - if suggestions_id - else "" - ) + suggestions_id = get_guild_config(message.guild.id, "SUGGESTIONS_CHANNEL_ID") + suggestion_note = f"\nPlease review <#{suggestions_id}> for rule updates." 
if suggestions_id else "" final_message = f"{mod_ping}\n{action_taken_message}{suggestion_note}" await log_channel.send( content=final_message, @@ -1909,28 +1630,18 @@ CRITICAL: Do NOT output anything other than the required JSON response. # --- Perform Actions --- try: if action == "BAN": - action_taken_message = ( - f"Action Taken: User **BANNED** and message deleted." - ) + action_taken_message = f"Action Taken: User **BANNED** and message deleted." notification_embed.color = discord.Color.dark_red() try: await message.delete() except discord.NotFound: print("Message already deleted before banning.") except discord.Forbidden: - print( - f"WARNING: Missing permissions to delete message before banning user {message.author}." - ) - action_taken_message += ( - " (Failed to delete message - check permissions)" - ) + print(f"WARNING: Missing permissions to delete message before banning user {message.author}.") + action_taken_message += " (Failed to delete message - check permissions)" ban_reason = f"AI Mod: Rule {rule_violated}. Reason: {reasoning}" - await message.guild.ban( - message.author, reason=ban_reason, delete_message_days=1 - ) - print( - f"BANNED user {message.author} for violating rule {rule_violated}." - ) + await message.guild.ban(message.author, reason=ban_reason, delete_message_days=1) + print(f"BANNED user {message.author} for violating rule {rule_violated}.") await add_user_infraction( guild_id, user_id, @@ -1941,28 +1652,18 @@ CRITICAL: Do NOT output anything other than the required JSON response. ) elif action == "KICK": - action_taken_message = ( - f"Action Taken: User **KICKED** and message deleted." - ) - notification_embed.color = discord.Color.from_rgb( - 255, 127, 0 - ) # Dark Orange + action_taken_message = f"Action Taken: User **KICKED** and message deleted." + notification_embed.color = discord.Color.from_rgb(255, 127, 0) # Dark Orange try: await message.delete() except discord.NotFound: print("Message already deleted before kicking.") except discord.Forbidden: - print( - f"WARNING: Missing permissions to delete message before kicking user {message.author}." - ) - action_taken_message += ( - " (Failed to delete message - check permissions)" - ) + print(f"WARNING: Missing permissions to delete message before kicking user {message.author}.") + action_taken_message += " (Failed to delete message - check permissions)" kick_reason = f"AI Mod: Rule {rule_violated}. Reason: {reasoning}" await message.author.kick(reason=kick_reason) - print( - f"KICKED user {message.author} for violating rule {rule_violated}." - ) + print(f"KICKED user {message.author} for violating rule {rule_violated}.") await add_user_infraction( guild_id, user_id, @@ -1986,29 +1687,22 @@ CRITICAL: Do NOT output anything other than the required JSON response. duration_readable = "1 day" if duration_seconds > 0: - action_taken_message = f"Action Taken: User **TIMED OUT for {duration_readable}** and message deleted." + action_taken_message = ( + f"Action Taken: User **TIMED OUT for {duration_readable}** and message deleted." + ) notification_embed.color = discord.Color.blue() try: await message.delete() except discord.NotFound: - print( - f"Message already deleted before timeout for {message.author}." - ) + print(f"Message already deleted before timeout for {message.author}.") except discord.Forbidden: - print( - f"WARNING: Missing permissions to delete message before timeout for {message.author}." 
- ) - action_taken_message += ( - " (Failed to delete message - check permissions)" - ) + print(f"WARNING: Missing permissions to delete message before timeout for {message.author}.") + action_taken_message += " (Failed to delete message - check permissions)" - timeout_reason = ( - f"AI Mod: Rule {rule_violated}. Reason: {reasoning}" - ) + timeout_reason = f"AI Mod: Rule {rule_violated}. Reason: {reasoning}" # discord.py timeout takes a timedelta object await message.author.timeout( - discord.utils.utcnow() - + datetime.timedelta(seconds=duration_seconds), + discord.utils.utcnow() + datetime.timedelta(seconds=duration_seconds), reason=timeout_reason, ) print( @@ -2023,35 +1717,23 @@ CRITICAL: Do NOT output anything other than the required JSON response. current_timestamp_iso, ) else: - action_taken_message = ( - "Action Taken: **Unknown timeout duration, notifying mods.**" - ) - action = ( - "NOTIFY_MODS" # Fallback if timeout duration is not recognized - ) - print( - f"Unknown timeout duration for action {action}. Defaulting to NOTIFY_MODS." - ) + action_taken_message = "Action Taken: **Unknown timeout duration, notifying mods.**" + action = "NOTIFY_MODS" # Fallback if timeout duration is not recognized + print(f"Unknown timeout duration for action {action}. Defaulting to NOTIFY_MODS.") elif action == "DELETE": action_taken_message = f"Action Taken: Message **DELETED**." await message.delete() - print( - f"DELETED message from {message.author} for violating rule {rule_violated}." - ) + print(f"DELETED message from {message.author} for violating rule {rule_violated}.") # Typically, a simple delete isn't a formal infraction unless it's part of a WARN. # If you want to log deletes as infractions, add: # add_user_infraction(guild_id, user_id, rule_violated, "DELETE", reasoning, current_timestamp_iso) elif action == "WARN": - action_taken_message = ( - f"Action Taken: Message **DELETED** (AI suggested WARN)." - ) + action_taken_message = f"Action Taken: Message **DELETED** (AI suggested WARN)." notification_embed.color = discord.Color.orange() await message.delete() # Warnings usually involve deleting the offending message - print( - f"DELETED message from {message.author} (AI suggested WARN for rule {rule_violated})." - ) + print(f"DELETED message from {message.author} (AI suggested WARN for rule {rule_violated}).") try: dm_channel = await message.author.create_dm() await dm_channel.send( @@ -2060,9 +1742,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. ) action_taken_message += " User notified via DM with warning." except discord.Forbidden: - print( - f"Could not DM warning to {message.author} (DMs likely disabled)." - ) + print(f"Could not DM warning to {message.author} (DMs likely disabled).") action_taken_message += " (Could not DM user for warning)." except Exception as e: print(f"Error sending warning DM to {message.author}: {e}") @@ -2079,9 +1759,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. elif action == "NOTIFY_MODS": action_taken_message = "Action Taken: **Moderator review requested.**" notification_embed.color = discord.Color.gold() - print( - f"Notifying moderators about potential violation (Rule {rule_violated}) by {message.author}." - ) + print(f"Notifying moderators about potential violation (Rule {rule_violated}) by {message.author}.") # NOTIFY_MODS itself isn't an infraction on the user, but a request for human review. # If mods take action, they would log it manually or via a mod command. 
if notify_mods_message: @@ -2092,57 +1770,43 @@ CRITICAL: Do NOT output anything other than the required JSON response. ) elif action == "SUICIDAL": - action_taken_message = ( - "Action Taken: **User DMed resources, relevant role notified.**" - ) + action_taken_message = "Action Taken: **User DMed resources, relevant role notified.**" # No infraction is typically logged for "SUICIDAL" as it's a support action. notification_embed.title = "🚨 Suicidal Content Detected 🚨" - notification_embed.color = ( - discord.Color.dark_purple() - ) # A distinct color + notification_embed.color = discord.Color.dark_purple() # A distinct color notification_embed.description = "AI analysis detected content indicating potential suicidal ideation." - print( - f"SUICIDAL content detected from {message.author}. DMing resources and notifying role." - ) + print(f"SUICIDAL content detected from {message.author}. DMing resources and notifying role.") # DM the user with help resources try: dm_channel = await message.author.create_dm() await dm_channel.send(SUICIDAL_HELP_RESOURCES) action_taken_message += " User successfully DMed." except discord.Forbidden: - print( - f"Could not DM suicidal help resources to {message.author} (DMs likely disabled)." - ) + print(f"Could not DM suicidal help resources to {message.author} (DMs likely disabled).") action_taken_message += " (Could not DM user - DMs disabled)." except Exception as e: - print( - f"Error sending suicidal help resources DM to {message.author}: {e}" - ) + print(f"Error sending suicidal help resources DM to {message.author}: {e}") action_taken_message += f" (Error DMing user: {e})." # The message itself is usually not deleted for suicidal content, to allow for intervention. # If deletion is desired, add: await message.delete() here. else: # Includes "IGNORE" or unexpected actions - if ai_decision.get( - "violation" - ): # If violation is true but action is IGNORE - action_taken_message = "Action Taken: **None** (AI suggested IGNORE despite flagging violation - Review Recommended)." + if ai_decision.get("violation"): # If violation is true but action is IGNORE + action_taken_message = ( + "Action Taken: **None** (AI suggested IGNORE despite flagging violation - Review Recommended)." + ) notification_embed.color = discord.Color.light_grey() print( f"AI flagged violation ({rule_violated}) but suggested IGNORE for message by {message.author}. Notifying mods for review." ) else: # This case shouldn't be reached if called correctly, but handle defensively - print( - f"No action taken for message by {message.author} (AI Action: {action}, Violation: False)" - ) + print(f"No action taken for message by {message.author} (AI Action: {action}, Violation: False)") return # Don't notify if no violation and action is IGNORE # --- Send Notification to Moderators/Relevant Role --- log_channel_id = get_guild_config(message.guild.id, "MOD_LOG_CHANNEL_ID") - log_channel = ( - self.bot.get_channel(log_channel_id) if log_channel_id else None - ) + log_channel = self.bot.get_channel(log_channel_id) if log_channel_id else None if not log_channel: print( f"ERROR: Moderation log channel (ID: {log_channel_id}) not found or not configured. Defaulting to message channel." @@ -2155,18 +1819,10 @@ CRITICAL: Do NOT output anything other than the required JSON response. 
return if action == "SUICIDAL": - suicidal_role_id = get_guild_config( - message.guild.id, "SUICIDAL_PING_ROLE_ID" - ) - suicidal_role = ( - message.guild.get_role(suicidal_role_id) - if suicidal_role_id - else None - ) + suicidal_role_id = get_guild_config(message.guild.id, "SUICIDAL_PING_ROLE_ID") + suicidal_role = message.guild.get_role(suicidal_role_id) if suicidal_role_id else None ping_target = ( - suicidal_role.mention - if suicidal_role - else f"Role ID {suicidal_role_id} (Suicidal Content)" + suicidal_role.mention if suicidal_role else f"Role ID {suicidal_role_id} (Suicidal Content)" ) if not suicidal_role: print(f"ERROR: Suicidal ping role ID {suicidal_role_id} not found.") @@ -2177,14 +1833,8 @@ CRITICAL: Do NOT output anything other than the required JSON response. view=self.QuickActionView(self, message.author), ) elif moderator_role: # For other violations - suggestions_id = get_guild_config( - message.guild.id, "SUGGESTIONS_CHANNEL_ID" - ) - suggestion_note = ( - f"\nPlease review <#{suggestions_id}> for rule updates." - if suggestions_id - else "" - ) + suggestions_id = get_guild_config(message.guild.id, "SUGGESTIONS_CHANNEL_ID") + suggestion_note = f"\nPlease review <#{suggestions_id}> for rule updates." if suggestions_id else "" final_message = f"{mod_ping}\n{action_taken_message}{suggestion_note}" await log_channel.send( content=final_message, @@ -2192,14 +1842,10 @@ CRITICAL: Do NOT output anything other than the required JSON response. view=self.QuickActionView(self, message.author), ) else: # Fallback if moderator role is also not found for non-suicidal actions - print( - f"ERROR: Moderator role ID {moderator_role_id} not found for action {action}." - ) + print(f"ERROR: Moderator role ID {moderator_role_id} not found for action {action}.") except discord.Forbidden as e: - print( - f"ERROR: Missing Permissions to perform action '{action}' for rule {rule_violated}. Details: {e}" - ) + print(f"ERROR: Missing Permissions to perform action '{action}' for rule {rule_violated}. Details: {e}") # Try to notify mods about the failure if moderator_role: try: @@ -2209,17 +1855,11 @@ CRITICAL: Do NOT output anything other than the required JSON response. f"Reasoning: _{reasoning}_\nMessage Link: {message.jump_url}" ) except discord.Forbidden: - print( - "FATAL: Bot lacks permission to send messages, even error notifications." - ) + print("FATAL: Bot lacks permission to send messages, even error notifications.") except discord.NotFound: - print( - f"Message {message.id} was likely already deleted when trying to perform action '{action}'." - ) + print(f"Message {message.id} was likely already deleted when trying to perform action '{action}'.") except Exception as e: - print( - f"An unexpected error occurred during action execution for message {message.id}: {e}" - ) + print(f"An unexpected error occurred during action execution for message {message.id}: {e}") # Try to notify mods about the unexpected error if moderator_role: try: @@ -2229,9 +1869,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. f"Rule: {rule_violated}, Action Attempted: {action}\nMessage Link: {message.jump_url}" ) except discord.Forbidden: - print( - "FATAL: Bot lacks permission to send messages, even error notifications." 
- ) + print("FATAL: Bot lacks permission to send messages, even error notifications.") @commands.Cog.listener(name="on_message") async def message_listener(self, message: discord.Message): @@ -2252,14 +1890,10 @@ CRITICAL: Do NOT output anything other than the required JSON response. return # Check if moderation is enabled for this guild if not get_guild_config(message.guild.id, "ENABLED", False): - print( - f"Moderation disabled for guild {message.guild.id}. Ignoring message {message.id}." - ) + print(f"Moderation disabled for guild {message.guild.id}. Ignoring message {message.id}.") return if get_guild_config(message.guild.id, "EVENT_MODE", False): - print( - f"Event mode enabled for guild {message.guild.id}. Ignoring message {message.id}." - ) + print(f"Event mode enabled for guild {message.guild.id}. Ignoring message {message.id}.") return # --- Suicidal Content Check --- @@ -2273,28 +1907,18 @@ CRITICAL: Do NOT output anything other than the required JSON response. if message.attachments: # Process all attachments for attachment in message.attachments: - mime_type, image_bytes, attachment_type = await self.process_attachment( - attachment - ) + mime_type, image_bytes, attachment_type = await self.process_attachment(attachment) if mime_type and image_bytes and attachment_type: - image_data_list.append( - (mime_type, image_bytes, attachment_type, attachment.filename) - ) - print( - f"Processed attachment: {attachment.filename} as {attachment_type}" - ) + image_data_list.append((mime_type, image_bytes, attachment_type, attachment.filename)) + print(f"Processed attachment: {attachment.filename} as {attachment_type}") # Log the number of attachments processed if image_data_list: - print( - f"Processed {len(image_data_list)} attachments for message {message.id}" - ) + print(f"Processed {len(image_data_list)} attachments for message {message.id}") # Only proceed with AI analysis if there's text to analyze or attachments if not message_content and not image_data_list: - print( - f"Ignoring message {message.id} with no content or valid attachments." - ) + print(f"Ignoring message {message.id} with no content or valid attachments.") return # NSFW channel check removed - AI will handle this context @@ -2302,9 +1926,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. # --- Call AI for Analysis (All Rules) --- # Check if the Vertex AI client is available if not self.genai_client: - print( - f"Skipping AI analysis for message {message.id}: Vertex AI client is not initialized." - ) + print(f"Skipping AI analysis for message {message.id}: Vertex AI client is not initialized.") return # Prepare user history for the AI @@ -2316,9 +1938,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. f"- Action: {infr.get('action_taken', 'N/A')} for Rule {infr.get('rule_violated', 'N/A')} on {infr.get('timestamp', 'N/A')[:10]}. Reason: {infr.get('reasoning', 'N/A')[:50]}..." ) user_history_summary = ( - "\n".join(history_summary_parts) - if history_summary_parts - else "No prior infractions recorded." + "\n".join(history_summary_parts) if history_summary_parts else "No prior infractions recorded." ) # Limit history summary length to prevent excessively long prompts @@ -2326,17 +1946,11 @@ CRITICAL: Do NOT output anything other than the required JSON response. if len(user_history_summary) > max_history_len: user_history_summary = user_history_summary[: max_history_len - 3] + "..." 
- print( - f"Analyzing message {message.id} from {message.author} in #{message.channel.name} with history..." - ) + print(f"Analyzing message {message.id} from {message.author} in #{message.channel.name} with history...") if image_data_list: attachment_types = [data[2] for data in image_data_list] - print( - f"Including {len(image_data_list)} attachments in analysis: {', '.join(attachment_types)}" - ) - ai_decision = await self.query_vertex_ai( - message, message_content, user_history_summary, image_data_list - ) + print(f"Including {len(image_data_list)} attachments in analysis: {', '.join(attachment_types)}") + ai_decision = await self.query_vertex_ai(message, message_content, user_history_summary, image_data_list) # --- Process AI Decision --- if not ai_decision: @@ -2349,13 +1963,9 @@ CRITICAL: Do NOT output anything other than the required JSON response. "author_name": str(message.author), "author_id": message.author.id, "message_content_snippet": ( - message.content[:100] + "..." - if len(message.content) > 100 - else message.content + message.content[:100] + "..." if len(message.content) > 100 else message.content ), - "timestamp": datetime.datetime.now( - datetime.timezone.utc - ).isoformat(), + "timestamp": datetime.datetime.now(datetime.timezone.utc).isoformat(), "ai_decision": { "error": "Failed to get valid AI decision", "raw_response": None, @@ -2371,9 +1981,7 @@ CRITICAL: Do NOT output anything other than the required JSON response. "author_name": str(message.author), "author_id": message.author.id, "message_content_snippet": ( - message.content[:100] + "..." - if len(message.content) > 100 - else message.content + message.content[:100] + "..." if len(message.content) > 100 else message.content ), "timestamp": datetime.datetime.now(datetime.timezone.utc).isoformat(), "ai_decision": ai_decision, @@ -2385,16 +1993,12 @@ CRITICAL: Do NOT output anything other than the required JSON response. # Handle the violation based on AI decision without overrides # Pass notify_mods_message if the action is NOTIFY_MODS notify_mods_message = ( - ai_decision.get("notify_mods_message") - if ai_decision.get("action") == "NOTIFY_MODS" - else None + ai_decision.get("notify_mods_message") if ai_decision.get("action") == "NOTIFY_MODS" else None ) await self.handle_violation(message, ai_decision, notify_mods_message) else: # AI found no violation - print( - f"AI analysis complete for message {message.id}. No violation detected." - ) + print(f"AI analysis complete for message {message.id}. No violation detected.") @debug_subgroup.command( name="last_decisions", @@ -2403,19 +2007,13 @@ CRITICAL: Do NOT output anything other than the required JSON response. 
     @app_commands.checks.has_permissions(administrator=True)
     async def aidebug_last_decisions(self, interaction: discord.Interaction):
         if not self.last_ai_decisions:
-            await interaction.response.send_message(
-                "No AI decisions have been recorded yet.", ephemeral=True
-            )
+            await interaction.response.send_message("No AI decisions have been recorded yet.", ephemeral=True)
             return

-        embed = discord.Embed(
-            title="Last 5 AI Moderation Decisions", color=discord.Color.purple()
-        )
+        embed = discord.Embed(title="Last 5 AI Moderation Decisions", color=discord.Color.purple())
         embed.timestamp = discord.utils.utcnow()

-        for i, record in enumerate(
-            reversed(list(self.last_ai_decisions))
-        ):  # Show newest first
+        for i, record in enumerate(reversed(list(self.last_ai_decisions))):  # Show newest first
             decision_info = record.get("ai_decision", {})
             violation = decision_info.get("violation", "N/A")
             rule_violated = decision_info.get("rule_violated", "N/A")
@@ -2448,31 +2046,21 @@ CRITICAL: Do NOT output anything other than the required JSON response.
                 value=field_value,
                 inline=False,
             )
-            if (
-                len(embed.fields) >= 5
-            ):  # Limit to 5 fields in one embed for very long entries, or send multiple embeds
+            if len(embed.fields) >= 5:  # Limit to 5 fields in one embed for very long entries, or send multiple embeds
                 break

         if not embed.fields:  # Should not happen if self.last_ai_decisions is not empty
-            await interaction.response.send_message(
-                "Could not format AI decisions.", ephemeral=True
-            )
+            await interaction.response.send_message("Could not format AI decisions.", ephemeral=True)
             return

         await interaction.response.send_message(embed=embed, ephemeral=True)

     @aidebug_last_decisions.error
-    async def aidebug_last_decisions_error(
-        self, interaction: discord.Interaction, error: app_commands.AppCommandError
-    ):
+    async def aidebug_last_decisions_error(self, interaction: discord.Interaction, error: app_commands.AppCommandError):
         if isinstance(error, app_commands.MissingPermissions):
-            await interaction.response.send_message(
-                "You must be an administrator to use this command.", ephemeral=True
-            )
+            await interaction.response.send_message("You must be an administrator to use this command.", ephemeral=True)
         else:
-            await interaction.response.send_message(
-                f"An error occurred: {error}", ephemeral=True
-            )
+            await interaction.response.send_message(f"An error occurred: {error}", ephemeral=True)
         print(f"Error in aidebug_last_decisions command: {error}")