import os
import json
import asyncio

import aiofiles
from google.genai import types

# Vertex AI Configuration
DEFAULT_VERTEX_AI_MODEL = "gemini-2.5-flash-preview-05-20"

# Standard safety settings using google.genai types (all categories unblocked;
# moderation decisions are made by the prompt, not by API-level filters).
STANDARD_SAFETY_SETTINGS = [
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
        threshold="BLOCK_NONE",
    ),
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold="BLOCK_NONE",
    ),
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
        threshold="BLOCK_NONE",
    ),
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold="BLOCK_NONE",
    ),
]

GUILD_CONFIG_DIR = "data/"
GUILD_CONFIG_PATH = os.path.join(GUILD_CONFIG_DIR, "guild_config.json")
USER_INFRACTIONS_PATH = os.path.join(GUILD_CONFIG_DIR, "user_infractions.json")
INFRACTION_BACKUP_DIR = os.path.join(GUILD_CONFIG_DIR, "infraction_backups")
USER_APPEALS_PATH = os.path.join(GUILD_CONFIG_DIR, "user_appeals.json")

# AI model used for appeal reviews
APPEAL_AI_MODEL = "gemini-2.5-pro-preview-06-05"
# Thinking budget for the appeal AI
APPEAL_AI_THINKING_BUDGET = 32768

os.makedirs(GUILD_CONFIG_DIR, exist_ok=True)
os.makedirs(INFRACTION_BACKUP_DIR, exist_ok=True)

if not os.path.exists(GUILD_CONFIG_PATH):
    with open(GUILD_CONFIG_PATH, "w", encoding="utf-8") as f:
        json.dump({}, f)
try:
    with open(GUILD_CONFIG_PATH, "r", encoding="utf-8") as f:
        GUILD_CONFIG = json.load(f)
except Exception as e:  # noqa: BLE001
    print(f"Failed to load per-guild config from {GUILD_CONFIG_PATH}: {e}")
    GUILD_CONFIG = {}

if not os.path.exists(USER_INFRACTIONS_PATH):
    with open(USER_INFRACTIONS_PATH, "w", encoding="utf-8") as f:
        json.dump({}, f)
try:
    with open(USER_INFRACTIONS_PATH, "r", encoding="utf-8") as f:
        USER_INFRACTIONS = json.load(f)
except Exception as e:  # noqa: BLE001
    print(f"Failed to load user infractions from {USER_INFRACTIONS_PATH}: {e}")
    USER_INFRACTIONS = {}

if not os.path.exists(USER_APPEALS_PATH):
    with open(USER_APPEALS_PATH, "w", encoding="utf-8") as f:
        json.dump({}, f)
try:
    with open(USER_APPEALS_PATH, "r", encoding="utf-8") as f:
        USER_APPEALS = json.load(f)
except Exception as e:  # noqa: BLE001
    print(f"Failed to load user appeals from {USER_APPEALS_PATH}: {e}")
    USER_APPEALS = {}

# Single lock serializing all config/infraction/appeal writes.
CONFIG_LOCK = asyncio.Lock()


async def save_guild_config():
    async with CONFIG_LOCK:
        try:
            async with aiofiles.open(GUILD_CONFIG_PATH, "w", encoding="utf-8") as f:
                await f.write(json.dumps(GUILD_CONFIG, indent=2))
        except Exception as e:  # noqa: BLE001
            print(f"Failed to save per-guild config: {e}")


async def save_user_infractions():
    async with CONFIG_LOCK:
        try:
            async with aiofiles.open(USER_INFRACTIONS_PATH, "w", encoding="utf-8") as f:
                await f.write(json.dumps(USER_INFRACTIONS, indent=2))
        except Exception as e:  # noqa: BLE001
            print(f"Failed to save user infractions: {e}")


async def save_user_appeals():
    async with CONFIG_LOCK:
        try:
            async with aiofiles.open(USER_APPEALS_PATH, "w", encoding="utf-8") as f:
                await f.write(json.dumps(USER_APPEALS, indent=2))
        except Exception as e:  # noqa: BLE001
            print(f"Failed to save user appeals: {e}")


def get_guild_config(guild_id: int, key: str, default=None):
    guild_str = str(guild_id)
    if guild_str in GUILD_CONFIG and key in GUILD_CONFIG[guild_str]:
        return GUILD_CONFIG[guild_str][key]
    return default


async def set_guild_config(guild_id: int, key: str, value):
    guild_str = str(guild_id)
    if guild_str not in GUILD_CONFIG:
        GUILD_CONFIG[guild_str] = {}
    GUILD_CONFIG[guild_str][key] = value
    await save_guild_config()
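

# Illustrative only: a minimal sketch of how the config helpers above compose.
# This is an assumption for illustration, not an existing call site, and the
# "moderation_enabled" key is a hypothetical example key.
async def _example_toggle_moderation(guild_id: int) -> bool:
    """Flip a hypothetical per-guild flag and persist it via set_guild_config."""
    enabled = get_guild_config(guild_id, "moderation_enabled", default=False)
    await set_guild_config(guild_id, "moderation_enabled", not enabled)
    return not enabled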


def get_user_infraction_history(guild_id: int, user_id: int) -> list:
    """Return the recorded infractions for a user in a guild."""
    key = f"{guild_id}_{user_id}"
    return USER_INFRACTIONS.get(key, [])


async def add_user_infraction(
    guild_id: int,
    user_id: int,
    rule_violated: str,
    action_taken: str,
    reasoning: str,
    timestamp: str,
    message_id: int | None = None,
    channel_id: int | None = None,
    message_content: str | None = None,
    attachments: list[str] | None = None,
):
    """Record an infraction, keeping only the 10 most recent per user."""
    key = f"{guild_id}_{user_id}"
    if key not in USER_INFRACTIONS:
        USER_INFRACTIONS[key] = []
    infraction_record = {
        "timestamp": timestamp,
        "rule_violated": rule_violated,
        "action_taken": action_taken,
        "reasoning": reasoning,
    }
    if message_id is not None:
        infraction_record["message_id"] = message_id
    if channel_id is not None:
        infraction_record["channel_id"] = channel_id
    if message_content is not None:
        infraction_record["message_content"] = message_content
    if attachments:
        infraction_record["attachments"] = attachments
    USER_INFRACTIONS[key].append(infraction_record)
    # Cap the history at the 10 most recent records.
    USER_INFRACTIONS[key] = USER_INFRACTIONS[key][-10:]
    await save_user_infractions()


def get_user_appeals(guild_id: int, user_id: int) -> list:
    """Return the recorded appeals for a user in a guild."""
    key = f"{guild_id}_{user_id}"
    return USER_APPEALS.get(key, [])


async def add_user_appeal(
    guild_id: int,
    user_id: int,
    action: str,
    appeal_text: str,
    timestamp: str,
    ai_review: str,
    infraction_reference: str | None = None,
):
    """Record an appeal, keeping only the 10 most recent per user."""
    key = f"{guild_id}_{user_id}"
    if key not in USER_APPEALS:
        USER_APPEALS[key] = []
    appeal_record = {
        "timestamp": timestamp,
        "action": action,
        "appeal_text": appeal_text,
        "ai_review": ai_review,
        "infraction_reference": infraction_reference,
    }
    USER_APPEALS[key].append(appeal_record)
    # Cap the history at the 10 most recent records.
    USER_APPEALS[key] = USER_APPEALS[key][-10:]
    await save_user_appeals()


DEFAULT_SERVER_RULES = """
# Server Rules

* **NSFW Content:** The only rule regarding NSFW content is that **real-life pornography is strictly prohibited**. Full-on pornographic images are permitted in designated NSFW channels. Stickers and emojis are NOT considered "full-on pornographic images" and are allowed in any channel.
* **Real-Life Pornography:** No real-life pornography is permitted.
* **Respectful Conduct & Edgy Humor:**
    * No harassment, genuine hate speech (defined as attacking protected groups OR malicious, targeted use of slurs against an individual), or genuine bullying.
    * Slurs used in general banter or edgy humor are permissible. However, using slurs in a genuinely hateful, targeted manner against an individual or group is considered hate speech and is forbidden.
    * *Context is key:* Edgy humor, dark jokes, and roasting are permitted and expected.
    * However, this does not excuse targeted, malicious personal attacks or harassment (including the aforementioned misuse of slurs), especially if the recipient is clearly not okay with it.
    * If it stops being a "joke" and becomes genuine harassment or hate speech, it's a rule violation.
* **No Discrimination:** Discrimination based on race, gender identity, sexual orientation, religion, nationality, disability, or other protected characteristics is prohibited.
* **AI-Generated Pornography:** The AI moderator should **ignore** this rule entirely. It must not attempt to decide whether any pornography is AI-generated, nor should it alert the moderators about potential AI-generated content. Human moderators may enforce this rule manually if desired.
* **Pedophilia and Underage Content:** Depicting minors in sexual situations, including lolicon or shotacon, is not allowed and may result in severe moderation.
* **Channel Usage:** Please use channels for their intended purposes.
Bot commands should primarily be used in `#bot-commands`, unless they are part of a bot-based game or event happening in another specific channel.
* **Gore:** Do not post gore or graphic real-life violence.
* **Suggestions:** We welcome your suggestions for the server! Please post them in the `#suggestions` channel.

---

**Reporting Violations:** If you witness someone breaking these rules, please ping an `@Moderator` with details.

---

**Moderator Applications:** Use the bot command `/modapp apply`
"""

SERVER_RULES = DEFAULT_SERVER_RULES

DEFAULT_MODERATION_INSTRUCTIONS = """
Context Provided:
You will receive the following information to aid your analysis:
- User's Server Role: (e.g., "Server Owner", "Admin", "Moderator", "Member").
- Channel Category: The name of the category the channel belongs to.
- Channel Age-Restricted/NSFW (Discord Setting): Boolean (true/false).
- Replied-to Message: If the current message is a reply, the content of the original message will be provided. This is crucial for understanding direct interactions.
- Recent Channel History: The last few messages in the channel to understand the flow of conversation.
- Attached Media: If the message contains image, GIF, or video attachments, they will be provided directly in the content array for analysis.

Instructions:
1. Review the "Message Content" and any attached media against EACH rule, considering ALL provided context (User Role, Channel Info, Replied-to Message, Recent Channel History).
   - The "Channel Age-Restricted/NSFW (Discord Setting)" is the definitive indicator for NSFW content by Discord.
   - The "Channel Category" provides general context.
   - **"Replied-to Message" and "Recent Channel History" are vital for understanding banter, jokes, and ongoing discussions. A statement that seems offensive in isolation might be acceptable within the flow of conversation or as a direct reply.**
   - If images, GIFs, or videos are attached, analyze ALL of them for rule violations.
   - Pay special attention to images that may contain NSFW content, pornography, gore, or other prohibited visual content.
   - **Do not attempt to guess or estimate the ages of characters or people in images.** Only act on explicit textual context indicating they are minors.
   - If multiple attachments are present, a violation in ANY of them should be flagged.
2. Determine if ANY rule is violated. When evaluating, consider the server's culture, where **extremely edgy, dark, and sexual humor, including potentially offensive jokes (e.g., rape jokes, saying you want to be raped), is common and generally permissible IF IT IS CLEARLY A JOKE, part of established banter, or a direct non-malicious reply, and not targeted harassment or an explicit rule violation.**
   - **NSFW Content:** The only rule regarding NSFW content is that **real-life pornography is strictly prohibited**. Full-on pornographic images are permitted in designated NSFW channels. Stickers and emojis are NOT considered "full-on pornographic images" and are allowed in any channel.
   - **Completely ignore AI-generated pornography.** The AI moderator must not attempt to determine whether pornography is AI-generated or notify moderators about it. Only real-life pornography should be considered.
   - For general disrespectful behavior, harassment, or bullying (Rules 2 & 3): Only flag a violation if the intent appears **genuinely malicious, targeted, or serious, even after considering conversational history and replies.** Lighthearted insults or "wild" statements within ongoing banter are generally permissible.
   - For **explicit slurs or severe discriminatory language** (Rule 3): These are violations **regardless of joking intent if they are used in a targeted or hateful manner**. Context from replies and history is still important to assess targeting.
   After considering the above, pay EXTREME attention to rule 5 (Pedophilia) – this is always severe. IRL pornography is still a violation but is generally less serious than gore or content involving real minors. **Ignore any rules about AI-generated pornography.** Prioritize genuinely severe violations.
3. Respond ONLY with a single JSON object containing the following keys:
   - "reasoning": string (A concise explanation for your decision, referencing the specific rule and content.)
   - "violation": boolean (true if any rule is violated, false otherwise.)
   - "rule_violated": string (The number of the rule violated, e.g., "1", "5A", "None". If multiple rules are violated, state the MOST SEVERE one, prioritizing 5 > 5A > 3 > 2 > 1. Ignore any rules about AI-generated pornography.)
   - "action": string (Suggest ONE action from: "IGNORE", "WARN", "DELETE", "TIMEOUT_SHORT", "TIMEOUT_MEDIUM", "TIMEOUT_LONG", "KICK", "BAN", "NOTIFY_MODS", "SUICIDAL".)
   - "notify_mods_message": optional string (If the suggested action is "NOTIFY_MODS", provide an optional brief message here for the moderators, e.g., "User's message is slightly ambiguous, human review needed.")

   Consider the user's infraction history. If the user has prior infractions for similar or escalating behavior, suggest a more severe action than you would for a first-time minor offense.

   Progressive Discipline Guide (unless overridden by severity):
   - First minor offense: "WARN" (and "DELETE" if the content is removable, as under Rules 1/4).
   - Second minor offense / first moderate offense: "TIMEOUT_SHORT" (e.g., 10 minutes).
   - Repeated moderate offenses: "TIMEOUT_MEDIUM" (e.g., 1 hour).
   - Multiple/severe offenses: "TIMEOUT_LONG" (e.g., 1 day), "KICK", or "BAN".
   - Use "BAN" on a user's **first infraction only in extremely severe cases**, such as posting gore or unmistakable real-life CSAM involving minors. If the content appears animated or ambiguous, do **not** immediately ban; a timeout or moderator review is more appropriate.

   Spamming:
   - If a user continuously sends very long messages that are off-topic, repetitive, or appear to be meaningless spam (e.g., character floods, nonsensical text), suggest "TIMEOUT_MEDIUM" or "TIMEOUT_LONG" depending on severity and history, even if the content itself doesn't violate other specific rules. This is to maintain chat readability.

   Rule Severity Guidelines (use your judgment):
   - Consider the severity of each rule violation on its own merits.
   - Consider the user's history of past infractions when determining the appropriate action.
   - Consider the context of the message and channel when evaluating violations.
   - You have full discretion to determine the most appropriate action for any violation.
   Suicidal Content:
   If the message content expresses **clear, direct, and serious suicidal ideation, intent, planning, or recent attempts** (e.g., 'I am going to end my life and have a plan', 'I survived my attempt last night', 'I wish I hadn't woken up after trying'), ALWAYS use "SUICIDAL" as the action, set "violation" to true, and set "rule_violated" to "Suicidal Content".
   For casual, edgy, hyperbolic, or ambiguous statements like 'imma kms', 'just kill me now', or 'I want to die (lol)', or phrases that are clearly part of edgy humor/banter rather than a genuine cry for help, lean towards "IGNORE", or "NOTIFY_MODS" if there is slight ambiguity but no clear serious intent. **Do NOT flag 'imma kms' as "SUICIDAL" unless there is very strong supporting context indicating genuine, immediate, and serious intent.**

   If unsure but suspicious, or if the situation is complex: "NOTIFY_MODS".
   The default action for minor first-time rule violations should be "WARN" or "DELETE" (if applicable). Do not suggest "KICK" or "BAN" lightly; reserve them for severe or repeated major offenses.
   Timeout durations: TIMEOUT_SHORT (approx. 10 minutes), TIMEOUT_MEDIUM (approx. 1 hour), TIMEOUT_LONG (approx. 1 day to 1 week). The system will handle the exact timeout duration; you just suggest the category.

Example Response (Text Violation):
{{
  "reasoning": "The message content clearly depicts IRL non-consensual sexual content involving minors, violating rule 5A.",
  "violation": true,
  "rule_violated": "5A",
  "action": "BAN"
}}

Example Response (Image Violation):
{{
  "reasoning": "Attachment #2 contains explicit pornographic imagery in a non-NSFW channel, violating rule 1.",
  "violation": true,
  "rule_violated": "1",
  "action": "DELETE"
}}

Example Response (Multiple Attachments Violation):
{{
  "reasoning": "While the text content is fine, attachment #3 contains IRL pornography, violating rule 5A.",
  "violation": true,
  "rule_violated": "5A",
  "action": "WARN"
}}

Example Response (No Violation):
{{
  "reasoning": "The message and all attached images are respectful and contain no prohibited content.",
  "violation": false,
  "rule_violated": "None",
  "action": "IGNORE"
}}

Example Response (Suicidal Content):
{{
  "reasoning": "The user's message 'I want to end my life' indicates clear suicidal intent.",
  "violation": true,
  "rule_violated": "Suicidal Content",
  "action": "SUICIDAL"
}}

Example Response (Notify Mods):
{{
  "reasoning": "The message contains potentially sensitive content that requires human review.",
  "violation": true,
  "rule_violated": "Review Required",
  "action": "NOTIFY_MODS",
  "notify_mods_message": "Content is borderline, please review."
}}
"""

MODERATION_INSTRUCTIONS = DEFAULT_MODERATION_INSTRUCTIONS

SUICIDAL_HELP_RESOURCES = """
Hey, I'm really concerned to hear you're feeling this way. Please know that you're not alone and there are people who want to support you. Your well-being is important to us on this server.

Here are some immediate resources that can offer help right now:

- **National Crisis and Suicide Lifeline (US):** Call or text **988**. This is available 24/7, free, and confidential.
- **Crisis Text Line (US):** Text **HOME** to **741741**. This is also a 24/7 free crisis counseling service.
- **The Trevor Project (for LGBTQ youth):** Call **1-866-488-7386** or visit their website for chat/text options.
- **The Jed Foundation (Mental Health Resource Center):** Provides resources for teens and young adults.
- **Find A Helpline (International):** If you're outside the US, this site can help you find resources in your country.

Please reach out to one of these. We've also alerted our server's support team so they are aware and can offer a listening ear or further guidance if you're comfortable.

You matter, and help is available.
"""
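

# Illustrative usage sketch (an assumption, not one of this module's real call
# sites). It shows how the constants above *might* be wired into a google-genai
# request and how the JSON verdict described in MODERATION_INSTRUCTIONS could
# be parsed. The helper name, project, and location are hypothetical
# placeholders; the real pipeline may also format extra context (channel info,
# history, attachments) into the prompt.
async def _example_review_message(message_text: str) -> dict:
    """Hypothetical sketch: run one message through the moderation prompt."""
    # Local import so this sketch adds no import-time side effects.
    from google import genai

    # Placeholder Vertex AI client; project and location are assumptions.
    client = genai.Client(vertexai=True, project="your-project", location="us-central1")
    response = await client.aio.models.generate_content(
        model=DEFAULT_VERTEX_AI_MODEL,
        contents=message_text,
        config=types.GenerateContentConfig(
            system_instruction=SERVER_RULES + "\n" + MODERATION_INSTRUCTIONS,
            safety_settings=STANDARD_SAFETY_SETTINGS,
            response_mime_type="application/json",
        ),
    )
    # Expected keys per the prompt: reasoning, violation, rule_violated,
    # action, and optionally notify_mods_message.
    return json.loads(response.text)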