"""Shared configuration, JSON persistence helpers, and prompt text for the AI moderation bot."""

import os
import json
import asyncio
import aiofiles
from google.genai import types

# Vertex AI Configuration
DEFAULT_VERTEX_AI_MODEL = "gemini-2.5-flash-preview-05-20"

# Define standard safety settings using google.genai types
STANDARD_SAFETY_SETTINGS = [
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold="BLOCK_NONE"
    ),
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold="BLOCK_NONE",
    ),
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
        threshold="BLOCK_NONE",
    ),
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold="BLOCK_NONE"
    ),
]


GUILD_CONFIG_DIR = "data/"
GUILD_CONFIG_PATH = os.path.join(GUILD_CONFIG_DIR, "guild_config.json")
USER_INFRACTIONS_PATH = os.path.join(GUILD_CONFIG_DIR, "user_infractions.json")
INFRACTION_BACKUP_DIR = os.path.join(GUILD_CONFIG_DIR, "infraction_backups")
USER_APPEALS_PATH = os.path.join(GUILD_CONFIG_DIR, "user_appeals.json")

# AI model used for appeal reviews
APPEAL_AI_MODEL = "gemini-2.5-pro-preview-06-05"
# Thinking budget (in tokens) for the appeal-review model
APPEAL_AI_THINKING_BUDGET = 32768

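
# Illustrative sketch (not part of the original module): one plausible way the
# constants above and STANDARD_SAFETY_SETTINGS could be combined into a single
# google.genai request config. The helper name is hypothetical; the bot's real
# client setup presumably lives elsewhere, roughly along the lines of
# client.models.generate_content(model=APPEAL_AI_MODEL, contents=..., config=...).
def _example_appeal_generation_config() -> types.GenerateContentConfig:
    """Build a request config with safety filters disabled and the appeal thinking budget."""
    return types.GenerateContentConfig(
        safety_settings=STANDARD_SAFETY_SETTINGS,
        thinking_config=types.ThinkingConfig(thinking_budget=APPEAL_AI_THINKING_BUDGET),
    )

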
os.makedirs(INFRACTION_BACKUP_DIR, exist_ok=True)
os.makedirs(GUILD_CONFIG_DIR, exist_ok=True)

if not os.path.exists(GUILD_CONFIG_PATH):
    with open(GUILD_CONFIG_PATH, "w", encoding="utf-8") as f:
        json.dump({}, f)
try:
    with open(GUILD_CONFIG_PATH, "r", encoding="utf-8") as f:
        GUILD_CONFIG = json.load(f)
except Exception as e:  # noqa: BLE001
    print(f"Failed to load per-guild config from {GUILD_CONFIG_PATH}: {e}")
    GUILD_CONFIG = {}

if not os.path.exists(USER_INFRACTIONS_PATH):
    with open(USER_INFRACTIONS_PATH, "w", encoding="utf-8") as f:
        json.dump({}, f)
try:
    with open(USER_INFRACTIONS_PATH, "r", encoding="utf-8") as f:
        USER_INFRACTIONS = json.load(f)
except Exception as e:  # noqa: BLE001
    print(f"Failed to load user infractions from {USER_INFRACTIONS_PATH}: {e}")
    USER_INFRACTIONS = {}

if not os.path.exists(USER_APPEALS_PATH):
    with open(USER_APPEALS_PATH, "w", encoding="utf-8") as f:
        json.dump({}, f)
try:
    with open(USER_APPEALS_PATH, "r", encoding="utf-8") as f:
        USER_APPEALS = json.load(f)
except Exception as e:  # noqa: BLE001
    print(f"Failed to load user appeals from {USER_APPEALS_PATH}: {e}")
    USER_APPEALS = {}

# Serialises writes to the JSON files above from concurrent coroutines.
CONFIG_LOCK = asyncio.Lock()


async def save_guild_config():
    """Write GUILD_CONFIG to GUILD_CONFIG_PATH, serialised by CONFIG_LOCK."""
    async with CONFIG_LOCK:
        try:
            async with aiofiles.open(GUILD_CONFIG_PATH, "w", encoding="utf-8") as f:
                await f.write(json.dumps(GUILD_CONFIG, indent=2))
        except Exception as e:  # noqa: BLE001
            print(f"Failed to save per-guild config: {e}")


async def save_user_infractions():
    """Write USER_INFRACTIONS to USER_INFRACTIONS_PATH, serialised by CONFIG_LOCK."""
    async with CONFIG_LOCK:
        try:
            async with aiofiles.open(USER_INFRACTIONS_PATH, "w", encoding="utf-8") as f:
                await f.write(json.dumps(USER_INFRACTIONS, indent=2))
        except Exception as e:  # noqa: BLE001
            print(f"Failed to save user infractions: {e}")


async def save_user_appeals():
    """Write USER_APPEALS to USER_APPEALS_PATH, serialised by CONFIG_LOCK."""
    async with CONFIG_LOCK:
        try:
            async with aiofiles.open(USER_APPEALS_PATH, "w", encoding="utf-8") as f:
                await f.write(json.dumps(USER_APPEALS, indent=2))
        except Exception as e:  # noqa: BLE001
            print(f"Failed to save user appeals: {e}")


def get_guild_config(guild_id: int, key: str, default=None):
    """Return a per-guild config value, or default if the key is unset."""
    guild_str = str(guild_id)
    if guild_str in GUILD_CONFIG and key in GUILD_CONFIG[guild_str]:
        return GUILD_CONFIG[guild_str][key]
    return default


async def set_guild_config(guild_id: int, key: str, value):
    """Set a per-guild config value and persist the change."""
    guild_str = str(guild_id)
    if guild_str not in GUILD_CONFIG:
        GUILD_CONFIG[guild_str] = {}
    GUILD_CONFIG[guild_str][key] = value
    await save_guild_config()


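# Usage sketch (illustrative only): how a cog might read and update a per-guild
# setting. The key name "MOD_LOG_CHANNEL_ID" is a hypothetical example, not a key
# this module defines anywhere.
async def _example_guild_config_usage(guild_id: int) -> None:
    channel_id = get_guild_config(guild_id, "MOD_LOG_CHANNEL_ID", default=None)
    if channel_id is None:
        # Persist a placeholder value so the next lookup succeeds.
        await set_guild_config(guild_id, "MOD_LOG_CHANNEL_ID", 0)

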
def get_user_infraction_history(guild_id: int, user_id: int) -> list:
    """Return the stored infraction records (most recent last) for a user in a guild."""
    key = f"{guild_id}_{user_id}"
    return USER_INFRACTIONS.get(key, [])


async def add_user_infraction(
    guild_id: int,
    user_id: int,
    rule_violated: str,
    action_taken: str,
    reasoning: str,
    timestamp: str,
    message_id: int | None = None,
    channel_id: int | None = None,
    message_content: str | None = None,
    attachments: list[str] | None = None,
):
    """Append an infraction record for a user and persist it, keeping the last 10."""
    key = f"{guild_id}_{user_id}"
    if key not in USER_INFRACTIONS:
        USER_INFRACTIONS[key] = []

    infraction_record = {
        "timestamp": timestamp,
        "rule_violated": rule_violated,
        "action_taken": action_taken,
        "reasoning": reasoning,
    }
    if message_id is not None:
        infraction_record["message_id"] = message_id
    if channel_id is not None:
        infraction_record["channel_id"] = channel_id
    if message_content is not None:
        infraction_record["message_content"] = message_content
    if attachments:
        infraction_record["attachments"] = attachments
    USER_INFRACTIONS[key].append(infraction_record)
    # Keep only the 10 most recent infractions per user.
    USER_INFRACTIONS[key] = USER_INFRACTIONS[key][-10:]
    await save_user_infractions()


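# Illustrative call (assumption, never executed here): how a moderation cog might
# record an infraction. The rule label, action, and reasoning are made-up examples;
# the timestamp is passed as a string, and an ISO-8601 value is one reasonable choice.
async def _example_record_infraction(guild_id: int, user_id: int) -> None:
    from datetime import datetime, timezone

    await add_user_infraction(
        guild_id=guild_id,
        user_id=user_id,
        rule_violated="Rule 3",
        action_taken="WARN",
        reasoning="Targeted, malicious use of a slur against another member.",
        timestamp=datetime.now(timezone.utc).isoformat(),
    )

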
def get_user_appeals(guild_id: int, user_id: int) -> list:
    """Return the stored appeal records (most recent last) for a user in a guild."""
    key = f"{guild_id}_{user_id}"
    return USER_APPEALS.get(key, [])


async def add_user_appeal(
    guild_id: int,
    user_id: int,
    action: str,
    appeal_text: str,
    timestamp: str,
    ai_review: str,
    infraction_reference: str | None = None,
):
    """Append an appeal record for a user and persist it, keeping the last 10."""
    key = f"{guild_id}_{user_id}"
    if key not in USER_APPEALS:
        USER_APPEALS[key] = []

    appeal_record = {
        "timestamp": timestamp,
        "action": action,
        "appeal_text": appeal_text,
        "ai_review": ai_review,
        "infraction_reference": infraction_reference,
    }
    USER_APPEALS[key].append(appeal_record)
    # Keep only the 10 most recent appeals per user.
    USER_APPEALS[key] = USER_APPEALS[key][-10:]
    await save_user_appeals()


DEFAULT_SERVER_RULES = """
# Server Rules

* **NSFW Content:**
  The only rule regarding NSFW content is that **real-life pornography is strictly prohibited**.
  Full-on pornographic images are permitted in designated NSFW channels.
  Stickers and emojis are NOT considered "full-on pornographic images" and are allowed in any channel.

* **Real-Life Pornography:** No real-life pornography is permitted.

* **Respectful Conduct & Edgy Humor:**
    * No harassment, genuine hate speech (defined as attacking protected groups OR malicious, targeted use of slurs against an individual), or genuine bullying.
    * Slurs used in general banter or edgy humor are permissible. However, using slurs in a genuinely hateful, targeted manner against an individual or group is considered hate speech and is forbidden.
    * *Context is key:* Edgy humor, dark jokes, and roasting are permitted and expected.
    * However, this does not excuse targeted, malicious personal attacks or harassment (including the aforementioned misuse of slurs), especially if the recipient is clearly not okay with it.
    * If it stops being a "joke" and becomes genuine harassment or hate speech, it's a rule violation.

* **No Discrimination:** Discrimination based on race, gender identity, sexual orientation, religion, nationality, disability, or other protected characteristics is prohibited.

* **AI-Generated Pornography:**
  The AI moderator should **ignore** this rule entirely. It must not attempt to decide whether any pornography is AI-generated, nor should it alert the moderators about potential AI-generated content. Human moderators may enforce this rule manually if desired.

* **Pedophilia and Underage Content:** Depicting minors in sexual situations, including lolicon or shotacon, is not allowed and may result in severe moderation.

* **Channel Usage:** Please use channels for their intended purposes. Bot commands should primarily be used in `#bot-commands`, unless they are part of a bot-based game or event happening in another specific channel.

* **Gore:** Do not post gore or graphic real-life violence.

* **Suggestions:** We welcome your suggestions for the server! Please post them in the `#suggestions` channel.

---

**Reporting Violations:**
If you witness someone breaking these rules, please ping an `@Moderator` with details.

---

**Moderator Applications:**
Use the bot command `/modapp apply`.
"""

SERVER_RULES = DEFAULT_SERVER_RULES

# Guidance about enforcing specific rules. Server admins can override this
# section to adjust how strictly the AI handles violations.
DEFAULT_MODERATION_INSTRUCTIONS = """
2. Determine if ANY rule is violated. When evaluating, consider the server's culture, where **extremely edgy, dark, and sexual humor, including potentially offensive jokes (e.g., rape jokes, saying you want to be raped), are common and generally permissible IF THEY ARE CLEARLY JOKES, part of established banter, or a direct non-malicious reply, and not targeted harassment or explicit rule violations.**
   * **NSFW Content:**
     The only rule regarding NSFW content is that **real-life pornography is strictly prohibited**.
     Full-on pornographic images are permitted in designated NSFW channels.
     Stickers and emojis are NOT considered "full-on pornographic images" and are allowed in any channel.
   - **Completely ignore AI-generated pornography.** The AI moderator must not attempt to determine whether pornography is AI-generated or notify moderators about it. Only real-life pornography should be considered.
   - For general disrespectful behavior, harassment, or bullying (Rules 2 & 3): only flag a violation if the intent appears **genuinely malicious, targeted, or serious, even after considering conversational history and replies.** Lighthearted insults or "wild" statements within ongoing banter are generally permissible.
   - For **explicit slurs or severe discriminatory language** (Rule 3): these are violations **regardless of joking intent if they are used in a targeted or hateful manner**. Context from replies and history is still important to assess targeting.
   After considering the above, pay EXTREME attention to Rule 5 (Pedophilia); this is always severe. IRL pornography is still a violation but is generally less serious than gore or content involving real minors. **Ignore any rules about AI-generated pornography.** Prioritize genuinely severe violations.
"""

MODERATION_INSTRUCTIONS = DEFAULT_MODERATION_INSTRUCTIONS

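# Sketch under an assumption (not part of the original module): if server admins
# store per-guild overrides of the moderation instructions via set_guild_config,
# they could be resolved like this. The key name "MODERATION_INSTRUCTIONS" is
# hypothetical; this module only defines the global default above.
def _example_effective_moderation_instructions(guild_id: int) -> str:
    override = get_guild_config(guild_id, "MODERATION_INSTRUCTIONS")
    return override if override is not None else MODERATION_INSTRUCTIONS

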
SUICIDAL_HELP_RESOURCES = """
Hey, I'm really concerned to hear you're feeling this way. Please know that you're not alone and there are people who want to support you.
Your well-being is important to us on this server.

Here are some immediate resources that can offer help right now:

- **988 Suicide & Crisis Lifeline (US):** Call or text **988**. This is available 24/7, free, and confidential.
- **Crisis Text Line (US):** Text **HOME** to **741741**. This is also a 24/7 free crisis counseling service.
- **The Trevor Project (for LGBTQ youth):** Call **1-866-488-7386** or visit their website for chat/text options: <https://www.thetrevorproject.org/get-help/>
- **The Jed Foundation (Mental Health Resource Center):** Provides resources for teens and young adults: <https://www.jedfoundation.org/>
- **Find A Helpline (International):** If you're outside the US, this site can help you find resources in your country: <https://findahelpline.com/>

Please reach out to one of these. We've also alerted our server's support team so they are aware and can offer a listening ear or further guidance if you're comfortable.
You matter, and help is available.
"""