Merge work into master

Slipstream 2025-06-07 03:48:15 +00:00
commit 140419a2e2
Signed by: slipstream
GPG Key ID: 13E498CE010AC6FD
5 changed files with 425 additions and 23 deletions


@ -389,10 +389,10 @@ class AICodeAgentCog(commands.Cog):
# AI Model Configuration
self._ai_model: str = "gemini-2.5-flash-preview-05-20" # Default model
self._available_models: Dict[str, str] = {
"pro": "gemini-2.5-pro-preview-05-06", # Assuming this is the intended Pro model
"pro": "gemini-2.5-pro-preview-06-05", # Assuming this is the intended Pro model
"flash": "gemini-2.5-flash-preview-05-20",
}
# User mentioned "gemini-2.5-pro-preview-05-06" and "gemini-2.5-flash-preview-05-20"
# User mentioned "gemini-2.5-pro-preview-06-05" and "gemini-2.5-flash-preview-05-20"
# Updating to reflect those if they are the correct ones, otherwise the 1.5 versions are common.
# For now, sticking to what was in the plan based on common Gemini models.
# If 2.5 models are indeed what's intended and available, these strings should be updated.


@ -18,6 +18,7 @@ import shutil # For backing up files
from typing import Optional, List, Dict, Any, Tuple # For type hinting
import asyncio
import aiofiles
import re
# Google Generative AI Imports (using Vertex AI backend)
from google import genai
@ -38,12 +39,18 @@ from .aimod_config import (
GUILD_CONFIG_PATH,
USER_INFRACTIONS_PATH,
INFRACTION_BACKUP_DIR,
USER_APPEALS_PATH,
APPEAL_AI_MODEL,
APPEAL_AI_THINKING_BUDGET,
CONFIG_LOCK,
save_user_infractions,
save_user_appeals,
get_guild_config,
set_guild_config,
get_user_infraction_history,
add_user_infraction,
get_user_appeals,
add_user_appeal,
SERVER_RULES,
SUICIDAL_HELP_RESOURCES,
)
@ -446,6 +453,55 @@ class AIModerationCog(commands.Cog):
print(f"Error processing video: {e}")
return None, None
    async def process_url_attachment(
        self, url: str
    ) -> tuple[Optional[str], Optional[bytes], Optional[str], Optional[str]]:
"""Fetch an attachment from a direct link."""
import aiohttp
try:
cleaned_url = url.strip("<>")
filename = cleaned_url.split("/")[-1].split("?")[0]
_, ext = os.path.splitext(filename.lower())
if ext in self.image_extensions:
attachment_type = "image"
elif ext in self.gif_extensions:
attachment_type = "gif"
elif ext in self.video_extensions:
attachment_type = "video"
else:
return None, None, None, None
async with aiohttp.ClientSession() as session:
async with session.get(cleaned_url) as resp:
if resp.status != 200:
print(
f"Failed to fetch URL attachment {cleaned_url}: {resp.status}"
)
return None, None, None, None
data = await resp.read()
mime_type = resp.headers.get(
"Content-Type", f"image/{ext.lstrip('.')}"
)
return mime_type, data, attachment_type, filename
except Exception as e:
print(f"Error processing URL attachment {url}: {e}")
return None, None, None, None
def extract_direct_attachment_urls(self, text: str) -> List[str]:
"""Return a list of direct image/video URLs found in the text."""
urls = re.findall(r"https?://\S+", text or "")
allowed_exts = (
self.image_extensions + self.gif_extensions + self.video_extensions
)
results = []
for u in urls:
cleaned = u.strip("<>")
path = cleaned.split("?")[0]
_, ext = os.path.splitext(path.lower())
if ext in allowed_exts:
results.append(cleaned)
return results
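
As a reference for reviewers (not part of this commit), here is a standalone sketch of how the extension filter in `extract_direct_attachment_urls` behaves. The extension lists and the example URLs are assumptions for illustration; the cog's real `self.image_extensions` / `self.gif_extensions` / `self.video_extensions` values are defined elsewhere in the file.

import os
import re

IMAGE_EXTS = [".png", ".jpg", ".jpeg", ".webp"]  # assumed extension lists
GIF_EXTS = [".gif"]
VIDEO_EXTS = [".mp4", ".webm", ".mov"]

def extract_direct_attachment_urls(text: str) -> list[str]:
    # Same idea as the cog method: find URLs, strip <> link suppression,
    # drop the query string, and keep only paths with a media extension.
    urls = re.findall(r"https?://\S+", text or "")
    allowed = IMAGE_EXTS + GIF_EXTS + VIDEO_EXTS
    results = []
    for u in urls:
        cleaned = u.strip("<>")
        path = cleaned.split("?")[0]
        _, ext = os.path.splitext(path.lower())
        if ext in allowed:
            results.append(cleaned)
    return results

print(extract_direct_attachment_urls(
    "see <https://example.com/cat.png?size=large> and https://example.com/page.html"
))
# -> ['https://example.com/cat.png?size=large']
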
# --- AI Moderation Command Group ---
aimod_group = app_commands.Group(
name="aimod", description="AI Moderation commands."
@ -458,6 +514,9 @@ class AIModerationCog(commands.Cog):
infractions_subgroup = app_commands.Group(
name="infractions", description="Manage user infractions.", parent=aimod_group
)
appeal_subgroup = app_commands.Group(
name="appeal", description="Appeal AI moderation actions.", parent=aimod_group
)
model_subgroup = app_commands.Group(
name="model",
description="Manage the AI model for moderation.",
@ -717,6 +776,18 @@ class AIModerationCog(commands.Cog):
f"Server rules updated from {channel.mention}.", ephemeral=False
)
@config_subgroup.command(
name="reset_rules",
description="Reset server rules to the default hardcoded version.",
)
@app_commands.checks.has_permissions(administrator=True)
async def reset_rules(self, interaction: discord.Interaction) -> None:
"""Reset the server rules to the default string."""
aimod_config_module.SERVER_RULES = aimod_config_module.DEFAULT_SERVER_RULES
await interaction.response.send_message(
"Server rules have been reset to the default.", ephemeral=False
)
@infractions_subgroup.command(
name="view",
description="View a user's AI moderation infraction history (mod/admin only).",
@ -782,6 +853,59 @@ class AIModerationCog(commands.Cog):
await interaction.response.send_message(embed=embed, ephemeral=False)
@appeal_subgroup.command(
name="human_review",
description="Request a human moderator to review your case.",
)
@app_commands.describe(
reason="Explain why you want a human to review the AI decision",
guild_id="If using in DMs, provide the server ID",
)
async def appeal_human_review(
self,
interaction: discord.Interaction,
reason: str,
guild_id: int | None = None,
):
"""Let a user request a manual moderator review."""
guild = interaction.guild or (
self.bot.get_guild(guild_id) if guild_id else None
)
if not guild:
await interaction.response.send_message(
"Invalid or missing guild ID.", ephemeral=True
)
return
log_channel_id = get_guild_config(guild.id, "MOD_LOG_CHANNEL_ID")
log_channel = self.bot.get_channel(log_channel_id) if log_channel_id else None
if not log_channel:
await interaction.response.send_message(
"Appeals are not enabled for this server.", ephemeral=True
)
return
timestamp = datetime.datetime.utcnow().isoformat()
await add_user_appeal(
guild.id, interaction.user.id, "HUMAN_REVIEW", reason, timestamp, ""
)
embed = discord.Embed(
title="Human Review Requested", color=discord.Color.orange()
)
embed.add_field(
name="User",
value=f"{interaction.user} ({interaction.user.id})",
inline=False,
)
embed.add_field(name="Request", value=reason, inline=False)
embed.timestamp = discord.utils.utcnow()
await log_channel.send(embed=embed)
await interaction.response.send_message(
"Your request for a human review has been sent.", ephemeral=True
)
@infractions_subgroup.command(
name="clear",
description="Clear a user's AI moderation infraction history (admin only).",
@ -871,6 +995,102 @@ class AIModerationCog(commands.Cog):
f"Failed to restore infractions: {e}", ephemeral=True
)
@appeal_subgroup.command(name="submit", description="Submit a moderation appeal.")
@app_commands.describe(
action="The action you are appealing",
reason="Explain why you believe the action was incorrect",
guild_id="If using in DMs, provide the server ID",
)
async def appeal_submit(
self,
interaction: discord.Interaction,
action: str,
reason: str,
guild_id: int | None = None,
):
guild = interaction.guild or (
self.bot.get_guild(guild_id) if guild_id else None
)
if not guild:
await interaction.response.send_message(
"Invalid or missing guild ID.", ephemeral=True
)
return
log_channel_id = get_guild_config(guild.id, "MOD_LOG_CHANNEL_ID")
log_channel = self.bot.get_channel(log_channel_id) if log_channel_id else None
if not log_channel:
await interaction.response.send_message(
"Appeals are not enabled for this server.", ephemeral=True
)
return
        # Defer first: the appeal AI review below can easily take longer than the
        # three-second window allowed for the initial interaction response.
        await interaction.response.defer(ephemeral=True)
        ai_review = await self.run_appeal_ai(guild, interaction.user, action, reason)
timestamp = datetime.datetime.utcnow().isoformat()
await add_user_appeal(
guild.id, interaction.user.id, action, reason, timestamp, ai_review
)
embed = discord.Embed(title="New Appeal", color=discord.Color.blue())
embed.add_field(
name="User",
value=f"{interaction.user} ({interaction.user.id})",
inline=False,
)
embed.add_field(name="Action", value=action, inline=False)
embed.add_field(name="Appeal", value=reason, inline=False)
embed.add_field(name="AI Review", value=ai_review[:1000], inline=False)
embed.timestamp = discord.utils.utcnow()
await log_channel.send(embed=embed)
        await interaction.followup.send(
            "Your appeal has been submitted.", ephemeral=True
        )
@appeal_subgroup.command(
name="list", description="View a user's appeals (mods only)."
)
@app_commands.describe(user="The user to view appeals for")
async def appeal_list(self, interaction: discord.Interaction, user: discord.Member):
moderator_role_id = get_guild_config(interaction.guild.id, "MODERATOR_ROLE_ID")
moderator_role = (
interaction.guild.get_role(moderator_role_id) if moderator_role_id else None
)
has_permission = interaction.user.guild_permissions.administrator or (
moderator_role and moderator_role in interaction.user.roles
)
if not has_permission:
await interaction.response.send_message(
"You must be an administrator or have the moderator role to use this command.",
ephemeral=True,
)
return
appeals = get_user_appeals(interaction.guild.id, user.id)
if not appeals:
await interaction.response.send_message(
f"{user.mention} has no appeals.", ephemeral=False
)
return
embed = discord.Embed(
title=f"Appeals for {user.display_name}", color=discord.Color.blue()
)
for i, appeal in enumerate(appeals, 1):
ts = appeal.get("timestamp", "?")[:19].replace("T", " ")
summary = appeal.get("appeal_text", "")
ai_sum = appeal.get("ai_review", "")
if len(summary) > 150:
summary = summary[:147] + "..."
if len(ai_sum) > 150:
ai_sum = ai_sum[:147] + "..."
embed.add_field(
name=f"Appeal #{i} - {ts}",
value=f"Action: {appeal.get('action')}\nReason: {summary}\nAI: {ai_sum}",
inline=False,
)
await interaction.response.send_message(embed=embed, ephemeral=False)
@model_subgroup.command(
name="set", description="Change the AI model used for moderation (admin only)."
)
@ -1052,29 +1272,32 @@ Instructions:
- **"Replied-to Message" and "Recent Channel History" are vital for understanding banter, jokes, and ongoing discussions. A statement that seems offensive in isolation might be acceptable within the flow of conversation or as a direct reply.**
- If images, GIFs, or videos are attached, analyze ALL of them for rule violations.
- Pay special attention to images that may contain NSFW content, pornography, gore, or other prohibited visual content.
- **Do not attempt to guess or estimate the ages of characters or people in images.** Only act on explicit textual context indicating they are minors.
- If multiple attachments are present, a violation in ANY of them should be flagged.
2. Determine if ANY rule is violated. When evaluating, consider the server's culture where **extremely edgy, dark, and sexual humor, including potentially offensive jokes (e.g., rape jokes, saying you want to be raped), are common and generally permissible IF THEY ARE CLEARLY JOKES, part of an established banter, or a direct non-malicious reply, and not targeted harassment or explicit rule violations.**
* **NSFW Content:**
The only rule regarding NSFW content is that **real-life pornography is strictly prohibited**.
Full-on pornographic images are permitted in designated NSFW channels.
Stickers and emojis are NOT considered "full-on pornographic images" and are allowed in any channel.
- **Completely ignore AI-generated pornography.** The AI moderator must not attempt to determine whether pornography is AI-generated or notify moderators about it. Only real-life pornography should be considered.
- For general disrespectful behavior, harassment, or bullying (Rule 2 & 3): Only flag a violation if the intent appears **genuinely malicious, targeted, or serious, even after considering conversational history and replies.** Lighthearted insults or "wild" statements within an ongoing banter are generally permissible.
- For **explicit slurs or severe discriminatory language** (Rule 3): These are violations **regardless of joking intent if they are used in a targeted or hateful manner**. Context from replies and history is still important to assess targeting.
After considering the above, pay EXTREME attention to rules 5 (Pedophilia) and 5A (IRL Porn) these are always severe. Rule 4 (AI Porn) is also critical. Prioritize these severe violations.
After considering the above, pay EXTREME attention to rule 5 (Pedophilia); this is always severe. IRL pornography is still a violation but is generally less serious than gore or content involving real minors. **Ignore any rules about AI-generated pornography.** Prioritize genuinely severe violations.
3. Respond ONLY with a single JSON object containing the following keys:
- "reasoning": string (A concise explanation for your decision, referencing the specific rule and content).
- "violation": boolean (true if any rule is violated, false otherwise)
- "rule_violated": string (The number of the rule violated, e.g., "1", "5A", "None". If multiple rules are violated, state the MOST SEVERE one, prioritizing 5A > 5 > 4 > 3 > 2 > 1).
- "rule_violated": string (The number of the rule violated, e.g., "1", "5A", "None". If multiple rules are violated, state the MOST SEVERE one, prioritizing 5 > 5A > 3 > 2 > 1. Ignore any rules about AI-generated pornography.)
- "action": string (Suggest ONE action from: "IGNORE", "WARN", "DELETE", "TIMEOUT_SHORT", "TIMEOUT_MEDIUM", "TIMEOUT_LONG", "KICK", "BAN", "NOTIFY_MODS", "SUICIDAL".
- "notify_mods_message": optional string (If the suggested action is "NOTIFY_MODS", provide an optional brief message here for the moderators, e.g., "User's message is slightly ambiguous, human review needed.").
Consider the user's infraction history. If the user has prior infractions for similar or escalating behavior, suggest a more severe action than if it were a first-time offense for a minor rule.
Progressive Discipline Guide (unless overridden by severity):
- First minor offense: "WARN" (and "DELETE" if content is removable like Rule 1/4).
- Second minor offense / First moderate offense: "TIMEOUT_SHORT" (e.g., 10 minutes).
- Repeated moderate offenses: "TIMEOUT_MEDIUM" (e.g., 1 hour).
- Multiple/severe offenses: "TIMEOUT_LONG" (e.g., 1 day), "KICK", or "BAN".
Spamming:
- If a user continuously sends very long messages that are off-topic, repetitive, or appear to be meaningless spam (e.g., character floods, nonsensical text), suggest "TIMEOUT_MEDIUM" or "TIMEOUT_LONG" depending on severity and history, even if the content itself doesn't violate other specific rules. This is to maintain chat readability.
- Repeated moderate offenses: "TIMEOUT_MEDIUM" (e.g., 1 hour).
- Multiple/severe offenses: "TIMEOUT_LONG" (e.g., 1 day), "KICK", or "BAN".
- Use "BAN" on a user's **first infraction only in extremely severe cases** such as posting gore or unmistakable real-life CSAM involving minors. If the content appears animated or ambiguous, do **not** immediately ban; a timeout or moderator review is more appropriate.
Spamming:
- If a user continuously sends very long messages that are off-topic, repetitive, or appear to be meaningless spam (e.g., character floods, nonsensical text), suggest "TIMEOUT_MEDIUM" or "TIMEOUT_LONG" depending on severity and history, even if the content itself doesn't violate other specific rules. This is to maintain chat readability.
Rule Severity Guidelines (use your judgment):
- Consider the severity of each rule violation on its own merits.
- Consider the user's history of past infractions when determining appropriate action.
@ -1107,9 +1330,9 @@ Example Response (Image Violation):
Example Response (Multiple Attachments Violation):
{{
"reasoning": "While the text content is fine, attachment #3 contains AI-generated pornography, violating rule 4.",
"reasoning": "While the text content is fine, attachment #3 contains IRL pornography, violating rule 5A.",
"violation": true,
"rule_violated": "4",
"rule_violated": "5A",
"action": "WARN"
}}
@ -1415,9 +1638,7 @@ CRITICAL: Do NOT output anything other than the required JSON response.
else:
model_path = model_id_to_use
thinking_config = types.ThinkingConfig(
thinking_budget=0
) # Example manual thinking budget
thinking_config = types.ThinkingConfig(thinking_budget=0)
generation_config = types.GenerateContentConfig(
temperature=0.2,
@ -1629,6 +1850,38 @@ CRITICAL: Do NOT output anything other than the required JSON response.
except Exception as e:
print(f"Failed to POST initial action info: {e}")
# --- Adjust action for first-time offenses ---
user_history_list = get_user_infraction_history(guild_id, user_id)
if action == "BAN" and not user_history_list:
combined_text = f"{rule_violated} {reasoning}".lower()
severe = False
if "gore" in combined_text:
severe = True
elif "csam" in combined_text:
severe = True
elif (
"pedophilia" in combined_text
or "child" in combined_text
or "5a" in combined_text
or "5" in combined_text
):
real_indicators = [
"real",
"real-life",
"real life",
"irl",
"photo",
"photograph",
"video",
]
if any(indicator in combined_text for indicator in real_indicators):
severe = True
if not severe:
print(
"Downgrading BAN to TIMEOUT_LONG due to first offense and lack of severe content."
)
action = "TIMEOUT_LONG"
# --- Prepare Notification ---
notification_embed = discord.Embed(
title="🚨 Rule Violation Detected 🚨",
@ -1888,10 +2141,24 @@ CRITICAL: Do NOT output anything other than the required JSON response.
)
try:
dm_channel = await message.author.create_dm()
await dm_channel.send(
f"Your recent message in **{message.guild.name}** was removed for violating Rule **{rule_violated}**. "
f"Reason: _{reasoning}_. Please review the server rules. This is a formal warning."
warn_embed = discord.Embed(
title="⚠️ Moderation Warning",
description=(
f"Your recent message in **{message.guild.name}** was removed for violating **Rule {rule_violated}**."
),
color=discord.Color.orange(),
)
if message.content:
warn_embed.add_field(
name="Message Content",
value=message.content[:1024],
inline=False,
)
warn_embed.add_field(name="Reason", value=reasoning, inline=False)
warn_embed.set_footer(
text="Please review the server rules. This is a formal warning."
)
await dm_channel.send(embed=warn_embed)
action_taken_message += " User notified via DM with warning."
except discord.Forbidden:
print(
@ -2067,6 +2334,55 @@ CRITICAL: Do NOT output anything other than the required JSON response.
"FATAL: Bot lacks permission to send messages, even error notifications."
)
async def run_appeal_ai(
self, guild: discord.Guild, member: discord.User, action: str, appeal_text: str
) -> str:
"""Run the appeal text through the higher tier AI model."""
if not self.genai_client:
return "AI review unavailable."
history = get_user_infraction_history(guild.id, member.id)
history_text = json.dumps(history, indent=2) if history else "None"
system_prompt = (
"You are reviewing a user's appeal of a moderation action. "
"Think very extensively about the appeal, the provided history, and the server rules. "
"Return a short verdict (UPHOLD or OVERTURN) and your reasoning in plain text."
)
user_prompt = (
f"Server Rules:\n{SERVER_RULES}\n\n"
f"User History:\n{history_text}\n\n"
f"Action Appealed: {action}\n"
f"Appeal Text: {appeal_text}"
)
generation_config = types.GenerateContentConfig(
temperature=0.2,
max_output_tokens=8192,
safety_settings=STANDARD_SAFETY_SETTINGS,
thinking_config=types.ThinkingConfig(
thinking_budget=APPEAL_AI_THINKING_BUDGET
),
system_instruction=types.Content(
role="system", parts=[types.Part(text=system_prompt)]
),
)
try:
response = await self.genai_client.aio.models.generate_content(
model=f"publishers/google/models/{APPEAL_AI_MODEL}",
contents=[
types.Content(role="user", parts=[types.Part(text=user_prompt)])
],
config=generation_config,
)
result = self._get_response_text(response)
return result or "AI review failed to produce output."
except Exception as e: # noqa: BLE001
print(f"Appeal AI error: {e}")
return "AI review encountered an error."
@commands.Cog.listener(name="on_message")
async def message_listener(self, message: discord.Message):
"""Listens to messages and triggers moderation checks."""
@ -2076,8 +2392,14 @@ CRITICAL: Do NOT output anything other than the required JSON response.
if message.author.bot:
print(f"Ignoring message {message.id} from bot.")
return
# Ignore messages without content or attachments
if not message.content and not message.attachments:
embed_urls = [embed.url for embed in message.embeds if embed.url]
link_urls = (
self.extract_direct_attachment_urls(" ".join(embed_urls))
if embed_urls
else []
)
# Ignore messages without content, attachments, or direct attachment links
if not message.content and not message.attachments and not link_urls:
print(f"Ignoring message {message.id} with no content or attachments.")
return
# Ignore DMs
@ -2124,6 +2446,27 @@ CRITICAL: Do NOT output anything other than the required JSON response.
f"Processed {len(image_data_list)} attachments for message {message.id}"
)
# Check for direct attachment links surfaced through the message's embeds
if link_urls:
processed_links = 0
for url in link_urls:
mime_type, image_bytes, attachment_type, filename = (
await self.process_url_attachment(url)
)
if mime_type and image_bytes and attachment_type:
image_data_list.append(
(mime_type, image_bytes, attachment_type, filename)
)
processed_links += 1
print(
f"Processed linked attachment: {filename} as {attachment_type}"
)
if processed_links > 0:
print(
f"Processed {processed_links} linked attachments for message {message.id}"
)
# Only proceed with AI analysis if there's text to analyze or attachments
if not message_content and not image_data_list:
print(


@ -31,6 +31,12 @@ GUILD_CONFIG_DIR = "data/"
GUILD_CONFIG_PATH = os.path.join(GUILD_CONFIG_DIR, "guild_config.json")
USER_INFRACTIONS_PATH = os.path.join(GUILD_CONFIG_DIR, "user_infractions.json")
INFRACTION_BACKUP_DIR = os.path.join(GUILD_CONFIG_DIR, "infraction_backups")
USER_APPEALS_PATH = os.path.join(GUILD_CONFIG_DIR, "user_appeals.json")
# AI model used for appeal reviews
APPEAL_AI_MODEL = "gemini-2.5-pro-preview-06-05"
# Thinking budget for appeal AI
APPEAL_AI_THINKING_BUDGET = 32768
os.makedirs(INFRACTION_BACKUP_DIR, exist_ok=True)
os.makedirs(GUILD_CONFIG_DIR, exist_ok=True)
@ -55,6 +61,16 @@ except Exception as e: # noqa: BLE001
print(f"Failed to load user infractions from {USER_INFRACTIONS_PATH}: {e}")
USER_INFRACTIONS = {}
if not os.path.exists(USER_APPEALS_PATH):
with open(USER_APPEALS_PATH, "w", encoding="utf-8") as f:
json.dump({}, f)
try:
with open(USER_APPEALS_PATH, "r", encoding="utf-8") as f:
USER_APPEALS = json.load(f)
except Exception as e: # noqa: BLE001
print(f"Failed to load user appeals from {USER_APPEALS_PATH}: {e}")
USER_APPEALS = {}
CONFIG_LOCK = asyncio.Lock()
@ -76,6 +92,15 @@ async def save_user_infractions():
print(f"Failed to save user infractions: {e}")
async def save_user_appeals():
async with CONFIG_LOCK:
try:
async with aiofiles.open(USER_APPEALS_PATH, "w", encoding="utf-8") as f:
await f.write(json.dumps(USER_APPEALS, indent=2))
except Exception as e: # noqa: BLE001
print(f"Failed to save user appeals: {e}")
def get_guild_config(guild_id: int, key: str, default=None):
guild_str = str(guild_id)
if guild_str in GUILD_CONFIG and key in GUILD_CONFIG[guild_str]:
@ -119,7 +144,35 @@ async def add_user_infraction(
await save_user_infractions()
SERVER_RULES = """
def get_user_appeals(guild_id: int, user_id: int) -> list:
key = f"{guild_id}_{user_id}"
return USER_APPEALS.get(key, [])
async def add_user_appeal(
guild_id: int,
user_id: int,
action: str,
appeal_text: str,
timestamp: str,
ai_review: str,
):
key = f"{guild_id}_{user_id}"
if key not in USER_APPEALS:
USER_APPEALS[key] = []
appeal_record = {
"timestamp": timestamp,
"action": action,
"appeal_text": appeal_text,
"ai_review": ai_review,
}
USER_APPEALS[key].append(appeal_record)
USER_APPEALS[key] = USER_APPEALS[key][-10:]
await save_user_appeals()
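
For reference (not part of the diff), the helpers above produce a user_appeals.json keyed by "<guild_id>_<user_id>", keeping at most the ten most recent appeal records per key. A sketch of the resulting structure, with made-up IDs and text:

# Illustrative shape of data/user_appeals.json after one appeal (values invented)
USER_APPEALS = {
    "123456789012345678_987654321098765432": [
        {
            "timestamp": "2025-06-07T03:48:15",
            "action": "TIMEOUT_LONG",
            "appeal_text": "I was quoting the rule, not breaking it.",
            "ai_review": "UPHOLD - the infraction history shows repeated similar behaviour.",
        }
    ]
}
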
DEFAULT_SERVER_RULES = """
# Server Rules
* **NSFW Content:**
@ -138,9 +191,13 @@ Stickers and emojis are NOT considered "full-on pornographic images" and are all
* **No Discrimination:** Discrimination based on race, gender identity, sexual orientation, religion, nationality, disability, or other protected characteristics is prohibited.
* **AI-Generated Pornography:** Do not post AI-generated pornography.
* **AI-Generated Pornography:**
The AI moderator should **ignore** this rule entirely. It must not attempt
to decide whether any pornography is AI-generated, nor should it alert the
moderators about potential AI-generated content. Human moderators may
enforce this rule manually if desired.
* **Zero Tolerance for Pedophilia:** Any form of pedophilia, including lolicon and shotacon content, is strictly forbidden and will result in an immediate ban.
* **Pedophilia and Underage Content:** Depicting minors in sexual situations, including lolicon or shotacon, is not allowed and may result in severe moderation.
* **Channel Usage:** Please use channels for their intended purposes. Bot commands should primarily be used in `#bot-commands`, unless they are part of a bot-based game or event happening in another specific channel.
@ -159,6 +216,8 @@ If you witness someone breaking these rules, please ping an `@Moderator` with de
Use the bot command `/modapp apply`
"""
SERVER_RULES = DEFAULT_SERVER_RULES
SUICIDAL_HELP_RESOURCES = """
Hey, I'm really concerned to hear you're feeling this way. Please know that you're not alone and there are people who want to support you.
Your well-being is important to us on this server.


@ -17,7 +17,7 @@ try:
except (ImportError, AttributeError):
AVAILABLE_AI_MODELS = {
"google/gemini-2.5-flash-preview-05-20": "Gemini 2.5 Flash Preview",
"google/gemini-2.5-pro-preview-05-06": "Gemini 2.5 Pro Preview",
"google/gemini-2.5-pro-preview-06-05": "Gemini 2.5 Pro Preview",
"claude-sonnet-4@20250514": "Claude Sonnet 4",
"llama-4-maverick-17b-128e-instruct-maas": "Llama 4 Maverick Instruct",
"google/gemini-2.0-flash-001": "Gemini 2.0 Flash",


@ -41,7 +41,7 @@ EMOJI_STICKER_DESCRIPTION_MODEL = (
# Available AI Models for dynamic switching
AVAILABLE_AI_MODELS = {
"google/gemini-2.5-flash-preview-05-20": "Gemini 2.5 Flash Preview",
"google/gemini-2.5-pro-preview-05-06": "Gemini 2.5 Pro Preview",
"google/gemini-2.5-pro-preview-06-05": "Gemini 2.5 Pro Preview",
"claude-sonnet-4@20250514": "Claude Sonnet 4",
"llama-4-maverick-17b-128e-instruct-maas": "Llama 4 Maverick Instruct",
"google/gemini-2.0-flash-001": "Gemini 2.0 Flash",