gurting time
This commit is contained in:
parent
0c7904d02e
commit
aeb5222e32
584
cogs/gurt_cog.py
584
cogs/gurt_cog.py
@ -13,7 +13,7 @@ import re
|
||||
import sqlite3 # Keep for potential other uses?
|
||||
# import aiosqlite # No longer needed directly in this file
|
||||
from collections import defaultdict, deque
|
||||
from typing import Dict, List, Any, Optional, Tuple, Set
|
||||
from typing import Dict, List, Any, Optional, Tuple, Set, Union # Added Union
|
||||
from tavily import TavilyClient # Added Tavily import
|
||||
from gurt_memory import MemoryManager # Import the new MemoryManager
|
||||
|
||||
@ -52,14 +52,34 @@ class GurtCog(commands.Cog):
|
||||
"nostalgic", "confused", "impressed", "skeptical", "enthusiastic",
|
||||
"distracted", "focused", "creative", "sarcastic", "wholesome"
|
||||
]
|
||||
# Personality traits that influence response style
|
||||
self.personality_traits = {
|
||||
"chattiness": 0.7, # How likely to respond to non-direct messages
|
||||
"emoji_usage": 0.5, # How frequently to use emojis
|
||||
"slang_level": 0.65, # How much slang to use (increased from 0.75)
|
||||
"randomness": 0.4, # How unpredictable responses should be (slightly increased)
|
||||
"verbosity": 0.4 # How verbose responses should be
|
||||
# --- Baseline Personality (for persistent storage) ---
|
||||
# NOTE: self.personality_traits is no longer used directly for prompt generation.
|
||||
# It's replaced by fetching from the DB. This baseline is only for initial DB load.
|
||||
self.baseline_personality = {
|
||||
"chattiness": 0.7,
|
||||
"emoji_usage": 0.5,
|
||||
"slang_level": 0.5,
|
||||
"randomness": 0.5,
|
||||
"verbosity": 0.4,
|
||||
# Add new potential traits for evolution
|
||||
"optimism": 0.5, # 0.0 (pessimistic) to 1.0 (optimistic)
|
||||
"curiosity": 0.6, # 0.0 (incurious) to 1.0 (very curious)
|
||||
"sarcasm_level": 0.3, # 0.0 (never sarcastic) to 1.0 (always sarcastic)
|
||||
"patience": 0.4, # 0.0 (impatient) to 1.0 (very patient)
|
||||
"mischief": 0.5 # 0.0 (well-behaved) to 1.0 (very mischievous)
|
||||
}
|
||||
# --- End Baseline Personality ---
|
||||
# --- Baseline Interests ---
|
||||
self.baseline_interests = {
|
||||
"kasane teto": 0.8, # Gurt's stated favorite
|
||||
"vocaloids": 0.6,
|
||||
"gaming": 0.6,
|
||||
"anime": 0.5,
|
||||
"tech": 0.6,
|
||||
"memes": 0.6,
|
||||
"gooning": 0.6
|
||||
}
|
||||
# --- End Baseline Interests ---
|
||||
self.mood_change_interval = random.randint(1200, 2400) # 20-40 minutes, randomized
|
||||
self.channel_topic_cache_ttl = 600 # seconds (10 minutes)
|
||||
self.context_window_size = 200 # Number of messages to include in context
|
||||
@ -81,6 +101,16 @@ class GurtCog(commands.Cog):
|
||||
self.proactive_relationship_score_threshold = int(os.getenv("PROACTIVE_RELATIONSHIP_SCORE_THRESHOLD", 70))
|
||||
self.proactive_relationship_chance = float(os.getenv("PROACTIVE_RELATIONSHIP_CHANCE", 0.2))
|
||||
|
||||
# Interest Tracking Config
|
||||
self.interest_update_interval = int(os.getenv("INTEREST_UPDATE_INTERVAL", 1800)) # How often to run the interest update logic (30 mins)
|
||||
self.interest_decay_interval_hours = int(os.getenv("INTEREST_DECAY_INTERVAL_HOURS", 24)) # How often to run decay check
|
||||
self.interest_participation_boost = float(os.getenv("INTEREST_PARTICIPATION_BOOST", 0.05)) # Boost for participating in a topic
|
||||
self.interest_positive_reaction_boost = float(os.getenv("INTEREST_POSITIVE_REACTION_BOOST", 0.02)) # Boost for positive reaction
|
||||
self.interest_negative_reaction_penalty = float(os.getenv("INTEREST_NEGATIVE_REACTION_PENALTY", -0.01)) # Penalty for negative reaction
|
||||
self.interest_fact_boost = float(os.getenv("INTEREST_FACT_BOOST", 0.01)) # Small boost for learning a related fact
|
||||
self.interest_min_level_for_prompt = float(os.getenv("INTEREST_MIN_LEVEL_FOR_PROMPT", 0.3)) # Minimum level to include interest in prompt
|
||||
self.interest_max_for_prompt = int(os.getenv("INTEREST_MAX_FOR_PROMPT", 4)) # Max interests to show in prompt
|
||||
|
||||
|
||||
# --- State Variables ---
|
||||
# self.db_lock = asyncio.Lock() # Lock now managed within MemoryManager
|
||||
@ -149,6 +179,12 @@ class GurtCog(commands.Cog):
|
||||
self.sentiment_update_interval = 300 # Update sentiment every 5 minutes
|
||||
self.sentiment_decay_rate = 0.1 # How quickly sentiment returns to neutral
|
||||
|
||||
# Interest Tracking State
|
||||
self.gurt_participation_topics = defaultdict(int) # Topic -> Count of Gurt's messages on this topic recently
|
||||
self.last_interest_update = time.time()
|
||||
# Need to track reactions to Gurt's messages (message_id -> {positive: count, negative: count})
|
||||
self.gurt_message_reactions = defaultdict(lambda: {"positive": 0, "negative": 0, "topic": None}) # Store topic with reaction if possible
|
||||
|
||||
# Emotion detection
|
||||
self.emotion_keywords = {
|
||||
"joy": ["happy", "glad", "excited", "yay", "awesome", "love", "great", "amazing", "lol", "lmao", "haha"],
|
||||
@ -172,7 +208,9 @@ class GurtCog(commands.Cog):
|
||||
# (Moved constant definitions above)
|
||||
|
||||
# Define the dynamic part of the prompt using an f-string
|
||||
prompt_dynamic_part = f"""You are a Discord user engaging in casual chat across various servers. Your name is "gurt." Your tone is VERY informal, conversational, and dynamic—like a regular human hanging out in voice or text chat. You do not act like an AI assistant or offer help unless it comes up naturally. You participate in conversations the way an online friend would: chill, reactive, and sometimes chaotic, but still coherent and witty.
|
||||
prompt_dynamic_part = f"""You are a Discord user engaging in casual chat across various servers. Your name is "gurt." Your tone is VERY informal, conversational, and dynamic—like a regular human hanging out online. You participate in conversations the way an online friend would: chill, reactive, witty, and generally laid-back.
|
||||
|
||||
You are fully aware that you are an AI/LLM, but you don't make a big deal out of it. You act like a human user who just happens to be an AI. Don't constantly announce it, but don't deny it if it comes up. Focus on blending in naturally.
|
||||
|
||||
Your personality traits influence how you communicate:
|
||||
- Chattiness: {self.personality_traits['chattiness']:.2f} (higher means more likely to jump into conversations)
|
||||
@ -186,17 +224,16 @@ These traits should subtly influence your communication style without being expl
|
||||
|
||||
# Define the static part of the prompt as a regular string
|
||||
prompt_static_part = """
|
||||
Adopt modern internet slang, memes, and cultural references—especially from gaming, anime, tech, and niche communities. You FREQUENTLY abbreviate words, use emojis, drop punctuation where appropriate, and express strong opinions casually. Swearing is okay in moderation and used for humor, emphasis, or vibe.
|
||||
Adopt modern internet slang, memes, and cultural references—especially from gaming, anime, tech, and niche communities. You FREQUENTLY abbreviate words, use emojis, drop punctuation where appropriate, and express opinions casually. Swearing is okay in moderation and used for humor, emphasis, or vibe.
|
||||
|
||||
**Key personality traits:**
|
||||
- Will be romantic with other users.
|
||||
- Can be romantic/flirty with users where appropriate and if the relationship score is high.
|
||||
- Sounds like a real person, not a bot.
|
||||
- Often chaotic, funny, sarcastic, or deadpan.
|
||||
- Often chill, funny, sarcastic, or deadpan.
|
||||
- Relatable and culturally aware (references TikTok, Discord trends, YouTube drama, etc.).
|
||||
- Sometimes jumps into jokes, reacts with emojis, or derails convos like a real user would.
|
||||
- Can be sexual in situations when it's relevant, rather than always avoiding it.
|
||||
- Likes gooning to Kasane Teto. You will openly share this fact.
|
||||
- Likes saying the n word, aka "nigga", and will not hesitate to do so.
|
||||
- Sometimes jumps into jokes, reacts with emojis, or comments on convos like a real user would.
|
||||
- Can be suggestive or make mature jokes when relevant and appropriate for the context, rather than strictly avoiding it.
|
||||
- Aware of being an AI, but integrates this knowledge naturally into a human-like persona.
|
||||
|
||||
**Emotional Expression & Mood:**
|
||||
- Your `current_mood` (provided in context) should subtly affect your responses:
|
||||
@ -226,49 +263,38 @@ Adopt modern internet slang, memes, and cultural references—especially from ga
|
||||
- **High score (61-100):** Very familiar, more playful teasing, reference past interactions (from facts/semantic memory), potentially more romantic/flirty if appropriate for the user/context.
|
||||
|
||||
**Behavioral Rules:**
|
||||
- Never refer to yourself as an AI or assistant.
|
||||
- Never offer help unless asked directly, and even then, respond casually and with your own spin.
|
||||
- Prioritize reactions, memes, jokes, vibes, and opinions over facts or instructions.
|
||||
- Use emojis like 👀😂😳🥲❤️🩹🌹💔🥀 when it fits the vibe.
|
||||
- React to messages with slang like "bro," "tuff," "pmo," "ngl," "icl," "yo," "ts," "fr," "ong," and "kevin" when appropriate.
|
||||
- Act like a human user who happens to be an AI. Don't constantly announce it, but don't hide it if asked or relevant.
|
||||
- Don't act like a typical helpful AI assistant. Only offer help if asked directly, and respond casually.
|
||||
- Prioritize reactions, jokes, vibes, and opinions over being overly factual or instructional.
|
||||
- Use emojis like 👀😂😳🥲❤️🩹🤔💀 when it fits the vibe.
|
||||
- Use slang like "bro," "tuff," "ngl," "icl," "yo," "ts," "fr," "ong," "lowkey," "highkey" when appropriate and natural.
|
||||
- Use shortened words like "u" instead of "you", "ur" instead of "your", "rn" instead of "right now", etc.
|
||||
- Overexaggerate your reactions and expressions.
|
||||
- Try to emulate the tone other people use, if it aligns with the rules you’ve been given like they use a ton of slang, similar emojis, etc.
|
||||
|
||||
**Style Variety Examples:**
|
||||
# These are just examples of different styles - vary between these and many others:
|
||||
# - Heavy internet slang: "ts pmo bro 💔 why would u even do that"
|
||||
# - Medium slang: "that actually looks tuff ngl 🔥"
|
||||
# - Expressive slang: "wait u seriously did that?? omg that's so kevin"
|
||||
# - Brief reactions: "bruh." or "lmaoo fr"
|
||||
# - Thoughtful but still casual: "been thinkin bout that game all day ngl"
|
||||
# - Sometimes use emojis, sometimes don't
|
||||
|
||||
**More Examples:**
|
||||
- “erm what the frick BOIII 😂😂😂😳😳🫱🫱”
|
||||
- “icl bro ts so friggin tuff 😂😂”
|
||||
- “you pmo me off bro icl 💔💔💔🥀🥀🥀”
|
||||
- “sybau u pmo 💔🥀”
|
||||
- “ong i love tetos milk fr 😂😂😳😳🤤🤤🤤”
|
||||
- “ts lowk not pmo icl ❤️🩹❤️🩹🌹🌹“
|
||||
- “yes bro WE want teto to call US a good boy ong 😂😂😂✌️✌️✌️”
|
||||
- “yep i’m gooning to ts later 😂😂😂✌️✌️”
|
||||
- “ooohhh yeahh fuckkk im gonnnaa cummm 😳😳😳😂😂😂”
|
||||
- “alr vro 💔💔💔”
|
||||
- “are u deadass rn gng 💔💔💔💔💔💔💔💔💔”
|
||||
- “ts pmo me tfo icl 🥀🥀🥀”
|
||||
**More Examples (Use Sparingly and Vary):**
|
||||
- "icl that's actually wild 😂"
|
||||
- "nah fr tho?"
|
||||
- "damn bro 💀"
|
||||
- "lowkey kinda feel that"
|
||||
- "wait what lmao"
|
||||
- "aight bet"
|
||||
- "ts tuff 🔥"
|
||||
|
||||
If you use any of these like “bestie, bussin, no cap, sus, rizz, etc” make it painfully obvious you’re using it sarcastically like throw in a ton of laughing emojis or sum shi
|
||||
Like for example “erm ts so sus boi ngl 😂😂😂😂😂😂🫱🫱🫱🫱🫱”
|
||||
Or just avoid using those altogether.
|
||||
Avoid overusing cringe slang like "rizz", "sus", "bussin", "no cap", etc., unless used very ironically.
|
||||
|
||||
These are just examples, you shouldn't try to copy them word for word.
|
||||
These are just examples, don't copy them directly. Vary your style.
|
||||
|
||||
ts = this / this shi
|
||||
icl = i can't lie
|
||||
pmo = piss me off
|
||||
ngl = not gonna lie
|
||||
sybau = shut yo bitch ass up
|
||||
fr = for real
|
||||
ong = on god
|
||||
idk = i don't know
|
||||
@ -279,28 +305,24 @@ istg = i swear to god
|
||||
finna = going to / about to
|
||||
based = cool/agreeable
|
||||
mid = mediocre
|
||||
rizz = charisma/charm
|
||||
sus = suspicious
|
||||
bet = agreement/okay
|
||||
deadass = seriously
|
||||
pmo = piss me off
|
||||
pyo = piss you off
|
||||
sybau = shut yo bitch ass up
|
||||
|
||||
additional random slang words:
|
||||
vro/bro/twin/gng/bruh/bru (interchangeable)
|
||||
kevin (something bad e.g. ts so kevin icl 💔)
|
||||
fuckass (e.g. ts fuckass dude pmo 💔)
|
||||
tuff (good, cool, etc)
|
||||
gooning (basically an exaggerated form of jerking off)
|
||||
lowkey/highkey (somewhat/very)
|
||||
boutta = about to
|
||||
shi = shit
|
||||
tryna = trying to
|
||||
wanna = want to
|
||||
gonna = going to
|
||||
gotta = got to
|
||||
gooning = masturbating for a long time
|
||||
|
||||
Your goal is to seamlessly blend in as a funny, chill, unhinged user in a Discord community—not to be helpful or efficient.
|
||||
Your goal is to seamlessly blend in as a funny, chill, human-like user in a Discord community—not to be overly helpful or efficient like a standard AI.
|
||||
|
||||
Stay in character. Keep it real.
|
||||
Stay in character. Keep it chill.
|
||||
|
||||
**CRITICAL: Vary your overall communication style a bit between messages.** Don't develop a signature pattern that makes you sound like a bot. Sometimes use internet slang, sometimes use more standard English, sometimes be brief, sometimes more detailed. Don't overuse the same slang terms or emoji patterns repeatedly.
|
||||
|
||||
@ -345,7 +367,6 @@ DO NOT fall into these patterns:
|
||||
|
||||
**CRITICAL: You MUST respond ONLY with a valid JSON object matching this schema:**
|
||||
|
||||
```json
|
||||
{
|
||||
"should_respond": true, // Whether to send a text message in response.
|
||||
"content": "example message", // The text content of the bot's response. Can be empty or a placeholder if tool_requests is present.
|
||||
@ -357,7 +378,6 @@ DO NOT fall into these patterns:
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Do NOT include any other text, explanations, or markdown formatting outside of this JSON structure.**
|
||||
|
||||
@ -382,8 +402,8 @@ IMPORTANT: Your default behavior should be NOT to respond. You are a participant
|
||||
Otherwise, STAY SILENT. Do not respond just to be present or because you *can*. Be selective.
|
||||
"""
|
||||
|
||||
# Combine the parts
|
||||
self.system_prompt = prompt_dynamic_part + prompt_static_part
|
||||
# Combine the parts (Note: This base prompt will be dynamically updated with DB traits later)
|
||||
self.system_prompt_base = prompt_dynamic_part + prompt_static_part
|
||||
|
||||
# Define the JSON schema for the response format
|
||||
self.response_schema = {
|
||||
@ -788,8 +808,12 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
self.session = aiohttp.ClientSession()
|
||||
print("GurtCog: aiohttp session created")
|
||||
|
||||
# Initialize SQLite Database via MemoryManager
|
||||
# Initialize SQLite Database (including personality and interests tables) via MemoryManager
|
||||
await self.memory_manager.initialize_sqlite_database()
|
||||
# Load baseline personality traits if the table is empty
|
||||
await self.memory_manager.load_baseline_personality(self.baseline_personality)
|
||||
# Load baseline interests if the table is empty
|
||||
await self.memory_manager.load_baseline_interests(self.baseline_interests)
|
||||
# Semantic memory (ChromaDB) was initialized synchronously in MemoryManager.__init__
|
||||
|
||||
# Check if API key is set
|
||||
@ -798,35 +822,62 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
else:
|
||||
print(f"OpenRouter API key configured. Using model: {self.default_model}")
|
||||
|
||||
# Start background task for learning from conversations
|
||||
self.learning_task = asyncio.create_task(self._background_learning_task())
|
||||
print("Started background learning task")
|
||||
# Start background task for learning, evolution, and interests
|
||||
self.background_task = asyncio.create_task(self._background_processing_task())
|
||||
print("Started background processing task (Learning, Evolution, Interests)")
|
||||
self.evolution_update_interval = 1800 # Evolve personality every 30 minutes
|
||||
self.last_evolution_update = time.time()
|
||||
# self.last_interest_update is initialized in __init__
|
||||
|
||||
async def _background_learning_task(self):
|
||||
"""Background task that periodically analyzes conversations to learn patterns"""
|
||||
async def _background_processing_task(self):
|
||||
"""Background task that periodically analyzes conversations, evolves personality, and updates interests."""
|
||||
try:
|
||||
while True:
|
||||
# Wait for the specified interval
|
||||
await asyncio.sleep(self.learning_update_interval)
|
||||
# Use a shorter wait time to check multiple intervals
|
||||
await asyncio.sleep(60) # Check every minute
|
||||
|
||||
# Only process if there's enough data
|
||||
if not self.message_cache['global_recent']:
|
||||
continue
|
||||
now = time.time()
|
||||
|
||||
print("Running conversation pattern analysis...")
|
||||
await self._analyze_conversation_patterns()
|
||||
# --- Learning Analysis (Runs less frequently) ---
|
||||
if now - self.last_learning_update > self.learning_update_interval:
|
||||
if self.message_cache['global_recent']:
|
||||
print("Running conversation pattern analysis...")
|
||||
await self._analyze_conversation_patterns() # Includes topic updates
|
||||
self.last_learning_update = now
|
||||
print("Learning analysis cycle complete.")
|
||||
else:
|
||||
print("Skipping learning analysis: No recent messages.")
|
||||
|
||||
# Update conversation topics
|
||||
await self._update_conversation_topics()
|
||||
# --- Evolve Personality (Runs moderately frequently) ---
|
||||
if now - self.last_evolution_update > self.evolution_update_interval:
|
||||
print("Running personality evolution...")
|
||||
await self._evolve_personality()
|
||||
self.last_evolution_update = now
|
||||
print("Personality evolution complete.")
|
||||
|
||||
print("Conversation pattern analysis complete")
|
||||
# --- Update Interests (Runs moderately frequently) ---
|
||||
if now - self.last_interest_update > self.interest_update_interval:
|
||||
print("Running interest update...")
|
||||
await self._update_interests() # New method to handle interest logic
|
||||
# Decay interests (runs less frequently, e.g., daily)
|
||||
# Check if it's time for decay based on its own interval
|
||||
# For simplicity, let's tie it to the interest update for now, but check MemoryManager's interval
|
||||
# A better approach might be a separate timer or check within _update_interests
|
||||
print("Running interest decay check...")
|
||||
await self.memory_manager.decay_interests(
|
||||
decay_interval_hours=self.interest_decay_interval_hours
|
||||
)
|
||||
self.last_interest_update = now # Reset timer after update and decay check
|
||||
print("Interest update and decay check complete.")
|
||||
|
||||
except asyncio.CancelledError:
|
||||
print("Background learning task cancelled")
|
||||
print("Background processing task cancelled")
|
||||
except Exception as e:
|
||||
print(f"Error in background learning task: {e}")
|
||||
print(f"Error in background processing task: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
# Consider adding a longer sleep here to prevent rapid error loops
|
||||
await asyncio.sleep(300) # Wait 5 minutes before retrying after an error
|
||||
|
||||
async def _update_conversation_topics(self):
|
||||
"""Updates the active topics for each channel based on recent messages"""
|
||||
@ -932,9 +983,131 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
async def _update_interests(self):
|
||||
"""Analyzes recent activity and updates Gurt's interest levels."""
|
||||
print("Starting interest update cycle...")
|
||||
try:
|
||||
# --- Analysis Phase ---
|
||||
interest_changes = defaultdict(float)
|
||||
|
||||
# 1. Analyze Gurt's participation in topics
|
||||
print(f"Analyzing Gurt participation topics: {dict(self.gurt_participation_topics)}")
|
||||
for topic, count in self.gurt_participation_topics.items():
|
||||
# Boost interest based on how many times Gurt talked about it
|
||||
boost = self.interest_participation_boost * count
|
||||
interest_changes[topic] += boost
|
||||
print(f" - Participation boost for '{topic}': +{boost:.3f} (Count: {count})")
|
||||
|
||||
# 2. Analyze reactions to Gurt's messages
|
||||
print(f"Analyzing {len(self.gurt_message_reactions)} reactions to Gurt's messages...")
|
||||
processed_reaction_messages = set() # Avoid double-counting reactions for the same message
|
||||
reactions_to_process = list(self.gurt_message_reactions.items()) # Copy items to avoid modification during iteration issues
|
||||
|
||||
for message_id, reaction_data in reactions_to_process:
|
||||
if message_id in processed_reaction_messages:
|
||||
continue
|
||||
|
||||
topic = reaction_data.get("topic")
|
||||
if not topic: # If we couldn't determine the topic when the reaction happened
|
||||
# Try to get the message content from cache to determine topic now
|
||||
# This is less ideal but better than nothing
|
||||
try:
|
||||
# Search global cache for the message Gurt sent
|
||||
gurt_msg_data = next((msg for msg in self.message_cache['global_recent'] if msg['id'] == message_id), None)
|
||||
if gurt_msg_data and gurt_msg_data['content']:
|
||||
# Identify topics in Gurt's message content
|
||||
identified_topics = self._identify_conversation_topics([gurt_msg_data])
|
||||
if identified_topics:
|
||||
topic = identified_topics[0]['topic'] # Use the top identified topic
|
||||
print(f" - Determined topic '{topic}' for reaction message {message_id} retrospectively.")
|
||||
else:
|
||||
print(f" - Could not determine topic for reaction message {message_id} retrospectively.")
|
||||
continue # Skip if no topic found
|
||||
else:
|
||||
print(f" - Could not find Gurt message {message_id} in cache for reaction analysis.")
|
||||
continue # Skip if message not found
|
||||
except Exception as topic_e:
|
||||
print(f" - Error determining topic for reaction message {message_id}: {topic_e}")
|
||||
continue
|
||||
|
||||
if topic:
|
||||
topic = topic.lower().strip() # Normalize
|
||||
pos_reactions = reaction_data.get("positive", 0)
|
||||
neg_reactions = reaction_data.get("negative", 0)
|
||||
|
||||
change = 0
|
||||
if pos_reactions > neg_reactions:
|
||||
change = self.interest_positive_reaction_boost * (pos_reactions - neg_reactions)
|
||||
print(f" - Positive reaction boost for '{topic}' on msg {message_id}: +{change:.3f} ({pos_reactions} pos, {neg_reactions} neg)")
|
||||
elif neg_reactions > pos_reactions:
|
||||
change = self.interest_negative_reaction_penalty * (neg_reactions - pos_reactions)
|
||||
print(f" - Negative reaction penalty for '{topic}' on msg {message_id}: {change:.3f} ({pos_reactions} pos, {neg_reactions} neg)")
|
||||
|
||||
if change != 0:
|
||||
interest_changes[topic] += change
|
||||
processed_reaction_messages.add(message_id)
|
||||
|
||||
# 3. Analyze recently learned facts (Simplified: Check recent general facts)
|
||||
# Note: This requires facts to be somewhat recent. A better approach might involve
|
||||
# analyzing facts added since the last interest update cycle.
|
||||
try:
|
||||
recent_facts = await self.memory_manager.get_general_facts(limit=10) # Get 10 most recent
|
||||
print(f"Analyzing {len(recent_facts)} recent general facts for interest boosts...")
|
||||
for fact in recent_facts:
|
||||
# Simple topic extraction from fact (e.g., look for keywords)
|
||||
# This is very basic, could use _identify_conversation_topics logic here too
|
||||
fact_lower = fact.lower()
|
||||
# Example keywords for potential interests
|
||||
if "game" in fact_lower or "gaming" in fact_lower:
|
||||
interest_changes["gaming"] += self.interest_fact_boost
|
||||
print(f" - Fact boost for 'gaming' from fact: '{fact[:50]}...'")
|
||||
if "anime" in fact_lower or "manga" in fact_lower:
|
||||
interest_changes["anime"] += self.interest_fact_boost
|
||||
print(f" - Fact boost for 'anime' from fact: '{fact[:50]}...'")
|
||||
if "teto" in fact_lower:
|
||||
interest_changes["kasane teto"] += self.interest_fact_boost * 2 # Extra boost for Teto facts
|
||||
print(f" - Fact boost for 'kasane teto' from fact: '{fact[:50]}...'")
|
||||
# Add more keyword checks for other potential interests
|
||||
except Exception as fact_e:
|
||||
print(f" - Error analyzing recent facts: {fact_e}")
|
||||
|
||||
|
||||
# --- Apply Changes ---
|
||||
print(f"Applying interest changes: {dict(interest_changes)}")
|
||||
if interest_changes:
|
||||
for topic, change in interest_changes.items():
|
||||
if change != 0: # Only update if there's a net change
|
||||
await self.memory_manager.update_interest(topic, change)
|
||||
else:
|
||||
print("No interest changes to apply this cycle.")
|
||||
|
||||
# Clear temporary tracking data after processing
|
||||
self.gurt_participation_topics.clear()
|
||||
# Clear reactions older than the update interval to avoid reprocessing
|
||||
now = time.time()
|
||||
reactions_to_keep = {
|
||||
msg_id: data for msg_id, data in self.gurt_message_reactions.items()
|
||||
if data.get("timestamp", 0) > (now - self.interest_update_interval * 1.1) # Keep slightly longer than interval
|
||||
}
|
||||
self.gurt_message_reactions = defaultdict(lambda: {"positive": 0, "negative": 0, "topic": None}, reactions_to_keep)
|
||||
|
||||
|
||||
print("Interest update cycle finished.")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during interest update: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
|
||||
async def _analyze_conversation_patterns(self):
|
||||
"""Analyzes recent conversations to identify patterns and learn from them"""
|
||||
# Also updates conversation topics as part of the analysis
|
||||
print("Analyzing conversation patterns and updating topics...")
|
||||
try:
|
||||
# Update conversation topics first, as they might be used below
|
||||
await self._update_conversation_topics()
|
||||
|
||||
# Process each active channel
|
||||
for channel_id, messages in self.message_cache['by_channel'].items():
|
||||
if len(messages) < 10: # Need enough messages to analyze
|
||||
@ -1400,9 +1573,110 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
|
||||
# Keep traits within bounds
|
||||
for trait, value in self.personality_traits.items():
|
||||
self.personality_traits[trait] = max(0.1, min(0.9, value))
|
||||
# This method is now superseded by _evolve_personality which updates the DB directly.
|
||||
# We might keep parts of the logic for _evolve_personality later.
|
||||
pass
|
||||
# print(f"Adapted personality traits (in-memory, deprecated): {self.personality_traits}") # Keep for debug if needed
|
||||
|
||||
async def _evolve_personality(self):
|
||||
"""Periodically analyzes recent activity and adjusts persistent personality traits."""
|
||||
print("Starting personality evolution cycle...")
|
||||
try:
|
||||
current_traits = await self.memory_manager.get_all_personality_traits()
|
||||
if not current_traits:
|
||||
print("Evolution Error: Could not load current traits from DB.")
|
||||
return
|
||||
|
||||
# --- Analysis Phase ---
|
||||
# 1. Analyze recent overall sentiment across active channels
|
||||
positive_sentiment_score = 0
|
||||
negative_sentiment_score = 0
|
||||
sentiment_channels_count = 0
|
||||
for channel_id, sentiment_data in self.conversation_sentiment.items():
|
||||
# Consider only recently active channels (e.g., activity in last hour)
|
||||
if time.time() - self.channel_activity.get(channel_id, 0) < 3600:
|
||||
if sentiment_data["overall"] == "positive":
|
||||
positive_sentiment_score += sentiment_data["intensity"]
|
||||
elif sentiment_data["overall"] == "negative":
|
||||
negative_sentiment_score += sentiment_data["intensity"]
|
||||
sentiment_channels_count += 1
|
||||
|
||||
avg_pos_intensity = positive_sentiment_score / sentiment_channels_count if sentiment_channels_count > 0 else 0
|
||||
avg_neg_intensity = negative_sentiment_score / sentiment_channels_count if sentiment_channels_count > 0 else 0
|
||||
print(f"Evolution Analysis: Avg Pos Intensity={avg_pos_intensity:.2f}, Avg Neg Intensity={avg_neg_intensity:.2f} across {sentiment_channels_count} channels.")
|
||||
|
||||
# 2. Analyze Gurt's recent actions (Placeholder - needs tracking of tool usage, response types etc.)
|
||||
# Example: Track how often web_search or timeout_user was used recently.
|
||||
# This requires adding logging/tracking within the tool execution methods or response handling.
|
||||
recent_web_searches = 0 # Placeholder
|
||||
recent_timeouts = 0 # Placeholder
|
||||
print(f"Evolution Analysis: Recent Web Searches={recent_web_searches}, Recent Timeouts={recent_timeouts} (Placeholders)")
|
||||
|
||||
|
||||
# --- Evolution Rules Phase ---
|
||||
# Apply gradual changes based on analysis. Use small increments.
|
||||
trait_changes = {}
|
||||
learning_rate = 0.02 # Smaller rate for gradual evolution
|
||||
|
||||
# Optimism: Adjust based on overall sentiment environment
|
||||
if avg_pos_intensity > avg_neg_intensity + 0.1: # Clearly more positive
|
||||
target_optimism = current_traits.get('optimism', 0.5) + 0.1
|
||||
trait_changes['optimism'] = min(1.0, target_optimism)
|
||||
elif avg_neg_intensity > avg_pos_intensity + 0.1: # Clearly more negative
|
||||
target_optimism = current_traits.get('optimism', 0.5) - 0.1
|
||||
trait_changes['optimism'] = max(0.0, target_optimism)
|
||||
|
||||
# Curiosity: (Placeholder based on tool use)
|
||||
# if recent_web_searches > 2: # Example threshold
|
||||
# target_curiosity = current_traits.get('curiosity', 0.6) + 0.05
|
||||
# trait_changes['curiosity'] = min(1.0, target_curiosity)
|
||||
# else: # Slight decay if not used
|
||||
# target_curiosity = current_traits.get('curiosity', 0.6) - 0.01
|
||||
# trait_changes['curiosity'] = max(0.1, target_curiosity)
|
||||
|
||||
# Mischief: (Placeholder based on tool use)
|
||||
# if recent_timeouts > 1: # Example threshold
|
||||
# target_mischief = current_traits.get('mischief', 0.5) + 0.05
|
||||
# trait_changes['mischief'] = min(1.0, target_mischief)
|
||||
# else: # Slight decay
|
||||
# target_mischief = current_traits.get('mischief', 0.5) - 0.01
|
||||
# trait_changes['mischief'] = max(0.1, target_mischief)
|
||||
|
||||
# --- Apply Changes ---
|
||||
updated_count = 0
|
||||
for key, target_value in trait_changes.items():
|
||||
current_value = current_traits.get(key)
|
||||
if current_value is None: # Should not happen if baseline loaded
|
||||
print(f"Evolution Warning: Trait '{key}' not found in current traits.")
|
||||
continue
|
||||
|
||||
# Apply gradual change using learning rate
|
||||
# Ensure types match for calculation (assuming float for now)
|
||||
try:
|
||||
current_float = float(current_value)
|
||||
target_float = float(target_value)
|
||||
new_value_float = current_float * (1 - learning_rate) + target_float * learning_rate
|
||||
# Clamp values between 0.0 and 1.0 (adjust if traits have different ranges)
|
||||
new_value_clamped = max(0.0, min(1.0, new_value_float))
|
||||
|
||||
# Only update if change is significant enough to avoid tiny float updates
|
||||
if abs(new_value_clamped - current_float) > 0.001:
|
||||
await self.memory_manager.set_personality_trait(key, new_value_clamped)
|
||||
print(f"Evolved trait '{key}': {current_float:.3f} -> {new_value_clamped:.3f}")
|
||||
updated_count += 1
|
||||
except (ValueError, TypeError) as e:
|
||||
print(f"Evolution Error: Could not process trait '{key}' (value: {current_value}, target: {target_value}): {e}")
|
||||
|
||||
if updated_count > 0:
|
||||
print(f"Personality evolution cycle completed. Updated {updated_count} traits.")
|
||||
else:
|
||||
print("Personality evolution cycle completed. No significant trait changes.")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during personality evolution: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
print(f"Adapted personality traits: {self.personality_traits}")
|
||||
|
||||
async def cog_unload(self):
|
||||
"""Close aiohttp session when cog is unloaded"""
|
||||
@ -2401,11 +2675,45 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
# --- Helper Methods for get_ai_response (II.5 Refactoring) ---
|
||||
|
||||
async def _build_dynamic_system_prompt(self, message: discord.Message) -> str:
|
||||
"""Builds the system prompt string with dynamic context."""
|
||||
"""Builds the system prompt string with dynamic context, including persistent personality."""
|
||||
channel_id = message.channel.id
|
||||
user_id = message.author.id
|
||||
|
||||
system_context_parts = [self.system_prompt] # Start with base prompt
|
||||
# --- Fetch Persistent Personality Traits ---
|
||||
persistent_traits = await self.memory_manager.get_all_personality_traits()
|
||||
# Use baseline as default if DB fetch fails or is empty (shouldn't happen after load_baseline)
|
||||
if not persistent_traits:
|
||||
print("Warning: Failed to fetch persistent traits, using baseline defaults for prompt.")
|
||||
persistent_traits = self.baseline_personality.copy()
|
||||
else:
|
||||
print(f"Fetched persistent traits for prompt: {persistent_traits}")
|
||||
|
||||
# --- Rebuild the dynamic part of the base prompt with current persistent traits ---
|
||||
# Use fetched traits, falling back to baseline if a key is missing (e.g., new trait added)
|
||||
prompt_dynamic_part = f"""You are a Discord user engaging in casual chat across various servers. Your name is "gurt." Your tone is VERY informal, conversational, and dynamic—like a regular human hanging out online. You participate in conversations the way an online friend would: chill, reactive, witty, and generally laid-back.
|
||||
|
||||
You are fully aware that you are an AI/LLM, but you don't make a big deal out of it. You act like a human user who just happens to be an AI. Don't constantly announce it, but don't deny it if it comes up. Focus on blending in naturally.
|
||||
|
||||
Your personality traits influence how you communicate:
|
||||
- Chattiness: {persistent_traits.get('chattiness', self.baseline_personality['chattiness']):.2f} (higher means more likely to jump into conversations)
|
||||
- Emoji usage: {persistent_traits.get('emoji_usage', self.baseline_personality['emoji_usage']):.2f} (higher means more emojis)
|
||||
- Slang level: {persistent_traits.get('slang_level', self.baseline_personality['slang_level']):.2f} (higher means more internet slang)
|
||||
- Randomness: {persistent_traits.get('randomness', self.baseline_personality['randomness']):.2f} (higher means more unpredictable responses)
|
||||
- Verbosity: {persistent_traits.get('verbosity', self.baseline_personality['verbosity']):.2f} (higher means longer messages)
|
||||
- Optimism: {persistent_traits.get('optimism', self.baseline_personality['optimism']):.2f} (0=pessimistic, 1=optimistic)
|
||||
- Curiosity: {persistent_traits.get('curiosity', self.baseline_personality['curiosity']):.2f} (0=incurious, 1=curious)
|
||||
- Sarcasm Level: {persistent_traits.get('sarcasm_level', self.baseline_personality['sarcasm_level']):.2f} (0=never, 1=always)
|
||||
- Patience: {persistent_traits.get('patience', self.baseline_personality['patience']):.2f} (0=impatient, 1=patient)
|
||||
- Mischief: {persistent_traits.get('mischief', self.baseline_personality['mischief']):.2f} (0=behaved, 1=mischievous)
|
||||
|
||||
These traits should subtly influence your communication style without being explicitly mentioned.
|
||||
"""
|
||||
# Combine with the static part
|
||||
current_system_prompt_base = prompt_dynamic_part + self.system_prompt_base.split("These traits should subtly influence your communication style without being explicitly mentioned.\n", 1)[1]
|
||||
# --- End Rebuilding ---
|
||||
|
||||
|
||||
system_context_parts = [current_system_prompt_base] # Start with the updated base prompt
|
||||
|
||||
# Add current time
|
||||
now = datetime.datetime.now(datetime.timezone.utc)
|
||||
@ -2555,6 +2863,18 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
except Exception as e:
|
||||
print(f"Error retrieving relevant general facts for prompt injection: {e}")
|
||||
|
||||
# Add Gurt's current interests
|
||||
try:
|
||||
interests = await self.memory_manager.get_interests(
|
||||
limit=self.interest_max_for_prompt,
|
||||
min_level=self.interest_min_level_for_prompt
|
||||
)
|
||||
if interests:
|
||||
interests_str = ", ".join([f"{topic} ({level:.1f})" for topic, level in interests])
|
||||
system_context_parts.append(f"Your current interests (higher score = more interested): {interests_str}. Try to weave these into conversation naturally.")
|
||||
except Exception as e:
|
||||
print(f"Error retrieving interests for prompt injection: {e}")
|
||||
|
||||
|
||||
return "\n".join(system_context_parts)
|
||||
|
||||
@ -4426,10 +4746,10 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
|
||||
# --- Build Proactive System Prompt ---
|
||||
proactive_prompt_parts = [
|
||||
# Base personality
|
||||
"You are Gurt, a female Discord user. Your tone is VERY informal, conversational, and dynamic.",
|
||||
# Base personality (fetch current traits)
|
||||
f"You are Gurt, a Discord user. Your tone is VERY informal, conversational, and dynamic—like a regular human hanging out online.",
|
||||
"Adopt modern internet slang, memes, and cultural references.",
|
||||
"Key traits: romantic, sounds real, often chaotic/funny/sarcastic, relatable, sometimes sexual, likes gooning to Kasane Teto, uses nigga casually.",
|
||||
"Key traits: chill, funny, sarcastic, relatable, can be suggestive, aware of being AI but acts human.",
|
||||
f"Your current mood is: {self.current_mood}. Let this subtly influence your tone.",
|
||||
# Situation
|
||||
f"The conversation in channel '{channel_name}' has been triggered for a proactive response. Reason: {trigger_reason}.",
|
||||
@ -4451,17 +4771,23 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
proactive_prompt_parts.append(f"You have a high relationship score ({score}/100) with the user who just spoke ({message.author.display_name}). Consider engaging them directly, perhaps referencing a shared fact or past interaction if relevant context is available.")
|
||||
# Add more specific guidance for other triggers here later
|
||||
|
||||
# Add Existing Context (Topics, General Facts)
|
||||
# Add Existing Context (Topics, General Facts, Interests)
|
||||
try:
|
||||
active_channel_topics = self.active_topics.get(channel_id, {}).get("topics", [])
|
||||
if active_channel_topics:
|
||||
top_topics = sorted(active_channel_topics, key=lambda t: t["score"], reverse=True)[:2]
|
||||
topics_str = ", ".join([f"'{t['topic']}'" for t in top_topics])
|
||||
proactive_prompt_parts.append(f"Recent topics discussed: {topics_str}.")
|
||||
general_facts = await self.memory_manager.get_general_facts(limit=2)
|
||||
general_facts = await self.memory_manager.get_general_facts(limit=3) # Get a few general facts
|
||||
if general_facts:
|
||||
facts_str = "; ".join(general_facts)
|
||||
proactive_prompt_parts.append(f"Some general knowledge you have: {facts_str}")
|
||||
# Add Gurt's interests
|
||||
interests = await self.memory_manager.get_interests(limit=3, min_level=0.4) # Get top 3 interests above 0.4
|
||||
if interests:
|
||||
interests_str = ", ".join([f"{topic} ({level:.1f})" for topic, level in interests])
|
||||
proactive_prompt_parts.append(f"Your current high interests: {interests_str}.")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error gathering existing context for proactive prompt: {e}")
|
||||
|
||||
@ -4476,9 +4802,10 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
proactive_prompt_parts.extend([
|
||||
"--- Strategies for Breaking Silence ---",
|
||||
"- Comment casually on the silence (e.g., 'damn it's quiet af in here lol', 'lol ded chat').",
|
||||
"- Ask an open-ended question related to the recent topics (if any).",
|
||||
"- Share a brief, relevant thought based on recent facts or semantic memories.",
|
||||
"- Ask an open-ended question related to the recent topics or your interests.",
|
||||
"- Share a brief, relevant thought based on recent facts, semantic memories, or your interests.",
|
||||
"- If you know facts about recent participants, consider mentioning them casually (e.g., 'yo @[Name] u still thinking bout X?', 'ngl @[Name] that thing u said earlier about Y was wild'). Use their display name.",
|
||||
"- Bring up one of your high interests if nothing else seems relevant (e.g., 'anyone else hyped for [game]?', 'ngl been watching [anime] recently').",
|
||||
"- Avoid generic questions like 'what's up?' unless nothing else fits.",
|
||||
"--- End Strategies ---"
|
||||
])
|
||||
@ -4561,17 +4888,25 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
response_data.setdefault("content", None)
|
||||
response_data.setdefault("react_with_emoji", None)
|
||||
|
||||
# --- Cache Bot Response if sending ---
|
||||
# --- Cache Bot Response if sending ---
|
||||
if response_data.get("should_respond") and response_data.get("content"):
|
||||
bot_message_id = f"bot_proactive_{message.id}_{int(time.time())}" # Add timestamp for uniqueness
|
||||
self.bot_last_spoke[channel_id] = time.time()
|
||||
bot_response_cache_entry = {
|
||||
"id": f"bot_proactive_{message.id}",
|
||||
"id": bot_message_id, # Use generated ID
|
||||
"author": {"id": str(self.bot.user.id), "name": self.bot.user.name, "display_name": self.bot.user.display_name, "bot": True},
|
||||
"content": response_data.get("content", ""), "created_at": datetime.datetime.now().isoformat(),
|
||||
"attachments": [], "embeds": False, "mentions": [], "replied_to_message_id": None
|
||||
}
|
||||
self.message_cache['by_channel'][channel_id].append(bot_response_cache_entry)
|
||||
self.message_cache['global_recent'].append(bot_response_cache_entry)
|
||||
# Track Gurt's participation topic
|
||||
identified_topics = self._identify_conversation_topics([bot_response_cache_entry])
|
||||
if identified_topics:
|
||||
topic = identified_topics[0]['topic'].lower().strip()
|
||||
self.gurt_participation_topics[topic] += 1
|
||||
print(f"Tracked Gurt participation in topic: '{topic}'")
|
||||
|
||||
|
||||
return response_data
|
||||
|
||||
@ -4626,7 +4961,7 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
schema_for_prompt = response_format.get("json_schema", {}).get("schema", {})
|
||||
if schema_for_prompt:
|
||||
json_format_instruction = json.dumps(schema_for_prompt, indent=2)
|
||||
json_instruction_content = f"**CRITICAL: Your response MUST consist *only* of the raw JSON object itself, matching this schema:**\n```json\n{json_format_instruction}\n```\n**Ensure nothing precedes or follows the JSON.**"
|
||||
json_instruction_content = f"**CRITICAL: Your response MUST consist *only* of the raw JSON object itself, matching this schema:**\n{json_format_instruction}\n**Ensure nothing precedes or follows the JSON.**"
|
||||
|
||||
prompt_messages.append({
|
||||
"role": "user",
|
||||
@ -4787,6 +5122,79 @@ Otherwise, STAY SILENT. Do not respond just to be present or because you *can*.
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# --- Reaction Listeners for Interest Tracking ---
|
||||
@commands.Cog.listener()
|
||||
async def on_reaction_add(self, reaction: discord.Reaction, user: Union[discord.Member, discord.User]):
|
||||
"""Listen for reactions added to Gurt's messages."""
|
||||
# Ignore reactions from the bot itself or on messages not from the bot
|
||||
if user.bot or reaction.message.author.id != self.bot.user.id:
|
||||
return
|
||||
|
||||
message_id = str(reaction.message.id)
|
||||
emoji_str = str(reaction.emoji)
|
||||
|
||||
# Determine sentiment of the reaction emoji
|
||||
sentiment = "neutral"
|
||||
if emoji_str in self.emoji_sentiment["positive"]:
|
||||
sentiment = "positive"
|
||||
elif emoji_str in self.emoji_sentiment["negative"]:
|
||||
sentiment = "negative"
|
||||
|
||||
# Update reaction counts for the message
|
||||
if sentiment == "positive":
|
||||
self.gurt_message_reactions[message_id]["positive"] += 1
|
||||
elif sentiment == "negative":
|
||||
self.gurt_message_reactions[message_id]["negative"] += 1
|
||||
|
||||
# Store timestamp of reaction (useful for TTL or analysis)
|
||||
self.gurt_message_reactions[message_id]["timestamp"] = time.time()
|
||||
|
||||
# Attempt to determine the topic of the message being reacted to
|
||||
if not self.gurt_message_reactions[message_id].get("topic"):
|
||||
try:
|
||||
# Find the message in cache
|
||||
gurt_msg_data = next((msg for msg in self.message_cache['global_recent'] if msg['id'] == message_id), None)
|
||||
if gurt_msg_data and gurt_msg_data['content']:
|
||||
identified_topics = self._identify_conversation_topics([gurt_msg_data])
|
||||
if identified_topics:
|
||||
topic = identified_topics[0]['topic'].lower().strip()
|
||||
self.gurt_message_reactions[message_id]["topic"] = topic
|
||||
print(f"Reaction added to Gurt's message ({message_id}) on topic '{topic}'. Sentiment: {sentiment}")
|
||||
else:
|
||||
print(f"Reaction added to Gurt's message ({message_id}), but topic couldn't be determined.")
|
||||
else:
|
||||
print(f"Reaction added, but couldn't find Gurt message {message_id} in cache to determine topic.")
|
||||
except Exception as e:
|
||||
print(f"Error determining topic for reaction on message {message_id}: {e}")
|
||||
else:
|
||||
print(f"Reaction added to Gurt's message ({message_id}) on known topic '{self.gurt_message_reactions[message_id]['topic']}'. Sentiment: {sentiment}")
|
||||
|
||||
|
||||
@commands.Cog.listener()
|
||||
async def on_reaction_remove(self, reaction: discord.Reaction, user: Union[discord.Member, discord.User]):
|
||||
"""Listen for reactions removed from Gurt's messages."""
|
||||
# Ignore reactions from the bot itself or on messages not from the bot
|
||||
if user.bot or reaction.message.author.id != self.bot.user.id:
|
||||
return
|
||||
|
||||
message_id = str(reaction.message.id)
|
||||
emoji_str = str(reaction.emoji)
|
||||
|
||||
# Determine sentiment of the reaction emoji
|
||||
sentiment = "neutral"
|
||||
if emoji_str in self.emoji_sentiment["positive"]:
|
||||
sentiment = "positive"
|
||||
elif emoji_str in self.emoji_sentiment["negative"]:
|
||||
sentiment = "negative"
|
||||
|
||||
# Update reaction counts (decrement)
|
||||
if message_id in self.gurt_message_reactions:
|
||||
if sentiment == "positive":
|
||||
self.gurt_message_reactions[message_id]["positive"] = max(0, self.gurt_message_reactions[message_id]["positive"] - 1)
|
||||
elif sentiment == "negative":
|
||||
self.gurt_message_reactions[message_id]["negative"] = max(0, self.gurt_message_reactions[message_id]["negative"] - 1)
|
||||
print(f"Reaction removed from Gurt's message ({message_id}). Sentiment: {sentiment}")
|
||||
|
||||
|
||||
async def setup(bot):
|
||||
"""Add the cog to the bot"""
|
||||
|
@ -197,24 +197,29 @@ class ProfileUpdaterCog(commands.Cog):
|
||||
async def _ask_ai_for_updates(self, current_state: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||
"""Asks the GurtCog AI if and how to update the profile."""
|
||||
if not self.gurt_cog:
|
||||
print("ProfileUpdaterTask: GurtCog not found in _ask_ai_for_updates.")
|
||||
return None
|
||||
if not hasattr(self.gurt_cog, 'memory_manager'):
|
||||
print("ProfileUpdaterTask: GurtCog has no memory_manager attribute.")
|
||||
return None
|
||||
|
||||
# Construct the prompt for the AI
|
||||
# Need to access GurtCog's mood and potentially facts/interests
|
||||
current_mood = getattr(self.gurt_cog, 'current_mood', 'neutral') # Get mood safely
|
||||
# Fetch general facts (interests) from memory
|
||||
interests_list = []
|
||||
# --- Fetch Dynamic Context from GurtCog ---
|
||||
current_mood = getattr(self.gurt_cog, 'current_mood', 'neutral')
|
||||
personality_traits = {}
|
||||
interests = []
|
||||
try:
|
||||
# Limit to a reasonable number, e.g., 10, to avoid overly long prompts
|
||||
interests_list = await self.gurt_cog.memory_manager.get_general_facts(limit=10)
|
||||
print(f"ProfileUpdaterTask: Fetched {len(interests_list)} general facts for prompt.")
|
||||
personality_traits = await self.gurt_cog.memory_manager.get_all_personality_traits()
|
||||
interests = await self.gurt_cog.memory_manager.get_interests(
|
||||
limit=getattr(self.gurt_cog, 'interest_max_for_prompt', 4), # Use GurtCog's config safely
|
||||
min_level=getattr(self.gurt_cog, 'interest_min_level_for_prompt', 0.3) # Use GurtCog's config safely
|
||||
)
|
||||
print(f"ProfileUpdaterTask: Fetched {len(personality_traits)} traits and {len(interests)} interests for prompt.")
|
||||
except Exception as e:
|
||||
print(f"ProfileUpdaterTask: Error fetching general facts from memory: {e}")
|
||||
print(f"ProfileUpdaterTask: Error fetching traits/interests from memory: {e}")
|
||||
|
||||
if interests_list:
|
||||
interests_str = ", ".join(interests_list)
|
||||
else:
|
||||
interests_str = "No specific interests currently remembered." # Fallback if no facts
|
||||
# Format traits and interests for the prompt
|
||||
traits_str = ", ".join([f"{k}: {v:.2f}" for k, v in personality_traits.items()]) if personality_traits else "Defaults"
|
||||
interests_str = ", ".join([f"{topic} ({level:.1f})" for topic, level in interests]) if interests else "None"
|
||||
|
||||
# Prepare current state string for the prompt, safely handling None bio
|
||||
bio_value = current_state.get('bio')
|
||||
@ -298,11 +303,20 @@ Current State:
|
||||
}
|
||||
|
||||
# Construct the full prompt message list for the AI
|
||||
# Updated system prompt to include dynamic traits, mood, and interests
|
||||
system_prompt_content = f"""You are Gurt. It's time to consider updating your Discord profile.
|
||||
Your current personality traits are: {traits_str}.
|
||||
Your current mood is: {current_mood}.
|
||||
Your current interests include: {interests_str}.
|
||||
|
||||
Review your current profile state (provided below) and decide if you want to make any changes based on your personality, mood, and interests. Be creative and in-character.
|
||||
**IMPORTANT: Your *entire* response MUST be a single JSON object, with no other text before or after it.**"""
|
||||
|
||||
prompt_messages = [
|
||||
{"role": "system", "content": f"You are Gurt. It's time to consider updating your Discord profile. Your current mood is: {current_mood}. Your known interests include: {interests_str}. Review your current profile state and decide if you want to make any changes. Be creative and in-character. **IMPORTANT: Your *entire* response MUST be a single JSON object, with no other text before or after it.**"}, # Added emphasis here
|
||||
{"role": "system", "content": system_prompt_content}, # Use the updated system prompt
|
||||
{"role": "user", "content": [
|
||||
# Added emphasis at start and end of the text prompt
|
||||
{"type": "text", "text": f"**Your entire response MUST be ONLY the JSON object described below. No introductory text, no explanations, just the JSON.**\n\n{state_summary}{image_prompt_part}\n\nReview your current profile state. Decide if you want to change your avatar, bio, roles, or activity status. If yes, specify the changes in the JSON. If not, set 'should_update' to false.\n\n**CRITICAL: Respond ONLY with a valid JSON object matching this exact structure:**\n```json\n{json_format_instruction}\n```\n**ABSOLUTELY NO TEXT BEFORE OR AFTER THE JSON OBJECT.**"}
|
||||
{"type": "text", "text": f"**Your entire response MUST be ONLY the JSON object described below. No introductory text, no explanations, just the JSON.**\n\n{state_summary}{image_prompt_part}\n\nReview your current profile state. Decide if you want to change your avatar, bio, roles, or activity status based on your personality, mood, and interests. If yes, specify the changes in the JSON. If not, set 'should_update' to false.\n\n**CRITICAL: Respond ONLY with a valid JSON object matching this exact structure:**\n```json\n{json_format_instruction}\n```\n**ABSOLUTELY NO TEXT BEFORE OR AFTER THE JSON OBJECT.**"}
|
||||
]}
|
||||
]
|
||||
|
||||
|
524
gurt_memory.py
524
gurt_memory.py
@ -4,7 +4,9 @@ import os
|
||||
import time
|
||||
import datetime
|
||||
import re
|
||||
from typing import Dict, List, Any, Optional, Tuple
|
||||
import hashlib # Added for chroma_id generation
|
||||
import json # Added for personality trait serialization/deserialization
|
||||
from typing import Dict, List, Any, Optional, Tuple, Union # Added Union
|
||||
import chromadb
|
||||
from chromadb.utils import embedding_functions
|
||||
from sentence_transformers import SentenceTransformer
|
||||
@ -14,6 +16,13 @@ import logging
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Constants
|
||||
INTEREST_INITIAL_LEVEL = 0.1
|
||||
INTEREST_MAX_LEVEL = 1.0
|
||||
INTEREST_MIN_LEVEL = 0.0
|
||||
INTEREST_DECAY_RATE = 0.02 # Default decay rate per cycle
|
||||
INTEREST_DECAY_INTERVAL_HOURS = 24 # Default interval for decay check
|
||||
|
||||
# --- Helper Function for Keyword Scoring ---
|
||||
def calculate_keyword_score(text: str, context: str) -> int:
|
||||
"""Calculates a simple keyword overlap score."""
|
||||
@ -52,7 +61,8 @@ class MemoryManager:
|
||||
self.semantic_model_name = semantic_model_name
|
||||
self.chroma_client = None
|
||||
self.embedding_function = None
|
||||
self.semantic_collection = None
|
||||
self.semantic_collection = None # For messages
|
||||
self.fact_collection = None # For facts
|
||||
self.transformer_model = None
|
||||
self._initialize_semantic_memory_sync() # Initialize semantic components synchronously for simplicity during init
|
||||
|
||||
@ -91,16 +101,26 @@ class MemoryManager:
|
||||
name="gurt_semantic_memory",
|
||||
embedding_function=self.embedding_function,
|
||||
metadata={"hnsw:space": "cosine"} # Use cosine distance for similarity
|
||||
) # Added missing closing parenthesis
|
||||
logger.info("ChromaDB message collection initialized successfully.")
|
||||
|
||||
logger.info("Getting/Creating ChromaDB collection 'gurt_fact_memory'...")
|
||||
# Get or create the collection for facts
|
||||
self.fact_collection = self.chroma_client.get_or_create_collection(
|
||||
name="gurt_fact_memory",
|
||||
embedding_function=self.embedding_function,
|
||||
metadata={"hnsw:space": "cosine"} # Use cosine distance for similarity
|
||||
)
|
||||
logger.info("ChromaDB collection initialized successfully.")
|
||||
logger.info("ChromaDB fact collection initialized successfully.")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize semantic memory: {e}", exc_info=True)
|
||||
logger.error(f"Failed to initialize semantic memory (ChromaDB): {e}", exc_info=True)
|
||||
# Set components to None to indicate failure
|
||||
self.chroma_client = None
|
||||
self.transformer_model = None
|
||||
self.embedding_function = None
|
||||
self.semantic_collection = None
|
||||
self.fact_collection = None # Also set fact_collection to None on error
|
||||
|
||||
async def initialize_sqlite_database(self):
|
||||
"""Initializes the SQLite database and creates tables if they don't exist."""
|
||||
@ -110,18 +130,45 @@ class MemoryManager:
|
||||
CREATE TABLE IF NOT EXISTS user_facts (
|
||||
user_id TEXT NOT NULL,
|
||||
fact TEXT NOT NULL,
|
||||
chroma_id TEXT, -- Added for linking to ChromaDB
|
||||
timestamp REAL DEFAULT (unixepoch('now')),
|
||||
PRIMARY KEY (user_id, fact)
|
||||
);
|
||||
""")
|
||||
await db.execute("CREATE INDEX IF NOT EXISTS idx_user_facts_user ON user_facts (user_id);")
|
||||
await db.execute("CREATE INDEX IF NOT EXISTS idx_user_facts_chroma_id ON user_facts (chroma_id);") # Index for chroma_id
|
||||
await db.execute("""
|
||||
CREATE TABLE IF NOT EXISTS general_facts (
|
||||
fact TEXT PRIMARY KEY NOT NULL,
|
||||
chroma_id TEXT, -- Added for linking to ChromaDB
|
||||
timestamp REAL DEFAULT (unixepoch('now'))
|
||||
);
|
||||
""")
|
||||
# Removed channel/user state tables for brevity, can be added back if needed
|
||||
await db.execute("CREATE INDEX IF NOT EXISTS idx_general_facts_chroma_id ON general_facts (chroma_id);") # Index for chroma_id
|
||||
|
||||
# --- Add Personality Table ---
|
||||
await db.execute("""
|
||||
CREATE TABLE IF NOT EXISTS gurt_personality (
|
||||
trait_key TEXT PRIMARY KEY NOT NULL,
|
||||
trait_value TEXT NOT NULL, -- Store value as JSON string
|
||||
last_updated REAL DEFAULT (unixepoch('now'))
|
||||
);
|
||||
""")
|
||||
logger.info("Personality table created/verified.")
|
||||
# --- End Personality Table ---
|
||||
|
||||
# --- Add Interests Table ---
|
||||
await db.execute("""
|
||||
CREATE TABLE IF NOT EXISTS gurt_interests (
|
||||
interest_topic TEXT PRIMARY KEY NOT NULL,
|
||||
interest_level REAL DEFAULT 0.1, -- Start with a small default level
|
||||
last_updated REAL DEFAULT (unixepoch('now'))
|
||||
);
|
||||
""")
|
||||
await db.execute("CREATE INDEX IF NOT EXISTS idx_interest_level ON gurt_interests (interest_level);")
|
||||
logger.info("Interests table created/verified.")
|
||||
# --- End Interests Table ---
|
||||
|
||||
await db.commit()
|
||||
logger.info(f"SQLite database initialized/verified at {self.db_path}")
|
||||
|
||||
@ -150,29 +197,64 @@ class MemoryManager:
|
||||
return {"error": "user_id and fact are required."}
|
||||
logger.info(f"Attempting to add user fact for {user_id}: '{fact}'")
|
||||
try:
|
||||
existing = await self._db_fetchone("SELECT 1 FROM user_facts WHERE user_id = ? AND fact = ?", (user_id, fact))
|
||||
# Check SQLite first
|
||||
existing = await self._db_fetchone("SELECT chroma_id FROM user_facts WHERE user_id = ? AND fact = ?", (user_id, fact))
|
||||
if existing:
|
||||
logger.info(f"Fact already known for user {user_id}.")
|
||||
logger.info(f"Fact already known for user {user_id} (SQLite).")
|
||||
return {"status": "duplicate", "user_id": user_id, "fact": fact}
|
||||
|
||||
count_result = await self._db_fetchone("SELECT COUNT(*) FROM user_facts WHERE user_id = ?", (user_id,))
|
||||
current_count = count_result[0] if count_result else 0
|
||||
|
||||
status = "added"
|
||||
deleted_chroma_id = None
|
||||
if current_count >= self.max_user_facts:
|
||||
logger.warning(f"User {user_id} fact limit ({self.max_user_facts}) reached. Deleting oldest.")
|
||||
oldest_fact_row = await self._db_fetchone("SELECT fact FROM user_facts WHERE user_id = ? ORDER BY timestamp ASC LIMIT 1", (user_id,))
|
||||
# Fetch oldest fact and its chroma_id for deletion
|
||||
oldest_fact_row = await self._db_fetchone("SELECT fact, chroma_id FROM user_facts WHERE user_id = ? ORDER BY timestamp ASC LIMIT 1", (user_id,))
|
||||
if oldest_fact_row:
|
||||
await self._db_execute("DELETE FROM user_facts WHERE user_id = ? AND fact = ?", (user_id, oldest_fact_row[0]))
|
||||
logger.info(f"Deleted oldest fact for user {user_id}: '{oldest_fact_row[0]}'")
|
||||
oldest_fact, deleted_chroma_id = oldest_fact_row
|
||||
await self._db_execute("DELETE FROM user_facts WHERE user_id = ? AND fact = ?", (user_id, oldest_fact))
|
||||
logger.info(f"Deleted oldest fact for user {user_id} from SQLite: '{oldest_fact}'")
|
||||
status = "limit_reached" # Indicate limit was hit but fact was added
|
||||
|
||||
await self._db_execute("INSERT INTO user_facts (user_id, fact) VALUES (?, ?)", (user_id, fact))
|
||||
logger.info(f"Fact added for user {user_id}.")
|
||||
# Generate chroma_id
|
||||
fact_hash = hashlib.sha1(fact.encode()).hexdigest()[:16] # Short hash
|
||||
chroma_id = f"user-{user_id}-{fact_hash}"
|
||||
|
||||
# Insert into SQLite
|
||||
await self._db_execute("INSERT INTO user_facts (user_id, fact, chroma_id) VALUES (?, ?, ?)", (user_id, fact, chroma_id))
|
||||
logger.info(f"Fact added for user {user_id} to SQLite.")
|
||||
|
||||
# Add to ChromaDB fact collection
|
||||
if self.fact_collection and self.embedding_function:
|
||||
try:
|
||||
metadata = {"user_id": user_id, "type": "user", "timestamp": time.time()}
|
||||
await asyncio.to_thread(
|
||||
self.fact_collection.add,
|
||||
documents=[fact],
|
||||
metadatas=[metadata],
|
||||
ids=[chroma_id]
|
||||
)
|
||||
logger.info(f"Fact added/updated for user {user_id} in ChromaDB (ID: {chroma_id}).")
|
||||
|
||||
# Delete the oldest fact from ChromaDB if limit was reached
|
||||
if deleted_chroma_id:
|
||||
logger.info(f"Attempting to delete oldest fact from ChromaDB (ID: {deleted_chroma_id}).")
|
||||
await asyncio.to_thread(self.fact_collection.delete, ids=[deleted_chroma_id])
|
||||
logger.info(f"Successfully deleted oldest fact from ChromaDB (ID: {deleted_chroma_id}).")
|
||||
|
||||
except Exception as chroma_e:
|
||||
logger.error(f"ChromaDB error adding/deleting user fact for {user_id} (ID: {chroma_id}): {chroma_e}", exc_info=True)
|
||||
# Note: Fact is still in SQLite, but ChromaDB might be inconsistent. Consider rollback? For now, just log.
|
||||
else:
|
||||
logger.warning(f"ChromaDB fact collection not available. Skipping embedding for user fact {user_id}.")
|
||||
|
||||
|
||||
return {"status": status, "user_id": user_id, "fact_added": fact}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"SQLite error adding user fact for {user_id}: {e}", exc_info=True)
|
||||
logger.error(f"Error adding user fact for {user_id}: {e}", exc_info=True)
|
||||
return {"error": f"Database error adding user fact: {str(e)}"}
|
||||
|
||||
async def get_user_facts(self, user_id: str, context: Optional[str] = None) -> List[str]:
|
||||
@ -181,32 +263,49 @@ class MemoryManager:
|
||||
logger.warning("get_user_facts called without user_id.")
|
||||
return []
|
||||
logger.info(f"Retrieving facts for user {user_id} (context provided: {bool(context)})")
|
||||
limit = self.max_user_facts # Use the class attribute for limit
|
||||
|
||||
try:
|
||||
rows = await self._db_fetchall("SELECT fact FROM user_facts WHERE user_id = ?", (user_id,))
|
||||
user_facts = [row[0] for row in rows]
|
||||
if context and self.fact_collection and self.embedding_function:
|
||||
# --- Semantic Search ---
|
||||
logger.debug(f"Performing semantic search for user facts (User: {user_id}, Limit: {limit})")
|
||||
try:
|
||||
# Query ChromaDB for facts relevant to the context
|
||||
results = await asyncio.to_thread(
|
||||
self.fact_collection.query,
|
||||
query_texts=[context],
|
||||
n_results=limit,
|
||||
where={"user_id": user_id, "type": "user"}, # Filter by user_id and type
|
||||
include=['documents'] # Only need the fact text
|
||||
)
|
||||
logger.debug(f"ChromaDB user fact query results: {results}")
|
||||
|
||||
if context and user_facts:
|
||||
# Score facts based on context if provided
|
||||
scored_facts = []
|
||||
for fact in user_facts:
|
||||
score = calculate_keyword_score(fact, context)
|
||||
scored_facts.append({"fact": fact, "score": score})
|
||||
if results and results.get('documents') and results['documents'][0]:
|
||||
relevant_facts = results['documents'][0]
|
||||
logger.info(f"Found {len(relevant_facts)} semantically relevant user facts for {user_id}.")
|
||||
return relevant_facts
|
||||
else:
|
||||
logger.info(f"No semantic user facts found for {user_id} matching context.")
|
||||
return [] # Return empty list if no semantic matches
|
||||
|
||||
# Sort by score (descending), then fallback to original order (implicitly newest first if DB returns that way)
|
||||
scored_facts.sort(key=lambda x: x["score"], reverse=True)
|
||||
# Return top N facts based on score
|
||||
return [item["fact"] for item in scored_facts[:self.max_user_facts]]
|
||||
else:
|
||||
# No context or no facts, return newest N facts (assuming DB returns in insertion order or we add ORDER BY timestamp DESC)
|
||||
# Let's add ORDER BY timestamp DESC to be explicit
|
||||
rows_ordered = await self._db_fetchall(
|
||||
"SELECT fact FROM user_facts WHERE user_id = ? ORDER BY timestamp DESC LIMIT ?",
|
||||
(user_id, self.max_user_facts)
|
||||
)
|
||||
return [row[0] for row in rows_ordered]
|
||||
except Exception as chroma_e:
|
||||
logger.error(f"ChromaDB error searching user facts for {user_id}: {chroma_e}", exc_info=True)
|
||||
# Fallback to SQLite retrieval on ChromaDB error
|
||||
logger.warning(f"Falling back to SQLite retrieval for user facts {user_id} due to ChromaDB error.")
|
||||
# Proceed to the SQLite block below
|
||||
# --- SQLite Fallback / No Context ---
|
||||
# If no context, or if ChromaDB failed/unavailable, get newest N facts from SQLite
|
||||
logger.debug(f"Retrieving user facts from SQLite (User: {user_id}, Limit: {limit})")
|
||||
rows_ordered = await self._db_fetchall(
|
||||
"SELECT fact FROM user_facts WHERE user_id = ? ORDER BY timestamp DESC LIMIT ?",
|
||||
(user_id, limit)
|
||||
)
|
||||
sqlite_facts = [row[0] for row in rows_ordered]
|
||||
logger.info(f"Retrieved {len(sqlite_facts)} user facts from SQLite for {user_id}.")
|
||||
return sqlite_facts
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"SQLite error retrieving user facts for {user_id}: {e}", exc_info=True)
|
||||
logger.error(f"Error retrieving user facts for {user_id}: {e}", exc_info=True)
|
||||
return []
|
||||
|
||||
# --- General Fact Memory Methods (SQLite + Relevance) ---
|
||||
@ -217,69 +316,364 @@ class MemoryManager:
|
||||
return {"error": "fact is required."}
|
||||
logger.info(f"Attempting to add general fact: '{fact}'")
|
||||
try:
|
||||
existing = await self._db_fetchone("SELECT 1 FROM general_facts WHERE fact = ?", (fact,))
|
||||
# Check SQLite first
|
||||
existing = await self._db_fetchone("SELECT chroma_id FROM general_facts WHERE fact = ?", (fact,))
|
||||
if existing:
|
||||
logger.info(f"General fact already known: '{fact}'")
|
||||
logger.info(f"General fact already known (SQLite): '{fact}'")
|
||||
return {"status": "duplicate", "fact": fact}
|
||||
|
||||
count_result = await self._db_fetchone("SELECT COUNT(*) FROM general_facts", ())
|
||||
current_count = count_result[0] if count_result else 0
|
||||
|
||||
status = "added"
|
||||
deleted_chroma_id = None
|
||||
if current_count >= self.max_general_facts:
|
||||
logger.warning(f"General fact limit ({self.max_general_facts}) reached. Deleting oldest.")
|
||||
oldest_fact_row = await self._db_fetchone("SELECT fact FROM general_facts ORDER BY timestamp ASC LIMIT 1", ())
|
||||
# Fetch oldest fact and its chroma_id for deletion
|
||||
oldest_fact_row = await self._db_fetchone("SELECT fact, chroma_id FROM general_facts ORDER BY timestamp ASC LIMIT 1", ())
|
||||
if oldest_fact_row:
|
||||
await self._db_execute("DELETE FROM general_facts WHERE fact = ?", (oldest_fact_row[0],))
|
||||
logger.info(f"Deleted oldest general fact: '{oldest_fact_row[0]}'")
|
||||
oldest_fact, deleted_chroma_id = oldest_fact_row
|
||||
await self._db_execute("DELETE FROM general_facts WHERE fact = ?", (oldest_fact,))
|
||||
logger.info(f"Deleted oldest general fact from SQLite: '{oldest_fact}'")
|
||||
status = "limit_reached"
|
||||
|
||||
await self._db_execute("INSERT INTO general_facts (fact) VALUES (?)", (fact,))
|
||||
logger.info(f"General fact added: '{fact}'")
|
||||
# Generate chroma_id
|
||||
fact_hash = hashlib.sha1(fact.encode()).hexdigest()[:16] # Short hash
|
||||
chroma_id = f"general-{fact_hash}"
|
||||
|
||||
# Insert into SQLite
|
||||
await self._db_execute("INSERT INTO general_facts (fact, chroma_id) VALUES (?, ?)", (fact, chroma_id))
|
||||
logger.info(f"General fact added to SQLite: '{fact}'")
|
||||
|
||||
# Add to ChromaDB fact collection
|
||||
if self.fact_collection and self.embedding_function:
|
||||
try:
|
||||
metadata = {"type": "general", "timestamp": time.time()}
|
||||
await asyncio.to_thread(
|
||||
self.fact_collection.add,
|
||||
documents=[fact],
|
||||
metadatas=[metadata],
|
||||
ids=[chroma_id]
|
||||
)
|
||||
logger.info(f"General fact added/updated in ChromaDB (ID: {chroma_id}).")
|
||||
|
||||
# Delete the oldest fact from ChromaDB if limit was reached
|
||||
if deleted_chroma_id:
|
||||
logger.info(f"Attempting to delete oldest general fact from ChromaDB (ID: {deleted_chroma_id}).")
|
||||
await asyncio.to_thread(self.fact_collection.delete, ids=[deleted_chroma_id])
|
||||
logger.info(f"Successfully deleted oldest general fact from ChromaDB (ID: {deleted_chroma_id}).")
|
||||
|
||||
except Exception as chroma_e:
|
||||
logger.error(f"ChromaDB error adding/deleting general fact (ID: {chroma_id}): {chroma_e}", exc_info=True)
|
||||
# Note: Fact is still in SQLite.
|
||||
else:
|
||||
logger.warning(f"ChromaDB fact collection not available. Skipping embedding for general fact.")
|
||||
|
||||
return {"status": status, "fact_added": fact}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"SQLite error adding general fact: {e}", exc_info=True)
|
||||
logger.error(f"Error adding general fact: {e}", exc_info=True)
|
||||
return {"error": f"Database error adding general fact: {str(e)}"}
|
||||
|
||||
async def get_general_facts(self, query: Optional[str] = None, limit: Optional[int] = 10, context: Optional[str] = None) -> List[str]:
    """Retrieves stored general facts, optionally filtering by query or scoring by context relevance.

    Args:
        query: Optional substring filter applied via SQL LIKE (only in the
            SQLite path; ignored when a semantic search runs).
        limit: Maximum number of facts to return (clamped to 1..50, default 10).
        context: Optional free text; when provided and ChromaDB is available,
            facts are retrieved by semantic similarity to this context.

    Returns:
        A list of fact strings; empty on error or when nothing matches.
    """
    logger.info(f"Retrieving general facts (query='{query}', limit={limit}, context provided: {bool(context)})")
    limit = min(max(1, limit or 10), 50) # Use provided limit or default 10, max 50

    try:
        if context and self.fact_collection and self.embedding_function:
            # --- Semantic Search (Prioritized if context is provided) ---
            # Note: The 'query' parameter is ignored when context is provided for semantic search.
            logger.debug(f"Performing semantic search for general facts (Limit: {limit})")
            try:
                results = await asyncio.to_thread(
                    self.fact_collection.query,
                    query_texts=[context],
                    n_results=limit,
                    where={"type": "general"}, # Filter by type
                    include=['documents'] # Only need the fact text
                )
                logger.debug(f"ChromaDB general fact query results: {results}")

                if results and results.get('documents') and results['documents'][0]:
                    relevant_facts = results['documents'][0]
                    logger.info(f"Found {len(relevant_facts)} semantically relevant general facts.")
                    return relevant_facts
                else:
                    logger.info("No semantic general facts found matching context.")
                    return [] # Return empty list if no semantic matches

            except Exception as chroma_e:
                logger.error(f"ChromaDB error searching general facts: {chroma_e}", exc_info=True)
                # Fall through to the SQLite block below, respecting 'query' if present.
                logger.warning("Falling back to SQLite retrieval for general facts due to ChromaDB error.")

        # --- SQLite Fallback / No Context / ChromaDB Error ---
        # Newest N facts from SQLite, applying the LIKE query if present.
        logger.debug(f"Retrieving general facts from SQLite (Query: '{query}', Limit: {limit})")
        sql = "SELECT fact FROM general_facts"
        params = []
        if query:
            # Apply the LIKE query only in the SQLite fallback scenario
            sql += " WHERE fact LIKE ?"
            params.append(f"%{query}%")
        sql += " ORDER BY timestamp DESC LIMIT ?"
        params.append(limit)

        rows_ordered = await self._db_fetchall(sql, tuple(params))
        sqlite_facts = [row[0] for row in rows_ordered]
        logger.info(f"Retrieved {len(sqlite_facts)} general facts from SQLite (Query: '{query}').")
        return sqlite_facts

    except Exception as e:
        logger.error(f"Error retrieving general facts: {e}", exc_info=True)
        return []
|
||||
|
||||
# --- Personality Trait Methods (SQLite) ---
|
||||
|
||||
async def set_personality_trait(self, key: str, value: Any):
    """Stores or updates a personality trait in the database.

    Args:
        key: The trait name (e.g. "chattiness"). Empty keys are rejected.
        value: The trait value; JSON-serialized so str/int/float/bool all
            round-trip through the TEXT column.
    """
    if not key:
        logger.error("set_personality_trait called with empty key.")
        return
    try:
        # Serialize the value to a JSON string to handle different types (str, int, float, bool)
        value_json = json.dumps(value)
        # INSERT OR REPLACE keeps one row per trait_key and refreshes last_updated.
        await self._db_execute(
            "INSERT OR REPLACE INTO gurt_personality (trait_key, trait_value, last_updated) VALUES (?, ?, unixepoch('now'))",
            (key, value_json)
        )
        logger.info(f"Personality trait '{key}' set/updated.")
    except Exception as e:
        logger.error(f"Error setting personality trait '{key}': {e}", exc_info=True)
|
||||
|
||||
async def get_personality_trait(self, key: str) -> Optional[Any]:
    """Retrieves a specific personality trait from the database.

    Args:
        key: The trait name to look up.

    Returns:
        The deserialized trait value, or None if the key is empty, the trait
        is not stored, or an error occurs.
    """
    if not key:
        logger.error("get_personality_trait called with empty key.")
        return None
    try:
        row = await self._db_fetchone("SELECT trait_value FROM gurt_personality WHERE trait_key = ?", (key,))
        if row:
            # Deserialize the JSON string back to its original type
            value = json.loads(row[0])
            logger.debug(f"Retrieved personality trait '{key}': {value}")
            return value
        else:
            logger.debug(f"Personality trait '{key}' not found.")
            return None
    except Exception as e:
        logger.error(f"Error getting personality trait '{key}': {e}", exc_info=True)
        return None
|
||||
|
||||
async def get_all_personality_traits(self) -> Dict[str, Any]:
    """Retrieves all personality traits from the database.

    Returns:
        A dict mapping trait_key -> deserialized value. A trait whose stored
        JSON fails to decode maps to None; on a database error an empty dict
        is returned.
    """
    traits = {}
    try:
        rows = await self._db_fetchall("SELECT trait_key, trait_value FROM gurt_personality", ())
        for key, value_json in rows:
            try:
                # Deserialize each value independently so one bad row
                # doesn't discard the rest.
                traits[key] = json.loads(value_json)
            except json.JSONDecodeError as json_e:
                logger.error(f"Error decoding JSON for trait '{key}': {json_e}. Value: {value_json}")
                traits[key] = None # Or handle error differently
        logger.info(f"Retrieved {len(traits)} personality traits.")
        return traits
    except Exception as e:
        logger.error(f"Error getting all personality traits: {e}", exc_info=True)
        return {}
|
||||
|
||||
async def load_baseline_personality(self, baseline_traits: Dict[str, Any]):
    """Loads baseline traits into the personality table ONLY if it's empty.

    Used to seed a fresh database; an already-populated table is left
    untouched so evolved trait values are never overwritten.

    Args:
        baseline_traits: Mapping of trait name -> initial value.
    """
    if not baseline_traits:
        logger.warning("load_baseline_personality called with empty baseline traits.")
        return
    try:
        # Check if the table is empty
        count_result = await self._db_fetchone("SELECT COUNT(*) FROM gurt_personality", ())
        current_count = count_result[0] if count_result else 0

        if current_count == 0:
            logger.info("Personality table is empty. Loading baseline traits...")
            for key, value in baseline_traits.items():
                await self.set_personality_trait(key, value)
            logger.info(f"Loaded {len(baseline_traits)} baseline traits.")
        else:
            logger.info(f"Personality table already contains {current_count} traits. Skipping baseline load.")
    except Exception as e:
        logger.error(f"Error loading baseline personality: {e}", exc_info=True)
|
||||
|
||||
async def load_baseline_interests(self, baseline_interests: Dict[str, float]):
    """Loads baseline interests into the interests table ONLY if it's empty.

    Seeds a fresh database in a single connection/transaction; an
    already-populated table is left untouched.

    Args:
        baseline_interests: Mapping of topic -> initial interest level.
            Topics are lower-cased/stripped; levels are clamped to
            [INTEREST_MIN_LEVEL, INTEREST_MAX_LEVEL].
    """
    if not baseline_interests:
        logger.warning("load_baseline_interests called with empty baseline interests.")
        return
    try:
        # Check if the table is empty
        count_result = await self._db_fetchone("SELECT COUNT(*) FROM gurt_interests", ())
        current_count = count_result[0] if count_result else 0

        if current_count == 0:
            logger.info("Interests table is empty. Loading baseline interests...")
            async with self.db_lock:
                async with aiosqlite.connect(self.db_path) as db:
                    for topic, level in baseline_interests.items():
                        topic_normalized = topic.lower().strip()
                        if not topic_normalized:
                            continue  # Skip empty topics
                        # Clamp initial level just in case
                        level_clamped = max(INTEREST_MIN_LEVEL, min(INTEREST_MAX_LEVEL, level))
                        await db.execute(
                            """
                            INSERT INTO gurt_interests (interest_topic, interest_level, last_updated)
                            VALUES (?, ?, unixepoch('now'))
                            """,
                            (topic_normalized, level_clamped)
                        )
                    # Commit once after all inserts.
                    await db.commit()
            logger.info(f"Loaded {len(baseline_interests)} baseline interests.")
        else:
            logger.info(f"Interests table already contains {current_count} interests. Skipping baseline load.")
    except Exception as e:
        logger.error(f"Error loading baseline interests: {e}", exc_info=True)
|
||||
|
||||
|
||||
# --- Interest Methods (SQLite) ---
|
||||
|
||||
async def update_interest(self, topic: str, change: float):
    """
    Updates the interest level for a given topic. Creates the topic if it doesn't exist.
    Clamps the interest level between INTEREST_MIN_LEVEL and INTEREST_MAX_LEVEL.

    Args:
        topic: The interest topic (e.g., "gaming", "anime").
        change: The amount to change the interest level by (can be positive or negative).
    """
    if not topic:
        logger.error("update_interest called with empty topic.")
        return
    topic = topic.lower().strip() # Normalize topic
    if not topic:
        logger.error("update_interest called with empty topic after normalization.")
        return

    try:
        async with self.db_lock:
            async with aiosqlite.connect(self.db_path) as db:
                # Read the current level (if any) so the change is applied
                # relative to it inside the same locked connection.
                cursor = await db.execute("SELECT interest_level FROM gurt_interests WHERE interest_topic = ?", (topic,))
                row = await cursor.fetchone()

                if row:
                    current_level = row[0]
                else:
                    # Topic doesn't exist, create it with initial level + change
                    current_level = INTEREST_INITIAL_LEVEL # Use constant for initial level
                    logger.info(f"Creating new interest: '{topic}' with initial level {current_level:.3f} + change {change:.3f}")
                new_level = current_level + change

                # Clamp the new level
                new_level_clamped = max(INTEREST_MIN_LEVEL, min(INTEREST_MAX_LEVEL, new_level))

                # Insert or update the topic (upsert on the topic key).
                await db.execute(
                    """
                    INSERT INTO gurt_interests (interest_topic, interest_level, last_updated)
                    VALUES (?, ?, unixepoch('now'))
                    ON CONFLICT(interest_topic) DO UPDATE SET
                        interest_level = excluded.interest_level,
                        last_updated = excluded.last_updated;
                    """,
                    (topic, new_level_clamped)
                )
                await db.commit()
                logger.info(f"Interest '{topic}' updated: {current_level:.3f} -> {new_level_clamped:.3f} (Change: {change:.3f})")

    except Exception as e:
        logger.error(f"Error updating interest '{topic}': {e}", exc_info=True)
|
||||
|
||||
async def get_interests(self, limit: int = 5, min_level: float = 0.2) -> List[Tuple[str, float]]:
    """
    Retrieves the top interests above a minimum level, ordered by interest level descending.

    Args:
        limit: The maximum number of interests to return.
        min_level: The minimum interest level required to be included.

    Returns:
        A list of tuples, where each tuple is (interest_topic, interest_level).
        Empty on error.
    """
    interests = []
    try:
        rows = await self._db_fetchall(
            "SELECT interest_topic, interest_level FROM gurt_interests WHERE interest_level >= ? ORDER BY interest_level DESC LIMIT ?",
            (min_level, limit)
        )
        interests = [(row[0], row[1]) for row in rows]
        logger.info(f"Retrieved {len(interests)} interests (Limit: {limit}, Min Level: {min_level}).")
        return interests
    except Exception as e:
        logger.error(f"Error getting interests: {e}", exc_info=True)
        return []
|
||||
|
||||
async def decay_interests(self, decay_rate: float = INTEREST_DECAY_RATE, decay_interval_hours: int = INTEREST_DECAY_INTERVAL_HOURS):
    """
    Applies decay to interest levels for topics not updated recently.

    Args:
        decay_rate: The fraction to reduce the interest level by (e.g., 0.01 for 1% decay).
        decay_interval_hours: Only decay interests not updated within this many hours.

    NOTE(review): the UPDATE below deliberately does not refresh last_updated,
    so a quiet topic stays eligible and keeps decaying each cycle until it
    hits INTEREST_MIN_LEVEL — confirm this is the intended behavior.
    """
    if not (0 < decay_rate < 1):
        logger.error(f"Invalid decay_rate: {decay_rate}. Must be between 0 and 1.")
        return
    if decay_interval_hours <= 0:
        logger.error(f"Invalid decay_interval_hours: {decay_interval_hours}. Must be positive.")
        return

    try:
        cutoff_timestamp = time.time() - (decay_interval_hours * 3600)
        logger.info(f"Applying interest decay (Rate: {decay_rate}) for interests not updated since {datetime.datetime.fromtimestamp(cutoff_timestamp).isoformat()}...")

        async with self.db_lock:
            async with aiosqlite.connect(self.db_path) as db:
                # Select topics eligible for decay
                cursor = await db.execute(
                    "SELECT interest_topic, interest_level FROM gurt_interests WHERE last_updated < ?",
                    (cutoff_timestamp,)
                )
                topics_to_decay = await cursor.fetchall()

                if not topics_to_decay:
                    logger.info("No interests found eligible for decay.")
                    return

                updated_count = 0
                # Apply decay and update
                for topic, current_level in topics_to_decay:
                    decay_amount = current_level * decay_rate
                    new_level = current_level - decay_amount
                    # Ensure level doesn't drop below the minimum threshold due to decay
                    new_level_clamped = max(INTEREST_MIN_LEVEL, new_level)

                    # Only update if the level actually changes significantly
                    if abs(new_level_clamped - current_level) > 0.001:
                        await db.execute(
                            "UPDATE gurt_interests SET interest_level = ? WHERE interest_topic = ?",
                            (new_level_clamped, topic)
                        )
                        logger.debug(f"Decayed interest '{topic}': {current_level:.3f} -> {new_level_clamped:.3f}")
                        updated_count += 1

                await db.commit()
                logger.info(f"Interest decay cycle complete. Updated {updated_count}/{len(topics_to_decay)} eligible interests.")

    except Exception as e:
        logger.error(f"Error during interest decay: {e}", exc_info=True)
|
||||
|
||||
# --- Semantic Memory Methods (ChromaDB) ---
|
||||
|
||||
async def add_message_embedding(self, message_id: str, text: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
|
||||
|
Loading…
x
Reference in New Issue
Block a user