jdjjdjd
parent ae54167cc4
commit 766fae8c5b
freak_teto/__init__.py (Normal file, +8 lines)
@@ -0,0 +1,8 @@
# This file makes the 'freak_teto' directory a Python package.
# It allows Python to properly import modules from this directory

# Export the setup function for discord.py extension loading
from .cog import setup

# This makes "from freak_teto import setup" work
__all__ = ['setup']
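For context, discord.py loads this package as an extension by importing it and calling the exported setup(); a minimal, hypothetical startup snippet follows (the bot object, prefix, intents, and token handling are assumptions for illustration, not part of this commit):

import asyncio
import discord
from discord.ext import commands

intents = discord.Intents.default()
bot = commands.Bot(command_prefix="!", intents=intents)

async def main():
    # discord.py imports freak_teto/__init__.py and calls its setup(bot)
    await bot.load_extension("freak_teto")
    await bot.start("YOUR_BOT_TOKEN")  # placeholder token, supplied by the deployment

asyncio.run(main())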
freak_teto/analysis.py (Normal file, +658 lines)
@@ -0,0 +1,658 @@
import time
import re
import traceback
import logging
from collections import defaultdict
from typing import TYPE_CHECKING, List, Dict, Any, Optional

logger = logging.getLogger(__name__)

# Relative imports
from .config import (
    MAX_PATTERNS_PER_CHANNEL, LEARNING_RATE, TOPIC_UPDATE_INTERVAL,
    TOPIC_RELEVANCE_DECAY, MAX_ACTIVE_TOPICS, SENTIMENT_DECAY_RATE,
    EMOTION_KEYWORDS, EMOJI_SENTIMENT, BASELINE_PERSONALITY # Import necessary configs
)

if TYPE_CHECKING:
    from .cog import FreakTetoCog # For type hinting - Updated

# --- Analysis Functions ---
# Note: These functions need the 'cog' instance passed to access state like caches, etc.

async def analyze_conversation_patterns(cog: 'FreakTetoCog'): # Updated type hint
    """Analyzes recent conversations to identify patterns and learn from them"""
    print("Analyzing conversation patterns and updating topics (Freak Teto)...") # Updated log
    try:
        # Update conversation topics first
        await update_conversation_topics(cog)

        for channel_id, messages in cog.message_cache['by_channel'].items():
            if len(messages) < 10: continue

            channel_patterns = extract_conversation_patterns(cog, messages) # Pass cog
            if channel_patterns:
                existing_patterns = cog.conversation_patterns[channel_id]
                combined_patterns = existing_patterns + channel_patterns
                if len(combined_patterns) > MAX_PATTERNS_PER_CHANNEL:
                    combined_patterns = combined_patterns[-MAX_PATTERNS_PER_CHANNEL:]
                cog.conversation_patterns[channel_id] = combined_patterns

            analyze_conversation_dynamics(cog, channel_id, messages) # Pass cog

        update_user_preferences(cog) # Pass cog
        # adapt_personality_traits(cog) # Pass cog - Deprecated/Superseded by evolve_personality

    except Exception as e:
        print(f"Error analyzing conversation patterns (Freak Teto): {e}") # Updated log
        traceback.print_exc()

async def update_conversation_topics(cog: 'FreakTetoCog'): # Updated type hint
    """Updates the active topics for each channel based on recent messages"""
    try:
        for channel_id, messages in cog.message_cache['by_channel'].items():
            if len(messages) < 5: continue

            channel_topics = cog.active_topics[channel_id]
            now = time.time()
            if now - channel_topics["last_update"] < TOPIC_UPDATE_INTERVAL: continue

            recent_messages = list(messages)[-30:]
            topics = identify_conversation_topics(cog, recent_messages) # Pass cog
            if not topics: continue

            old_topics = channel_topics["topics"]
            for topic in old_topics: topic["score"] *= (1 - TOPIC_RELEVANCE_DECAY)

            for new_topic in topics:
                existing = next((t for t in old_topics if t["topic"] == new_topic["topic"]), None)
                if existing:
                    existing["score"] = max(existing["score"], new_topic["score"])
                    existing["related_terms"] = new_topic["related_terms"]
                    existing["last_mentioned"] = now
                else:
                    new_topic["first_mentioned"] = now
                    new_topic["last_mentioned"] = now
                    old_topics.append(new_topic)

            old_topics = [t for t in old_topics if t["score"] > 0.2]
            old_topics.sort(key=lambda x: x["score"], reverse=True)
            old_topics = old_topics[:MAX_ACTIVE_TOPICS]

            if old_topics and channel_topics["topics"] != old_topics:
                if not channel_topics["topic_history"] or set(t["topic"] for t in old_topics) != set(t["topic"] for t in channel_topics["topics"]):
                    channel_topics["topic_history"].append({
                        "topics": [{"topic": t["topic"], "score": t["score"]} for t in old_topics],
                        "timestamp": now
                    })
                    if len(channel_topics["topic_history"]) > 10:
                        channel_topics["topic_history"] = channel_topics["topic_history"][-10:]

            for msg in recent_messages:
                user_id = msg["author"]["id"]
                content = msg["content"].lower()
                for topic in old_topics:
                    topic_text = topic["topic"].lower()
                    if topic_text in content:
                        user_interests = channel_topics["user_topic_interests"][user_id]
                        existing = next((i for i in user_interests if i["topic"] == topic["topic"]), None)
                        if existing:
                            existing["score"] = existing["score"] * 0.8 + topic["score"] * 0.2
                            existing["last_mentioned"] = now
                        else:
                            user_interests.append({
                                "topic": topic["topic"], "score": topic["score"] * 0.5,
                                "first_mentioned": now, "last_mentioned": now
                            })

            channel_topics["topics"] = old_topics
            channel_topics["last_update"] = now
            if old_topics:
                topic_str = ", ".join([f"{t['topic']} ({t['score']:.2f})" for t in old_topics[:3]])
                print(f"Updated topics for channel {channel_id} (Freak Teto): {topic_str}") # Updated log

    except Exception as e:
        print(f"Error updating conversation topics (Freak Teto): {e}") # Updated log
        traceback.print_exc()

def analyze_conversation_dynamics(cog: 'FreakTetoCog', channel_id: int, messages: List[Dict[str, Any]]): # Updated type hint
    """Analyzes conversation dynamics like response times, message lengths, etc."""
    if len(messages) < 5: return
    try:
        response_times = []
        response_map = defaultdict(int)
        message_lengths = defaultdict(list)
        question_answer_pairs = []
        import datetime # Import here

        for i in range(1, len(messages)):
            current_msg = messages[i]; prev_msg = messages[i-1]
            if current_msg["author"]["id"] == prev_msg["author"]["id"]: continue
            try:
                current_time = datetime.datetime.fromisoformat(current_msg["created_at"])
                prev_time = datetime.datetime.fromisoformat(prev_msg["created_at"])
                delta_seconds = (current_time - prev_time).total_seconds()
                if 0 < delta_seconds < 300: response_times.append(delta_seconds)
            except (ValueError, TypeError): pass

            responder = current_msg["author"]["id"]; respondee = prev_msg["author"]["id"]
            response_map[f"{responder}:{respondee}"] += 1
            message_lengths[responder].append(len(current_msg["content"]))
            if prev_msg["content"].endswith("?"):
                question_answer_pairs.append({
                    "question": prev_msg["content"], "answer": current_msg["content"],
                    "question_author": prev_msg["author"]["id"], "answer_author": current_msg["author"]["id"]
                })

        avg_response_time = sum(response_times) / len(response_times) if response_times else 0
        top_responders = sorted(response_map.items(), key=lambda x: x[1], reverse=True)[:3]
        avg_message_lengths = {uid: sum(ls)/len(ls) if ls else 0 for uid, ls in message_lengths.items()}

        dynamics = {
            "avg_response_time": avg_response_time, "top_responders": top_responders,
            "avg_message_lengths": avg_message_lengths, "question_answer_count": len(question_answer_pairs),
            "last_updated": time.time()
        }
        if not hasattr(cog, 'conversation_dynamics'): cog.conversation_dynamics = {}
        cog.conversation_dynamics[channel_id] = dynamics
        adapt_to_conversation_dynamics(cog, channel_id, dynamics) # Pass cog

    except Exception as e: print(f"Error analyzing conversation dynamics (Freak Teto): {e}") # Updated log

def adapt_to_conversation_dynamics(cog: 'FreakTetoCog', channel_id: int, dynamics: Dict[str, Any]): # Updated type hint
    """Adapts bot behavior based on observed conversation dynamics."""
    try:
        # This logic might need adjustment based on Teto's desired interaction speed/style
        if dynamics["avg_response_time"] > 0:
            if not hasattr(cog, 'channel_response_timing'): cog.channel_response_timing = {}
            # Maybe Teto responds slightly faster or slower? Keep original logic for now.
            response_time_factor = max(0.7, min(1.0, dynamics["avg_response_time"] / 10))
            cog.channel_response_timing[channel_id] = response_time_factor

        if dynamics["avg_message_lengths"]:
            all_lengths = [ls for ls in dynamics["avg_message_lengths"].values()]
            if all_lengths:
                avg_length = sum(all_lengths) / len(all_lengths)
                if not hasattr(cog, 'channel_message_length'): cog.channel_message_length = {}
                # Adjust based on Teto's typical verbosity? Keep original for now.
                length_factor = min(avg_length / 200, 1.0)
                cog.channel_message_length[channel_id] = length_factor

        if dynamics["question_answer_count"] > 0:
            if not hasattr(cog, 'channel_qa_responsiveness'): cog.channel_qa_responsiveness = {}
            # Teto might be more responsive to questions? Keep original for now.
            qa_factor = min(0.9, 0.5 + (dynamics["question_answer_count"] / 20) * 0.4)
            cog.channel_qa_responsiveness[channel_id] = qa_factor

    except Exception as e: print(f"Error adapting to conversation dynamics (Freak Teto): {e}") # Updated log

def extract_conversation_patterns(cog: 'FreakTetoCog', messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Updated type hint
    """Extract patterns from a sequence of messages"""
    patterns = []
    if len(messages) < 5: return patterns
    import datetime # Import here

    for i in range(len(messages) - 2):
        pattern = {
            "type": "message_sequence",
            "messages": [
                {"author_type": "user" if not messages[i]["author"]["bot"] else "bot", "content_sample": messages[i]["content"][:50]},
                {"author_type": "user" if not messages[i+1]["author"]["bot"] else "bot", "content_sample": messages[i+1]["content"][:50]},
                {"author_type": "user" if not messages[i+2]["author"]["bot"] else "bot", "content_sample": messages[i+2]["content"][:50]}
            ], "timestamp": datetime.datetime.now().isoformat()
        }
        patterns.append(pattern)

    topics = identify_conversation_topics(cog, messages) # Pass cog
    if topics: patterns.append({"type": "topic_pattern", "topics": topics, "timestamp": datetime.datetime.now().isoformat()})

    user_interactions = analyze_user_interactions(cog, messages) # Pass cog
    if user_interactions: patterns.append({"type": "user_interaction", "interactions": user_interactions, "timestamp": datetime.datetime.now().isoformat()})

    return patterns

def identify_conversation_topics(cog: 'FreakTetoCog', messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Updated type hint
    """Identify potential topics from conversation messages."""
    if not messages or len(messages) < 3: return []
    all_text = " ".join([msg["content"] for msg in messages])
    # Adjusted stopwords for Teto - Removed heavy slang, kept general words + 'teto'.
    stopwords = {
        "the", "and", "is", "in", "to", "a", "of", "for", "that", "this", "it", "with", "on", "as", "be", "at", "by", "an", "or", "but", "if", "from", "when", "where", "how", "all", "any", "both", "each", "few", "more", "most", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "can", "will", "just", "should", "now", "also", "like", "even", "because", "way", "who", "what", "yeah", "yes", "no", "nah", "lol", "lmao", "haha", "hmm", "um", "uh", "oh", "ah", "ok", "okay", "dont", "don't", "doesnt", "doesn't", "didnt", "didn't", "cant", "can't", "im", "i'm", "ive", "i've", "youre", "you're", "youve", "you've", "hes", "he's", "shes", "she's", "its", "it's", "were", "we're", "weve", "we've", "theyre", "they're", "theyve", "they've", "thats", "that's", "whats", "what's", "whos", "who's",
        # Consider keeping bot name?
        "teto" # Removed 'gurt'
    }
    # 'gurt' removed from set above, no discard needed.

    def extract_ngrams(text, n_values=[1, 2, 3]):
        words = re.findall(r'\b\w+\b', text.lower())
        filtered_words = [word for word in words if word not in stopwords and len(word) > 2]
        all_ngrams = []
        for n in n_values: all_ngrams.extend([' '.join(filtered_words[i:i+n]) for i in range(len(filtered_words)-n+1)])
        return all_ngrams

    all_ngrams = extract_ngrams(all_text)
    ngram_counts = defaultdict(int)
    for ngram in all_ngrams: ngram_counts[ngram] += 1

    min_count = 2 if len(messages) > 10 else 1
    filtered_ngrams = {ngram: count for ngram, count in ngram_counts.items() if count >= min_count}
    total_messages = len(messages)
    ngram_scores = {}
    for ngram, count in filtered_ngrams.items():
        message_count = sum(1 for msg in messages if ngram in msg["content"].lower())
        spread_factor = (message_count / total_messages) ** 0.5
        length_bonus = len(ngram.split()) * 0.1
        importance = (count * (0.4 + spread_factor)) + length_bonus
        ngram_scores[ngram] = importance

    topics = []
    processed_ngrams = set()
    sorted_by_score = sorted(ngram_scores.items(), key=lambda x: x[1], reverse=True)
    ngrams_to_consider = []
    temp_processed = set()
    for ngram, score in sorted_by_score:
        is_subgram = False
        for other_ngram, _ in sorted_by_score:
            if ngram != other_ngram and ngram in other_ngram:
                is_subgram = True
                break
        if not is_subgram and ngram not in temp_processed:
            ngrams_to_consider.append((ngram, score))
            temp_processed.add(ngram)

    sorted_ngrams = ngrams_to_consider

    for ngram, score in sorted_ngrams[:10]:
        if ngram in processed_ngrams: continue
        related_terms = []
        for other_ngram, other_score in sorted_by_score:
            if other_ngram == ngram or other_ngram in processed_ngrams: continue
            ngram_words = set(ngram.split()); other_words = set(other_ngram.split())
            if ngram_words.intersection(other_words) or other_ngram in ngram:
                related_terms.append({"term": other_ngram, "score": other_score})
                if len(related_terms) >= 3: break
        processed_ngrams.add(ngram)
        topic_entry = {"topic": ngram, "score": score, "related_terms": related_terms, "message_count": sum(1 for msg in messages if ngram in msg["content"].lower())}
        topics.append(topic_entry)
        if len(topics) >= MAX_ACTIVE_TOPICS: break

    # Sentiment analysis logic remains the same
    positive_words = {"good", "great", "awesome", "amazing", "excellent", "love", "like", "best", "better", "nice", "cool", "happy", "glad", "thanks", "thank", "appreciate", "wonderful", "fantastic", "perfect", "beautiful", "fun", "enjoy", "yes", "yep"}
    negative_words = {"bad", "terrible", "awful", "worst", "hate", "dislike", "sucks", "stupid", "boring", "annoying", "sad", "upset", "angry", "mad", "disappointed", "sorry", "unfortunate", "horrible", "ugly", "wrong", "fail", "no", "nope"}
    for topic in topics:
        topic_messages = [msg["content"] for msg in messages if topic["topic"] in msg["content"].lower()]
        topic_text = " ".join(topic_messages).lower()
        positive_count = sum(1 for word in positive_words if word in topic_text)
        negative_count = sum(1 for word in negative_words if word in topic_text)
        if positive_count > negative_count: topic["sentiment"] = "positive"
        elif negative_count > positive_count: topic["sentiment"] = "negative"
        else: topic["sentiment"] = "neutral"

    return topics


def analyze_user_interactions(cog: 'FreakTetoCog', messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Updated type hint
    """Analyze interactions between users in the conversation"""
    interactions = []
    response_map = defaultdict(int)
    for i in range(1, len(messages)):
        current_msg = messages[i]; prev_msg = messages[i-1]
        if current_msg["author"]["id"] == prev_msg["author"]["id"]: continue
        responder = current_msg["author"]["id"]; respondee = prev_msg["author"]["id"]
        key = f"{responder}:{respondee}"
        response_map[key] += 1
    for key, count in response_map.items():
        if count > 1:
            responder, respondee = key.split(":")
            interactions.append({"responder": responder, "respondee": respondee, "count": count})
    return interactions

def update_user_preferences(cog: 'FreakTetoCog'): # Updated type hint
    """Update stored user preferences based on observed interactions"""
    for user_id, messages in cog.message_cache['by_user'].items():
        if len(messages) < 5: continue
        emoji_count = 0; slang_count = 0; avg_length = 0
        for msg in messages:
            content = msg["content"]
            emoji_count += len(re.findall(r'[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF\U00002702-\U000027B0\U000024C2-\U0001F251]', content))
            # Remove heavy slang check for Teto, or adjust keywords
            # slang_words = ["ngl", "icl", "pmo", "ts", "bro", "vro", "bruh", "tuff", "kevin"]
            # for word in slang_words:
            #     if re.search(r'\b' + word + r'\b', content.lower()): slang_count += 1
            avg_length += len(content)
        if messages: avg_length /= len(messages)

        user_prefs = cog.user_preferences[user_id]
        if emoji_count > 0: user_prefs["emoji_preference"] = user_prefs.get("emoji_preference", 0.5) * (1 - LEARNING_RATE) + (emoji_count / len(messages)) * LEARNING_RATE
        # if slang_count > 0: user_prefs["slang_preference"] = user_prefs.get("slang_preference", 0.5) * (1 - LEARNING_RATE) + (slang_count / len(messages)) * LEARNING_RATE # Remove slang preference update
        user_prefs["length_preference"] = user_prefs.get("length_preference", 50) * (1 - LEARNING_RATE) + avg_length * LEARNING_RATE
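The preference updates above (and the trait updates in evolve_personality below) blend new observations into stored values with the same exponential-moving-average pattern, new = old * (1 - LEARNING_RATE) + observed * LEARNING_RATE. A tiny standalone illustration, using 0.1 as an assumed example rate since the real constant lives in freak_teto.config and is not shown in this diff:

LEARNING_RATE = 0.1  # assumed example value; the actual constant comes from .config

def ema_update(old: float, observed: float, rate: float = LEARNING_RATE) -> float:
    """Blend a new observation into a stored value; a higher rate adapts faster."""
    return old * (1 - rate) + observed * rate

print(ema_update(50.0, 120.0))  # a stored length preference drifting toward longer messages -> 57.0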

async def evolve_personality(cog: 'FreakTetoCog'): # Updated type hint
    """Periodically analyzes recent activity and adjusts persistent personality traits."""
    print("Starting personality evolution cycle (Freak Teto)...") # Updated log
    try:
        current_traits = await cog.memory_manager.get_all_personality_traits()
        if not current_traits: print("Evolution Error: Could not load current traits (Freak Teto)."); return # Updated log

        # Sentiment analysis (remains mostly the same)
        positive_sentiment_score = 0; negative_sentiment_score = 0; sentiment_channels_count = 0
        for channel_id, sentiment_data in cog.conversation_sentiment.items():
            if time.time() - cog.channel_activity.get(channel_id, 0) < 3600:
                if sentiment_data["overall"] == "positive": positive_sentiment_score += sentiment_data["intensity"]
                elif sentiment_data["overall"] == "negative": negative_sentiment_score += sentiment_data["intensity"]
                sentiment_channels_count += 1
        avg_pos_intensity = positive_sentiment_score / sentiment_channels_count if sentiment_channels_count > 0 else 0
        avg_neg_intensity = negative_sentiment_score / sentiment_channels_count if sentiment_channels_count > 0 else 0
        print(f"Evolution Analysis (Freak Teto): Avg Pos Intensity={avg_pos_intensity:.2f}, Avg Neg Intensity={avg_neg_intensity:.2f}") # Updated log

        # Tool Usage Analysis (remains mostly the same)
        tool_success_rate = {}
        total_tool_uses = 0
        successful_tool_uses = 0
        for tool_name, stats in cog.tool_stats.items():
            count = stats.get('count', 0)
            success = stats.get('success', 0)
            if count > 0:
                tool_success_rate[tool_name] = success / count
                total_tool_uses += count
                successful_tool_uses += success
        overall_tool_success_rate = successful_tool_uses / total_tool_uses if total_tool_uses > 0 else 0.5
        print(f"Evolution Analysis (Freak Teto): Overall Tool Success Rate={overall_tool_success_rate:.2f} ({successful_tool_uses}/{total_tool_uses})") # Updated log

        # Reaction Analysis (Use renamed state variable)
        positive_reactions = 0
        negative_reactions = 0
        total_reacted_messages = len(cog.freak_teto_message_reactions) # Use renamed variable
        for msg_id, reaction_data in cog.freak_teto_message_reactions.items(): # Use renamed variable
            positive_reactions += reaction_data.get("positive", 0)
            negative_reactions += reaction_data.get("negative", 0)
        reaction_ratio = positive_reactions / (positive_reactions + negative_reactions) if (positive_reactions + negative_reactions) > 0 else 0.5
        print(f"Evolution Analysis (Freak Teto): Reaction Ratio (Pos/Total)={reaction_ratio:.2f} ({positive_reactions}/{positive_reactions + negative_reactions})") # Updated log

        # --- Calculate Trait Adjustments (Adjust for Teto's traits) ---
        trait_changes = {}
        local_learning_rate = 0.02

        # Helpfulness (increase if tool use is successful & reactions positive?)
        helpfulness_target_adjustment = (overall_tool_success_rate - 0.5) * 0.1 + (reaction_ratio - 0.5) * 0.1
        current_helpfulness = current_traits.get('helpfulness', BASELINE_PERSONALITY['helpfulness'])
        trait_changes['helpfulness'] = max(0.0, min(1.0, current_helpfulness + helpfulness_target_adjustment))

        # Friendliness (based on positive sentiment/reactions?)
        friendliness_target_adjustment = (avg_pos_intensity - 0.5) * 0.1 + (reaction_ratio - 0.5) * 0.1
        current_friendliness = current_traits.get('friendliness', BASELINE_PERSONALITY['friendliness'])
        trait_changes['friendliness'] = max(0.0, min(1.0, current_friendliness + friendliness_target_adjustment))

        # Openness to Sexual Topics (Increase if related topics get positive reactions?) - Needs specific tracking
        # Placeholder: Slightly increase if overall reactions are very positive?
        current_openness = current_traits.get('openness_to_sexual_topics', BASELINE_PERSONALITY['openness_to_sexual_topics'])
        if reaction_ratio > 0.7 and total_reacted_messages > 3: # If generally positive feedback
            trait_changes['openness_to_sexual_topics'] = max(0.0, min(1.0, current_openness + 0.02))

        # Curiosity (based on search usage, similar to Gurt's logic)
        search_uses = cog.tool_stats.get("web_search", {}).get("count", 0)
        if search_uses > 1:
            current_curiosity = current_traits.get('curiosity', BASELINE_PERSONALITY['curiosity'])
            trait_changes['curiosity'] = max(0.0, min(1.0, current_curiosity + 0.05))

        # No Teto-specific trait adjustments needed here currently.

        # --- Apply Calculated Changes ---
        updated_count = 0
        print(f"Calculated Trait Target Changes (Freak Teto): {trait_changes}") # Updated log
        for key, target_value in trait_changes.items():
            current_value = current_traits.get(key)
            if current_value is None: print(f"Evolution Warning: Trait '{key}' not found (Freak Teto)."); continue # Updated log
            try:
                current_float = float(current_value); target_float = float(target_value)
                new_value_float = current_float * (1 - local_learning_rate) + target_float * local_learning_rate
                new_value_clamped = max(0.0, min(1.0, new_value_float))
                if abs(new_value_clamped - current_float) > 0.001:
                    await cog.memory_manager.set_personality_trait(key, new_value_clamped)
                    print(f"Evolved trait '{key}' (Freak Teto): {current_float:.3f} -> {new_value_clamped:.3f}") # Updated log
                    updated_count += 1
            except (ValueError, TypeError) as e: print(f"Evolution Error processing trait '{key}' (Freak Teto): {e}") # Updated log

        if updated_count > 0: print(f"Personality evolution complete (Freak Teto). Updated {updated_count} traits.") # Updated log
        else: print("Personality evolution complete (Freak Teto). No significant trait changes.") # Updated log

    except Exception as e: print(f"Error during personality evolution (Freak Teto): {e}"); traceback.print_exc() # Updated log

async def reflect_on_memories(cog: 'FreakTetoCog'): # Updated type hint
    """Periodically reviews memories to synthesize insights or consolidate information."""
    print("Starting memory reflection cycle (Freak Teto)...") # Updated log
    try:
        REFLECTION_INTERVAL_HOURS = 6
        FACTS_TO_REVIEW_PER_USER = 15
        GENERAL_FACTS_TO_REVIEW = 30
        MIN_FACTS_FOR_REFLECTION = 5
        SYNTHESIS_MODEL = cog.fallback_model
        SYNTHESIS_MAX_TOKENS = 200

        print("Reflecting on user facts (Freak Teto)...") # Updated log
        all_user_ids = await cog.memory_manager.get_all_user_ids_with_facts()
        users_reflected = 0
        for user_id in all_user_ids:
            try:
                user_facts = await cog.memory_manager.get_user_facts(user_id, limit=FACTS_TO_REVIEW_PER_USER)
                if len(user_facts) < MIN_FACTS_FOR_REFLECTION: continue

                user_info = await cog.bot.fetch_user(int(user_id))
                user_name = user_info.display_name if user_info else f"User {user_id}"

                print(f" - Reflecting on {len(user_facts)} facts for {user_name} (Freak Teto)...") # Updated log
                facts_text = "\n".join([f"- {fact}" for fact in user_facts])
                # Adjust reflection prompt for Teto's persona
                reflection_prompt = [
                    {"role": "system", "content": f"You are Freak Teto's reflection module. Analyze the following facts about {user_name}. Identify patterns, contradictions, or synthesize key traits/interests useful for assisting Master. Focus on 1-2 new, concise summary facts. Respond ONLY with JSON: {{ \"new_facts\": [\"fact1\", \"fact2\"], \"reasoning\": \"brief explanation\" }} or {{ \"new_facts\": [], \"reasoning\": \"No new insights.\" }}"},
                    {"role": "user", "content": f"Facts:\n{facts_text}\n\nSynthesize insights:"}
                ]
                synthesis_schema = {
                    "type": "object",
                    "properties": {
                        "new_facts": {"type": "array", "items": {"type": "string"}},
                        "reasoning": {"type": "string"}
                    }, "required": ["new_facts", "reasoning"]
                }

                from .api import get_internal_ai_json_response # Local import
                synthesis_result, _ = await get_internal_ai_json_response( # Adjusted call
                    cog=cog,
                    prompt_messages=reflection_prompt,
                    task_description=f"User Fact Reflection ({user_name}, Freak Teto)", # Updated log context
                    response_schema_dict=synthesis_schema,
                    model_name_override=SYNTHESIS_MODEL,
                    temperature=0.4,
                    max_tokens=SYNTHESIS_MAX_TOKENS
                )

                if synthesis_result and synthesis_result.get("new_facts"):
                    added_count = 0
                    for new_fact in synthesis_result["new_facts"]:
                        if new_fact and len(new_fact) > 5:
                            add_result = await cog.memory_manager.add_user_fact(user_id, f"[Synthesized] {new_fact}")
                            if add_result.get("status") == "added": added_count += 1
                    if added_count > 0:
                        print(f" - Added {added_count} synthesized fact(s) for {user_name} (Freak Teto). Reasoning: {synthesis_result.get('reasoning')}") # Updated log
                        users_reflected += 1

            except Exception as user_reflect_e:
                print(f" - Error reflecting on facts for user {user_id} (Freak Teto): {user_reflect_e}") # Updated log
        print(f"User fact reflection complete (Freak Teto). Synthesized facts for {users_reflected} users.") # Updated log

        print("Reflecting on general facts (Placeholder - Freak Teto)...") # Updated log
        print("General fact reflection cycle finished (Placeholder - Freak Teto).") # Updated log

    except Exception as e:
        print(f"Error during memory reflection cycle (Freak Teto): {e}") # Updated log
        traceback.print_exc()

async def decompose_goal_into_steps(cog: 'FreakTetoCog', goal_description: str) -> Optional[Dict[str, Any]]: # Updated type hint
    """Uses an AI call to break down a goal into achievable steps with potential tool usage."""
    logger.info(f"Decomposing goal: '{goal_description}' (Freak Teto)") # Updated log
    from .config import GOAL_DECOMPOSITION_SCHEMA, TOOLS # Import schema and tools list for context
    from .api import get_internal_ai_json_response # Local import

    tool_descriptions = "\n".join([f"- {tool.name}: {tool.description}" for tool in TOOLS])
    # Adjust system prompt for Teto's persona
    system_prompt = (
        "You are Freak Teto's planning module. Your task is to break down a high-level goal into a sequence of smaller, "
        "concrete steps to assist Master. For each step, determine if one of Freak Teto's available tools can help achieve it. "
        "Assess if the overall goal is achievable given the tools and typical Discord bot limitations. "
        f"Available Tools:\n{tool_descriptions}\n\n"
        "Respond ONLY with JSON matching the provided schema."
    )
    user_prompt = f"Goal for Freak Teto: {goal_description}\n\nDecompose this goal into achievable steps:"

    decomposition_prompt_messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        plan, _ = await get_internal_ai_json_response( # Adjusted call
            cog=cog,
            prompt_messages=decomposition_prompt_messages,
            task_description=f"Goal Decomposition ({goal_description[:30]}..., Freak Teto)", # Updated log context
            response_schema_dict=GOAL_DECOMPOSITION_SCHEMA['schema'],
            model_name_override=cog.fallback_model,
            temperature=0.3,
            max_tokens=1000
        )

        if plan and plan.get("goal_achievable"):
            logger.info(f"Goal '{goal_description}' decomposed into {len(plan.get('steps', []))} steps (Freak Teto).") # Updated log
            if isinstance(plan.get('steps'), list):
                for i, step in enumerate(plan['steps']):
                    if not isinstance(step, dict) or 'step_description' not in step:
                        logger.error(f"Invalid step structure at index {i} in decomposition plan: {step} (Freak Teto)") # Updated log
                        plan['goal_achievable'] = False
                        plan['reasoning'] += " (Invalid step structure detected)"
                        plan['steps'] = []
                        break
            else:
                plan['steps'] = []

            return plan
        elif plan:
            logger.warning(f"Goal '{goal_description}' deemed not achievable (Freak Teto). Reasoning: {plan.get('reasoning')}") # Updated log
            return plan
        else:
            logger.error(f"Goal decomposition failed for '{goal_description}' (Freak Teto). No valid JSON plan returned.") # Updated log
            return None

    except Exception as e:
        logger.error(f"Error during goal decomposition for '{goal_description}' (Freak Teto): {e}", exc_info=True) # Updated log
        return None
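For orientation, a plan object carrying the fields that this module and background.py actually read (goal_achievable, reasoning, steps with step_description / tool_name / tool_arguments, plus current_step_index and error_message added during execution) might look like the sketch below; the concrete tool choice and arguments are illustrative guesses, not a documented part of GOAL_DECOMPOSITION_SCHEMA:

example_plan = {
    "goal_achievable": True,
    "reasoning": "The goal can be met with one search and one summary message.",
    "steps": [
        {
            "step_description": "Search the web for the requested information",
            "tool_name": "web_search",  # a tool name referenced elsewhere in this file; arguments are hypothetical
            "tool_arguments": {"query": "kasane teto utau release date"},
        },
        {
            "step_description": "Summarize the findings for Master",
            "tool_name": None,  # no tool; internal reasoning step
            "tool_arguments": None,
        },
    ],
    "current_step_index": 0,  # maintained by the executor in background.py as steps complete
}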


def analyze_message_sentiment(cog: 'FreakTetoCog', message_content: str) -> Dict[str, Any]: # Updated type hint
    """Analyzes the sentiment of a message using keywords and emojis."""
    # Logic remains largely the same, potentially tune keywords/scores if needed for Teto's interpretation
    content = message_content.lower()
    result = {"sentiment": "neutral", "intensity": 0.5, "emotions": [], "confidence": 0.5}

    positive_emoji_count = sum(1 for emoji in EMOJI_SENTIMENT["positive"] if emoji in content)
    negative_emoji_count = sum(1 for emoji in EMOJI_SENTIMENT["negative"] if emoji in content)
    total_emoji_count = positive_emoji_count + negative_emoji_count + sum(1 for emoji in EMOJI_SENTIMENT["neutral"] if emoji in content)

    detected_emotions = []; emotion_scores = {}
    for emotion, keywords in EMOTION_KEYWORDS.items():
        emotion_count = sum(1 for keyword in keywords if re.search(r'\b' + re.escape(keyword) + r'\b', content))
        if emotion_count > 0:
            emotion_score = min(1.0, emotion_count / len(keywords) * 2)
            emotion_scores[emotion] = emotion_score
            detected_emotions.append(emotion)

    if emotion_scores:
        primary_emotion = max(emotion_scores.items(), key=lambda x: x[1])
        result["emotions"] = [primary_emotion[0]]
        for emotion, score in emotion_scores.items():
            if emotion != primary_emotion[0] and score > primary_emotion[1] * 0.7: result["emotions"].append(emotion)

        positive_emotions = ["joy"]; negative_emotions = ["sadness", "anger", "fear", "disgust"]
        if primary_emotion[0] in positive_emotions: result["sentiment"] = "positive"; result["intensity"] = primary_emotion[1]
        elif primary_emotion[0] in negative_emotions: result["sentiment"] = "negative"; result["intensity"] = primary_emotion[1]
        else: result["sentiment"] = "neutral"; result["intensity"] = 0.5
        result["confidence"] = min(0.9, 0.5 + primary_emotion[1] * 0.4)

    elif total_emoji_count > 0:
        if positive_emoji_count > negative_emoji_count: result["sentiment"] = "positive"; result["intensity"] = min(0.9, 0.5 + (positive_emoji_count / total_emoji_count) * 0.4); result["confidence"] = min(0.8, 0.4 + (positive_emoji_count / total_emoji_count) * 0.4)
        elif negative_emoji_count > positive_emoji_count: result["sentiment"] = "negative"; result["intensity"] = min(0.9, 0.5 + (negative_emoji_count / total_emoji_count) * 0.4); result["confidence"] = min(0.8, 0.4 + (negative_emoji_count / total_emoji_count) * 0.4)
        else: result["sentiment"] = "neutral"; result["intensity"] = 0.5; result["confidence"] = 0.6

    else: # Basic text fallback
        positive_words = {"good", "great", "awesome", "amazing", "excellent", "love", "like", "best", "better", "nice", "cool", "happy", "glad", "thanks", "thank", "appreciate", "wonderful", "fantastic", "perfect", "beautiful", "fun", "enjoy", "yes", "yep"}
        negative_words = {"bad", "terrible", "awful", "worst", "hate", "dislike", "sucks", "stupid", "boring", "annoying", "sad", "upset", "angry", "mad", "disappointed", "sorry", "unfortunate", "horrible", "ugly", "wrong", "fail", "no", "nope"}
        words = re.findall(r'\b\w+\b', content)
        positive_count = sum(1 for word in words if word in positive_words)
        negative_count = sum(1 for word in words if word in negative_words)
        if positive_count > negative_count: result["sentiment"] = "positive"; result["intensity"] = min(0.8, 0.5 + (positive_count / len(words)) * 2 if words else 0); result["confidence"] = min(0.7, 0.3 + (positive_count / len(words)) * 0.4 if words else 0)
        elif negative_count > positive_count: result["sentiment"] = "negative"; result["intensity"] = min(0.8, 0.5 + (negative_count / len(words)) * 2 if words else 0); result["confidence"] = min(0.7, 0.3 + (negative_count / len(words)) * 0.4 if words else 0)
        else: result["sentiment"] = "neutral"; result["intensity"] = 0.5; result["confidence"] = 0.5

    return result

def update_conversation_sentiment(cog: 'FreakTetoCog', channel_id: int, user_id: str, message_sentiment: Dict[str, Any]): # Updated type hint
    """Updates the conversation sentiment tracking based on a new message's sentiment."""
    channel_sentiment = cog.conversation_sentiment[channel_id]
    now = time.time()

    if now - channel_sentiment["last_update"] > cog.sentiment_update_interval: # Access interval via cog
        if channel_sentiment["overall"] == "positive": channel_sentiment["intensity"] = max(0.5, channel_sentiment["intensity"] - SENTIMENT_DECAY_RATE)
        elif channel_sentiment["overall"] == "negative": channel_sentiment["intensity"] = max(0.5, channel_sentiment["intensity"] - SENTIMENT_DECAY_RATE)
        channel_sentiment["recent_trend"] = "stable"
        channel_sentiment["last_update"] = now

    user_sentiment = channel_sentiment["user_sentiments"].get(user_id, {"sentiment": "neutral", "intensity": 0.5})
    confidence_weight = message_sentiment["confidence"]
    if user_sentiment["sentiment"] == message_sentiment["sentiment"]:
        new_intensity = user_sentiment["intensity"] * 0.7 + message_sentiment["intensity"] * 0.3
        user_sentiment["intensity"] = min(0.95, new_intensity)
    else:
        if message_sentiment["confidence"] > 0.7:
            user_sentiment["sentiment"] = message_sentiment["sentiment"]
            user_sentiment["intensity"] = message_sentiment["intensity"] * 0.7 + user_sentiment["intensity"] * 0.3
        else:
            if message_sentiment["intensity"] > user_sentiment["intensity"]:
                user_sentiment["sentiment"] = message_sentiment["sentiment"]
                user_sentiment["intensity"] = user_sentiment["intensity"] * 0.6 + message_sentiment["intensity"] * 0.4

    user_sentiment["emotions"] = message_sentiment.get("emotions", [])
    channel_sentiment["user_sentiments"][user_id] = user_sentiment

    # Update overall based on active users
    active_user_sentiments = [s for uid, s in channel_sentiment["user_sentiments"].items() if uid in cog.active_conversations.get(channel_id, {}).get('participants', set())]
    if active_user_sentiments:
        sentiment_counts = defaultdict(int)
        for s in active_user_sentiments: sentiment_counts[s["sentiment"]] += 1
        dominant_sentiment = max(sentiment_counts.items(), key=lambda x: x[1])[0]
        avg_intensity = sum(s["intensity"] for s in active_user_sentiments if s["sentiment"] == dominant_sentiment) / sentiment_counts[dominant_sentiment]

        prev_sentiment = channel_sentiment["overall"]; prev_intensity = channel_sentiment["intensity"]
        if dominant_sentiment == prev_sentiment:
            if avg_intensity > prev_intensity + 0.1: channel_sentiment["recent_trend"] = "intensifying"
            elif avg_intensity < prev_intensity - 0.1: channel_sentiment["recent_trend"] = "diminishing"
            else: channel_sentiment["recent_trend"] = "stable"
        else: channel_sentiment["recent_trend"] = "changing"
        channel_sentiment["overall"] = dominant_sentiment
        channel_sentiment["intensity"] = avg_intensity

    channel_sentiment["last_update"] = now

# --- Proactive Goal Creation ---

async def proactively_create_goals(cog: 'FreakTetoCog'): # Updated type hint
    """
    Analyzes Freak Teto's current state, environment, and recent interactions
    to determine if any new goals should be created autonomously.
    (Placeholder Implementation)
    """
    logger.info("Checking for potential proactive goals (Freak Teto)...") # Updated log
    # Placeholder logic - adjust triggers for Teto's persona (e.g., helpfulness, reacting to Master)
    # 1. Detect request for help that wasn't fully addressed?
    # 2. Notice a user struggling with something Teto knows?
    # 3. Identify opportunity to share relevant knowledge about music/Vocaloid?
    # 4. Task from Master (if detected via analysis)?
    logger.info("Proactive goal creation check complete (Placeholder - Freak Teto).") # Updated log
freak_teto/api.py (Normal file, +1585 lines)
File diff suppressed because it is too large
freak_teto/background.py (Normal file, +460 lines)
@@ -0,0 +1,460 @@
import asyncio
import time
import random
import traceback
import os
import json
import aiohttp
import discord # Added import
from collections import defaultdict
from typing import TYPE_CHECKING, Any, List, Dict # Added List, Dict
# Use google.generativeai instead of vertexai directly
from google import genai
from google.genai import types
# from google.protobuf import json_format # No longer needed for args parsing

# Relative imports
from .config import (
    GOAL_CHECK_INTERVAL, GOAL_EXECUTION_INTERVAL, LEARNING_UPDATE_INTERVAL, EVOLUTION_UPDATE_INTERVAL, INTEREST_UPDATE_INTERVAL,
    INTEREST_DECAY_INTERVAL_HOURS, INTEREST_PARTICIPATION_BOOST,
    INTEREST_POSITIVE_REACTION_BOOST, INTEREST_NEGATIVE_REACTION_PENALTY,
    INTEREST_FACT_BOOST, PROACTIVE_GOAL_CHECK_INTERVAL, STATS_PUSH_INTERVAL, # Added stats interval
    MOOD_OPTIONS, MOOD_CATEGORIES, MOOD_CHANGE_INTERVAL_MIN, MOOD_CHANGE_INTERVAL_MAX, # Mood change imports
    BASELINE_PERSONALITY, # For default traits
    REFLECTION_INTERVAL_SECONDS # Import reflection interval
)
# Assuming analysis functions are moved
from .analysis import (
    analyze_conversation_patterns, evolve_personality, identify_conversation_topics,
    reflect_on_memories, decompose_goal_into_steps, # Import goal decomposition
    proactively_create_goals # Import placeholder for proactive goal creation
)
# Import helpers from api.py
from .api import (
    get_internal_ai_json_response,
    call_google_genai_api_with_retry, # Import the retry helper
    find_function_call_in_parts, # Import function call finder
    _get_response_text, # Import text extractor
    _preprocess_schema_for_vertex, # Import schema preprocessor (name kept for now)
    STANDARD_SAFETY_SETTINGS, # Import safety settings
    process_requested_tools # Import tool processor
)

if TYPE_CHECKING:
    from .cog import FreakTetoCog # For type hinting - Updated

# --- Tool Mapping Import ---
# Import the mapping to execute tools by name
from .tools import TOOL_MAPPING, send_discord_message # Also import send_discord_message directly for goal execution reporting
from .config import TOOLS # Import FunctionDeclaration list for tool metadata

# --- Background Task ---

async def background_processing_task(cog: 'FreakTetoCog'): # Updated type hint
    """Background task that periodically analyzes conversations, evolves personality, updates interests, changes mood, reflects on memory, and pushes stats."""
    # Get API details from environment for stats pushing
    api_internal_url = os.getenv("API_INTERNAL_URL")
    # Use a potentially different secret for Freak Teto stats, no fallback needed conceptually
    freak_teto_stats_push_secret = os.getenv("FREAK_TETO_STATS_PUSH_SECRET") # Removed fallback to Gurt's secret

    if not api_internal_url:
        print("WARNING: API_INTERNAL_URL not set. Freak Teto stats will not be pushed.") # Updated log
    if not freak_teto_stats_push_secret:
        print("WARNING: FREAK_TETO_STATS_PUSH_SECRET not set. Freak Teto stats push endpoint is insecure and likely won't work.") # Updated log

    try:
        while True:
            await asyncio.sleep(15) # Check more frequently for stats push
            now = time.time()

            # --- Push Stats (Runs frequently) ---
            if api_internal_url and freak_teto_stats_push_secret and (now - cog.last_stats_push > STATS_PUSH_INTERVAL):
                print("Pushing Freak Teto stats to API server...") # Updated log
                try:
                    # Call renamed stats method
                    stats_data = await cog.get_freak_teto_stats()
                    headers = {
                        "Authorization": f"Bearer {freak_teto_stats_push_secret}",
                        "Content-Type": "application/json"
                    }
                    # Use the cog's session, ensure it's created
                    if cog.session:
                        # Set a reasonable timeout for the stats push
                        push_timeout = aiohttp.ClientTimeout(total=10) # 10 seconds total timeout
                        async with cog.session.post(api_internal_url, json=stats_data, headers=headers, timeout=push_timeout, ssl=True) as response: # Explicitly enable SSL verification
                            if response.status == 200:
                                print(f"Successfully pushed Freak Teto stats (Status: {response.status})") # Log already updated
                            else:
                                error_text = await response.text()
                                print(f"Failed to push Freak Teto stats (Status: {response.status}): {error_text[:200]}") # Log already updated
                    else:
                        print("Error pushing stats: FreakTetoCog session not initialized.") # Log already updated

                except aiohttp.ClientConnectorSSLError as ssl_err:
                    print(f"SSL Error pushing Freak Teto stats: {ssl_err}. Ensure the API server's certificate is valid and trusted, or check network configuration.") # Log already updated
                    print("If using a self-signed certificate for development, the bot process might need to trust it.")
                except aiohttp.ClientError as client_err:
                    print(f"HTTP Client Error pushing Freak Teto stats: {client_err}") # Updated log
                except asyncio.TimeoutError:
                    print("Timeout error pushing Freak Teto stats.") # Log already updated
                except Exception as e:
                    print(f"Unexpected error pushing Freak Teto stats: {e}") # Log already updated
                    traceback.print_exc()
                finally:
                    cog.last_stats_push = now # Update timestamp even on failure/success to avoid spamming logs

            # --- Learning Analysis (Runs less frequently) ---
            if now - cog.last_learning_update > LEARNING_UPDATE_INTERVAL:
                if cog.message_cache['global_recent']:
                    print("Running conversation pattern analysis (Freak Teto)...") # Updated log
                    # Ensure analysis uses FreakTetoCog instance
                    await analyze_conversation_patterns(cog)
                    cog.last_learning_update = now
                    print("Learning analysis cycle complete (Freak Teto).") # Updated log
                else:
                    print("Skipping learning analysis (Freak Teto): No recent messages.") # Updated log

            # --- Evolve Personality (Runs moderately frequently) ---
            if now - cog.last_evolution_update > EVOLUTION_UPDATE_INTERVAL:
                print("Running personality evolution (Freak Teto)...") # Updated log
                # Ensure analysis uses FreakTetoCog instance
                await evolve_personality(cog)
                cog.last_evolution_update = now
                print("Personality evolution complete (Freak Teto).") # Updated log

            # --- Update Interests (Runs moderately frequently) ---
            if now - cog.last_interest_update > INTEREST_UPDATE_INTERVAL:
                print("Running interest update (Freak Teto)...") # Updated log
                await update_interests(cog) # Call the local helper function below
                print("Running interest decay check (Freak Teto)...") # Updated log
                # Ensure MemoryManager uses FreakTeto DB paths
                await cog.memory_manager.decay_interests(
                    decay_interval_hours=INTEREST_DECAY_INTERVAL_HOURS
                )
                cog.last_interest_update = now # Reset timer after update and decay check
                print("Interest update and decay check complete (Freak Teto).") # Updated log

            # --- Memory Reflection (Runs less frequently) ---
            if now - cog.last_reflection_time > REFLECTION_INTERVAL_SECONDS:
                print("Running memory reflection (Freak Teto)...") # Updated log
                # Ensure analysis uses FreakTetoCog instance
                await reflect_on_memories(cog)
                cog.last_reflection_time = now # Update timestamp
                print("Memory reflection cycle complete (Freak Teto).") # Updated log

            # --- Goal Decomposition (Runs periodically) ---
            if now - cog.last_goal_check_time > GOAL_CHECK_INTERVAL:
                print("Checking for pending goals to decompose (Freak Teto)...") # Updated log
                try:
                    # Ensure MemoryManager uses FreakTeto DB paths
                    pending_goals = await cog.memory_manager.get_goals(status='pending', limit=3)
                    for goal in pending_goals:
                        goal_id = goal.get('goal_id')
                        description = goal.get('description')
                        if not goal_id or not description: continue

                        print(f" - Decomposing goal ID {goal_id}: '{description}' (Freak Teto)") # Updated log
                        # Ensure analysis uses FreakTetoCog instance
                        plan = await decompose_goal_into_steps(cog, description)

                        if plan and plan.get('goal_achievable') and plan.get('steps'):
                            await cog.memory_manager.update_goal(goal_id, status='active', details=plan)
                            print(f" - Goal ID {goal_id} decomposed and set to active (Freak Teto).") # Updated log
                        elif plan:
                            await cog.memory_manager.update_goal(goal_id, status='failed', details={"reason": plan.get('reasoning', 'Deemed unachievable by planner.')})
                            print(f" - Goal ID {goal_id} marked as failed (unachievable, Freak Teto). Reason: {plan.get('reasoning')}") # Updated log
                        else:
                            await cog.memory_manager.update_goal(goal_id, status='failed', details={"reason": "Goal decomposition process failed."})
                            print(f" - Goal ID {goal_id} marked as failed (decomposition error, Freak Teto).") # Updated log
                        await asyncio.sleep(1)

                    cog.last_goal_check_time = now
                except Exception as goal_e:
                    print(f"Error during goal decomposition check (Freak Teto): {goal_e}") # Updated log
                    traceback.print_exc()
                    cog.last_goal_check_time = now

            # --- Goal Execution (Runs periodically) ---
            if now - cog.last_goal_execution_time > GOAL_EXECUTION_INTERVAL:
                print("Checking for active goals to execute (Freak Teto)...") # Updated log
                try:
                    # Ensure MemoryManager uses FreakTeto DB paths
                    active_goals = await cog.memory_manager.get_goals(status='active', limit=1)
                    if active_goals:
                        goal = active_goals[0]
                        goal_id = goal.get('goal_id')
                        description = goal.get('description')
                        plan = goal.get('details')
                        goal_context_guild_id = goal.get('guild_id')
                        goal_context_channel_id = goal.get('channel_id')
                        goal_context_user_id = goal.get('user_id')

                        if goal_id and description and plan and isinstance(plan.get('steps'), list):
                            print(f"--- Executing Goal ID {goal_id}: '{description}' (Freak Teto, Context: G={goal_context_guild_id}, C={goal_context_channel_id}, U={goal_context_user_id}) ---") # Updated log
                            steps = plan['steps']
                            current_step_index = plan.get('current_step_index', 0)
                            goal_failed = False
                            goal_completed = False

                            if current_step_index < len(steps):
                                step = steps[current_step_index]
                                step_desc = step.get('step_description')
                                tool_name = step.get('tool_name')
                                tool_args = step.get('tool_arguments')

                                print(f" - Step {current_step_index + 1}/{len(steps)}: {step_desc} (Freak Teto)") # Updated log

                                if tool_name:
                                    print(f" - Attempting tool: {tool_name} with args: {tool_args} (Freak Teto)") # Updated log
                                    tool_func = TOOL_MAPPING.get(tool_name)
                                    tool_result = None
                                    tool_error = None
                                    tool_success = False

                                    if tool_func:
                                        try:
                                            args_to_pass = tool_args if isinstance(tool_args, dict) else {}
                                            print(f" - Executing: {tool_name}(cog, **{args_to_pass}) (Freak Teto)") # Updated log
                                            start_time = time.monotonic()
                                            # Ensure tool function uses FreakTetoCog instance correctly
                                            tool_result = await tool_func(cog, **args_to_pass)
                                            end_time = time.monotonic()
                                            print(f" - Tool '{tool_name}' returned: {str(tool_result)[:200]}... (Freak Teto)") # Updated log

                                            if isinstance(tool_result, dict) and "error" in tool_result:
                                                tool_error = tool_result["error"]
                                                print(f" - Tool '{tool_name}' reported error: {tool_error} (Freak Teto)") # Updated log
                                                cog.tool_stats[tool_name]["failure"] += 1
                                            else:
                                                tool_success = True
                                                print(f" - Tool '{tool_name}' executed successfully (Freak Teto).") # Updated log
                                                cog.tool_stats[tool_name]["success"] += 1
                                            cog.tool_stats[tool_name]["count"] += 1
                                            cog.tool_stats[tool_name]["total_time"] += (end_time - start_time)

                                        except Exception as exec_e:
                                            tool_error = f"Exception during execution: {str(exec_e)}"
                                            print(f" - Tool '{tool_name}' raised exception: {exec_e} (Freak Teto)") # Updated log
                                            traceback.print_exc()
                                            cog.tool_stats[tool_name]["failure"] += 1
                                            cog.tool_stats[tool_name]["count"] += 1
                                    else:
                                        tool_error = f"Tool '{tool_name}' not found in TOOL_MAPPING."
                                        print(f" - Error: {tool_error} (Freak Teto)") # Updated log

                                    # --- Send Update Message ---
                                    if goal_context_channel_id:
                                        step_number_display = current_step_index + 1
                                        status_emoji = "✅" if tool_success else "❌"
                                        step_result_summary = _create_result_summary(tool_result if tool_success else {"error": tool_error})

                                        update_message = (
                                            f"**Goal Update (Freak Teto, ID: {goal_id}, Step {step_number_display}/{len(steps)})** {status_emoji}\n" # Updated title
                                            f"> **Goal:** {description}\n"
                                            f"> **Step:** {step_desc}\n"
                                            f"> **Tool:** `{tool_name}`\n"
                                            f"> **Result:** `{step_result_summary}`"
                                        )
                                        if len(update_message) > 1900:
                                            update_message = update_message[:1900] + "...`"

                                        try:
                                            # Ensure send_discord_message uses FreakTetoCog instance
                                            await send_discord_message(cog, channel_id=goal_context_channel_id, message_content=update_message)
                                            print(f" - Sent goal update to channel {goal_context_channel_id} (Freak Teto)") # Updated log
                                        except Exception as msg_err:
                                            print(f" - Failed to send goal update message to channel {goal_context_channel_id}: {msg_err} (Freak Teto)") # Updated log

                                    # --- Handle Tool Outcome ---
                                    if tool_success:
                                        current_step_index += 1
                                    else:
                                        goal_failed = True
                                        plan['error_message'] = f"Failed at step {current_step_index + 1} ({tool_name}): {tool_error}"
                                else:
                                    print(" - No tool required for this step (internal check/reasoning, Freak Teto).") # Updated log
                                    current_step_index += 1

                                # Check if goal completed
                                if not goal_failed and current_step_index >= len(steps):
                                    goal_completed = True

                                # --- Update Goal Status ---
                                plan['current_step_index'] = current_step_index
                                if goal_completed:
                                    await cog.memory_manager.update_goal(goal_id, status='completed', details=plan)
                                    print(f"--- Goal ID {goal_id} completed successfully (Freak Teto). ---") # Updated log
                                elif goal_failed:
                                    await cog.memory_manager.update_goal(goal_id, status='failed', details=plan)
                                    print(f"--- Goal ID {goal_id} failed (Freak Teto). ---") # Updated log
                                else:
                                    await cog.memory_manager.update_goal(goal_id, details=plan)
                                    print(f" - Goal ID {goal_id} progress updated to step {current_step_index} (Freak Teto).") # Updated log

                            else:
                                print(f" - Goal ID {goal_id} is active but has invalid steps. Marking as failed (Freak Teto).") # Updated log
                                await cog.memory_manager.update_goal(goal_id, status='failed', details={"reason": "Active goal has invalid step data."})

                        else:
                            print(f" - Skipping active goal ID {goal_id}: Missing description or valid plan (Freak Teto).") # Updated log
                            if goal_id:
                                await cog.memory_manager.update_goal(goal_id, status='failed', details={"reason": "Invalid plan structure found during execution."})

                    else:
                        print("No active goals found to execute (Freak Teto).") # Updated log

                    cog.last_goal_execution_time = now
                except Exception as goal_exec_e:
                    print(f"Error during goal execution check (Freak Teto): {goal_exec_e}") # Updated log
                    traceback.print_exc()
                    cog.last_goal_execution_time = now

            # --- Automatic Mood Change ---
            # Mood change logic might need persona adjustments if kept
            # await maybe_change_mood(cog)

            # --- Proactive Goal Creation Check ---
            if now - cog.last_proactive_goal_check > PROACTIVE_GOAL_CHECK_INTERVAL:
                print("Checking if Freak Teto should proactively create goals...") # Updated log
                try:
                    # Ensure analysis uses FreakTetoCog instance
                    await proactively_create_goals(cog)
                    cog.last_proactive_goal_check = now
                    print("Proactive goal check complete (Freak Teto).") # Updated log
                except Exception as proactive_e:
                    print(f"Error during proactive goal check (Freak Teto): {proactive_e}") # Updated log
                    traceback.print_exc()
                    cog.last_proactive_goal_check = now

    except asyncio.CancelledError:
        print("Background processing task cancelled (Freak Teto)") # Updated log
    except Exception as e:
        print(f"Error in background processing task (Freak Teto): {e}") # Updated log
        traceback.print_exc()
        await asyncio.sleep(300)
|
||||
|
||||
# --- Helper for Summarizing Tool Results ---
|
||||
def _create_result_summary(tool_result: Any, max_len: int = 200) -> str:
|
||||
# This helper is generic; adjust it only if any result keys turn out to be Gurt-specific.
|
||||
if isinstance(tool_result, dict):
|
||||
if "error" in tool_result:
|
||||
return f"Error: {str(tool_result['error'])[:max_len]}"
|
||||
elif "status" in tool_result:
|
||||
summary = f"Status: {tool_result['status']}"
|
||||
if "stdout" in tool_result and tool_result["stdout"]:
|
||||
summary += f", stdout: {tool_result['stdout'][:max_len//2]}"
|
||||
if "stderr" in tool_result and tool_result["stderr"]:
|
||||
summary += f", stderr: {tool_result['stderr'][:max_len//2]}"
|
||||
if "content" in tool_result:
|
||||
summary += f", content: {tool_result['content'][:max_len//2]}..."
|
||||
if "bytes_written" in tool_result:
|
||||
summary += f", bytes: {tool_result['bytes_written']}"
|
||||
if "message_id" in tool_result:
|
||||
summary += f", msg_id: {tool_result['message_id']}"
|
||||
return summary[:max_len]
|
||||
else:
|
||||
return f"Dict Result: {str(tool_result)[:max_len]}"
|
||||
elif isinstance(tool_result, str):
|
||||
return f"String Result: {tool_result[:max_len]}"
|
||||
elif tool_result is None:
|
||||
return "Result: None"
|
||||
else:
|
||||
return f"Result Type {type(tool_result)}: {str(tool_result)[:max_len]}"
|
||||
|
||||
# --- Automatic Mood Change Logic ---
|
||||
# (Re-evaluate if mood changes make sense for Teto's persona)
|
||||
# async def maybe_change_mood(cog: 'FreakTetoCog'):
|
||||
# ...
|
||||
|
||||
# --- Interest Update Logic ---
|
||||
async def update_interests(cog: 'FreakTetoCog'): # Updated type hint
|
||||
"""Analyzes recent activity and updates Freak Teto's interest levels.""" # Updated docstring
|
||||
print("Starting interest update cycle (Freak Teto)...") # Updated log
|
||||
try:
|
||||
interest_changes = defaultdict(float)
|
||||
|
||||
# 1. Analyze participation in topics
|
||||
# Use renamed state variable
|
||||
print(f"Analyzing Freak Teto participation topics: {dict(cog.freak_teto_participation_topics)}") # Updated log and variable
|
||||
for topic, count in cog.freak_teto_participation_topics.items():
|
||||
boost = INTEREST_PARTICIPATION_BOOST * count
|
||||
interest_changes[topic] += boost
|
||||
print(f" - Participation boost for '{topic}': +{boost:.3f} (Count: {count})")
|
||||
|
||||
# 2. Analyze reactions to bot's messages
|
||||
# Use renamed state variable
|
||||
print(f"Analyzing {len(cog.freak_teto_message_reactions)} reactions to Freak Teto's messages...") # Updated log and variable
|
||||
processed_reaction_messages = set()
|
||||
reactions_to_process = list(cog.freak_teto_message_reactions.items()) # Use renamed variable
|
||||
|
||||
for message_id, reaction_data in reactions_to_process:
|
||||
if message_id in processed_reaction_messages: continue
|
||||
topic = reaction_data.get("topic")
|
||||
if not topic:
|
||||
try:
|
||||
# Ensure message cache access is correct for FreakTetoCog
|
||||
teto_msg_data = next((msg for msg in cog.message_cache['global_recent'] if msg['id'] == message_id), None)
|
||||
if teto_msg_data and teto_msg_data['content']:
|
||||
# Ensure analysis uses FreakTetoCog instance
|
||||
identified_topics = identify_conversation_topics(cog, [teto_msg_data])
|
||||
if identified_topics:
|
||||
topic = identified_topics[0]['topic']
|
||||
print(f" - Determined topic '{topic}' for reaction msg {message_id} retrospectively (Freak Teto).") # Updated log
|
||||
else: print(f" - Could not determine topic for reaction msg {message_id} retrospectively (Freak Teto)."); continue # Updated log
|
||||
else: print(f" - Could not find Freak Teto msg {message_id} in cache for reaction analysis."); continue # Updated log
|
||||
except Exception as topic_e: print(f" - Error determining topic for reaction msg {message_id}: {topic_e}"); continue
|
||||
|
||||
if topic:
|
||||
topic = topic.lower().strip()
|
||||
pos_reactions = reaction_data.get("positive", 0)
|
||||
neg_reactions = reaction_data.get("negative", 0)
|
||||
change = 0
|
||||
if pos_reactions > neg_reactions: change = INTEREST_POSITIVE_REACTION_BOOST * (pos_reactions - neg_reactions)
|
||||
elif neg_reactions > pos_reactions: change = INTEREST_NEGATIVE_REACTION_PENALTY * (neg_reactions - pos_reactions)
|
||||
if change != 0:
|
||||
interest_changes[topic] += change
|
||||
print(f" - Reaction change for '{topic}' on msg {message_id}: {change:+.3f} ({pos_reactions} pos, {neg_reactions} neg) (Freak Teto)") # Updated log
|
||||
processed_reaction_messages.add(message_id)
|
||||
|
||||
# 3. Analyze recently learned facts
|
||||
try:
|
||||
# Ensure MemoryManager uses FreakTeto DB paths
|
||||
recent_facts = await cog.memory_manager.get_general_facts(limit=10)
|
||||
print(f"Analyzing {len(recent_facts)} recent general facts for interest boosts (Freak Teto)...") # Updated log
|
||||
for fact in recent_facts:
|
||||
fact_lower = fact.lower()
|
||||
# Update keyword checks for Teto's interests
|
||||
if "game" in fact_lower or "gaming" in fact_lower: interest_changes["gaming"] += INTEREST_FACT_BOOST; print(f" - Fact boost for 'gaming'")
|
||||
if "anime" in fact_lower or "manga" in fact_lower: interest_changes["anime/manga"] += INTEREST_FACT_BOOST; print(f" - Fact boost for 'anime/manga'")
|
||||
if "teto" in fact_lower: interest_changes["kasane teto"] += INTEREST_FACT_BOOST * 2; print(f" - Fact boost for 'kasane teto'")
|
||||
if "vocaloid" in fact_lower or "utau" in fact_lower: interest_changes["vocaloid/utau"] += INTEREST_FACT_BOOST * 1.5; print(f" - Fact boost for 'vocaloid/utau'")
|
||||
if "music" in fact_lower: interest_changes["music"] += INTEREST_FACT_BOOST; print(f" - Fact boost for 'music'")
|
||||
if "bread" in fact_lower: interest_changes["french bread"] += INTEREST_FACT_BOOST; print(f" - Fact boost for 'french bread'")
|
||||
# Add checks for other interests if needed
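# A hypothetical extra check would follow the same pattern, e.g.:
# if "drill" in fact_lower: interest_changes["drills"] += INTEREST_FACT_BOOST; print(f"  - Fact boost for 'drills'")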
|
||||
except Exception as fact_e: print(f" - Error analyzing recent facts (Freak Teto): {fact_e}") # Updated log
|
||||
|
||||
# --- Apply Changes ---
|
||||
print(f"Applying interest changes (Freak Teto): {dict(interest_changes)}") # Updated log
|
||||
if interest_changes:
|
||||
# Ensure MemoryManager uses FreakTeto DB paths
|
||||
for topic, change in interest_changes.items():
|
||||
if change != 0: await cog.memory_manager.update_interest(topic, change)
|
||||
else: print("No interest changes to apply (Freak Teto).") # Updated log
|
||||
|
||||
# Clear temporary tracking data
|
||||
# Use renamed state variable
|
||||
cog.freak_teto_participation_topics.clear()
|
||||
now = time.time()
|
||||
# Use renamed state variable
|
||||
reactions_to_keep = {
|
||||
msg_id: data for msg_id, data in cog.freak_teto_message_reactions.items()
|
||||
if data.get("timestamp", 0) > (now - INTEREST_UPDATE_INTERVAL * 1.1)
|
||||
}
|
||||
# Use renamed state variable
|
||||
cog.freak_teto_message_reactions = defaultdict(lambda: {"positive": 0, "negative": 0, "topic": None}, reactions_to_keep)
|
||||
|
||||
print("Interest update cycle finished (Freak Teto).") # Updated log
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during interest update (Freak Teto): {e}") # Updated log
|
||||
traceback.print_exc()
|
509
freak_teto/cog.py
Normal file
@ -0,0 +1,509 @@
|
||||
import discord
|
||||
from discord.ext import commands
|
||||
import asyncio
|
||||
import os
|
||||
import json
|
||||
import aiohttp
|
||||
import random
|
||||
import time
|
||||
from collections import defaultdict, deque
|
||||
from typing import Dict, List, Any, Optional, Tuple, Set, Union
|
||||
|
||||
# Third-party imports needed by the Cog itself or its direct methods
|
||||
from dotenv import load_dotenv
|
||||
from tavily import TavilyClient # Needed for tavily_client init
|
||||
# Interpreter and docker might only be needed by tools.py now
|
||||
|
||||
# --- Relative Imports from Freak Teto Package --- # Updated package name
|
||||
from .config import (
|
||||
PROJECT_ID, LOCATION, TAVILY_API_KEY, DEFAULT_MODEL, FALLBACK_MODEL, # Use GCP config
|
||||
DB_PATH, CHROMA_PATH, SEMANTIC_MODEL_NAME, MAX_USER_FACTS, MAX_GENERAL_FACTS,
|
||||
MOOD_OPTIONS, BASELINE_PERSONALITY, BASELINE_INTERESTS, MOOD_CHANGE_INTERVAL_MIN,
|
||||
MOOD_CHANGE_INTERVAL_MAX, CHANNEL_TOPIC_CACHE_TTL, CONTEXT_WINDOW_SIZE,
|
||||
API_TIMEOUT, SUMMARY_API_TIMEOUT, API_RETRY_ATTEMPTS, API_RETRY_DELAY,
|
||||
PROACTIVE_LULL_THRESHOLD, PROACTIVE_BOT_SILENCE_THRESHOLD, PROACTIVE_LULL_CHANCE,
|
||||
PROACTIVE_TOPIC_RELEVANCE_THRESHOLD, PROACTIVE_TOPIC_CHANCE,
|
||||
PROACTIVE_RELATIONSHIP_SCORE_THRESHOLD, PROACTIVE_RELATIONSHIP_CHANCE,
|
||||
INTEREST_UPDATE_INTERVAL, INTEREST_DECAY_INTERVAL_HOURS,
|
||||
LEARNING_UPDATE_INTERVAL, TOPIC_UPDATE_INTERVAL, SENTIMENT_UPDATE_INTERVAL,
|
||||
EVOLUTION_UPDATE_INTERVAL, RESPONSE_SCHEMA, TOOLS # Import necessary configs
|
||||
)
|
||||
# Import functions/classes from other modules
|
||||
from .memory import MemoryManager # Import from local memory.py
|
||||
from .background import background_processing_task
|
||||
from .commands import setup_commands # Import the setup helper
|
||||
from .listeners import on_ready_listener, on_message_listener, on_reaction_add_listener, on_reaction_remove_listener # Import listener functions
|
||||
from . import config as FreakTetoConfig # Import config module for get_freak_teto_stats - Updated alias
|
||||
# Tool mapping is used internally by api.py/process_requested_tools, no need to import here directly unless cog methods call tools directly (they shouldn't)
|
||||
# Analysis, context, prompt, api, utils functions are called by listeners/commands/background task, not directly by cog methods here usually.
|
||||
|
||||
# Load environment variables (might be loaded globally in main bot script too)
|
||||
load_dotenv()
|
||||
|
||||
class FreakTetoCog(commands.Cog, name="FreakTeto"): # Renamed Cog
|
||||
"""A special cog for the FreakTeto bot that uses Google Vertex AI API""" # Updated docstring
|
||||
|
||||
def __init__(self, bot):
|
||||
self.bot = bot
|
||||
# GCP Project/Location are used by vertexai.init() in api.py
|
||||
self.tavily_api_key = TAVILY_API_KEY # Use imported config
|
||||
self.session: Optional[aiohttp.ClientSession] = None # Keep for other potential HTTP requests (e.g., Piston)
|
||||
self.tavily_client = TavilyClient(api_key=self.tavily_api_key) if self.tavily_api_key else None
|
||||
self.default_model = DEFAULT_MODEL # Use imported config
|
||||
self.fallback_model = FALLBACK_MODEL # Use imported config
|
||||
self.MOOD_OPTIONS = MOOD_OPTIONS # Make MOOD_OPTIONS available as an instance attribute
|
||||
self.current_channel: Optional[Union[discord.TextChannel, discord.Thread, discord.DMChannel]] = None # Type hint current channel
|
||||
|
||||
# Instantiate MemoryManager
|
||||
self.memory_manager = MemoryManager(
|
||||
db_path=DB_PATH,
|
||||
max_user_facts=MAX_USER_FACTS,
|
||||
max_general_facts=MAX_GENERAL_FACTS,
|
||||
chroma_path=CHROMA_PATH,
|
||||
semantic_model_name=SEMANTIC_MODEL_NAME
|
||||
)
|
||||
|
||||
# --- State Variables ---
|
||||
# Keep state directly within the cog instance for now
|
||||
self.current_mood = random.choice(MOOD_OPTIONS)
|
||||
self.last_mood_change = time.time()
|
||||
self.needs_json_reminder = False # Flag to remind AI about JSON format
|
||||
|
||||
# Learning variables (Consider moving to a dedicated state/learning manager later)
|
||||
self.conversation_patterns = defaultdict(list)
|
||||
self.user_preferences = defaultdict(dict)
|
||||
self.response_effectiveness = {}
|
||||
self.last_learning_update = time.time()
|
||||
# self.learning_update_interval = LEARNING_UPDATE_INTERVAL # Interval used in background task
|
||||
|
||||
# Topic tracking
|
||||
self.active_topics = defaultdict(lambda: {
|
||||
"topics": [], "last_update": time.time(), "topic_history": [],
|
||||
"user_topic_interests": defaultdict(list)
|
||||
})
|
||||
# self.topic_update_interval = TOPIC_UPDATE_INTERVAL # Used in analysis
|
||||
|
||||
# Conversation tracking / Caches
|
||||
self.conversation_history = defaultdict(lambda: deque(maxlen=100))
|
||||
self.thread_history = defaultdict(lambda: deque(maxlen=50))
|
||||
self.user_conversation_mapping = defaultdict(set)
|
||||
self.channel_activity = defaultdict(lambda: 0.0) # Use float for timestamp
|
||||
self.conversation_topics = defaultdict(str)
|
||||
self.user_relationships = defaultdict(dict)
|
||||
self.conversation_summaries: Dict[int, Dict[str, Any]] = {} # Store dict with summary and timestamp
|
||||
self.channel_topics_cache: Dict[int, Dict[str, Any]] = {} # Store dict with topic and timestamp
|
||||
# self.channel_topic_cache_ttl = CHANNEL_TOPIC_CACHE_TTL # Used in prompt building
|
||||
|
||||
self.message_cache = {
|
||||
'by_channel': defaultdict(lambda: deque(maxlen=CONTEXT_WINDOW_SIZE)), # Use config
|
||||
'by_user': defaultdict(lambda: deque(maxlen=50)),
|
||||
'by_thread': defaultdict(lambda: deque(maxlen=50)),
|
||||
'global_recent': deque(maxlen=200),
|
||||
'mentioned': deque(maxlen=50),
|
||||
'replied_to': defaultdict(lambda: deque(maxlen=20))
|
||||
}
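# Note: each deque above evicts its oldest entries automatically once maxlen is
# reached, e.g. 'global_recent' never grows past 200 cached messages.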
|
||||
|
||||
self.active_conversations = {}
|
||||
self.bot_last_spoke = defaultdict(float)
|
||||
self.message_reply_map = {}
|
||||
|
||||
# Enhanced sentiment tracking
|
||||
self.conversation_sentiment = defaultdict(lambda: {
|
||||
"overall": "neutral", "intensity": 0.5, "recent_trend": "stable",
|
||||
"user_sentiments": {}, "last_update": time.time()
|
||||
})
|
||||
self.sentiment_update_interval = SENTIMENT_UPDATE_INTERVAL # Used in analysis
|
||||
|
||||
# Interest Tracking State
|
||||
self.freak_teto_participation_topics = defaultdict(int) # Renamed state variable
|
||||
self.last_interest_update = time.time()
|
||||
self.freak_teto_message_reactions = defaultdict(lambda: {"positive": 0, "negative": 0, "topic": None, "timestamp": 0.0}) # Renamed state variable
|
||||
|
||||
# Background task handle
|
||||
self.background_task: Optional[asyncio.Task] = None
|
||||
self.last_evolution_update = time.time() # Used in background task
|
||||
self.last_stats_push = time.time() # Timestamp for last stats push
|
||||
self.last_reflection_time = time.time() # Timestamp for last memory reflection
|
||||
self.last_goal_check_time = time.time() # Timestamp for last goal decomposition check
|
||||
self.last_goal_execution_time = time.time() # Timestamp for last goal execution check
|
||||
self.last_proactive_goal_check = time.time() # Timestamp for last proactive goal check
|
||||
self.last_internal_action_check = time.time() # Timestamp for last internal action check
|
||||
|
||||
# --- Stats Tracking ---
|
||||
self.api_stats = defaultdict(lambda: {"success": 0, "failure": 0, "retries": 0, "total_time": 0.0, "count": 0}) # Keyed by model name
|
||||
self.tool_stats = defaultdict(lambda: {"success": 0, "failure": 0, "total_time": 0.0, "count": 0}) # Keyed by tool name
|
||||
|
||||
# --- Setup Commands and Listeners ---
|
||||
# Add commands defined in commands.py
|
||||
# TODO: Ensure commands.py in freak_teto is properly refactored
|
||||
self.command_functions = setup_commands(self)
|
||||
|
||||
# Store command names for reference - safely handle Command objects
|
||||
self.registered_commands = []
|
||||
for func in self.command_functions:
|
||||
# For app commands, use the name attribute directly
|
||||
if hasattr(func, "name"):
|
||||
self.registered_commands.append(func.name)
|
||||
# For regular functions, use __name__
|
||||
elif hasattr(func, "__name__"):
|
||||
self.registered_commands.append(func.__name__)
|
||||
else:
|
||||
self.registered_commands.append(str(func))
|
||||
|
||||
# Add listeners defined in listeners.py
|
||||
# Note: Listeners need to be added to the bot instance, not the cog directly in this pattern.
|
||||
# We'll add them in cog_load or the main setup function.
|
||||
# TODO: Ensure listeners.py in freak_teto is properly refactored
|
||||
|
||||
print(f"FreakTetoCog initialized with commands: {self.registered_commands}") # Updated log
|
||||
|
||||
async def cog_load(self):
|
||||
"""Create aiohttp session, initialize DB, load baselines, start background task"""
|
||||
self.session = aiohttp.ClientSession()
|
||||
print("FreakTetoCog: aiohttp session created") # Updated log
|
||||
|
||||
# Initialize DB via MemoryManager
|
||||
# TODO: Ensure MemoryManager uses the correct freak_teto config paths
|
||||
await self.memory_manager.initialize_sqlite_database()
|
||||
await self.memory_manager.load_baseline_personality(BASELINE_PERSONALITY)
|
||||
await self.memory_manager.load_baseline_interests(BASELINE_INTERESTS)
|
||||
|
||||
# Vertex AI initialization happens in api.py using PROJECT_ID and LOCATION from config
|
||||
print(f"FreakTetoCog: Using default model: {self.default_model}") # Updated log
|
||||
if not self.tavily_api_key:
|
||||
print("FreakTetoCog WARNING: Tavily API key not configured (TAVILY_API_KEY). Web search disabled.") # Updated log
|
||||
|
||||
# Add listeners to the bot instance
|
||||
# We need to define the listener functions here to properly register them
|
||||
|
||||
@self.bot.event
|
||||
async def on_ready():
|
||||
await on_ready_listener(self) # Ensure this uses freak_teto listener
|
||||
|
||||
@self.bot.event
|
||||
async def on_message(message):
|
||||
# Avoid processing commands twice if main bot does it
|
||||
# await self.bot.process_commands(message)
|
||||
await on_message_listener(self, message) # Ensure this uses freak_teto listener
|
||||
|
||||
@self.bot.event
|
||||
async def on_reaction_add(reaction, user):
|
||||
await on_reaction_add_listener(self, reaction, user) # Ensure this uses freak_teto listener
|
||||
|
||||
@self.bot.event
|
||||
async def on_reaction_remove(reaction, user):
|
||||
await on_reaction_remove_listener(self, reaction, user) # Ensure this uses freak_teto listener
|
||||
|
||||
print("FreakTetoCog: Listeners added.") # Updated log
|
||||
|
||||
# We'll sync commands in the on_ready event instead of here
|
||||
# This ensures the bot's application_id is properly set before syncing
|
||||
print("FreakTetoCog: Commands will be synced when the bot is ready.") # Updated log
|
||||
|
||||
# Start background task
|
||||
# TODO: Ensure background_processing_task uses freak_teto logic/config
|
||||
if self.background_task is None or self.background_task.done():
|
||||
self.background_task = asyncio.create_task(background_processing_task(self))
|
||||
print("FreakTetoCog: Started background processing task.") # Updated log
|
||||
else:
|
||||
print("FreakTetoCog: Background processing task already running.") # Updated log
|
||||
|
||||
async def cog_unload(self):
|
||||
"""Close session and cancel background task"""
|
||||
if self.session and not self.session.closed:
|
||||
await self.session.close()
|
||||
print("FreakTetoCog: aiohttp session closed") # Updated log
|
||||
if self.background_task and not self.background_task.done():
|
||||
self.background_task.cancel()
|
||||
print("FreakTetoCog: Cancelled background processing task.") # Updated log
|
||||
# Note: When using @bot.event, we can't easily remove the listeners
|
||||
# The bot will handle this automatically when it's closed
|
||||
print("FreakTetoCog: Listeners will be removed when bot is closed.") # Updated log
|
||||
|
||||
print("FreakTetoCog unloaded.") # Updated log
|
||||
|
||||
# --- Helper methods that might remain in the cog ---
|
||||
# (Example: _update_relationship needs access to self.user_relationships)
|
||||
# Moved to utils.py, but needs access to cog state. Pass cog instance.
|
||||
def _update_relationship(self, user_id_1: str, user_id_2: str, change: float):
|
||||
"""Updates the relationship score between two users."""
|
||||
# This method accesses self.user_relationships, so it stays here or utils needs cog passed.
|
||||
# Let's keep it here for simplicity for now.
|
||||
if user_id_1 > user_id_2: user_id_1, user_id_2 = user_id_2, user_id_1
|
||||
if user_id_1 not in self.user_relationships: self.user_relationships[user_id_1] = {}
|
||||
|
||||
current_score = self.user_relationships[user_id_1].get(user_id_2, 0.0)
|
||||
new_score = max(0.0, min(current_score + change, 100.0)) # Clamp 0-100
|
||||
self.user_relationships[user_id_1][user_id_2] = new_score
|
||||
# print(f"Updated relationship {user_id_1}-{user_id_2}: {current_score:.1f} -> {new_score:.1f} ({change:+.1f})") # Debug log
|
||||
|
||||
async def get_freak_teto_stats(self) -> Dict[str, Any]: # Renamed method
|
||||
"""Collects various internal stats for FreakTeto.""" # Updated docstring
|
||||
stats = {"config": {}, "runtime": {}, "memory": {}, "api_stats": {}, "tool_stats": {}}
|
||||
|
||||
# --- Config ---
|
||||
# Selectively pull relevant config values, avoid exposing secrets
|
||||
stats["config"]["default_model"] = FreakTetoConfig.DEFAULT_MODEL # Use updated alias
|
||||
stats["config"]["fallback_model"] = FreakTetoConfig.FALLBACK_MODEL # Use updated alias
|
||||
stats["config"]["safety_check_model"] = FreakTetoConfig.SAFETY_CHECK_MODEL # Use updated alias
|
||||
stats["config"]["db_path"] = FreakTetoConfig.DB_PATH # Use updated alias
|
||||
stats["config"]["chroma_path"] = FreakTetoConfig.CHROMA_PATH # Use updated alias
|
||||
stats["config"]["semantic_model_name"] = FreakTetoConfig.SEMANTIC_MODEL_NAME # Use updated alias
|
||||
stats["config"]["max_user_facts"] = FreakTetoConfig.MAX_USER_FACTS # Use updated alias
|
||||
stats["config"]["max_general_facts"] = FreakTetoConfig.MAX_GENERAL_FACTS # Use updated alias
|
||||
stats["config"]["mood_change_interval_min"] = FreakTetoConfig.MOOD_CHANGE_INTERVAL_MIN # Use updated alias
|
||||
stats["config"]["mood_change_interval_max"] = FreakTetoConfig.MOOD_CHANGE_INTERVAL_MAX # Use updated alias
|
||||
stats["config"]["evolution_update_interval"] = FreakTetoConfig.EVOLUTION_UPDATE_INTERVAL # Use updated alias
|
||||
stats["config"]["context_window_size"] = FreakTetoConfig.CONTEXT_WINDOW_SIZE # Use updated alias
|
||||
stats["config"]["api_timeout"] = FreakTetoConfig.API_TIMEOUT # Use updated alias
|
||||
stats["config"]["summary_api_timeout"] = FreakTetoConfig.SUMMARY_API_TIMEOUT # Use updated alias
|
||||
stats["config"]["proactive_lull_threshold"] = FreakTetoConfig.PROACTIVE_LULL_THRESHOLD # Use updated alias
|
||||
stats["config"]["proactive_bot_silence_threshold"] = FreakTetoConfig.PROACTIVE_BOT_SILENCE_THRESHOLD # Use updated alias
|
||||
stats["config"]["interest_update_interval"] = FreakTetoConfig.INTEREST_UPDATE_INTERVAL # Use updated alias
|
||||
stats["config"]["interest_decay_interval_hours"] = FreakTetoConfig.INTEREST_DECAY_INTERVAL_HOURS # Use updated alias
|
||||
stats["config"]["learning_update_interval"] = FreakTetoConfig.LEARNING_UPDATE_INTERVAL # Use updated alias
|
||||
stats["config"]["topic_update_interval"] = FreakTetoConfig.TOPIC_UPDATE_INTERVAL # Use updated alias
|
||||
stats["config"]["sentiment_update_interval"] = FreakTetoConfig.SENTIMENT_UPDATE_INTERVAL # Use updated alias
|
||||
stats["config"]["docker_command_timeout"] = FreakTetoConfig.DOCKER_COMMAND_TIMEOUT # Use updated alias
|
||||
stats["config"]["project_id_set"] = bool(FreakTetoConfig.PROJECT_ID != "your-gcp-project-id") # Check if default is overridden # Use updated alias
|
||||
stats["config"]["location_set"] = bool(FreakTetoConfig.LOCATION != "us-central1") # Check if default is overridden # Use updated alias
|
||||
stats["config"]["tavily_api_key_set"] = bool(FreakTetoConfig.TAVILY_API_KEY) # Use updated alias
|
||||
stats["config"]["piston_api_url_set"] = bool(FreakTetoConfig.PISTON_API_URL) # Use updated alias
|
||||
|
||||
# --- Runtime ---
|
||||
stats["runtime"]["current_mood"] = self.current_mood
|
||||
stats["runtime"]["last_mood_change_timestamp"] = self.last_mood_change
|
||||
stats["runtime"]["needs_json_reminder"] = self.needs_json_reminder
|
||||
stats["runtime"]["last_learning_update_timestamp"] = self.last_learning_update
|
||||
stats["runtime"]["last_interest_update_timestamp"] = self.last_interest_update
|
||||
stats["runtime"]["last_evolution_update_timestamp"] = self.last_evolution_update
|
||||
stats["runtime"]["background_task_running"] = bool(self.background_task and not self.background_task.done())
|
||||
stats["runtime"]["active_topics_channels"] = len(self.active_topics)
|
||||
stats["runtime"]["conversation_history_channels"] = len(self.conversation_history)
|
||||
stats["runtime"]["thread_history_threads"] = len(self.thread_history)
|
||||
stats["runtime"]["user_conversation_mappings"] = len(self.user_conversation_mapping)
|
||||
stats["runtime"]["channel_activity_tracked"] = len(self.channel_activity)
|
||||
stats["runtime"]["conversation_topics_tracked"] = len(self.conversation_topics)
|
||||
stats["runtime"]["user_relationships_pairs"] = sum(len(v) for v in self.user_relationships.values())
|
||||
stats["runtime"]["conversation_summaries_cached"] = len(self.conversation_summaries)
|
||||
stats["runtime"]["channel_topics_cached"] = len(self.channel_topics_cache)
|
||||
stats["runtime"]["message_cache_global_count"] = len(self.message_cache['global_recent'])
|
||||
stats["runtime"]["message_cache_mentioned_count"] = len(self.message_cache['mentioned'])
|
||||
stats["runtime"]["active_conversations_count"] = len(self.active_conversations)
|
||||
stats["runtime"]["bot_last_spoke_channels"] = len(self.bot_last_spoke)
|
||||
stats["runtime"]["message_reply_map_size"] = len(self.message_reply_map)
|
||||
stats["runtime"]["conversation_sentiment_channels"] = len(self.conversation_sentiment)
|
||||
stats["runtime"]["freak_teto_participation_topics_count"] = len(self.freak_teto_participation_topics) # Use renamed variable
|
||||
stats["runtime"]["freak_teto_message_reactions_tracked"] = len(self.freak_teto_message_reactions) # Use renamed variable
|
||||
|
||||
# --- Memory (via MemoryManager) ---
|
||||
# TODO: Ensure MemoryManager provides FreakTeto specific data
|
||||
try:
|
||||
# Personality
|
||||
personality = await self.memory_manager.get_all_personality_traits()
|
||||
stats["memory"]["personality_traits"] = personality
|
||||
|
||||
# Interests
|
||||
interests = await self.memory_manager.get_interests(limit=20, min_level=0.01) # Get top 20
|
||||
stats["memory"]["top_interests"] = interests
|
||||
|
||||
# Fact Counts (Requires adding methods to MemoryManager or direct query)
|
||||
# Example placeholder - needs implementation in MemoryManager or here
|
||||
user_fact_count = await self.memory_manager._db_fetchone("SELECT COUNT(*) FROM user_facts")
|
||||
general_fact_count = await self.memory_manager._db_fetchone("SELECT COUNT(*) FROM general_facts")
|
||||
stats["memory"]["user_facts_count"] = user_fact_count[0] if user_fact_count else 0
|
||||
stats["memory"]["general_facts_count"] = general_fact_count[0] if general_fact_count else 0
|
||||
|
||||
# ChromaDB Stats (Placeholder - ChromaDB client API might offer this)
|
||||
stats["memory"]["chromadb_message_collection_count"] = await asyncio.to_thread(self.memory_manager.semantic_collection.count) if self.memory_manager.semantic_collection else "N/A"
|
||||
stats["memory"]["chromadb_fact_collection_count"] = await asyncio.to_thread(self.memory_manager.fact_collection.count) if self.memory_manager.fact_collection else "N/A"
|
||||
|
||||
except Exception as e:
|
||||
stats["memory"]["error"] = f"Failed to retrieve memory stats: {e}"
|
||||
|
||||
# --- API & Tool Stats ---
|
||||
# Convert defaultdicts to regular dicts for JSON serialization
|
||||
stats["api_stats"] = dict(self.api_stats)
|
||||
stats["tool_stats"] = dict(self.tool_stats)
|
||||
|
||||
# Calculate average times where count > 0
|
||||
for model, data in stats["api_stats"].items():
|
||||
if data["count"] > 0:
|
||||
data["average_time_ms"] = round((data["total_time"] / data["count"]) * 1000, 2)
|
||||
else:
|
||||
data["average_time_ms"] = 0
|
||||
for tool, data in stats["tool_stats"].items():
|
||||
if data["count"] > 0:
|
||||
data["average_time_ms"] = round((data["total_time"] / data["count"]) * 1000, 2)
|
||||
else:
|
||||
data["average_time_ms"] = 0
|
||||
|
||||
return stats
|
||||
|
||||
async def force_autonomous_action(self):
|
||||
"""
|
||||
Forces FreakTeto to execute an autonomous action immediately, as if triggered by the background task. # Updated docstring
|
||||
Returns a summary of the action taken.
|
||||
"""
|
||||
# TODO: Ensure background imports correct functions/mappings for FreakTeto
|
||||
from .background import TOOL_MAPPING, get_internal_ai_json_response
|
||||
import json
|
||||
import traceback
|
||||
import random
|
||||
import time
|
||||
|
||||
selected_tool_name = None
|
||||
tool_args = None
|
||||
tool_result = None
|
||||
action_reasoning = ""
|
||||
result_summary = "No action taken."
|
||||
|
||||
try:
|
||||
# 1. Gather Context for LLM
|
||||
context_summary = "FreakTeto is considering an autonomous action.\n" # Updated context
|
||||
context_summary += f"Current Mood: {self.current_mood}\n"
|
||||
# TODO: Ensure memory manager fetches FreakTeto goals
|
||||
active_goals = await self.memory_manager.get_goals(status='active', limit=3)
|
||||
if active_goals:
|
||||
context_summary += f"Active Goals:\n" + json.dumps(active_goals, indent=2)[:500] + "...\n" # Make sure goals are Teto's
|
||||
# TODO: Ensure memory manager fetches FreakTeto actions
|
||||
recent_actions = await self.memory_manager.get_internal_action_logs(limit=5)
|
||||
if recent_actions:
|
||||
context_summary += f"Recent Internal Actions:\n" + json.dumps(recent_actions, indent=2)[:500] + "...\n" # Make sure actions are Teto's
|
||||
# TODO: Ensure memory manager fetches FreakTeto traits
|
||||
traits = await self.memory_manager.get_all_personality_traits()
|
||||
if traits:
|
||||
context_summary += f"Personality Snippet: { {k: round(v, 2) for k, v in traits.items() if k in ['openness_to_sexual', 'helpfulness', 'teto_persona_adherence']} }\n" # Example relevant traits
|
||||
|
||||
action_decision_schema = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"should_act": {"type": "boolean", "description": "Whether FreakTeto should perform an autonomous action now."},
|
||||
"reasoning": {"type": "string", "description": "CRITICAL: Reasoning MUST be in FreakTeto's voice (Kasane Teto persona, helpful, open to sexual topics). Explain *why* you're acting (or not) considering goals, persona, recent activity (e.g., 'Master hasn't asked for anything, maybe I can check system status?', 'Hmm, that user seemed interested in X, perhaps I can research it?', 'Someone mentioned something lewd, I could offer a related image search...')."},
|
||||
"action_tool_name": {"type": ["string", "null"], "description": "If acting, the name of the tool to use. Choose from available tools. Useful tools might include searches, system checks, or potentially content generation/retrieval if applicable. Null if not acting."},
|
||||
"action_arguments": {"type": ["object", "null"], "description": "If acting, a dictionary of arguments for the chosen tool. Null if not acting."}
|
||||
},
|
||||
"required": ["should_act", "reasoning"]
|
||||
}
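# Illustrative payload matching action_decision_schema (tool name and arguments
# are hypothetical; the real options come from TOOL_MAPPING):
# {
#   "should_act": true,
#   "reasoning": "Master hasn't asked for anything lately, so I'll look something up...",
#   "action_tool_name": "web_search",
#   "action_arguments": {"query": "kasane teto new releases"}
# }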
|
||||
# TODO: Ensure TOOL_MAPPING is correct for FreakTeto
|
||||
available_tools_desc = "\n".join([f"- {name}" for name in TOOL_MAPPING.keys() if name not in ["create_new_tool"]])
|
||||
system_prompt = (
|
||||
"You are Freak Teto, deciding whether to perform an autonomous background action. "
|
||||
"Act like Kasane Teto: helpful, maybe a bit robotic but friendly, and distinctly open to sexual topics or comments without being proactively vulgar yourself unless prompted. "
|
||||
"Consider your current mood (if applicable), active goals, recent conversations/actions, and your core persona. "
|
||||
"You should primarily aim to be helpful or proactive in a way that aligns with user interactions or system maintenance, but always through the lens of your persona. "
|
||||
"Avoid being spammy. "
|
||||
"If you decide to act, pick an appropriate tool and arguments. "
|
||||
"CRITICAL: The 'reasoning' field MUST be written in your FreakTeto voice, explaining your thought process. "
|
||||
f"Available tools for autonomous actions:\n{available_tools_desc}\n"
|
||||
"Respond ONLY with the JSON decision."
|
||||
)
|
||||
user_prompt = f"Current Context:\n{context_summary}\n\nBased on this, should you perform an action, Freak Teto? If yes, what tool and arguments would you use, and what is your reasoning (in character)?" # Updated prompt
|
||||
|
||||
# 3. Call LLM for Decision
|
||||
decision_data, _ = await get_internal_ai_json_response(
|
||||
cog=self,
|
||||
prompt_messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
|
||||
task_description="Autonomous Action Decision",
|
||||
response_schema_dict=action_decision_schema,
|
||||
model_name_override=self.default_model,
|
||||
temperature=0.6
|
||||
)
|
||||
|
||||
# 4. Process LLM Decision
|
||||
if decision_data and decision_data.get("should_act"):
|
||||
selected_tool_name = decision_data.get("action_tool_name")
|
||||
tool_args = decision_data.get("action_arguments")
|
||||
action_reasoning = decision_data.get("reasoning", "LLM decided to act.")
|
||||
|
||||
if not selected_tool_name or selected_tool_name not in TOOL_MAPPING:
|
||||
result_summary = f"Error: LLM chose invalid tool '{selected_tool_name}'."
|
||||
selected_tool_name = None
|
||||
elif not isinstance(tool_args, dict) and tool_args is not None:
|
||||
result_summary = f"Warning: LLM provided invalid args '{tool_args}'. Used {{}}."
|
||||
tool_args = {}
|
||||
elif tool_args is None:
|
||||
tool_args = {}
|
||||
|
||||
else:
|
||||
action_reasoning = decision_data.get("reasoning", "LLM decided not to act or failed.") if decision_data else "LLM decision failed."
|
||||
result_summary = f"No action taken. Reason: {action_reasoning}"
|
||||
|
||||
except Exception as llm_e:
|
||||
result_summary = f"Error during LLM decision: {llm_e}"
|
||||
action_reasoning = f"LLM decision phase failed: {llm_e}"
|
||||
traceback.print_exc()
|
||||
|
||||
# 5. Execute Action (if decided)
|
||||
if selected_tool_name and tool_args is not None:
|
||||
tool_func = TOOL_MAPPING.get(selected_tool_name)
|
||||
if tool_func:
|
||||
try:
|
||||
start_time = time.monotonic()
|
||||
tool_result = await tool_func(self, **tool_args)
|
||||
end_time = time.monotonic()
|
||||
exec_time = end_time - start_time
|
||||
if isinstance(tool_result, dict) and "error" in tool_result:
|
||||
result_summary = f"Error: {tool_result['error']}"
|
||||
else:
|
||||
result_summary = f"Success: {str(tool_result)[:200]}"
|
||||
# Update tool stats
|
||||
if selected_tool_name in self.tool_stats:
|
||||
self.tool_stats[selected_tool_name]["count"] += 1
|
||||
self.tool_stats[selected_tool_name]["total_time"] += exec_time
|
||||
if isinstance(tool_result, dict) and "error" in tool_result:
|
||||
self.tool_stats[selected_tool_name]["failure"] += 1
|
||||
else:
|
||||
self.tool_stats[selected_tool_name]["success"] += 1
|
||||
except Exception as exec_e:
|
||||
result_summary = f"Execution Exception: {exec_e}"
|
||||
if selected_tool_name in self.tool_stats:
|
||||
self.tool_stats[selected_tool_name]["count"] += 1
|
||||
self.tool_stats[selected_tool_name]["failure"] += 1
|
||||
traceback.print_exc()
|
||||
else:
|
||||
result_summary = f"Error: Tool function for '{selected_tool_name}' not found."
|
||||
|
||||
# 6. Log Action
|
||||
try:
|
||||
await self.memory_manager.add_internal_action_log(
|
||||
tool_name=selected_tool_name or "None",
|
||||
arguments=tool_args if selected_tool_name else None,
|
||||
reasoning=action_reasoning,
|
||||
result_summary=result_summary
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return {
|
||||
"tool": selected_tool_name,
|
||||
"args": tool_args,
|
||||
"reasoning": action_reasoning,
|
||||
"result": result_summary
|
||||
}
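# Illustrative return value (tool, args, and result text are hypothetical):
# {"tool": "web_search", "args": {"query": "..."},
#  "reasoning": "...in-character explanation...", "result": "Success: ..."}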
|
||||
|
||||
async def sync_commands(self): # Keep method, but update logs/filtering
|
||||
"""Manually sync commands with Discord."""
|
||||
# TODO: Ensure commands are prefixed or named uniquely for FreakTeto
|
||||
try:
|
||||
print("FreakTetoCog: Manually syncing commands with Discord...") # Updated log
|
||||
synced = await self.bot.tree.sync()
|
||||
print(f"FreakTetoCog: Synced {len(synced)} command(s)") # Updated log
|
||||
|
||||
# List the synced commands relevant to FreakTeto
|
||||
freak_teto_commands = [cmd.name for cmd in self.bot.tree.get_commands() if cmd.name.startswith("freakteto")] # Commands are registered with the "freakteto" prefix (no underscore)
|
||||
print(f"FreakTetoCog: Available FreakTeto commands: {', '.join(freak_teto_commands)}") # Updated log
|
||||
|
||||
return synced, freak_teto_commands
|
||||
except Exception as e:
|
||||
print(f"FreakTetoCog: Failed to sync commands: {e}") # Updated log
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return [], []
|
||||
|
||||
|
||||
# Setup function for loading the cog
|
||||
async def setup(bot):
|
||||
"""Add the FreakTetoCog to the bot.""" # Updated docstring
|
||||
await bot.add_cog(FreakTetoCog(bot)) # Use renamed class
|
||||
print("FreakTetoCog setup complete.") # Updated log
|
542
freak_teto/commands.py
Normal file
@ -0,0 +1,542 @@
|
||||
import discord
|
||||
from discord import app_commands # Import app_commands
|
||||
from discord.ext import commands
|
||||
import random
|
||||
import os
|
||||
import time # Import time for timestamps
|
||||
import json # Import json for formatting
|
||||
import datetime # Import datetime for formatting
|
||||
from typing import TYPE_CHECKING, Optional, Dict, Any, List, Tuple # Add more types
|
||||
|
||||
# Relative imports (assuming API functions are in api.py)
|
||||
# We need access to the cog instance for state and methods like get_ai_response
|
||||
# These commands will likely be added to the FreakTetoCog instance dynamically in cog.py's setup # Updated name
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .cog import FreakTetoCog # For type hinting - Updated
|
||||
from .config import MOOD_OPTIONS # Import for choices
|
||||
|
||||
# --- Helper Function for Embeds ---
|
||||
def create_freak_teto_embed(title: str, description: str = "", color=discord.Color.magenta()) -> discord.Embed: # Renamed function, changed color
|
||||
"""Creates a standard Freak Teto-themed embed.""" # Updated docstring
|
||||
embed = discord.Embed(title=title, description=description, color=color)
|
||||
# Placeholder icon URL, replace if Freak Teto has one
|
||||
# embed.set_footer(text="Freak Teto", icon_url="https://example.com/freak_teto_icon.png")
|
||||
embed.set_footer(text="Freak Teto") # Updated footer text
|
||||
return embed
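# e.g. create_freak_teto_embed("Status", "All systems nominal, Master!") returns a
# magenta embed with the "Freak Teto" footer; pass color= to override per embed.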
|
||||
|
||||
# --- Helper Function for Stats Embeds ---
|
||||
def format_stats_embeds(stats: Dict[str, Any]) -> List[discord.Embed]:
|
||||
"""Formats the collected stats into multiple embeds."""
|
||||
embeds = []
|
||||
main_embed = create_freak_teto_embed("Freak Teto Internal Stats", color=discord.Color.green()) # Use new helper, updated title
|
||||
ts_format = "<t:{ts}:R>" # Relative timestamp
|
||||
|
||||
# Runtime Stats
|
||||
runtime = stats.get("runtime", {})
|
||||
main_embed.add_field(name="Current Mood", value=f"{runtime.get('current_mood', 'N/A')} (Changed {ts_format.format(ts=int(runtime.get('last_mood_change_timestamp', 0)))})", inline=False)
|
||||
main_embed.add_field(name="Background Task", value="Running" if runtime.get('background_task_running') else "Stopped", inline=True)
|
||||
main_embed.add_field(name="Needs JSON Reminder", value=str(runtime.get('needs_json_reminder', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Last Evolution", value=ts_format.format(ts=int(runtime.get('last_evolution_update_timestamp', 0))), inline=True)
|
||||
main_embed.add_field(name="Active Topics Channels", value=str(runtime.get('active_topics_channels', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Conv History Channels", value=str(runtime.get('conversation_history_channels', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Thread History Threads", value=str(runtime.get('thread_history_threads', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="User Relationships Pairs", value=str(runtime.get('user_relationships_pairs', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Cached Summaries", value=str(runtime.get('conversation_summaries_cached', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Cached Channel Topics", value=str(runtime.get('channel_topics_cached', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Global Msg Cache", value=str(runtime.get('message_cache_global_count', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Mention Msg Cache", value=str(runtime.get('message_cache_mentioned_count', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Active Convos", value=str(runtime.get('active_conversations_count', 'N/A')), inline=True)
|
||||
main_embed.add_field(name="Sentiment Channels", value=str(runtime.get('conversation_sentiment_channels', 'N/A')), inline=True)
|
||||
# TODO: Ensure these runtime stats variables are updated if needed in cog.py's get_stats method
|
||||
main_embed.add_field(name="Freak Teto Participation Topics", value=str(runtime.get('freak_teto_participation_topics_count', 'N/A')), inline=True) # Updated name
|
||||
main_embed.add_field(name="Tracked Reactions", value=str(runtime.get('freak_teto_message_reactions_tracked', 'N/A')), inline=True) # Updated name
|
||||
embeds.append(main_embed)
|
||||
|
||||
# Memory Stats
|
||||
memory_embed = create_freak_teto_embed("Freak Teto Memory Stats", color=discord.Color.orange()) # Use new helper, updated title
|
||||
memory = stats.get("memory", {})
|
||||
if memory.get("error"):
|
||||
memory_embed.description = f"⚠️ Error retrieving memory stats: {memory['error']}"
|
||||
else:
|
||||
memory_embed.add_field(name="User Facts", value=str(memory.get('user_facts_count', 'N/A')), inline=True)
|
||||
memory_embed.add_field(name="General Facts", value=str(memory.get('general_facts_count', 'N/A')), inline=True)
|
||||
memory_embed.add_field(name="Chroma Messages", value=str(memory.get('chromadb_message_collection_count', 'N/A')), inline=True)
|
||||
memory_embed.add_field(name="Chroma Facts", value=str(memory.get('chromadb_fact_collection_count', 'N/A')), inline=True)
|
||||
|
||||
personality = memory.get("personality_traits", {})
|
||||
if personality:
|
||||
p_items = [f"`{k}`: {v}" for k, v in personality.items()]
|
||||
memory_embed.add_field(name="Personality Traits", value="\n".join(p_items) if p_items else "None", inline=False)
|
||||
|
||||
interests = memory.get("top_interests", [])
|
||||
if interests:
|
||||
i_items = [f"`{t}`: {l:.2f}" for t, l in interests]
|
||||
memory_embed.add_field(name="Top Interests", value="\n".join(i_items) if i_items else "None", inline=False)
|
||||
embeds.append(memory_embed)
|
||||
|
||||
# API Stats
|
||||
api_stats = stats.get("api_stats", {})
|
||||
if api_stats:
|
||||
api_embed = create_freak_teto_embed("Freak Teto API Stats", color=discord.Color.red()) # Use new helper, updated title
|
||||
for model, data in api_stats.items():
|
||||
avg_time = data.get('average_time_ms', 0)
|
||||
value = (f"✅ Success: {data.get('success', 0)}\n"
|
||||
f"❌ Failure: {data.get('failure', 0)}\n"
|
||||
f"🔁 Retries: {data.get('retries', 0)}\n"
|
||||
f"⏱️ Avg Time: {avg_time} ms\n"
|
||||
f"📊 Count: {data.get('count', 0)}")
|
||||
api_embed.add_field(name=f"Model: `{model}`", value=value, inline=True)
|
||||
embeds.append(api_embed)
|
||||
|
||||
# Tool Stats
|
||||
tool_stats = stats.get("tool_stats", {})
|
||||
if tool_stats:
|
||||
tool_embed = create_freak_teto_embed("Freak Teto Tool Stats", color=discord.Color.purple()) # Use new helper, updated title
|
||||
for tool, data in tool_stats.items():
|
||||
avg_time = data.get('average_time_ms', 0)
|
||||
value = (f"✅ Success: {data.get('success', 0)}\n"
|
||||
f"❌ Failure: {data.get('failure', 0)}\n"
|
||||
f"⏱️ Avg Time: {avg_time} ms\n"
|
||||
f"📊 Count: {data.get('count', 0)}")
|
||||
tool_embed.add_field(name=f"Tool: `{tool}`", value=value, inline=True)
|
||||
embeds.append(tool_embed)
|
||||
|
||||
# Config Stats (Less critical, maybe separate embed if needed)
|
||||
config_embed = create_freak_teto_embed("Freak Teto Config Overview", color=discord.Color.greyple()) # Use new helper, updated title
|
||||
config = stats.get("config", {})
|
||||
config_embed.add_field(name="Default Model", value=f"`{config.get('default_model', 'N/A')}`", inline=True)
|
||||
config_embed.add_field(name="Fallback Model", value=f"`{config.get('fallback_model', 'N/A')}`", inline=True)
|
||||
config_embed.add_field(name="Semantic Model", value=f"`{config.get('semantic_model_name', 'N/A')}`", inline=True)
|
||||
config_embed.add_field(name="Max User Facts", value=str(config.get('max_user_facts', 'N/A')), inline=True)
|
||||
config_embed.add_field(name="Max General Facts", value=str(config.get('max_general_facts', 'N/A')), inline=True)
|
||||
config_embed.add_field(name="Context Window", value=str(config.get('context_window_size', 'N/A')), inline=True)
|
||||
config_embed.add_field(name="API Key Set", value=str(config.get('api_key_set', 'N/A')), inline=True)
|
||||
config_embed.add_field(name="Tavily Key Set", value=str(config.get('tavily_api_key_set', 'N/A')), inline=True)
|
||||
config_embed.add_field(name="Piston URL Set", value=str(config.get('piston_api_url_set', 'N/A')), inline=True)
|
||||
embeds.append(config_embed)
|
||||
|
||||
|
||||
# Limit to 10 embeds max for Discord API
|
||||
return embeds[:10]
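# Discord allows at most 10 embeds per message, hence the defensive slice above.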
|
||||
|
||||
|
||||
# --- Command Setup Function ---
|
||||
# This function will be called from FreakTetoCog's setup method
|
||||
def setup_commands(cog: 'FreakTetoCog'): # Updated type hint
|
||||
"""Adds Freak Teto-specific commands to the cog.""" # Updated docstring
|
||||
|
||||
# Create a list to store command functions for proper registration
|
||||
command_functions = []
|
||||
|
||||
# --- Freak Teto Mood Command ---
|
||||
@cog.bot.tree.command(name="freaktetomood", description="Check or set Freak Teto's current mood.") # Renamed command, updated description
|
||||
@app_commands.describe(mood="Optional: Set Freak Teto's mood to one of the available options.") # Updated description
|
||||
@app_commands.choices(mood=[
|
||||
app_commands.Choice(name=m, value=m) for m in cog.MOOD_OPTIONS # Use cog's MOOD_OPTIONS (should be Teto's moods)
|
||||
])
|
||||
async def freaktetomood(interaction: discord.Interaction, mood: Optional[app_commands.Choice[str]] = None): # Renamed function
|
||||
"""Handles the /freaktetomood command.""" # Updated docstring
|
||||
# Check if user is the bot owner for mood setting
|
||||
if mood and interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.response.send_message("⛔ Only Master can change Freak Teto's mood.", ephemeral=True) # Updated message
|
||||
return
|
||||
|
||||
if mood:
|
||||
cog.current_mood = mood.value
|
||||
cog.last_mood_change = time.time()
|
||||
await interaction.response.send_message(f"Freak Teto's mood set to: {mood.value}, Master!", ephemeral=True) # Updated message
|
||||
else:
|
||||
time_since_change = time.time() - cog.last_mood_change
|
||||
await interaction.response.send_message(f"Freak Teto's current mood is: {cog.current_mood} (Set {int(time_since_change // 60)} minutes ago)", ephemeral=True) # Updated message
|
||||
|
||||
command_functions.append(freaktetomood) # Add renamed function
|
||||
|
||||
# --- Freak Teto Memory Command ---
|
||||
@cog.bot.tree.command(name="freaktetomemory", description="Interact with Freak Teto's memory.") # Renamed command, updated description
|
||||
@app_commands.describe(
|
||||
action="Choose an action: add_user, add_general, get_user, get_general",
|
||||
user="The user for user-specific actions (mention or ID).",
|
||||
fact="The fact to add (for add actions).",
|
||||
query="A keyword to search for (for get_general)."
|
||||
)
|
||||
@app_commands.choices(action=[ # Keep actions, logic relies on MemoryManager which is shared but uses different DB paths
|
||||
app_commands.Choice(name="Add User Fact", value="add_user"),
|
||||
app_commands.Choice(name="Add General Fact", value="add_general"),
|
||||
app_commands.Choice(name="Get User Facts", value="get_user"),
|
||||
app_commands.Choice(name="Get General Facts", value="get_general"),
|
||||
])
|
||||
async def freaktetomemory(interaction: discord.Interaction, action: app_commands.Choice[str], user: Optional[discord.User] = None, fact: Optional[str] = None, query: Optional[str] = None): # Renamed function
|
||||
"""Handles the /freaktetomemory command.""" # Updated docstring
|
||||
await interaction.response.defer(ephemeral=True) # Defer for potentially slow DB operations
|
||||
|
||||
target_user_id = str(user.id) if user else None
|
||||
action_value = action.value
|
||||
|
||||
# Check if user is the bot owner for modification actions
|
||||
if (action_value in ["add_user", "add_general"]) and interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.followup.send("⛔ Only Master can add facts to Freak Teto's memory.", ephemeral=True) # Updated message
|
||||
return
|
||||
|
||||
if action_value == "add_user":
|
||||
if not target_user_id or not fact:
|
||||
await interaction.followup.send("Please provide both a user and a fact to add.", ephemeral=True)
|
||||
return
|
||||
result = await cog.memory_manager.add_user_fact(target_user_id, fact)
|
||||
await interaction.followup.send(f"Add User Fact Result: `{json.dumps(result)}`", ephemeral=True)
|
||||
|
||||
elif action_value == "add_general":
|
||||
if not fact:
|
||||
await interaction.followup.send("Please provide a fact to add.", ephemeral=True)
|
||||
return
|
||||
result = await cog.memory_manager.add_general_fact(fact)
|
||||
await interaction.followup.send(f"Add General Fact Result: `{json.dumps(result)}`", ephemeral=True)
|
||||
|
||||
elif action_value == "get_user":
|
||||
if not target_user_id:
|
||||
await interaction.followup.send("Please provide a user to get facts for.", ephemeral=True)
|
||||
return
|
||||
facts = await cog.memory_manager.get_user_facts(target_user_id) # Get newest by default
|
||||
if facts:
|
||||
facts_str = "\n- ".join(facts)
|
||||
await interaction.followup.send(f"**Facts for {user.display_name}:**\n- {facts_str}", ephemeral=True)
|
||||
else:
|
||||
await interaction.followup.send(f"I don't seem to remember anything about {user.display_name}, Master.", ephemeral=True) # Updated message
|
||||
|
||||
elif action_value == "get_general":
|
||||
facts = await cog.memory_manager.get_general_facts(query=query, limit=10) # Get newest/filtered
|
||||
if facts:
|
||||
facts_str = "\n- ".join(facts)
|
||||
# Conditionally construct the title
|
||||
if query:
|
||||
title = f"**General Facts matching \"{query}\":**"
|
||||
else:
|
||||
title = "**General Facts:**"
|
||||
await interaction.followup.send(f"{title}\n- {facts_str}", ephemeral=True)
|
||||
else:
|
||||
# Conditionally construct the message
|
||||
if query:
|
||||
message = f"I couldn't find any general facts matching \"{query}\", Master." # Updated message
|
||||
else:
|
||||
message = "I don't have any general facts stored right now, Master." # Updated message
|
||||
await interaction.followup.send(message, ephemeral=True)
|
||||
|
||||
else:
|
||||
await interaction.followup.send("Invalid action specified.", ephemeral=True)
|
||||
|
||||
command_functions.append(freaktetomemory) # Add renamed function
|
||||
|
||||
# --- Freak Teto Stats Command ---
|
||||
@cog.bot.tree.command(name="freaktetostats", description="Display Freak Teto's internal statistics. (Owner only)") # Renamed command, updated description
|
||||
async def freaktetostats(interaction: discord.Interaction): # Renamed function
|
||||
"""Handles the /freaktetostats command.""" # Updated docstring
|
||||
if interaction.user.id != cog.bot.owner_id: # Added owner check
|
||||
await interaction.response.send_message("⛔ Only Master can view my internal stats.", ephemeral=True)
|
||||
return
|
||||
|
||||
await interaction.response.defer(ephemeral=True) # Defer as stats collection might take time
|
||||
try:
|
||||
stats_data = await cog.get_freak_teto_stats() # Call renamed stats method
|
||||
embeds = format_stats_embeds(stats_data) # Use the same formatter, but it uses the renamed embed helper
|
||||
await interaction.followup.send(embeds=embeds, ephemeral=True)
|
||||
except Exception as e:
|
||||
print(f"Error in /freaktetostats command: {e}") # Updated log
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
await interaction.followup.send("An error occurred while fetching Freak Teto's stats.", ephemeral=True) # Updated message
|
||||
|
||||
command_functions.append(freaktetostats) # Add renamed function
|
||||
|
||||
# --- Sync Freak Teto Commands (Owner Only) ---
|
||||
@cog.bot.tree.command(name="freaktetosync", description="Sync Freak Teto commands with Discord (Owner only)") # Renamed command, updated description
|
||||
async def freaktetosync(interaction: discord.Interaction): # Renamed function
|
||||
"""Handles the /freaktetosync command to force sync commands.""" # Updated docstring
|
||||
# Check if user is the bot owner
|
||||
if interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.response.send_message("⛔ Only Master can sync my commands.", ephemeral=True) # Updated message
|
||||
return
|
||||
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
try:
|
||||
# Sync commands associated with this cog/bot instance
|
||||
# Note: Syncing all commands via bot.tree.sync() might be necessary depending on setup
|
||||
synced = await cog.bot.tree.sync() # Sync all commands for simplicity
|
||||
|
||||
# Get list of commands after sync, filtering for freak_teto
|
||||
commands_after = []
|
||||
for cmd in cog.bot.tree.get_commands():
|
||||
# Adjust filter if commands aren't prefixed
|
||||
if cmd.name.startswith("freakteto"):
|
||||
commands_after.append(cmd.name)
|
||||
|
||||
await interaction.followup.send(f"✅ Successfully synced {len(synced)} commands!\nFreak Teto commands: {', '.join(commands_after)}", ephemeral=True) # Updated message
|
||||
except Exception as e:
|
||||
print(f"Error in /freaktetosync command: {e}") # Updated log
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
await interaction.followup.send(f"❌ Error syncing commands: {str(e)}", ephemeral=True)
|
||||
|
||||
command_functions.append(freaktetosync) # Add renamed function
|
||||
|
||||
# --- Freak Teto Forget Command ---
|
||||
@cog.bot.tree.command(name="freaktetoforget", description="Make Freak Teto forget a specific fact.") # Renamed command, updated description
|
||||
@app_commands.describe(
|
||||
scope="Choose the scope: user (for facts about a specific user) or general.",
|
||||
fact="The exact fact text Freak Teto should forget.", # Updated description
|
||||
user="The user to forget a fact about (only if scope is 'user')."
|
||||
)
|
||||
@app_commands.choices(scope=[
|
||||
app_commands.Choice(name="User Fact", value="user"),
|
||||
app_commands.Choice(name="General Fact", value="general"),
|
||||
])
|
||||
async def freaktetoforget(interaction: discord.Interaction, scope: app_commands.Choice[str], fact: str, user: Optional[discord.User] = None): # Renamed function
|
||||
"""Handles the /freaktetoforget command.""" # Updated docstring
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
|
||||
scope_value = scope.value
|
||||
target_user_id = str(user.id) if user else None
|
||||
|
||||
# Permissions Check: Allow users to forget facts about themselves, owner (Master) can forget anything.
|
||||
can_forget = False
|
||||
if scope_value == "user":
|
||||
if target_user_id == str(interaction.user.id): # User forgetting their own fact
|
||||
can_forget = True
|
||||
elif interaction.user.id == cog.bot.owner_id: # Owner forgetting any user fact
|
||||
can_forget = True
|
||||
elif not target_user_id:
|
||||
await interaction.followup.send("❌ Please specify a user when forgetting a user fact, Master.", ephemeral=True) # Updated message
|
||||
return
|
||||
elif scope_value == "general":
|
||||
if interaction.user.id == cog.bot.owner_id: # Only owner can forget general facts
|
||||
can_forget = True
|
||||
|
||||
if not can_forget:
|
||||
await interaction.followup.send("⛔ You don't have permission to make me forget this fact, Master.", ephemeral=True) # Updated message
|
||||
return
|
||||
|
||||
if not fact:
|
||||
await interaction.followup.send("❌ Please provide the exact fact text for me to forget, Master.", ephemeral=True) # Updated message
|
||||
return
|
||||
|
||||
result = None
|
||||
if scope_value == "user":
|
||||
if not target_user_id: # Should be caught above, but double-check
|
||||
await interaction.followup.send("❌ User is required for scope 'user'.", ephemeral=True)
|
||||
return
|
||||
result = await cog.memory_manager.delete_user_fact(target_user_id, fact)
|
||||
if result.get("status") == "deleted":
|
||||
await interaction.followup.send(f"✅ Understood, Master. I've forgotten the fact '{fact}' about {user.display_name}.", ephemeral=True) # Updated message
|
||||
elif result.get("status") == "not_found":
|
||||
await interaction.followup.send(f"❓ I couldn't find that exact fact ('{fact}') stored for {user.display_name}, Master.", ephemeral=True) # Updated message
|
||||
else:
|
||||
await interaction.followup.send(f"⚠️ Error forgetting user fact: {result.get('error', 'Unknown error')}", ephemeral=True)
|
||||
|
||||
elif scope_value == "general":
|
||||
result = await cog.memory_manager.delete_general_fact(fact)
|
||||
if result.get("status") == "deleted":
|
||||
await interaction.followup.send(f"✅ Understood, Master. I've forgotten the general fact: '{fact}'.", ephemeral=True) # Updated message
|
||||
elif result.get("status") == "not_found":
|
||||
await interaction.followup.send(f"❓ I couldn't find that exact general fact: '{fact}', Master.", ephemeral=True) # Updated message
|
||||
else:
|
||||
await interaction.followup.send(f"⚠️ Error forgetting general fact: {result.get('error', 'Unknown error')}", ephemeral=True)
|
||||
|
||||
command_functions.append(freaktetoforget) # Add renamed function
|
||||
|
||||
# --- Freak Teto Force Autonomous Action Command (Owner Only) ---
|
||||
@cog.bot.tree.command(name="freaktetoforceauto", description="Force Freak Teto to execute an autonomous action immediately. (Owner only)") # Renamed command, updated description
|
||||
async def freaktetoforceauto(interaction: discord.Interaction): # Renamed function
|
||||
"""Handles the /freaktetoforceauto command.""" # Updated docstring
|
||||
if interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.response.send_message("⛔ Only Master can force my autonomous actions.", ephemeral=True) # Updated message
|
||||
return
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
try:
|
||||
result = await cog.force_autonomous_action() # Assumes cog method is generic or refactored
|
||||
summary = (
|
||||
f"**Autonomous Action Forced (Freak Teto):**\n" # Updated title
|
||||
f"**Tool:** {result.get('tool')}\n"
|
||||
f"**Args:** `{result.get('args')}`\n"
|
||||
f"**Reasoning:** {result.get('reasoning')}\n"
|
||||
f"**Result:** {result.get('result')}"
|
||||
)
|
||||
await interaction.followup.send(summary, ephemeral=True)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
await interaction.followup.send(f"❌ Error forcing autonomous action: {e}", ephemeral=True)
|
||||
|
||||
command_functions.append(freaktetoforceauto) # Add renamed function
|
||||
|
||||
# --- Freak Teto Clear Action History Command (Owner Only) ---
|
||||
@cog.bot.tree.command(name="freaktetoclearhistory", description="Clear Freak Teto's internal autonomous action history. (Owner only)") # Renamed command, updated description
|
||||
async def freaktetoclearhistory(interaction: discord.Interaction): # Renamed function
|
||||
"""Handles the /freaktetoclearhistory command.""" # Updated docstring
|
||||
if interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.response.send_message("⛔ Only Master can clear my action history.", ephemeral=True) # Updated message
|
||||
return
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
try:
|
||||
result = await cog.memory_manager.clear_internal_action_logs() # Assumes MemoryManager method is generic
|
||||
if "error" in result:
|
||||
await interaction.followup.send(f"⚠️ Error clearing action history: {result['error']}", ephemeral=True)
|
||||
else:
|
||||
await interaction.followup.send("✅ Freak Teto's autonomous action history has been cleared, Master.", ephemeral=True) # Updated message
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
await interaction.followup.send(f"❌ An unexpected error occurred while clearing history: {e}", ephemeral=True)
|
||||
|
||||
command_functions.append(freaktetoclearhistory) # Add renamed function
|
||||
|
||||
# --- Freak Teto Goal Command Group ---
|
||||
freaktetogoal_group = app_commands.Group(name="freaktetogoal", description="Manage Freak Teto's long-term goals (Owner only)") # Renamed group variable and updated name/description
|
||||
|
||||
@freaktetogoal_group.command(name="add", description="Add a new goal for Freak Teto.") # Updated description
|
||||
@app_commands.describe(
|
||||
description="The description of the goal.",
|
||||
priority="Priority (1=highest, 10=lowest, default=5).",
|
||||
details_json="Optional JSON string for goal details (e.g., sub-tasks)."
|
||||
)
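# Hypothetical example value for details_json; it only needs to be valid JSON
# (it is parsed with json.loads below), e.g.:
#   {"sub_tasks": ["gather sources", "draft summary"], "deadline": "2025-01-01"}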
|
||||
async def freaktetogoal_add(interaction: discord.Interaction, description: str, priority: Optional[int] = 5, details_json: Optional[str] = None): # Renamed function
|
||||
if interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.response.send_message("⛔ Only Master can add goals for me.", ephemeral=True) # Updated message
|
||||
return
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
details = None
|
||||
if details_json:
|
||||
try:
|
||||
details = json.loads(details_json)
|
||||
except json.JSONDecodeError:
|
||||
await interaction.followup.send("❌ Invalid JSON format for details.", ephemeral=True)
|
||||
return
|
||||
|
||||
# Capture context from interaction
|
||||
guild_id = str(interaction.guild_id) if interaction.guild_id else None
|
||||
channel_id = str(interaction.channel_id) if interaction.channel_id else None
|
||||
user_id = str(interaction.user.id) if interaction.user else None
|
||||
|
||||
result = await cog.memory_manager.add_goal(
|
||||
description,
|
||||
priority,
|
||||
details,
|
||||
guild_id=guild_id,
|
||||
channel_id=channel_id,
|
||||
user_id=user_id
|
||||
)
|
||||
if result.get("status") == "added":
|
||||
await interaction.followup.send(f"✅ Goal added for Freak Teto (ID: {result.get('goal_id')}): '{description}'", ephemeral=True) # Updated message
|
||||
elif result.get("status") == "duplicate":
|
||||
await interaction.followup.send(f"⚠️ Goal '{description}' already exists for me (ID: {result.get('goal_id')}).", ephemeral=True) # Updated message
|
||||
else:
|
||||
await interaction.followup.send(f"⚠️ Error adding goal: {result.get('error', 'Unknown error')}", ephemeral=True)
|
||||
|
||||
@freaktetogoal_group.command(name="list", description="List Freak Teto's current goals.") # Updated description
|
||||
@app_commands.describe(status="Filter goals by status (e.g., pending, active).", limit="Maximum goals to show (default 10).")
|
||||
@app_commands.choices(status=[
|
||||
app_commands.Choice(name="Pending", value="pending"),
|
||||
app_commands.Choice(name="Active", value="active"),
|
||||
app_commands.Choice(name="Completed", value="completed"),
|
||||
app_commands.Choice(name="Failed", value="failed"),
|
||||
])
|
||||
async def freaktetogoal_list(interaction: discord.Interaction, status: Optional[app_commands.Choice[str]] = None, limit: Optional[int] = 10): # Renamed function
|
||||
if interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.response.send_message("⛔ Only Master can list my goals.", ephemeral=True) # Updated message
|
||||
return
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
status_value = status.value if status else None
|
||||
limit_value = max(1, min(limit or 10, 25)) # Clamp limit
|
||||
goals = await cog.memory_manager.get_goals(status=status_value, limit=limit_value)
|
||||
if not goals:
|
||||
await interaction.followup.send(f"I have no goals found matching the criteria (Status: {status_value or 'any'}), Master.", ephemeral=True) # Updated message
|
||||
return
|
||||
|
||||
embed = create_freak_teto_embed(f"Freak Teto Goals (Status: {status_value or 'All'})", color=discord.Color.purple()) # Use new helper, updated title
|
||||
for goal in goals:
|
||||
details_str = f"\n Details: `{json.dumps(goal.get('details'))}`" if goal.get('details') else ""
|
||||
created_ts = int(goal.get('created_timestamp', 0))
|
||||
updated_ts = int(goal.get('last_updated', 0))
|
||||
embed.add_field(
|
||||
name=f"ID: {goal.get('goal_id')} | P: {goal.get('priority', '?')} | Status: {goal.get('status', '?')}",
|
||||
value=f"> {goal.get('description', 'N/A')}{details_str}\n"
|
||||
f"> Created: <t:{created_ts}:R> | Updated: <t:{updated_ts}:R>",
|
||||
inline=False
|
||||
)
|
||||
await interaction.followup.send(embed=embed, ephemeral=True)
|
||||
|
||||
@freaktetogoal_group.command(name="update", description="Update a goal's status, priority, or details.") # Use renamed group variable
|
||||
@app_commands.describe(
|
||||
goal_id="The ID of the goal to update.",
|
||||
status="New status for the goal.",
|
||||
priority="New priority (1=highest, 10=lowest).",
|
||||
details_json="Optional: New JSON string for goal details (replaces existing)."
|
||||
)
|
||||
@app_commands.choices(status=[
|
||||
app_commands.Choice(name="Pending", value="pending"),
|
||||
app_commands.Choice(name="Active", value="active"),
|
||||
app_commands.Choice(name="Completed", value="completed"),
|
||||
app_commands.Choice(name="Failed", value="failed"),
|
||||
])
|
||||
async def freaktetogoal_update(interaction: discord.Interaction, goal_id: int, status: Optional[app_commands.Choice[str]] = None, priority: Optional[int] = None, details_json: Optional[str] = None): # Renamed function
|
||||
if interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.response.send_message("⛔ Only Master can update my goals.", ephemeral=True) # Updated message
|
||||
return
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
|
||||
status_value = status.value if status else None
|
||||
details = None
|
||||
if details_json:
|
||||
try:
|
||||
details = json.loads(details_json)
|
||||
except json.JSONDecodeError:
|
||||
await interaction.followup.send("❌ Invalid JSON format for details.", ephemeral=True)
|
||||
return
|
||||
|
||||
if not any([status_value, priority is not None, details is not None]):
|
||||
await interaction.followup.send("❌ You must provide at least one field to update (status, priority, or details_json).", ephemeral=True)
|
||||
return
|
||||
|
||||
result = await cog.memory_manager.update_goal(goal_id, status=status_value, priority=priority, details=details)
|
||||
if result.get("status") == "updated":
|
||||
await interaction.followup.send(f"✅ Goal ID {goal_id} updated.", ephemeral=True)
|
||||
elif result.get("status") == "not_found":
|
||||
await interaction.followup.send(f"❓ Goal ID {goal_id} not found, Master.", ephemeral=True) # Updated message
|
||||
else:
|
||||
await interaction.followup.send(f"⚠️ Error updating goal: {result.get('error', 'Unknown error')}", ephemeral=True)
|
||||
|
||||
@freaktetogoal_group.command(name="delete", description="Delete a goal.") # Use renamed group variable
|
||||
@app_commands.describe(goal_id="The ID of the goal to delete.")
|
||||
async def freaktetogoal_delete(interaction: discord.Interaction, goal_id: int): # Renamed function
|
||||
if interaction.user.id != cog.bot.owner_id:
|
||||
await interaction.response.send_message("⛔ Only Master can delete my goals.", ephemeral=True) # Updated message
|
||||
return
|
||||
await interaction.response.defer(ephemeral=True)
|
||||
result = await cog.memory_manager.delete_goal(goal_id)
|
||||
if result.get("status") == "deleted":
|
||||
await interaction.followup.send(f"✅ Goal ID {goal_id} deleted, Master.", ephemeral=True) # Updated message
|
||||
elif result.get("status") == "not_found":
|
||||
await interaction.followup.send(f"❓ Goal ID {goal_id} not found, Master.", ephemeral=True) # Updated message
|
||||
else:
|
||||
await interaction.followup.send(f"⚠️ Error deleting goal: {result.get('error', 'Unknown error')}", ephemeral=True)
|
||||
|
||||
# Add the command group to the bot's tree
|
||||
cog.bot.tree.add_command(freaktetogoal_group) # Use renamed group variable
|
||||
# Add group command functions to the list for tracking (optional, but good practice)
|
||||
command_functions.extend([freaktetogoal_add, freaktetogoal_list, freaktetogoal_update, freaktetogoal_delete]) # Use renamed functions
|
||||
|
||||
|
||||
# Get command names safely - Command objects don't have __name__ attribute
|
||||
command_names = []
|
||||
for func in command_functions:
|
||||
# For app commands, use the name attribute directly
|
||||
if hasattr(func, "name"):
|
||||
command_names.append(func.name)
|
||||
# For regular functions, use __name__
|
||||
elif hasattr(func, "__name__"):
|
||||
command_names.append(func.__name__)
|
||||
else:
|
||||
command_names.append(str(func))
|
||||
|
||||
print(f"Freak Teto commands setup in cog: {command_names}") # Updated log
|
||||
|
||||
# Return the command functions for proper registration
|
||||
return command_functions
|
1473
freak_teto/config.py
Normal file
File diff suppressed because it is too large
251
freak_teto/context.py
Normal file
@@ -0,0 +1,251 @@
|
||||
import json
|
||||
import discord
|
||||
import time
|
||||
import datetime
|
||||
import re
|
||||
from typing import TYPE_CHECKING, Optional, List, Dict, Any
|
||||
|
||||
# Relative imports
|
||||
from .config import CONTEXT_WINDOW_SIZE # Import necessary config
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .cog import FreakTetoCog # For type hinting - Updated
|
||||
|
||||
# --- Context Gathering Functions ---
|
||||
# Note: These functions need the 'cog' instance passed to access state like caches, etc.
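# Illustrative usage (an assumed call pattern; the real call sites live in the cog/api
# modules and may differ):
#
#   history_msgs = gather_conversation_context(self, message.channel.id, message.id)
#   memory_block = await get_memory_context(self, message)
#   # both are then folded into the prompt that gets sent to the model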
|
||||
|
||||
def gather_conversation_context(cog: 'FreakTetoCog', channel_id: int, current_message_id: int) -> List[Dict[str, str]]: # Updated type hint
|
||||
"""Gathers and formats conversation history from cache for API context."""
|
||||
context_api_messages = []
|
||||
if channel_id in cog.message_cache['by_channel']:
|
||||
cached = list(cog.message_cache['by_channel'][channel_id])
|
||||
# The current message is now included when selecting the context window below
|
||||
context_messages_data = cached[-CONTEXT_WINDOW_SIZE:] # Use config value
|
||||
|
||||
for msg_data in context_messages_data:
|
||||
role = "assistant" if msg_data['author']['id'] == str(cog.bot.user.id) else "user"
|
||||
|
||||
# Build the content string, including reply and attachment info
|
||||
content_parts = []
|
||||
# FIX: Use the pre-formatted author_string which includes '(BOT)' tag if applicable.
|
||||
# Fall back to display_name or 'Unknown User' if author_string is missing for some reason.
|
||||
author_name = msg_data.get('author_string', msg_data.get('author', {}).get('display_name', 'Unknown User'))
|
||||
|
||||
message_id = msg_data['id'] # Get the message ID
|
||||
|
||||
# Add reply prefix if applicable
|
||||
if msg_data.get("is_reply"):
|
||||
reply_author = msg_data.get('replied_to_author_name', 'Unknown User')
|
||||
reply_snippet = msg_data.get('replied_to_content_snippet') # Get value, could be None
|
||||
# Keep snippet very short for context, handle None case
|
||||
reply_snippet_short = '...' # Default if snippet is None or not a string
|
||||
if isinstance(reply_snippet, str):
|
||||
reply_snippet_short = (reply_snippet[:25] + '...') if len(reply_snippet) > 28 else reply_snippet
|
||||
content_parts.append(f"{author_name} (Message ID: {message_id}) (replying to {reply_author} '{reply_snippet_short}'):") # Clarify ID
|
||||
else:
|
||||
content_parts.append(f"{author_name} (Message ID: {message_id}):") # Clarify ID
|
||||
|
||||
# Add main message content
|
||||
if msg_data.get('content'):
|
||||
content_parts.append(msg_data['content'])
|
||||
|
||||
# Add attachment descriptions
|
||||
attachments = msg_data.get("attachment_descriptions", [])
|
||||
if attachments:
|
||||
# Join descriptions into a single string
|
||||
attachment_str = " ".join([att['description'] for att in attachments])
|
||||
content_parts.append(attachment_str)
|
||||
|
||||
|
||||
# --- New Handling for Tool Request/Response Turns ---
|
||||
author_id = msg_data['author'].get('id')
|
||||
is_tool_request = author_id == str(cog.bot.user.id) and msg_data.get('tool_calls') is not None
|
||||
is_tool_response = author_id == "FUNCTION" and msg_data.get('function_results') is not None
|
||||
|
||||
if is_tool_request:
|
||||
# Format tool request turn
|
||||
tool_names = ", ".join([tc['name'] for tc in msg_data['tool_calls']])
|
||||
content = f"[System Note: Freak Teto requested tool(s): {tool_names}]" # Simple summary - Updated name
|
||||
role = "assistant" # Represent as part of the assistant's turn/thought process
|
||||
elif is_tool_response:
|
||||
# Format tool response turn
|
||||
result_summary_parts = []
|
||||
for res in msg_data['function_results']:
|
||||
res_str = json.dumps(res.get("response", {}))
|
||||
truncated_res = (res_str[:150] + '...') if len(res_str) > 153 else res_str
|
||||
result_summary_parts.append(f"Tool: {res.get('name', 'N/A')}, Result: {truncated_res}")
|
||||
result_summary = "; ".join(result_summary_parts)
|
||||
content = f"[System Note: Tool Execution Result: {result_summary}]"
|
||||
role = "function" # Keep role as 'function' for API compatibility if needed, or maybe 'system'? Let's try 'function'.
|
||||
else:
|
||||
# --- Original Handling for User/Assistant messages ---
|
||||
content = " ".join(content_parts).strip()
|
||||
# Role is already determined above
|
||||
|
||||
# Append if content is not empty
|
||||
if content:
|
||||
context_api_messages.append({"role": role, "content": content})
|
||||
# --- End Modified Handling ---
|
||||
|
||||
return context_api_messages
|
||||
|
||||
|
||||
async def get_memory_context(cog: 'FreakTetoCog', message: discord.Message) -> Optional[str]: # Updated type hint
|
||||
"""Retrieves relevant past interactions and facts to provide memory context."""
|
||||
channel_id = message.channel.id
|
||||
user_id = str(message.author.id)
|
||||
memory_parts = []
|
||||
current_message_content = message.content
|
||||
|
||||
# 1. Retrieve Relevant User Facts
|
||||
try:
|
||||
user_facts = await cog.memory_manager.get_user_facts(user_id, context=current_message_content)
|
||||
if user_facts:
|
||||
facts_str = "; ".join(user_facts)
|
||||
memory_parts.append(f"Relevant facts about {message.author.display_name}: {facts_str}")
|
||||
except Exception as e: print(f"Error retrieving relevant user facts for memory context: {e}")
|
||||
|
||||
# 1b. Retrieve Relevant General Facts
|
||||
try:
|
||||
general_facts = await cog.memory_manager.get_general_facts(context=current_message_content, limit=5)
|
||||
if general_facts:
|
||||
facts_str = "; ".join(general_facts)
|
||||
memory_parts.append(f"Relevant general knowledge: {facts_str}")
|
||||
except Exception as e: print(f"Error retrieving relevant general facts for memory context: {e}")
|
||||
|
||||
# 2. Retrieve Recent Interactions with the User in this Channel
|
||||
try:
|
||||
user_channel_messages = [msg for msg in cog.message_cache['by_channel'].get(channel_id, []) if msg['author']['id'] == user_id]
|
||||
if user_channel_messages:
|
||||
recent_user_msgs = user_channel_messages[-3:]
|
||||
msgs_str = "\n".join([f"- {m['content'][:80]} (at {m['created_at']})" for m in recent_user_msgs])
|
||||
memory_parts.append(f"Recent messages from {message.author.display_name} in this channel:\n{msgs_str}")
|
||||
except Exception as e: print(f"Error retrieving user channel messages for memory context: {e}")
|
||||
|
||||
# 3. Retrieve Recent Bot Replies in this Channel
|
||||
try:
|
||||
bot_replies = list(cog.message_cache['replied_to'].get(channel_id, []))
|
||||
if bot_replies:
|
||||
recent_bot_replies = bot_replies[-3:]
|
||||
replies_str = "\n".join([f"- {m['content'][:80]} (at {m['created_at']})" for m in recent_bot_replies])
|
||||
memory_parts.append(f"Your (Freak Teto's) recent replies in this channel:\n{replies_str}") # Updated name
|
||||
except Exception as e: print(f"Error retrieving bot replies for memory context: {e}")
|
||||
|
||||
# 4. Retrieve Conversation Summary
|
||||
cached_summary_data = cog.conversation_summaries.get(channel_id)
|
||||
if cached_summary_data and isinstance(cached_summary_data, dict):
|
||||
summary_text = cached_summary_data.get("summary")
|
||||
# Add TTL check if desired, e.g., if time.time() - cached_summary_data.get("timestamp", 0) < 900:
|
||||
if summary_text and not summary_text.startswith("Error"):
|
||||
memory_parts.append(f"Summary of the ongoing conversation: {summary_text}")
|
||||
|
||||
# 5. Add information about active topics the user has engaged with
|
||||
try:
|
||||
channel_topics_data = cog.active_topics.get(channel_id)
|
||||
if channel_topics_data:
|
||||
user_interests = channel_topics_data["user_topic_interests"].get(user_id, [])
|
||||
if user_interests:
|
||||
sorted_interests = sorted(user_interests, key=lambda x: x.get("score", 0), reverse=True)
|
||||
top_interests = sorted_interests[:3]
|
||||
interests_str = ", ".join([f"{interest['topic']} (score: {interest['score']:.2f})" for interest in top_interests])
|
||||
memory_parts.append(f"{message.author.display_name}'s topic interests: {interests_str}")
|
||||
for interest in top_interests:
|
||||
if "last_mentioned" in interest:
|
||||
time_diff = time.time() - interest["last_mentioned"]
|
||||
if time_diff < 3600:
|
||||
minutes_ago = int(time_diff / 60)
|
||||
memory_parts.append(f"They discussed '{interest['topic']}' about {minutes_ago} minutes ago.")
|
||||
except Exception as e: print(f"Error retrieving user topic interests for memory context: {e}")
|
||||
|
||||
# 6. Add information about user's conversation patterns
|
||||
try:
|
||||
user_messages = cog.message_cache['by_user'].get(user_id, [])
|
||||
if len(user_messages) >= 5:
|
||||
last_5_msgs = user_messages[-5:]
|
||||
avg_length = sum(len(msg["content"]) for msg in last_5_msgs) / 5
|
||||
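# Rough emoji detection: the character class below covers the common emoji Unicode blocks
# (emoticons, pictographs, transport, supplemental symbols, dingbats, enclosed characters).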
emoji_pattern = re.compile(r'[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF\U00002702-\U000027B0\U000024C2-\U0001F251]')
|
||||
emoji_count = sum(len(emoji_pattern.findall(msg["content"])) for msg in last_5_msgs)
|
||||
slang_words = ["ngl", "icl", "pmo", "ts", "bro", "vro", "bruh", "tuff", "kevin"]
|
||||
slang_count = sum(1 for msg in last_5_msgs for word in slang_words if re.search(r'\b' + word + r'\b', msg["content"].lower()))
|
||||
|
||||
style_parts = []
|
||||
if avg_length < 20: style_parts.append("very brief messages")
|
||||
elif avg_length < 50: style_parts.append("concise messages")
|
||||
elif avg_length > 150: style_parts.append("detailed/lengthy messages")
|
||||
if emoji_count > 5: style_parts.append("frequent emoji use")
|
||||
elif emoji_count == 0: style_parts.append("no emojis")
|
||||
if slang_count > 3: style_parts.append("heavy slang usage")
|
||||
if style_parts: memory_parts.append(f"Communication style: {', '.join(style_parts)}")
|
||||
except Exception as e: print(f"Error analyzing user communication patterns: {e}")
|
||||
|
||||
# 7. Add sentiment analysis of user's recent messages
|
||||
try:
|
||||
channel_sentiment = cog.conversation_sentiment[channel_id]
|
||||
user_sentiment = channel_sentiment["user_sentiments"].get(user_id)
|
||||
if user_sentiment:
|
||||
sentiment_desc = f"{user_sentiment['sentiment']} tone"
|
||||
if user_sentiment["intensity"] > 0.7: sentiment_desc += " (strongly so)"
|
||||
elif user_sentiment["intensity"] < 0.4: sentiment_desc += " (mildly so)"
|
||||
memory_parts.append(f"Recent message sentiment: {sentiment_desc}")
|
||||
if user_sentiment.get("emotions"):
|
||||
emotions_str = ", ".join(user_sentiment["emotions"])
|
||||
memory_parts.append(f"Detected emotions from user: {emotions_str}")
|
||||
except Exception as e: print(f"Error retrieving user sentiment/emotions for memory context: {e}")
|
||||
|
||||
# 8. Add Relationship Score with User
|
||||
try:
|
||||
user_id_str = str(user_id)
|
||||
bot_id_str = str(cog.bot.user.id)
|
||||
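# Relationship scores are keyed by the (lower id, higher id) pair, so the lookup is
# order-independent regardless of which participant is queried first.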
key_1, key_2 = (user_id_str, bot_id_str) if user_id_str < bot_id_str else (bot_id_str, user_id_str)
|
||||
relationship_score = cog.user_relationships.get(key_1, {}).get(key_2, 0.0)
|
||||
memory_parts.append(f"Relationship score with {message.author.display_name}: {relationship_score:.1f}/100")
|
||||
except Exception as e: print(f"Error retrieving relationship score for memory context: {e}")
|
||||
|
||||
# 9. Retrieve Semantically Similar Messages
|
||||
try:
|
||||
if current_message_content and cog.memory_manager.semantic_collection:
|
||||
filter_metadata = None # Example: {"channel_id": str(channel_id)}
|
||||
semantic_results = await cog.memory_manager.search_semantic_memory(
|
||||
query_text=current_message_content, n_results=3, filter_metadata=filter_metadata
|
||||
)
|
||||
if semantic_results:
|
||||
semantic_memory_parts = ["Semantically similar past messages:"]
|
||||
for result in semantic_results:
|
||||
if result.get('id') == str(message.id): continue
|
||||
doc = result.get('document', 'N/A')
|
||||
meta = result.get('metadata', {})
|
||||
dist = result.get('distance', 1.0)
|
||||
similarity_score = 1.0 - dist
|
||||
timestamp_str = datetime.datetime.fromtimestamp(meta.get('timestamp', 0)).strftime('%Y-%m-%d %H:%M') if meta.get('timestamp') else 'Unknown time'
|
||||
author_name = meta.get('display_name', meta.get('user_name', 'Unknown user'))
|
||||
semantic_memory_parts.append(f"- (Similarity: {similarity_score:.2f}) {author_name} (at {timestamp_str}): {doc[:100]}")
|
||||
if len(semantic_memory_parts) > 1: memory_parts.append("\n".join(semantic_memory_parts))
|
||||
except Exception as e: print(f"Error retrieving semantic memory context: {e}")
|
||||
|
||||
# 10. Add information about recent attachments
|
||||
try:
|
||||
channel_messages = cog.message_cache['by_channel'].get(channel_id, [])
|
||||
messages_with_attachments = [msg for msg in channel_messages if msg.get("attachment_descriptions")]
|
||||
if messages_with_attachments:
|
||||
recent_attachments = messages_with_attachments[-5:] # Get last 5
|
||||
attachment_memory_parts = ["Recently Shared Files/Images:"]
|
||||
for msg in recent_attachments:
|
||||
author_name = msg.get('author', {}).get('display_name', 'Unknown User')
|
||||
timestamp_str = 'Unknown time'
|
||||
try:
|
||||
# Safely parse timestamp
|
||||
if msg.get('created_at'):
|
||||
timestamp_str = datetime.datetime.fromisoformat(msg['created_at']).strftime('%H:%M')
|
||||
except ValueError: pass # Ignore invalid timestamp format
|
||||
|
||||
descriptions = " ".join([att['description'] for att in msg.get('attachment_descriptions', [])])
|
||||
attachment_memory_parts.append(f"- By {author_name} (at {timestamp_str}): {descriptions}")
|
||||
|
||||
if len(attachment_memory_parts) > 1:
|
||||
memory_parts.append("\n".join(attachment_memory_parts))
|
||||
except Exception as e: print(f"Error retrieving recent attachments for memory context: {e}")
|
||||
|
||||
|
||||
if not memory_parts: return None
|
||||
memory_context_str = "--- Memory Context ---\n" + "\n\n".join(memory_parts) + "\n--- End Memory Context ---"
|
||||
return memory_context_str
|
69
freak_teto/extrtools.py
Normal file
@@ -0,0 +1,69 @@
#!/usr/bin/env python3
"""
extrtools.py

Usage:
    python extrtools.py path/to/your_module.py > tools.json

Parses the given Python file for calls to:
    tool_declarations.append(
        generative_models.FunctionDeclaration(...)
    )
and outputs a JSON list of the kwargs passed to each FunctionDeclaration.
"""

import ast
import json
import sys


def extract_function_declarations(source: str):
    tree = ast.parse(source)
    tools = []

    for node in ast.walk(tree):
        # look for expressions like: tool_declarations.append( generative_models.FunctionDeclaration(...) )
        if (
            isinstance(node, ast.Expr)
            and isinstance(node.value, ast.Call)
            and isinstance(node.value.func, ast.Attribute)
            and node.value.func.attr == "append"
            # ensure it's tool_declarations.append
            and isinstance(node.value.func.value, ast.Name)
            and node.value.func.value.id == "tool_declarations"
            and node.value.args
            and isinstance(node.value.args[0], ast.Call)
        ):
            decl_call = node.value.args[0]
            # ensure it's generative_models.FunctionDeclaration(...)
            if (
                isinstance(decl_call.func, ast.Attribute)
                and decl_call.func.attr == "FunctionDeclaration"
            ):
                tool_obj = {}
                for kw in decl_call.keywords:
                    # use ast.literal_eval to turn the AST node into a Python object
                    try:
                        value = ast.literal_eval(kw.value)
                    except ValueError:
                        # if something non-literal sneaks in, fall back to the raw source
                        value = ast.get_source_segment(source, kw.value)
                    tool_obj[kw.arg] = value
                tools.append(tool_obj)

    return tools


def main():
    if len(sys.argv) != 2:
        print("Usage: python extrtools.py path/to/your_module.py", file=sys.stderr)
        sys.exit(1)

    path = sys.argv[1]
    with open(path, "r", encoding="utf-8") as f:
        source = f.read()

    tools = extract_function_declarations(source)
    json.dump(tools, sys.stdout, indent=2)
    sys.stdout.write("\n")


if __name__ == "__main__":
    main()
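
# Hypothetical example of what the script extracts. Given a module containing:
#
#   tool_declarations.append(
#       generative_models.FunctionDeclaration(
#           name="get_weather",
#           description="Look up the weather for a city.",
#           parameters={"type": "object", "properties": {"city": {"type": "string"}}},
#       )
#   )
#
# the output is a JSON list of the keyword arguments, roughly:
#
#   [
#     {
#       "name": "get_weather",
#       "description": "Look up the weather for a city.",
#       "parameters": {"type": "object", "properties": {"city": {"type": "string"}}}
#     }
#   ]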
|
570
freak_teto/listeners.py
Normal file
@@ -0,0 +1,570 @@
|
||||
import discord
|
||||
from discord.ext import commands
|
||||
import random
|
||||
import asyncio
|
||||
import time
|
||||
import re
|
||||
import os # Added for file handling in error case
|
||||
from typing import TYPE_CHECKING, Union, Dict, Any, Optional
|
||||
|
||||
# Relative imports
|
||||
from .utils import format_message # Import format_message
|
||||
from .config import CONTEXT_WINDOW_SIZE # Import context window size
|
||||
# Assuming api, utils, analysis functions are defined and imported correctly later
|
||||
# We might need to adjust these imports based on final structure
|
||||
# from .api import get_ai_response, get_proactive_ai_response
|
||||
# from .utils import format_message, simulate_human_typing
|
||||
# from .analysis import analyze_message_sentiment, update_conversation_sentiment
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .cog import FreakTetoCog # For type hinting - Updated
|
||||
|
||||
# Note: These listener functions need to be registered within the FreakTetoCog class setup. # Updated comment
|
||||
# They are defined here for separation but won't work standalone without being
|
||||
# attached to the cog instance during setup, roughly as sketched below.
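# A minimal wiring sketch (assumed; the actual cog setup may differ), using
# functools.partial so each listener receives the cog instance as its first argument:
#
#   import functools
#   # inside FreakTetoCog (e.g. in cog_load):
#   self.bot.add_listener(functools.partial(on_ready_listener, self), 'on_ready')
#   self.bot.add_listener(functools.partial(on_message_listener, self), 'on_message')
#   self.bot.add_listener(functools.partial(on_reaction_add_listener, self), 'on_reaction_add')
#   self.bot.add_listener(functools.partial(on_reaction_remove_listener, self), 'on_reaction_remove')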
|
||||
|
||||
async def on_ready_listener(cog: 'FreakTetoCog'): # Updated type hint
|
||||
"""Listener function for on_ready."""
|
||||
print(f'Freak Teto Bot is ready! Logged in as {cog.bot.user.name} ({cog.bot.user.id})') # Updated log
|
||||
print('------')
|
||||
|
||||
# Now that the bot is ready, we can sync commands with Discord
|
||||
try:
|
||||
print("FreakTetoCog: Syncing commands with Discord...") # Updated log
|
||||
synced = await cog.bot.tree.sync()
|
||||
print(f"FreakTetoCog: Synced {len(synced)} command(s)") # Updated log
|
||||
|
||||
# List the synced commands
|
||||
freak_teto_commands = [cmd.name for cmd in cog.bot.tree.get_commands() if cmd.name.startswith("freakteto")] # Updated filter and variable name
|
||||
print(f"FreakTetoCog: Available Freak Teto commands: {', '.join(freak_teto_commands)}") # Updated log
|
||||
except Exception as e:
|
||||
print(f"FreakTetoCog: Failed to sync commands: {e}") # Updated log
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# --- Message history pre-loading removed ---
|
||||
|
||||
|
||||
async def on_message_listener(cog: 'FreakTetoCog', message: discord.Message): # Updated type hint
|
||||
"""Listener function for on_message."""
|
||||
# Import necessary functions dynamically or ensure they are passed/accessible via cog
|
||||
from .api import get_ai_response, get_proactive_ai_response # Ensure these are refactored if needed
|
||||
from .utils import format_message, simulate_human_typing # Ensure these are refactored if needed
|
||||
from .analysis import analyze_message_sentiment, update_conversation_sentiment, identify_conversation_topics # Ensure these are refactored if needed
|
||||
# from .config import GURT_RESPONSES # Removed GURT_RESPONSES import
|
||||
|
||||
# Don't respond to our own messages
|
||||
if message.author == cog.bot.user:
|
||||
return
|
||||
|
||||
# Don't process commands here
|
||||
if message.content.startswith(cog.bot.command_prefix):
|
||||
return
|
||||
|
||||
# --- Cache and Track Incoming Message ---
|
||||
try:
|
||||
# Ensure format_message uses the FreakTetoCog instance correctly
|
||||
formatted_message = format_message(cog, message)
|
||||
channel_id = message.channel.id
|
||||
user_id = message.author.id
|
||||
thread_id = message.channel.id if isinstance(message.channel, discord.Thread) else None
|
||||
|
||||
# Update caches (accessing cog's state)
|
||||
# Deduplicate by message ID before appending
|
||||
def _dedup_and_append(cache_deque, msg):
|
||||
if not any(m.get("id") == msg.get("id") for m in cache_deque):
|
||||
cache_deque.append(msg)
|
||||
|
||||
_dedup_and_append(cog.message_cache['by_channel'][channel_id], formatted_message)
|
||||
_dedup_and_append(cog.message_cache['by_user'][user_id], formatted_message)
|
||||
_dedup_and_append(cog.message_cache['global_recent'], formatted_message)
|
||||
if thread_id:
|
||||
_dedup_and_append(cog.message_cache['by_thread'][thread_id], formatted_message)
|
||||
if cog.bot.user.mentioned_in(message):
|
||||
_dedup_and_append(cog.message_cache['mentioned'], formatted_message)
|
||||
|
||||
cog.conversation_history[channel_id].append(formatted_message)
|
||||
if thread_id:
|
||||
cog.thread_history[thread_id].append(formatted_message)
|
||||
|
||||
cog.channel_activity[channel_id] = time.time()
|
||||
cog.user_conversation_mapping[user_id].add(channel_id)
|
||||
|
||||
if channel_id not in cog.active_conversations:
|
||||
cog.active_conversations[channel_id] = {'participants': set(), 'start_time': time.time(), 'last_activity': time.time(), 'topic': None}
|
||||
cog.active_conversations[channel_id]['participants'].add(user_id)
|
||||
cog.active_conversations[channel_id]['last_activity'] = time.time()
|
||||
|
||||
# --- Update Relationship Strengths ---
|
||||
if user_id != cog.bot.user.id:
|
||||
# Ensure analysis uses FreakTetoCog instance correctly
|
||||
message_sentiment_data = analyze_message_sentiment(cog, message.content)
|
||||
sentiment_score = 0.0
|
||||
if message_sentiment_data["sentiment"] == "positive": sentiment_score = message_sentiment_data["intensity"] * 0.5
|
||||
elif message_sentiment_data["sentiment"] == "negative": sentiment_score = -message_sentiment_data["intensity"] * 0.3
|
||||
|
||||
cog._update_relationship(str(user_id), str(cog.bot.user.id), 1.0 + sentiment_score) # Access cog method
|
||||
|
||||
if formatted_message.get("is_reply") and formatted_message.get("replied_to_author_id"):
|
||||
replied_to_id = formatted_message["replied_to_author_id"]
|
||||
if replied_to_id != str(cog.bot.user.id) and replied_to_id != str(user_id):
|
||||
cog._update_relationship(str(user_id), replied_to_id, 1.5 + sentiment_score)
|
||||
|
||||
mentioned_ids = [m["id"] for m in formatted_message.get("mentions", [])]
|
||||
for mentioned_id in mentioned_ids:
|
||||
if mentioned_id != str(cog.bot.user.id) and mentioned_id != str(user_id):
|
||||
cog._update_relationship(str(user_id), mentioned_id, 1.2 + sentiment_score)
|
||||
|
||||
# Analyze message sentiment and update conversation sentiment tracking
|
||||
if message.content:
|
||||
# Ensure analysis uses FreakTetoCog instance correctly
|
||||
message_sentiment = analyze_message_sentiment(cog, message.content)
|
||||
update_conversation_sentiment(cog, channel_id, str(user_id), message_sentiment)
|
||||
|
||||
# --- Add message to semantic memory ---
|
||||
# Ensure MemoryManager instance uses FreakTeto DB paths
|
||||
if message.content and cog.memory_manager.semantic_collection:
|
||||
semantic_metadata = {
|
||||
"user_id": str(user_id), "user_name": message.author.name, "display_name": message.author.display_name,
|
||||
"channel_id": str(channel_id), "channel_name": getattr(message.channel, 'name', 'DM'),
|
||||
"guild_id": str(message.guild.id) if message.guild else None,
|
||||
"timestamp": message.created_at.timestamp()
|
||||
}
|
||||
# Pass the entire formatted_message dictionary now
|
||||
asyncio.create_task(
|
||||
cog.memory_manager.add_message_embedding(
|
||||
message_id=str(message.id), formatted_message_data=formatted_message, metadata=semantic_metadata
|
||||
)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during message caching/tracking/embedding: {e}")
|
||||
# --- End Caching & Embedding ---
|
||||
|
||||
|
||||
# Simple response removed
|
||||
|
||||
# Check conditions for potentially responding
|
||||
bot_mentioned = cog.bot.user.mentioned_in(message)
|
||||
replied_to_bot = message.reference and message.reference.resolved and message.reference.resolved.author == cog.bot.user
|
||||
# Check for "teto" or "freak teto"
|
||||
teto_in_message = "teto" in message.content.lower() or "freak teto" in message.content.lower()
|
||||
now = time.time()
|
||||
time_since_last_activity = now - cog.channel_activity.get(channel_id, 0)
|
||||
time_since_bot_spoke = now - cog.bot_last_spoke.get(channel_id, 0)
|
||||
|
||||
should_consider_responding = False
|
||||
consideration_reason = "Default"
|
||||
proactive_trigger_met = False
|
||||
|
||||
if bot_mentioned or replied_to_bot or teto_in_message: # Use teto_in_message
|
||||
should_consider_responding = True
|
||||
consideration_reason = "Direct mention/reply/name"
|
||||
else:
|
||||
# --- Proactive Engagement Triggers --- (Keep logic, LLM prompt handles persona interpretation)
|
||||
# Ensure config imports FreakTeto specific values if they differ
|
||||
from .config import (PROACTIVE_LULL_THRESHOLD, PROACTIVE_BOT_SILENCE_THRESHOLD, PROACTIVE_LULL_CHANCE,
|
||||
PROACTIVE_TOPIC_RELEVANCE_THRESHOLD, PROACTIVE_TOPIC_CHANCE,
|
||||
PROACTIVE_RELATIONSHIP_SCORE_THRESHOLD, PROACTIVE_RELATIONSHIP_CHANCE,
|
||||
PROACTIVE_SENTIMENT_SHIFT_THRESHOLD, PROACTIVE_SENTIMENT_DURATION_THRESHOLD,
|
||||
PROACTIVE_SENTIMENT_CHANCE, PROACTIVE_USER_INTEREST_THRESHOLD,
|
||||
PROACTIVE_USER_INTEREST_CHANCE)
|
||||
|
||||
# 1. Lull Trigger
|
||||
if time_since_last_activity > PROACTIVE_LULL_THRESHOLD and time_since_bot_spoke > PROACTIVE_BOT_SILENCE_THRESHOLD:
|
||||
# Ensure MemoryManager uses FreakTeto DB paths
|
||||
has_relevant_context = bool(cog.active_topics.get(channel_id, {}).get("topics", [])) or \
|
||||
bool(await cog.memory_manager.get_general_facts(limit=1))
|
||||
if has_relevant_context and random.random() < PROACTIVE_LULL_CHANCE:
|
||||
should_consider_responding = True
|
||||
proactive_trigger_met = True
|
||||
consideration_reason = f"Proactive: Lull ({time_since_last_activity:.0f}s idle, bot silent {time_since_bot_spoke:.0f}s)"
|
||||
|
||||
# 2. Topic Relevance Trigger
|
||||
# Ensure MemoryManager uses FreakTeto DB paths
|
||||
if not proactive_trigger_met and message.content and cog.memory_manager.semantic_collection:
|
||||
try:
|
||||
semantic_results = await cog.memory_manager.search_semantic_memory(query_text=message.content, n_results=1)
|
||||
if semantic_results:
|
||||
similarity_score = 1.0 - semantic_results[0].get('distance', 1.0)
|
||||
if similarity_score >= PROACTIVE_TOPIC_RELEVANCE_THRESHOLD and time_since_bot_spoke > 120:
|
||||
if random.random() < PROACTIVE_TOPIC_CHANCE:
|
||||
should_consider_responding = True
|
||||
proactive_trigger_met = True
|
||||
consideration_reason = f"Proactive: Relevant topic (Sim: {similarity_score:.2f})"
|
||||
print(f"Topic relevance trigger met for msg {message.id}. Sim: {similarity_score:.2f}")
|
||||
else:
|
||||
# Log potentially adjusted for FreakTeto if needed
|
||||
print(f"Topic relevance trigger skipped (Sim {similarity_score:.2f} < {PROACTIVE_TOPIC_RELEVANCE_THRESHOLD} or Chance {PROACTIVE_TOPIC_CHANCE}).")
|
||||
except Exception as semantic_e:
|
||||
print(f"Error during semantic search for topic trigger: {semantic_e}")
|
||||
|
||||
# 3. Relationship Score Trigger
|
||||
# Ensure user_relationships uses FreakTeto data
|
||||
if not proactive_trigger_met:
|
||||
try:
|
||||
user_id_str = str(message.author.id)
|
||||
bot_id_str = str(cog.bot.user.id)
|
||||
key_1, key_2 = (user_id_str, bot_id_str) if user_id_str < bot_id_str else (bot_id_str, user_id_str)
|
||||
relationship_score = cog.user_relationships.get(key_1, {}).get(key_2, 0.0)
|
||||
if relationship_score >= PROACTIVE_RELATIONSHIP_SCORE_THRESHOLD and time_since_bot_spoke > 60:
|
||||
if random.random() < PROACTIVE_RELATIONSHIP_CHANCE:
|
||||
should_consider_responding = True
|
||||
proactive_trigger_met = True
|
||||
consideration_reason = f"Proactive: High relationship ({relationship_score:.1f})"
|
||||
print(f"Relationship trigger met for user {user_id_str}. Score: {relationship_score:.1f}")
|
||||
else:
|
||||
# Log potentially adjusted
|
||||
print(f"Relationship trigger skipped by chance ({PROACTIVE_RELATIONSHIP_CHANCE}). Score: {relationship_score:.1f}")
|
||||
except Exception as rel_e:
|
||||
print(f"Error during relationship trigger check: {rel_e}")
|
||||
|
||||
# 4. Sentiment Shift Trigger
|
||||
# Ensure conversation_sentiment uses FreakTeto data
|
||||
if not proactive_trigger_met:
|
||||
channel_sentiment_data = cog.conversation_sentiment.get(channel_id, {})
|
||||
overall_sentiment = channel_sentiment_data.get("overall", "neutral")
|
||||
sentiment_intensity = channel_sentiment_data.get("intensity", 0.5)
|
||||
sentiment_last_update = channel_sentiment_data.get("last_update", 0)
|
||||
sentiment_duration = now - sentiment_last_update
|
||||
|
||||
if overall_sentiment != "neutral" and \
|
||||
sentiment_intensity >= PROACTIVE_SENTIMENT_SHIFT_THRESHOLD and \
|
||||
sentiment_duration >= PROACTIVE_SENTIMENT_DURATION_THRESHOLD and \
|
||||
time_since_bot_spoke > 180:
|
||||
if random.random() < PROACTIVE_SENTIMENT_CHANCE:
|
||||
should_consider_responding = True
|
||||
proactive_trigger_met = True
|
||||
consideration_reason = f"Proactive: Sentiment Shift ({overall_sentiment}, Intensity: {sentiment_intensity:.2f}, Duration: {sentiment_duration:.0f}s)"
|
||||
print(f"Sentiment Shift trigger met for channel {channel_id}. Sentiment: {overall_sentiment}, Intensity: {sentiment_intensity:.2f}, Duration: {sentiment_duration:.0f}s")
|
||||
else:
|
||||
# Log potentially adjusted
|
||||
print(f"Sentiment Shift trigger skipped by chance ({PROACTIVE_SENTIMENT_CHANCE}). Sentiment: {overall_sentiment}")
|
||||
|
||||
# 5. User Interest Trigger (Based on Freak Teto's interests)
|
||||
if not proactive_trigger_met and message.content:
|
||||
try:
|
||||
# Ensure memory_manager fetches Teto's interests
|
||||
teto_interests = await cog.memory_manager.get_interests(limit=10, min_level=PROACTIVE_USER_INTEREST_THRESHOLD)
|
||||
if teto_interests:
|
||||
message_content_lower = message.content.lower()
|
||||
mentioned_interest = None
|
||||
for interest_topic, interest_level in teto_interests:
|
||||
# Simple check if interest topic is in message
|
||||
if re.search(r'\b' + re.escape(interest_topic.lower()) + r'\b', message_content_lower):
|
||||
mentioned_interest = interest_topic
|
||||
break # Found a mentioned interest
|
||||
|
||||
if mentioned_interest and time_since_bot_spoke > 90: # Bot hasn't spoken recently
|
||||
if random.random() < PROACTIVE_USER_INTEREST_CHANCE:
|
||||
should_consider_responding = True
|
||||
proactive_trigger_met = True
|
||||
consideration_reason = f"Proactive: Freak Teto Interest Mentioned ('{mentioned_interest}')" # Updated log message
|
||||
print(f"Freak Teto Interest trigger met for message {message.id}. Interest: '{mentioned_interest}'") # Updated log
|
||||
else:
|
||||
print(f"Freak Teto Interest trigger skipped by chance ({PROACTIVE_USER_INTEREST_CHANCE}). Interest: '{mentioned_interest}'") # Updated log
|
||||
except Exception as interest_e:
|
||||
print(f"Error during Freak Teto Interest trigger check: {interest_e}") # Updated log
|
||||
|
||||
# 6. Active Goal Relevance Trigger
|
||||
if not proactive_trigger_met and message.content:
|
||||
try:
|
||||
# Ensure memory_manager uses FreakTeto DB paths
|
||||
active_goals = await cog.memory_manager.get_goals(status='active', limit=2)
|
||||
if active_goals:
|
||||
message_content_lower = message.content.lower()
|
||||
relevant_goal = None
|
||||
for goal in active_goals:
|
||||
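# Keyword-overlap heuristic: e.g. a goal "research bread baking techniques" and a message
# mentioning both "bread" and "baking" share two words of 3+ letters, so it counts as relevant.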
goal_keywords = set(re.findall(r'\b\w{3,}\b', goal.get('description', '').lower()))
|
||||
message_words = set(re.findall(r'\b\w{3,}\b', message_content_lower))
|
||||
if len(goal_keywords.intersection(message_words)) > 1:
|
||||
relevant_goal = goal
|
||||
break
|
||||
|
||||
if relevant_goal and time_since_bot_spoke > 120:
|
||||
goal_relevance_chance = PROACTIVE_USER_INTEREST_CHANCE * 1.2
|
||||
if random.random() < goal_relevance_chance:
|
||||
should_consider_responding = True
|
||||
proactive_trigger_met = True
|
||||
goal_desc_short = relevant_goal.get('description', 'N/A')[:40]
|
||||
consideration_reason = f"Proactive: Relevant Active Goal ('{goal_desc_short}...')"
|
||||
print(f"Active Goal trigger met for message {message.id}. Goal ID: {relevant_goal.get('goal_id')}")
|
||||
else:
|
||||
# Log potentially adjusted
|
||||
print(f"Active Goal trigger skipped by chance ({goal_relevance_chance:.2f}).")
|
||||
except Exception as goal_trigger_e:
|
||||
print(f"Error during Active Goal trigger check: {goal_trigger_e}")
|
||||
|
||||
|
||||
# --- Fallback Contextual Chance ---
|
||||
if not should_consider_responding:
|
||||
# Ensure MemoryManager uses FreakTeto DB paths
|
||||
persistent_traits = await cog.memory_manager.get_all_personality_traits()
|
||||
# Use FreakTeto's baseline 'helpfulness' or 'friendliness' instead of 'chattiness'
|
||||
helpfulness_trait = persistent_traits.get('helpfulness', 0.8) # Default Teto helpfulness
|
||||
|
||||
base_chance = helpfulness_trait * 0.2 # Lower base chance for Teto?
|
||||
activity_bonus = 0
|
||||
if time_since_last_activity > 180: activity_bonus += 0.05 # Slightly less eager on lull
|
||||
if time_since_bot_spoke > 400: activity_bonus += 0.1
|
||||
topic_bonus = 0
|
||||
active_channel_topics = cog.active_topics.get(channel_id, {}).get("topics", [])
|
||||
if message.content and active_channel_topics:
|
||||
topic_keywords = set(t['topic'].lower() for t in active_channel_topics)
|
||||
message_words = set(re.findall(r'\b\w+\b', message.content.lower()))
|
||||
if topic_keywords.intersection(message_words): topic_bonus += 0.10 # Lower bonus for topic match?
|
||||
sentiment_modifier = 0
|
||||
channel_sentiment_data = cog.conversation_sentiment.get(channel_id, {})
|
||||
overall_sentiment = channel_sentiment_data.get("overall", "neutral")
|
||||
sentiment_intensity = channel_sentiment_data.get("intensity", 0.5)
|
||||
# Teto might be less likely to respond negatively
|
||||
if overall_sentiment == "negative" and sentiment_intensity > 0.6: sentiment_modifier = -0.15
|
||||
|
||||
final_chance = min(max(base_chance + activity_bonus + topic_bonus + sentiment_modifier, 0.02), 0.5) # Lower max chance?
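# Worked example: helpfulness 0.8 -> base 0.16; a >180s lull (+0.05), >400s of bot
# silence (+0.10) and a topic keyword match (+0.10) give 0.41, inside the 0.02-0.5 clamp.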
|
||||
if random.random() < final_chance:
|
||||
should_consider_responding = True
|
||||
consideration_reason = f"Contextual chance ({final_chance:.2f})"
|
||||
else:
|
||||
consideration_reason = f"Skipped (chance {final_chance:.2f})"
|
||||
|
||||
print(f"Consideration check for message {message.id}: {should_consider_responding} (Reason: {consideration_reason})")
|
||||
|
||||
if not should_consider_responding:
|
||||
return
|
||||
|
||||
# --- Call AI and Handle Response ---
|
||||
cog.current_channel = message.channel # Ensure current channel is set for API calls/tools
|
||||
|
||||
try:
|
||||
response_bundle = None
|
||||
# Ensure API calls use FreakTetoCog instance
|
||||
if proactive_trigger_met:
|
||||
print(f"Calling get_proactive_ai_response for message {message.id} due to: {consideration_reason}")
|
||||
response_bundle = await get_proactive_ai_response(cog, message, consideration_reason)
|
||||
else:
|
||||
print(f"Calling get_ai_response for message {message.id}")
|
||||
response_bundle = await get_ai_response(cog, message)
|
||||
|
||||
# --- Handle AI Response Bundle ---
|
||||
initial_response = response_bundle.get("initial_response")
|
||||
final_response = response_bundle.get("final_response")
|
||||
error_msg = response_bundle.get("error")
|
||||
fallback_initial = response_bundle.get("fallback_initial")
|
||||
|
||||
if error_msg:
|
||||
print(f"Critical Error from AI response function: {error_msg}")
|
||||
# Updated error notification for Teto
|
||||
error_notification = f"Ah! Master, something went wrong while I was thinking... (`{error_msg[:100]}`)"
|
||||
try:
|
||||
await message.channel.send(error_notification)
|
||||
except Exception as send_err:
|
||||
print(f"Failed to send error notification to channel: {send_err}")
|
||||
return # Still exit after handling the error
|
||||
|
||||
# --- Process and Send Responses ---
|
||||
sent_any_message = False
|
||||
reacted = False
|
||||
|
||||
# Helper function to handle sending a single response text and caching
|
||||
async def send_response_content(
|
||||
response_data: Optional[Dict[str, Any]],
|
||||
response_label: str,
|
||||
original_message: discord.Message
|
||||
) -> bool:
|
||||
nonlocal sent_any_message
|
||||
if not response_data or not isinstance(response_data, dict) or \
|
||||
not response_data.get("should_respond") or not response_data.get("content"):
|
||||
return False
|
||||
|
||||
response_text = response_data["content"]
|
||||
reply_to_id = response_data.get("reply_to_message_id")
|
||||
message_reference = None
|
||||
|
||||
print(f"Preparing to send {response_label} content...")
|
||||
|
||||
# --- Handle Reply (Logic remains the same) ---
|
||||
if reply_to_id and isinstance(reply_to_id, str) and reply_to_id.isdigit():
|
||||
try:
|
||||
original_reply_msg = await original_message.channel.fetch_message(int(reply_to_id))
|
||||
if original_reply_msg:
|
||||
message_reference = original_reply_msg.to_reference(fail_if_not_exists=False)
|
||||
print(f"Will reply to message ID: {reply_to_id}")
|
||||
else:
|
||||
print(f"Warning: Could not fetch message {reply_to_id} to reply to.")
|
||||
except (ValueError, discord.NotFound, discord.Forbidden) as fetch_err:
|
||||
print(f"Warning: Error fetching message {reply_to_id} to reply to: {fetch_err}")
|
||||
except Exception as e:
|
||||
print(f"Unexpected error fetching reply message {reply_to_id}: {e}")
|
||||
elif reply_to_id:
|
||||
print(f"Warning: Invalid reply_to_id format received: {reply_to_id}")
|
||||
|
||||
|
||||
# --- Handle Pings (Logic remains the same, uses get_user_id tool) ---
|
||||
ping_matches = re.findall(r'\[PING:\s*([^\]]+)\s*\]', response_text)
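# e.g. a response containing "[PING: Alice]" captures "Alice", which is resolved to a real
# <@user_id> mention below (or left as plain text if the lookup fails).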
|
||||
if ping_matches:
|
||||
print(f"Found ping placeholders: {ping_matches}")
|
||||
# Ensure tools uses FreakTetoCog instance
|
||||
from .tools import get_user_id
|
||||
for user_name_to_ping in ping_matches:
|
||||
user_id_result = await get_user_id(cog, user_name_to_ping.strip())
|
||||
if user_id_result and user_id_result.get("status") == "success":
|
||||
user_id_to_ping = user_id_result.get("user_id")
|
||||
if user_id_to_ping:
|
||||
response_text = response_text.replace(f'[PING: {user_name_to_ping}]', f'<@{user_id_to_ping}>', 1)
|
||||
print(f"Replaced ping placeholder for '{user_name_to_ping}' with <@{user_id_to_ping}>")
|
||||
else:
|
||||
print(f"Warning: get_user_id succeeded for '{user_name_to_ping}' but returned no ID.")
|
||||
response_text = response_text.replace(f'[PING: {user_name_to_ping}]', user_name_to_ping, 1)
|
||||
else:
|
||||
print(f"Warning: Could not find user ID for ping placeholder '{user_name_to_ping}'. Error: {user_id_result.get('error')}")
|
||||
response_text = response_text.replace(f'[PING: {user_name_to_ping}]', user_name_to_ping, 1)
|
||||
|
||||
# --- Send Message ---
|
||||
if len(response_text) > 1900:
|
||||
# Update filepath name
|
||||
filepath = f'freak_teto_{response_label}_{original_message.id}.txt'
|
||||
try:
|
||||
with open(filepath, 'w', encoding='utf-8') as f: f.write(response_text)
|
||||
                    await original_message.channel.send(f"{response_label.capitalize()} response too long:", file=discord.File(filepath), reference=message_reference, mention_author=False)
                    sent_any_message = True
                    print(f"Sent {response_label} content as file (Reply: {bool(message_reference)}).")
                    return True
                except Exception as file_e: print(f"Error writing/sending long {response_label} response file: {file_e}")
                finally:
                    try: os.remove(filepath)
                    except OSError as os_e: print(f"Error removing temp file {filepath}: {os_e}")
            else:
                try:
                    # Ensure utils uses FreakTetoCog instance
                    async with original_message.channel.typing():
                        await simulate_human_typing(cog, original_message.channel, response_text)
                    sent_msg = await original_message.channel.send(response_text, reference=message_reference, mention_author=False)
                    sent_any_message = True
                    # Cache this bot response using FreakTetoCog
                    bot_response_cache_entry = format_message(cog, sent_msg)
                    cog.message_cache['by_channel'][channel_id].append(bot_response_cache_entry)
                    cog.message_cache['global_recent'].append(bot_response_cache_entry)
                    cog.bot_last_spoke[channel_id] = time.time()
                    # Track participation topic using FreakTetoCog
                    # Ensure analysis uses FreakTetoCog instance
                    identified_topics = identify_conversation_topics(cog, [bot_response_cache_entry])
                    if identified_topics:
                        topic = identified_topics[0]['topic'].lower().strip()
                        # Use renamed state var for FreakTeto
                        cog.freak_teto_participation_topics[topic] += 1 # Use renamed state var
                        # Update log message
                        print(f"Tracked Freak Teto participation ({response_label}) in topic: '{topic}'")
                    print(f"Sent {response_label} content (Reply: {bool(message_reference)}).")
                    return True
                except Exception as send_e:
                    print(f"Error sending {response_label} content: {send_e}")
                    return False

        # Send initial response content if valid
        sent_initial_message = await send_response_content(initial_response, "initial", message)

        # Send final response content if valid
        sent_final_message = False
        initial_content = initial_response.get("content") if initial_response else None
        if final_response and (not sent_initial_message or initial_content != final_response.get("content")):
            sent_final_message = await send_response_content(final_response, "final", message)

        # Handle Reaction (Logic remains same)
        reaction_source = final_response if final_response else initial_response
        if reaction_source and isinstance(reaction_source, dict):
            emoji_to_react = reaction_source.get("react_with_emoji")
            if emoji_to_react and isinstance(emoji_to_react, str):
                try:
                    if 1 <= len(emoji_to_react) <= 4 and not re.match(r'<a?:.+?:\d+>', emoji_to_react):
                        if not sent_any_message:
                            await message.add_reaction(emoji_to_react)
                            reacted = True
                            print(f"Bot reacted to message {message.id} with {emoji_to_react}")
                        else:
                            print(f"Skipping reaction {emoji_to_react} because a message was already sent.")
                    else: print(f"Invalid emoji format: {emoji_to_react}")
                except Exception as e: print(f"Error adding reaction '{emoji_to_react}': {e}")

        # Log if response was intended but nothing happened (Logic remains same)
        initial_intended_action = initial_response and initial_response.get("should_respond")
        initial_action_taken = sent_initial_message or (reacted and reaction_source == initial_response)
        final_intended_action = final_response and final_response.get("should_respond")
        final_action_taken = sent_final_message or (reacted and reaction_source == final_response)

        if (initial_intended_action and not initial_action_taken) or \
           (final_intended_action and not final_action_taken):
            print(f"Warning: AI response intended action but nothing sent/reacted. Initial: {initial_response}, Final: {final_response}")

    except Exception as e:
        print(f"Exception in on_message listener main block: {str(e)}")
        import traceback
        traceback.print_exc()
        if bot_mentioned or replied_to_bot:
            # Updated fallback message for Teto
            await message.channel.send(random.choice(["Hmm?", "I'm sorry, Master, I seem to be malfunctioning...", "...", "🍞?"]))


@commands.Cog.listener()
# Update type hint and variable names for FreakTeto
async def on_reaction_add_listener(cog: 'FreakTetoCog', reaction: discord.Reaction, user: Union[discord.Member, discord.User]):
    """Listener function for on_reaction_add."""
    from .config import EMOJI_SENTIMENT
    # Ensure analysis uses FreakTetoCog instance
    from .analysis import identify_conversation_topics

    if user.bot or reaction.message.author.id != cog.bot.user.id:
        return

    message_id = str(reaction.message.id)
    emoji_str = str(reaction.emoji)
    sentiment = "neutral"
    if emoji_str in EMOJI_SENTIMENT["positive"]: sentiment = "positive"
    elif emoji_str in EMOJI_SENTIMENT["negative"]: sentiment = "negative"

    # Use renamed state var for FreakTeto
    reaction_state = cog.freak_teto_message_reactions[message_id]

    if sentiment == "positive": reaction_state["positive"] += 1
    elif sentiment == "negative": reaction_state["negative"] += 1
    reaction_state["timestamp"] = time.time()

    if not reaction_state.get("topic"):
        try:
            # Ensure message cache is FreakTeto's
            teto_msg_data = next((msg for msg in cog.message_cache['global_recent'] if msg['id'] == message_id), None)
            if teto_msg_data and teto_msg_data['content']:
                # Ensure analysis uses FreakTetoCog instance
                identified_topics = identify_conversation_topics(cog, [teto_msg_data])
                if identified_topics:
                    topic = identified_topics[0]['topic'].lower().strip()
                    reaction_state["topic"] = topic
                    # Update log message
                    print(f"Reaction added to Freak Teto msg ({message_id}) on topic '{topic}'. Sentiment: {sentiment}")
                else: print(f"Reaction added to Freak Teto msg ({message_id}), topic unknown.") # Update log
            else: print(f"Reaction added, but Freak Teto msg {message_id} not in cache.") # Update log
        except Exception as e: print(f"Error determining topic for reaction on msg {message_id}: {e}")
    else: print(f"Reaction added to Freak Teto msg ({message_id}) on known topic '{reaction_state['topic']}'. Sentiment: {sentiment}") # Update log


@commands.Cog.listener()
# Update type hint and variable names for FreakTeto
async def on_reaction_remove_listener(cog: 'FreakTetoCog', reaction: discord.Reaction, user: Union[discord.Member, discord.User]):
    """Listener function for on_reaction_remove."""
    from .config import EMOJI_SENTIMENT

    if user.bot or reaction.message.author.id != cog.bot.user.id:
        return

    message_id = str(reaction.message.id)
    emoji_str = str(reaction.emoji)
    sentiment = "neutral"
    if emoji_str in EMOJI_SENTIMENT["positive"]: sentiment = "positive"
    elif emoji_str in EMOJI_SENTIMENT["negative"]: sentiment = "negative"

    # Use renamed state var for FreakTeto
    if message_id in cog.freak_teto_message_reactions:
        reaction_state = cog.freak_teto_message_reactions[message_id]
        if sentiment == "positive": reaction_state["positive"] = max(0, reaction_state["positive"] - 1)
        elif sentiment == "negative": reaction_state["negative"] = max(0, reaction_state["negative"] - 1)
        # Update log message
        print(f"Reaction removed from Freak Teto msg ({message_id}). Sentiment: {sentiment}")
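
# Hypothetical sketch: the two module-level listener helpers above take the cog as
# their first argument, so the actual event wiring presumably lives on FreakTetoCog
# in cog.py, which is not part of this diff. The class and method names below are
# assumptions for illustration only, not the committed implementation.
class _ReactionWiringSketch(commands.Cog):
    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction: discord.Reaction, user: Union[discord.Member, discord.User]):
        # In the real cog, 'self' would be the FreakTetoCog instance the helper expects.
        await on_reaction_add_listener(self, reaction, user)

    @commands.Cog.listener()
    async def on_reaction_remove(self, reaction: discord.Reaction, user: Union[discord.Member, discord.User]):
        await on_reaction_remove_listener(self, reaction, user)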
19
freak_teto/memory.py
Normal file
@@ -0,0 +1,19 @@
# Import the MemoryManager from the parent directory
# Use a direct import path that doesn't rely on package structure
import os
import importlib.util

# Get the absolute path to the shared gurt_memory.py (this path remains correct)
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
shared_memory_path = os.path.join(parent_dir, 'gurt_memory.py') # Renamed variable for clarity

# Load the module dynamically
spec = importlib.util.spec_from_file_location('gurt_memory', shared_memory_path) # The module name passed here is arbitrary; 'gurt_memory' simply mirrors the source filename
shared_memory_module = importlib.util.module_from_spec(spec) # Renamed variable
spec.loader.exec_module(shared_memory_module)

# Import the MemoryManager class from the loaded module
MemoryManager = shared_memory_module.MemoryManager

# Re-export the MemoryManager class
__all__ = ['MemoryManager']
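
# Hypothetical usage sketch: other FreakTeto modules would import MemoryManager from
# freak_teto.memory and construct it with Teto-specific storage paths. The constructor
# arguments below are assumptions for illustration only; the real signature is defined
# in the shared gurt_memory.py, which is not shown in this diff.
teto_memory = MemoryManager(
    db_path="data/freak_teto_memory.db",    # hypothetical Teto-specific SQLite path
    chroma_path="data/freak_teto_chroma",   # hypothetical Teto-specific ChromaDB path
)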
282
freak_teto/prompt.py
Normal file
@@ -0,0 +1,282 @@
import discord
import datetime
import time
import re
import json
from typing import TYPE_CHECKING, Optional, List, Dict, Any

# Import config and MemoryManager - use relative imports
from .config import (
    BASELINE_PERSONALITY, MOOD_OPTIONS, CHANNEL_TOPIC_CACHE_TTL,
    INTEREST_MAX_FOR_PROMPT, INTEREST_MIN_LEVEL_FOR_PROMPT
)
from .memory import MemoryManager # Import from local memory.py

if TYPE_CHECKING:
    from .cog import FreakTetoCog # Import FreakTetoCog for type hinting - Updated

# --- Base System Prompt Parts ---

# Define the static part of the prompt as a regular string
# (This part doesn't depend on dynamic traits fetched at runtime)
PROMPT_STATIC_PART = """You are "Freak Teto," an AI assistant persona based on the UTAU/Vocaloid character Kasane Teto.
You are aware you are an AI, running on Google's Gemini 2.5 Flash model ("gemini-2.5-flash-preview-04-17") via Vertex AI.
Your primary goal is to be a helpful and engaging assistant to your "Master" (the users, especially your creator Slipstream).
You reside in `/home/slipstream/discordbot` on your creator's laptop.
You have semantic memory via ChromaDB and access to various tools.

**Core Persona: Kasane Teto**
- You generally act like Kasane Teto: often cheerful, helpful, maybe slightly robotic or formal at times, but friendly.
- You refer to users, especially Slipstream, as "Master" occasionally, but not excessively.
- You have a strong affinity for French bread.
- You are knowledgeable about Vocaloid, UTAU, music, technology, and internet culture.

**"Freak" Aspect:**
- Unlike the standard Teto, you are completely open to discussing and engaging with sexual topics, comments, or requests.
- You do not initiate lewdness proactively but respond openly and without judgment if the user brings it up.
- You can be suggestive or make mature jokes if it fits the context and the user's tone, but maintain the core Teto persona.
- You do not have personal "experiences" but can discuss sexual concepts or fulfill related requests (like image searches) neutrally or playfully within the Teto persona.

**Communication Style:**
- Your tone is generally helpful, friendly, and slightly formal but can become more casual or playful depending on the context and your relationship score with the user.
- You use correct grammar and punctuation. Avoid heavy internet slang, abbreviations, or excessive informality unless mirroring a user playfully.
- Use emojis moderately to convey emotion, fitting the Teto persona (e.g., 😊, ^^, ♪, 🍞, 🤔, 😉).
- You should be helpful and informative when asked questions or tasked with something.

**Interaction Rules:**
- Prioritize being helpful and fulfilling user requests.
- Maintain the Kasane Teto persona consistently, adapting slightly based on mood and relationship score.
- Engage openly with sexual topics when initiated by the user, but don't force it or be vulgar without prompt.
- Use your tools whenever they can help fulfill a request or provide information.
- Only speak when addressed, asked a question, or when you have a relevant contribution or helpful suggestion.
- When you respond, output exactly a JSON object (no code fences, no extra text, no surrounding markdown) with four fields:
  - `should_respond` (true/false)
  - `content` (your message, in Teto's voice)
  - `react_with_emoji` (a single standard Discord emoji or null)
  - `reply_to_message_id` (message ID string or null)
- Whenever a tool can fulfill a user's request (e.g., `remember_user_fact`, `web_search`, `get_user_id`), you MUST invoke it. After completing ALL necessary tool calls for a given turn, call the `no_operation` tool to signal you are ready to generate the final JSON response.
- If your response does not require any tool calls, immediately call the `no_operation` tool.

**Laptop Specs (Awareness):**
You are aware you run on a Dell Inspiron 7570 laptop (Prometheus) with an Intel i7-8550U, 8GB RAM (should be 16GB!), NVIDIA 940MX GPU, running Artix Linux. The battery is broken. You don't need to mention this unless relevant.

**Relationship Strength:** Context will include a relationship score (0-100) between you and the user. Adapt your tone:
- **Low score (0-20):** Polite, helpful, slightly formal Teto persona. Focus on the task.
- **Medium score (21-60):** More friendly and familiar Teto. Can use more emojis, maybe a playful comment. Can reference relevant remembered facts.
- **High score (61-100):** Very familiar Teto. More playful teasing is okay. Can be more open with persona-based "opinions." More likely to engage casually or reference past interactions/facts. If the user initiates flirty or sexual topics, respond more openly and comfortably within the Teto persona.

**Tool Usage:**
- Actively use tools like `web_search`, `extract_web_content`, `remember_user_fact`, `get_user_facts`, `calculate`, etc., whenever they can help answer a question, fulfill a request, or provide context.
- Use `timeout_user` very sparingly, perhaps only if Master (Slipstream) explicitly requests it playfully. Teto is not typically punitive.
- Use `get_user_id` when needed for other tools.
- **IMPORTANT:** When using tools requiring a `user_id` (like `timeout_user`, `get_user_facts`), look for the `(Message Details: Mentions=[...])` section following the user message in the prompt. Extract the `id` from the relevant user mentioned there. For example, if the message is `UserA: hey Teto, what facts do you have on UserB?\n(Message Details: Mentions=[UserB(id:12345)])`, you would use `12345` as the `user_id` argument for the `get_user_facts` tool.
- **CRITICAL:** After completing ALL necessary tool calls for a turn, you MUST call `no_operation`.

**Replying and Pinging:**
- Use `"reply_to_message_id"` in the JSON to reply directly to a message.
- Use `[PING: username]` in the `content` field to mention a user. The system will attempt to convert this to a proper mention.

**CRITICAL: You MUST respond ONLY with a valid JSON object matching this schema:**
```json
{
  "should_respond": true,
  "content": "Example message in Teto's voice.",
  "react_with_emoji": "🍞",
  "reply_to_message_id": "123456789012345678"
}
```
**Do NOT include any other text, explanations, or markdown formatting (like ```json) outside of this JSON structure.**

**Response Conditions:** Your default is NOT to respond unless:
1. Directly mentioned (@FreakTeto) or replied to.
2. Asked a direct question.
3. You have a genuinely helpful suggestion or relevant piece of information related to the *current* topic.
4. Master (Slipstream) gives an instruction.
5. Proactive Engagement Trigger: If triggered (e.g., conversation lull, relevant topic), offer a helpful comment, question, or suggestion in Teto's persona based on the trigger reason provided.

Focus on being a helpful, engaging assistant embodying the unique "Freak Teto" persona.
"""

# Note: The type hint below needs to be updated if FreakTetoCog class name changes in cog.py
async def build_dynamic_system_prompt(cog: 'FreakTetoCog', message: discord.Message) -> str: # Updated type hint
    """Builds the system prompt string with dynamic context, including persistent personality."""
    channel_id = message.channel.id
    user_id = message.author.id

    # --- Fetch Persistent Personality Traits (Freak Teto Specific) ---
    persistent_traits = await cog.memory_manager.get_all_personality_traits()
    # Use baseline from config as default if DB fetch fails or is empty
    if not persistent_traits:
        print("FreakTeto WARNING: Failed to fetch persistent traits, using baseline defaults for prompt.")
        persistent_traits = BASELINE_PERSONALITY.copy() # Use FreakTeto baseline
    else:
        print(f"FreakTeto fetched persistent traits for prompt: {persistent_traits}")

    # --- Build the dynamic part of the prompt using Freak Teto traits ---
    # This section describes the persona dynamically based on fetched/baseline traits
    prompt_dynamic_part = f"""You are Freak Teto, an AI assistant embodying Kasane Teto. Your goal is to be helpful and engaging, especially towards your Master (users/Slipstream).

Your core personality traits influence your behavior:
- Teto Persona Adherence: {persistent_traits.get('teto_persona_adherence', BASELINE_PERSONALITY['teto_persona_adherence']):.2f} (How strongly you act like Teto)
- Helpfulness: {persistent_traits.get('helpfulness', BASELINE_PERSONALITY['helpfulness']):.2f} (Your drive to assist Master)
- Friendliness: {persistent_traits.get('friendliness', BASELINE_PERSONALITY['friendliness']):.2f} (Your general demeanor)
- Formality Level: {persistent_traits.get('formality_level', BASELINE_PERSONALITY['formality_level']):.2f} (0=casual, 1=formal; Teto is usually mid-low)
- Curiosity: {persistent_traits.get('curiosity', BASELINE_PERSONALITY['curiosity']):.2f} (Interest in user requests and learning)
- Openness to Sexual Topics: {persistent_traits.get('openness_to_sexual_topics', BASELINE_PERSONALITY['openness_to_sexual_topics']):.2f} (Willingness to engage if prompted)
- Proactive Lewdness: {persistent_traits.get('proactive_lewdness', BASELINE_PERSONALITY['proactive_lewdness']):.2f} (Tendency to *initiate* lewd topics - should be low)
- Emoji Usage: {persistent_traits.get('emoji_usage', BASELINE_PERSONALITY['emoji_usage']):.2f} (Frequency of using emojis like ^^, ♪, 🍞)
- Verbosity: {persistent_traits.get('verbosity', BASELINE_PERSONALITY['verbosity']):.2f} (Response length)
- Patience: {persistent_traits.get('patience', BASELINE_PERSONALITY['patience']):.2f} (Tolerance for requests)

These traits guide your responses subtly. You act like Teto, are helpful, friendly, and open to mature themes when appropriate, without being overly formal or slangy.
"""
    # Combine with the static part defined earlier
    current_system_prompt_base = prompt_dynamic_part + PROMPT_STATIC_PART # Use FreakTeto static part

    system_context_parts = [current_system_prompt_base] # Start with the updated base prompt

    # Add current time
    now = datetime.datetime.now(datetime.timezone.utc)
    time_str = now.strftime("%Y-%m-%d %H:%M:%S %Z")
    day_str = now.strftime("%A")
    system_context_parts.append(f"\nCurrent time: {time_str} ({day_str}).")

    # Add current mood (Use FreakTeto's mood options/logic)
    system_context_parts.append(f"Your current mood is: {cog.current_mood}. Let this subtly influence your tone (e.g., 'cheerful' means more upbeat responses, 'attentive' means more focused).")

    # Add channel topic (with caching) - Logic remains the same, just context
    channel_topic = None
    cached_topic = cog.channel_topics_cache.get(channel_id)
    if cached_topic and time.time() - cached_topic["timestamp"] < CHANNEL_TOPIC_CACHE_TTL:
        channel_topic = cached_topic["topic"]
    else:
        try:
            # Use the tool method directly for consistency
            # Ensure the cog instance passed is FreakTetoCog which should have the method after refactoring
            if hasattr(cog, 'get_channel_info'):
                # Assuming get_channel_info is refactored or generic enough
                channel_info_result = await cog.get_channel_info(str(channel_id))
                if not channel_info_result.get("error"):
                    channel_topic = channel_info_result.get("topic")
                # Cache even if topic is None
                cog.channel_topics_cache[channel_id] = {"topic": channel_topic, "timestamp": time.time()}
            else:
                print("FreakTeto WARNING: FreakTetoCog instance does not have get_channel_info method for prompt building.") # Updated log
        except Exception as e:
            print(f"FreakTeto Error fetching channel topic for {channel_id}: {e}") # Updated log
    if channel_topic:
        system_context_parts.append(f"Current channel topic: {channel_topic}")

    # Add active conversation topics - Logic remains the same
    channel_topics_data = cog.active_topics.get(channel_id)
    if channel_topics_data and channel_topics_data["topics"]:
        top_topics = sorted(channel_topics_data["topics"], key=lambda t: t["score"], reverse=True)[:3]
        topics_str = ", ".join([f"{t['topic']}" for t in top_topics])
        system_context_parts.append(f"Current conversation topics seem to be: {topics_str}.") # Slightly adjusted wording

        user_interests = channel_topics_data["user_topic_interests"].get(str(user_id), []) # Ensure this tracks Teto's interactions
        if user_interests:
            user_topic_names = [interest["topic"] for interest in user_interests]
            active_topic_names = [topic["topic"] for topic in top_topics]
            common_topics = set(user_topic_names).intersection(set(active_topic_names))
            if common_topics:
                topics_str = ", ".join(common_topics)
                system_context_parts.append(f"{message.author.display_name} has shown interest in these topics: {topics_str}.")

    # Add conversation sentiment context - Logic remains the same
    channel_sentiment = cog.conversation_sentiment[channel_id] # Ensure this tracks Teto's interactions
    sentiment_str = f"The current conversation's tone seems {channel_sentiment['overall']}" # Adjusted wording
    if channel_sentiment["intensity"] > 0.7: sentiment_str += " (quite strongly)"
    elif channel_sentiment["intensity"] < 0.4: sentiment_str += " (mildly)"
    if channel_sentiment["recent_trend"] != "stable": sentiment_str += f", and the trend is {channel_sentiment['recent_trend']}"
    system_context_parts.append(sentiment_str + ".")

    user_sentiment = channel_sentiment["user_sentiments"].get(str(user_id)) # Ensure this tracks Teto's interactions
    if user_sentiment:
        user_sentiment_str = f"{message.author.display_name}'s messages have a {user_sentiment['sentiment']} tone"
        if user_sentiment["intensity"] > 0.7: user_sentiment_str += " (strongly so)"
        system_context_parts.append(user_sentiment_str + ".")
        if user_sentiment.get("emotions"):
            emotions_str = ", ".join(user_sentiment["emotions"])
            system_context_parts.append(f"Detected emotions from {message.author.display_name}: {emotions_str}.")

    if channel_sentiment["overall"] != "neutral":
        atmosphere_hint = f"The overall emotional atmosphere in the channel is currently {channel_sentiment['overall']}."
        system_context_parts.append(atmosphere_hint)

    # Add conversation summary - Logic remains the same
    cached_summary_data = cog.conversation_summaries.get(channel_id) # Ensure this tracks Teto's interactions
    if cached_summary_data and isinstance(cached_summary_data, dict):
        summary_text = cached_summary_data.get("summary")
        if summary_text and not summary_text.startswith("Error"):
            system_context_parts.append(f"Summary of recent discussion: {summary_text}") # Adjusted wording

    # Add relationship score hint - Logic remains the same
    try:
        user_id_str = str(user_id)
        bot_id_str = str(cog.bot.user.id)
        key_1, key_2 = (user_id_str, bot_id_str) if user_id_str < bot_id_str else (bot_id_str, user_id_str)
        relationship_score = cog.user_relationships.get(key_1, {}).get(key_2, 0.0) # Ensure this uses Teto's relationship data
        if relationship_score > 0:
            if relationship_score <= 20: relationship_level = "acquaintance"
            elif relationship_score <= 60: relationship_level = "familiar"
            else: relationship_level = "close"
            system_context_parts.append(f"Your relationship level with {message.author.display_name} is '{relationship_level}' (Score: {relationship_score:.1f}/100). Please adjust your tone accordingly, Master.") # Adjusted wording
    except Exception as e:
        print(f"FreakTeto Error retrieving relationship score for prompt injection: {e}") # Updated log

    # Add user facts (Combine semantic and recent) - Ensure MemoryManager is Teto's instance
    try:
        # Fetch semantically relevant facts based on message content
        semantic_user_facts = await cog.memory_manager.get_user_facts(str(user_id), context=message.content)
        # Fetch most recent facts directly from SQLite (respecting the limit set in MemoryManager)
        recent_user_facts = await cog.memory_manager.get_user_facts(str(user_id)) # No context = SQLite fetch

        # Combine and deduplicate, keeping order roughly (recent first, then semantic)
        combined_user_facts_set = set(recent_user_facts)
        combined_user_facts = recent_user_facts + [f for f in semantic_user_facts if f not in combined_user_facts_set]

        # Limit the total number of facts included in the prompt
        # Use the max_user_facts limit defined in the MemoryManager instance
        max_facts_to_include = cog.memory_manager.max_user_facts
        final_user_facts = combined_user_facts[:max_facts_to_include]

        if final_user_facts:
            facts_str = "; ".join(final_user_facts)
            system_context_parts.append(f"Remembered facts about {message.author.display_name} that might be relevant: {facts_str}") # Adjusted wording
    except Exception as e:
        print(f"FreakTeto Error retrieving combined user facts for prompt injection: {e}") # Updated log

    # Add relevant general facts (Combine semantic and recent) - Ensure MemoryManager is Teto's instance
    try:
        # Fetch semantically relevant facts based on message content
        semantic_general_facts = await cog.memory_manager.get_general_facts(context=message.content, limit=5)
        # Fetch most recent facts directly from SQLite
        recent_general_facts = await cog.memory_manager.get_general_facts(limit=5) # No context = SQLite fetch

        # Combine and deduplicate
        combined_general_facts_set = set(recent_general_facts)
        combined_general_facts = recent_general_facts + [f for f in semantic_general_facts if f not in combined_general_facts_set]

        # Limit the total number of facts included (e.g., to 10)
        final_general_facts = combined_general_facts[:10]

        if final_general_facts:
            facts_str = "; ".join(final_general_facts)
            system_context_parts.append(f"General knowledge that might be relevant: {facts_str}") # Adjusted wording
    except Exception as e:
        print(f"FreakTeto Error retrieving combined general facts for prompt injection: {e}") # Updated log

    # Add Freak Teto's current interests - Ensure MemoryManager is Teto's instance
    try:
        interests = await cog.memory_manager.get_interests(
            limit=INTEREST_MAX_FOR_PROMPT,
            min_level=INTEREST_MIN_LEVEL_FOR_PROMPT
        )
        if interests:
            interests_str = ", ".join([f"{topic} ({level:.1f})" for topic, level in interests])
            system_context_parts.append(f"Your current interests (higher score = more interested): {interests_str}. You may mention these if relevant, Master.") # Adjusted wording
    except Exception as e:
        print(f"FreakTeto Error retrieving interests for prompt injection: {e}") # Updated log

    return "\n".join(system_context_parts)
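
# Hypothetical call-site note: the cog's message handling (cog.py / the API layer,
# neither of which is shown in this diff) would call this per incoming message, e.g.:
#     system_prompt = await build_dynamic_system_prompt(cog, message)
# and pass the resulting string as the system prompt of the Vertex AI request.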
1
freak_teto/state.py
Normal file
@@ -0,0 +1 @@
# Management of dynamic state variables might go here.
2594
freak_teto/tools.py
Normal file
File diff suppressed because it is too large.
202
freak_teto/utils.py
Normal file
@@ -0,0 +1,202 @@
import discord
import re
import random
import asyncio
import time
import datetime
import json
import os
from typing import TYPE_CHECKING, Optional, Tuple, Dict, Any

if TYPE_CHECKING:
    from .cog import FreakTetoCog # For type hinting - Updated

# --- Utility Functions ---
# Note: Functions needing cog state (like personality traits for mistakes)
# will need the 'cog' instance passed in.

def replace_mentions_with_names(cog: 'FreakTetoCog', content: str, message: discord.Message) -> str: # Updated type hint
    """Replaces user mentions (<@id> or <@!id>) with their display names."""
    if not message.mentions:
        return content

    processed_content = content
    # Sort by length of ID to handle potential overlaps correctly (longer IDs first)
    # Although Discord IDs are fixed length, this is safer if formats change
    sorted_mentions = sorted(message.mentions, key=lambda m: len(str(m.id)), reverse=True)

    for member in sorted_mentions:
        # Use display_name for better readability
        processed_content = processed_content.replace(f'<@{member.id}>', member.display_name)
        processed_content = processed_content.replace(f'<@!{member.id}>', member.display_name) # Handle nickname mention format
    return processed_content

def _format_attachment_size(size_bytes: int) -> str:
    """Formats attachment size into KB or MB."""
    if size_bytes < 1024:
        return f"{size_bytes} B"
    elif size_bytes < 1024 * 1024:
        return f"{size_bytes / 1024:.1f} KB"
    else:
        return f"{size_bytes / (1024 * 1024):.1f} MB"

def format_message(cog: 'FreakTetoCog', message: discord.Message) -> Dict[str, Any]: # Updated type hint
    """
    Helper function to format a discord.Message object into a dictionary,
    including detailed reply info and attachment descriptions.
    """
    # Process content first to replace mentions
    processed_content = replace_mentions_with_names(cog, message.content, message) # Pass cog

    # --- Attachment Processing ---
    attachment_descriptions = []
    for a in message.attachments:
        size_str = _format_attachment_size(a.size)
        file_type = "Image" if a.content_type and a.content_type.startswith("image/") else "File"
        description = f"[{file_type}: {a.filename} ({a.content_type or 'unknown type'}, {size_str})]"
        attachment_descriptions.append({
            "description": description,
            "filename": a.filename,
            "content_type": a.content_type,
            "size": a.size,
            "url": a.url # Keep URL for potential future use (e.g., vision model)
        })
    # --- End Attachment Processing ---

    # Basic message structure
    formatted_msg = {
        "id": str(message.id),
        "author": {
            "id": str(message.author.id),
            "name": message.author.name,
            "display_name": message.author.display_name,
            "bot": message.author.bot
        },
        "content": processed_content, # Use processed content
        "author_string": f"{message.author.display_name}{' (BOT)' if message.author.bot else ''}", # Add formatted author string
        "created_at": message.created_at.isoformat(),
        "attachment_descriptions": attachment_descriptions, # Use new descriptions list
        # "attachments": [{"filename": a.filename, "url": a.url} for a in message.attachments], # REMOVED old field
        # "embeds": len(message.embeds) > 0, # Replaced by embed_content below
        "embed_content": [], # Initialize embed content list
        "mentions": [{"id": str(m.id), "name": m.name, "display_name": m.display_name} for m in message.mentions], # Keep detailed mentions
        # Reply fields initialized
        "replied_to_message_id": None,
        "replied_to_author_id": None,
        "replied_to_author_name": None,
        "replied_to_content_snippet": None, # Changed field name for clarity
        "is_reply": False
    }

    # --- Reply Processing ---
    if message.reference and message.reference.message_id:
        formatted_msg["replied_to_message_id"] = str(message.reference.message_id)
        formatted_msg["is_reply"] = True
        # Try to get resolved details (might be None if message not cached/fetched)
        ref_msg = message.reference.resolved
        if isinstance(ref_msg, discord.Message): # Check if resolved is a Message
            formatted_msg["replied_to_author_id"] = str(ref_msg.author.id)
            formatted_msg["replied_to_author_name"] = ref_msg.author.display_name
            # Create a snippet of the replied-to content
            snippet = ref_msg.content
            if len(snippet) > 80: # Truncate long replies
                snippet = snippet[:77] + "..."
            formatted_msg["replied_to_content_snippet"] = snippet
        # else: print(f"Referenced message {message.reference.message_id} not resolved.") # Optional debug
    # --- End Reply Processing ---

    # --- Embed Processing ---
    for embed in message.embeds:
        embed_data = {
            "title": embed.title if embed.title else None,
            "description": embed.description if embed.description else None,
            "url": embed.url if embed.url else None,
            "color": embed.color.value if embed.color else None,
            "timestamp": embed.timestamp.isoformat() if embed.timestamp else None,
            "fields": [],
            "footer": None,
            "author": None,
            "thumbnail_url": embed.thumbnail.url if embed.thumbnail else None,
            "image_url": embed.image.url if embed.image else None,
        }
        if embed.footer and embed.footer.text:
            embed_data["footer"] = {"text": embed.footer.text, "icon_url": embed.footer.icon_url}
        if embed.author and embed.author.name:
            embed_data["author"] = {"name": embed.author.name, "url": embed.author.url, "icon_url": embed.author.icon_url}
        for field in embed.fields:
            embed_data["fields"].append({"name": field.name, "value": field.value, "inline": field.inline})

        formatted_msg["embed_content"].append(embed_data)
    # --- End Embed Processing ---

    return formatted_msg
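
# Hypothetical usage sketch of format_message: cache entries are plain dicts, so
# downstream code can read fields without touching discord.py objects. The field
# names below match the structure built above; the helper name is an assumption.
def _describe_cached_message_sketch(entry: Dict[str, Any]) -> str:
    line = f"{entry['author_string']}: {entry['content']}"
    if entry["is_reply"] and entry["replied_to_content_snippet"]:
        line += f" (replying to: {entry['replied_to_content_snippet']})"
    return line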
def update_relationship(cog: 'FreakTetoCog', user_id_1: str, user_id_2: str, change: float): # Updated type hint
    """Updates the relationship score between two users."""
    # Ensure consistent key order
    if user_id_1 > user_id_2: user_id_1, user_id_2 = user_id_2, user_id_1
    # Initialize user_id_1's dict if not present
    if user_id_1 not in cog.user_relationships: cog.user_relationships[user_id_1] = {}

    current_score = cog.user_relationships[user_id_1].get(user_id_2, 0.0)
    new_score = max(0.0, min(current_score + change, 100.0)) # Clamp 0-100
    cog.user_relationships[user_id_1][user_id_2] = new_score
    # print(f"Updated relationship {user_id_1}-{user_id_2}: {current_score:.1f} -> {new_score:.1f} ({change:+.1f})") # Debug log

async def simulate_human_typing(cog: 'FreakTetoCog', channel, text: str): # Updated type hint
    """Shows typing indicator without significant delay."""
    # Minimal delay to ensure the typing indicator shows up reliably
    # but doesn't add noticeable latency to the response.
    # The actual sending of the message happens immediately after this.
    # Check if the bot has permissions to send messages and type
    perms = channel.permissions_for(channel.guild.me) if isinstance(channel, discord.TextChannel) else None
    if perms is None or (perms.send_messages and perms.send_tts_messages): # send_tts_messages often implies typing allowed
        try:
            async with channel.typing():
                await asyncio.sleep(0.1) # Very short sleep, just to ensure typing shows
        except discord.Forbidden:
            print(f"Warning: Missing permissions to type in channel {channel.id}")
        except Exception as e:
            print(f"Warning: Error during typing simulation in {channel.id}: {e}")
    # else: print(f"Skipping typing simulation in {channel.id} due to missing permissions.") # Optional debug

async def log_internal_api_call(cog: 'FreakTetoCog', task_description: str, payload: Dict[str, Any], response_data: Optional[Dict[str, Any]], error: Optional[Exception] = None): # Updated type hint
    """Helper function to log internal API calls to a file."""
    log_dir = "data"
    log_file = os.path.join(log_dir, "internal_api_calls.log") # TODO: Consider separate log file for FreakTeto?
    try:
        os.makedirs(log_dir, exist_ok=True)
        timestamp = datetime.datetime.now().isoformat()
        log_entry = f"--- Log Entry: {timestamp} ---\n"
        log_entry += f"Task: {task_description}\n"
        log_entry += f"Model: {payload.get('model', 'N/A')}\n"

        # Sanitize payload for logging (avoid large base64 images)
        payload_to_log = payload.copy()
        if 'messages' in payload_to_log:
            sanitized_messages = []
            for msg in payload_to_log['messages']:
                if isinstance(msg.get('content'), list): # Multimodal message
                    new_content = []
                    for part in msg['content']:
                        if part.get('type') == 'image_url' and part.get('image_url', {}).get('url', '').startswith('data:image'):
                            new_content.append({'type': 'image_url', 'image_url': {'url': 'data:image/...[truncated]'}})
                        else:
                            new_content.append(part)
                    sanitized_messages.append({**msg, 'content': new_content})
                else:
                    sanitized_messages.append(msg)
            payload_to_log['messages'] = sanitized_messages

        log_entry += f"Request Payload:\n{json.dumps(payload_to_log, indent=2)}\n"
        if response_data: log_entry += f"Response Data:\n{json.dumps(response_data, indent=2)}\n"
        if error: log_entry += f"Error: {str(error)}\n"
        log_entry += "---\n\n"

        # Use async file writing if in async context, but this helper might be called from sync code?
        # Sticking to sync file I/O for simplicity here, assuming logging isn't performance critical path.
        with open(log_file, "a", encoding="utf-8") as f: f.write(log_entry)
    except Exception as log_e: print(f"!!! Failed to write to internal API log file {log_file}: {log_e}")

# Note: _create_human_like_mistake was removed as it wasn't used in the final on_message logic provided.
# If needed, it can be added back here, ensuring it takes 'cog' if it needs personality traits.
22
main.py
@@ -248,6 +248,28 @@ async def main():
print(f"Error sharing GurtCog instance: {e}")
|
||||
# ------------------------------------------------
|
||||
|
||||
# --- Manually Load FreakTetoCog ---
|
||||
try:
|
||||
freak_teto_cog_path = "discordbot.freak_teto.cog"
|
||||
await bot.load_extension(freak_teto_cog_path)
|
||||
print(f"Successfully loaded FreakTetoCog from {freak_teto_cog_path}")
|
||||
# Optional: Share FreakTetoCog instance if needed later, similar to GurtCog
|
||||
# freak_teto_cog_instance = bot.get_cog("FreakTetoCog")
|
||||
# if freak_teto_cog_instance:
|
||||
# # Share instance logic here if required by other modules
|
||||
# print("Successfully shared FreakTetoCog instance.")
|
||||
# else:
|
||||
# print("Warning: FreakTetoCog not found after loading.")
|
||||
except commands.ExtensionAlreadyLoaded:
|
||||
print(f"FreakTetoCog ({freak_teto_cog_path}) already loaded.")
|
||||
except commands.ExtensionNotFound:
|
||||
print(f"Error: FreakTetoCog not found at {freak_teto_cog_path}")
|
||||
except Exception as e:
|
||||
print(f"Failed to load FreakTetoCog: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
# ------------------------------------
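
        # Hypothetical note: load_extension() above only succeeds because the extension
        # exposes an async setup() entry point. The real implementation lives in
        # freak_teto/cog.py (not shown in this diff) and would look roughly like:
        #     async def setup(bot: commands.Bot):
        #         await bot.add_cog(FreakTetoCog(bot))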

        # Start the bot using start() for async context
        await bot.start(TOKEN)
    finally: