feat: bot now remembers communication history per person

This commit is contained in:
Daniel Eder 2025-09-27 19:28:25 +02:00
parent 81f6c38df1
commit bd884d938f

View file

@ -4,7 +4,7 @@ import re
import sys
from collections import deque
from dataclasses import dataclass, field
from typing import Deque, Dict, Optional, Pattern, Tuple
try:
from litellm import completion
@ -33,7 +33,9 @@ class IntelligentCommunicationAgent(Agent):
)
)
last_output: str = field(default="", init=False)
max_history_chars: int = 16_000
pending_replies: Deque[Tuple[str, str]] = field(default_factory=deque, init=False)
conversation_history: Dict[str, Deque[Tuple[str, str]]] = field(default_factory=dict, init=False)
def observe(self, output: str) -> None:
if not output:
@ -46,17 +48,19 @@ class IntelligentCommunicationAgent(Agent):
continue
self.pending_replies.append((player, message))
print(f"[Agent] Received message from {player}: {message}")
self._append_history(player, "user", message)
def decide(self) -> Optional[str]:
if not self.pending_replies:
return None
player, _ = self.pending_replies.popleft()
reply_text = self._generate_reply(player)
self._append_history(player, "assistant", reply_text)
reply = f"teile {player} mit {reply_text}"
print(f"[Agent] Replying to {player} with model output")
return reply
def _generate_reply(self, player: str) -> str:
if completion is None:
print(
"[Agent] litellm is not installed; falling back to default reply",
@ -67,16 +71,7 @@ class IntelligentCommunicationAgent(Agent):
try:
response = completion(
model=self.model,
messages=self._build_messages(player),
{"role": "system", "content": self.system_prompt},
{
"role": "user",
"content": (
f"Spieler {player} schreibt: {message}\n"
"Formuliere eine kurze, freundliche Antwort."
),
},
],
temperature=self.temperature,
max_tokens=self.max_output_tokens,
)
@ -90,3 +85,29 @@ class IntelligentCommunicationAgent(Agent):
return self.fallback_reply
return content or self.fallback_reply
def _build_messages(self, player: str) -> list[dict[str, str]]:
history = self.conversation_history.get(player)
messages: list[dict[str, str]] = [{"role": "system", "content": self.system_prompt}]
if not history:
return messages
for role, content in history:
messages.append({"role": role, "content": content})
return messages
def _append_history(self, player: str, role: str, content: str) -> None:
if not content:
return
history = self.conversation_history.setdefault(player, deque())
history.append((role, content))
self._trim_history(history)
def _trim_history(self, history: Deque[Tuple[str, str]]) -> None:
total_chars = sum(len(content) for _, content in history)
while len(history) > 1 and total_chars > self.max_history_chars:
_, removed_content = history.popleft()
total_chars -= len(removed_content)
if history and total_chars > self.max_history_chars:
role, content = history.pop()
trimmed = content[-self.max_history_chars :]
history.append((role, trimmed))