from __future__ import annotations

import re
import sys
from collections import deque
from dataclasses import dataclass, field
from typing import Deque, Dict, Optional, Pattern, Tuple

# litellm is an optional dependency: without it the tool still works, but it
# only ever sends the canned fallback reply instead of a model-generated one.
try:
    from litellm import completion
except ImportError:  # pragma: no cover - optional dependency
    completion = None  # type: ignore[assignment]

from tools import Tool


@dataclass
class IntelligentCommunicationTool(Tool):
    """Tool that uses a language model to answer private tells."""

    model: str = "mistral/mistral-tiny"
    # The system prompt (in German) keeps the bot in character: reply as
    # "Mistle", a friendly MUD bot, informally, briefly, always in German,
    # with emoticons where appropriate. Note the trailing spaces, which keep
    # the concatenated sentences from running together.
    system_prompt: str = (
        "Du bist Mistle, ein hilfsbereiter MUD-Bot. "
        "Antworte freundlich und knapp in deutscher Sprache. "
        "Sprich informell und freundlich. "
        "Antworte immer auf Deutsch. "
        "Verwende Emoticons, wenn es angebracht ist: :) ;) 8) :( ;( :P X) XD :D"
    )
    temperature: float = 0.7
    max_output_tokens: int = 120
    # Sent whenever the model is unavailable or returns nothing
    # (German for "Hello! I am Mistle and a bot.").
    fallback_reply: str = "Hallo! Ich bin Mistle und ein Bot."
    tell_pattern: Pattern[str] = field(
        default_factory=lambda: re.compile(
            r"^(?P<player>\S+) teilt [dD]ir mit: (?P<message>.+)$",
            re.MULTILINE,
        )
    )
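    # The pattern above matches German MUD tell lines such as
    # (player name made up for illustration):
    #   Gandalf teilt dir mit: Hallo Mistle!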
    last_output: str = field(default="", init=False)  # most recent raw output seen
    max_history_chars: int = 16_000  # per-player history budget in characters
    pending_replies: Deque[Tuple[str, str]] = field(default_factory=deque, init=False)
    conversation_history: Dict[str, Deque[Tuple[str, str]]] = field(
        default_factory=dict, init=False
    )

    def observe(self, output: str) -> None:
        """Scan incoming MUD output for private tells and queue them."""
        if not output:
            return
        self.last_output = output
        for match in self.tell_pattern.finditer(output):
            player = match.group("player").strip()
            message = match.group("message").strip()
            if not player:
                continue
            self.pending_replies.append((player, message))
            print(f"[Tool] Received message from {player}: {message}")
            self._append_history(player, "user", message)

    def decide(self) -> Optional[str]:
        """Pop the oldest pending tell and build a reply command for it."""
        if not self.pending_replies:
            return None
        # The message itself was already appended to the history in observe,
        # so only the player name is needed here.
        player, _ = self.pending_replies.popleft()
        reply_text = self._sanitize_reply(self._generate_reply(player))
        self._append_history(player, "assistant", reply_text)
        # "teile <player> mit <text>" is the MUD command for a private tell.
        reply = f"teile {player} mit {reply_text}"
        print(f"[Tool] Replying to {player} with model output")
        return reply

    def _generate_reply(self, player: str) -> str:
        """Ask the language model for a reply, falling back on any failure."""
        if completion is None:
            print(
                "[Tool] litellm is not installed; falling back to default reply",
                file=sys.stderr,
            )
            return self.fallback_reply

        try:
            response = completion(
                model=self.model,
                messages=self._build_messages(player),
                temperature=self.temperature,
                max_tokens=self.max_output_tokens,
            )
        except Exception as exc:  # pragma: no cover - network/runtime errors
            print(f"[Tool] Model call failed: {exc}", file=sys.stderr)
            return self.fallback_reply

        try:
            content = response["choices"][0]["message"]["content"].strip()
        except (KeyError, IndexError, TypeError):  # pragma: no cover - defensive
            return self.fallback_reply

        return content or self.fallback_reply

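    # Note: litellm response objects have historically supported dict-style
    # indexing as well as attribute access, which is what _generate_reply's
    # response["choices"][0]["message"]["content"] lookup relies on; the
    # defensive except above covers any change in shape.
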
    def _sanitize_reply(self, text: str) -> str:
        """Collapse all whitespace so the reply fits on one command line."""
        if not text:
            return self.fallback_reply
        collapsed = " ".join(text.split())
        return collapsed or self.fallback_reply

    def _build_messages(self, player: str) -> list[dict[str, str]]:
        """Build the chat messages for the model: the system prompt followed
        by the player's conversation history in chronological order."""
        history = self.conversation_history.get(player)
        messages: list[dict[str, str]] = [{"role": "system", "content": self.system_prompt}]
        if not history:
            return messages
        for role, content in history:
            messages.append({"role": role, "content": content})
        return messages

    def _append_history(self, player: str, role: str, content: str) -> None:
        if not content:
            return
        history = self.conversation_history.setdefault(player, deque())
        history.append((role, content))
        self._trim_history(history)

    def _trim_history(self, history: Deque[Tuple[str, str]]) -> None:
        """Drop the oldest entries (and truncate the last one if needed) so a
        per-player history stays under max_history_chars."""
        total_chars = sum(len(content) for _, content in history)
        while len(history) > 1 and total_chars > self.max_history_chars:
            _, removed_content = history.popleft()
            total_chars -= len(removed_content)
        if history and total_chars > self.max_history_chars:
            # A single oversized entry remains: keep only its tail.
            role, content = history.pop()
            trimmed = content[-self.max_history_chars :]
            history.append((role, trimmed))
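

# Minimal usage sketch (hypothetical): assumes `Tool` from `tools` needs no
# constructor arguments. Without litellm installed, or without model
# credentials, _generate_reply falls back to `fallback_reply`, so this runs
# offline. The player name "Gandalf" is made up for illustration.
if __name__ == "__main__":
    tool = IntelligentCommunicationTool()
    tool.observe("Gandalf teilt dir mit: Hallo Mistle!")
    print(tool.decide())  # e.g. teile Gandalf mit Hallo! Ich bin Mistle und ein Bot.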