@banteg · Last active March 16, 2025
claude plays slay the princess
import os
import random
import re
import sys
import tomllib

import rich
import rpyc
import soundfile as sf
from kokoro import KPipeline
from openai import OpenAI
from rpyc.utils.server import ThreadedServer
from sqlite_utils import Database
with open("slay_the_princess.toml", "rb") as f:
    CONFIG = tomllib.load(f)
rich.print(f"[bold green]CONFIG:[/] {CONFIG}")

class KokoroService:
    def __init__(self):
        self.pipeline = KPipeline(lang_code="a", repo_id="hexgrad/Kokoro-82M")
        self.counter = 0

    def clean_text_for_voice(self, text):
        for stop in ["• ", "Say nothing."]:
            text = text.replace(stop, "")
        for stress in ["{b}"]:
            text = text.replace(stress, ", ")
        for markup in [r"\(.+?\)", r"\[.+?\]", r"\{.+?\}"]:
            text = re.sub(markup, "", text)
        text = re.sub(r'"(.+?)"', r"\1", text)  # remove quotes
        text = text.replace("''", '"')
        return text
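    # e.g. clean_text_for_voice('• "Take the {i}blade{/i}."') -> 'Take the blade.'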
    def generate(self, text, voice="af_heart"):
        text = self.clean_text_for_voice(text)
        if not text.strip():
            return None
        rich.print(f"[yellow]GENERATING VOICE:[/] {text} (voice={voice})")
        path = f"game/audio/voices/claude/{self.counter}.wav"
        generator = self.pipeline(
            text,
            voice=voice,  # <= change voice here
            speed=0.8,  # split_pattern=r'\n+'
        )
        for i, (gs, ps, audio) in enumerate(generator):
            print(i)
            # note: each chunk writes to the same path, so only the last
            # segment survives if the pipeline splits the text
            sf.write(path, audio, 24000)
        self.counter += 1
        return path.replace("game/", "")
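
# Example (sketch): KokoroService().generate("Hello.") writes
# game/audio/voices/claude/0.wav and returns "audio/voices/claude/0.wav",
# a path relative to game/ that the Ren'Py client below can load and play.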

class LLMService:
    def __init__(self, model, app=None):
        self.client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=os.environ["OPENROUTER_API_KEY"],
        )
        self.model = model
        self.app = app

    def generate(self, messages):
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
        )
        rich.print(f"RESPONSE: {response}")
        if getattr(response, "error", None):
            rich.print(f"[bold red]LLM ERROR:[/] {response.error}")
            return None
        self.app.history.save_completion(messages, response)
        return response.choices[0].message.content

class GameStateService:
    def __init__(self):
        self.db = Database("slay_the_princess.db")
        if CONFIG["db"]["clean"]:
            self.clear()

    def get_history(self):
        return list(self.db["history"].rows)

    def add(self, message):
        self.db["history"].insert(message)

    def clear(self):
        self.db["history"].drop(ignore=True)

    def with_message(self, message):
        return self.get_history() + [message]

    def save_completion(self, messages, response):
        self.db["completions"].insert({"messages": messages, "response": response.model_dump()})
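
# sqlite-utils serializes the nested messages/response structures to JSON
# text columns, so full prompt/completion pairs stay queryable in the db.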

class DialogueService(rpyc.Service):
    def __init__(self):
        self.use_llm = True
        self.kokoro = KokoroService()
        self.llm = LLMService(CONFIG["llm"]["model"], app=self)
        self.history = GameStateService()
        rich.print("[bold yellow]INITIALIZED SERVER[/]")

    def on_connect(self, conn):
        print("[Server] Client connected")

    def on_disconnect(self, conn):
        print("[Server] Client disconnected")

    def exposed_debug(self, msg):
        rich.print(f"[dim]DEBUG:[/] {msg}")

    def exposed_kokoro(self, text, voice="af_heart"):
        return self.kokoro.generate(text, voice)
    # ===== DIALOGUE =====
    def exposed_receive_dialogue(self, who, what):
        what = self.clean_dialogue(what)
        self.history.add({"role": "user", "content": f"DIALOGUE: {who}: {what}"})
        rich.print(f"[green]DIALOGUE:[/] [yellow]{who}:[/] {what}")

    def exposed_reflect_on_death(self):
        """
        Prompt the LLM with all past dialogue, choices, and reflections,
        then add the new reflection to history.
        """
        rich.print("[bold red]REFLECTING ON DEATH[/]")
        prompt = (
            "You are playing a psychological horror visual novel. You have just died in the game. "
            "Reflect on your previous choices and their consequences. "
            "Adjust your goals and strategy accordingly for future decisions. "
            "Briefly summarize using two paragraphs for reflection and another paragraph for strategy. "
            "You can use italics for emphasis (*like this*)."
        )
        response = self.llm.generate(
            self.history.with_message({"role": "user", "content": prompt})
        )
        rich.print(f"[blue]RESPONSE:[/] {response}")
        self.history.add({"role": "assistant", "content": f"REFLECTION: {response}"})
        return response
    # ===== CHOICES =====
    def exposed_receive_choice(self, options):
        rich.print(
            f"[bold green]CHOICE MENU:[/] got {len(options)} options",
            [self.clean_dialogue(o["label"]) for o in options],
        )
        if len(options) == 1:
            rich.print("[bold red]ONLY ONE OPTION AVAILABLE[/]")
            return {
                "label": options[0]["label"],
                "value": options[0]["value"],
                "thoughts": "",
            }
        selected = None
        if self.use_llm:
            prompt = self.prompt_from_options(options)
            # rich.print(f"[blue]PROMPT:[/] {prompt}")
            response = self.llm.generate(
                self.history.with_message({"role": "user", "content": prompt})
            )
            rich.print(f"[blue]RESPONSE:[/] {response}")
            selected = self.parse_choice(options, response)
        if selected is not None:
            self.history.add(
                {
                    "role": "assistant",
                    "content": f"CHOICE: {selected['value']}: {self.clean_dialogue(selected['label'])}\nTHOUGHTS: {selected['thoughts']}",
                }
            )
            rich.print(
                f"[bold green]MY CHOICE:[/] [yellow]{selected['value']}[/]: {self.clean_dialogue(selected['label'])}"
            )
            rich.print(f"[bold yellow]THOUGHTS:[/] {selected['thoughts']}")
            return selected
        rich.print("[bold red]NO LLM, RANDOM CHOICE[/]")
        selected = random.choice(options)
        thoughts = "LLM couldn't choose, so I picked at random."
        rich.print(
            f"[bold green]MY CHOICE:[/] [yellow]{selected['value']}[/]: {self.clean_dialogue(selected['label'])}"
        )
        rich.print(f"[bold yellow]THOUGHTS:[/] {thoughts}")
        return {
            "label": selected["label"],
            "value": selected["value"],
            "thoughts": thoughts,
        }
    def prompt_from_options(self, options):
        """
        Build a prompt listing the available choices, with instructions
        for the reply format that parse_choice expects.
        """
        formatted_choices = "\n".join(
            f"{o['value']}: {self.clean_dialogue(o['label'])}" for o in options
        )
        prompt = (
            "You are playing a psychological horror visual novel. "
            "Consider your past choices, dialogue, and strategic reflections when choosing your next action.\n\n"
            f"Available choices:\n{formatted_choices}\n\n"
            "Respond clearly with:\n"
            "Reasoning: [reasoning behind your decision, no more than 2-3 sentences, don't mention choice numbers here]\n"
            "Choice: [choice number]"
        )
        return prompt
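    # A well-formed reply looks like (example):
    #   Reasoning: Dying to the blade twice suggests talking first is safer.
    #   Choice: 2
    # parse_choice below matches the two labeled lines case-insensitively.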
    def parse_choice(self, options, content):
        if content is None:
            rich.print("[bold red]NO CONTENT[/]")
            return None
        reasoning = ""
        choice = None
        for line in content.splitlines():
            if line.lower().startswith("reasoning:"):
                reasoning = line.split(":", maxsplit=1)[1].strip()
            elif line.lower().startswith("choice:"):
                match = re.search(r"choice:\s*(\d+)", line.lower())
                if match:
                    choice = match.group(1)
        selected = next((o for o in options if o["value"] == int(choice or -1)), None)
        if selected is None:
            rich.print(f"[bold red]INVALID AI CHOICE:[/] {choice}")
            return None
        return {
            "label": selected["label"],
            "value": selected["value"],
            "thoughts": reasoning,
        }
    # ===== UTILS =====
    def clean_dialogue(self, text):
        for stop in ["• "]:
            text = text.replace(stop, "")
        for markup in [r"\{.+?\}"]:
            text = re.sub(markup, "", text)
        text = text.replace("''", '"').replace("[[", "[")
        return text

if __name__ == "__main__":
    try:
        t = ThreadedServer(DialogueService, port=18861)
        print("Server listening on localhost:18861...")
        t.start()
    except KeyboardInterrupt:
        sys.exit(0)
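
# Run the server above first, then launch the game: the block below lives in a
# Ren'Py script file (name assumed, e.g. script.rpy) and connects on startup.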
init python:
    import re
    import time
    import rpyc
    from concurrent.futures import ThreadPoolExecutor

    claude = Character(
        "Clawed",
        color="#D4A37F",
        what_color="#D4A37F",
        what_outlines=[(3, "#000000")],
        who_outlines=[(3, "#000000")],
        what_style="voice_style",
        ctc="ctc_blink",
        ctc_position="nestled",
    )
    server = rpyc.connect("127.0.0.1", 18861, config={"sync_request_timeout": 300})

    # --- claude thoughts ---
    claude_thoughts = ""
    claude_thoughts_expiry = None
    claude_thoughts_duration = 15.0
    claude_reflection = ""
    show_reflection = False
    reflection_duration = 25.0

    pool = ThreadPoolExecutor()
    def call_async(f, *args, **kwargs):
        """
        Call the server without blocking the UI thread: a worker thread
        waits on the RPC while the main thread polls the future.
        """
        future = pool.submit(f, *args, **kwargs)
        while not future.done():
            renpy.pause(0.1)
        return future.result()
    def set_claude_thoughts(text):
        global claude_thoughts, claude_thoughts_expiry
        claude_thoughts = text
        if text:
            claude_thoughts_expiry = time.time() + claude_thoughts_duration
        else:
            claude_thoughts_expiry = None

    def should_hide_claude_thoughts():
        if claude_thoughts_expiry is None:
            return True
        return time.time() > claude_thoughts_expiry

    def check_claude_thoughts_expiry():
        global claude_thoughts
        if claude_thoughts and should_hide_claude_thoughts():
            claude_thoughts = ""

    def start_reflection():
        global show_reflection, claude_reflection
        reflection = call_async(server.root.reflect_on_death)
        claude_reflection = clean_thoughts(reflection)
        show_reflection = True
        renpy.show_screen("claude_reflection_screen")
    # --- log ---
    def log_debug(msg):
        server.root.debug(msg)

    def log_history(history):
        if '{fast}' in history.what:
            return
        server.root.receive_dialogue(history.who, history.what)
        # trigger a reflection on death
        if "and you die" in history.what.lower():
            start_reflection()

    config.history_callbacks = [log_history]
    config.periodic_callbacks = [check_claude_thoughts_expiry]
    # --- choice menu ---
    def clean_choice(text):
        text = text.strip('"').strip("'")
        for stop in ['• ', '(Explore) ', "''"]:
            text = text.replace(stop, '')
        return f'{text}\n'

    def clean_thoughts(text):
        """
        Convert markdown emphasis (*like this* or **like this**)
        to Ren'Py italics.
        """
        text = text.replace('**', '*')
        text = re.sub(r'\*(.+?)\*', r'{i}\1{/i}', text)
        return text
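    # e.g. clean_thoughts("**Stay calm.** Observe.") -> "{i}Stay calm.{/i} Observe."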
    def play_voice(voice_path):
        if voice_path is not None and renpy.loadable(voice_path):
            renpy.music.play(voice_path, channel='voice')

    def menu(items, **add_input):
        # items are (label, choice) pairs; choice.value identifies the option
        options = [{"label": item[0], "value": item[1].value} for item in items]
        set_claude_thoughts("Thinking...")
        result = call_async(server.root.receive_choice, options)
        voice_path = call_async(server.root.kokoro, result['label'], voice='af_bella')
        set_claude_thoughts(clean_thoughts(result['thoughts']))
        renpy.invoke_in_main_thread(play_voice, voice_path)
        renpy.say(claude, clean_choice(result['label']))
        return result['value']
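
# Defining menu inside an init python block shadows Ren'Py's default menu
# handler, so every in-game choice is answered by the server instead of the
# player; renpy.say then speaks the chosen line as Clawed.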
# ==== SCREENS ====
screen claude_thoughts_overlay():
    zorder 100  # make sure it appears above other elements

    # only show when there are thoughts to display
    if claude_thoughts and not should_hide_claude_thoughts():
        frame:
            background "#00000040"  # semi-transparent black background
            xalign 0.95  # position on right side
            yalign 0.5  # middle of screen
            xsize 500  # width of thought bubble
            padding (20, 20)

            vbox:
                spacing 10
                text "Clawed's Thoughts" style "thoughts_header"
                text claude_thoughts style "thoughts_text"

# custom styles for the thoughts overlay
style thoughts_header:
    color "#D4A37F"  # match Clawed's color
    size 26
    outlines [(2, "#00000080")]
    font "DejaVuSans.ttf"

style thoughts_text:
    color "#FFFFFF"
    size 20
    outlines [(2, "#00000080")]
    font "DejaVuSans.ttf"

init python:
    config.overlay_screens.append("claude_thoughts_overlay")
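
# Screens in config.overlay_screens are drawn over every interaction, so the
# thoughts bubble persists across dialogue without being re-shown by hand.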
# --- reflection ---
screen claude_reflection_screen():
    if show_reflection:
        modal True
        zorder 200

        # timer for auto-advance
        timer reflection_duration action [SetVariable("show_reflection", False), Hide("claude_reflection_screen")]

        # full-screen background
        add "#000000CC"

        # click anywhere to advance
        button:
            xfill True
            yfill True
            action [SetVariable("show_reflection", False), Hide("claude_reflection_screen")]
            alternate [SetVariable("show_reflection", False), Hide("claude_reflection_screen")]
            background None
            hover_background None

        frame:
            background "#111111E6"
            padding (20, 20)
            xalign 0.5
            yalign 0.5
            xsize 1600
            ysize 800

            vbox:
                spacing 10
                xfill True
                yfill True
                text "Clawed's Reflection on Death" style "reflection_header"
                text claude_reflection style "reflection_text"

        # blinking click-to-continue indicator
        add "ctc_blink" at Position(xalign=0.98, yalign=0.98)

style reflection_header:
    color "#D4A37F"
    size 26
    text_align 0.5
    xalign 0.5
    font "DejaVuSans.ttf"
    outlines [(3, "#000000")]

style reflection_text:
    color "#FFFFFF"
    size 20
    line_spacing 10
    font "DejaVuSans.ttf"
    outlines [(2, "#000000")]