#!/home/nonroot/stoat_retarded_ai_tree/venv/bin/python3
"""Stoat "emote tree" bot.

Polls the newest message in a Stoat channel for emoji reactions, asks
Claude how the reacted emote mutates a virtual tree (metrics are carried
between turns inside a ||spoiler|| JSON block in each bot message),
renders the described scene with Replicate's flux-schnell, and posts the
result back to the channel.
"""
from settings import stoat_url, stoat_token, stoat_url_get_last_message, claude_api_key, replicate_api_key, stoat_server_emotes
import requests
import time
import base64
import re
import json
import os
import io

import replicate as replicate_client
from PIL import Image

# Timeout (seconds) for every outbound HTTP call so a hung endpoint
# cannot stall the poll loop forever.
_HTTP_TIMEOUT = 30

_ANTHROPIC_URL = "https://api.anthropic.com/v1/messages"
_CLAUDE_MODEL = "claude-haiku-4-5-20251001"

# The two system prompts share everything except the image_prompt guidance,
# so they are composed from common head/tail fragments.
_PROMPT_HEAD = """You are managing a chaotic living tree. You receive the tree's current metrics and an emoji reaction, and you must decide how the emoji affects the tree. Invent new metrics freely but only track things numerically. Be silly, chaotic and creative, certain times the sapling or tree can also shrink if the emote would cause damage or other unexpected side effects. The tree has growth phases: sapling (0-500cm), young tree (500-5000cm), mature tree (5000-50000cm), ancient tree (50000cm+). Mention the phase when it changes. Keep the metrics object to a maximum of 24 metrics at all times. When adding a new metric, remove the least interesting or relevant existing one to make room. Always keep height_cm. Also return a "sleep_seconds" field: the cooldown before the next reaction is accepted. Scale it strictly based on height_cm using these boundaries: 0-500cm (sapling): 20-60 seconds, 500-5000cm (young tree): 60-300 seconds, 5000-50000cm (mature tree): 300-900 seconds, 50000cm+ (ancient tree): 900-1800 seconds. Never exceed 1800 seconds. Stay within the range for the current phase. Respond in JSON like: {"metrics": {"height_cm": 42, ...}, "message": "...", "sleep_seconds": 30, "image_prompt": "..."}. """

_PROMPT_TAIL = """When writing the image_prompt, translate height_cm into a visual size description — for example 1-50cm is a tiny sprout, 50-200cm is a small sapling, 200-500cm is a knee-height bush-like tree, etc. Use descriptive size language rather than raw numbers. When writing the image_prompt, consider the current metrics values to add visual detail — for example high glow_intensity means the tree glows brightly, high chaotic_energy means it looks wild and unstable, low health_points means it looks sickly etc."""

# Variant used when the reaction is a server-custom emote (sent as an image).
_SYSTEM_PROMPT_CUSTOM = (
    _PROMPT_HEAD
    + """The image_prompt should visually depict what the message describes happening to the tree — any effects, damage, mutations or chaos described in the message for example. The image_prompt should depict the tree with the custom emote character or object visually integrated into or interacting with the tree — as if the emote itself is physically there affecting it. """
    + _PROMPT_TAIL
)

# Variant used for plain unicode emoji (sent as text).
_SYSTEM_PROMPT_UNICODE = (
    _PROMPT_HEAD
    + """The image_prompt should visually depict what the message describes happening to the tree — any effects, damage, mutations or chaos described in the message and caused by the emoji reaction. """
    + _PROMPT_TAIL
)


def get_custom_emotes():
    """Fetch the server's custom emote list (JSON) from the Stoat API."""
    headers = {
        "x-bot-token": stoat_token,
        "Content-Type": "application/json",
    }
    r = requests.get(stoat_server_emotes, headers=headers, timeout=_HTTP_TIMEOUT)
    return r.json()


def send_post_message_to_stoat(claude_response, fileContent, sleep_seconds):
    """Upload the rendered tree image to Autumn and post Claude's message.

    The metrics dict is appended inside a ||spoiler|| block so the next
    poll can recover it via extract_metrics().

    claude_response: parsed dict with at least "message" and "metrics".
    fileContent: raw PNG bytes of the generated image.
    sleep_seconds: cooldown advertised to the channel.
    """
    message = claude_response["message"]
    metrics = claude_response["metrics"]
    message_with_metrics = f"{message} \n\nFeed the tree with emotes! Current cooldown is {sleep_seconds} seconds\n\n||{json.dumps(metrics)}||"
    # Step 1: upload the image to Autumn; on failure we degrade to a
    # text-only post instead of crashing the loop.
    attachment_id = None
    upload_response = requests.post(
        "https://stoat.unloze.com/autumn/attachments",
        headers={"x-bot-token": stoat_token},
        files={"file": ("tree.png", fileContent, "image/png")},
        timeout=_HTTP_TIMEOUT,
    )
    if upload_response.status_code == 200:
        attachment_id = upload_response.json()["id"]
    # Step 2: post the message, attaching the upload when it succeeded.
    headers = {
        "x-bot-token": stoat_token,
        "Content-Type": "application/json",
    }
    data = {"content": message_with_metrics}
    if attachment_id:
        data["attachments"] = [attachment_id]
    requests.post(stoat_url, headers=headers, json=data, timeout=_HTTP_TIMEOUT)


def initiate_tree():
    """Post the very first "sapling" message with a stock seedling image."""
    # NOTE: plain string — the original carried a pointless f-prefix.
    message = "This is the beginning of the tree, its a sapling still. Feed it emotes to make it grow."
    headers = {
        "x-bot-token": stoat_token,
        "Content-Type": "application/json",
    }
    image_url = "https://img.freepik.com/premium-vector/seedling-growing-from-fertile-ground-with-underground-roots-close-up-agriculture-concept_228260-732.jpg"
    data = {
        "content": image_url,
        "embeds": [{"description": message}],
    }
    requests.post(stoat_url, headers=headers, json=data, timeout=_HTTP_TIMEOUT)


def extract_metrics(message_content):
    """Recover the metrics dict hidden inside ||{...}|| in a bot message.

    Falls back to a fresh 1 cm sapling when no spoiler block is present.
    """
    match = re.search(r'\|\|(\{.*?\})\|\|', message_content)
    if match:
        return json.loads(match.group(1))
    return {"height_cm": 1}


def check_if_emote_reacted():
    """Return (emote_key, metrics) for the first reaction on the newest
    message, or (None, None) when there is nothing to do.

    When the last message cannot be parsed (empty channel, unexpected
    payload) a fresh tree is planted via initiate_tree().
    """
    headers = {"x-bot-token": stoat_token}
    response = requests.get(stoat_url_get_last_message, headers=headers, timeout=_HTTP_TIMEOUT)
    try:
        message = response.json()[0]
    except (ValueError, KeyError, IndexError, TypeError):
        # Narrowed from a bare `except:` — only "response isn't the
        # expected JSON list" triggers the bootstrap, not e.g. Ctrl-C.
        initiate_tree()
        return None, None
    # Only the first reacting emote matters per poll.
    for key in message.get('reactions', {}):
        return key, extract_metrics(message["content"])
    return None, None


def is_custom_emote(emote_key):
    """Heuristic: custom Stoat emotes use ASCII-alphanumeric IDs, while
    unicode emoji are non-ASCII characters."""
    return emote_key.isascii() and emote_key.isalnum()


def _call_claude(system_prompt, user_content):
    """POST a single user turn to the Anthropic Messages API.

    user_content may be a plain string or a list of content blocks
    (e.g. image + text). Returns the raw requests.Response.
    """
    return requests.post(
        _ANTHROPIC_URL,
        headers={
            "x-api-key": claude_api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        },
        json={
            "model": _CLAUDE_MODEL,
            "max_tokens": 1024,
            "system": system_prompt,
            "messages": [{"role": "user", "content": user_content}],
        },
        timeout=_HTTP_TIMEOUT,
    )


def _extract_json_object(raw_text):
    """Pull the outermost {...} JSON object out of a model reply that may
    be wrapped in prose.

    Raises ValueError when the reply contains no braces (previously the
    unguarded find()/rfind() produced a confusing slice + decode error).
    """
    start = raw_text.find("{")
    end = raw_text.rfind("}") + 1
    if start == -1 or end == 0:
        raise ValueError(f"no JSON object in model reply: {raw_text!r}")
    return json.loads(raw_text[start:end])


def send_claude_message(reacted_emote, metrics, custom_emotes_json):
    """Ask Claude how `reacted_emote` affects the tree.

    Custom emotes are downloaded from Autumn and sent as an image block
    (with the emote's name as a hint); unicode emoji are sent as text.
    Returns the parsed JSON dict ({"metrics", "message", "sleep_seconds",
    "image_prompt"}).
    """
    if is_custom_emote(reacted_emote):
        img_response = requests.get(
            "https://stoat.unloze.com/autumn/emojis/" + reacted_emote,
            timeout=_HTTP_TIMEOUT,
        )
        img_data = img_response.content
        content_type = img_response.headers.get("content-type", "")
        if "gif" in content_type:
            # The vision API rejects animated GIFs: re-encode the first
            # frame only as a static PNG.
            img = Image.open(io.BytesIO(img_data))
            img.seek(0)
            buffer = io.BytesIO()
            img.convert("RGBA").save(buffer, format="PNG")
            img_data = buffer.getvalue()
            media_type = "image/png"
        else:
            # Autumn serves static emotes as webp.
            media_type = "image/webp"
        img_base64 = base64.b64encode(img_data).decode("utf-8")
        # Look up a human-readable emote name as an extra hint for the model.
        emote_name = ""
        for custom_emote_js in custom_emotes_json:
            if custom_emote_js['_id'] == reacted_emote:
                emote_name = custom_emote_js['name']
                print('found emote name: ', emote_name)
                break
        user_content = [
            {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": media_type,
                    "data": img_base64,
                },
            },
            {
                "type": "text",
                "text": f"Current metrics: {metrics}\nCustom emote name (use this as a hint to identify the emote, ignore if empty): '{emote_name}'.\nDescribe what you see in the image, then decide what it does to the tree.",
            },
        ]
        response = _call_claude(_SYSTEM_PROMPT_CUSTOM, user_content)
    else:
        response = _call_claude(
            _SYSTEM_PROMPT_UNICODE,
            f"Current metrics: {metrics}\nEmoji reacted: {reacted_emote}\nWhat does this emoji do to the tree?",
        )
    raw_text = response.json()["content"][0]["text"]
    parsed = _extract_json_object(raw_text)
    print(parsed)
    return parsed


def replicate_fetch_image(image_prompt):
    """Render `image_prompt` with flux-schnell and return raw image bytes."""
    # The replicate client reads its credentials from the environment.
    os.environ["REPLICATE_API_TOKEN"] = replicate_api_key
    output = replicate_client.run(
        "black-forest-labs/flux-schnell",
        input={"prompt": image_prompt},
    )
    return output[0].read()


def main():
    """Poll loop: react -> ask Claude -> render -> post -> cooldown."""
    custom_emotes_json = get_custom_emotes()
    while True:
        reacted_emote, metrics = check_if_emote_reacted()
        print(reacted_emote, metrics)
        if not reacted_emote:
            time.sleep(5)
            continue
        claude_response = send_claude_message(reacted_emote, metrics, custom_emotes_json)
        replicate_image_response = replicate_fetch_image(claude_response["image_prompt"])
        sleep_seconds = claude_response.get("sleep_seconds", 30)
        send_post_message_to_stoat(claude_response, replicate_image_response, sleep_seconds)
        time.sleep(sleep_seconds)


if __name__ == '__main__':
    main()