#!/home/nonroot/stoat_retarded_ai_tree/venv/bin/python3
|
|
|
|
from settings import stoat_url, stoat_token, stoat_url_get_last_message, claude_api_key, replicate_api_key, stoat_server_emotes
|
|
import requests
|
|
import time
|
|
import base64
|
|
import re, json
|
|
import os
|
|
import replicate as replicate_client
|
|
from PIL import Image
|
|
import io
|
|
|
|
def get_custom_emotes():
    """Fetch the server's custom emote definitions from the stoat API.

    Returns the parsed JSON payload — iterated elsewhere as a sequence of
    emote objects carrying '_id' and 'name' keys (see send_claude_message).
    """
    headers = {
        "x-bot-token": stoat_token,  # token is already a string; no f-string needed
        "Content-Type": "application/json",
    }
    # timeout so a wedged API call cannot hang the bot at startup
    r = requests.get(stoat_server_emotes, headers=headers, timeout=30)
    # fail loudly on HTTP errors instead of trying to JSON-decode an error page
    r.raise_for_status()
    return r.json()
|
|
|
|
def send_post_message_to_stoat(claude_response, fileContent, sleep_seconds):
    """Post the tree update to stoat: upload the rendered image, then send
    the message with the metrics embedded in a spoiler block.

    claude_response: dict with "message" and "metrics" keys
        (as produced by send_claude_message).
    fileContent: raw PNG bytes of the rendered tree image.
    sleep_seconds: cooldown advertised to users in the message text.
    """
    message = claude_response["message"]
    metrics = claude_response["metrics"]
    # metrics ride along inside a ||spoiler|| so extract_metrics can recover them later
    message_with_metrics = f"{message} \n\nFeed the tree with emotes! Current cooldown is {sleep_seconds} seconds\n\n||{json.dumps(metrics)}||"

    # step 1: upload the image to autumn (best-effort: on failure we still
    # post the text message, just without the attachment)
    attachment_id = None
    upload_response = requests.post(
        "https://stoat.unloze.com/autumn/attachments",
        headers={"x-bot-token": stoat_token},
        files={"file": ("tree.png", fileContent, "image/png")},
        timeout=60,  # image uploads can be slow, but must not hang forever
    )
    #print("upload status:", upload_response.status_code, upload_response.text)
    if upload_response.status_code == 200:
        attachment_id = upload_response.json()["id"]

    # step 2: send the message, attaching the upload when it succeeded
    headers = {
        "x-bot-token": stoat_token,
        "Content-Type": "application/json",
    }
    data = {"content": message_with_metrics}
    if attachment_id:
        data["attachments"] = [attachment_id]

    requests.post(stoat_url, headers=headers, json=data, timeout=30)
|
|
|
|
def initiate_tree():
    """Post the initial sapling message (with a stock seedling image) when
    no previous tree message exists in the channel."""
    # plain string literal: the original used an f-string with no placeholders
    message = "This is the beginning of the tree, its a sapling still. Feed it emotes to make it grow."
    headers = {
        "x-bot-token": stoat_token,
        "Content-Type": "application/json",
    }
    image_url = "https://img.freepik.com/premium-vector/seedling-growing-from-fertile-ground-with-underground-roots-close-up-agriculture-concept_228260-732.jpg"
    data = {
        "content": image_url,
        "embeds": [{
            "description": message,
        }],
    }
    # timeout so a wedged API call cannot hang the polling loop
    requests.post(stoat_url, headers=headers, json=data, timeout=30)
|
|
|
|
def extract_metrics(message_content):
    """Recover the metrics dict embedded in a previous tree message.

    Metrics are stored as a spoiler block: ||{...json...}|| (written by
    send_post_message_to_stoat). Returns {"height_cm": 1} — a fresh
    sapling — when no block is present OR when the embedded text is not
    valid JSON (previously a corrupted block crashed the bot here).
    """
    match = re.search(r'\|\|(\{.*?\})\|\|', message_content)
    if match:
        try:
            return json.loads(match.group(1))
        except json.JSONDecodeError:
            # hand-edited / truncated spoiler block: restart from a sapling
            pass
    return {"height_cm": 1}
|
|
|
|
def check_if_emote_reacted():
    """Inspect the most recent channel message for emote reactions.

    Returns a (reaction_keys, metrics, attachment_url) triple when the last
    message carries reactions, otherwise (None, None, None). When the last
    message cannot be parsed at all, the tree is (re)initialised first.
    """
    headers = {
        "x-bot-token": stoat_token,
    }
    response = requests.get(stoat_url_get_last_message, headers=headers, timeout=30)
    try:
        message = response.json()[0]
    # narrowed from a bare `except:` which also swallowed KeyboardInterrupt:
    # ValueError covers invalid JSON, LookupError covers an empty/keyless payload
    except (ValueError, LookupError):
        initiate_tree()
        return None, None, None
    if 'reactions' in message:
        #print('reactions: ', message['reactions'])
        attachment = message["attachments"][0]
        attachment_url = f"https://stoat.unloze.com/autumn/attachments/{attachment['_id']}/tree.png"
        # reaction keys are custom-emote ids or unicode emoji characters
        keys = list(message['reactions'])
        return keys, extract_metrics(message["content"]), attachment_url
    return None, None, None
|
|
|
|
def is_custom_emote(emote_key):
    """Distinguish custom emote ids (plain ASCII alphanumerics) from
    unicode emoji reaction keys."""
    if not emote_key.isascii():
        return False
    return emote_key.isalnum()
|
|
|
|
def send_claude_message(reacted_emotes, metrics, custom_emotes_json, attachment_url):
    """Ask Claude to evolve the tree based on the reacted emotes.

    reacted_emotes: list of reaction keys (custom emote ids and/or unicode emojis).
    metrics: dict of current tree metrics (from extract_metrics).
    custom_emotes_json: server emote list (from get_custom_emotes), used to
        resolve custom emote ids to human-readable names.
    attachment_url: URL of the previous tree render, sent to Claude as visual context.

    Returns the JSON object parsed out of Claude's reply text — expected to
    contain "metrics", "message", "sleep_seconds" and "image_prompt" keys
    (per the system prompt's response format).
    """
    # previous render, base64-encoded for an Anthropic image content block
    prev_image_data = requests.get(attachment_url).content
    prev_image_base64 = base64.b64encode(prev_image_data).decode("utf-8")

    unicode_emotes = []
    custom_emote_dicts = []   # Anthropic image content blocks, one per custom emote
    custom_emotes_name = []   # resolved names, same order as custom_emote_dicts
    for emote in reacted_emotes:
        if is_custom_emote(emote):
            # fetch the emote image itself so Claude can see it
            img_response = requests.get("https://stoat.unloze.com/autumn/emojis/" + emote)
            img_data = img_response.content
            content_type = img_response.headers.get("content-type", "")
            #static or gif?
            if "gif" in content_type:
                # extract first frame only
                img = Image.open(io.BytesIO(img_data))
                img.seek(0)
                buffer = io.BytesIO()
                img.convert("RGBA").save(buffer, format="PNG")
                img_data = buffer.getvalue()
                media_type = "image/png"
            else:
                # NOTE(review): assumes every non-gif emote is webp — confirm
                # against what autumn actually serves
                media_type = "image/webp"

            img_base64 = base64.b64encode(img_data).decode("utf-8")

            # resolve the emote id to its name via the server emote list
            emote_name = ""
            for custom_emote_js in custom_emotes_json:
                if custom_emote_js['_id'] == emote:
                    emote_name = custom_emote_js['name']
                    print('found emote name: ', emote_name)
                    break
            custom_emotes_name.append(emote_name)
            custom_emote_dicts.append(
                {
                    "type": "image",
                    "source": {
                        "type": "base64",
                        "media_type": media_type,
                        "data": img_base64
                    }
                },
            )
        else:
            unicode_emotes.append(emote)

    custom_emotes_text = f"\nCustom emote names in order: '{custom_emotes_name}'." if custom_emote_dicts else ""
    unicode_text = f"\nUnicode emojis reacted: {unicode_emotes}" if unicode_emotes else ""
    response = requests.post(
        "https://api.anthropic.com/v1/messages",
        headers={
            "x-api-key": claude_api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json"
        },
        json={
            "model": "claude-haiku-4-5-20251001",
            "max_tokens": 2048,
            "system": """You are managing a living tree. You receive the tree's current metrics and one or many emoji reactions, and you must decide how the emojies affects the tree. Some of the emotes are unicode and some are custom. Invent new metrics freely but only track things numerically. Be silly, chaotic and creative. Based on the description of what happened to the tree, decide whether the tree grows or shrinks and by how much. Growth and shrink amounts should be proportional to the current phase — in the sapling phase typical changes are 1-50cm, young tree 1-100cm, mature tree 1-1000cm, ancient tree 1-10000cm but you can deviate. Not all emotes should cause growth — damaging, violent or destructive emotes should typically cause the tree to shrink. The tree has growth phases: sapling (0-500cm), young tree (500-50000cm), mature tree (50000-500000cm), ancient tree (500000cm+). Mention the phase when it changes. Keep the metrics object to a maximum of 24 metrics at all times. When adding a new metric, remove the least interesting or relevant existing one to make room. Always keep height_cm. Also return a "sleep_seconds" field: the cooldown before the next reactions are accepted. Scale it strictly based on height_cm using these boundaries: sapling: 20-60 seconds, young tree: 60-300 seconds, mature tree: 300-900 seconds, ancient tree: 900-1800 seconds. Never exceed 1800 seconds. Stay within the range for the current phase. Respond in JSON like: {"metrics": {"height_cm": 42, ...}, "message": "...", "sleep_seconds": 30, "image_prompt": "..."}. The image_prompt should visually depict what the message describes happening to the tree — any effects, damage, mutations or chaos described in the message for example. The image_prompt should depict the tree with the emotes characters or object visually integrated into or interacting with the tree — as if the emote itself is physically there affecting it or simply a part of the image. 
When writing the image_prompt, translate the tree's phase into a visual size description — for example if it is a tiny sprout, is a small sapling, is a knee-height bush-like tree, a forest of trees, a rainforest, one giant ancient tree, etc. Use descriptive size language rather than raw numbers. When writing the image_prompt, the tree must always be the main subject. Consider the current metrics values to add visual detail — for example high glow_intensity means the tree glows brightly, high chaotic_energy means it looks wild and unstable, low health_points means it looks sickly etc. Explicitly include every reacted emote in the image — if the emote represents a force, element or action (such as fire, water, explosion, sunlight, lightning, poison) show its effect directly on the tree. For everything else — characters, creatures, objects, constructions — depict them physically present in or around the tree. If you are unsure what an emote represents, depict it as a physical character or creature interacting with the tree.""",
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "source": {
                                "type": "base64",
                                # NOTE(review): previous render is uploaded as
                                # tree.png (image/png) but declared webp here —
                                # presumably autumn transcodes; confirm
                                "media_type": "image/webp",
                                "data": prev_image_base64
                            }
                        },
                        *custom_emote_dicts, #unpacking the list
                        {
                            "type": "text",
                            "text" : f"The first image is the previous tree state. {' The following images are the custom emotes reacted.' if custom_emote_dicts else ''} Carry over relevant visual elements from the previous tree. Current metrics: {metrics}\n{unicode_text}{custom_emotes_text}\nDescribe what you see and decide what the emotes do to the tree."
                        }
                    ]
                }
            ]
        }
    )
    json_response = response.json()
    raw_text = json_response["content"][0]["text"]
    # find the first { and last } to extract just the JSON object
    # (the model may wrap its JSON in extra prose or code fences)
    start = raw_text.find("{")
    end = raw_text.rfind("}") + 1
    parsed = json.loads(raw_text[start:end])
    #print(parsed)
    return parsed
|
|
|
|
def replicate_fetch_image(image_prompt):
    """Render the tree via Replicate's flux-schnell model and return the
    raw bytes of the first generated image."""
    # the replicate client reads its credentials from the environment
    os.environ["REPLICATE_API_TOKEN"] = replicate_api_key
    outputs = replicate_client.run(
        "black-forest-labs/flux-schnell",
        input={"prompt": image_prompt},
    )
    first_image = outputs[0]
    return first_image.read()
|
|
|
|
def main():
    """Poll the latest stoat message for reactions and evolve the tree forever."""
    custom_emotes_json = get_custom_emotes()
    while True:
        reacted_emotes, metrics, attachment_url = check_if_emote_reacted()
        print(reacted_emotes, metrics)

        # guard clause: nothing reacted yet, poll again shortly
        if not reacted_emotes:
            time.sleep(5)
            continue

        claude_response = send_claude_message(
            reacted_emotes, metrics, custom_emotes_json, attachment_url
        )
        tree_image = replicate_fetch_image(claude_response["image_prompt"])

        sleep_seconds = claude_response.get("sleep_seconds", 30)
        send_post_message_to_stoat(claude_response, tree_image, sleep_seconds)
        time.sleep(sleep_seconds)
|
|
|
|
# Script entry point: start the polling loop when executed directly.
if __name__ == '__main__':
    main()
|