add doodlebob
This commit is contained in:
@@ -6,6 +6,8 @@
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
export DISCORD_TOKEN=$(cat .token)
|
export DISCORD_TOKEN=$(cat .token)
|
||||||
|
export OPENAI_API_ENDPOINT="https://llama-cpp.reeselink.com"
|
||||||
|
export IMAGE_GEN_ENDPOINT="http://toybox.reeselink.com:1234/v1"
|
||||||
uv run main.py
|
uv run main.py
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
94
main.py
94
main.py
@@ -3,14 +3,20 @@ from discord.ext import commands
|
|||||||
import requests
|
import requests
|
||||||
import os
|
import os
|
||||||
import json
|
import json
|
||||||
|
import base64
|
||||||
|
from io import BytesIO
|
||||||
|
|
||||||
# Configuration is pulled from the environment once, at import time.
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN", "placeholder")

# Set your OpenAI API key as an environment variable
# You can also pass it directly but environment variables are safer
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "placeholder")

OPENAI_API_ENDPOINT = os.getenv("OPENAI_API_ENDPOINT")
IMAGE_GEN_ENDPOINT = os.getenv("IMAGE_GEN_ENDPOINT")

# Both service endpoints are mandatory: fail fast at startup rather than
# erroring later inside a command handler.
if not OPENAI_API_ENDPOINT:
    raise Exception("OPENAI_API_ENDPOINT required.")

if not IMAGE_GEN_ENDPOINT:
    raise Exception("IMAGE_GEN_ENDPOINT required.")
@@ -88,6 +94,56 @@ async def copilot(ctx, *, message: str):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@bot.command(name="doodlebob")
async def doodlebob(ctx, *, message: str):
    """Generate an image from *message* and post it to the channel.

    Two-stage pipeline: the chat model first rewrites the raw message into a
    detailed image-generation prompt, then the image endpoint renders that
    prompt and the resulting PNG is sent back as a Discord attachment.
    """
    await ctx.send(f"**Doodlebob erasing {message[:100]}...**")

    # Ask the chat model to turn the user's message into an image prompt.
    image_prompt_payload = {
        "model": "qwen3-vl-30b-a3b-instruct",
        "messages": [
            {
                "role": "system",
                "content": "Given the following message, convert it to a detailed image generation prompt that will be passed directly into an image generation model.",
            },
            {"role": "user", "content": message},
        ],
    }

    image_prompt = await call_llm(ctx, image_prompt_payload)

    # call_llm returns "" on failure and has already reported the error
    # to the channel, so we only log locally here.
    if not image_prompt:
        print("No image prompt supplied. Check for errors.")
        return

    await ctx.send(f"**Doodlebob calling drone strike on {image_prompt[:100]}...**")

    image_payload = {
        "model": "default",
        "prompt": image_prompt,
        "n": 1,
        "size": "1024x1024",
    }

    # FIX: requests.post raises (Timeout, ConnectionError, ...) on network
    # failure; previously that exception escaped the command handler uncaught.
    try:
        response = requests.post(
            f"{IMAGE_GEN_ENDPOINT}/images/generations",
            json=image_payload,
            timeout=120,
        )
    except requests.exceptions.RequestException as e:
        await ctx.send(f"Error: image generation request failed - {e}")
        return

    if response.status_code == 200:
        result = response.json()
        # OpenAI-compatible images API: base64 PNG lives in data[0].b64_json.
        image_data = BytesIO(base64.b64decode(result["data"][0]["b64_json"]))
        send_img = discord.File(image_data, filename="image.png")
        await ctx.send(file=send_img)
    else:
        print(f"❌ Error: {response.status_code}")
        print(response.text)
        return None
||||||
async def handle_chat(ctx, *, message: str, payload: dict, response_prefix: str):
|
async def handle_chat(ctx, *, message: str, payload: dict, response_prefix: str):
|
||||||
# Check if API key is set
|
# Check if API key is set
|
||||||
if not OPENAI_API_KEY:
|
if not OPENAI_API_KEY:
|
||||||
@@ -137,6 +193,44 @@ async def handle_chat(ctx, *, message: str, payload: dict, response_prefix: str)
|
|||||||
await ctx.send(f"Error: {str(e)}")
|
await ctx.send(f"Error: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
async def call_llm(ctx, payload: dict) -> str:
    """POST *payload* to the chat-completions endpoint and return the reply.

    Returns the generated text, or "" when the API key is missing or the
    request fails; every failure is also reported to the channel via ctx.send.
    """
    # Refuse to call out without credentials configured.
    if not OPENAI_API_KEY:
        await ctx.send(
            "Error: OpenAI API key is not configured. Please set the OPENAI_API_KEY environment variable."
        )
        return ""

    request_headers = {
        "Authorization": f"Bearer {OPENAI_API_KEY}",
        "Content-Type": "application/json",
    }

    try:
        api_response = requests.post(
            OPENAI_COMPLETIONS_URL, json=payload, headers=request_headers, timeout=300
        )
        api_response.raise_for_status()

        completion = api_response.json()

        # The first choice carries the assistant message we want.
        reply_text = completion["choices"][0]["message"]["content"].strip()
        print(reply_text)

        return reply_text

    except requests.exceptions.HTTPError as e:
        await ctx.send(f"Error: OpenAI API error - {e}")
    except requests.exceptions.Timeout:
        await ctx.send("Error: Request timed out. Please try again.")
    except Exception as e:
        await ctx.send(f"Error: {str(e)}")
    return ""
|
|
||||||
def get_last_messages() -> str:
    """Return the logged message history as one newline-joined string.

    Reads message_log.json (a JSON array of strings) from the working
    directory.
    """
    # FIX: the file was opened without `with`, leaking the handle, and with
    # no explicit encoding; use a context manager and json.load instead.
    with open("message_log.json", encoding="utf-8") as f:
        current_history: list = json.load(f)
    return "\n".join(current_history)
|
|||||||
Reference in New Issue
Block a user