Reconfigure software AI stack

This commit is contained in:
2026-03-16 09:54:13 -04:00
parent 9ae82fc3de
commit cc75227a77
15 changed files with 526 additions and 83 deletions

View File

@@ -0,0 +1,133 @@
import base64
import os
from datetime import datetime
from io import BytesIO
import requests
from PIL import Image
# Configuration
BASE_URL = "https://llama-cpp.reeselink.com"  # llama.cpp OpenAI-compatible server root
API_KEY = os.getenv("LLAMA_CPP_API_KEY", "")  # Set if required
def call_api(endpoint, method="GET", data=None, timeout=30):
    """Call the llama.cpp OpenAI-compatible API and return the raw response.

    Args:
        endpoint: Path under ``/v1/`` (e.g. ``"models"``, ``"chat/completions"``).
        method: HTTP method name passed straight to ``requests.request``.
        data: Optional JSON-serializable request body.
        timeout: Seconds before the HTTP request is aborted. New parameter
            with a default, so existing call sites are unaffected.

    Returns:
        The ``requests.Response``; callers inspect ``.json()`` / ``.status_code``.
    """
    url = f"{BASE_URL}/v1/{endpoint}"
    headers = {"Content-Type": "application/json"}
    if API_KEY:  # attach auth only when a key is configured
        headers["Authorization"] = f"Bearer {API_KEY}"
    # Bug fix: requests has NO default timeout, so a hung server would
    # block this call forever; always pass one.
    response = requests.request(method, url, headers=headers, json=data, timeout=timeout)
    return response
# 1. List Models
models_response = call_api("models")
models = models_response.json().get("data", [])
print(f"Available models: {[m['id'] for m in models]}")
# 2. Use First Model
# Bug fix: the comment promises the FIRST model but the code indexed [1]
# (the second model), which also raised IndexError when the server
# exposed only one model.
model_id = models[0]["id"]
# 3. Chat Completion
chat_data = {
    "model": model_id,
    "messages": [
        {"role": "system", "content": "You are helpful."},
        {"role": "user", "content": "Tell me about Everquest!"},
    ],
    "temperature": 0.95,
    "max_tokens": 100,
}
response = call_api("chat/completions", "POST", chat_data)
print(response.json()["choices"][0]["message"]["content"])
def describe_image(image_path, api_key=None, timeout=120):
    """Send an image to the vision model and return its text description.

    Args:
        image_path: Path to a local image file. The bytes are base64-encoded
            into a data URI labelled ``image/jpeg`` regardless of actual
            format — NOTE(review): a PNG would be mislabelled; confirm the
            server tolerates this.
        api_key: Optional bearer token; omitted when falsy.
        timeout: Seconds before the HTTP request is aborted. New parameter
            with a default, so existing call sites are unaffected.

    Returns:
        The description string on HTTP 200; otherwise prints the error and
        returns None (original best-effort behavior preserved).
    """
    base_url = "https://llama-cpp.reeselink.com"
    # Read and base64-encode the image for the data-URI payload
    with open(image_path, "rb") as f:
        encoded_image = base64.b64encode(f.read()).decode("utf-8")
    # Prepare headers
    headers = {"Content-Type": "application/json"}
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    # Create payload targeting the vision-capable model
    payload = {
        "model": "qwen3-vl-30b-a3b-instruct",  # 👁️ VISION MODEL
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe this image in detail"},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{encoded_image}"},
                    },
                ],
            }
        ],
        "max_tokens": 1000,
        "temperature": 0.7,
    }
    # Send request. Bug fix: requests has no default timeout, so this
    # could previously hang forever on an unresponsive server.
    response = requests.post(
        f"{base_url}/v1/chat/completions",
        headers=headers,
        json=payload,
        timeout=timeout,
    )
    if response.status_code == 200:
        return response.json()["choices"][0]["message"]["content"]
    print(f"Error: {response.status_code}")
    print(response.text)
    return None
# description = describe_image("generated-image.png", api_key="your_key")
# print(description)
def generate_image(prompt, **kwargs):
    """Generate an image via an OpenAI-compatible /images/generations API.

    Args:
        prompt: Text prompt describing the desired image.
        **kwargs: Extra generation options (e.g. ``negative_prompt``,
            ``steps``, ``guidance``) merged into the request payload.
            Bug fix: the original accepted these but silently dropped them,
            even though the usage below passes them.

    Returns:
        The parsed JSON response dict on success; otherwise prints the
        error and returns None.
    """
    base_url = "http://toybox.reeselink.com:1234/v1"
    payload = {"model": "default", "prompt": prompt, "n": 1, "size": "1024x1024"}
    # Forward caller-supplied options; caller keys override nothing above
    # except when explicitly repeated.
    payload.update(kwargs)
    # Bug fix: use the base_url variable (previously defined but unused,
    # with the URL literal duplicated inline).
    response = requests.post(
        f"{base_url}/images/generations",
        json=payload,
        timeout=120,
    )
    if response.status_code == 200:
        result = response.json()
        # Decode and save the base64-encoded image with a timestamped name
        image_data = base64.b64decode(result["data"][0]["b64_json"])
        img = Image.open(BytesIO(image_data))
        filename = f"generated_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
        img.save(filename)
        # Bug fix: original printed the literal text "(unknown)" instead
        # of the actual filename.
        print(f"✅ Saved: {filename}")
        return result
    print(f"❌ Error: {response.status_code}")
    print(response.text)
    return None
# Usage: request one 1024x1024 image and save it locally.
# NOTE(review): negative_prompt/steps/guidance arrive via **kwargs —
# verify generate_image actually forwards them in the request payload.
result = generate_image(
    prompt="A beautiful sunset over mountains, photorealistic",
    negative_prompt="blurry, low quality",
    steps=8,
    guidance=7.5,
)