# Tests all functions in the llama-wrapper.py file
# Run with: python -m pytest test_llama_wrapper.py -v
"""Smoke tests for the llama_wrapper module.

These tests exercise the wrapper functions against the configured API
endpoints.  They require a reachable endpoint to pass; without one the
HTTP call inside each wrapper will fail.
"""

import os

import pytest  # NOTE(review): currently unused; kept for future marks/fixtures
from discord import message  # NOTE(review): unused import; consider removing
from dotenv import load_dotenv

from ..llama_wrapper import (
    chat_completion_think,
    chat_completion_instruct,
    image_generation,
    image_edit,
    embeddings,
)

# BUG FIX: load_dotenv was imported but never invoked, so the os.getenv()
# lookups below could never pick up values from a .env file.
load_dotenv()

# API endpoints (overridable via the environment or a .env file).
OPENAI_API_CHAT_ENDPOINT = os.getenv(
    "OPENAI_API_CHAT_ENDPOINT", "https://llama-cpp.reeselink.com"
)
OPENAI_API_IMAGE_ENDPOINT = os.getenv("OPENAI_API_IMAGE_ENDPOINT")
OPENAI_API_EDIT_ENDPOINT = os.getenv("OPENAI_API_EDIT_ENDPOINT")
OPENAI_API_EMBED_ENDPOINT = os.getenv("OPENAI_API_EMBED_ENDPOINT")

# Default models (overridable via the environment or a .env file).
DEFAULT_CHAT_MODEL = os.getenv("DEFAULT_CHAT_MODEL", "qwen3.5-35b-a3b")
DEFAULT_EMBED_MODEL = os.getenv("DEFAULT_EMBED_MODEL", "text-embedding-3-small")
DEFAULT_IMAGE_MODEL = os.getenv("DEFAULT_IMAGE_MODEL", "dall-e-3")
DEFAULT_EDIT_MODEL = os.getenv("DEFAULT_EDIT_MODEL", "dall-e-2")


def test_chat_completion_think():
    """Smoke-test the think-style chat completion wrapper.

    Requires a live chat endpoint; fails at the HTTP call otherwise.
    """
    result = chat_completion_think(
        system_prompt="You are a helpful assistant.",
        user_prompt="Tell me about Everquest",
        openai_url=OPENAI_API_CHAT_ENDPOINT,
        openai_api_key="placeholder",
        model=DEFAULT_CHAT_MODEL,
        max_tokens=100,
    )
    # BUG FIX: the original discarded the return value, so the test could
    # never detect a missing/empty response.
    # NOTE(review): assumes the wrapper returns a non-None value on
    # success — confirm against llama_wrapper's contract.
    assert result is not None


def test_chat_completion_instruct():
    """Smoke-test the instruct-style chat completion wrapper.

    Requires a live chat endpoint; fails at the HTTP call otherwise.
    """
    result = chat_completion_instruct(
        system_prompt="You are a helpful assistant.",
        user_prompt="Tell me about Everquest",
        openai_url=OPENAI_API_CHAT_ENDPOINT,
        openai_api_key="placeholder",
        model=DEFAULT_CHAT_MODEL,
        max_tokens=100,
    )
    # BUG FIX: the original discarded the return value (see above).
    assert result is not None


def test_image_generation():
    """Placeholder: needs a live image endpoint (OPENAI_API_IMAGE_ENDPOINT)."""
    # TODO: call image_generation(...) and assert on the response once an
    # endpoint is available.
    pass


def test_image_edit():
    """Placeholder: needs a live edit endpoint (OPENAI_API_EDIT_ENDPOINT)."""
    # TODO: call image_edit(...) and assert on the response once an
    # endpoint is available.
    pass


def test_embeddings():
    """Placeholder: needs a live embeddings endpoint (OPENAI_API_EMBED_ENDPOINT)."""
    # TODO: call embeddings(...) and assert on the vector shape once an
    # endpoint is available.
    pass