WIP: code cleanup
This commit is contained in:
0
vibe_bot/tests/__init__.py
Normal file
0
vibe_bot/tests/__init__.py
Normal file
30
vibe_bot/tests/conftest.py
Normal file
30
vibe_bot/tests/conftest.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import os
|
||||
import pytest
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables for the test session: prefer .env.test,
# fall back to .env; do nothing if neither file exists.
_package_root = os.path.dirname(os.path.dirname(__file__))
for _candidate in ('.env.test', '.env'):
    _candidate_path = os.path.join(_package_root, _candidate)
    if os.path.exists(_candidate_path):
        load_dotenv(_candidate_path)
        print(f"✓ Loaded environment variables from {_candidate}")
        break
|
||||
@pytest.fixture(autouse=True, scope="session")
def verify_env_loaded():
    """Fail the session fast if critical environment variables are absent.

    A variable that is unset OR set to an empty string is treated as
    missing, since an empty value is just as unusable as an absent one
    (the original `var not in os.environ` check let empty strings through).

    Yields:
        None. Runs once per session before any test executes.
    """
    required_vars = [
        "DISCORD_TOKEN",
        "OPENAI_API_ENDPOINT",
        "IMAGE_GEN_ENDPOINT",
        "IMAGE_EDIT_ENDPOINT",
    ]

    # os.environ.get(var) is falsy for both unset and empty-string values.
    missing_vars = [var for var in required_vars if not os.environ.get(var)]
    if missing_vars:
        pytest.fail(f"Missing required environment variables: {', '.join(missing_vars)}")

    yield
||||
71
vibe_bot/tests/test_llama_wrapper.py
Normal file
71
vibe_bot/tests/test_llama_wrapper.py
Normal file
@@ -0,0 +1,71 @@
|
||||
# Tests all functions in the llama-wrapper.py file
|
||||
# Run with: python -m pytest test_llama_wrapper.py -v
|
||||
|
||||
from discord import message
|
||||
import pytest
|
||||
from ..llama_wrapper import (
|
||||
chat_completion_think,
|
||||
chat_completion_instruct,
|
||||
image_generation,
|
||||
image_edit,
|
||||
embeddings,
|
||||
)
|
||||
from dotenv import load_dotenv
|
||||
import os
|
||||
|
||||
# Endpoint configuration — resolved from the environment at import time.
_env = os.getenv
OPENAI_API_CHAT_ENDPOINT = _env("OPENAI_API_CHAT_ENDPOINT", "https://llama-cpp.reeselink.com")
OPENAI_API_IMAGE_ENDPOINT = _env("OPENAI_API_IMAGE_ENDPOINT")  # None when unset
OPENAI_API_EDIT_ENDPOINT = _env("OPENAI_API_EDIT_ENDPOINT")  # None when unset
OPENAI_API_EMBED_ENDPOINT = _env("OPENAI_API_EMBED_ENDPOINT")  # None when unset

# Default model names, each overridable via the environment.
DEFAULT_CHAT_MODEL = _env("DEFAULT_CHAT_MODEL", "qwen3.5-35b-a3b")
DEFAULT_EMBED_MODEL = _env("DEFAULT_EMBED_MODEL", "text-embedding-3-small")
DEFAULT_IMAGE_MODEL = _env("DEFAULT_IMAGE_MODEL", "dall-e-3")
DEFAULT_EDIT_MODEL = _env("DEFAULT_EDIT_MODEL", "dall-e-2")
||||
def test_chat_completion_think():
    """Smoke-test chat_completion_think against the configured endpoint.

    NOTE(review): no assertion is made on the response — this only checks
    the call completes without raising, and it will fail without a live
    API endpoint.
    """
    request = dict(
        system_prompt="You are a helpful assistant.",
        user_prompt="Tell me about Everquest",
        openai_url=OPENAI_API_CHAT_ENDPOINT,
        openai_api_key="placeholder",
        model=DEFAULT_CHAT_MODEL,
        max_tokens=100,
    )
    chat_completion_think(**request)
|
||||
|
||||
def test_chat_completion_instruct():
    """Smoke-test chat_completion_instruct against the configured endpoint.

    NOTE(review): no assertion is made on the response — this only checks
    the call completes without raising, and it will fail without a live
    API endpoint.
    """
    request = dict(
        system_prompt="You are a helpful assistant.",
        user_prompt="Tell me about Everquest",
        openai_url=OPENAI_API_CHAT_ENDPOINT,
        openai_api_key="placeholder",
        model=DEFAULT_CHAT_MODEL,
        max_tokens=100,
    )
    chat_completion_instruct(**request)
|
||||
|
||||
def test_image_generation():
    """Placeholder: image_generation is untested pending a live API endpoint."""
    # TODO: exercise image_generation once a test endpoint/fixture exists.
|
||||
|
||||
def test_image_edit():
    """Placeholder: image_edit is untested pending a live API endpoint."""
    # TODO: exercise image_edit once a test endpoint/fixture exists.
|
||||
|
||||
def test_embeddings():
    """Placeholder: embeddings is untested pending a live API endpoint."""
    # TODO: exercise embeddings once a test endpoint/fixture exists.
Reference in New Issue
Block a user