# (scraper metadata residue removed)
# Wraps the openai calls in generic functions
|
|
# Supports chat, image, edit, and embeddings
|
|
# Allows custom endpoints for each of the above supported functions
|
|
|
|
import openai
|
|
from typing import Iterable
|
|
from openai.types.chat import ChatCompletionMessageParam
|
|
from openai._types import FileTypes, SequenceNotStr
|
|
from typing import Union
|
|
from io import BufferedReader, BytesIO
|
|
|
|
|
|
def chat_completion(
    system_prompt: str,
    user_prompt: str,
    openai_url: str,
    openai_api_key: str,
    model: str,
    max_tokens: int = 1000,
) -> str:
    """Run a single-turn chat completion and return the reply text.

    Args:
        system_prompt: Instructions placed in the system message.
        user_prompt: The user's message content.
        openai_url: Base URL of the OpenAI-compatible endpoint.
        openai_api_key: API key for that endpoint.
        model: Model identifier to query.
        max_tokens: Upper bound on generated tokens.

    Returns:
        The assistant's reply stripped of surrounding whitespace, or ""
        when the response carries no content.
    """
    client = openai.OpenAI(base_url=openai_url, api_key=openai_api_key)
    conversation: Iterable[ChatCompletionMessageParam] = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    response = client.chat.completions.create(
        model=model, messages=conversation, max_tokens=max_tokens
    )

    # Assert that thinking was used: reasoning-capable endpoints surface it
    # via the non-standard "reasoning_content" extra field.
    message = response.choices[0].message
    if message.model_extra:
        assert message.model_extra.get("reasoning_content")

    content = message.content
    return content.strip() if content else ""
|
|
|
|
|
|
def chat_completion_with_history(
    system_prompt: str,
    prompts: Iterable[ChatCompletionMessageParam],
    openai_url: str,
    openai_api_key: str,
    model: str,
    max_tokens: int = 1000,
) -> str:
    """Run a chat completion over a full message history, with thinking disabled.

    Args:
        system_prompt: Instructions placed in the leading system message.
        prompts: Prior conversation turns appended after the system message.
        openai_url: Base URL of the OpenAI-compatible endpoint.
        openai_api_key: API key for that endpoint.
        model: Model identifier to query.
        max_tokens: Upper bound on generated tokens.

    Returns:
        The assistant's reply stripped of surrounding whitespace, or ""
        when the response carries no content.
    """
    client = openai.OpenAI(base_url=openai_url, api_key=openai_api_key)
    # Unpack rather than list-concatenate: `[...] + prompts` raises TypeError
    # for any non-list Iterable (tuple, generator), which the annotation allows.
    messages: list[ChatCompletionMessageParam] = [
        {"role": "system", "content": system_prompt},
        *prompts,
    ]
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        extra_body={
            # vLLM-style template switch that turns off the reasoning channel.
            "chat_template_kwargs": {"enable_thinking": False},
        },
        seed=-1,
    )

    # Thinking is disabled above, so no reasoning content should come back.
    # (Previously this asserted the opposite, contradicting enable_thinking=False.)
    message = response.choices[0].message
    if message.model_extra:
        assert not message.model_extra.get("reasoning_content")

    content = message.content
    return content.strip() if content else ""
|
|
|
|
|
|
def chat_completion_instruct(
    system_prompt: str,
    user_prompt: str,
    openai_url: str,
    openai_api_key: str,
    model: str,
    max_tokens: int = 1000,
) -> str:
    """Run a single-turn chat completion with model thinking disabled.

    Args:
        system_prompt: Instructions placed in the system message.
        user_prompt: The user's message content.
        openai_url: Base URL of the OpenAI-compatible endpoint.
        openai_api_key: API key for that endpoint.
        model: Model identifier to query.
        max_tokens: Upper bound on generated tokens.

    Returns:
        The assistant's reply stripped of surrounding whitespace, or ""
        when the response carries no content.
    """
    client = openai.OpenAI(base_url=openai_url, api_key=openai_api_key)
    messages: Iterable[ChatCompletionMessageParam] = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        extra_body={
            # vLLM-style template switch that turns off the reasoning channel.
            "chat_template_kwargs": {"enable_thinking": False},
        },
    )

    # Assert that thinking wasn't used: with enable_thinking=False no
    # reasoning content should come back. (Previously this asserted the
    # opposite, contradicting both this comment and the request above.)
    message = response.choices[0].message
    if message.model_extra:
        assert not message.model_extra.get("reasoning_content")

    content = message.content
    return content.strip() if content else ""
|
|
|
|
|
|
def image_generation(prompt: str, openai_url: str, openai_api_key: str, n=1) -> str:
    """Generates an image using the given prompt and returns the base64 encoded image data

    Returns:
        str: The base64 encoded image data. Decode and write to a file.
    """
    # NOTE(review): no response_format is requested, so b64_json being
    # populated depends on the endpoint's default — confirm with the target
    # server; an empty string is returned when it is absent.
    client = openai.OpenAI(base_url=openai_url, api_key=openai_api_key)
    result = client.images.generate(prompt=prompt, n=n, size="1024x1024")
    if not result.data:
        return ""
    return result.data[0].b64_json or ""
|
|
|
|
|
|
def image_edit(
    image: BufferedReader | BytesIO,
    prompt: str,
    openai_url: str,
    openai_api_key: str,
    n=1,
) -> str:
    """Edit an existing image according to *prompt*.

    Args:
        image: Open binary stream containing the source image.
        prompt: Instructions describing the desired edit.
        openai_url: Base URL of the OpenAI-compatible endpoint.
        openai_api_key: API key for that endpoint.
        n: Number of images to request; only the first is returned.

    Returns:
        str: Base64-encoded image data, or "" when the response has none.
             (Whether b64_json is populated depends on the endpoint's
             default response format — confirm with the target server.)
    """
    client = openai.OpenAI(base_url=openai_url, api_key=openai_api_key)
    result = client.images.edit(image=image, prompt=prompt, n=n, size="1024x1024")
    if not result.data:
        return ""
    return result.data[0].b64_json or ""
|
|
|
|
|
|
def embedding(
    text: str, openai_url: str, openai_api_key: str, model: str
) -> list[float]:
    """Embed *text* and return its vector as a flat list of floats.

    Args:
        text: The text to embed (sent as a single-element batch).
        openai_url: Base URL of the OpenAI-compatible endpoint.
        openai_api_key: API key for that endpoint.
        model: Embedding model identifier.

    Returns:
        The embedding vector, or [] when the response has no data.
    """
    client = openai.OpenAI(base_url=openai_url, api_key=openai_api_key)
    response = client.embeddings.create(
        input=[text], model=model, encoding_format="float"
    )
    if response.data:
        # The embeddings live under response.data; the response object itself
        # is not subscriptable (the old `response[0]` would raise TypeError).
        raw_data = response.data[0].embedding
        # Some endpoints return a flat vector, others wrap it in one extra
        # list; unwrap a single level only when actually nested. (The old
        # try/except returned raw_data[0] — a lone float — for flat vectors.)
        if raw_data and isinstance(raw_data[0], list):
            return raw_data[0]
        return raw_data
    return []
|