# Quadlet pod unit — read by podman-systemd (Quadlet) to create the pod.
# Repeated PublishPort= keys accumulate: each line maps host:container/protocol.

[Pod]
# llama.cpp
PublishPort=8000:8000/tcp
# open-webui
PublishPort=8080:8080/tcp
# anything-llm
PublishPort=3001:3001/tcp
# ollama
PublishPort=11434:11434/tcp