Skip to main content

LLM Client API

Interface for Ollama communication.

LLMClient

from owl.llm import LLMClient

client = LLMClient()

Methods

chat(messages, tools=None)

Non-streaming chat.

from owl.llm import Message

messages = [
    Message(role="system", content="You are helpful."),
    Message(role="user", content="Hello")
]

response = client.chat(messages, tools=None)
# response.content = "Hello! How can I help?"
# response.tool_calls = []

chat_stream(messages)

Streaming chat.

for chunk in client.chat_stream(messages):
    print(chunk, end="")

simple_query(prompt, system=None)

One-shot query.

result = client.simple_query(
    "Summarize this text: ...",
    system="Be concise."
)

health_check()

Check Ollama connectivity.

if client.health_check():
    print("Ollama is running")

list_models()

List available models.

models = client.list_models()
# ["llama2:latest", "mistral:latest", ...]

Message

@dataclass
class Message:
    role: str  # system, user, assistant, tool
    content: str
    tool_calls: List[dict] = None
    tool_name: str = None

Response

@dataclass
class Response:
    content: str
    tool_calls: List[ToolCall]

ToolCall

@dataclass
class ToolCall:
    name: str
    arguments: dict