
ollama

dandy.llm.request.ollama

OllamaRequestMessage

Bases: RequestMessage

content_as_str

Source code in dandy/llm/request/ollama.py
def content_as_str(self) -> str:
    return self.content
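A hedged usage sketch (keyword construction assumes the pydantic-style model implied by Bases: RequestMessage; the field names match those add_message passes through below):

from dandy.llm.request.ollama import OllamaRequestMessage

# role, content and images mirror the fields used by add_message
message = OllamaRequestMessage(role='user', content='Hello, Ollama!', images=None)
print(message.content_as_str())  # 'Hello, Ollama!'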

OllamaRequestOptions

Bases: BaseModel

num_ctx = None (class attribute, instance attribute)

num_predict = None (class attribute, instance attribute)

seed = None (class attribute, instance attribute)

temperature = None (class attribute, instance attribute)
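All four options default to None, so only the values you set are carried on the request. A minimal sketch (the field types are inferred from the getter annotations further down and are an assumption):

from dandy.llm.request.ollama import OllamaRequestOptions

# num_ctx: context window size, num_predict: max completion tokens,
# seed and temperature: the usual sampling controls
options = OllamaRequestOptions(num_ctx=8192, temperature=0.2)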

OllamaRequestBody

Bases: BaseRequestBody

options (instance attribute; holds the OllamaRequestOptions for this request)

stream = False (class attribute, instance attribute)

format = {} (class attribute, instance attribute)

token_usage (property)

add_message

Source code in dandy/llm/request/ollama.py
def add_message(
    self,
    role: RoleLiteralStr,
    content: str,
    images: List[str] | None = None,
    prepend: bool = False,
) -> None:
    ollama_request_message = OllamaRequestMessage(role=role, content=content, images=images)

    # prepend places the message at the front of the conversation
    # (e.g. a system prompt); otherwise it is appended in order
    if prepend:
        self.messages.insert(0, ollama_request_message)
    else:
        self.messages.append(ollama_request_message)
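For illustration, a hedged sketch of building a conversation (constructing OllamaRequestBody with only options assumes messages defaults to an empty list on BaseRequestBody):

from dandy.llm.request.ollama import OllamaRequestBody, OllamaRequestOptions

body = OllamaRequestBody(options=OllamaRequestOptions())
body.add_message(role='user', content='What is 2 + 2?')
# prepend=True pushes the system prompt ahead of the existing messages
body.add_message(role='system', content='Answer tersely.', prepend=True)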

get_context_length

Source code in dandy/llm/request/ollama.py
def get_context_length(self) -> int | None:
    # num_ctx defaults to None, so no explicit context length may be set
    return self.options.num_ctx

get_max_completion_tokens

Source code in dandy/llm/request/ollama.py
def get_max_completion_tokens(self) -> int | None:
    return self.options.num_predict

get_seed

Source code in dandy/llm/request/ollama.py
def get_seed(self) -> int | None:
    return self.options.seed

get_temperature

Source code in dandy/llm/request/ollama.py
def get_temperature(self) -> float | None:
    return self.options.temperature
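The four getters above simply proxy the corresponding options fields, returning None for anything that was never set. Continuing the sketch above:

body.options.temperature = 0.7
assert body.get_temperature() == 0.7
assert body.get_seed() is None  # seed was never set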

set_format_to_json_schema

Source code in dandy/llm/request/ollama.py
def set_format_to_json_schema(self, json_schema: dict) -> None:
    # constrain the model's output to the given JSON schema
    self.format = json_schema
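A typical source for the schema dict is a pydantic model's model_json_schema(); the Answer model here is hypothetical, shown only for illustration:

from pydantic import BaseModel

class Answer(BaseModel):  # hypothetical response model
    value: int

body.set_format_to_json_schema(Answer.model_json_schema())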

set_format_to_text

Source code in dandy/llm/request/ollama.py
def set_format_to_text(self) -> None:
    # clear the schema constraint so the response comes back as plain text
    self.format = None

to_dict

Source code in dandy/llm/request/ollama.py
def to_dict(self) -> dict:
    return self.model_dump()
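Because the body is a pydantic model, to_dict produces a plain dict ready for JSON serialization, presumably carrying the messages, options, stream and format fields shown above. Continuing the sketch:

payload = body.to_dict()
# e.g. send payload as the JSON body of an Ollama chat request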