"""Ollama-backed chat model for WalkXR-AI (walkxr_ai/models/ollama_model.py)."""
from ollama import Client
from walkxr_ai.models.base_model import BaseModel
from langsmith import traceable

class OllamaModel(BaseModel):
    """Chat model backed by a locally running Ollama server.

    Sends the system prompt, prior conversation turns, and a new user
    prompt to the Ollama chat endpoint and returns the assistant's reply
    as plain text.
    """

    def __init__(self, model_name: str = 'llama3', host: str = 'http://localhost:11434'):
        """Create a client for the Ollama server at *host*.

        Args:
            model_name: Name of the model to run (must already be pulled
                into the local Ollama instance).
            host: Base URL of the Ollama HTTP API.
        """
        self.client = Client(host=host)
        self.model_name = model_name

    @staticmethod
    def _build_messages(prompt: str, history: list) -> list:
        """Convert (user, assistant) turn pairs plus the new prompt into
        the role-tagged message list the Ollama chat API expects."""
        messages = [
            {"role": "system", "content": "You are an empathetic AI agent."}
        ]
        # Replay prior turns with proper roles instead of flattening each
        # exchange into a single user message -- chat models condition on
        # assistant turns correctly only when they carry role "assistant".
        for user_turn, assistant_turn in history:
            messages.append({"role": "user", "content": user_turn})
            messages.append({"role": "assistant", "content": assistant_turn})
        messages.append({"role": "user", "content": prompt})
        return messages

    # NOTE(review): tags/metadata hardcode "llama3" even though model_name is
    # configurable; decorator arguments are evaluated at class-creation time,
    # so the traced metadata can misreport the model actually used. Confirm
    # whether per-call metadata should be passed via langsmith_extra instead.
    @traceable(
        name="OllamaModel.generate_response",
        tags=["llama3", "local"],
        metadata={"model": "llama3"}
    )
    def generate_response(self, prompt: str, history: list) -> str:
        """Generate a reply to *prompt* given prior conversation turns.

        Args:
            prompt: The new user message.
            history: Iterable of ``(user_text, assistant_text)`` pairs for
                earlier turns, oldest first.

        Returns:
            The assistant's reply text, or a ``"[Model error] ..."`` string
            if the request fails -- a deliberate best-effort so callers get
            a printable message instead of an exception.
        """
        try:
            messages = self._build_messages(prompt, history)
            result = self.client.chat(
                model=self.model_name,
                messages=messages
            )
            return result['message']['content']
        except Exception as e:
            # Surface transport/model failures inline so a conversational
            # UI can display them without crashing.
            return f"[Model error] {str(e)}"