// Package ollama implements the go-llm v2 provider interface for Ollama
// (https://ollama.com), a local model runner that exposes an OpenAI Chat
// Completions-compatible endpoint. No API key is required; capability depends
// on whichever model the user has pulled locally, so Rules are intentionally
// empty: we trust the local user.
package ollama

import (
	"gitea.stevedudenhoeffer.com/steve/go-llm/v2/openaicompat"
)

// DefaultBaseURL points at a local Ollama instance with the default port. Kept
// for the openaicompat-based shim; callers should migrate to
// DefaultLocalBaseURL (no /v1 suffix), which targets the native /api/chat
// endpoint.
const DefaultBaseURL = "http://localhost:11434/v1"

// shimNew is the legacy openaicompat-based constructor, retained until the
// native provider's Complete/Stream are fully implemented (Task 4 replaces
// the public New() with a native-backed constructor).
func shimNew(apiKey, baseURL string) *openaicompat.Provider {
	if baseURL == "" {
		baseURL = DefaultBaseURL
	}
	return openaicompat.New(apiKey, baseURL, openaicompat.Rules{})
}

// New creates a new Ollama provider. An empty baseURL uses DefaultBaseURL.
// Ollama ignores the API key; callers may pass "".
//
// Note: this constructor currently still routes through the openaicompat
// shim. Subsequent commits replace the body with a native /api/chat
// implementation backed by the Provider type in native.go.
func New(apiKey, baseURL string) *openaicompat.Provider {
	return shimNew(apiKey, baseURL)
}
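
// Usage sketch: a minimal, hedged illustration of constructing the provider.
// The remote host below is an assumption for the example, not a real
// endpoint. Against a default local install, both arguments can be empty,
// since Ollama ignores the API key and an empty baseURL falls back to
// DefaultBaseURL:
//
//	p := ollama.New("", "")
//
// A non-default instance only needs a baseURL; keep the /v1 suffix while this
// constructor still routes through the OpenAI-compatible shim:
//
//	remote := ollama.New("", "http://ollama.internal:11434/v1")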