feat: add DeepSeek, Moonshot, xAI, Groq, Ollama; drop v1; migrate TUI to v2
Five OpenAI-compatible providers join the library as first-class constructors (llm.DeepSeek, llm.Moonshot, llm.XAI, llm.Groq, llm.Ollama). Their wire-level implementation is shared via a new v2/openaicompat package, the extracted guts of the old v2/openai provider; each provider supplies its own Rules value to declare per-model constraints (e.g., DeepSeek Reasoner rejects tools and temperature, Moonshot/xAI accept images only on *-vision* models, Groq rejects audio input). v2/openai itself becomes a thin wrapper that sets RestrictTemperature for o-series and gpt-5 models.

A new provider registry (v2/registry.go) exposes llm.Providers() and drives the TUI's provider picker, so adding a provider in the future is a single-file change. The TUI at cmd/llm was migrated from v1 to v2 and moved to v2/cmd/llm. With nothing else depending on v1, the v1 code at the repo root (all .go files, schema/, internal/, provider/, root go.mod/go.sum) is deleted.

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
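A minimal sketch of the resulting call surface (the commit only names the constructors and llm.Providers(); the api-key argument and return types below are assumptions for illustration):

package main

import (
	"fmt"
	"os"

	llm "gitea.stevedudenhoeffer.com/steve/go-llm/v2"
)

func main() {
	// Constructor names come from the commit message; the single
	// api-key parameter is an assumed shape, not a confirmed signature.
	groq := llm.Groq(os.Getenv("GROQ_API_KEY"))
	_ = groq

	// llm.Providers() (v2/registry.go) enumerates the registered
	// providers; the TUI's provider picker iterates the same list.
	// It is assumed here to return a slice of printable entries.
	for _, p := range llm.Providers() {
		fmt.Println(p)
	}
}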
@@ -0,0 +1,136 @@
package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"net/http"
	"os"
	"strings"

	tea "github.com/charmbracelet/bubbletea"

	llm "gitea.stevedudenhoeffer.com/steve/go-llm/v2"
)

// Message types for async operations.

// ChatResponseMsg contains the response from a chat completion.
type ChatResponseMsg struct {
	Response llm.Response
	Err      error
}

// ToolExecutionMsg contains results from executing tool calls, one Message
// (RoleTool) per ToolCall, in the same order.
type ToolExecutionMsg struct {
	Results []llm.Message
	Err     error
}

// ImageLoadedMsg contains a loaded image.
type ImageLoadedMsg struct {
	Image llm.Image
	Err   error
}

// sendChatRequest sends a completion request with the current conversation,
// returning a ChatResponseMsg tea.Msg when the provider responds.
func sendChatRequest(model *llm.Model, messages []llm.Message, toolbox *llm.ToolBox, toolsEnabled bool, temperature *float64) tea.Cmd {
	return func() tea.Msg {
		opts := buildOpts(toolbox, toolsEnabled, temperature)
		resp, err := model.Complete(context.Background(), messages, opts...)
		return ChatResponseMsg{Response: resp, Err: err}
	}
}
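
// Hypothetical usage sketch (the Bubble Tea model type lives elsewhere in
// this package): Update dispatches the returned tea.Cmd and later receives
// the result, e.g.
//
//	case ChatResponseMsg:
//		if msg.Err != nil {
//			// surface the error in the UI
//		} else {
//			// append msg.Response to the conversation and re-render
//		}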

// executeTools runs each tool call via the toolbox and returns ToolExecutionMsg
// with one RoleTool Message per call, in the same order.
func executeTools(toolbox *llm.ToolBox, calls []llm.ToolCall) tea.Cmd {
	return func() tea.Msg {
		ctx := context.Background()
		results, err := toolbox.ExecuteAll(ctx, calls)
		return ToolExecutionMsg{Results: results, Err: err}
	}
}

// buildOpts constructs RequestOptions from the current CLI state.
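// For example, with tools enabled on a non-empty toolbox and temperature set
// to 0.7, it returns llm.WithTools(toolbox) and llm.WithTemperature(0.7);
// with neither, it returns a nil slice.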
func buildOpts(toolbox *llm.ToolBox, toolsEnabled bool, temperature *float64) []llm.RequestOption {
	var opts []llm.RequestOption
	if toolsEnabled && toolbox != nil && len(toolbox.AllTools()) > 0 {
		opts = append(opts, llm.WithTools(toolbox))
	}
	if temperature != nil {
		opts = append(opts, llm.WithTemperature(*temperature))
	}
	return opts
}

// loadImageFromPath loads an image from a file path.
func loadImageFromPath(path string) tea.Cmd {
	return func() tea.Msg {
		path = strings.TrimSpace(path)
		path = strings.Trim(path, "\"'")

		data, err := os.ReadFile(path)
		if err != nil {
			return ImageLoadedMsg{Err: fmt.Errorf("failed to read image file: %w", err)}
		}

		contentType := http.DetectContentType(data)
		if !strings.HasPrefix(contentType, "image/") {
			return ImageLoadedMsg{Err: fmt.Errorf("file is not an image: %s", contentType)}
		}

		return ImageLoadedMsg{
			Image: llm.Image{
				Base64:      base64.StdEncoding.EncodeToString(data),
				ContentType: contentType,
			},
		}
	}
}

// loadImageFromURL loads an image from a URL (kept as URL, not fetched).
func loadImageFromURL(url string) tea.Cmd {
	return func() tea.Msg {
		return ImageLoadedMsg{Image: llm.Image{URL: strings.TrimSpace(url)}}
	}
}

// loadImageFromBase64 loads an image from base64 data (raw or data: URL).
func loadImageFromBase64(data string) tea.Cmd {
	return func() tea.Msg {
		data = strings.TrimSpace(data)
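
		// A data URL has the standard shape
		//   data:image/png;base64,iVBORw0KGgo...
		// (media type, optional ";base64", comma, payload), so one SplitN
		// on the first comma separates the metadata from the payload.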
		if strings.HasPrefix(data, "data:") {
			parts := strings.SplitN(data, ",", 2)
			if len(parts) != 2 {
				return ImageLoadedMsg{Err: fmt.Errorf("invalid data URL format")}
			}
			mediaType := strings.TrimPrefix(parts[0], "data:")
			mediaType = strings.TrimSuffix(mediaType, ";base64")
			return ImageLoadedMsg{
				Image: llm.Image{
					Base64:      parts[1],
					ContentType: mediaType,
				},
			}
		}

		decoded, err := base64.StdEncoding.DecodeString(data)
		if err != nil {
			return ImageLoadedMsg{Err: fmt.Errorf("invalid base64 data: %w", err)}
		}
		contentType := http.DetectContentType(decoded)
		if !strings.HasPrefix(contentType, "image/") {
			return ImageLoadedMsg{Err: fmt.Errorf("data is not an image: %s", contentType)}
		}
		return ImageLoadedMsg{
			Image: llm.Image{
				Base64:      data,
				ContentType: contentType,
			},
		}
	}
}