feat: add DeepSeek, Moonshot, xAI, Groq, Ollama; drop v1; migrate TUI to v2
CI / Root Module (push) Failing after 30s
CI / Lint (push) Failing after 50s
CI / V2 Module (push) Successful in 2m14s

Five OpenAI-compatible providers join the library as first-class constructors
(llm.DeepSeek, llm.Moonshot, llm.XAI, llm.Groq, llm.Ollama). Their wire-level
implementation is shared via a new v2/openaicompat package which is the
extracted guts of the old v2/openai provider; each provider supplies its own
Rules value to declare per-model constraints (e.g., DeepSeek Reasoner rejects
tools and temperature, Moonshot/xAI accept images only on *-vision* models,
Groq rejects audio input). v2/openai itself becomes a thin wrapper that sets
RestrictTemperature for o-series and gpt-5 models.

A new provider registry (v2/registry.go) exposes llm.Providers() and drives
the TUI's provider picker, so adding a provider in the future is a single-file
change.

The TUI at cmd/llm was migrated from v1 to v2 and moved to v2/cmd/llm. With
nothing else depending on v1, the v1 code at the repo root (all .go files,
schema/, internal/, provider/, root go.mod/go.sum) is deleted.

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
This commit is contained in:
2026-04-24 13:34:39 +00:00
parent 9b91b2f794
commit 34119e5a00
58 changed files with 1921 additions and 4242 deletions
+245
View File
@@ -0,0 +1,245 @@
package main
import (
"os"
"github.com/charmbracelet/bubbles/textinput"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
llm "gitea.stevedudenhoeffer.com/steve/go-llm/v2"
)
// State represents the current view/screen of the application.
type State int

// The TUI is a simple state machine; exactly one State is active at a time
// and drives both rendering and key handling.
const (
	StateChat           State = iota // main chat view (default after setup)
	StateProviderSelect              // pick a provider from llm.Providers()
	StateModelSelect                 // pick a model within the chosen provider
	StateImageInput                  // attach images to the pending message
	StateToolsPanel                  // view/toggle the demo toolbox
	StateSettings                    // system prompt / temperature settings
	StateAPIKeyInput                 // prompt for a missing provider API key
)
// DisplayMessage represents a message for display in the UI.
// It is a render-only projection of the conversation: the authoritative
// transcript sent to the LLM lives in Model.conversation as []llm.Message.
type DisplayMessage struct {
	Role    llm.Role // llm.RoleUser/RoleAssistant, or ad-hoc roles like "tool_call"
	Content string   // text shown in the viewport
	Images  int      // number of images attached
}
// ProviderEntry is a CLI-local view of a registered provider, enriched with
// UI state (which model is currently chosen, whether we have a key, etc.).
type ProviderEntry struct {
	Info       llm.ProviderInfo // static registry metadata (name, models, env key, constructor)
	HasAPIKey  bool             // true when a key is available or none is required (EnvKey == "")
	ModelIndex int              // index into Info.Models of the last-selected model
}
// Model is the main Bubble Tea model.
// It holds everything the TUI needs: the current screen (State), the active
// LLM client/model, the conversation transcript, and the UI widgets.
type Model struct {
	// State
	state         State // currently active screen
	previousState State // screen to return to after a modal (e.g. API key input)

	// Provider
	client        *llm.Client       // nil until a provider is selected
	chat          *llm.Model        // nil until a model is selected
	providerName  string            // display name of the active provider
	modelName     string            // name of the active model
	apiKeys       map[string]string // provider name -> API key (from env or user input)
	providers     []ProviderEntry   // all registered providers, in registry order
	providerIndex int               // index into providers of the active provider

	// Conversation
	conversation []llm.Message    // authoritative transcript sent to the LLM
	messages     []DisplayMessage // render-only transcript shown in the viewport

	// Tools
	toolbox      *llm.ToolBox // demo tools offered to the model
	toolsEnabled bool         // whether tools are passed along with requests

	// Settings
	systemPrompt string
	temperature  *float64 // nil means "use provider default"

	// Pending images, attached to the next user message.
	pendingImages []llm.Image

	// UI Components
	input         textinput.Model
	viewport      viewport.Model
	viewportReady bool // viewport needs a WindowSizeMsg before first use

	// Selection state (for lists)
	listIndex int
	listItems []string

	// Dimensions of the terminal, from the last WindowSizeMsg.
	width  int
	height int

	// Loading state
	loading bool
	err     error // last error, shown in the UI; cleared on new conversation

	// For API key input
	apiKeyInput textinput.Model
}
// InitialModel creates and returns the initial model.
//
// It wires up the two text inputs, snapshots the go-llm provider registry,
// harvests any API keys already present in the environment, and prepares the
// provider-picker list the UI opens on (StateProviderSelect).
func InitialModel() Model {
	msgInput := textinput.New()
	msgInput.Placeholder = "Type your message..."
	msgInput.Focus()
	msgInput.CharLimit = 4096
	msgInput.Width = 60

	keyInput := textinput.New()
	keyInput.Placeholder = "Enter API key..."
	keyInput.CharLimit = 256
	keyInput.Width = 60
	keyInput.EchoMode = textinput.EchoPassword

	// Build the provider list from the go-llm registry, noting which
	// providers are immediately usable.
	infos := llm.Providers()
	entries := make([]ProviderEntry, len(infos))
	keys := make(map[string]string)
	for i, info := range infos {
		e := ProviderEntry{Info: info}
		if info.EnvKey == "" {
			// Key-less provider (e.g., Ollama).
			e.HasAPIKey = true
		} else if v := os.Getenv(info.EnvKey); v != "" {
			keys[info.Name] = v
			e.HasAPIKey = true
		}
		entries[i] = e
	}

	m := Model{
		state:        StateProviderSelect,
		input:        msgInput,
		apiKeyInput:  keyInput,
		apiKeys:      keys,
		providers:    entries,
		systemPrompt: "You are a helpful assistant.",
		toolbox:      createDemoToolbox(),
		toolsEnabled: false,
		messages:     []DisplayMessage{},
		conversation: []llm.Message{},
	}

	// Human-readable rows for the provider picker.
	m.listItems = make([]string, len(entries))
	for i, e := range entries {
		label := " (no key)"
		if e.HasAPIKey {
			if e.Info.EnvKey == "" {
				label = " (local)"
			} else {
				label = " (ready)"
			}
		}
		m.listItems[i] = e.Info.DisplayName + label
	}
	return m
}
// Init initializes the model. It returns the cursor-blink command for the
// focused text input; no other startup work is needed.
func (m Model) Init() tea.Cmd {
	return textinput.Blink
}
// selectProvider sets up the selected provider.
//
// Out-of-range indices and providers whose required API key is missing are
// silently ignored (nil error), leaving the current selection untouched —
// the caller is expected to route the user to key entry in the latter case.
func (m *Model) selectProvider(index int) error {
	if index < 0 || index >= len(m.providers) {
		return nil
	}
	p := m.providers[index]
	key := m.apiKeys[p.Info.Name] // empty for key-less providers like Ollama
	if p.Info.EnvKey != "" && key == "" {
		return nil
	}
	m.providerName = p.Info.DisplayName
	m.providerIndex = index
	m.client = p.Info.New(key)
	// Select the provider's remembered default model.
	if len(p.Info.Models) > 0 {
		return m.selectModel(p.ModelIndex)
	}
	// Fix: a provider with no model list previously kept the prior
	// provider's chat model/name attached to the freshly created client,
	// leaving m.chat pointing at a model the new client does not own.
	m.chat = nil
	m.modelName = ""
	return nil
}
// selectModel sets the current model.
//
// It is a no-op (nil error) when no client is active or when index falls
// outside the active provider's model list. On success it also records the
// choice in the provider entry so it is restored on re-selection.
func (m *Model) selectModel(index int) error {
	if m.client == nil {
		return nil
	}
	models := m.providers[m.providerIndex].Info.Models
	if index < 0 || index >= len(models) {
		return nil
	}
	name := models[index]
	m.modelName = name
	m.chat = m.client.Model(name)
	// Remember the choice per-provider for the next time it is selected.
	m.providers[m.providerIndex].ModelIndex = index
	return nil
}
// newConversation resets the conversation.
//
// Both transcripts, any images queued for the next message, and the last
// error are all discarded; fresh (non-nil) slices keep downstream appends
// and length checks uniform.
func (m *Model) newConversation() {
	m.err = nil
	m.pendingImages = []llm.Image{}
	m.messages = []DisplayMessage{}
	m.conversation = []llm.Message{}
}
// addUserMessage adds a user message to the conversation.
//
// The full message (text + images) goes into the LLM transcript, while the
// display transcript records only the text and an image count.
func (m *Model) addUserMessage(text string, images []llm.Image) {
	m.conversation = append(m.conversation, llm.Message{
		Role:    llm.RoleUser,
		Content: llm.Content{Text: text, Images: images},
	})

	display := DisplayMessage{
		Role:    llm.RoleUser,
		Content: text,
		Images:  len(images),
	}
	m.messages = append(m.messages, display)
}
// addAssistantMessage adds an assistant message to the conversation display.
// Only the display transcript is touched; the caller is responsible for
// appending the assistant turn to the LLM conversation itself.
func (m *Model) addAssistantMessage(content string) {
	entry := DisplayMessage{
		Role:    llm.RoleAssistant,
		Content: content,
	}
	m.messages = append(m.messages, entry)
}
// addToolCallMessage adds a tool call message to display.
// It uses the ad-hoc "tool_call" role, which exists only for rendering and
// is never sent to the provider.
func (m *Model) addToolCallMessage(name string, args string) {
	entry := DisplayMessage{
		Role:    llm.Role("tool_call"),
		Content: name + ": " + args,
	}
	m.messages = append(m.messages, entry)
}
// addToolResultMessage adds a tool result message to display.
// Mirrors addToolCallMessage, using the render-only "tool_result" role.
func (m *Model) addToolResultMessage(name string, result string) {
	entry := DisplayMessage{
		Role:    llm.Role("tool_result"),
		Content: name + " -> " + result,
	}
	m.messages = append(m.messages, entry)
}