go-llm/llm.go
Steve Dudenhoeffer e5a046a70b Handle execution errors by appending them to the result.
Previously, execution errors were only returned in the refusal field. This update appends errors to the result field if present, ensuring they are included in the tool's output. This change improves visibility and clarity for error reporting.
2025-03-17 23:41:48 -04:00
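For illustration only, the updated behavior renders a failed call that still produced output as a single tool message (a sketch using the ToolCallResponse type defined below; the call ID and error text are hypothetical):

	resp := ToolCallResponse{ID: "call_123", Result: "partial output", Error: errors.New("timeout")}
	msg := resp.toChatCompletionMessages()[0]
	// msg.Content == "partial output (error in execution: timeout)"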

package go_llm

import (
	"context"

	"github.com/sashabaranov/go-openai"
)
// Role identifies the author of a chat message.
type Role string

const (
	RoleSystem    Role = "system"
	RoleUser      Role = "user"
	RoleAssistant Role = "assistant"
)

// Image is an image attachment, supplied either inline as base64 data or by URL.
type Image struct {
	Base64      string
	ContentType string
	Url         string
}
// toRaw converts the Image to a generic map representation.
func (i Image) toRaw() map[string]any {
	res := map[string]any{
		"base64":      i.Base64,
		"contenttype": i.ContentType,
		"url":         i.Url,
	}

	return res
}

// fromRaw rebuilds an Image from the map produced by toRaw.
func (i *Image) fromRaw(raw map[string]any) Image {
	var res Image

	res.Base64 = raw["base64"].(string)
	res.ContentType = raw["contenttype"].(string)
	res.Url = raw["url"].(string)

	return res
}
// Message is a single chat message, optionally carrying image attachments.
type Message struct {
	Role   Role
	Name   string
	Text   string
	Images []Image
}

// toRaw converts the Message, including any attached images, to a generic map
// representation. The role is stored as a plain string so that fromRaw's type
// assertion succeeds.
func (m Message) toRaw() map[string]any {
	res := map[string]any{
		"role": string(m.Role),
		"name": m.Name,
		"text": m.Text,
	}

	images := make([]map[string]any, 0, len(m.Images))
	for _, img := range m.Images {
		images = append(images, img.toRaw())
	}

	res["images"] = images

	return res
}
// fromRaw rebuilds a Message, including its images, from the map produced by toRaw.
func (m *Message) fromRaw(raw map[string]any) Message {
	var res Message

	res.Role = Role(raw["role"].(string))
	res.Name = raw["name"].(string)
	res.Text = raw["text"].(string)

	images := raw["images"].([]map[string]any)
	for _, img := range images {
		var i Image
		res.Images = append(res.Images, i.fromRaw(img))
	}

	return res
}
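// Illustrative round-trip (a sketch, not part of the original file; the values
// are hypothetical and only show the expected toRaw/fromRaw symmetry):
//
//	orig := Message{Role: RoleUser, Text: "hi", Images: []Image{{Url: "https://example.com/a.png"}}}
//	raw := orig.toRaw()
//	var m Message
//	restored := m.fromRaw(raw) // restored.Text == "hi", restored.Images[0].Url == "https://example.com/a.png"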
// toChatCompletionMessages converts the Message to go-openai's chat message
// format. When images are attached, the text is sent as a multi-content text
// part rather than via Content, because go-openai rejects messages that set
// both Content and MultiContent.
func (m Message) toChatCompletionMessages() []openai.ChatCompletionMessage {
	var res openai.ChatCompletionMessage

	res.Role = string(m.Role)
	res.Name = m.Name

	if len(m.Images) == 0 {
		res.Content = m.Text
		return []openai.ChatCompletionMessage{res}
	}

	if m.Text != "" {
		res.MultiContent = append(res.MultiContent, openai.ChatMessagePart{
			Type: openai.ChatMessagePartTypeText,
			Text: m.Text,
		})
	}

	for _, img := range m.Images {
		if img.Base64 != "" {
			res.MultiContent = append(res.MultiContent, openai.ChatMessagePart{
				Type: openai.ChatMessagePartTypeImageURL,
				ImageURL: &openai.ChatMessageImageURL{
					URL: "data:" + img.ContentType + ";base64," + img.Base64,
				},
			})
		} else if img.Url != "" {
			res.MultiContent = append(res.MultiContent, openai.ChatMessagePart{
				Type: openai.ChatMessagePartTypeImageURL,
				ImageURL: &openai.ChatMessageImageURL{
					URL: img.Url,
				},
			})
		}
	}

	return []openai.ChatCompletionMessage{res}
}
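// Illustrative mapping (a sketch, not part of the original file; the values are
// hypothetical): a user message with text and a base64 PNG becomes a single
// multi-content message.
//
//	msg := Message{
//		Role:   RoleUser,
//		Text:   "what is in this picture?",
//		Images: []Image{{Base64: "<...>", ContentType: "image/png"}},
//	}
//	out := msg.toChatCompletionMessages()
//	// out[0].MultiContent holds a text part and an image_url part whose URL is
//	// "data:image/png;base64,<...>"; out[0].Content is left empty.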
// ToolCall is a single function/tool invocation requested by the model.
type ToolCall struct {
	ID           string
	FunctionCall FunctionCall
}

// toRaw converts the ToolCall to a generic map representation.
func (t ToolCall) toRaw() map[string]any {
	res := map[string]any{
		"id": t.ID,
	}

	res["function"] = t.FunctionCall.toRaw()

	return res
}

// toChatCompletionMessages converts the ToolCall to a tool-role message that
// references the call by ID.
func (t ToolCall) toChatCompletionMessages() []openai.ChatCompletionMessage {
	return []openai.ChatCompletionMessage{{
		Role:       openai.ChatMessageRoleTool,
		ToolCallID: t.ID,
	}}
}
// ToolCallResponse carries the result of executing a tool call, together with
// any error produced during execution.
type ToolCallResponse struct {
	ID     string
	Result string
	Error  error
}

// toRaw converts the ToolCallResponse to a generic map representation.
func (t ToolCallResponse) toRaw() map[string]any {
	res := map[string]any{
		"id":     t.ID,
		"result": t.Result,
	}

	if t.Error != nil {
		res["error"] = t.Error.Error()
	}

	return res
}
// toChatCompletionMessages converts the ToolCallResponse to a tool-role message.
// If execution failed, the error is appended to the result (or used in its
// place) so that it is visible in the tool's output.
func (t ToolCallResponse) toChatCompletionMessages() []openai.ChatCompletionMessage {
	var refusal string
	if t.Error != nil {
		refusal = t.Error.Error()
	}

	if refusal != "" {
		if t.Result != "" {
			t.Result = t.Result + " (error in execution: " + refusal + ")"
		} else {
			t.Result = "error in execution: " + refusal
		}
	}

	return []openai.ChatCompletionMessage{{
		Role:       openai.ChatMessageRoleTool,
		Content:    t.Result,
		ToolCallID: t.ID,
	}}
}
// ChatCompletion is a model bound to a specific version that can answer chat requests.
type ChatCompletion interface {
	ChatComplete(ctx context.Context, req Request) (Response, error)
}

// LLM is a provider that can produce a ChatCompletion for a given model version.
type LLM interface {
	ModelVersion(modelVersion string) (ChatCompletion, error)
}

// OpenAI returns an LLM backed by the OpenAI API, authenticated with the given key.
func OpenAI(key string) LLM {
	return openaiImpl{key: key}
}

// Anthropic returns an LLM backed by the Anthropic API, authenticated with the given key.
func Anthropic(key string) LLM {
	return anthropic{key: key}
}

// Google returns an LLM backed by the Google API, authenticated with the given key.
func Google(key string) LLM {
	return google{key: key}
}
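
// Illustrative usage (a sketch, not part of the original file): the model name
// is hypothetical, and the fields of Request and Response are defined elsewhere
// in this package, so their construction is omitted here.
//
//	llm := OpenAI(apiKey)
//	model, err := llm.ModelVersion("gpt-4o")
//	if err != nil {
//		// handle error
//	}
//	var req Request // populate with messages, tools, etc.
//	resp, err := model.ChatComplete(ctx, req)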