Steve Dudenhoeffer
0993a8e865
Modify `FunctionCall` struct to handle arguments as strings. Add debugging logs to facilitate error tracing and improve JSON unmarshalling in various functions.

package go_llm

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	anth "github.com/liushuangls/go-anthropic/v2"
)

// anthropic adapts the Anthropic Messages API (via go-anthropic) to this
// package's LLM interface.
type anthropic struct {
	key   string
	model string
}

var _ LLM = anthropic{}

// ModelVersion returns a copy of the client configured to use the given model.
func (a anthropic) ModelVersion(modelVersion string) (ChatCompletion, error) {
	a.model = modelVersion

	// TODO: model verification?
	return a, nil
}
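
// Illustrative sketch: ModelVersion has a value receiver, so it returns a
// configured copy and leaves the receiver untouched. Assuming the struct is
// constructed from inside this package:
//
//	base := anthropic{key: "sk-ant-..."}
//	chat, _ := base.ModelVersion("claude-3-5-sonnet-20240620")
//	// base.model is still ""; chat is an anthropic value carrying the model.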

// requestToAnthropicRequest converts a go_llm Request into an
// anth.MessagesRequest, folding system messages into the System field and
// merging consecutive messages that share a role.
func (a anthropic) requestToAnthropicRequest(req Request) anth.MessagesRequest {
	res := anth.MessagesRequest{
		Model:     anth.Model(a.model),
		MaxTokens: 1000,
	}

	msgs := []anth.Message{}

	// Anthropic has no "system" message role, so system messages are
	// concatenated into res.System instead of being added to the message list.
	for _, msg := range req.Messages {
		if msg.Role == RoleSystem {
			if len(res.System) > 0 {
				res.System += "\n"
			}
			res.System += msg.Text
		} else {
			role := anth.RoleUser

			if msg.Role == RoleAssistant {
				role = anth.RoleAssistant
			}

			m := anth.Message{
				Role:    role,
				Content: []anth.MessageContent{},
			}

			if msg.Text != "" {
				m.Content = append(m.Content, anth.MessageContent{
					Type: anth.MessagesContentTypeText,
					Text: &msg.Text,
				})
			}

			for _, img := range msg.Images {
				if img.Base64 != "" {
					m.Content = append(m.Content, anth.NewImageMessageContent(anth.MessageContentImageSource{
						Type:      "base64",
						MediaType: img.ContentType,
						Data:      img.Base64,
					}))
				} else if img.Url != "" {
					m.Content = append(m.Content, anth.NewImageMessageContent(anth.MessageContentImageSource{
						Type:      "url",
						MediaType: img.ContentType,
						Data:      img.Url,
					}))
				}
			}

			// Anthropic expects strictly alternating user/assistant roles, so if this
			// message has the same role as the previous one, merge its content into
			// the previous message instead of appending a new entry.
			if len(msgs) > 0 && msgs[len(msgs)-1].Role == role {
				m2 := &msgs[len(msgs)-1]

				m2.Content = append(m2.Content, m.Content...)
			} else {
				msgs = append(msgs, m)
			}
		}
	}

	for _, tool := range req.Toolbox.funcs {
		res.Tools = append(res.Tools, anth.ToolDefinition{
			Name:        tool.Name,
			Description: tool.Description,
			InputSchema: tool.Parameters,
		})
	}

	res.Messages = msgs

	if req.Temperature != nil {
		res.Temperature = req.Temperature
	}

	log.Println("llm request to anthropic request", res)

	return res
}
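
// Illustrative sketch of the conversion above (the Message literal syntax is
// an assumption about this package's request types; only the Role and Text
// fields are taken from the code above):
//
//	req := Request{Messages: []Message{
//		{Role: RoleSystem, Text: "Answer tersely."},
//		{Role: RoleUser, Text: "Hi."},
//		{Role: RoleUser, Text: "Still there?"},
//	}}
//	r := (anthropic{model: "claude-3-haiku-20240307"}).requestToAnthropicRequest(req)
//	// r.System == "Answer tersely."
//	// len(r.Messages) == 1: the two consecutive user messages are merged into
//	// a single message with two text content blocks.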

// responseToLLMResponse converts an anth.MessagesResponse into a Response.
// Each content block becomes its own ResponseChoice; tool_use inputs are
// re-marshalled so FunctionCall.Arguments carries the JSON as a string.
func (a anthropic) responseToLLMResponse(in anth.MessagesResponse) Response {
	res := Response{}

	for _, msg := range in.Content {
		choice := ResponseChoice{}

		switch msg.Type {
		case anth.MessagesContentTypeText:
			if msg.Text != nil {
				choice.Content = *msg.Text
			}

		case anth.MessagesContentTypeToolUse:
			if msg.MessageContentToolUse != nil {
				b, e := json.Marshal(msg.MessageContentToolUse.Input)
				if e != nil {
					log.Println("failed to marshal input", e)
				} else {
					choice.Calls = append(choice.Calls, ToolCall{
						ID: msg.MessageContentToolUse.ID,
						FunctionCall: FunctionCall{
							Name:      msg.MessageContentToolUse.Name,
							Arguments: string(b),
						},
					})
				}
			}
		}

		res.Choices = append(res.Choices, choice)
	}

	log.Println("anthropic response to llm response", res)

	return res
}
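
// Illustrative sketch of the tool-use mapping above, with a hypothetical
// "get_weather" tool whose Input decodes to {"city": "Boston"}:
//
//	ToolCall{
//		ID: "toolu_01...",
//		FunctionCall: FunctionCall{
//			Name:      "get_weather",
//			Arguments: `{"city":"Boston"}`, // JSON kept as a string
//		},
//	}
//
// Keeping Arguments as a string presumably leaves it to the caller to
// json.Unmarshal the payload into whatever parameter struct the tool expects.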

// ChatComplete sends the request to the Anthropic Messages API and converts
// the result into a Response.
func (a anthropic) ChatComplete(ctx context.Context, req Request) (Response, error) {
	cl := anth.NewClient(a.key)

	res, err := cl.CreateMessages(ctx, a.requestToAnthropicRequest(req))
	if err != nil {
		return Response{}, fmt.Errorf("failed to chat complete: %w", err)
	}

	return a.responseToLLMResponse(res), nil
}
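
// Usage sketch, assuming the returned ChatCompletion exposes ChatComplete and
// that construction happens inside this package (an exported constructor would
// normally supply the API key):
//
//	llm := anthropic{key: os.Getenv("ANTHROPIC_API_KEY")}
//	chat, err := llm.ModelVersion("claude-3-5-sonnet-20240620")
//	if err != nil {
//		log.Fatal(err)
//	}
//	res, err := chat.ChatComplete(context.Background(), req) // req: a go_llm Request
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(res.Choices[0].Content)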