Refactor entire system to be more contextual so that conversation flow can be more easily managed
response.go (new file, 70 lines)
@@ -0,0 +1,70 @@
package go_llm

import "github.com/sashabaranov/go-openai"

// ResponseChoice is one assistant reply, carrying both its text/refusal
// and any tool calls the model requested.
type ResponseChoice struct {
	Index   int
	Role    Role
	Content string
	Refusal string
	Name    string
	Calls   []ToolCall
}
// toRaw flattens the choice into a generic map mirroring the wire format.
func (r ResponseChoice) toRaw() map[string]any {
	res := map[string]any{
		"index":   r.Index,
		"role":    r.Role,
		"content": r.Content,
		"refusal": r.Refusal,
		"name":    r.Name,
	}

	calls := make([]map[string]any, 0, len(r.Calls))
	for _, call := range r.Calls {
		calls = append(calls, call.toRaw())
	}

	res["calls"] = calls

	return res
}
// toChatCompletionMessages converts the choice into go-openai messages:
// tool-call messages first, then the assistant's own content/refusal, if any.
func (r ResponseChoice) toChatCompletionMessages() []openai.ChatCompletionMessage {
	var res []openai.ChatCompletionMessage

	for _, call := range r.Calls {
		res = append(res, call.toChatCompletionMessages()...)
	}

	if r.Refusal != "" || r.Content != "" {
		res = append(res, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleAssistant,
			Content: r.Content,
			Refusal: r.Refusal,
		})
	}

	return res
}
// toInput converts the choice back into package-level Inputs so it can be
// fed into a follow-up request as conversation context.
func (r ResponseChoice) toInput() []Input {
	var res []Input

	for _, call := range r.Calls {
		res = append(res, call)
	}

	if r.Content != "" || r.Refusal != "" {
		res = append(res, Message{
			Role: RoleAssistant,
			Text: r.Content,
		})
	}

	return res
}
// Response is the set of choices returned for a single request.
type Response struct {
	Choices []ResponseChoice
}
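
For context, a minimal sketch of how toChatCompletionMessages might be chained into a follow-up request against the go-openai client. The continueChat helper, the model constant, and the extra context import are illustrative assumptions, not part of this commit:

// Hypothetical helper (not in this commit): append a choice's messages to
// the running history and issue the next ChatCompletion request.
// Assumes "context" is imported alongside go-openai.
func continueChat(client *openai.Client, history []openai.ChatCompletionMessage, choice ResponseChoice) (openai.ChatCompletionResponse, error) {
	history = append(history, choice.toChatCompletionMessages()...)
	return client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model:    openai.GPT4o, // any chat-capable model works here
		Messages: history,
	})
}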
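
Similarly, a sketch of the conversational round-trip that toInput appears to enable, which is the point of the "more contextual" refactor; RoleUser and the followUp helper are assumed for illustration and live inside the go_llm package:

// Hypothetical helper (not in this commit): carry the assistant's last
// choice forward as context for the user's next turn.
func followUp(prev Response, userReply string) []Input {
	var next []Input
	if len(prev.Choices) > 0 {
		// Re-inject the assistant's tool calls and reply so the model
		// sees the full conversation so far.
		next = append(next, prev.Choices[0].toInput()...)
	}
	next = append(next, Message{
		Role: RoleUser, // assumed counterpart to RoleAssistant
		Text: userReply,
	})
	return next
}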