Fix unmarshalling issues and adjust logging for debugging

Modify the `FunctionCall` struct to handle arguments as strings, add debug logging to aid error tracing, and improve JSON unmarshalling in several functions.
This commit is contained in:
2024-11-11 00:23:01 -05:00
parent cd4ad59a38
commit 0993a8e865
5 changed files with 37 additions and 28 deletions

View File

@@ -64,9 +64,11 @@ func (o openaiImpl) requestToOpenAIRequest(request Request) oai.ChatCompletionRe
Name: tool.Name,
Description: tool.Description,
Strict: tool.Strict,
Parameters: tool.Parameters,
Parameters: tool.Parameters.Definition(),
},
})
fmt.Println("tool:", tool.Name, tool.Description, tool.Strict, tool.Parameters.Definition())
}
if request.Temperature != nil {
@@ -94,8 +96,9 @@ func (o openaiImpl) responseToLLMResponse(response oai.ChatCompletionResponse) R
res := Response{}
for _, choice := range response.Choices {
var tools []ToolCall
var toolCalls []ToolCall
for _, call := range choice.Message.ToolCalls {
fmt.Println("responseToLLMResponse: call:", call.Function.Arguments)
toolCall := ToolCall{
ID: call.ID,
FunctionCall: FunctionCall{
@@ -104,7 +107,9 @@ func (o openaiImpl) responseToLLMResponse(response oai.ChatCompletionResponse) R
},
}
tools = append(tools, toolCall)
fmt.Println("toolCall.FunctionCall.Arguments:", toolCall.FunctionCall.Arguments)
toolCalls = append(toolCalls, toolCall)
}
res.Choices = append(res.Choices, ResponseChoice{
@@ -112,6 +117,7 @@ func (o openaiImpl) responseToLLMResponse(response oai.ChatCompletionResponse) R
Role: Role(choice.Message.Role),
Name: choice.Message.Name,
Refusal: choice.Message.Refusal,
Calls: toolCalls,
})
}