Add core implementation for AI-powered question answering

Introduce multiple agents, tools, and utilities that process user-provided questions and answer them using LLMs and external data. Key features include knowledge processing, question splitting, search-term generation, and contextual knowledge handling.
commit 693ac4e6a7
parent 20bcaefaa2
Date: 2025-03-21 11:10:48 -04:00
18 changed files with 1893 additions and 18 deletions


@@ -0,0 +1,33 @@
package shared

import (
	"strings"
)

// TidBit is a small piece of information that the AI has learned.
type TidBit struct {
	Info   string
	Source string
}

// Knowledge tracks the questions asked of the AI and the information
// gathered so far while answering them.
type Knowledge struct {
	// OriginalQuestions are the questions that were asked of the AI before any processing was done.
	OriginalQuestions []string

	// RemainingQuestions are the questions that are left to find answers for.
	RemainingQuestions []string

	// Knowledge holds the tidbits of information that the AI has learned.
	Knowledge []TidBit
}

// ToMessage converts the knowledge to a message that can be sent to the LLM.
func (k Knowledge) ToMessage() string {
	var learned []string
	for _, t := range k.Knowledge {
		learned = append(learned, t.Info)
	}

	return "Original questions asked:\n" + strings.Join(k.OriginalQuestions, "\n") + "\n" +
		"Learned information:\n" + strings.Join(learned, "\n") + "\n" +
		"Remaining questions:\n" + strings.Join(k.RemainingQuestions, "\n")
}
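
To illustrate how the prompt text is assembled, here is a minimal usage sketch. Only the shared types above are real; the import path and the question strings are assumptions for the example.

package main

import (
	"fmt"

	"yourmodule/shared" // hypothetical import path for the shared package above
)

func main() {
	k := shared.Knowledge{
		OriginalQuestions:  []string{"Who wrote The Hobbit?", "When was it published?"},
		RemainingQuestions: []string{"When was it published?"},
		Knowledge: []shared.TidBit{
			{Info: "The Hobbit was written by J.R.R. Tolkien.", Source: "encyclopedia"},
		},
	}

	// Prints the three sections (original questions, learned info,
	// remaining questions) as one message for the LLM.
	fmt.Println(k.ToMessage())
}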


@@ -0,0 +1,42 @@
package shared

import (
	"context"
	"errors"
	"sync/atomic"

	gollm "gitea.stevedudenhoeffer.com/steve/go-llm"
)

// ModelTracker wraps a ChatCompletion and counts the calls made through it,
// optionally rejecting calls once a maximum is reached.
type ModelTracker struct {
	parent  gollm.ChatCompletion
	maximum int64
	calls   int64
}

var _ gollm.ChatCompletion = &ModelTracker{}

// NewModelTracker creates a new model tracker that will limit the number of calls to the parent.
// Set maximum to 0 to disable the limit.
func NewModelTracker(parent gollm.ChatCompletion, maximum int64) *ModelTracker {
	return &ModelTracker{parent: parent, maximum: maximum}
}

// ErrModelCapacity is returned once the configured maximum number of calls has been exceeded.
var ErrModelCapacity = errors.New("maximum model capacity reached")

// ChatComplete forwards the request to the parent, failing with ErrModelCapacity
// once more than the configured maximum number of calls has been made.
// (The comparison must be > rather than >=, otherwise the maximum-th call
// would be rejected and a limit of 1 would allow no calls at all.)
func (m *ModelTracker) ChatComplete(ctx context.Context, req gollm.Request) (gollm.Response, error) {
	if m.maximum > 0 && atomic.AddInt64(&m.calls, 1) > m.maximum {
		return gollm.Response{}, ErrModelCapacity
	}

	return m.parent.ChatComplete(ctx, req)
}

// ResetCalls resets the number of calls made to the parent.
func (m *ModelTracker) ResetCalls() {
	atomic.StoreInt64(&m.calls, 0)
}

// GetCalls returns the number of calls made to the parent so far.
func (m *ModelTracker) GetCalls() int64 {
	return atomic.LoadInt64(&m.calls)
}
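
A minimal sketch of how the tracker might be used. It assumes the gollm.ChatCompletion interface consists only of the ChatComplete method shown above, and that gollm.Request and gollm.Response can be zero-value struct literals; the stub model and import path are hypothetical.

package main

import (
	"context"
	"errors"
	"fmt"

	gollm "gitea.stevedudenhoeffer.com/steve/go-llm"
	"yourmodule/shared" // hypothetical import path
)

// stubModel is a hypothetical ChatCompletion used only for this sketch;
// it assumes the interface requires nothing beyond ChatComplete.
type stubModel struct{}

func (stubModel) ChatComplete(ctx context.Context, req gollm.Request) (gollm.Response, error) {
	return gollm.Response{}, nil
}

func main() {
	tracker := shared.NewModelTracker(stubModel{}, 2) // allow at most 2 calls

	for i := 0; i < 3; i++ {
		_, err := tracker.ChatComplete(context.Background(), gollm.Request{})
		if errors.Is(err, shared.ErrModelCapacity) {
			// The third call exceeds the limit of 2 and is rejected.
			fmt.Printf("call %d rejected: %v\n", i+1, err)
			continue
		}
		fmt.Printf("call %d forwarded (calls so far: %d)\n", i+1, tracker.GetCalls())
	}
}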