Add core implementation for AI-powered question answering
Introduce multiple agents, tools, and utilities that process and answer user-provided questions using LLMs and external data. Key features include knowledge processing, question splitting, search term generation, and contextual knowledge handling.
pkg/agents/shared/modeltracker.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package shared

import (
    "context"
    "errors"
    "sync/atomic"

    gollm "gitea.stevedudenhoeffer.com/steve/go-llm"
)

// ModelTracker wraps a gollm.ChatCompletion and atomically counts the
// calls made through it, optionally enforcing an upper bound.
type ModelTracker struct {
    parent  gollm.ChatCompletion
    maximum int64
    calls   int64
}

var _ gollm.ChatCompletion = &ModelTracker{}

// NewModelTracker creates a new model tracker that will limit the number
// of calls to the parent. Set maximum to 0 to disable the limit.
func NewModelTracker(parent gollm.ChatCompletion, maximum int64) *ModelTracker {
    return &ModelTracker{parent: parent, maximum: maximum}
}

var ErrModelCapacity = errors.New("maximum model capacity reached")

// ChatComplete forwards the request to the parent model unless the call
// budget is exhausted, in which case it returns ErrModelCapacity.
func (m *ModelTracker) ChatComplete(ctx context.Context, req gollm.Request) (gollm.Response, error) {
    // Compare with > rather than >= so that exactly `maximum` calls are
    // allowed through before the limit trips.
    if m.maximum > 0 && atomic.AddInt64(&m.calls, 1) > m.maximum {
        return gollm.Response{}, ErrModelCapacity
    }

    return m.parent.ChatComplete(ctx, req)
}

// ResetCalls resets the number of calls made to the parent.
func (m *ModelTracker) ResetCalls() {
    atomic.StoreInt64(&m.calls, 0)
}

// GetCalls returns the number of calls made to the parent so far.
func (m *ModelTracker) GetCalls() int64 {
    return atomic.LoadInt64(&m.calls)
}
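For illustration, here is a minimal usage sketch (not part of the commit) of how a caller in the same package might enforce a per-question call budget. The `model` and `reqs` parameters and the budget of 10 are hypothetical stand-ins, since the commit does not show how the surrounding agents construct their requests; `fmt` would need to be imported in addition to the packages above.

// askAllWithBudget runs a batch of completions through a fresh tracker
// capped at 10 calls, translating a tripped budget into a descriptive error.
func askAllWithBudget(ctx context.Context, model gollm.ChatCompletion, reqs []gollm.Request) ([]gollm.Response, error) {
    tracked := NewModelTracker(model, 10) // at most 10 completions total

    var out []gollm.Response
    for _, req := range reqs {
        resp, err := tracked.ChatComplete(ctx, req)
        if errors.Is(err, ErrModelCapacity) {
            // Budget exhausted: stop cleanly instead of retrying in a loop.
            return out, fmt.Errorf("stopped after %d calls: %w", tracked.GetCalls(), err)
        }
        if err != nil {
            return out, err
        }
        out = append(out, resp)
    }
    return out, nil
}

Because the counter is advanced with sync/atomic, a single tracker can be shared by concurrent agents, and ResetCalls allows the same budget to be reused between questions.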