feat: implement AI-assisted rename prompting feature
- Added data model for AI-assisted renaming including structures for prompts, responses, and policies.
- Created implementation plan detailing the integration of Google Genkit into the CLI for renaming tasks.
- Developed quickstart guide for setting up and using the new AI rename functionality.
- Documented research decisions regarding Genkit orchestration and prompt composition.
- Established tasks for phased implementation, including setup, foundational work, and user stories.
- Implemented contract tests to ensure AI rename policies and ledger metadata are correctly applied.
- Developed integration tests for validating AI rename flows, including preview, apply, and undo functionalities.
- Added tooling to pin Genkit dependency for consistent builds.
This commit is contained in:
158
internal/ai/genkit/client.go
Normal file
158
internal/ai/genkit/client.go
Normal file
@@ -0,0 +1,158 @@
|
||||
package genkit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
genaigo "github.com/firebase/genkit/go/ai"
|
||||
"github.com/openai/openai-go/option"
|
||||
|
||||
aiconfig "github.com/rogeecn/renamer/internal/ai/config"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// WorkflowRunner executes a Genkit request and returns the structured response.
type WorkflowRunner interface {
	// Run performs a single model invocation for req and returns the
	// decoded result, or an error if generation or decoding fails.
	Run(ctx context.Context, req Request) (Result, error)
}
|
||||
|
||||
// WorkflowFactory constructs workflow runners. The package-level factory can
// be swapped via OverrideWorkflowFactory, which is how tests inject fakes.
type WorkflowFactory func(ctx context.Context, opts Options) (WorkflowRunner, error)
|
||||
|
||||
var (
	// factoryMu guards reads and writes of currentFactory.
	factoryMu sync.RWMutex
	// defaultFactory builds the production workflow via NewWorkflow.
	defaultFactory = func(ctx context.Context, opts Options) (WorkflowRunner, error) {
		return NewWorkflow(ctx, opts)
	}
	// currentFactory is the active constructor; it is read by
	// getWorkflowFactory and replaced by OverrideWorkflowFactory.
	currentFactory WorkflowFactory = defaultFactory
)
|
||||
|
||||
// OverrideWorkflowFactory allows tests to supply custom workflow implementations.
|
||||
func OverrideWorkflowFactory(factory WorkflowFactory) {
|
||||
factoryMu.Lock()
|
||||
defer factoryMu.Unlock()
|
||||
if factory == nil {
|
||||
currentFactory = defaultFactory
|
||||
return
|
||||
}
|
||||
currentFactory = factory
|
||||
}
|
||||
|
||||
// ResetWorkflowFactory restores the default workflow constructor.
|
||||
func ResetWorkflowFactory() {
|
||||
OverrideWorkflowFactory(nil)
|
||||
}
|
||||
|
||||
func getWorkflowFactory() WorkflowFactory {
|
||||
factoryMu.RLock()
|
||||
defer factoryMu.RUnlock()
|
||||
return currentFactory
|
||||
}
|
||||
|
||||
// ClientOptions configure the Genkit client.
type ClientOptions struct {
	// Model is the preferred model identifier; blank falls back to
	// DefaultModelName in NewClient.
	Model string
	// TokenProvider resolves model auth tokens; may be nil, in which case
	// the workflow falls back to environment/token-store lookup.
	TokenProvider aiconfig.TokenProvider
	// RequestOptions are extra OpenAI request options forwarded to the
	// workflow; the slice is copied by NewClient.
	RequestOptions []option.RequestOption
}
|
||||
|
||||
// Client orchestrates prompt execution against the configured workflow.
type Client struct {
	// model is the resolved default model name (never empty after NewClient).
	model string
	// tokenProvider supplies model auth tokens; may be nil.
	tokenProvider aiconfig.TokenProvider
	// requestOptions is the client's private copy of request options.
	requestOptions []option.RequestOption
}
|
||||
|
||||
// NewClient constructs a client with optional overrides.
|
||||
func NewClient(opts ClientOptions) *Client {
|
||||
model := strings.TrimSpace(opts.Model)
|
||||
if model == "" {
|
||||
model = DefaultModelName
|
||||
}
|
||||
return &Client{
|
||||
model: model,
|
||||
tokenProvider: opts.TokenProvider,
|
||||
requestOptions: append([]option.RequestOption(nil), opts.RequestOptions...),
|
||||
}
|
||||
}
|
||||
|
||||
// Invocation describes a single Genkit call.
type Invocation struct {
	// Instructions is the system prompt; the workflow rejects blank values.
	Instructions string
	// Prompt is the structured payload serialized to JSON for the model.
	Prompt prompt.RenamePrompt
	// Model optionally overrides the client's default model for this call.
	Model string
}
|
||||
|
||||
// InvocationResult carries the parsed response alongside telemetry.
type InvocationResult struct {
	// PromptHash is the SHA-256 hex digest of instructions + payload.
	PromptHash string
	// Model is the model name that produced (or was assigned to) the response.
	Model string
	// Response is the structured rename response.
	Response prompt.RenameResponse
	// ModelResponse is the raw Genkit response for diagnostics; may be nil.
	ModelResponse *genaigo.ModelResponse
	// PromptJSON is the serialized prompt payload that was sent.
	PromptJSON []byte
}
|
||||
|
||||
// Invoke executes the workflow and returns the structured response.
|
||||
func (c *Client) Invoke(ctx context.Context, inv Invocation) (InvocationResult, error) {
|
||||
model := strings.TrimSpace(inv.Model)
|
||||
if model == "" {
|
||||
model = c.model
|
||||
}
|
||||
if model == "" {
|
||||
model = DefaultModelName
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(inv.Prompt)
|
||||
if err != nil {
|
||||
return InvocationResult{}, fmt.Errorf("marshal prompt payload: %w", err)
|
||||
}
|
||||
|
||||
factory := getWorkflowFactory()
|
||||
runner, err := factory(ctx, Options{
|
||||
Model: model,
|
||||
TokenProvider: c.tokenProvider,
|
||||
RequestOptions: c.requestOptions,
|
||||
})
|
||||
if err != nil {
|
||||
return InvocationResult{}, err
|
||||
}
|
||||
|
||||
result, err := runner.Run(ctx, Request{
|
||||
Instructions: inv.Instructions,
|
||||
Payload: inv.Prompt,
|
||||
})
|
||||
if err != nil {
|
||||
return InvocationResult{}, err
|
||||
}
|
||||
|
||||
if strings.TrimSpace(result.Response.Model) == "" {
|
||||
result.Response.Model = model
|
||||
}
|
||||
|
||||
promptHash := hashPrompt(inv.Instructions, payload)
|
||||
if strings.TrimSpace(result.Response.PromptHash) == "" {
|
||||
result.Response.PromptHash = promptHash
|
||||
}
|
||||
|
||||
return InvocationResult{
|
||||
PromptHash: promptHash,
|
||||
Model: result.Response.Model,
|
||||
Response: result.Response,
|
||||
ModelResponse: result.ModelResponse,
|
||||
PromptJSON: payload,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// hashPrompt derives a stable SHA-256 hex digest over the trimmed
// instructions and the serialized prompt payload, separated by a newline
// so the two parts cannot collide across the boundary.
func hashPrompt(instructions string, payload []byte) string {
	h := sha256.New()
	h.Write([]byte(strings.TrimSpace(instructions)))
	h.Write([]byte("\n"))
	h.Write(payload)
	return hex.EncodeToString(h.Sum(nil))
}
|
||||
3
internal/ai/genkit/doc.go
Normal file
3
internal/ai/genkit/doc.go
Normal file
@@ -0,0 +1,3 @@
|
||||
// Package genkit integrates the Google Genkit workflow with the CLI.
package genkit
166
internal/ai/genkit/workflow.go
Normal file
166
internal/ai/genkit/workflow.go
Normal file
@@ -0,0 +1,166 @@
|
||||
package genkit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/firebase/genkit/go/ai"
|
||||
gogenkit "github.com/firebase/genkit/go/genkit"
|
||||
oai "github.com/firebase/genkit/go/plugins/compat_oai/openai"
|
||||
"github.com/openai/openai-go/option"
|
||||
|
||||
aiconfig "github.com/rogeecn/renamer/internal/ai/config"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
const (
	// defaultModelName is the internal fallback model identifier used when
	// callers do not specify one.
	defaultModelName = "gpt-4o-mini"
	// DefaultModelName exposes the default model identifier used by the CLI.
	DefaultModelName = defaultModelName
)
|
||||
|
||||
// Sentinel errors surfaced by the workflow; callers match them with errors.Is.
var (
	// ErrMissingToken indicates the workflow could not locate a model token.
	ErrMissingToken = errors.New("genkit workflow: model token not available")
	// ErrMissingInstructions indicates that no system instructions were provided for a run.
	ErrMissingInstructions = errors.New("genkit workflow: instructions are required")
)
|
||||
|
||||
// DataGenerator executes the Genkit request and decodes the structured
// response. Options.Generator lets callers substitute this step (e.g. in
// tests); NewWorkflow installs a default backed by gogenkit.GenerateData.
type DataGenerator func(ctx context.Context, g *gogenkit.Genkit, opts ...ai.GenerateOption) (*prompt.RenameResponse, *ai.ModelResponse, error)
|
||||
|
||||
// Options configure a Workflow instance.
type Options struct {
	// Model is the preferred model name; blank falls back to defaultModelName.
	Model string
	// TokenProvider resolves model auth tokens; may be nil.
	TokenProvider aiconfig.TokenProvider
	// RequestOptions are forwarded to the OpenAI-compatible plugin.
	RequestOptions []option.RequestOption
	// Generator overrides the generation step; nil selects the default.
	Generator DataGenerator
}
|
||||
|
||||
// Request captures the input necessary to execute the Genkit workflow.
type Request struct {
	// Instructions is the system prompt; Run rejects blank values.
	Instructions string
	// Payload is serialized to JSON and sent as the user prompt.
	Payload prompt.RenamePrompt
}
|
||||
|
||||
// Result bundles the typed response together with the raw Genkit metadata.
type Result struct {
	// Response is the decoded rename response (zero value when the model
	// produced no structured output).
	Response prompt.RenameResponse
	// ModelResponse is the raw Genkit response; may be nil.
	ModelResponse *ai.ModelResponse
}
|
||||
|
||||
// Workflow orchestrates execution of the Genkit rename pipeline.
type Workflow struct {
	// modelName is the resolved model identifier.
	modelName string
	// genkit is the initialized Genkit instance.
	genkit *gogenkit.Genkit
	// model is the plugin-provided model handle for modelName.
	model ai.Model
	// generate performs the actual model call; injectable for tests.
	generate DataGenerator
}
|
||||
|
||||
// NewWorkflow instantiates a Genkit workflow for the preferred model. When no
|
||||
// model is provided it defaults to gpt-4o-mini. The workflow requires a token
|
||||
// provider capable of resolving `<model>_MODEL_AUTH_TOKEN` secrets.
|
||||
func NewWorkflow(ctx context.Context, opts Options) (*Workflow, error) {
|
||||
modelName := strings.TrimSpace(opts.Model)
|
||||
if modelName == "" {
|
||||
modelName = defaultModelName
|
||||
}
|
||||
|
||||
token, err := resolveToken(opts.TokenProvider, modelName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if strings.TrimSpace(token) == "" {
|
||||
return nil, fmt.Errorf("%w for %q", ErrMissingToken, modelName)
|
||||
}
|
||||
|
||||
plugin := &oai.OpenAI{
|
||||
APIKey: token,
|
||||
Opts: opts.RequestOptions,
|
||||
}
|
||||
|
||||
g := gogenkit.Init(ctx, gogenkit.WithPlugins(plugin))
|
||||
model := plugin.Model(g, modelName)
|
||||
|
||||
generator := opts.Generator
|
||||
if generator == nil {
|
||||
generator = func(ctx context.Context, g *gogenkit.Genkit, opts ...ai.GenerateOption) (*prompt.RenameResponse, *ai.ModelResponse, error) {
|
||||
return gogenkit.GenerateData[prompt.RenameResponse](ctx, g, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
return &Workflow{
|
||||
modelName: modelName,
|
||||
genkit: g,
|
||||
model: model,
|
||||
generate: generator,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Run executes the workflow with the provided request and decodes the response
|
||||
// into the shared RenameResponse structure.
|
||||
func (w *Workflow) Run(ctx context.Context, req Request) (Result, error) {
|
||||
if w == nil {
|
||||
return Result{}, errors.New("genkit workflow: nil receiver")
|
||||
}
|
||||
if strings.TrimSpace(req.Instructions) == "" {
|
||||
return Result{}, ErrMissingInstructions
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(req.Payload)
|
||||
if err != nil {
|
||||
return Result{}, fmt.Errorf("marshal workflow payload: %w", err)
|
||||
}
|
||||
|
||||
options := []ai.GenerateOption{
|
||||
ai.WithModel(w.model),
|
||||
ai.WithSystem(req.Instructions),
|
||||
ai.WithPrompt(string(payload)),
|
||||
}
|
||||
|
||||
response, raw, err := w.generate(ctx, w.genkit, options...)
|
||||
if err != nil {
|
||||
return Result{}, fmt.Errorf("genkit generate: %w", err)
|
||||
}
|
||||
|
||||
return Result{
|
||||
Response: deref(response),
|
||||
ModelResponse: raw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func resolveToken(provider aiconfig.TokenProvider, model string) (string, error) {
|
||||
if provider != nil {
|
||||
if token, err := provider.ResolveModelToken(model); err == nil && strings.TrimSpace(token) != "" {
|
||||
return token, nil
|
||||
} else if err != nil {
|
||||
return "", fmt.Errorf("resolve model token: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if direct := strings.TrimSpace(os.Getenv(aiconfig.ModelTokenKey(model))); direct != "" {
|
||||
return direct, nil
|
||||
}
|
||||
|
||||
store, err := aiconfig.NewTokenStore("")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
token, err := store.ResolveModelToken(model)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func deref(resp *prompt.RenameResponse) prompt.RenameResponse {
|
||||
if resp == nil {
|
||||
return prompt.RenameResponse{}
|
||||
}
|
||||
return *resp
|
||||
}
|
||||
Reference in New Issue
Block a user