Compare commits
3 Commits
4021323984
...
3867736858
| Author | SHA1 | Date | |
|---|---|---|---|
| 3867736858 | |||
| aa377bc7ed | |||
| 223a49ac87 |
7
.gitignore
vendored
7
.gitignore
vendored
@@ -5,6 +5,7 @@
|
||||
|
||||
# Dependency directories
|
||||
vendor/
|
||||
node_modules/
|
||||
|
||||
# IDE and editor clutter
|
||||
.vscode/
|
||||
@@ -17,7 +18,13 @@ Thumbs.db
|
||||
# Temporary files
|
||||
*.tmp
|
||||
*.swp
|
||||
*.log
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.*
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
build/
|
||||
coverage/
|
||||
|
||||
@@ -11,6 +11,8 @@ Auto-generated from all feature plans. Last updated: 2025-10-29
|
||||
- Go 1.24 + `spf13/cobra`, `spf13/pflag`, internal traversal/history/output packages (005-add-insert-command)
|
||||
- Go 1.24 + `spf13/cobra`, `spf13/pflag`, Go `regexp` (RE2 engine), internal traversal/history/output packages (006-add-regex-command)
|
||||
- Local filesystem and `.renamer` ledger files (006-add-regex-command)
|
||||
- Go 1.24 (CLI), Node.js 20 + TypeScript (Google Genkit workflow) + `spf13/cobra`, internal traversal/history/output packages, Google Genkit SDK, OpenAI-compatible HTTP client for fallbacks (008-ai-rename-prompt)
|
||||
- Local filesystem plus `.renamer` append-only ledger (008-ai-rename-prompt)
|
||||
|
||||
## Project Structure
|
||||
|
||||
@@ -43,9 +45,9 @@ tests/
|
||||
- Smoke: `scripts/smoke-test-replace.sh`, `scripts/smoke-test-remove.sh`
|
||||
|
||||
## Recent Changes
|
||||
- 008-ai-rename-prompt: Added Go 1.24 (CLI), Node.js 20 + TypeScript (Google Genkit workflow) + `spf13/cobra`, internal traversal/history/output packages, Google Genkit SDK, OpenAI-compatible HTTP client for fallbacks
|
||||
- 001-sequence-numbering: Added Go 1.24 + `spf13/cobra`, `spf13/pflag`, internal traversal/history/output packages
|
||||
- 006-add-regex-command: Added Go 1.24 + `spf13/cobra`, `spf13/pflag`, Go `regexp` (RE2 engine), internal traversal/history/output packages
|
||||
- 005-add-insert-command: Added Go 1.24 + `spf13/cobra`, `spf13/pflag`, internal traversal/history/output packages
|
||||
|
||||
<!-- MANUAL ADDITIONS START -->
|
||||
<!-- MANUAL ADDITIONS END -->
|
||||
|
||||
555
cmd/ai.go
Normal file
555
cmd/ai.go
Normal file
@@ -0,0 +1,555 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rogeecn/renamer/internal/ai/genkit"
|
||||
"github.com/rogeecn/renamer/internal/ai/plan"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
"github.com/rogeecn/renamer/internal/listing"
|
||||
"github.com/rogeecn/renamer/internal/output"
|
||||
)
|
||||
|
||||
// aiCommandOptions holds the flag-backed configuration for the "ai" command.
// One instance is bound to the command's flag set by bindAIFlags and then
// re-resolved into an effective value set by collectAIOptions.
type aiCommandOptions struct {
	Model             string   // OpenAI-compatible model identifier (--genkit-model)
	Debug             bool     // write Genkit prompt/response traces (--debug-genkit)
	ExportPath        string   // destination file for the raw AI plan JSON (--export-plan)
	ImportPath        string   // source file for an edited AI plan JSON (--import-plan)
	Casing            string   // casing policy for proposed filenames (--naming-casing)
	Prefix            string   // static prefix proposals must include (--naming-prefix / --prefix)
	AllowSpaces       bool     // permit spaces in proposed filenames (--naming-allow-spaces)
	KeepOriginalOrder bool     // keep original word order in stems (--naming-keep-order)
	BannedTokens      []string // extra banned tokens, merged with the defaults (--banned)
}
|
||||
|
||||
func newAICommand() *cobra.Command {
|
||||
ops := &aiCommandOptions{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "ai",
|
||||
Short: "Generate rename plans using the AI workflow",
|
||||
Long: "Invoke the embedded AI workflow to generate, validate, and optionally apply rename plans.",
|
||||
Example: strings.TrimSpace(` # Preview an AI plan and export the raw response for edits
|
||||
renamer ai --path ./photos --dry-run --export-plan plan.json
|
||||
|
||||
# Import an edited plan and validate it without applying changes
|
||||
renamer ai --path ./photos --dry-run --import-plan plan.json
|
||||
|
||||
# Apply an edited plan after validation passes
|
||||
renamer ai --path ./photos --import-plan plan.json --yes`),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
options := collectAIOptions(cmd, ops)
|
||||
return runAICommand(cmd.Context(), cmd, options)
|
||||
},
|
||||
}
|
||||
|
||||
bindAIFlags(cmd, ops)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func bindAIFlags(cmd *cobra.Command, opts *aiCommandOptions) {
|
||||
cmd.Flags().StringVar(&opts.Model, "genkit-model", genkit.DefaultModelName, fmt.Sprintf("OpenAI-compatible model identifier (default %s)", genkit.DefaultModelName))
|
||||
cmd.Flags().BoolVar(&opts.Debug, "debug-genkit", false, "Write Genkit prompt/response traces to the debug log")
|
||||
cmd.Flags().StringVar(&opts.ExportPath, "export-plan", "", "Export the raw AI plan JSON to the provided file path")
|
||||
cmd.Flags().StringVar(&opts.ImportPath, "import-plan", "", "Import an edited AI plan JSON for validation or apply")
|
||||
cmd.Flags().StringVar(&opts.Casing, "naming-casing", "kebab", "Casing style for AI-generated filenames (kebab, snake, camel, pascal, title)")
|
||||
cmd.Flags().StringVar(&opts.Prefix, "naming-prefix", "", "Static prefix AI proposals must include (alias: --prefix)")
|
||||
cmd.Flags().StringVar(&opts.Prefix, "prefix", "", "Alias for --naming-prefix")
|
||||
cmd.Flags().BoolVar(&opts.AllowSpaces, "naming-allow-spaces", false, "Permit spaces in AI-generated filenames")
|
||||
cmd.Flags().BoolVar(&opts.KeepOriginalOrder, "naming-keep-order", false, "Instruct AI to preserve original ordering of descriptive terms")
|
||||
cmd.Flags().StringSliceVar(&opts.BannedTokens, "banned", nil, "Comma-separated list of additional banned tokens (repeat flag to add more)")
|
||||
}
|
||||
|
||||
// collectAIOptions resolves the effective options for the "ai" command by
// layering three sources in order of increasing precedence:
//  1. hard-coded defaults (model name, kebab casing),
//  2. the values already stored in defaults (the flag-bound struct), and
//  3. the current flag values looked up on cmd.
//
// Note the asymmetry between the two prefix flags: --naming-prefix is applied
// whenever the flag is registered, while the --prefix alias only overrides
// when it was explicitly set (flag.Changed). Since both flags are bound to
// the same struct field by bindAIFlags, this resolves which one wins.
func collectAIOptions(cmd *cobra.Command, defaults *aiCommandOptions) aiCommandOptions {
	// Layer 1: hard-coded defaults.
	result := aiCommandOptions{
		Model:      genkit.DefaultModelName,
		Debug:      false,
		ExportPath: "",
		Casing:     "kebab",
	}

	// Layer 2: copy values from the flag-bound struct, keeping the built-in
	// defaults when a field is empty. BannedTokens is cloned so later flag
	// parsing cannot alias the caller's slice.
	if defaults != nil {
		if defaults.Model != "" {
			result.Model = defaults.Model
		}
		result.Debug = defaults.Debug
		result.ExportPath = defaults.ExportPath
		if defaults.Casing != "" {
			result.Casing = defaults.Casing
		}
		result.Prefix = defaults.Prefix
		result.AllowSpaces = defaults.AllowSpaces
		result.KeepOriginalOrder = defaults.KeepOriginalOrder
		if len(defaults.BannedTokens) > 0 {
			result.BannedTokens = append([]string(nil), defaults.BannedTokens...)
		}
	}

	// Layer 3: read the flags back off the command. Each Lookup guards
	// against the flag not being registered (e.g. when the command is built
	// differently in tests).
	if flag := cmd.Flags().Lookup("genkit-model"); flag != nil {
		if value, err := cmd.Flags().GetString("genkit-model"); err == nil && value != "" {
			result.Model = value
		}
	}

	if flag := cmd.Flags().Lookup("debug-genkit"); flag != nil {
		if value, err := cmd.Flags().GetBool("debug-genkit"); err == nil {
			result.Debug = value
		}
	}

	if flag := cmd.Flags().Lookup("export-plan"); flag != nil {
		if value, err := cmd.Flags().GetString("export-plan"); err == nil && value != "" {
			result.ExportPath = value
		}
	}

	if flag := cmd.Flags().Lookup("import-plan"); flag != nil {
		if value, err := cmd.Flags().GetString("import-plan"); err == nil && value != "" {
			result.ImportPath = value
		}
	}

	if flag := cmd.Flags().Lookup("naming-casing"); flag != nil {
		if value, err := cmd.Flags().GetString("naming-casing"); err == nil && value != "" {
			result.Casing = value
		}
	}

	// --naming-prefix is copied unconditionally (even when it is the empty
	// flag default); the --prefix alias then overrides only when explicitly
	// passed on the command line.
	if flag := cmd.Flags().Lookup("naming-prefix"); flag != nil {
		if value, err := cmd.Flags().GetString("naming-prefix"); err == nil {
			result.Prefix = value
		}
	}
	if flag := cmd.Flags().Lookup("prefix"); flag != nil && flag.Changed {
		if value, err := cmd.Flags().GetString("prefix"); err == nil {
			result.Prefix = value
		}
	}

	if flag := cmd.Flags().Lookup("naming-allow-spaces"); flag != nil {
		if value, err := cmd.Flags().GetBool("naming-allow-spaces"); err == nil {
			result.AllowSpaces = value
		}
	}

	if flag := cmd.Flags().Lookup("naming-keep-order"); flag != nil {
		if value, err := cmd.Flags().GetBool("naming-keep-order"); err == nil {
			result.KeepOriginalOrder = value
		}
	}

	// Clone the banned-token slice so the result owns its storage.
	if flag := cmd.Flags().Lookup("banned"); flag != nil {
		if value, err := cmd.Flags().GetStringSlice("banned"); err == nil && len(value) > 0 {
			result.BannedTokens = append([]string(nil), value...)
		}
	}

	return result
}
|
||||
|
||||
// runAICommand executes the "ai" subcommand end to end:
//
//  1. resolve the traversal scope and the --yes flag,
//  2. normalize policy inputs (casing, prefix, banned tokens),
//  3. collect rename candidates and drop the plan files themselves,
//  4. obtain a rename plan — either loaded from --import-plan or generated
//     by invoking the Genkit client (optionally exporting it),
//  5. validate the plan against the naming policy, map it to a preview,
//     render the preview table, and surface conflicts/warnings,
//  6. when --yes was given and no conflicts remain, apply the plan and
//     record it in the ledger.
//
// The function returns the first error encountered; validation and apply
// conflicts are additionally printed to stderr before returning.
func runAICommand(ctx context.Context, cmd *cobra.Command, options aiCommandOptions) error {
	scope, err := listing.ScopeFromCmd(cmd)
	if err != nil {
		return err
	}

	// getBool is a helper defined elsewhere in this package; presumably it
	// reads the shared --yes flag registered on the root command.
	applyRequested, err := getBool(cmd, "yes")
	if err != nil {
		return err
	}

	options.ImportPath = strings.TrimSpace(options.ImportPath)

	// Normalize and validate the naming policy inputs up front so bad flag
	// values fail before any traversal or network work happens.
	casing, err := normalizeCasing(options.Casing)
	if err != nil {
		return err
	}
	options.Casing = casing
	prefix := strings.TrimSpace(options.Prefix)
	userBanned := sanitizeTokenSlice(options.BannedTokens)
	bannedTerms := mergeBannedTerms(defaultBannedTerms(), userBanned)

	candidates, err := plan.CollectCandidates(ctx, scope)
	if err != nil {
		return err
	}
	// Exclude the export/import plan files so the tool never proposes
	// renaming its own artifacts.
	ignoreSet := buildIgnoreSet(scope.WorkingDir, options.ExportPath, options.ImportPath)
	if len(ignoreSet) > 0 {
		candidates = filterIgnoredCandidates(candidates, ignoreSet)
	}
	if len(candidates) == 0 {
		fmt.Fprintln(cmd.OutOrStdout(), "No candidates found")
		return nil
	}

	// Project the candidates into the lightweight sample shape the prompt
	// builder consumes.
	samples := make([]prompt.SampleCandidate, 0, len(candidates))
	for _, candidate := range candidates {
		samples = append(samples, prompt.SampleCandidate{
			RelativePath: candidate.OriginalPath,
			SizeBytes:    candidate.SizeBytes,
			Depth:        candidate.Depth,
		})
	}

	// Fixed sequencing scheme: zero-padded 3-digit prefix starting at 1,
	// separated by "_" (e.g. "001_name.ext").
	sequence := prompt.SequenceRule{
		Style:     "prefix",
		Width:     3,
		Start:     1,
		Separator: "_",
	}

	// The same policy is expressed twice: once in the prompt's shape and
	// once in the validator's shape. Both receive independent copies of the
	// banned-token slice so neither can mutate the other's list.
	policies := prompt.PolicyConfig{
		Prefix:            prefix,
		Casing:            options.Casing,
		AllowSpaces:       options.AllowSpaces,
		KeepOriginalOrder: options.KeepOriginalOrder,
		ForbiddenTokens:   append([]string(nil), userBanned...),
	}
	validatorPolicy := prompt.NamingPolicyConfig{
		Prefix:            policies.Prefix,
		Casing:            policies.Casing,
		AllowSpaces:       policies.AllowSpaces,
		KeepOriginalOrder: policies.KeepOriginalOrder,
		ForbiddenTokens:   append([]string(nil), policies.ForbiddenTokens...),
	}

	var response prompt.RenameResponse
	var promptHash string
	var model string

	if options.ImportPath != "" {
		// Import path: reuse a previously exported (possibly hand-edited)
		// plan instead of calling the model.
		resp, err := plan.LoadResponse(options.ImportPath)
		if err != nil {
			return err
		}
		response = resp
		promptHash = strings.TrimSpace(resp.PromptHash)
		model = strings.TrimSpace(resp.Model)
		if model == "" {
			model = options.Model
		}
	} else {
		// Generate path: build the prompt, invoke the Genkit client, and
		// optionally export the raw response for later editing.
		builder := prompt.NewBuilder()
		promptPayload, err := builder.Build(prompt.BuildInput{
			WorkingDir:  scope.WorkingDir,
			Samples:     samples,
			TotalCount:  len(candidates),
			Sequence:    sequence,
			Policies:    policies,
			BannedTerms: bannedTerms,
			Metadata: map[string]string{
				"cliVersion": "dev",
			},
		})
		if err != nil {
			return err
		}

		instructions := composeInstructions(sequence, policies, bannedTerms)
		client := genkit.NewClient(genkit.ClientOptions{Model: options.Model})
		invocationResult, err := client.Invoke(ctx, genkit.Invocation{
			Instructions: instructions,
			Prompt:       promptPayload,
			Model:        options.Model,
		})
		if err != nil {
			return err
		}
		response = invocationResult.Response
		promptHash = invocationResult.PromptHash
		model = invocationResult.Response.Model

		if options.ExportPath != "" {
			if err := plan.SaveResponse(options.ExportPath, response); err != nil {
				return err
			}
			fmt.Fprintf(cmd.ErrOrStderr(), "AI plan exported to %s\n", options.ExportPath)
		}
	}

	// Backfill telemetry fields so downstream preview/apply always carry a
	// prompt hash and model name, even for imported plans that omitted them.
	if promptHash == "" {
		if hash, err := plan.ResponseDigest(response); err == nil {
			promptHash = hash
		}
	}
	if model == "" {
		model = options.Model
	}
	response.PromptHash = promptHash
	response.Model = model

	originals := make([]string, 0, len(candidates))
	for _, candidate := range candidates {
		originals = append(originals, candidate.OriginalPath)
	}

	// Validate the plan against the originals and the naming policy. Policy
	// violations are rendered to stderr before the error is propagated.
	validator := plan.NewValidator(originals, validatorPolicy, bannedTerms)
	validationResult, err := validator.Validate(response)
	if err != nil {
		var vErr *plan.ValidationError
		if errors.As(err, &vErr) {
			errorWriter := cmd.ErrOrStderr()
			if len(vErr.PolicyViolations) > 0 {
				messages := make([]output.PolicyViolationMessage, 0, len(vErr.PolicyViolations))
				for _, violation := range vErr.PolicyViolations {
					messages = append(messages, output.PolicyViolationMessage{
						Original: violation.Original,
						Proposed: violation.Proposed,
						Rule:     violation.Rule,
						Message:  violation.Message,
					})
				}
				output.WritePolicyViolations(errorWriter, messages)
			}
		}
		return err
	}

	// Map the validated response onto the candidates to produce the preview
	// (sequence labels, proposed paths, conflicts, warnings).
	previewPlan, err := plan.MapResponse(plan.MapInput{
		Candidates:    candidates,
		SequenceWidth: sequence.Width,
	}, validationResult)
	if err != nil {
		return err
	}
	previewPlan.PromptHash = promptHash
	if previewPlan.Model == "" {
		previewPlan.Model = model
	}

	if err := renderAIPlan(cmd.OutOrStdout(), previewPlan); err != nil {
		return err
	}

	errorWriter := cmd.ErrOrStderr()
	if len(previewPlan.Conflicts) > 0 {
		for _, conflict := range previewPlan.Conflicts {
			fmt.Fprintf(errorWriter, "Conflict (%s): %s %s\n", conflict.Issue, conflict.OriginalPath, conflict.Details)
		}
	}

	// With --debug-genkit the prompt hash is included; otherwise warnings
	// are still surfaced (without the hash) when any exist.
	if options.Debug {
		output.WriteAIPlanDebug(errorWriter, promptHash, previewPlan.Warnings)
	} else if len(previewPlan.Warnings) > 0 {
		output.WriteAIPlanDebug(errorWriter, "", previewPlan.Warnings)
	}

	if options.ImportPath == "" && options.ExportPath != "" {
		// Plan already exported earlier.
	} else if options.ImportPath != "" && options.ExportPath != "" {
		// Re-export an imported plan (now carrying backfilled hash/model).
		if err := plan.SaveResponse(options.ExportPath, response); err != nil {
			return err
		}
		fmt.Fprintf(errorWriter, "AI plan exported to %s\n", options.ExportPath)
	}

	// Preview-only unless --yes was given.
	if !applyRequested {
		return nil
	}

	if len(previewPlan.Conflicts) > 0 {
		return fmt.Errorf("cannot apply AI plan while conflicts remain")
	}

	applyEntry, err := plan.Apply(ctx, plan.ApplyOptions{
		WorkingDir: scope.WorkingDir,
		Candidates: candidates,
		Response:   response,
		Policies:   validatorPolicy,
		PromptHash: promptHash,
	})
	if err != nil {
		var conflictErr plan.ApplyConflictError
		if errors.As(err, &conflictErr) {
			for _, conflict := range conflictErr.Conflicts {
				fmt.Fprintf(errorWriter, "Apply conflict (%s): %s %s\n", conflict.Issue, conflict.OriginalPath, conflict.Details)
			}
		}
		return err
	}

	fmt.Fprintf(cmd.OutOrStdout(), "Applied %d renames. Ledger updated.\n", len(applyEntry.Operations))
	return nil
}
|
||||
|
||||
func renderAIPlan(w io.Writer, preview plan.PreviewPlan) error {
|
||||
table := output.NewAIPlanTable()
|
||||
if err := table.Begin(w); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range preview.Entries {
|
||||
sanitized := "-"
|
||||
if len(entry.SanitizedSegments) > 0 {
|
||||
joined := strings.Join(entry.SanitizedSegments, " ")
|
||||
sanitized = "removed: " + joined
|
||||
}
|
||||
if entry.Notes != "" {
|
||||
if sanitized == "-" {
|
||||
sanitized = entry.Notes
|
||||
} else {
|
||||
sanitized = fmt.Sprintf("%s (%s)", sanitized, entry.Notes)
|
||||
}
|
||||
}
|
||||
row := output.AIPlanRow{
|
||||
Sequence: entry.SequenceLabel,
|
||||
Original: entry.OriginalPath,
|
||||
Proposed: entry.ProposedPath,
|
||||
Sanitized: sanitized,
|
||||
}
|
||||
if err := table.WriteRow(row); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return table.End(w)
|
||||
}
|
||||
|
||||
func composeInstructions(sequence prompt.SequenceRule, policies prompt.PolicyConfig, bannedTerms []string) string {
|
||||
lines := []string{
|
||||
"You are an AI assistant that proposes safe file rename plans.",
|
||||
"Return JSON matching this schema: {\"items\":[{\"original\":string,\"proposed\":string,\"sequence\":number,\"notes\"?:string}],\"warnings\"?:[string]}.",
|
||||
fmt.Sprintf("Use %s numbering with width %d starting at %d and separator %q.", sequence.Style, sequence.Width, sequence.Start, sequence.Separator),
|
||||
"Preserve original file extensions exactly as provided.",
|
||||
fmt.Sprintf("Apply %s casing to filename stems and avoid promotional or banned terms.", policies.Casing),
|
||||
"Ensure proposed names are unique and sequences remain contiguous.",
|
||||
}
|
||||
if policies.Prefix != "" {
|
||||
lines = append(lines, fmt.Sprintf("Every proposed filename must begin with the prefix %q immediately before descriptive text.", policies.Prefix))
|
||||
}
|
||||
if policies.AllowSpaces {
|
||||
lines = append(lines, "Spaces in filenames are permitted when they improve clarity.")
|
||||
} else {
|
||||
lines = append(lines, "Do not include spaces in filenames; use separators consistent with the requested casing style.")
|
||||
}
|
||||
if policies.KeepOriginalOrder {
|
||||
lines = append(lines, "Preserve the original ordering of meaningful words when generating new stems.")
|
||||
}
|
||||
if len(bannedTerms) > 0 {
|
||||
lines = append(lines, fmt.Sprintf("Never include these banned tokens (case-insensitive) in any proposed filename: %s.", strings.Join(bannedTerms, ", ")))
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// normalizeCasing validates a user-supplied casing style and returns its
// canonical lowercase form. An empty or all-whitespace value falls back to
// the default "kebab" style; anything else must match one of the supported
// styles (case-insensitively) or an error naming the allowed values is
// returned.
func normalizeCasing(value string) (string, error) {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return "kebab", nil
	}
	// Membership test over a handful of constants: a switch avoids
	// allocating a fresh identity map on every call.
	switch lower := strings.ToLower(trimmed); lower {
	case "kebab", "snake", "camel", "pascal", "title":
		return lower, nil
	}
	return "", fmt.Errorf("unsupported naming casing %q (allowed: kebab, snake, camel, pascal, title)", value)
}
|
||||
|
||||
// sanitizeTokenSlice normalizes user-supplied banned-token input. Each value
// may itself be a comma-separated list; individual tokens are trimmed,
// lowercased, and deduplicated. The result is sorted for deterministic
// output; nil is returned when no usable token remains.
//
// Fix: the original re-checked the lowercased token for emptiness, but
// strings.ToLower never turns a non-empty string empty, so that branch was
// unreachable and has been removed.
func sanitizeTokenSlice(values []string) []string {
	unique := make(map[string]struct{})
	for _, raw := range values {
		for _, part := range strings.Split(raw, ",") {
			trimmed := strings.TrimSpace(part)
			if trimmed == "" {
				continue
			}
			unique[strings.ToLower(trimmed)] = struct{}{}
		}
	}
	if len(unique) == 0 {
		return nil
	}
	tokens := make([]string, 0, len(unique))
	for token := range unique {
		tokens = append(tokens, token)
	}
	sort.Strings(tokens)
	return tokens
}
|
||||
|
||||
// mergeBannedTerms combines the built-in and user-supplied banned-token
// lists into one sorted, deduplicated, lowercase slice. Blank entries are
// dropped. Both inputs receive identical normalization, so the two loops of
// the original are folded into one.
func mergeBannedTerms(base, extra []string) []string {
	unique := make(map[string]struct{}, len(base)+len(extra))
	for _, list := range [][]string{base, extra} {
		for _, token := range list {
			lower := strings.ToLower(strings.TrimSpace(token))
			if lower == "" {
				continue
			}
			unique[lower] = struct{}{}
		}
	}
	result := make([]string, 0, len(unique))
	for token := range unique {
		result = append(result, token)
	}
	sort.Strings(result)
	return result
}
|
||||
|
||||
func buildIgnoreSet(workingDir string, paths ...string) map[string]struct{} {
|
||||
ignore := make(map[string]struct{})
|
||||
for _, path := range paths {
|
||||
trimmed := strings.TrimSpace(path)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
abs, err := filepath.Abs(trimmed)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
rel, err := filepath.Rel(workingDir, abs)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(rel, "..") {
|
||||
continue
|
||||
}
|
||||
ignore[strings.ToLower(filepath.ToSlash(rel))] = struct{}{}
|
||||
}
|
||||
return ignore
|
||||
}
|
||||
|
||||
func filterIgnoredCandidates(candidates []plan.Candidate, ignore map[string]struct{}) []plan.Candidate {
|
||||
if len(ignore) == 0 {
|
||||
return candidates
|
||||
}
|
||||
filtered := make([]plan.Candidate, 0, len(candidates))
|
||||
for _, cand := range candidates {
|
||||
if _, skip := ignore[strings.ToLower(cand.OriginalPath)]; skip {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, cand)
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
// defaultBannedTerms returns the built-in promotional tokens that
// AI-generated filenames must never contain. A fresh slice is returned on
// every call so callers may modify it freely.
//
// The literal is kept in sorted order, removing the redundant per-call
// sort.Strings the original performed on a constant list.
func defaultBannedTerms() []string {
	return []string{"clickbait", "free", "promo", "sale", "sponsored"}
}
|
||||
|
||||
// init registers the "ai" subcommand on the package-level root command so it
// is available when the CLI starts.
// NOTE(review): NewRootCommand (elsewhere in this package) also calls
// cmd.AddCommand(newAICommand()); confirm the two roots are distinct so the
// command is not registered twice on the same *cobra.Command.
func init() {
	rootCmd.AddCommand(newAICommand())
}
|
||||
@@ -15,8 +15,9 @@ var rootCmd = &cobra.Command{
|
||||
Use: "renamer",
|
||||
Short: "Safe, scriptable batch renaming utility",
|
||||
Long: `Renamer provides preview-first, undoable rename operations for files and directories.
|
||||
Use subcommands like "preview", "rename", and "list" with shared scope flags to target exactly
|
||||
the paths you intend to change.`,
|
||||
Use subcommands like "list", "replace", "ai", and "undo" with shared scope flags to target
|
||||
the paths you intend to change. Each command supports --dry-run previews and ledger-backed undo
|
||||
workflows so you can safely iterate before applying changes.`,
|
||||
}
|
||||
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
@@ -52,6 +53,7 @@ func NewRootCommand() *cobra.Command {
|
||||
cmd.AddCommand(newRegexCommand())
|
||||
cmd.AddCommand(newSequenceCommand())
|
||||
cmd.AddCommand(newUndoCommand())
|
||||
cmd.AddCommand(newAICommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -56,6 +56,9 @@ func newUndoCommand() *cobra.Command {
|
||||
fmt.Fprintf(out, "Template restored to %q\n", template)
|
||||
}
|
||||
}
|
||||
if aiMeta, ok := entry.AIMetadata(); ok {
|
||||
fmt.Fprintf(out, "AI batch restored (model=%s, promptHash=%s, files=%d)\n", aiMeta.Model, aiMeta.PromptHash, aiMeta.BatchSize)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -9,3 +9,4 @@
|
||||
- Document quoting guidance, `--dry-run` / `--yes` behavior, and automation scenarios for replace command.
|
||||
- Add `renamer list` subcommand with shared scope flags and plain/table output formats.
|
||||
- Document global scope flags and hidden-file behavior.
|
||||
- Add `renamer ai` subcommand with export/import workflow, policy enforcement flags, prompt hash telemetry, and ledger metadata for applied plans.
|
||||
|
||||
@@ -120,3 +120,20 @@ renamer extension <source-ext...> <target-ext> [flags]
|
||||
- Preview normalization: `renamer extension .jpeg .JPG .jpg --dry-run`
|
||||
- Apply case-folded extension updates: `renamer extension .yaml .yml .yml --yes --path ./configs`
|
||||
- Include hidden assets recursively: `renamer extension .TMP .tmp --recursive --hidden`
|
||||
|
||||
## AI Command Secrets
|
||||
|
||||
- AI model authentication tokens are loaded from `$HOME/.config/.renamer/<MODEL>_MODEL_AUTH_TOKEN`. The default model token file is `default_MODEL_AUTH_TOKEN`, but any `--genkit-model` override maps to the same naming scheme.
|
||||
- Token files must contain only the raw API key with no extra whitespace; restrictive permissions (owner read/write) are recommended to keep credentials private.
|
||||
|
||||
### AI Command Flags
|
||||
|
||||
- `--genkit-model <id>` overrides the default OpenAI-compatible model used by the embedded Genkit workflow. When omitted, `gpt-4o-mini` is used.
|
||||
- `--debug-genkit` streams prompt/response telemetry (including prompt hashes and warnings) to stderr so you can archive the exchange for auditing.
|
||||
- `--export-plan <path>` writes the exact AI response (prompt hash, model, warnings, and proposed items) to a JSON file. The same file can be edited and re-imported to tweak filenames before applying.
|
||||
- `--import-plan <path>` loads a previously exported or manually curated JSON plan. The CLI re-validates all entries before previewing or applying changes.
|
||||
- `--naming-casing <style>` enforces a casing policy (`kebab`, `snake`, `camel`, `pascal`, `title`). Banned tokens, prefix rules, and spacing requirements are evaluated against the imported or generated plan.
|
||||
- `--naming-prefix`, `--naming-allow-spaces`, `--naming-keep-order`, and `--banned` extend the policy envelope that both the prompt and validator obey.
|
||||
- `--yes` applies the currently loaded plan. Without `--yes`, the command remains in preview mode even when you import a plan.
|
||||
|
||||
> Tip: Run `renamer ai --path ./fixtures --dry-run --export-plan plan.json` to capture the initial draft, edit the JSON file, then `renamer ai --path ./fixtures --import-plan plan.json --yes` to apply the curated result.
|
||||
|
||||
42
go.mod
42
go.mod
@@ -1,9 +1,43 @@
|
||||
module github.com/rogeecn/renamer
|
||||
|
||||
go 1.24.0
|
||||
go 1.24.1
|
||||
|
||||
toolchain go1.24.9
|
||||
|
||||
require (
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/spf13/cobra v1.10.1 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/firebase/genkit/go v1.1.0
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/openai/openai-go v1.8.2
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/spf13/pflag v1.0.9
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/bahlo/generic-list-go v0.2.0 // indirect
|
||||
github.com/buger/jsonparser v1.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/goccy/go-yaml v1.17.1 // indirect
|
||||
github.com/google/dotprompt/go v0.0.0-20251014011017-8d056e027254 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/invopop/jsonschema v0.13.0 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/mbleigh/raymond v0.0.0-20250414171441-6b3a58ab9e0a // indirect
|
||||
github.com/tidwall/gjson v1.18.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/tidwall/sjson v1.2.5 // indirect
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
83
go.sum
83
go.sum
@@ -1,10 +1,93 @@
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
|
||||
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
|
||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/firebase/genkit/go v1.1.0 h1:SQqzQt19gEubvUUCFV98TARFAzD30zT3QhseF3oTKqo=
|
||||
github.com/firebase/genkit/go v1.1.0/go.mod h1:ru1cIuxG1s3HeUjhnadVveDJ1yhinj+j+uUh0f0pyxE=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY=
|
||||
github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
|
||||
github.com/google/dotprompt/go v0.0.0-20251014011017-8d056e027254 h1:okN800+zMJOGHLJCgry+OGzhhtH6YrjQh1rluHmOacE=
|
||||
github.com/google/dotprompt/go v0.0.0-20251014011017-8d056e027254/go.mod h1:k8cjJAQWc//ac/bMnzItyOFbfT01tgRTZGgxELCuxEQ=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
|
||||
github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
|
||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/mbleigh/raymond v0.0.0-20250414171441-6b3a58ab9e0a h1:v2cBA3xWKv2cIOVhnzX/gNgkNXqiHfUgJtA3r61Hf7A=
|
||||
github.com/mbleigh/raymond v0.0.0-20250414171441-6b3a58ab9e0a/go.mod h1:Y6ghKH+ZijXn5d9E7qGGZBmjitx7iitZdQiIW97EpTU=
|
||||
github.com/openai/openai-go v1.8.2 h1:UqSkJ1vCOPUpz9Ka5tS0324EJFEuOvMc+lA/EarJWP8=
|
||||
github.com/openai/openai-go v1.8.2/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
222
internal/ai/config/token_store.go
Normal file
222
internal/ai/config/token_store.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"github.com/joho/godotenv"
|
||||
)
|
||||
|
||||
const (
|
||||
configDirEnvVar = "RENAMER_CONFIG_DIR"
|
||||
defaultConfigRoot = ".renamer"
|
||||
|
||||
modelTokenSuffix = "_MODEL_AUTH_TOKEN"
|
||||
|
||||
defaultEnvFile = ".env"
|
||||
secondaryEnvFile = "tokens.env"
|
||||
errTokenNotFoundFmt = "model token %q not found in %s or the process environment"
|
||||
)
|
||||
|
||||
// TokenProvider resolves API tokens for AI models.
|
||||
type TokenProvider interface {
|
||||
ResolveModelToken(model string) (string, error)
|
||||
}
|
||||
|
||||
// TokenStore loads model authentication tokens from ~/.config/.renamer.
|
||||
type TokenStore struct {
|
||||
configDir string
|
||||
|
||||
once sync.Once
|
||||
values map[string]string
|
||||
err error
|
||||
}
|
||||
|
||||
// NewTokenStore constructs a TokenStore rooted at configDir. When configDir is
|
||||
// empty the default path of `$HOME/.config/.renamer` is used. An environment
|
||||
// override can be supplied via RENAMER_CONFIG_DIR.
|
||||
func NewTokenStore(configDir string) (*TokenStore, error) {
|
||||
root := configDir
|
||||
if root == "" {
|
||||
if override := strings.TrimSpace(os.Getenv(configDirEnvVar)); override != "" {
|
||||
root = override
|
||||
} else {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve user home: %w", err)
|
||||
}
|
||||
root = filepath.Join(home, ".config", defaultConfigRoot)
|
||||
}
|
||||
}
|
||||
|
||||
return &TokenStore{
|
||||
configDir: root,
|
||||
values: make(map[string]string),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ConfigDir returns the directory the token store reads from.
|
||||
func (s *TokenStore) ConfigDir() string {
|
||||
return s.configDir
|
||||
}
|
||||
|
||||
// ResolveModelToken returns the token for the provided model name. Model names
|
||||
// are normalized to match the `<slug>_MODEL_AUTH_TOKEN` convention documented
|
||||
// for the CLI. Environment variables take precedence over file-based tokens.
|
||||
func (s *TokenStore) ResolveModelToken(model string) (string, error) {
|
||||
key := ModelTokenKey(model)
|
||||
return s.lookup(key)
|
||||
}
|
||||
|
||||
// lookup loads the requested key from either the environment or cached tokens.
|
||||
func (s *TokenStore) lookup(key string) (string, error) {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return "", errors.New("token key must not be empty")
|
||||
}
|
||||
|
||||
if val, ok := os.LookupEnv(key); ok && strings.TrimSpace(val) != "" {
|
||||
return strings.TrimSpace(val), nil
|
||||
}
|
||||
|
||||
if err := s.ensureLoaded(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if val, ok := s.values[key]; ok && strings.TrimSpace(val) != "" {
|
||||
return strings.TrimSpace(val), nil
|
||||
}
|
||||
|
||||
path := filepath.Join(s.configDir, key)
|
||||
raw, err := os.ReadFile(path)
|
||||
if err == nil {
|
||||
value := strings.TrimSpace(string(raw))
|
||||
if value != "" {
|
||||
s.values[key] = value
|
||||
return value, nil
|
||||
}
|
||||
} else if !errors.Is(err, fs.ErrNotExist) {
|
||||
return "", fmt.Errorf("read token file %s: %w", path, err)
|
||||
}
|
||||
|
||||
return "", fmt.Errorf(errTokenNotFoundFmt, key, s.configDir)
|
||||
}
|
||||
|
||||
func (s *TokenStore) ensureLoaded() error {
|
||||
s.once.Do(func() {
|
||||
s.err = s.loadEnvFiles()
|
||||
if s.err != nil {
|
||||
return
|
||||
}
|
||||
s.err = s.scanTokenFiles()
|
||||
})
|
||||
return s.err
|
||||
}
|
||||
|
||||
func (s *TokenStore) loadEnvFiles() error {
|
||||
candidates := []string{
|
||||
filepath.Join(s.configDir, defaultEnvFile),
|
||||
filepath.Join(s.configDir, secondaryEnvFile),
|
||||
}
|
||||
|
||||
for _, path := range candidates {
|
||||
envMap, err := godotenv.Read(path)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("load %s: %w", path, err)
|
||||
}
|
||||
for k, v := range envMap {
|
||||
if strings.TrimSpace(k) == "" || strings.TrimSpace(v) == "" {
|
||||
continue
|
||||
}
|
||||
s.values[k] = strings.TrimSpace(v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *TokenStore) scanTokenFiles() error {
|
||||
entries, err := os.ReadDir(s.configDir)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("scan %s: %w", s.configDir, err)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entry.Name()
|
||||
path := filepath.Join(s.configDir, name)
|
||||
|
||||
content, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
|
||||
data := strings.TrimSpace(string(content))
|
||||
if data == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if parsed, perr := godotenv.Unmarshal(data); perr == nil && len(parsed) > 0 {
|
||||
for k, v := range parsed {
|
||||
if strings.TrimSpace(k) == "" || strings.TrimSpace(v) == "" {
|
||||
continue
|
||||
}
|
||||
s.values[k] = strings.TrimSpace(v)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
s.values[name] = data
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ModelTokenKey derives the token filename/environment variable for the given
|
||||
// model name following the `<slug>_MODEL_AUTH_TOKEN` convention. When model is
|
||||
// empty the default slug `default` is used.
|
||||
func ModelTokenKey(model string) string {
|
||||
slug := slugify(model)
|
||||
if slug == "" {
|
||||
slug = "default"
|
||||
}
|
||||
return slug + modelTokenSuffix
|
||||
}
|
||||
|
||||
func slugify(input string) string {
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(len(input))
|
||||
|
||||
lastUnderscore := false
|
||||
for _, r := range input {
|
||||
switch {
|
||||
case unicode.IsLetter(r) || unicode.IsDigit(r):
|
||||
b.WriteRune(unicode.ToLower(r))
|
||||
lastUnderscore = false
|
||||
default:
|
||||
if !lastUnderscore && b.Len() > 0 {
|
||||
b.WriteByte('_')
|
||||
lastUnderscore = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Trim(b.String(), "_")
|
||||
}
|
||||
158
internal/ai/genkit/client.go
Normal file
158
internal/ai/genkit/client.go
Normal file
@@ -0,0 +1,158 @@
|
||||
package genkit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
genaigo "github.com/firebase/genkit/go/ai"
|
||||
"github.com/openai/openai-go/option"
|
||||
|
||||
aiconfig "github.com/rogeecn/renamer/internal/ai/config"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// WorkflowRunner executes a Genkit request and returns the structured response.
|
||||
type WorkflowRunner interface {
|
||||
Run(ctx context.Context, req Request) (Result, error)
|
||||
}
|
||||
|
||||
// WorkflowFactory constructs workflow runners.
|
||||
type WorkflowFactory func(ctx context.Context, opts Options) (WorkflowRunner, error)
|
||||
|
||||
var (
|
||||
factoryMu sync.RWMutex
|
||||
defaultFactory = func(ctx context.Context, opts Options) (WorkflowRunner, error) {
|
||||
return NewWorkflow(ctx, opts)
|
||||
}
|
||||
currentFactory WorkflowFactory = defaultFactory
|
||||
)
|
||||
|
||||
// OverrideWorkflowFactory allows tests to supply custom workflow implementations.
|
||||
func OverrideWorkflowFactory(factory WorkflowFactory) {
|
||||
factoryMu.Lock()
|
||||
defer factoryMu.Unlock()
|
||||
if factory == nil {
|
||||
currentFactory = defaultFactory
|
||||
return
|
||||
}
|
||||
currentFactory = factory
|
||||
}
|
||||
|
||||
// ResetWorkflowFactory restores the default workflow constructor.
|
||||
func ResetWorkflowFactory() {
|
||||
OverrideWorkflowFactory(nil)
|
||||
}
|
||||
|
||||
func getWorkflowFactory() WorkflowFactory {
|
||||
factoryMu.RLock()
|
||||
defer factoryMu.RUnlock()
|
||||
return currentFactory
|
||||
}
|
||||
|
||||
// ClientOptions configure the Genkit client.
|
||||
type ClientOptions struct {
|
||||
Model string
|
||||
TokenProvider aiconfig.TokenProvider
|
||||
RequestOptions []option.RequestOption
|
||||
}
|
||||
|
||||
// Client orchestrates prompt execution against the configured workflow.
|
||||
type Client struct {
|
||||
model string
|
||||
tokenProvider aiconfig.TokenProvider
|
||||
requestOptions []option.RequestOption
|
||||
}
|
||||
|
||||
// NewClient constructs a client with optional overrides.
|
||||
func NewClient(opts ClientOptions) *Client {
|
||||
model := strings.TrimSpace(opts.Model)
|
||||
if model == "" {
|
||||
model = DefaultModelName
|
||||
}
|
||||
return &Client{
|
||||
model: model,
|
||||
tokenProvider: opts.TokenProvider,
|
||||
requestOptions: append([]option.RequestOption(nil), opts.RequestOptions...),
|
||||
}
|
||||
}
|
||||
|
||||
// Invocation describes a single Genkit call.
|
||||
type Invocation struct {
|
||||
Instructions string
|
||||
Prompt prompt.RenamePrompt
|
||||
Model string
|
||||
}
|
||||
|
||||
// InvocationResult carries the parsed response alongside telemetry.
|
||||
type InvocationResult struct {
|
||||
PromptHash string
|
||||
Model string
|
||||
Response prompt.RenameResponse
|
||||
ModelResponse *genaigo.ModelResponse
|
||||
PromptJSON []byte
|
||||
}
|
||||
|
||||
// Invoke executes the workflow and returns the structured response.
|
||||
func (c *Client) Invoke(ctx context.Context, inv Invocation) (InvocationResult, error) {
|
||||
model := strings.TrimSpace(inv.Model)
|
||||
if model == "" {
|
||||
model = c.model
|
||||
}
|
||||
if model == "" {
|
||||
model = DefaultModelName
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(inv.Prompt)
|
||||
if err != nil {
|
||||
return InvocationResult{}, fmt.Errorf("marshal prompt payload: %w", err)
|
||||
}
|
||||
|
||||
factory := getWorkflowFactory()
|
||||
runner, err := factory(ctx, Options{
|
||||
Model: model,
|
||||
TokenProvider: c.tokenProvider,
|
||||
RequestOptions: c.requestOptions,
|
||||
})
|
||||
if err != nil {
|
||||
return InvocationResult{}, err
|
||||
}
|
||||
|
||||
result, err := runner.Run(ctx, Request{
|
||||
Instructions: inv.Instructions,
|
||||
Payload: inv.Prompt,
|
||||
})
|
||||
if err != nil {
|
||||
return InvocationResult{}, err
|
||||
}
|
||||
|
||||
if strings.TrimSpace(result.Response.Model) == "" {
|
||||
result.Response.Model = model
|
||||
}
|
||||
|
||||
promptHash := hashPrompt(inv.Instructions, payload)
|
||||
if strings.TrimSpace(result.Response.PromptHash) == "" {
|
||||
result.Response.PromptHash = promptHash
|
||||
}
|
||||
|
||||
return InvocationResult{
|
||||
PromptHash: promptHash,
|
||||
Model: result.Response.Model,
|
||||
Response: result.Response,
|
||||
ModelResponse: result.ModelResponse,
|
||||
PromptJSON: payload,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func hashPrompt(instructions string, payload []byte) string {
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(strings.TrimSpace(instructions)))
|
||||
hasher.Write([]byte{'\n'})
|
||||
hasher.Write(payload)
|
||||
sum := hasher.Sum(nil)
|
||||
return hex.EncodeToString(sum)
|
||||
}
|
||||
3
internal/ai/genkit/doc.go
Normal file
3
internal/ai/genkit/doc.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package genkit
|
||||
|
||||
// Package genkit integrates the Google Genkit workflow with the CLI.
|
||||
166
internal/ai/genkit/workflow.go
Normal file
166
internal/ai/genkit/workflow.go
Normal file
@@ -0,0 +1,166 @@
|
||||
package genkit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/firebase/genkit/go/ai"
|
||||
gogenkit "github.com/firebase/genkit/go/genkit"
|
||||
oai "github.com/firebase/genkit/go/plugins/compat_oai/openai"
|
||||
"github.com/openai/openai-go/option"
|
||||
|
||||
aiconfig "github.com/rogeecn/renamer/internal/ai/config"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultModelName = "gpt-4o-mini"
|
||||
// DefaultModelName exposes the default model identifier used by the CLI.
|
||||
DefaultModelName = defaultModelName
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrMissingToken indicates the workflow could not locate a model token.
|
||||
ErrMissingToken = errors.New("genkit workflow: model token not available")
|
||||
// ErrMissingInstructions indicates that no system instructions were provided for a run.
|
||||
ErrMissingInstructions = errors.New("genkit workflow: instructions are required")
|
||||
)
|
||||
|
||||
// DataGenerator executes the Genkit request and decodes the structured response.
|
||||
type DataGenerator func(ctx context.Context, g *gogenkit.Genkit, opts ...ai.GenerateOption) (*prompt.RenameResponse, *ai.ModelResponse, error)
|
||||
|
||||
// Options configure a Workflow instance.
|
||||
type Options struct {
|
||||
Model string
|
||||
TokenProvider aiconfig.TokenProvider
|
||||
RequestOptions []option.RequestOption
|
||||
Generator DataGenerator
|
||||
}
|
||||
|
||||
// Request captures the input necessary to execute the Genkit workflow.
|
||||
type Request struct {
|
||||
Instructions string
|
||||
Payload prompt.RenamePrompt
|
||||
}
|
||||
|
||||
// Result bundles the typed response together with the raw Genkit metadata.
|
||||
type Result struct {
|
||||
Response prompt.RenameResponse
|
||||
ModelResponse *ai.ModelResponse
|
||||
}
|
||||
|
||||
// Workflow orchestrates execution of the Genkit rename pipeline.
|
||||
type Workflow struct {
|
||||
modelName string
|
||||
genkit *gogenkit.Genkit
|
||||
model ai.Model
|
||||
generate DataGenerator
|
||||
}
|
||||
|
||||
// NewWorkflow instantiates a Genkit workflow for the preferred model. When no
|
||||
// model is provided it defaults to gpt-4o-mini. The workflow requires a token
|
||||
// provider capable of resolving `<model>_MODEL_AUTH_TOKEN` secrets.
|
||||
func NewWorkflow(ctx context.Context, opts Options) (*Workflow, error) {
|
||||
modelName := strings.TrimSpace(opts.Model)
|
||||
if modelName == "" {
|
||||
modelName = defaultModelName
|
||||
}
|
||||
|
||||
token, err := resolveToken(opts.TokenProvider, modelName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if strings.TrimSpace(token) == "" {
|
||||
return nil, fmt.Errorf("%w for %q", ErrMissingToken, modelName)
|
||||
}
|
||||
|
||||
plugin := &oai.OpenAI{
|
||||
APIKey: token,
|
||||
Opts: opts.RequestOptions,
|
||||
}
|
||||
|
||||
g := gogenkit.Init(ctx, gogenkit.WithPlugins(plugin))
|
||||
model := plugin.Model(g, modelName)
|
||||
|
||||
generator := opts.Generator
|
||||
if generator == nil {
|
||||
generator = func(ctx context.Context, g *gogenkit.Genkit, opts ...ai.GenerateOption) (*prompt.RenameResponse, *ai.ModelResponse, error) {
|
||||
return gogenkit.GenerateData[prompt.RenameResponse](ctx, g, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
return &Workflow{
|
||||
modelName: modelName,
|
||||
genkit: g,
|
||||
model: model,
|
||||
generate: generator,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Run executes the workflow with the provided request and decodes the response
|
||||
// into the shared RenameResponse structure.
|
||||
func (w *Workflow) Run(ctx context.Context, req Request) (Result, error) {
|
||||
if w == nil {
|
||||
return Result{}, errors.New("genkit workflow: nil receiver")
|
||||
}
|
||||
if strings.TrimSpace(req.Instructions) == "" {
|
||||
return Result{}, ErrMissingInstructions
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(req.Payload)
|
||||
if err != nil {
|
||||
return Result{}, fmt.Errorf("marshal workflow payload: %w", err)
|
||||
}
|
||||
|
||||
options := []ai.GenerateOption{
|
||||
ai.WithModel(w.model),
|
||||
ai.WithSystem(req.Instructions),
|
||||
ai.WithPrompt(string(payload)),
|
||||
}
|
||||
|
||||
response, raw, err := w.generate(ctx, w.genkit, options...)
|
||||
if err != nil {
|
||||
return Result{}, fmt.Errorf("genkit generate: %w", err)
|
||||
}
|
||||
|
||||
return Result{
|
||||
Response: deref(response),
|
||||
ModelResponse: raw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func resolveToken(provider aiconfig.TokenProvider, model string) (string, error) {
|
||||
if provider != nil {
|
||||
if token, err := provider.ResolveModelToken(model); err == nil && strings.TrimSpace(token) != "" {
|
||||
return token, nil
|
||||
} else if err != nil {
|
||||
return "", fmt.Errorf("resolve model token: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if direct := strings.TrimSpace(os.Getenv(aiconfig.ModelTokenKey(model))); direct != "" {
|
||||
return direct, nil
|
||||
}
|
||||
|
||||
store, err := aiconfig.NewTokenStore("")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
token, err := store.ResolveModelToken(model)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func deref(resp *prompt.RenameResponse) prompt.RenameResponse {
|
||||
if resp == nil {
|
||||
return prompt.RenameResponse{}
|
||||
}
|
||||
return *resp
|
||||
}
|
||||
250
internal/ai/plan/apply.go
Normal file
250
internal/ai/plan/apply.go
Normal file
@@ -0,0 +1,250 @@
|
||||
package plan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
"github.com/rogeecn/renamer/internal/history"
|
||||
)
|
||||
|
||||
// ApplyOptions describe the data required to apply an AI rename plan.
|
||||
type ApplyOptions struct {
|
||||
WorkingDir string
|
||||
Candidates []Candidate
|
||||
Response prompt.RenameResponse
|
||||
Policies prompt.NamingPolicyConfig
|
||||
PromptHash string
|
||||
}
|
||||
|
||||
// Apply executes the AI rename plan and records the outcome in the ledger.
|
||||
func Apply(ctx context.Context, opts ApplyOptions) (history.Entry, error) {
|
||||
entry := history.Entry{Command: "ai"}
|
||||
|
||||
if len(opts.Response.Items) == 0 {
|
||||
return entry, errors.New("ai apply: no items to apply")
|
||||
}
|
||||
|
||||
candidateMap := make(map[string]Candidate, len(opts.Candidates))
|
||||
for _, cand := range opts.Candidates {
|
||||
key := strings.ToLower(strings.TrimSpace(cand.OriginalPath))
|
||||
candidateMap[key] = cand
|
||||
}
|
||||
|
||||
type operation struct {
|
||||
sourceRel string
|
||||
targetRel string
|
||||
sourceAbs string
|
||||
targetAbs string
|
||||
depth int
|
||||
}
|
||||
|
||||
ops := make([]operation, 0, len(opts.Response.Items))
|
||||
seenTargets := make(map[string]string)
|
||||
|
||||
conflicts := make([]Conflict, 0)
|
||||
|
||||
for _, item := range opts.Response.Items {
|
||||
key := strings.ToLower(strings.TrimSpace(item.Original))
|
||||
cand, ok := candidateMap[key]
|
||||
if !ok {
|
||||
conflicts = append(conflicts, Conflict{
|
||||
OriginalPath: item.Original,
|
||||
Issue: "missing_candidate",
|
||||
Details: "original file not found in current scope",
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
target := strings.TrimSpace(item.Proposed)
|
||||
if target == "" {
|
||||
conflicts = append(conflicts, Conflict{
|
||||
OriginalPath: item.Original,
|
||||
Issue: "empty_target",
|
||||
Details: "proposed name cannot be empty",
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
normalizedTarget := filepath.ToSlash(filepath.Clean(target))
|
||||
if strings.HasPrefix(normalizedTarget, "../") {
|
||||
conflicts = append(conflicts, Conflict{
|
||||
OriginalPath: item.Original,
|
||||
Issue: "unsafe_target",
|
||||
Details: "proposed path escapes the working directory",
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
targetKey := strings.ToLower(normalizedTarget)
|
||||
if existing, exists := seenTargets[targetKey]; exists && existing != item.Original {
|
||||
conflicts = append(conflicts, Conflict{
|
||||
OriginalPath: item.Original,
|
||||
Issue: "duplicate_target",
|
||||
Details: fmt.Sprintf("target %q reused", normalizedTarget),
|
||||
})
|
||||
continue
|
||||
}
|
||||
seenTargets[targetKey] = item.Original
|
||||
|
||||
sourceRel := filepath.ToSlash(cand.OriginalPath)
|
||||
sourceAbs := filepath.Join(opts.WorkingDir, filepath.FromSlash(sourceRel))
|
||||
targetAbs := filepath.Join(opts.WorkingDir, filepath.FromSlash(normalizedTarget))
|
||||
|
||||
if sameFile, err := isSameFile(sourceAbs, targetAbs); err != nil {
|
||||
return history.Entry{}, err
|
||||
} else if sameFile {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := os.Stat(targetAbs); err == nil {
|
||||
conflicts = append(conflicts, Conflict{
|
||||
OriginalPath: item.Original,
|
||||
Issue: "target_exists",
|
||||
Details: fmt.Sprintf("target %q already exists", normalizedTarget),
|
||||
})
|
||||
continue
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
return history.Entry{}, err
|
||||
}
|
||||
|
||||
op := operation{
|
||||
sourceRel: sourceRel,
|
||||
targetRel: normalizedTarget,
|
||||
sourceAbs: sourceAbs,
|
||||
targetAbs: targetAbs,
|
||||
depth: cand.Depth,
|
||||
}
|
||||
ops = append(ops, op)
|
||||
}
|
||||
|
||||
if len(conflicts) > 0 {
|
||||
return history.Entry{}, ApplyConflictError{Conflicts: conflicts}
|
||||
}
|
||||
|
||||
if len(ops) == 0 {
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
sort.SliceStable(ops, func(i, j int) bool {
|
||||
return ops[i].depth > ops[j].depth
|
||||
})
|
||||
|
||||
done := make([]history.Operation, 0, len(ops))
|
||||
|
||||
revert := func() error {
|
||||
for i := len(done) - 1; i >= 0; i-- {
|
||||
op := done[i]
|
||||
src := filepath.Join(opts.WorkingDir, filepath.FromSlash(op.To))
|
||||
dst := filepath.Join(opts.WorkingDir, filepath.FromSlash(op.From))
|
||||
if err := os.Rename(src, dst); err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
if err := ctx.Err(); err != nil {
|
||||
_ = revert()
|
||||
return history.Entry{}, err
|
||||
}
|
||||
|
||||
if dir := filepath.Dir(op.targetAbs); dir != "" {
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
_ = revert()
|
||||
return history.Entry{}, err
|
||||
}
|
||||
}
|
||||
if err := os.Rename(op.sourceAbs, op.targetAbs); err != nil {
|
||||
_ = revert()
|
||||
return history.Entry{}, err
|
||||
}
|
||||
|
||||
done = append(done, history.Operation{
|
||||
From: op.sourceRel,
|
||||
To: op.targetRel,
|
||||
})
|
||||
}
|
||||
|
||||
if len(done) == 0 {
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
entry.Operations = done
|
||||
|
||||
aiMetadata := history.AIMetadata{
|
||||
PromptHash: opts.PromptHash,
|
||||
Model: opts.Response.Model,
|
||||
Policies: prompt.NamingPolicyConfig{
|
||||
Prefix: opts.Policies.Prefix,
|
||||
Casing: opts.Policies.Casing,
|
||||
AllowSpaces: opts.Policies.AllowSpaces,
|
||||
KeepOriginalOrder: opts.Policies.KeepOriginalOrder,
|
||||
ForbiddenTokens: append([]string(nil), opts.Policies.ForbiddenTokens...),
|
||||
},
|
||||
BatchSize: len(done),
|
||||
}
|
||||
|
||||
if hash, err := ResponseDigest(opts.Response); err == nil {
|
||||
aiMetadata.ResponseHash = hash
|
||||
}
|
||||
|
||||
entry.AttachAIMetadata(aiMetadata)
|
||||
|
||||
if err := history.Append(opts.WorkingDir, entry); err != nil {
|
||||
_ = revert()
|
||||
return history.Entry{}, err
|
||||
}
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// ApplyConflictError signals that the plan contained conflicts that block apply.
|
||||
type ApplyConflictError struct {
|
||||
Conflicts []Conflict
|
||||
}
|
||||
|
||||
func (e ApplyConflictError) Error() string {
|
||||
if len(e.Conflicts) == 0 {
|
||||
return "ai apply: conflicts detected"
|
||||
}
|
||||
return fmt.Sprintf("ai apply: %d conflicts detected", len(e.Conflicts))
|
||||
}
|
||||
|
||||
// ResponseDigest returns a hash of the AI response payload for ledger metadata.
|
||||
func ResponseDigest(resp prompt.RenameResponse) (string, error) {
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hashBytes(data), nil
|
||||
}
|
||||
|
||||
// hashBytes returns the lowercase hex encoding of the SHA-256 digest of data.
func hashBytes(data []byte) string {
	digest := sha256.Sum256(data)
	return hex.EncodeToString(digest[:])
}
|
||||
|
||||
// isSameFile reports whether paths a and b refer to the same underlying file.
// A missing b is treated as "not the same" rather than an error, while a
// missing (or unreadable) a is surfaced as an error.
func isSameFile(a, b string) (bool, error) {
	infoA, err := os.Stat(a)
	if err != nil {
		return false, err
	}
	infoB, statErr := os.Stat(b)
	switch {
	case statErr == nil:
		return os.SameFile(infoA, infoB), nil
	case errors.Is(statErr, os.ErrNotExist):
		return false, nil
	default:
		return false, statErr
	}
}
|
||||
67
internal/ai/plan/conflicts.go
Normal file
67
internal/ai/plan/conflicts.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package plan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// Conflict describes an issue detected in an AI rename plan.
type Conflict struct {
	// OriginalPath is the source path of the offending entry. It is empty
	// for plan-wide issues such as sequence gaps.
	OriginalPath string
	// Issue is a machine-readable code, e.g. "duplicate_target" or "sequence_gap".
	Issue string
	// Details is a human-readable explanation of the conflict.
	Details string
}
|
||||
|
||||
func detectConflicts(items []prompt.RenameItem) []Conflict {
|
||||
conflicts := make([]Conflict, 0)
|
||||
|
||||
if len(items) == 0 {
|
||||
return conflicts
|
||||
}
|
||||
|
||||
targets := make(map[string][]prompt.RenameItem)
|
||||
sequences := make([]int, 0, len(items))
|
||||
|
||||
for _, item := range items {
|
||||
key := strings.ToLower(strings.TrimSpace(item.Proposed))
|
||||
if key != "" {
|
||||
targets[key] = append(targets[key], item)
|
||||
}
|
||||
if item.Sequence > 0 {
|
||||
sequences = append(sequences, item.Sequence)
|
||||
}
|
||||
}
|
||||
|
||||
for _, entries := range targets {
|
||||
if len(entries) <= 1 {
|
||||
continue
|
||||
}
|
||||
for _, entry := range entries {
|
||||
conflicts = append(conflicts, Conflict{
|
||||
OriginalPath: entry.Original,
|
||||
Issue: "duplicate_target",
|
||||
Details: fmt.Sprintf("target %q is used by multiple entries", entries[0].Proposed),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(sequences) > 0 {
|
||||
sort.Ints(sequences)
|
||||
expected := 1
|
||||
for _, seq := range sequences {
|
||||
if seq != expected {
|
||||
conflicts = append(conflicts, Conflict{
|
||||
Issue: "sequence_gap",
|
||||
Details: fmt.Sprintf("expected sequence %d but found %d", expected, seq),
|
||||
})
|
||||
expected = seq
|
||||
}
|
||||
expected++
|
||||
}
|
||||
}
|
||||
|
||||
return conflicts
|
||||
}
|
||||
3
internal/ai/plan/doc.go
Normal file
3
internal/ai/plan/doc.go
Normal file
@@ -0,0 +1,3 @@
|
||||
// Package plan handles AI rename plan validation, mapping, and persistence helpers.
package plan
|
||||
39
internal/ai/plan/editor.go
Normal file
39
internal/ai/plan/editor.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package plan
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// SaveResponse writes the AI rename response to disk for later editing.
|
||||
func SaveResponse(path string, resp prompt.RenameResponse) error {
|
||||
data, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal ai plan: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(path, append(data, '\n'), 0o644); err != nil {
|
||||
return fmt.Errorf("write ai plan %s: %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadResponse reads an edited AI rename response from disk.
|
||||
func LoadResponse(path string) (prompt.RenameResponse, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return prompt.RenameResponse{}, fmt.Errorf("plan file %s not found", path)
|
||||
}
|
||||
return prompt.RenameResponse{}, fmt.Errorf("read plan file %s: %w", path, err)
|
||||
}
|
||||
var resp prompt.RenameResponse
|
||||
if err := json.Unmarshal(data, &resp); err != nil {
|
||||
return prompt.RenameResponse{}, fmt.Errorf("parse plan file %s: %w", path, err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
190
internal/ai/plan/mapper.go
Normal file
190
internal/ai/plan/mapper.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package plan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Candidate represents a file considered for AI renaming.
type Candidate struct {
	OriginalPath string // slash-separated path relative to the working directory
	SizeBytes    int64  // file size as reported by the filesystem
	Depth        int    // traversal depth at which the file was found
	Extension    string // extension including the leading dot, e.g. ".txt"
}

// MapInput configures the mapping behaviour.
type MapInput struct {
	Candidates    []Candidate // files in scope
	SequenceWidth int         // zero-pad width for sequence labels; <=0 defaults to 3
}

// PreviewPlan aggregates entries ready for preview rendering.
type PreviewPlan struct {
	Entries    []PreviewEntry // one row per candidate
	Warnings   []string       // warnings passed through from the validated response
	PromptHash string         // hash of the prompt that produced the plan
	Model      string         // model identifier reported in the response
	Conflicts  []Conflict     // duplicate-target and sequence-gap issues
}

// PreviewEntry is a single row in the preview table.
type PreviewEntry struct {
	Sequence          int      // AI-assigned ordering; <=0 means unnumbered
	SequenceLabel     string   // zero-padded rendering of Sequence ("" when non-positive)
	OriginalPath      string   // path as supplied by the candidate
	ProposedPath      string   // AI-proposed replacement path
	SanitizedSegments []string // original-name tokens absent from the proposal
	Notes             string   // free-form notes from the AI response
}
|
||||
|
||||
// MapResponse converts a validated response into a preview plan.
|
||||
func MapResponse(input MapInput, validation ValidationResult) (PreviewPlan, error) {
|
||||
if input.SequenceWidth <= 0 {
|
||||
input.SequenceWidth = 3
|
||||
}
|
||||
|
||||
itemByOriginal := make(map[string]struct {
|
||||
item promptRenameItem
|
||||
}, len(validation.Items))
|
||||
for _, item := range validation.Items {
|
||||
key := normalizePath(item.Original)
|
||||
itemByOriginal[key] = struct{ item promptRenameItem }{item: promptRenameItem{
|
||||
Original: item.Original,
|
||||
Proposed: item.Proposed,
|
||||
Sequence: item.Sequence,
|
||||
Notes: item.Notes,
|
||||
}}
|
||||
}
|
||||
|
||||
entries := make([]PreviewEntry, 0, len(input.Candidates))
|
||||
for _, candidate := range input.Candidates {
|
||||
key := normalizePath(candidate.OriginalPath)
|
||||
entryData, ok := itemByOriginal[key]
|
||||
if !ok {
|
||||
return PreviewPlan{}, fmt.Errorf("ai plan: missing response for %s", candidate.OriginalPath)
|
||||
}
|
||||
|
||||
item := entryData.item
|
||||
label := formatSequence(item.Sequence, input.SequenceWidth)
|
||||
sanitized := computeSanitizedSegments(candidate.OriginalPath, item.Proposed)
|
||||
|
||||
entries = append(entries, PreviewEntry{
|
||||
Sequence: item.Sequence,
|
||||
SequenceLabel: label,
|
||||
OriginalPath: candidate.OriginalPath,
|
||||
ProposedPath: item.Proposed,
|
||||
SanitizedSegments: sanitized,
|
||||
Notes: item.Notes,
|
||||
})
|
||||
}
|
||||
|
||||
return PreviewPlan{
|
||||
Entries: entries,
|
||||
Warnings: append([]string(nil), validation.Warnings...),
|
||||
PromptHash: validation.PromptHash,
|
||||
Model: validation.Model,
|
||||
Conflicts: detectConflicts(validation.Items),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// promptRenameItem is a local, flattened copy of a validated response item,
// decoupling the preview mapping from the prompt package's struct layout.
type promptRenameItem struct {
	Original string // original relative path from the response
	Proposed string // AI-proposed replacement path
	Sequence int    // AI-assigned sequence number; <=0 means unnumbered
	Notes    string // free-form notes from the AI response
}
|
||||
|
||||
// formatSequence renders seq zero-padded to width digits; non-positive
// sequences produce an empty label. Values wider than width keep all their
// digits: fmt's "%0*d" pads but never truncates, so the original post-check
// comparing label length against the digit count was unreachable and has
// been removed.
func formatSequence(seq, width int) string {
	if seq <= 0 {
		return ""
	}
	return fmt.Sprintf("%0*d", width, seq)
}
|
||||
|
||||
// normalizePath converts backslashes to forward slashes and trims surrounding
// whitespace so paths compare consistently across platforms.
func normalizePath(path string) string {
	slashed := strings.ReplaceAll(path, "\\", "/")
	return strings.TrimSpace(slashed)
}
|
||||
|
||||
func computeSanitizedSegments(original, proposed string) []string {
|
||||
origStem := stem(original)
|
||||
propStem := stem(proposed)
|
||||
|
||||
origTokens := tokenize(origStem)
|
||||
propTokens := make(map[string]struct{}, len(origTokens))
|
||||
for _, token := range tokenize(propStem) {
|
||||
propTokens[token] = struct{}{}
|
||||
}
|
||||
|
||||
var sanitized []string
|
||||
seen := make(map[string]struct{})
|
||||
for _, token := range origTokens {
|
||||
if _, ok := propTokens[token]; ok {
|
||||
continue
|
||||
}
|
||||
if _, already := seen[token]; already {
|
||||
continue
|
||||
}
|
||||
if isNumericToken(token) {
|
||||
continue
|
||||
}
|
||||
seen[token] = struct{}{}
|
||||
sanitized = append(sanitized, token)
|
||||
}
|
||||
if len(sanitized) == 0 {
|
||||
return nil
|
||||
}
|
||||
sort.Strings(sanitized)
|
||||
return sanitized
|
||||
}
|
||||
|
||||
// stem returns the base name of path with its final extension removed.
func stem(path string) string {
	base := filepath.Base(path)
	return strings.TrimSuffix(base, filepath.Ext(base))
}
|
||||
|
||||
// tokenize splits value on any rune that is not an ASCII letter or digit and
// lowercases the resulting fields. The returned slice is never nil.
func tokenize(value string) []string {
	isSeparator := func(r rune) bool {
		switch {
		case r >= '0' && r <= '9':
			return false
		case r >= 'a' && r <= 'z':
			return false
		case r >= 'A' && r <= 'Z':
			return false
		default:
			return true
		}
	}

	fields := strings.FieldsFunc(value, isSeparator)
	tokens := make([]string, 0, len(fields))
	for _, field := range fields {
		if lowered := strings.ToLower(field); lowered != "" {
			tokens = append(tokens, lowered)
		}
	}
	return tokens
}
|
||||
|
||||
// isNumericToken reports whether token is non-empty and consists solely of
// ASCII digits.
func isNumericToken(token string) bool {
	if len(token) == 0 {
		return false
	}
	for _, r := range token {
		if !('0' <= r && r <= '9') {
			return false
		}
	}
	return true
}
|
||||
76
internal/ai/plan/scope.go
Normal file
76
internal/ai/plan/scope.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package plan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rogeecn/renamer/internal/listing"
|
||||
"github.com/rogeecn/renamer/internal/traversal"
|
||||
)
|
||||
|
||||
// CollectCandidates walks the scope described by req and returns eligible file candidates.
//
// Directories are never candidates; files are optionally filtered by the
// request's extension list (compared lowercased). The walk checks ctx between
// entries so cancellation aborts promptly. Returns an error for a nil or
// invalid request, or if the walker/entry inspection fails.
func CollectCandidates(ctx context.Context, req *listing.ListingRequest) ([]Candidate, error) {
	if req == nil {
		return nil, errors.New("collect candidates: request cannot be nil")
	}
	if err := req.Validate(); err != nil {
		return nil, err
	}

	w := traversal.NewWalker()
	// Build a set for O(1) extension membership checks. Entries appear to be
	// matched against lowercased ".ext" values below — presumably
	// req.Validate normalizes them to that form; TODO confirm.
	extensions := make(map[string]struct{}, len(req.Extensions))
	for _, ext := range req.Extensions {
		extensions[ext] = struct{}{}
	}

	candidates := make([]Candidate, 0)

	err := w.Walk(
		req.WorkingDir,
		req.Recursive,
		false, // directories are not considered candidates
		req.IncludeHidden,
		req.MaxDepth,
		func(relPath string, entry fs.DirEntry, depth int) error {
			// Non-blocking cancellation check per visited entry.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}

			if entry.IsDir() {
				return nil
			}

			relSlash := filepath.ToSlash(relPath)
			// Filter case-insensitively on the extension.
			ext := strings.ToLower(filepath.Ext(entry.Name()))
			if len(extensions) > 0 {
				if _, match := extensions[ext]; !match {
					return nil
				}
			}

			info, err := entry.Info()
			if err != nil {
				return err
			}

			candidates = append(candidates, Candidate{
				OriginalPath: relSlash,
				SizeBytes:    info.Size(),
				Depth:        depth,
				// Note: stored with original casing, unlike the lowercased
				// value used for filtering above.
				Extension: filepath.Ext(entry.Name()),
			})

			return nil
		},
	)
	if err != nil {
		return nil, err
	}

	return candidates, nil
}
|
||||
423
internal/ai/plan/validator.go
Normal file
423
internal/ai/plan/validator.go
Normal file
@@ -0,0 +1,423 @@
|
||||
package plan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// Validator checks the AI response for completeness and uniqueness rules.
type Validator struct {
	expected    []string                  // deduplicated, trimmed originals in first-seen order
	expectedSet map[string]struct{}       // membership index over expected
	policies    prompt.NamingPolicyConfig // normalized policies (casing lowered, prefix trimmed)
	bannedSet   map[string]struct{}       // lowercased banned tokens
}

// ValidationResult captures the successfully decoded response data.
type ValidationResult struct {
	Items      []prompt.RenameItem // cloned copy of the response items
	Warnings   []string            // warnings copied from the response
	PromptHash string              // prompt hash echoed by the model, if any
	Model      string              // model identifier echoed by the model, if any
}

// InvalidItem describes a single response entry that failed validation.
type InvalidItem struct {
	Index    int    // position of the entry in the response
	Original string // raw original path as received
	Proposed string // raw proposed path as received
	Reason   string // human-readable failure reason
}

// ValidationError aggregates the issues discovered during validation.
type ValidationError struct {
	Result              ValidationResult    // partially decoded data for inspection
	MissingOriginals    []string            // expected originals absent from the response
	UnexpectedOriginals []string            // response originals the validator was not told about
	DuplicateOriginals  map[string]int      // original path -> occurrence count (>1)
	DuplicateProposed   map[string][]string // proposed name -> non-blank originals claiming it
	InvalidItems        []InvalidItem       // entries with an empty original or proposed
	PolicyViolations    []PolicyViolation   // naming-policy breaches
}

// PolicyViolation captures a single naming-policy breach.
type PolicyViolation struct {
	Original string // original path of the offending item
	Proposed string // proposed name that broke the rule
	Rule     string // rule identifier: "prefix", "spaces", "casing", or "banned"
	Message  string // human-readable explanation
}
|
||||
|
||||
func (e *ValidationError) Error() string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
parts := make([]string, 0, 5)
|
||||
if len(e.MissingOriginals) > 0 {
|
||||
parts = append(parts, fmt.Sprintf("missing %d originals", len(e.MissingOriginals)))
|
||||
}
|
||||
if len(e.UnexpectedOriginals) > 0 {
|
||||
parts = append(parts, fmt.Sprintf("unexpected %d originals", len(e.UnexpectedOriginals)))
|
||||
}
|
||||
if len(e.DuplicateOriginals) > 0 {
|
||||
parts = append(parts, fmt.Sprintf("%d duplicate originals", len(e.DuplicateOriginals)))
|
||||
}
|
||||
if len(e.DuplicateProposed) > 0 {
|
||||
parts = append(parts, fmt.Sprintf("%d duplicate proposed names", len(e.DuplicateProposed)))
|
||||
}
|
||||
if len(e.InvalidItems) > 0 {
|
||||
parts = append(parts, fmt.Sprintf("%d invalid items", len(e.InvalidItems)))
|
||||
}
|
||||
if len(e.PolicyViolations) > 0 {
|
||||
parts = append(parts, fmt.Sprintf("%d policy violations", len(e.PolicyViolations)))
|
||||
}
|
||||
|
||||
summary := strings.Join(parts, ", ")
|
||||
if summary == "" {
|
||||
summary = "response validation failed"
|
||||
}
|
||||
return fmt.Sprintf("ai response validation failed: %s", summary)
|
||||
}
|
||||
|
||||
// HasIssues indicates whether the validation error captured any rule breaks.
|
||||
func (e *ValidationError) HasIssues() bool {
|
||||
if e == nil {
|
||||
return false
|
||||
}
|
||||
return len(e.MissingOriginals) > 0 ||
|
||||
len(e.UnexpectedOriginals) > 0 ||
|
||||
len(e.DuplicateOriginals) > 0 ||
|
||||
len(e.DuplicateProposed) > 0 ||
|
||||
len(e.InvalidItems) > 0 ||
|
||||
len(e.PolicyViolations) > 0
|
||||
}
|
||||
|
||||
// NewValidator constructs a validator for the supplied original filenames. Any
|
||||
// whitespace-only entries are discarded. Duplicate originals are collapsed to
|
||||
// ensure consistent coverage checks.
|
||||
func NewValidator(originals []string, policies prompt.NamingPolicyConfig, bannedTerms []string) Validator {
|
||||
expectedSet := make(map[string]struct{}, len(originals))
|
||||
deduped := make([]string, 0, len(originals))
|
||||
for _, original := range originals {
|
||||
trimmed := strings.TrimSpace(original)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
if _, exists := expectedSet[trimmed]; exists {
|
||||
continue
|
||||
}
|
||||
expectedSet[trimmed] = struct{}{}
|
||||
deduped = append(deduped, trimmed)
|
||||
}
|
||||
|
||||
bannedSet := make(map[string]struct{})
|
||||
for _, token := range bannedTerms {
|
||||
lower := strings.ToLower(strings.TrimSpace(token))
|
||||
if lower == "" {
|
||||
continue
|
||||
}
|
||||
bannedSet[lower] = struct{}{}
|
||||
}
|
||||
|
||||
policies.Casing = strings.ToLower(strings.TrimSpace(policies.Casing))
|
||||
policies.Prefix = strings.TrimSpace(policies.Prefix)
|
||||
policies.ForbiddenTokens = append([]string(nil), policies.ForbiddenTokens...)
|
||||
|
||||
return Validator{
|
||||
expected: deduped,
|
||||
expectedSet: expectedSet,
|
||||
policies: policies,
|
||||
bannedSet: bannedSet,
|
||||
}
|
||||
}
|
||||
|
||||
// Validate ensures the AI response covers each expected original exactly once
// and that the proposed filenames are unique.
//
// The returned ValidationResult is always populated (items cloned, warnings
// copied, hash/model echoed) so callers can inspect partial data even when the
// error is non-nil. On any rule breach the error is a *ValidationError
// aggregating every issue found.
func (v Validator) Validate(resp prompt.RenameResponse) (ValidationResult, error) {
	result := ValidationResult{
		Items:      cloneItems(resp.Items),
		Warnings:   append([]string(nil), resp.Warnings...),
		PromptHash: resp.PromptHash,
		Model:      resp.Model,
	}

	// An empty response trivially misses every expected original.
	if len(resp.Items) == 0 {
		err := &ValidationError{
			Result:           result,
			MissingOriginals: append([]string(nil), v.expected...),
		}
		return result, err
	}

	seenOriginals := make(map[string]int, len(resp.Items))     // trimmed original -> occurrence count
	seenProposed := make(map[string][]string, len(resp.Items)) // trimmed proposed -> source originals
	unexpectedSet := map[string]struct{}{}

	invalidItems := make([]InvalidItem, 0)
	policyViolations := make([]PolicyViolation, 0)

	for idx, item := range resp.Items {
		original := strings.TrimSpace(item.Original)
		proposed := strings.TrimSpace(item.Proposed)

		if original == "" {
			invalidItems = append(invalidItems, InvalidItem{
				Index:    idx,
				Original: item.Original,
				Proposed: item.Proposed,
				Reason:   "original is empty",
			})
		} else {
			seenOriginals[original]++
			// Track originals the validator was never told about.
			if _, ok := v.expectedSet[original]; !ok {
				unexpectedSet[original] = struct{}{}
			}
		}

		if proposed == "" {
			invalidItems = append(invalidItems, InvalidItem{
				Index:    idx,
				Original: item.Original,
				Proposed: item.Proposed,
				Reason:   "proposed is empty",
			})
		} else {
			seenProposed[proposed] = append(seenProposed[proposed], original)
		}

		// Naming-policy checks run on every item regardless of the above.
		policyViolations = append(policyViolations, v.evaluatePolicies(item)...)
	}

	// Expected originals that never appeared in the response.
	missing := make([]string, 0)
	for _, original := range v.expected {
		if seenOriginals[original] == 0 {
			missing = append(missing, original)
		}
	}

	duplicateOriginals := make(map[string]int)
	for original, count := range seenOriginals {
		if count > 1 {
			duplicateOriginals[original] = count
		}
	}

	// A proposed name only counts as duplicated when at least two non-blank
	// originals claim it; blank originals were already reported above.
	duplicateProposed := make(map[string][]string)
	for proposed, sources := range seenProposed {
		if len(sources) > 1 {
			filtered := make([]string, 0, len(sources))
			for _, src := range sources {
				if strings.TrimSpace(src) != "" {
					filtered = append(filtered, src)
				}
			}
			if len(filtered) > 1 {
				duplicateProposed[proposed] = filtered
			}
		}
	}

	// Sorted for deterministic error output.
	unexpected := orderedKeys(unexpectedSet)

	if len(missing) == 0 &&
		len(unexpected) == 0 &&
		len(duplicateOriginals) == 0 &&
		len(duplicateProposed) == 0 &&
		len(invalidItems) == 0 &&
		len(policyViolations) == 0 {
		return result, nil
	}

	err := &ValidationError{
		Result:              result,
		MissingOriginals:    missing,
		UnexpectedOriginals: unexpected,
		DuplicateOriginals:  duplicateOriginals,
		DuplicateProposed:   duplicateProposed,
		InvalidItems:        invalidItems,
		PolicyViolations:    policyViolations,
	}

	return result, err
}
|
||||
|
||||
// Expectation returns a copy of the expected originals tracked by the validator.
|
||||
func (v Validator) Expectation() []string {
|
||||
return append([]string(nil), v.expected...)
|
||||
}
|
||||
|
||||
func cloneItems(items []prompt.RenameItem) []prompt.RenameItem {
|
||||
if len(items) == 0 {
|
||||
return nil
|
||||
}
|
||||
cp := make([]prompt.RenameItem, len(items))
|
||||
copy(cp, items)
|
||||
return cp
|
||||
}
|
||||
|
||||
// orderedKeys returns the keys of set sorted lexicographically; nil for an
// empty set.
func orderedKeys(set map[string]struct{}) []string {
	if len(set) == 0 {
		return nil
	}
	keys := make([]string, 0, len(set))
	for key := range set {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}
|
||||
|
||||
// evaluatePolicies checks a single rename item against the validator's naming
// policies and returns every violation found. An empty proposed name yields no
// violations here — it is reported separately as an invalid item. The returned
// slice is always non-nil.
func (v Validator) evaluatePolicies(item prompt.RenameItem) []PolicyViolation {
	violations := make([]PolicyViolation, 0)
	proposed := strings.TrimSpace(item.Proposed)
	if proposed == "" {
		return violations
	}
	// Policies apply to the filename stem only: base name minus extension.
	// (The local variable "stem" shadows the package-level stem helper.)
	base := filepath.Base(proposed)
	stem := base
	if ext := filepath.Ext(base); ext != "" {
		stem = base[:len(base)-len(ext)]
	}
	stemLower := strings.ToLower(stem)

	// Prefix check is case-insensitive.
	if v.policies.Prefix != "" {
		prefixLower := strings.ToLower(v.policies.Prefix)
		if !strings.HasPrefix(stemLower, prefixLower) {
			violations = append(violations, PolicyViolation{
				Original: item.Original,
				Proposed: item.Proposed,
				Rule:     "prefix",
				Message:  fmt.Sprintf("expected prefix %q", v.policies.Prefix),
			})
		}
	}

	if !v.policies.AllowSpaces && strings.Contains(stem, " ") {
		violations = append(violations, PolicyViolation{
			Original: item.Original,
			Proposed: item.Proposed,
			Rule:     "spaces",
			Message:  "spaces are not allowed",
		})
	}

	if v.policies.Casing != "" {
		if ok, message := matchesCasing(stem, v.policies); !ok {
			violations = append(violations, PolicyViolation{
				Original: item.Original,
				Proposed: item.Proposed,
				Rule:     "casing",
				Message:  message,
			})
		}
	}

	// At most one banned-token violation is reported per item (break after
	// the first hit).
	if len(v.bannedSet) > 0 {
		tokens := tokenize(stemLower)
		for _, token := range tokens {
			if _, ok := v.bannedSet[token]; ok {
				violations = append(violations, PolicyViolation{
					Original: item.Original,
					Proposed: item.Proposed,
					Rule:     "banned",
					Message:  fmt.Sprintf("contains banned token %q", token),
				})
				break
			}
		}
	}

	return violations
}
|
||||
|
||||
// matchesCasing reports whether the filename stem satisfies the configured
// casing style, returning an explanatory message when it does not. The policy
// prefix and any leading digits/separators are stripped first (see coreStem)
// so numbering and prefixes do not trip the casing rules. Unrecognized (or
// empty) casing values are accepted unchanged.
func matchesCasing(stem string, policies prompt.NamingPolicyConfig) (bool, string) {
	core := coreStem(stem, policies.Prefix)
	switch policies.Casing {
	case "kebab":
		if strings.Contains(core, " ") {
			return false, "expected kebab-case (no spaces)"
		}
		if strings.ContainsAny(core, "ABCDEFGHIJKLMNOPQRSTUVWXYZ") {
			return false, "expected kebab-case (use lowercase letters)"
		}
		return true, ""
	case "snake":
		if strings.Contains(core, " ") {
			return false, "expected snake_case (no spaces)"
		}
		// Hyphens are rejected for snake_case along with uppercase letters.
		if strings.ContainsAny(core, "ABCDEFGHIJKLMNOPQRSTUVWXYZ-") {
			return false, "expected snake_case (lowercase letters with underscores)"
		}
		return true, ""
	case "camel":
		if strings.ContainsAny(core, " -_") {
			return false, "expected camelCase (no separators)"
		}
		runes := []rune(core)
		if len(runes) == 0 {
			return false, "expected camelCase descriptive text"
		}
		if !unicode.IsLower(runes[0]) {
			return false, "expected camelCase (first letter lowercase)"
		}
		return true, ""
	case "pascal":
		if strings.ContainsAny(core, " -_") {
			return false, "expected PascalCase (no separators)"
		}
		runes := []rune(core)
		if len(runes) == 0 {
			return false, "expected PascalCase descriptive text"
		}
		if !unicode.IsUpper(runes[0]) {
			return false, "expected PascalCase (first letter uppercase)"
		}
		return true, ""
	case "title":
		// Hyphens are treated as word separators for Title Case.
		words := strings.Fields(strings.ReplaceAll(core, "-", " "))
		if len(words) == 0 {
			return false, "expected Title Case words"
		}
		for _, word := range words {
			runes := []rune(word)
			if len(runes) == 0 {
				continue
			}
			if !unicode.IsUpper(runes[0]) {
				return false, "expected Title Case (capitalize each word)"
			}
		}
		return true, ""
	default:
		// Unknown casing styles are not enforced.
		return true, ""
	}
}
|
||||
|
||||
// coreStem strips a case-insensitive policy prefix (plus the separators that
// follow it) and any leading digits, hyphens, underscores, or spaces from
// stem, returning the descriptive remainder used for casing checks.
func coreStem(stem, prefix string) string {
	rest := stem
	if prefix != "" && strings.HasPrefix(strings.ToLower(rest), strings.ToLower(prefix)) {
		rest = strings.TrimLeft(rest[len(prefix):], "-_ ")
	}
	return strings.TrimLeftFunc(rest, func(r rune) bool {
		return unicode.IsDigit(r) || r == '-' || r == '_' || r == ' '
	})
}
|
||||
201
internal/ai/prompt/builder.go
Normal file
201
internal/ai/prompt/builder.go
Normal file
@@ -0,0 +1,201 @@
|
||||
package prompt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// defaultMaxSamples caps how many sampled files are embedded in a prompt
// unless overridden via WithMaxSamples.
const defaultMaxSamples = 10

// SequenceRule captures the numbering instructions forwarded to the AI.
type SequenceRule struct {
	Style     string // numbering style identifier; Build requires non-blank
	Width     int    // zero-pad width; Build requires positive
	Start     int    // first sequence number; Build requires positive
	Separator string // separator between sequence and name
}

// PolicyConfig enumerates naming policy directives for the AI prompt.
type PolicyConfig struct {
	Prefix            string   // required filename prefix, if any
	Casing            string   // casing style; Build requires non-blank
	AllowSpaces       bool     // whether spaces may appear in names
	KeepOriginalOrder bool     // whether the AI must preserve input ordering
	ForbiddenTokens   []string // tokens that must not appear in names
}

// SampleCandidate represents a traversal sample considered for inclusion in the prompt.
type SampleCandidate struct {
	RelativePath string // path relative to the working directory; blanks are skipped
	SizeBytes    int64  // file size in bytes
	Depth        int    // traversal depth of the file
}

// BuildInput aggregates the contextual data required to assemble the AI prompt payload.
type BuildInput struct {
	WorkingDir  string            // root of the rename scope; Build requires non-blank
	Samples     []SampleCandidate // candidate files to sample from
	TotalCount  int               // total files in scope; Build requires positive
	Sequence    SequenceRule      // numbering directives
	Policies    PolicyConfig      // naming policies
	BannedTerms []string          // terms to exclude; normalized before sending
	Metadata    map[string]string // extra key/value context; blank keys/values dropped
}
|
||||
|
||||
// Builder constructs RenamePrompt payloads from traversal context.
type Builder struct {
	maxSamples int              // cap on samples included in the prompt
	clock      func() time.Time // time source for the generatedAt metadata
}

// Option mutates builder configuration.
type Option func(*Builder)
|
||||
|
||||
// WithMaxSamples overrides the number of sampled files emitted in the prompt (default 10).
|
||||
func WithMaxSamples(n int) Option {
|
||||
return func(b *Builder) {
|
||||
if n > 0 {
|
||||
b.maxSamples = n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithClock injects a deterministic clock for metadata generation.
|
||||
func WithClock(clock func() time.Time) Option {
|
||||
return func(b *Builder) {
|
||||
if clock != nil {
|
||||
b.clock = clock
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewBuilder instantiates a Builder with default configuration.
|
||||
func NewBuilder(opts ...Option) *Builder {
|
||||
builder := &Builder{
|
||||
maxSamples: defaultMaxSamples,
|
||||
clock: time.Now().UTC,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(builder)
|
||||
}
|
||||
return builder
|
||||
}
|
||||
|
||||
// Build produces a RenamePrompt populated with traversal context.
//
// It validates the required fields, drops blank samples, sorts the remainder
// deterministically, truncates to the configured sample cap, normalizes
// banned terms, and stamps the metadata with a generatedAt timestamp taken
// from the builder's clock. Errors describe the first failed precondition.
func (b *Builder) Build(input BuildInput) (RenamePrompt, error) {
	// Reject inputs that would produce an unusable prompt.
	if strings.TrimSpace(input.WorkingDir) == "" {
		return RenamePrompt{}, errors.New("prompt builder: working directory required")
	}
	if input.TotalCount <= 0 {
		return RenamePrompt{}, errors.New("prompt builder: total count must be positive")
	}
	if strings.TrimSpace(input.Sequence.Style) == "" {
		return RenamePrompt{}, errors.New("prompt builder: sequence style required")
	}
	if input.Sequence.Width <= 0 {
		return RenamePrompt{}, errors.New("prompt builder: sequence width must be positive")
	}
	if input.Sequence.Start <= 0 {
		return RenamePrompt{}, errors.New("prompt builder: sequence start must be positive")
	}
	if strings.TrimSpace(input.Policies.Casing) == "" {
		return RenamePrompt{}, errors.New("prompt builder: naming casing required")
	}

	// Drop samples with blank paths before sorting.
	samples := make([]SampleCandidate, 0, len(input.Samples))
	for _, sample := range input.Samples {
		if strings.TrimSpace(sample.RelativePath) == "" {
			continue
		}
		samples = append(samples, sample)
	}

	// Sort case-insensitively, breaking ties with a case-sensitive compare
	// so the ordering is fully deterministic.
	sort.Slice(samples, func(i, j int) bool {
		a := strings.ToLower(samples[i].RelativePath)
		b := strings.ToLower(samples[j].RelativePath)
		if a == b {
			return samples[i].RelativePath < samples[j].RelativePath
		}
		return a < b
	})

	// Truncate to the configured sample cap (or all samples if the cap is
	// unset/larger). Note: "max" shadows the Go 1.21 builtin here.
	max := b.maxSamples
	if max <= 0 || max > len(samples) {
		max = len(samples)
	}

	promptSamples := make([]PromptSample, 0, max)
	for i := 0; i < max; i++ {
		sample := samples[i]
		ext := filepath.Ext(sample.RelativePath)
		promptSamples = append(promptSamples, PromptSample{
			OriginalName: sample.RelativePath,
			Extension:    ext,
			SizeBytes:    sample.SizeBytes,
			PathDepth:    sample.Depth,
		})
	}

	banned := normalizeBannedTerms(input.BannedTerms)

	// Copy caller metadata, skipping blank keys or values, then stamp the
	// generation time (this overwrites any caller-supplied "generatedAt").
	metadata := make(map[string]string, len(input.Metadata)+1)
	for k, v := range input.Metadata {
		if strings.TrimSpace(k) == "" || strings.TrimSpace(v) == "" {
			continue
		}
		metadata[k] = v
	}
	metadata["generatedAt"] = b.clock().Format(time.RFC3339)

	return RenamePrompt{
		WorkingDir: promptAbs(input.WorkingDir),
		Samples:    promptSamples,
		TotalCount: input.TotalCount,
		SequenceRule: SequenceRuleConfig{
			Style:     input.Sequence.Style,
			Width:     input.Sequence.Width,
			Start:     input.Sequence.Start,
			Separator: input.Sequence.Separator,
		},
		Policies: NamingPolicyConfig{
			Prefix:            input.Policies.Prefix,
			Casing:            input.Policies.Casing,
			AllowSpaces:       input.Policies.AllowSpaces,
			KeepOriginalOrder: input.Policies.KeepOriginalOrder,
			ForbiddenTokens:   append([]string(nil), input.Policies.ForbiddenTokens...),
		},
		BannedTerms: banned,
		Metadata:    metadata,
	}, nil
}
|
||||
|
||||
// promptAbs normalizes the working-directory string for the prompt payload.
// Despite the name it only trims surrounding whitespace; it does not resolve
// the path to an absolute form.
func promptAbs(dir string) string {
	trimmed := strings.TrimSpace(dir)
	return trimmed
}
|
||||
|
||||
// normalizeBannedTerms trims, lowercases, and deduplicates the supplied terms,
// returning them sorted. Blank entries are dropped; an empty result is nil.
func normalizeBannedTerms(values []string) []string {
	unique := make(map[string]struct{})
	for _, value := range values {
		// Trim then lowercase; lowercasing a non-empty string never yields an
		// empty one, so a single emptiness check suffices (the original
		// re-checked after ToLower, which was dead code).
		term := strings.ToLower(strings.TrimSpace(value))
		if term == "" {
			continue
		}
		unique[term] = struct{}{}
	}
	if len(unique) == 0 {
		return nil
	}
	terms := make([]string, 0, len(unique))
	for term := range unique {
		terms = append(terms, term)
	}
	sort.Strings(terms)
	return terms
}
|
||||
3
internal/ai/prompt/doc.go
Normal file
3
internal/ai/prompt/doc.go
Normal file
@@ -0,0 +1,3 @@
|
||||
// Package prompt contains helpers for building AI prompt payloads.
package prompt
|
||||
53
internal/ai/prompt/types.go
Normal file
53
internal/ai/prompt/types.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package prompt
|
||||
|
||||
// RenamePrompt captures the structured payload sent to the Genkit workflow.
//
// It is marshaled to JSON as the model input; RenameResponse describes the
// expected reply shape.
type RenamePrompt struct {
	// WorkingDir is the directory the rename scope was resolved against.
	WorkingDir string `json:"workingDir"`
	// Samples holds a sampled subset of the scoped files.
	Samples []PromptSample `json:"samples"`
	// TotalCount is the full number of files in scope (may exceed len(Samples)).
	TotalCount int `json:"totalCount"`
	// SequenceRule describes how sequence numbers should be rendered.
	SequenceRule SequenceRuleConfig `json:"sequenceRule"`
	// Policies carries the naming conventions the model must follow.
	Policies NamingPolicyConfig `json:"policies"`
	// BannedTerms lists lowercase, deduplicated tokens that must not appear
	// in proposed names (see normalizeBannedTerms).
	BannedTerms []string `json:"bannedTerms,omitempty"`
	// Metadata holds auxiliary key/value context (e.g. the "generatedAt"
	// timestamp added by the builder).
	Metadata map[string]string `json:"metadata,omitempty"`
}
|
||||
|
||||
// PromptSample represents a sampled file from the traversal scope.
type PromptSample struct {
	// OriginalName is the file's current name as discovered on disk.
	OriginalName string `json:"originalName"`
	// Extension is the file's extension.
	// NOTE(review): whether the leading dot is included is not visible
	// here — confirm against the sample producer.
	Extension string `json:"extension"`
	// SizeBytes is the file size in bytes.
	SizeBytes int64 `json:"sizeBytes"`
	// PathDepth is the directory depth of the file relative to the scope root.
	PathDepth int `json:"pathDepth"`
}
|
||||
|
||||
// SequenceRuleConfig captures numbering directives for the AI prompt.
type SequenceRuleConfig struct {
	// Style selects where the sequence appears; the contract allows
	// "prefix" or "suffix".
	Style string `json:"style"`
	// Width is the digit width used when rendering sequence numbers.
	Width int `json:"width"`
	// Start is the first sequence value to assign.
	Start int `json:"start"`
	// Separator is placed between the sequence number and the name stem.
	Separator string `json:"separator"`
}
|
||||
|
||||
// NamingPolicyConfig enumerates naming policies forwarded to the AI.
type NamingPolicyConfig struct {
	// Prefix is an optional literal prefix for every proposed name.
	Prefix string `json:"prefix,omitempty"`
	// Casing names the casing scheme; the contract allows kebab, snake,
	// camel, pascal, or title.
	Casing string `json:"casing"`
	// AllowSpaces permits spaces in proposed names when true.
	AllowSpaces bool `json:"allowSpaces,omitempty"`
	// KeepOriginalOrder asks the model to preserve the input ordering.
	KeepOriginalOrder bool `json:"keepOriginalOrder,omitempty"`
	// ForbiddenTokens lists tokens that must not appear in proposed names.
	ForbiddenTokens []string `json:"forbiddenTokens,omitempty"`
}
|
||||
|
||||
// RenameResponse is the structured payload expected from the AI model.
type RenameResponse struct {
	// Items maps each scoped file to its proposed rename.
	Items []RenameItem `json:"items"`
	// Warnings carries non-fatal advisories emitted by the workflow.
	Warnings []string `json:"warnings,omitempty"`
	// PromptHash echoes a hash of the originating prompt for auditing.
	PromptHash string `json:"promptHash,omitempty"`
	// Model identifies which model produced the response.
	Model string `json:"model,omitempty"`
}
|
||||
|
||||
// RenameItem maps an original path to the AI-proposed rename.
type RenameItem struct {
	// Original is the file's current path (relative, per the data model).
	Original string `json:"original"`
	// Proposed is the model's suggested new name.
	Proposed string `json:"proposed"`
	// Sequence is the sequence number assigned to this item (contract
	// minimum is 1).
	Sequence int `json:"sequence"`
	// Notes optionally records the model's reasoning.
	Notes string `json:"notes,omitempty"`
}
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
const ledgerFileName = ".renamer"
|
||||
@@ -26,6 +28,65 @@ type Entry struct {
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
// aiMetadataKey is the Entry.Metadata key under which AI batch metadata is stored.
const aiMetadataKey = "ai"

// AIMetadata captures AI-specific ledger metadata for rename batches.
type AIMetadata struct {
	// PromptHash is a hash of the prompt sent to the model.
	PromptHash string `json:"promptHash"`
	// ResponseHash is a hash of the model's response.
	ResponseHash string `json:"responseHash"`
	// Model identifies the model that produced the plan.
	Model string `json:"model"`
	// Policies snapshots the naming policies in force for the batch.
	Policies prompt.NamingPolicyConfig `json:"policies"`
	// BatchSize is the number of renames in the batch.
	BatchSize int `json:"batchSize"`
	// AppliedAt records when the batch was applied; AttachAIMetadata
	// defaults it to the current UTC time when left zero.
	AppliedAt time.Time `json:"appliedAt"`
}
|
||||
|
||||
// AttachAIMetadata records AI metadata alongside the ledger entry.
|
||||
func (e *Entry) AttachAIMetadata(meta AIMetadata) {
|
||||
if e.Metadata == nil {
|
||||
e.Metadata = make(map[string]any)
|
||||
}
|
||||
if meta.AppliedAt.IsZero() {
|
||||
meta.AppliedAt = time.Now().UTC()
|
||||
}
|
||||
e.Metadata[aiMetadataKey] = meta
|
||||
}
|
||||
|
||||
// AIMetadata extracts AI metadata from the ledger entry if present.
|
||||
func (e Entry) AIMetadata() (AIMetadata, bool) {
|
||||
if e.Metadata == nil {
|
||||
return AIMetadata{}, false
|
||||
}
|
||||
raw, ok := e.Metadata[aiMetadataKey]
|
||||
if !ok {
|
||||
return AIMetadata{}, false
|
||||
}
|
||||
|
||||
switch value := raw.(type) {
|
||||
case AIMetadata:
|
||||
return value, true
|
||||
case map[string]any:
|
||||
var meta AIMetadata
|
||||
if err := remarshal(value, &meta); err != nil {
|
||||
return AIMetadata{}, false
|
||||
}
|
||||
return meta, true
|
||||
default:
|
||||
var meta AIMetadata
|
||||
if err := remarshal(value, &meta); err != nil {
|
||||
return AIMetadata{}, false
|
||||
}
|
||||
return meta, true
|
||||
}
|
||||
}
|
||||
|
||||
// remarshal converts value into target by round-tripping through JSON.
func remarshal(value any, target any) error {
	encoded, marshalErr := json.Marshal(value)
	if marshalErr != nil {
		return marshalErr
	}
	return json.Unmarshal(encoded, target)
}
|
||||
|
||||
// Append writes a new entry to the ledger in newline-delimited JSON format.
|
||||
func Append(workingDir string, entry Entry) error {
|
||||
entry.Timestamp = time.Now().UTC()
|
||||
|
||||
@@ -3,6 +3,7 @@ package output
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// plainFormatter emits one entry per line suitable for piping into other tools.
|
||||
@@ -26,3 +27,41 @@ func (plainFormatter) WriteSummary(w io.Writer, summary Summary) error {
|
||||
_, err := fmt.Fprintln(w, DefaultSummaryLine(summary))
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteAIPlanDebug emits prompt hashes and warnings to the provided writer.
|
||||
func WriteAIPlanDebug(w io.Writer, promptHash string, warnings []string) {
|
||||
if w == nil {
|
||||
return
|
||||
}
|
||||
if promptHash != "" {
|
||||
fmt.Fprintf(w, "Prompt hash: %s\n", promptHash)
|
||||
}
|
||||
for _, warning := range warnings {
|
||||
if strings.TrimSpace(warning) == "" {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(w, "%s\n", warning)
|
||||
}
|
||||
}
|
||||
|
||||
// PolicyViolationMessage describes a single policy failure for display purposes.
|
||||
type PolicyViolationMessage struct {
|
||||
Original string
|
||||
Proposed string
|
||||
Rule string
|
||||
Message string
|
||||
}
|
||||
|
||||
// WritePolicyViolations prints detailed policy failure information to the writer.
|
||||
func WritePolicyViolations(w io.Writer, violations []PolicyViolationMessage) {
|
||||
if w == nil {
|
||||
return
|
||||
}
|
||||
for _, violation := range violations {
|
||||
rule := violation.Rule
|
||||
if rule == "" {
|
||||
rule = "policy"
|
||||
}
|
||||
fmt.Fprintf(w, "Policy violation (%s): %s -> %s (%s)\n", rule, violation.Original, violation.Proposed, violation.Message)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,3 +46,53 @@ func (f *tableFormatter) WriteSummary(w io.Writer, summary Summary) error {
|
||||
_, err := fmt.Fprintln(w, DefaultSummaryLine(summary))
|
||||
return err
|
||||
}
|
||||
|
||||
// AIPlanRow represents a single AI plan preview row.
|
||||
type AIPlanRow struct {
|
||||
Sequence string
|
||||
Original string
|
||||
Proposed string
|
||||
Sanitized string
|
||||
}
|
||||
|
||||
// AIPlanTable renders AI plan previews in a tabular format.
|
||||
type AIPlanTable struct {
|
||||
writer *tabwriter.Writer
|
||||
}
|
||||
|
||||
// NewAIPlanTable constructs a table for AI plan previews.
|
||||
func NewAIPlanTable() *AIPlanTable {
|
||||
return &AIPlanTable{}
|
||||
}
|
||||
|
||||
// Begin writes the header for the AI plan table.
|
||||
func (t *AIPlanTable) Begin(w io.Writer) error {
|
||||
if t.writer != nil {
|
||||
return fmt.Errorf("ai plan table already initialized")
|
||||
}
|
||||
t.writer = tabwriter.NewWriter(w, 0, 4, 2, ' ', 0)
|
||||
_, err := fmt.Fprintln(t.writer, "SEQ\tORIGINAL\tPROPOSED\tSANITIZED")
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteRow appends a plan row to the table.
|
||||
func (t *AIPlanTable) WriteRow(row AIPlanRow) error {
|
||||
if t.writer == nil {
|
||||
return fmt.Errorf("ai plan table not initialized")
|
||||
}
|
||||
_, err := fmt.Fprintf(t.writer, "%s\t%s\t%s\t%s\n", row.Sequence, row.Original, row.Proposed, row.Sanitized)
|
||||
return err
|
||||
}
|
||||
|
||||
// End flushes the table to the underlying writer.
|
||||
func (t *AIPlanTable) End(w io.Writer) error {
|
||||
if t.writer == nil {
|
||||
return fmt.Errorf("ai plan table not initialized")
|
||||
}
|
||||
if err := t.writer.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := fmt.Fprintln(w)
|
||||
t.writer = nil
|
||||
return err
|
||||
}
|
||||
|
||||
61
scripts/smoke-test-ai.sh
Executable file
61
scripts/smoke-test-ai.sh
Executable file
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env bash
# Smoke test for `renamer ai`: export a plan, edit it deterministically,
# validate the edit, apply it, then undo — asserting filesystem state at
# each step.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
BIN="go run"
TMP_DIR="$(mktemp -d)" # mktemp -d already creates the directory
trap 'rm -rf "$TMP_DIR"' EXIT

touch "$TMP_DIR/raw_demo 01.txt"
touch "$TMP_DIR/raw_demo 02.txt"

PLAN_JSON="$TMP_DIR/ai-plan.json"

echo "Generating initial AI plan preview..."
$BIN "$ROOT_DIR/main.go" ai --path "$TMP_DIR" --dry-run --export-plan "$PLAN_JSON" >/dev/null

if [[ ! -s "$PLAN_JSON" ]]; then
  echo "expected plan export at $PLAN_JSON" >&2
  exit 1
fi

echo "Editing exported plan for deterministic names..."
# BUG FIX: the heredoc delimiter is quoted ('PY'), so the shell does NOT
# expand $PLAN_JSON inside the Python body — the old code opened a literal
# file named "$PLAN_JSON". Pass the path via argv instead.
python3 - "$PLAN_JSON" <<'PY'
import json, pathlib, sys

path = pathlib.Path(sys.argv[1])
plan = json.loads(path.read_text())
for idx, item in enumerate(plan.get("items", []), start=1):
    item["proposed"] = f"{idx:03d}_final_demo.txt"
plan["warnings"] = plan.get("warnings", []) + ["edited in smoke script"]
path.write_text(json.dumps(plan, indent=2) + "\n")
PY

echo "Validating edited plan (dry run)..."
$BIN "$ROOT_DIR/main.go" ai --path "$TMP_DIR" --dry-run --import-plan "$PLAN_JSON" >/dev/null

echo "Applying edited plan..."
$BIN "$ROOT_DIR/main.go" ai --path "$TMP_DIR" --import-plan "$PLAN_JSON" --yes >/dev/null

if [[ ! -f "$TMP_DIR/001_final_demo.txt" ]]; then
  echo "expected 001_final_demo.txt to exist" >&2
  exit 1
fi
if [[ ! -f "$TMP_DIR/002_final_demo.txt" ]]; then
  echo "expected 002_final_demo.txt to exist" >&2
  exit 1
fi

echo "Undoing AI plan application..."
$BIN "$ROOT_DIR/main.go" undo --path "$TMP_DIR" >/dev/null

if [[ ! -f "$TMP_DIR/raw_demo 01.txt" ]]; then
  echo "undo failed to restore raw_demo 01.txt" >&2
  exit 1
fi
if [[ ! -f "$TMP_DIR/raw_demo 02.txt" ]]; then
  echo "undo failed to restore raw_demo 02.txt" >&2
  exit 1
fi

echo "AI smoke test succeeded."
|
||||
34
specs/008-ai-rename-prompt/checklists/requirements.md
Normal file
34
specs/008-ai-rename-prompt/checklists/requirements.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# Specification Quality Checklist: AI-Assisted Rename Prompting
|
||||
|
||||
**Purpose**: Validate specification completeness and quality before proceeding to planning
|
||||
**Created**: 2025-11-03
|
||||
**Feature**: [AI-Assisted Rename Prompting](../spec.md)
|
||||
|
||||
## Content Quality
|
||||
|
||||
- [x] No implementation details (languages, frameworks, APIs)
|
||||
- [x] Focused on user value and business needs
|
||||
- [x] Written for non-technical stakeholders
|
||||
- [x] All mandatory sections completed
|
||||
|
||||
## Requirement Completeness
|
||||
|
||||
- [x] No [NEEDS CLARIFICATION] markers remain
|
||||
- [x] Requirements are testable and unambiguous
|
||||
- [x] Success criteria are measurable
|
||||
- [x] Success criteria are technology-agnostic (no implementation details)
|
||||
- [x] All acceptance scenarios are defined
|
||||
- [x] Edge cases are identified
|
||||
- [x] Scope is clearly bounded
|
||||
- [x] Dependencies and assumptions identified
|
||||
|
||||
## Feature Readiness
|
||||
|
||||
- [x] All functional requirements have clear acceptance criteria
|
||||
- [x] User scenarios cover primary flows
|
||||
- [x] Feature meets measurable outcomes defined in Success Criteria
|
||||
- [x] No implementation details leak into specification
|
||||
|
||||
## Notes
|
||||
|
||||
- Items marked incomplete require spec updates before `/speckit.clarify` or `/speckit.plan`
|
||||
176
specs/008-ai-rename-prompt/contracts/ai-rename.openapi.yaml
Normal file
176
specs/008-ai-rename-prompt/contracts/ai-rename.openapi.yaml
Normal file
@@ -0,0 +1,176 @@
|
||||
openapi: 3.1.0
|
||||
info:
|
||||
title: Renamer AI Inline Contract
|
||||
version: 0.1.0
|
||||
description: |
|
||||
JSON payload contract exchanged between the Go `renamer ai` command and the embedded Google Genkit workflow.
|
||||
servers:
|
||||
- url: command://renamer-ai/genkit
|
||||
description: CLI-invoked Genkit runner (JSON via stdin/stdout)
|
||||
paths:
|
||||
/rename-plan:
|
||||
post:
|
||||
summary: Generate a rename plan for scoped files (invoked inline, not over HTTP)
|
||||
operationId: createRenamePlan
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenamePrompt'
|
||||
responses:
|
||||
'200':
|
||||
description: Valid rename plan
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenameResponse'
|
||||
'400':
|
||||
description: Invalid prompt payload
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ErrorResponse'
|
||||
'422':
|
||||
description: Model returned inconsistent plan
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ValidationError'
|
||||
'503':
|
||||
description: Upstream model unavailable
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ErrorResponse'
|
||||
components:
|
||||
schemas:
|
||||
RenamePrompt:
|
||||
type: object
|
||||
required: [workingDir, samples, totalCount, sequenceRule, policies]
|
||||
properties:
|
||||
workingDir:
|
||||
type: string
|
||||
samples:
|
||||
type: array
|
||||
minItems: 1
|
||||
items:
|
||||
$ref: '#/components/schemas/PromptSample'
|
||||
totalCount:
|
||||
type: integer
|
||||
minimum: 1
|
||||
sequenceRule:
|
||||
$ref: '#/components/schemas/SequenceRule'
|
||||
policies:
|
||||
$ref: '#/components/schemas/NamingPolicy'
|
||||
bannedTerms:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
PromptSample:
|
||||
type: object
|
||||
required: [originalName, extension, sizeBytes, pathDepth]
|
||||
properties:
|
||||
originalName:
|
||||
type: string
|
||||
extension:
|
||||
type: string
|
||||
sizeBytes:
|
||||
type: integer
|
||||
minimum: 0
|
||||
pathDepth:
|
||||
type: integer
|
||||
minimum: 0
|
||||
SequenceRule:
|
||||
type: object
|
||||
required: [style, width, start, separator]
|
||||
properties:
|
||||
style:
|
||||
type: string
|
||||
enum: [prefix, suffix]
|
||||
width:
|
||||
type: integer
|
||||
minimum: 1
|
||||
start:
|
||||
type: integer
|
||||
minimum: 1
|
||||
separator:
|
||||
type: string
|
||||
NamingPolicy:
|
||||
type: object
|
||||
required: [casing]
|
||||
properties:
|
||||
prefix:
|
||||
type: string
|
||||
casing:
|
||||
type: string
|
||||
enum: [kebab, snake, camel, pascal, title]
|
||||
allowSpaces:
|
||||
type: boolean
|
||||
default: false
|
||||
keepOriginalOrder:
|
||||
type: boolean
|
||||
default: false
|
||||
forbiddenTokens:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
RenameResponse:
|
||||
type: object
|
||||
required: [items]
|
||||
properties:
|
||||
items:
|
||||
type: array
|
||||
minItems: 1
|
||||
items:
|
||||
$ref: '#/components/schemas/RenameItem'
|
||||
warnings:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
promptHash:
|
||||
type: string
|
||||
model:
|
||||
type: string
|
||||
RenameItem:
|
||||
type: object
|
||||
required: [original, proposed, sequence]
|
||||
properties:
|
||||
original:
|
||||
type: string
|
||||
proposed:
|
||||
type: string
|
||||
sequence:
|
||||
type: integer
|
||||
minimum: 1
|
||||
notes:
|
||||
type: string
|
||||
ValidationError:
|
||||
type: object
|
||||
required: [message, conflicts]
|
||||
properties:
|
||||
message:
|
||||
type: string
|
||||
conflicts:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
original:
|
||||
type: string
|
||||
issue:
|
||||
type: string
|
||||
detail:
|
||||
type: string
|
||||
ErrorResponse:
|
||||
type: object
|
||||
required: [message]
|
||||
properties:
|
||||
message:
|
||||
type: string
|
||||
retryable:
|
||||
type: boolean
|
||||
99
specs/008-ai-rename-prompt/data-model.md
Normal file
99
specs/008-ai-rename-prompt/data-model.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# Data Model: AI-Assisted Rename Prompting
|
||||
|
||||
## AiRenamePrompt
|
||||
- **Fields**
|
||||
- `WorkingDir` (string; absolute path)
|
||||
- `SampleFiles` ([]PromptSample)
|
||||
- `TotalCount` (int)
|
||||
- `SequenceRule` (SequenceRuleConfig)
|
||||
- `NamingPolicies` (NamingPolicyConfig)
|
||||
- `BannedTerms` ([]string)
|
||||
- `Metadata` (map[string]string; includes CLI version, timestamp)
|
||||
- **Relationships**: Built from traversal summary; sent to Genkit workflow.
|
||||
- **Validations**: Require ≥1 sample, forbid empty banned terms, limit payload to ≤2 MB serialized.
|
||||
|
||||
### PromptSample
|
||||
- **Fields**
|
||||
- `OriginalName` (string)
|
||||
- `SizeBytes` (int64)
|
||||
- `Extension` (string)
|
||||
- `PathDepth` (int)
|
||||
|
||||
### SequenceRuleConfig
|
||||
- **Fields**
|
||||
- `Style` (enum: `prefix`, `suffix`)
|
||||
- `Width` (int)
|
||||
- `Start` (int)
|
||||
- `Separator` (string)
|
||||
|
||||
### NamingPolicyConfig
|
||||
- **Fields**
|
||||
- `Prefix` (string)
|
||||
- `Casing` (enum: `kebab`, `snake`, `camel`, `pascal`, `title`)
|
||||
- `AllowSpaces` (bool)
|
||||
- `KeepOriginalOrder` (bool)
|
||||
- `ForbiddenTokens` ([]string)
|
||||
|
||||
## AiRenameResponse
|
||||
- **Fields**
|
||||
- `Items` ([]AiRenameItem)
|
||||
- `Warnings` ([]string)
|
||||
- `PromptHash` (string)
|
||||
- `Model` (string)
|
||||
- **Relationships**: Parsed from Genkit output; feeds validation pipeline.
|
||||
- **Validations**: Items length must equal scoped candidate count; `Original` names must match traversal list; `Proposed` must be unique.
|
||||
|
||||
### AiRenameItem
|
||||
- **Fields**
|
||||
- `Original` (string; relative path)
|
||||
- `Proposed` (string; sanitized stem + extension)
|
||||
- `Sequence` (int)
|
||||
- `Notes` (string; optional reasoning)
|
||||
|
||||
## AiRenamePlan
|
||||
- **Fields**
|
||||
- `Candidates` ([]PlanEntry)
|
||||
- `Conflicts` ([]PlanConflict)
|
||||
- `Policies` (NamingPolicyConfig)
|
||||
- `SequenceAppliedWidth` (int)
|
||||
- `Warnings` ([]string)
|
||||
- **Relationships**: Created post-validation for preview/apply; persisted to ledger metadata.
|
||||
- **Validations**: Enforce contiguous sequences, ensure sanitized stems non-empty, confirm banned tokens absent.
|
||||
|
||||
### PlanEntry
|
||||
- **Fields**
|
||||
- `OriginalPath` (string)
|
||||
- `ProposedPath` (string)
|
||||
- `Sequence` (int)
|
||||
- `Status` (enum: `pending`, `edited`, `skipped`, `unchanged`)
|
||||
- `SanitizedSegments` ([]string)
|
||||
|
||||
### PlanConflict
|
||||
- **Fields**
|
||||
- `OriginalPath` (string)
|
||||
- `Issue` (enum: `duplicate`, `collision`, `policy_violation`, `missing_sequence`)
|
||||
- `Details` (string)
|
||||
|
||||
## AiRenameLedgerMetadata
|
||||
- **Fields**
|
||||
- `PromptHash` (string)
|
||||
- `ResponseHash` (string)
|
||||
- `Model` (string)
|
||||
- `Policies` (NamingPolicyConfig)
|
||||
- `BatchSize` (int)
|
||||
- `AppliedAt` (time.Time)
|
||||
- **Relationships**: Stored under `Entry.Metadata["ai"]` when applying rename batch; consumed by undo for auditing.
|
||||
|
||||
## GenkitWorkflowConfig
|
||||
- **Fields**
|
||||
- `Endpoint` (string; local or remote URL)
|
||||
- `Timeout` (Duration)
|
||||
- `RetryPolicy` (RetryPolicy)
|
||||
- `ApiKeyRef` (string; environment variable name mapping to `$HOME/.config/.renamer/{name}` token file)
|
||||
|
||||
### RetryPolicy
|
||||
- **Fields**
|
||||
- `MaxAttempts` (int)
|
||||
- `BackoffInitial` (Duration)
|
||||
- `BackoffMultiplier` (float64)
|
||||
- `FallbackModel` (string; optional)
|
||||
80
specs/008-ai-rename-prompt/plan.md
Normal file
80
specs/008-ai-rename-prompt/plan.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# Implementation Plan: AI-Assisted Rename Prompting
|
||||
|
||||
**Branch**: `008-ai-rename-prompt` | **Date**: 2025-11-03 | **Spec**: `specs/008-ai-rename-prompt/spec.md`
|
||||
**Input**: Feature specification from `/specs/008-ai-rename-prompt/spec.md`
|
||||
|
||||
**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/commands/plan.md` for the execution workflow.
|
||||
|
||||
## Summary
|
||||
|
||||
Introduce a `renamer ai` command that embeds a Google Genkit (Go SDK) workflow inside the CLI execution path. The command collects scope metadata, calls the Genkit pipeline in-process (defaulting to an OpenAI-compatible model), validates the response for sequential, uniform, sanitized filenames, allows operator edits, and records final mappings in the undo ledger while managing `*_MODEL_AUTH_TOKEN` secrets under `$HOME/.config/.renamer/`.
|
||||
|
||||
## Technical Context
|
||||
|
||||
**Language/Version**: Go 1.24 (CLI + Google Genkit Go SDK)
|
||||
**Primary Dependencies**: `spf13/cobra`, internal traversal/history/output packages, `github.com/firebase/genkit/go` (with OpenAI-compatible connectors), OpenAI-compatible HTTP client for fallbacks
|
||||
**Storage**: Local filesystem plus `.renamer` append-only ledger; auth tokens cached under `$HOME/.config/.renamer/`
|
||||
**Testing**: `go test ./...` for CLI logic and in-process Genkit workflow coverage, contract/integration suites under `tests/`
|
||||
**Target Platform**: Cross-platform CLI (macOS, Linux, Windows shells) executing in-process Genkit workflows
|
||||
**Project Type**: Single CLI project with integrated Go Genkit module
|
||||
**Performance Goals**: Generate validated rename plan for up to 1,000 files in ≤ 30 seconds round-trip
|
||||
**Constraints**: Genkit workflow must initialize quickly per invocation; AI requests limited to 2 MB payload; ensure user-provided banned terms removed
|
||||
**Scale/Scope**: Typical batches 1–1,000 files; shared Genkit pipeline available for future AI features
|
||||
|
||||
## Constitution Check
|
||||
|
||||
- Preview flow continues to render deterministic before/after tables and block apply until confirmed, satisfying Preview-First Safety.
|
||||
- Undo path records final mappings plus AI prompt/response metadata in `.renamer`, preserving Persistent Undo Ledger guarantees.
|
||||
- AI rename integration becomes a composable rule module (`internal/ai`) that declares inputs (prompt spec), validations, and postconditions while orchestrating the Go Genkit workflow inline, aligning with Composable Rule Engine.
|
||||
- Scope handling reuses existing traversal services (`internal/traversal`) so filters (`--path`, `-r`, `-d`, `--extensions`) remain enforced per Scope-Aware Traversal.
|
||||
- Cobra wiring (`cmd/ai.go`) follows existing CLI standards with help text, flag validation, tests, meeting Ergonomic CLI Stewardship.
|
||||
|
||||
## Project Structure
|
||||
|
||||
### Documentation (this feature)
|
||||
|
||||
```text
|
||||
specs/008-ai-rename-prompt/
|
||||
├── plan.md
|
||||
├── research.md
|
||||
├── data-model.md
|
||||
├── quickstart.md
|
||||
├── contracts/
|
||||
└── tasks.md
|
||||
```
|
||||
|
||||
### Source Code (repository root)
|
||||
|
||||
```text
|
||||
cmd/
|
||||
├── ai.go # Genkit-powered command wiring
|
||||
├── root.go
|
||||
|
||||
internal/
|
||||
├── ai/
|
||||
│ ├── prompt/ # Prompt assembly, policy enforcement
|
||||
│ ├── genkit/ # Go Genkit workflow definitions and model connectors
|
||||
│ └── plan/ # Response validation & editing utilities
|
||||
├── history/
|
||||
├── output/
|
||||
├── traversal/
|
||||
└── sequence/
|
||||
|
||||
tests/
|
||||
├── contract/
|
||||
├── integration/
|
||||
└── unit/
|
||||
```
|
||||
|
||||
|
||||
|
||||
**Structure Decision**: Extend existing CLI layout by adding an `internal/ai` package that houses Go Genkit workflows invoked directly from `cmd/ai.go`; existing test directories cover the new command with contract/integration suites.
|
||||
|
||||
## Complexity Tracking
|
||||
|
||||
| Violation | Why Needed | Simpler Alternative Rejected Because |
|
||||
|-----------|------------|--------------------------------------|
|
||||
| Direct Go Genkit integration | First-class Go SDK keeps execution inline and satisfies CLI-only requirement | Manual REST integration would lose Genkit workflows (retriers, evaluators) and require bespoke prompt templating |
|
||||
40
specs/008-ai-rename-prompt/quickstart.md
Normal file
40
specs/008-ai-rename-prompt/quickstart.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Quickstart: AI-Assisted Rename Prompting
|
||||
|
||||
## Prerequisites
|
||||
- Go 1.24 environment (CLI build/test)
|
||||
- `*_MODEL_AUTH_TOKEN` stored under `$HOME/.config/.renamer/` (default OpenAI-compatible key)
|
||||
|
||||
## Install Dependencies
|
||||
```bash
|
||||
# Sync Go modules (includes firebase/genkit)
|
||||
go mod tidy
|
||||
```
|
||||
|
||||
## Preview AI Rename Plan
|
||||
```bash
|
||||
go run ./cmd/renamer ai \
|
||||
--path ./fixtures/batch \
|
||||
--sequence-width 3 \
|
||||
--sequence-style prefix \
|
||||
--naming-casing kebab \
|
||||
--banned "promo,ad" \
|
||||
--dry-run
|
||||
```
|
||||
> CLI invokes the in-process Genkit workflow and renders a preview table with sequential, sanitized names.
|
||||
|
||||
## Apply Approved Plan
|
||||
```bash
|
||||
go run ./cmd/renamer ai --path ./fixtures/batch --yes
|
||||
```
|
||||
> Validates the cached plan, applies filesystem renames, and writes ledger entry with AI metadata.
|
||||
|
||||
## Testing
|
||||
```bash
|
||||
# Go unit + integration tests (includes Genkit workflow tests)
|
||||
go test ./...
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
- **Genkit errors**: Run with `--debug-genkit` to emit inline prompt/response traces (written to `~/.renamer/genkit.log`).
|
||||
- **Validation failures**: Run with `--export-plan out.json` to inspect AI output and manually edit.
|
||||
- **Rate limits**: Configure `--genkit-model` flag or `GENKIT_MODEL` env variable to select a lighter model.
|
||||
21
specs/008-ai-rename-prompt/research.md
Normal file
21
specs/008-ai-rename-prompt/research.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# Research Log
|
||||
|
||||
## Genkit Orchestration Strategy
|
||||
- **Decision**: Integrate the official Google Genkit Go SDK directly in `internal/ai/genkit`, executing workflows synchronously inside the `renamer ai` command without external services.
|
||||
- **Rationale**: Satisfies the CLI-only constraint, keeps deployment as a single Go binary, and leverages Genkit guardrails, evaluators, and prompt templating.
|
||||
- **Alternatives considered**: Spawning a Node.js runner (rejected—adds extra runtime and violates updated requirement); Plain REST client to foundation models (rejected—would require rebuilding Genkit safety features manually).
|
||||
|
||||
## Prompt Composition Template
|
||||
- **Decision**: Define typed Go structs mirroring the prompt schema (scope summary, sample filenames, naming policies, banned tokens) and marshal them to JSON for Genkit inputs.
|
||||
- **Rationale**: Strong typing prevents malformed prompts and aligns with Genkit Go helpers for variable interpolation and logging.
|
||||
- **Alternatives considered**: Free-form natural language prompts (rejected—harder to validate); YAML serialization (rejected—JSON is the Genkit default and reduces dependency footprint).
|
||||
|
||||
## Response Schema & Validation
|
||||
- **Decision**: Genkit workflow returns Go structs (`RenameItem`, `Warnings`) serialized as JSON; the CLI validates coverage, uniqueness, banned-term removal, and sequential numbering before building the plan.
|
||||
- **Rationale**: Mirrors existing rename planner data types, enabling reuse of preview/output logic and providing transparent audit metadata.
|
||||
- **Alternatives considered**: Returning only ordered filenames (rejected—insufficient context for debugging); CSV output (rejected—lossy and awkward for nested metadata).
|
||||
|
||||
## Offline & Failure Handling
|
||||
- **Decision**: Use Genkit middleware for retry/backoff and error classification; if the workflow fails or produces invalid data, the CLI aborts gracefully, surfaces the issue, and can export the prompt/response for manual inspection.
|
||||
- **Rationale**: Maintains Preview-First safety by never applying partial results and keeps error handling contained within the CLI execution.
|
||||
- **Alternatives considered**: Persistent background daemon (rejected—contradicts inline execution); Automatic fallback to legacy sequential numbering (rejected—changes user intent, can be added later as an explicit option).
|
||||
107
specs/008-ai-rename-prompt/spec.md
Normal file
107
specs/008-ai-rename-prompt/spec.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# Feature Specification: AI-Assisted Rename Prompting
|
||||
|
||||
**Feature Branch**: `008-ai-rename-prompt`
|
||||
**Created**: 2025-11-03
|
||||
**Status**: Draft
|
||||
**Input**: User description: "实现使用AI重命名的功能,把当前的文件列表给AI使用AI进行重新命令,AI返回重命令规则后解析AI的规则进行本地重命名操作。你需要帮我考虑如何建立合适的prompt给AI实现重命名。要求:1、带序列号;2、文件名规则统一;3、文件名中去除广告推广等垃圾信息;4、如果还有其它合适规则你来适量添加。"
|
||||
|
||||
## Clarifications
|
||||
|
||||
### Session 2025-11-03
|
||||
|
||||
- Q: Which AI model should the CLI use by default? → A: Default to an OpenAI-compatible model with override flag/env.
|
||||
|
||||
## User Scenarios & Testing *(mandatory)*
|
||||
|
||||
### User Story 1 - Generate AI Rename Plan (Priority: P1)
|
||||
|
||||
As a file curator preparing bulk renames, I want the CLI to compile a clean prompt from my current file list, send it to the AI service, and receive a structured rename plan that I can preview with sequence numbers before applying changes.
|
||||
|
||||
**Why this priority**: This delivers the core value—automating consistent, sequential renames without manual rule crafting—unlocking the feature for everyday workflows.
|
||||
|
||||
**Independent Test**: With a mixed set of filenames, invoke the new `renamer ai` flow, confirm the prompt includes the sampled names and instructions, and verify the returned plan previews sequential, uniform, sanitized filenames ready for apply.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a directory of 25 assorted media files, **When** the user runs the AI rename preview, **Then** the prompt sent to the AI lists representative samples, prescribes ordering, and the returned plan previews filenames numbered `001_...` to `025_...` without junk text.
|
||||
2. **Given** an AI response that follows the documented schema, **When** the CLI parses it, **Then** each planned rename appears in the standard preview table with sequence numbers and consistent formatting.
|
||||
|
||||
---
|
||||
|
||||
### User Story 2 - Enforce Naming Standards (Priority: P2)
|
||||
|
||||
As a brand manager, I want to configure naming guidelines (e.g., project label, casing style) that the AI prompt reinforces so that resulting filenames stay uniform across batches.
|
||||
|
||||
**Why this priority**: Allowing users to express naming policy increases trust and ensures AI output aligns with organizational standards.
|
||||
|
||||
**Independent Test**: Run the command with options specifying kebab-case and a prefix token, then confirm the generated prompt includes those rules and the AI response reflects them in the preview.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a user-provided naming policy, **When** the AI prompt is generated, **Then** it explicitly lists casing, prefix, and separator requirements.
|
||||
2. **Given** the AI response violates the declared casing rule, **When** the CLI validates the response, **Then** the run aborts with a descriptive error explaining which filenames failed the policy check.
|
||||
|
||||
---
|
||||
|
||||
### User Story 3 - Review, Edit, and Apply Safely (Priority: P3)
|
||||
|
||||
As a cautious operator, I want to review the AI plan, make manual adjustments if needed, and only apply changes once I am satisfied that collisions, restricted terms, and missing numbers are resolved.
|
||||
|
||||
**Why this priority**: Safety controls maintain confidence in AI-driven workflows and reduce post-apply cleanup.
|
||||
|
||||
**Independent Test**: After generating an AI plan, edit a subset of proposed names, re-run validation, and ensure the final apply step records the batch with undo support and flags any remaining conflicts.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** an AI plan with two conflicting targets, **When** the operator attempts to apply without resolving them, **Then** the CLI blocks execution and enumerates the conflicting entries.
|
||||
2. **Given** the operator edits AI output to change a sequence token, **When** the plan is revalidated, **Then** the tool reorders numbers and confirms the ledger entry captures the final applied mapping.
|
||||
|
||||
---
|
||||
|
||||
### Edge Cases
|
||||
|
||||
- AI response omits some files from the original list or introduces unfamiliar entries.
|
||||
- Returned filenames exceed allowed length or include forbidden characters for the host OS.
|
||||
- AI-generated numbers skip or duplicate sequence values.
|
||||
- The AI service is unavailable, times out, or returns malformed JSON.
|
||||
- Users request rules that contradict each other (e.g., camelCase and kebab-case simultaneously).
|
||||
- Sanitization removes all meaningful characters, resulting in empty or duplicate stems.
|
||||
|
||||
## Requirements *(mandatory)*
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
- **FR-001**: The CLI MUST collect the active scope (paths, filters, counts) and compose an AI prompt that includes representative filenames plus the required renaming rules (sequence numbering, uniform formatting, spam removal, additional heuristics).
|
||||
- **FR-002**: The prompt MUST instruct the AI to respond in a documented, parseable structure (e.g., JSON with original and proposed names) and to preserve file extensions; the CLI MUST default to an OpenAI-compatible model (override via flag/env).
|
||||
- **FR-003**: The system MUST validate the AI response, ensuring every scoped file has a proposed rename, sequence numbers are continuous, names are unique, and disallowed content is removed before previewing changes.
|
||||
- **FR-004**: Users MUST be able to supply optional naming policy inputs (project tag, casing preference, separator choice, forbidden words) that the prompt reflects and the validator enforces.
|
||||
- **FR-005**: The preview MUST display AI-proposed names with sequence numbers, highlight sanitized segments, and surface any entries needing manual edits before the apply step is allowed.
|
||||
- **FR-006**: The CLI MUST allow users to edit or regenerate portions of the AI plan, re-run validation, and only enable apply once all issues are resolved.
|
||||
- **FR-007**: Apply MUST record the final mappings and the AI prompt/response metadata in the `.renamer` ledger so undo can restore original names and provide auditability.
|
||||
- **FR-008**: The workflow MUST handle AI communication failures gracefully by surfacing clear errors and leaving existing files untouched.
|
||||
- **FR-009**: The system MUST prevent AI output from introducing prohibited terms, promotional phrases, or user-defined banned tokens into the resulting filenames.
|
||||
- **FR-010**: The CLI MUST support dry-run mode for AI interactions, allowing prompt/response review without executing filesystem changes.
|
||||
|
||||
### Key Entities *(include if feature involves data)*
|
||||
|
||||
- **AiRenamePrompt**: Captures scope summary, sample filenames, mandatory rules, optional user policies, and guardrails sent to the AI.
|
||||
- **AiRenameResponse**: Structured data returned by the AI containing proposed filenames, rationale, and any warnings or unresolved cases.
|
||||
- **AiRenamePlan**: Aggregated representation of validated rename operations, including sequence ordering, sanitization notes, and conflict markers used for preview/apply.
|
||||
- **AiRenameLedgerMetadata**: Audit payload storing prompt hash, response hash, applied policy parameters, and timestamp for undo traceability.
|
||||
|
||||
### Assumptions
|
||||
|
||||
- Users provide access to an AI endpoint capable of following structured prompts and returning JSON within size limits; default secret tokens (`*_MODEL_AUTH_TOKEN`) are stored under `$HOME/.config/.renamer/`.
|
||||
- File extensions must remain unchanged; only stems are rewritten.
|
||||
- Default numbering uses three-digit, zero-padded prefixes unless the user specifies a different width or format.
|
||||
- The CLI environment already authenticates outbound AI requests; this feature focuses on prompt content and result handling.
|
||||
- Promotional or spam phrases are identified via a maintainable stop-word list augmented by user-provided banned terms.
|
||||
|
||||
## Success Criteria *(mandatory)*
|
||||
|
||||
### Measurable Outcomes
|
||||
|
||||
- **SC-001**: 95% of AI-generated rename plans pass validation without requiring manual edits on the first attempt for test batches of up to 1,000 files.
|
||||
- **SC-002**: Operators can review AI preview results and apply approved renames within 5 minutes for a 200-file batch, including validation and optional edits.
|
||||
- **SC-003**: 100% of applied AI-driven rename batches produce ledger entries with complete prompt/response metadata, enabling successful undo in under 60 seconds.
|
||||
- **SC-004**: User satisfaction surveys report at least 85% agreement that AI-assisted renaming produced clearer, uniform filenames compared to prior manual methods within one release cycle.
|
||||
125
specs/008-ai-rename-prompt/tasks.md
Normal file
125
specs/008-ai-rename-prompt/tasks.md
Normal file
@@ -0,0 +1,125 @@
|
||||
# Tasks: AI-Assisted Rename Prompting
|
||||
|
||||
**Input**: Design documents from `/specs/008-ai-rename-prompt/`
|
||||
**Prerequisites**: plan.md, spec.md, research.md, data-model.md, contracts/
|
||||
|
||||
## Phase 1: Setup (Shared Infrastructure)
|
||||
|
||||
**Purpose**: Establish tooling and scaffolding for the embedded Go Genkit workflow.
|
||||
|
||||
- [x] T001 Pin Google Genkit Go SDK dependency and run tidy in `go.mod`
|
||||
- [x] T002 Scaffold `cmd/ai.go` command file with Cobra boilerplate
|
||||
- [x] T003 Create `internal/ai` package directories (`prompt`, `genkit`, `plan`) in the repository
|
||||
- [x] T004 Document model token location in `docs/cli-flags.md`
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Foundational (Blocking Prerequisites)
|
||||
|
||||
**Purpose**: Core plumbing that all user stories rely on.
|
||||
|
||||
- [x] T005 Implement env loader for `$HOME/.config/.renamer` in `internal/ai/config/token_store.go` with package `github.com/joho/godotenv`
|
||||
- [x] T006 Define shared prompt/response structs per spec in `internal/ai/prompt/types.go`
|
||||
- [x] T007 Implement Go Genkit workflow skeleton with default OpenAI-compatible model in `internal/ai/genkit/workflow.go`
|
||||
- [x] T008 Build response validator ensuring coverage/uniqueness in `internal/ai/plan/validator.go`
|
||||
- [x] T009 Add CLI flag parsing for AI options (model override, debug export) in `cmd/ai.go`
|
||||
- [x] T010 Wire ledger metadata schema for AI batches in `internal/history/history.go`
|
||||
|
||||
**Checkpoint**: Genkit workflow callable from CLI with validation scaffolding ready.
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: User Story 1 - Generate AI Rename Plan (Priority: P1) 🎯 MVP
|
||||
|
||||
**Goal**: Produce a previewable AI-generated rename plan with sequential, sanitized filenames.
|
||||
|
||||
**Independent Test**: `go run . ai --path <fixtures>` prints numbered preview (`001_...`) without spam terms and logs prompt hash.
|
||||
|
||||
### Tests for User Story 1
|
||||
|
||||
- [x] T011 [P] [US1] Add contract test covering Genkit prompt/response schema in `tests/contract/ai_prompt_contract_test.go`
|
||||
- [x] T012 [P] [US1] Add integration test for preview output on sample batch in `tests/integration/ai_preview_flow_test.go`
|
||||
|
||||
### Implementation for User Story 1
|
||||
|
||||
- [x] T013 [US1] Assemble prompt builder using traversal samples in `internal/ai/prompt/builder.go`
|
||||
- [x] T014 [US1] Execute Genkit workflow and capture telemetry in `internal/ai/genkit/client.go`
|
||||
- [x] T015 [US1] Map Genkit response to preview plan entries in `internal/ai/plan/mapper.go`
|
||||
- [x] T016 [US1] Render preview table with sequence + sanitization notes in `internal/output/table.go`
|
||||
- [x] T017 [US1] Log prompt hash and response warnings to debug output in `internal/output/plain.go`
|
||||
|
||||
**Checkpoint**: `renamer ai --dry-run` fully functional for default policies.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: User Story 2 - Enforce Naming Standards (Priority: P2)
|
||||
|
||||
**Goal**: Allow operators to specify naming policies that the AI prompt and validator enforce.
|
||||
|
||||
**Independent Test**: `renamer ai --naming-casing kebab --prefix proj --dry-run` produces kebab-case names with `proj` prefix and fails invalid responses.
|
||||
|
||||
### Tests for User Story 2
|
||||
|
||||
- [x] T018 [P] [US2] Contract test ensuring casing/prefix rules reach Genkit input in `tests/contract/ai_policy_contract_test.go`
|
||||
- [x] T019 [P] [US2] Integration test covering policy violations in `tests/integration/ai_policy_validation_test.go`
|
||||
|
||||
### Implementation for User Story 2
|
||||
|
||||
- [x] T020 [US2] Extend CLI flags/environment parsing for naming policies in `cmd/ai.go`
|
||||
- [x] T021 [US2] Inject policy directives into prompt payload in `internal/ai/prompt/builder.go`
|
||||
- [x] T022 [US2] Enhance validator to enforce casing/prefix/banned tokens in `internal/ai/plan/validator.go`
|
||||
- [x] T023 [US2] Surface policy failures with actionable messages in `internal/output/plain.go`
|
||||
|
||||
**Checkpoint**: Policy-driven prompts and enforcement operational.
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: User Story 3 - Review, Edit, and Apply Safely (Priority: P3)
|
||||
|
||||
**Goal**: Support manual edits, conflict resolution, and safe apply/undo flows.
|
||||
|
||||
**Independent Test**: Modify exported plan, revalidate, then `renamer ai --yes` applies changes and ledger records AI metadata; undo restores originals.
|
||||
|
||||
### Tests for User Story 3
|
||||
|
||||
- [x] T024 [P] [US3] Integration test covering manual edits + apply/undo in `tests/integration/ai_apply_undo_test.go`
|
||||
- [x] T025 [P] [US3] Contract test ensuring ledger metadata captures prompt/response hashes in `tests/contract/ai_ledger_contract_test.go`
|
||||
|
||||
### Implementation for User Story 3
|
||||
|
||||
- [x] T026 [US3] Implement plan editing/export/import helpers in `internal/ai/plan/editor.go`
|
||||
- [x] T027 [US3] Revalidation workflow for edited plans in `internal/ai/plan/validator.go`
|
||||
- [x] T028 [US3] Conflict detection (duplicate targets, missing sequences) surfaced in preview in `internal/ai/plan/conflicts.go`
|
||||
- [x] T029 [US3] Apply pipeline recording AI metadata to ledger in `internal/ai/plan/apply.go`
|
||||
- [x] T030 [US3] Update undo path to respect AI metadata in `cmd/undo.go`
|
||||
|
||||
**Checkpoint**: Full review/edit/apply loop complete with undo safety.
|
||||
|
||||
---
|
||||
|
||||
## Phase 6: Polish & Cross-Cutting Concerns
|
||||
|
||||
- [x] T031 [P] Add CLI help and usage examples for `renamer ai` in `cmd/root.go`
|
||||
- [x] T032 [P] Update end-user documentation in `docs/cli-flags.md`
|
||||
- [x] T033 [P] Add smoke script exercising AI flow in `scripts/smoke-test-ai.sh`
|
||||
- [x] T034 [P] Record prompt/response telemetry opt-in in `docs/CHANGELOG.md`
|
||||
|
||||
---
|
||||
|
||||
## Dependencies
|
||||
|
||||
1. Setup → Foundational → US1 → US2 → US3 → Polish
|
||||
2. User story dependencies: US2 depends on US1; US3 depends on US1 and US2.
|
||||
|
||||
## Parallel Execution Examples
|
||||
|
||||
- During US1, prompt builder (T013) and Genkit client (T014) can proceed in parallel after foundational tasks.
|
||||
- US2 policy contract test (T018) can run alongside validator enhancements (T022) once prompt builder updates (T021) start.
|
||||
- In US3, ledger integration (T029) can progress concurrently with conflict detection (T028).
|
||||
- Polish tasks (T031–T034) may run in parallel after US3 completes.
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
1. Deliver MVP by completing Phases 1–3 (US1) to provide AI-generated preview with validation.
|
||||
2. Layer policy enforcement (US2) to align output with organizational naming standards.
|
||||
3. Finish with editing/apply safety (US3) and polish tasks before release.
|
||||
108
tests/contract/ai_ledger_contract_test.go
Normal file
108
tests/contract/ai_ledger_contract_test.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package contract
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/rogeecn/renamer/internal/ai/plan"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
func TestAIApplyLedgerMetadataContract(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
writeFile(t, filepath.Join(root, "sample.txt"))
|
||||
|
||||
candidate := plan.Candidate{
|
||||
OriginalPath: "sample.txt",
|
||||
SizeBytes: 4,
|
||||
Depth: 0,
|
||||
}
|
||||
|
||||
response := prompt.RenameResponse{
|
||||
Items: []prompt.RenameItem{
|
||||
{
|
||||
Original: "sample.txt",
|
||||
Proposed: "001_sample-final.txt",
|
||||
Sequence: 1,
|
||||
},
|
||||
},
|
||||
Model: "test-model",
|
||||
PromptHash: "prompt-hash-123",
|
||||
}
|
||||
|
||||
policy := prompt.NamingPolicyConfig{Prefix: "proj", Casing: "kebab"}
|
||||
|
||||
entry, err := plan.Apply(context.Background(), plan.ApplyOptions{
|
||||
WorkingDir: root,
|
||||
Candidates: []plan.Candidate{candidate},
|
||||
Response: response,
|
||||
Policies: policy,
|
||||
PromptHash: response.PromptHash,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("apply: %v", err)
|
||||
}
|
||||
|
||||
if len(entry.Operations) != 1 {
|
||||
t.Fatalf("expected 1 operation, got %d", len(entry.Operations))
|
||||
}
|
||||
|
||||
planFile := filepath.Join(root, "001_sample-final.txt")
|
||||
if _, err := os.Stat(planFile); err != nil {
|
||||
t.Fatalf("expected renamed file: %v", err)
|
||||
}
|
||||
|
||||
ledgerPath := filepath.Join(root, ".renamer")
|
||||
file, err := os.Open(ledgerPath)
|
||||
if err != nil {
|
||||
t.Fatalf("open ledger: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
var lastLine string
|
||||
for scanner.Scan() {
|
||||
lastLine = scanner.Text()
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
t.Fatalf("scan ledger: %v", err)
|
||||
}
|
||||
|
||||
var recorded map[string]any
|
||||
if err := json.Unmarshal([]byte(lastLine), &recorded); err != nil {
|
||||
t.Fatalf("unmarshal ledger entry: %v", err)
|
||||
}
|
||||
|
||||
metaRaw, ok := recorded["metadata"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected metadata in ledger entry")
|
||||
}
|
||||
aiRaw, ok := metaRaw["ai"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected ai metadata in ledger entry")
|
||||
}
|
||||
|
||||
if aiRaw["model"] != "test-model" {
|
||||
t.Fatalf("expected model test-model, got %v", aiRaw["model"])
|
||||
}
|
||||
if aiRaw["promptHash"] != "prompt-hash-123" {
|
||||
t.Fatalf("expected prompt hash recorded, got %v", aiRaw["promptHash"])
|
||||
}
|
||||
if aiRaw["batchSize"].(float64) != 1 {
|
||||
t.Fatalf("expected batch size 1, got %v", aiRaw["batchSize"])
|
||||
}
|
||||
}
|
||||
|
||||
func writeFile(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
|
||||
t.Fatalf("mkdir %s: %v", path, err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte("data"), 0o644); err != nil {
|
||||
t.Fatalf("write file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
116
tests/contract/ai_policy_contract_test.go
Normal file
116
tests/contract/ai_policy_contract_test.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package contract
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
renamercmd "github.com/rogeecn/renamer/cmd"
|
||||
"github.com/rogeecn/renamer/internal/ai/genkit"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// captureWorkflow is a test double for the Genkit workflow runner. It records
// the last request passed to Run so the test can inspect the prompt payload
// the CLI actually sent.
type captureWorkflow struct {
	// request holds the most recent request received by Run.
	request genkit.Request
}

// Run captures req into c.request and returns a canned, single-item rename
// response attributed to "test-model". It never returns an error.
func (c *captureWorkflow) Run(ctx context.Context, req genkit.Request) (genkit.Result, error) {
	c.request = req
	return genkit.Result{
		Response: prompt.RenameResponse{
			Items: []prompt.RenameItem{
				{
					Original: "alpha.txt",
					Proposed: "proj_001_sample_file.txt",
					Sequence: 1,
				},
			},
			Model: "test-model",
		},
	}, nil
}
|
||||
|
||||
// TestAICommandAppliesNamingPoliciesToPrompt runs the `renamer ai` command
// with naming-policy flags against a stubbed Genkit workflow and asserts the
// flag values are propagated into the prompt payload: policy fields
// (prefix, casing, allow-spaces, keep-order, forbidden tokens) and the
// banned-terms list (user token merged with the built-in defaults).
func TestAICommandAppliesNamingPoliciesToPrompt(t *testing.T) {
	// Install a capture stub in place of the real workflow; restore the
	// default factory when the test finishes.
	genkit.ResetWorkflowFactory()
	stub := &captureWorkflow{}
	genkit.OverrideWorkflowFactory(func(ctx context.Context, opts genkit.Options) (genkit.WorkflowRunner, error) {
		return stub, nil
	})
	t.Cleanup(genkit.ResetWorkflowFactory)

	rootDir := t.TempDir()
	createPolicyTestFile(t, filepath.Join(rootDir, "alpha.txt"))

	// Execute `renamer ai` in dry-run mode with explicit naming policies.
	rootCmd := renamercmd.NewRootCommand()
	var stdout, stderr bytes.Buffer
	rootCmd.SetOut(&stdout)
	rootCmd.SetErr(&stderr)
	rootCmd.SetArgs([]string{
		"ai",
		"--path", rootDir,
		"--dry-run",
		"--naming-casing", "snake",
		"--naming-prefix", "proj",
		"--naming-allow-spaces",
		"--naming-keep-order",
		"--banned", "alpha",
	})

	if err := rootCmd.Execute(); err != nil {
		// Dump captured output only on failure to keep passing runs quiet.
		if stdout.Len() > 0 {
			t.Logf("stdout: %s", stdout.String())
		}
		if stderr.Len() > 0 {
			t.Logf("stderr: %s", stderr.String())
		}
		t.Fatalf("command execute: %v", err)
	}

	// The stub recorded the request the CLI built; verify every policy flag
	// reached the prompt payload unchanged.
	req := stub.request
	policies := req.Payload.Policies
	if policies.Prefix != "proj" {
		t.Fatalf("expected prefix proj, got %q", policies.Prefix)
	}
	if policies.Casing != "snake" {
		t.Fatalf("expected casing snake, got %q", policies.Casing)
	}
	if !policies.AllowSpaces {
		t.Fatalf("expected allow spaces flag to propagate")
	}
	if !policies.KeepOriginalOrder {
		t.Fatalf("expected keep original order flag to propagate")
	}
	if len(policies.ForbiddenTokens) != 1 || policies.ForbiddenTokens[0] != "alpha" {
		t.Fatalf("expected forbidden tokens to capture user list, got %#v", policies.ForbiddenTokens)
	}

	// Banned terms must contain both the user-provided token ("alpha") and
	// at least one built-in default ("clickbait" — presumably from the
	// CLI's default stop-word list; confirm against the ai command defaults).
	banned := req.Payload.BannedTerms
	containsDefault := false
	containsUser := false
	for _, term := range banned {
		switch term {
		case "alpha":
			containsUser = true
		case "clickbait":
			containsDefault = true
		}
	}
	if !containsUser {
		t.Fatalf("expected banned terms to include user-provided token")
	}
	if !containsDefault {
		t.Fatalf("expected banned terms to retain default tokens")
	}
}
|
||||
|
||||
func createPolicyTestFile(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
|
||||
t.Fatalf("mkdir %s: %v", path, err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte("demo"), 0o644); err != nil {
|
||||
t.Fatalf("write file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
126
tests/contract/ai_prompt_contract_test.go
Normal file
126
tests/contract/ai_prompt_contract_test.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package contract
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
aiprompt "github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// TestRenamePromptSchemaAlignment builds a prompt payload with a fixed clock
// and a sample cap of 2, then asserts the builder's normalization behavior:
// sample capping and ordering, extension extraction, banned-term cleanup,
// metadata stamping, and the JSON key names of the serialized payload.
func TestRenamePromptSchemaAlignment(t *testing.T) {
	// Fixed clock makes the generatedAt metadata deterministic; max samples
	// of 2 forces the builder to drop one of the three inputs below.
	builder := aiprompt.NewBuilder(
		aiprompt.WithClock(func() time.Time {
			return time.Date(2025, 11, 3, 15, 4, 5, 0, time.UTC)
		}),
		aiprompt.WithMaxSamples(2),
	)

	input := aiprompt.BuildInput{
		WorkingDir: "/tmp/workspace",
		TotalCount: 3,
		Sequence: aiprompt.SequenceRule{
			Style:     "prefix",
			Width:     3,
			Start:     1,
			Separator: "_",
		},
		Policies: aiprompt.PolicyConfig{
			Casing: "kebab",
		},
		// Intentionally messy: duplicate casing ("Promo"/"promo") and a
		// blank entry, to exercise banned-term normalization below.
		BannedTerms: []string{"Promo", " ", "promo", "ads"},
		Samples: []aiprompt.SampleCandidate{
			{
				RelativePath: "promo SALE 01.JPG",
				SizeBytes:    2048,
				Depth:        0,
			},
			{
				RelativePath: filepath.ToSlash(filepath.Join("nested", "Report FINAL.PDF")),
				SizeBytes:    1024,
				Depth:        1,
			},
			{
				RelativePath: "notes.txt",
				SizeBytes:    128,
				Depth:        0,
			},
		},
		Metadata: map[string]string{
			"cliVersion": "test-build",
		},
	}

	promptPayload, err := builder.Build(input)
	if err != nil {
		t.Fatalf("Build error: %v", err)
	}

	if promptPayload.WorkingDir != input.WorkingDir {
		t.Fatalf("expected working dir %q, got %q", input.WorkingDir, promptPayload.WorkingDir)
	}

	if promptPayload.TotalCount != input.TotalCount {
		t.Fatalf("expected total count %d, got %d", input.TotalCount, promptPayload.TotalCount)
	}

	// WithMaxSamples(2) must cap the three inputs down to two.
	if len(promptPayload.Samples) != 2 {
		t.Fatalf("expected 2 samples after max cap, got %d", len(promptPayload.Samples))
	}

	// The nested file is expected first — the builder apparently reorders
	// samples (deepest/largest first? confirm against the builder's sorting
	// rule) — with the extension split off verbatim, case preserved.
	first := promptPayload.Samples[0]
	if first.OriginalName != "nested/Report FINAL.PDF" {
		t.Fatalf("unexpected first sample name: %q", first.OriginalName)
	}
	if first.Extension != ".PDF" {
		t.Fatalf("expected extension to remain case-sensitive, got %q", first.Extension)
	}
	if first.SizeBytes != 1024 {
		t.Fatalf("expected size 1024, got %d", first.SizeBytes)
	}
	if first.PathDepth != 1 {
		t.Fatalf("expected depth 1, got %d", first.PathDepth)
	}

	// Sequence rule and policies must pass through unchanged.
	seq := promptPayload.SequenceRule
	if seq.Style != "prefix" || seq.Width != 3 || seq.Start != 1 || seq.Separator != "_" {
		t.Fatalf("sequence rule mismatch: %#v", seq)
	}

	if promptPayload.Policies.Casing != "kebab" {
		t.Fatalf("expected casing kebab, got %q", promptPayload.Policies.Casing)
	}

	// Banned terms are expected deduplicated (case-folded), stripped of
	// blanks, and sorted: {"Promo", " ", "promo", "ads"} -> {"ads", "promo"}.
	expectedTerms := []string{"ads", "promo"}
	if len(promptPayload.BannedTerms) != len(expectedTerms) {
		t.Fatalf("expected %d banned terms, got %d", len(expectedTerms), len(promptPayload.BannedTerms))
	}
	for i, term := range expectedTerms {
		if promptPayload.BannedTerms[i] != term {
			t.Fatalf("banned term at %d mismatch: expected %q got %q", i, term, promptPayload.BannedTerms[i])
		}
	}

	// Caller metadata is preserved and the builder stamps generatedAt from
	// the injected clock (RFC 3339, UTC).
	if promptPayload.Metadata["cliVersion"] != "test-build" {
		t.Fatalf("metadata cliVersion mismatch: %s", promptPayload.Metadata["cliVersion"])
	}
	if promptPayload.Metadata["generatedAt"] != "2025-11-03T15:04:05Z" {
		t.Fatalf("expected generatedAt timestamp preserved, got %q", promptPayload.Metadata["generatedAt"])
	}

	// Round-trip through JSON to pin the wire-level key names of the schema.
	raw, err := json.Marshal(promptPayload)
	if err != nil {
		t.Fatalf("marshal error: %v", err)
	}
	var decoded map[string]any
	if err := json.Unmarshal(raw, &decoded); err != nil {
		t.Fatalf("unmarshal round-trip error: %v", err)
	}

	for _, key := range []string{"workingDir", "samples", "totalCount", "sequenceRule", "policies"} {
		if _, ok := decoded[key]; !ok {
			t.Fatalf("prompt JSON missing key %q", key)
		}
	}
}
|
||||
198
tests/integration/ai_apply_undo_test.go
Normal file
198
tests/integration/ai_apply_undo_test.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
renamercmd "github.com/rogeecn/renamer/cmd"
|
||||
"github.com/rogeecn/renamer/internal/ai/genkit"
|
||||
"github.com/rogeecn/renamer/internal/ai/plan"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
"github.com/rogeecn/renamer/internal/listing"
|
||||
)
|
||||
|
||||
// TestAIApplyAndUndoFlow exercises the full review/edit/apply/undo loop:
// a dry-run preview exports the AI plan to JSON, the test edits the plan
// file (simulating an operator), revalidates it, previews and applies the
// edited plan, then undoes the batch and checks the original filenames are
// restored.
func TestAIApplyAndUndoFlow(t *testing.T) {
	// Stub workflow returning a deterministic two-item plan; stubWorkflow is
	// defined elsewhere in this package.
	initialWorkflow := stubWorkflow{
		response: prompt.RenameResponse{
			Items: []prompt.RenameItem{
				{
					Original: "draft_one.txt",
					Proposed: "001_initial.txt",
					Sequence: 1,
				},
				{
					Original: "draft_two.txt",
					Proposed: "002_initial.txt",
					Sequence: 2,
				},
			},
			Model: "test-model",
		},
	}

	genkit.OverrideWorkflowFactory(func(ctx context.Context, opts genkit.Options) (genkit.WorkflowRunner, error) {
		return initialWorkflow, nil
	})
	t.Cleanup(genkit.ResetWorkflowFactory)

	root := t.TempDir()
	writeFile(t, filepath.Join(root, "draft_one.txt"))
	writeFile(t, filepath.Join(root, "draft_two.txt"))

	// The exported plan lives inside the scanned directory; it is filtered
	// out of the candidate list further below.
	planPath := filepath.Join(root, "ai-plan.json")

	// Stage 1: dry-run preview that exports the AI plan to planPath.
	preview := renamercmd.NewRootCommand()
	var previewOut, previewErr bytes.Buffer
	preview.SetOut(&previewOut)
	preview.SetErr(&previewErr)
	preview.SetArgs([]string{
		"ai",
		"--path", root,
		"--dry-run",
		"--export-plan", planPath,
	})

	if err := preview.Execute(); err != nil {
		if previewOut.Len() > 0 {
			t.Logf("preview stdout: %s", previewOut.String())
		}
		if previewErr.Len() > 0 {
			t.Logf("preview stderr: %s", previewErr.String())
		}
		t.Fatalf("initial preview: %v", err)
	}

	// Stage 2: load the exported plan and edit it as an operator would.
	data, err := os.ReadFile(planPath)
	if err != nil {
		t.Fatalf("read plan: %v", err)
	}
	var exported prompt.RenameResponse
	if err := json.Unmarshal(data, &exported); err != nil {
		t.Fatalf("unmarshal plan: %v", err)
	}

	if len(exported.Items) != 2 {
		t.Fatalf("expected two plan items, got %d", len(exported.Items))
	}
	// Simulate operator edit.
	exported.Items[0].Proposed = "001_final-one.txt"
	exported.Items[1].Proposed = "002_final-two.txt"
	exported.Items[0].Notes = "custom edit"

	modified, err := json.MarshalIndent(exported, "", " ")
	if err != nil {
		t.Fatalf("marshal modified plan: %v", err)
	}
	if err := os.WriteFile(planPath, append(modified, '\n'), 0o644); err != nil {
		t.Fatalf("write modified plan: %v", err)
	}

	// Stage 3: revalidate the edited plan directly against the current
	// directory contents, excluding the plan file itself from candidates.
	req := &listing.ListingRequest{WorkingDir: root}
	if err := req.Validate(); err != nil {
		t.Fatalf("validate listing request: %v", err)
	}
	currentCandidates, err := plan.CollectCandidates(context.Background(), req)
	if err != nil {
		t.Fatalf("collect candidates: %v", err)
	}
	filtered := make([]plan.Candidate, 0, len(currentCandidates))
	for _, cand := range currentCandidates {
		if strings.EqualFold(cand.OriginalPath, filepath.Base(planPath)) {
			continue
		}
		filtered = append(filtered, cand)
	}
	originals := make([]string, 0, len(filtered))
	for _, cand := range filtered {
		originals = append(originals, cand.OriginalPath)
	}
	validator := plan.NewValidator(originals, prompt.NamingPolicyConfig{Casing: "kebab"}, nil)
	if _, err := validator.Validate(exported); err != nil {
		t.Fatalf("pre-validation of edited plan: %v", err)
	}

	// Stage 4: dry-run preview of the imported (edited) plan; the preview
	// output must show the operator's final names.
	previewEdited := renamercmd.NewRootCommand()
	var editedOut, editedErr bytes.Buffer
	previewEdited.SetOut(&editedOut)
	previewEdited.SetErr(&editedErr)
	previewEdited.SetArgs([]string{
		"ai",
		"--path", root,
		"--dry-run",
		"--import-plan", planPath,
	})

	if err := previewEdited.Execute(); err != nil {
		if editedOut.Len() > 0 {
			t.Logf("edited stdout: %s", editedOut.String())
		}
		if editedErr.Len() > 0 {
			t.Logf("edited stderr: %s", editedErr.String())
		}
		t.Fatalf("preview edited plan: %v", err)
	}

	if !strings.Contains(editedOut.String(), "001_final-one.txt") {
		t.Fatalf("expected edited preview to show final name, got: %s", editedOut.String())
	}

	// Stage 5: apply the imported plan non-interactively (--yes).
	applyCmd := renamercmd.NewRootCommand()
	var applyOut, applyErr bytes.Buffer
	applyCmd.SetOut(&applyOut)
	applyCmd.SetErr(&applyErr)
	applyCmd.SetArgs([]string{
		"ai",
		"--path", root,
		"--import-plan", planPath,
		"--yes",
	})

	if err := applyCmd.Execute(); err != nil {
		if applyOut.Len() > 0 {
			t.Logf("apply stdout: %s", applyOut.String())
		}
		if applyErr.Len() > 0 {
			t.Logf("apply stderr: %s", applyErr.String())
		}
		t.Fatalf("apply plan: %v", err)
	}

	if _, err := os.Stat(filepath.Join(root, "001_final-one.txt")); err != nil {
		t.Fatalf("expected renamed file: %v", err)
	}
	if _, err := os.Stat(filepath.Join(root, "002_final-two.txt")); err != nil {
		t.Fatalf("expected renamed file: %v", err)
	}

	// Stage 6: undo the batch and verify the original names come back.
	undo := renamercmd.NewRootCommand()
	var undoOut bytes.Buffer
	undo.SetOut(&undoOut)
	undo.SetErr(&undoOut)
	undo.SetArgs([]string{"undo", "--path", root})

	if err := undo.Execute(); err != nil {
		t.Fatalf("undo command: %v", err)
	}

	if _, err := os.Stat(filepath.Join(root, "draft_one.txt")); err != nil {
		t.Fatalf("expected original file after undo: %v", err)
	}
	if _, err := os.Stat(filepath.Join(root, "draft_two.txt")); err != nil {
		t.Fatalf("expected original file after undo: %v", err)
	}
}
|
||||
|
||||
func writeFile(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
|
||||
t.Fatalf("mkdir %s: %v", path, err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte("data"), 0o644); err != nil {
|
||||
t.Fatalf("write file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
84
tests/integration/ai_policy_validation_test.go
Normal file
84
tests/integration/ai_policy_validation_test.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
renamercmd "github.com/rogeecn/renamer/cmd"
|
||||
"github.com/rogeecn/renamer/internal/ai/genkit"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// violatingWorkflow is a stub genkit workflow whose canned response
// deliberately breaks the naming policy configured by the test below:
// the proposed name has no "proj" prefix and contains the banned token
// "offer", so CLI-side validation must reject it.
type violatingWorkflow struct{}

// Run ignores the request and returns a fixed single-item rename response
// plus a model-style warning. It never returns an error, ensuring any
// failure observed by the test comes from policy validation, not the stub.
func (violatingWorkflow) Run(ctx context.Context, req genkit.Request) (genkit.Result, error) {
	return genkit.Result{
		Response: prompt.RenameResponse{
			Items: []prompt.RenameItem{
				{
					Original: "video.mov",
					Proposed: "001_clickbait-offer.mov",
					Sequence: 1,
				},
			},
			Warnings: []string{"model returned promotional phrasing"},
		},
	}, nil
}
|
||||
|
||||
// TestAIPolicyValidationFailsWithActionableMessage runs `renamer ai --dry-run`
// with a stub workflow whose proposal violates the configured naming policy
// and asserts the command fails with one actionable message per violated
// rule (prefix and banned token) on stderr.
func TestAIPolicyValidationFailsWithActionableMessage(t *testing.T) {
	// Swap the real Genkit workflow for the violating stub; restore the
	// factory when the test finishes.
	genkit.OverrideWorkflowFactory(func(ctx context.Context, opts genkit.Options) (genkit.WorkflowRunner, error) {
		return violatingWorkflow{}, nil
	})
	t.Cleanup(genkit.ResetWorkflowFactory)

	// Single fixture file matching the stub's "Original" entry.
	rootDir := t.TempDir()
	createAIPolicyFixture(t, filepath.Join(rootDir, "video.mov"))

	rootCmd := renamercmd.NewRootCommand()
	var stdout, stderr bytes.Buffer
	rootCmd.SetOut(&stdout)
	rootCmd.SetErr(&stderr)
	// Policy flags that the stub's proposal is guaranteed to violate.
	rootCmd.SetArgs([]string{
		"ai",
		"--path", rootDir,
		"--dry-run",
		"--naming-casing", "kebab",
		"--naming-prefix", "proj",
		"--banned", "offer",
	})

	err := rootCmd.Execute()
	if err == nil {
		t.Fatalf("expected policy violation error")
	}

	// Each violated rule must surface its own labelled message on stderr.
	lines := stderr.String()
	if !strings.Contains(lines, "Policy violation (prefix)") {
		t.Fatalf("expected prefix violation message in stderr, got: %s", lines)
	}
	if !strings.Contains(lines, "Policy violation (banned)") {
		t.Fatalf("expected banned token message in stderr, got: %s", lines)
	}
	if !strings.Contains(err.Error(), "policy violations") {
		t.Fatalf("expected error to mention policy violations, got: %v", err)
	}

	// Surface any unexpected stdout for debugging; not a failure condition.
	if stdout.Len() != 0 {
		t.Logf("stdout: %s", stdout.String())
	}
}
|
||||
|
||||
func createAIPolicyFixture(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
|
||||
t.Fatalf("mkdir %s: %v", path, err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte("demo"), 0o644); err != nil {
|
||||
t.Fatalf("write file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
116
tests/integration/ai_preview_flow_test.go
Normal file
116
tests/integration/ai_preview_flow_test.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
renamercmd "github.com/rogeecn/renamer/cmd"
|
||||
"github.com/rogeecn/renamer/internal/ai/genkit"
|
||||
"github.com/rogeecn/renamer/internal/ai/prompt"
|
||||
)
|
||||
|
||||
// stubWorkflow is a deterministic genkit workflow that returns a pre-baked
// response, letting the CLI preview flow be exercised without a live model.
type stubWorkflow struct {
	// response is returned verbatim from every Run call.
	response prompt.RenameResponse
}

// Run ignores the request and wraps the canned response; it never fails.
func (s stubWorkflow) Run(ctx context.Context, req genkit.Request) (genkit.Result, error) {
	return genkit.Result{Response: s.response}, nil
}
|
||||
|
||||
func TestAIPreviewFlowRendersSequenceTable(t *testing.T) {
|
||||
workflow := stubWorkflow{
|
||||
response: prompt.RenameResponse{
|
||||
Items: []prompt.RenameItem{
|
||||
{
|
||||
Original: "promo SALE 01.JPG",
|
||||
Proposed: "001_summer-session.jpg",
|
||||
Sequence: 1,
|
||||
Notes: "Removed promotional flair",
|
||||
},
|
||||
{
|
||||
Original: "family_photo.png",
|
||||
Proposed: "002_family-photo.png",
|
||||
Sequence: 2,
|
||||
Notes: "Normalized casing",
|
||||
},
|
||||
},
|
||||
Warnings: []string{"AI warning: trimmed banned tokens"},
|
||||
PromptHash: "",
|
||||
},
|
||||
}
|
||||
|
||||
genkit.OverrideWorkflowFactory(func(ctx context.Context, opts genkit.Options) (genkit.WorkflowRunner, error) {
|
||||
return workflow, nil
|
||||
})
|
||||
defer genkit.ResetWorkflowFactory()
|
||||
|
||||
root := t.TempDir()
|
||||
createAIPreviewFile(t, filepath.Join(root, "promo SALE 01.JPG"))
|
||||
createAIPreviewFile(t, filepath.Join(root, "family_photo.png"))
|
||||
|
||||
t.Setenv("default_MODEL_AUTH_TOKEN", "test-token")
|
||||
|
||||
rootCmd := renamercmd.NewRootCommand()
|
||||
var stdout, stderr bytes.Buffer
|
||||
rootCmd.SetOut(&stdout)
|
||||
rootCmd.SetErr(&stderr)
|
||||
exportPath := filepath.Join(root, "plan.json")
|
||||
rootCmd.SetArgs([]string{
|
||||
"ai",
|
||||
"--path", root,
|
||||
"--dry-run",
|
||||
"--debug-genkit",
|
||||
"--export-plan", exportPath,
|
||||
})
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
t.Fatalf("command execute: %v", err)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(exportPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read exported plan: %v", err)
|
||||
}
|
||||
|
||||
var exported prompt.RenameResponse
|
||||
if err := json.Unmarshal(data, &exported); err != nil {
|
||||
t.Fatalf("unmarshal exported plan: %v", err)
|
||||
}
|
||||
if len(exported.Items) != len(workflow.response.Items) {
|
||||
t.Fatalf("expected exported items %d, got %d", len(workflow.response.Items), len(exported.Items))
|
||||
}
|
||||
|
||||
out := stdout.String()
|
||||
if !strings.Contains(out, "SEQ") || !strings.Contains(out, "ORIGINAL") || !strings.Contains(out, "SANITIZED") {
|
||||
t.Fatalf("expected table headers in output, got:\n%s", out)
|
||||
}
|
||||
if !strings.Contains(out, "001") || !strings.Contains(out, "promo SALE 01.JPG") || !strings.Contains(out, "001_summer-session.jpg") {
|
||||
t.Fatalf("expected first entry in output, got:\n%s", out)
|
||||
}
|
||||
if !strings.Contains(out, "removed: promo sale") {
|
||||
t.Fatalf("expected sanitization notes in output, got:\n%s", out)
|
||||
}
|
||||
|
||||
errOut := stderr.String()
|
||||
if !strings.Contains(errOut, "Prompt hash:") {
|
||||
t.Fatalf("expected prompt hash in debug output, got:\n%s", errOut)
|
||||
}
|
||||
if !strings.Contains(errOut, "AI warning: trimmed banned tokens") {
|
||||
t.Fatalf("expected warning surfaced in debug output, got:\n%s", errOut)
|
||||
}
|
||||
}
|
||||
|
||||
func createAIPreviewFile(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
|
||||
t.Fatalf("mkdir %s: %v", path, err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte("test"), 0o644); err != nil {
|
||||
t.Fatalf("write file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
9
tools/genkit.go
Normal file
9
tools/genkit.go
Normal file
@@ -0,0 +1,9 @@
|
||||
//go:build tools

// Package tools exists only to pin build/tool dependencies in go.mod.
// It is excluded from normal builds by the "tools" build tag above.
package tools

// The blank import keeps the Genkit module in the dependency graph (so
// `go mod tidy` does not prune it) even before runtime wiring lands.
import (
	_ "github.com/firebase/genkit/go/genkit"
)
|
||||
Reference in New Issue
Block a user