14 changes: 12 additions & 2 deletions README.md
@@ -50,13 +50,23 @@ curl -sSL https://raw.githubusercontent.com/yoanbernabeu/grepai/main/install.sh
irm https://raw.githubusercontent.com/yoanbernabeu/grepai/main/install.ps1 | iex
```

Requires an embedding provider — [Ollama](https://ollama.ai) (default), [LM Studio](https://lmstudio.ai), or OpenAI.
Requires an embedding provider — [Ollama](https://ollama.ai) (default), managed local `llama.cpp`, [LM Studio](https://lmstudio.ai), or OpenAI.

**Ollama (recommended):**
```bash
ollama pull nomic-embed-text
```

**Managed local `llama.cpp`:**
```bash
grepai init --provider llamacpp
grepai model install
grepai model use bge-small-en-v1.5-q8_0
```

If managed local models are already installed, a plain `grepai init` prompts you to choose one of them when you select the `llamacpp` provider.
Managed `llama.cpp` runtime support is currently limited to macOS (`arm64`, `amd64`), Linux (`amd64`), and Windows (`amd64`).

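To skip the prompt entirely, a minimal sketch is shown below; the `--provider` and `--model` flags and the model id are taken from this PR's shell-completion code rather than from documented usage, so treat the exact invocation as an assumption:

```bash
# Assumed non-interactive setup; flag names and model id come from the completion code in this PR
grepai model install nomic-embed-text-v1.5-q8_0
grepai init --provider llamacpp --model nomic-embed-text-v1.5-q8_0
```
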
## Quick Start

```bash
@@ -68,7 +78,7 @@ grepai trace callers "Login" # Find who calls a function

## Shell Completion

grepai supports autocompletion for commands, flags, and dynamic values (workspace names, project names, providers, backends).
grepai supports autocompletion for commands, flags, and dynamic values (workspace names, project names, providers, backends, and managed model ids for `llamacpp`).

**Zsh (add to `~/.zshrc`):**
```bash
77 changes: 73 additions & 4 deletions cli/completion.go
@@ -3,6 +3,7 @@ package cli
import (
"github.com/spf13/cobra"
"github.com/yoanbernabeu/grepai/config"
"github.com/yoanbernabeu/grepai/internal/managedassets"
)

var completionCmd = &cobra.Command{
@@ -92,13 +93,17 @@ func init() {
func registerCompletions() {
// Static flag completions for initCmd
_ = initCmd.RegisterFlagCompletionFunc("provider", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{
completions := []string{
"ollama\tLocal embedding with Ollama",
"lmstudio\tLocal embedding with LM Studio",
"openai\tCloud embedding with OpenAI",
"synthetic\tCloud embedding with Synthetic (free)",
"openrouter\tCloud multi-provider gateway",
}, cobra.ShellCompDirectiveNoFileComp
}
if managedLlamaCPPSupported() {
completions = append(completions, "llamacpp\tManaged local embedding with llama.cpp")
}
return completions, cobra.ShellCompDirectiveNoFileComp
})
_ = initCmd.RegisterFlagCompletionFunc("backend", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{
@@ -107,6 +112,26 @@ func registerCompletions() {
"qdrant\tQdrant vector database",
}, cobra.ShellCompDirectiveNoFileComp
})
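// Provider-aware --model completions for initCmd: llamacpp draws from the managed model catalog,
// openai/openrouter list their known embedding models, anything else gets no suggestions.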
_ = initCmd.RegisterFlagCompletionFunc("model", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
provider, _ := cmd.Flags().GetString("provider")
switch provider {
case "llamacpp":
return completeAvailableManagedModels(), cobra.ShellCompDirectiveNoFileComp
case "openai":
return []string{
"text-embedding-3-small\tOpenAI small embedding model",
"text-embedding-3-large\tOpenAI large embedding model",
}, cobra.ShellCompDirectiveNoFileComp
case "openrouter":
return []string{
"openai/text-embedding-3-small\tOpenRouter small embedding model",
"openai/text-embedding-3-large\tOpenRouter large embedding model",
"qwen/qwen3-embedding-8b\tOpenRouter Qwen code-focused embedding model",
}, cobra.ShellCompDirectiveNoFileComp
default:
return nil, cobra.ShellCompDirectiveNoFileComp
}
})

// Static flag completions for workspaceCreateCmd
_ = workspaceCreateCmd.RegisterFlagCompletionFunc("backend", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
@@ -116,15 +141,38 @@
}, cobra.ShellCompDirectiveNoFileComp
})
_ = workspaceCreateCmd.RegisterFlagCompletionFunc("provider", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{
completions := []string{
"ollama\tLocal embedding with Ollama",
"lmstudio\tLocal embedding with LM Studio",
"openai\tCloud embedding with OpenAI",
"synthetic\tCloud embedding with Synthetic (free)",
"openrouter\tCloud multi-provider gateway",
}, cobra.ShellCompDirectiveNoFileComp
}
if managedLlamaCPPSupported() {
completions = append(completions, "llamacpp\tManaged local embedding with llama.cpp")
}
return completions, cobra.ShellCompDirectiveNoFileComp
})

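// Positional-argument completions for the managed model commands (use/remove complete installed
// models; install completes the available catalog).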
modelUseCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) == 0 {
return completeInstalledManagedModels(), cobra.ShellCompDirectiveNoFileComp
}
return nil, cobra.ShellCompDirectiveNoFileComp
}
modelRemoveCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) == 0 {
return completeInstalledManagedModels(), cobra.ShellCompDirectiveNoFileComp
}
return nil, cobra.ShellCompDirectiveNoFileComp
}
modelInstallCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) == 0 {
return completeAvailableManagedModels(), cobra.ShellCompDirectiveNoFileComp
}
return nil, cobra.ShellCompDirectiveNoFileComp
}

// Static flag completions for trace commands (mode)
for _, cmd := range []*cobra.Command{traceCallersCmd, traceCalleesCmd, traceGraphCmd} {
cmd := cmd
@@ -218,3 +266,24 @@ func completeProjectNames(workspaceName string) []string {
}
return names
}

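// completeAvailableManagedModels returns every model in the managed catalog as "id\tdescription" entries.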
func completeAvailableManagedModels() []string {
models := managedassets.ListAvailableModels()
completions := make([]string, 0, len(models))
for _, model := range models {
completions = append(completions, model.ID+"\t"+model.Display)
}
return completions
}

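// completeInstalledManagedModels returns locally installed managed models; on load errors it returns no completions.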
func completeInstalledManagedModels() []string {
models, err := managedassets.LoadInstalledModels()
if err != nil {
return nil
}
completions := make([]string, 0, len(models))
for _, model := range models {
completions = append(completions, model.ID+"\tinstalled managed model")
}
return completions
}
119 changes: 119 additions & 0 deletions cli/completion_test.go
@@ -8,9 +8,17 @@ import (
"testing"

"github.com/yoanbernabeu/grepai/config"
"github.com/yoanbernabeu/grepai/internal/managedassets"
)

func TestCompletionZsh_should_output_compdef(t *testing.T) {
prevProvider := initProvider
prevModel := initModel
defer func() {
initProvider = prevProvider
initModel = prevModel
}()

var buf bytes.Buffer
rootCmd.SetOut(&buf)
rootCmd.SetArgs([]string{"completion", "zsh"})
@@ -30,6 +38,13 @@ func TestCompletionZsh_should_output_compdef(t *testing.T) {
}

func TestCompletionBash_should_output_valid_script(t *testing.T) {
prevProvider := initProvider
prevModel := initModel
defer func() {
initProvider = prevProvider
initModel = prevModel
}()

var buf bytes.Buffer
rootCmd.SetOut(&buf)
rootCmd.SetArgs([]string{"completion", "bash"})
@@ -45,6 +60,13 @@ func TestCompletionBash_should_output_valid_script(t *testing.T) {
}

func TestCompletionFish_should_output_valid_script(t *testing.T) {
prevProvider := initProvider
prevModel := initModel
defer func() {
initProvider = prevProvider
initModel = prevModel
}()

var buf bytes.Buffer
rootCmd.SetOut(&buf)
rootCmd.SetArgs([]string{"completion", "fish"})
@@ -60,6 +82,13 @@ func TestCompletionFish_should_output_valid_script(t *testing.T) {
}

func TestCompletionPowershell_should_output_valid_script(t *testing.T) {
prevProvider := initProvider
prevModel := initModel
defer func() {
initProvider = prevProvider
initModel = prevModel
}()

var buf bytes.Buffer
rootCmd.SetOut(&buf)
rootCmd.SetArgs([]string{"completion", "powershell"})
@@ -133,3 +162,93 @@ func TestCompleteProjectNames_should_return_project_names(t *testing.T) {
t.Fatalf("expected frontend and backend, got: %v", names)
}
}

func TestCompletionScriptIncludesLlamaCPPProvider(t *testing.T) {
prevProvider := initProvider
prevModel := initModel
defer func() {
initProvider = prevProvider
initModel = prevModel
}()

var buf bytes.Buffer
rootCmd.SetOut(&buf)
rootCmd.SetArgs([]string{"__complete", "init", "--provider", ""})
defer rootCmd.SetOut(nil)

if err := rootCmd.Execute(); err != nil {
t.Fatalf("provider completion failed: %v", err)
}

if !strings.Contains(buf.String(), "llamacpp") {
t.Fatalf("expected llamacpp in completion output, got: %s", buf.String())
}
}

func TestCompletionInitModelIncludesManagedModelsForLlamaCPP(t *testing.T) {
prevProvider := initProvider
prevModel := initModel
defer func() {
initProvider = prevProvider
initModel = prevModel
}()

var buf bytes.Buffer
rootCmd.SetOut(&buf)
rootCmd.SetArgs([]string{"__complete", "init", "--provider", "llamacpp", "--model", ""})
defer rootCmd.SetOut(nil)

if err := rootCmd.Execute(); err != nil {
t.Fatalf("model completion failed: %v", err)
}

output := buf.String()
if !strings.Contains(output, "bge-small-en-v1.5-q8_0") {
t.Fatalf("expected default managed model in completion output, got: %s", output)
}
if !strings.Contains(output, "nomic-embed-text-v1.5-q8_0") {
t.Fatalf("expected Nomic managed model in completion output, got: %s", output)
}
}

func TestCompletionModelUseIncludesInstalledModels(t *testing.T) {
prevProvider := initProvider
prevModel := initModel
defer func() {
initProvider = prevProvider
initModel = prevModel
}()

tmpDir := t.TempDir()
oldHome := os.Getenv("HOME")
_ = os.Setenv("HOME", tmpDir)
defer os.Setenv("HOME", oldHome)

modelDef, err := managedassets.LookupModel("nomic-embed-text-v1.5-q8_0")
if err != nil {
t.Fatalf("LookupModel failed: %v", err)
}
if err := managedassets.SaveInstalledModels([]managedassets.InstalledModel{{
ID: modelDef.ID,
FileName: modelDef.FileName,
Path: filepath.Join(tmpDir, modelDef.FileName),
SourceURL: modelDef.URL,
SizeBytes: modelDef.SizeBytes,
Dimensions: modelDef.Dimensions,
}}); err != nil {
t.Fatalf("SaveInstalledModels failed: %v", err)
}

var buf bytes.Buffer
rootCmd.SetOut(&buf)
rootCmd.SetArgs([]string{"__complete", "model", "use", ""})
defer rootCmd.SetOut(nil)

if err := rootCmd.Execute(); err != nil {
t.Fatalf("model use completion failed: %v", err)
}

if !strings.Contains(buf.String(), modelDef.ID) {
t.Fatalf("expected installed model in completion output, got: %s", buf.String())
}
}