lsp: Update LSP linting to run incrementally after file change (StyraInc#1146)

* lsp: Run aggregate-triggered lints incrementally

This changes how workspace lint runs are triggered. Now, when a file
with aggregate violations is changed, a full workspace lint still runs,
but only for the aggregate rules that file participates in, and it reuses
cached intermediate aggregate data instead of re-evaluating everything.
(A rough sketch of the idea follows these change notes.)

* WIP

* add a polling workspace ticker

* remove prints

* Increase completions timeout
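
For illustration, a minimal, self-contained Go sketch of the caching idea described above. This is not Regal's actual code: the Aggregate type, lintAggregateRules, and the file names are stand-ins for report.Aggregate and the real linter plumbing.

// Sketch: after one file changes, re-run only the aggregate rules that
// file contributes to, feeding them cached aggregates from other files.
package main

import "fmt"

// Aggregate is a simplified stand-in for report.Aggregate: one piece of
// data that an aggregate rule collected from one source file.
type Aggregate struct {
	SourceFile string
	Rule       string
	Data       string
}

// lintAggregateRules stands in for an aggregate lint pass restricted to
// the given rules; here it only reports what it would evaluate.
func lintAggregateRules(rules map[string]bool, aggregates []Aggregate) {
	for rule := range rules {
		n := 0
		for _, a := range aggregates {
			if a.Rule == rule {
				n++
			}
		}
		fmt.Printf("re-evaluating %s over %d cached aggregates\n", rule, n)
	}
}

func main() {
	// Cached aggregates from an earlier full workspace lint.
	cache := map[string][]Aggregate{
		"a.rego": {{SourceFile: "a.rego", Rule: "prefer-package-imports", Data: "import data.users"}},
		"b.rego": {{SourceFile: "b.rego", Rule: "prefer-package-imports", Data: "import data.roles"}},
	}

	// "a.rego" changed: recompute only that file's aggregates...
	changed := "a.rego"
	cache[changed] = []Aggregate{{SourceFile: changed, Rule: "prefer-package-imports", Data: "import data.users.first_names"}}

	// ...then re-run only the aggregate rules the changed file has,
	// over the cached aggregates from every file.
	rulesToRun := make(map[string]bool)
	for _, a := range cache[changed] {
		rulesToRun[a.Rule] = true
	}

	var all []Aggregate
	for _, aggs := range cache {
		all = append(all, aggs...)
	}

	lintAggregateRules(rulesToRun, all)
}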
charlieegan3 authored Oct 3, 2024
1 parent 85b7be7 commit 67162e6
Showing 26 changed files with 2,680 additions and 1,282 deletions.
2 changes: 1 addition & 1 deletion cmd/languageserver.go
@@ -28,7 +28,7 @@ func init() {
ErrorLog: os.Stderr,
}

- ls := lsp.NewLanguageServer(opts)
+ ls := lsp.NewLanguageServer(ctx, opts)

conn := lsp.NewConnectionFromLanguageServer(ctx, ls.Handle, &lsp.ConnectionOptions{
LoggingConfig: lsp.ConnectionLoggingConfig{
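The constructor now receives the server's context. A plausible reason, given the "polling workspace ticker" in the change notes, is that the server starts background goroutines which must stop when that context is cancelled. A minimal sketch of the pattern (the names pollWorkspace and the 3-second interval are illustrative assumptions, not Regal's actual API):

package main

import (
	"context"
	"fmt"
	"time"
)

type LanguageServer struct{}

// NewLanguageServer takes a context so background work it spawns can be
// shut down together with the server.
func NewLanguageServer(ctx context.Context) *LanguageServer {
	ls := &LanguageServer{}
	go ls.pollWorkspace(ctx, 3*time.Second) // hypothetical ticker interval
	return ls
}

// pollWorkspace periodically re-checks the workspace until ctx is done.
func (ls *LanguageServer) pollWorkspace(ctx context.Context, every time.Duration) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			fmt.Println("re-checking workspace for changed files...")
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_ = NewLanguageServer(ctx)
	<-ctx.Done() // the poller exits when the context does
}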
4 changes: 2 additions & 2 deletions docs/rules/imports/prefer-package-imports.md
@@ -15,7 +15,7 @@ import rego.v1
# Rule imported directly
import data.users.first_names
- has_waldo {
+ has_waldo if {
# Not obvious where "first_names" comes from
"Waldo" in first_names
}
@@ -30,7 +30,7 @@ import rego.v1
# Package imported rather than rule
import data.users
- has_waldo {
+ has_waldo if {
# Obvious where "first_names" comes from
"Waldo" in users.first_names
}
2 changes: 1 addition & 1 deletion go.mod
@@ -22,6 +22,7 @@ require (
github.com/sourcegraph/jsonrpc2 v0.2.0
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
+ gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)

@@ -89,6 +90,5 @@
google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
144 changes: 94 additions & 50 deletions internal/lsp/cache/cache.go
@@ -9,6 +9,7 @@ import (
"github.com/open-policy-agent/opa/ast"

"github.com/styrainc/regal/internal/lsp/types"
"github.com/styrainc/regal/pkg/report"
)

// Cache is used to store: current file contents (which includes unsaved changes), the latest parsed modules, and
@@ -26,12 +27,15 @@ type Cache struct {
// modules is a map of file URI to parsed AST modules from the latest file contents value
modules map[string]*ast.Module

+ // aggregateData stores the aggregate data from evaluations for each file.
+ // This is used to cache the results of expensive evaluations and can be used
+ // to update aggregate diagnostics incrementally.
+ aggregateData map[string][]report.Aggregate
+ aggregateDataMu sync.Mutex

// diagnosticsFile is a map of file URI to diagnostics for that file
diagnosticsFile map[string][]types.Diagnostic

- // diagnosticsAggregate is a map of file URI to aggregate diagnostics for that file
- diagnosticsAggregate map[string][]types.Diagnostic

// diagnosticsParseErrors is a map of file URI to parse errors for that file
diagnosticsParseErrors map[string][]types.Diagnostic

@@ -54,8 +58,6 @@ type Cache struct {

diagnosticsFileMu sync.Mutex

- diagnosticsAggregateMu sync.Mutex
-
diagnosticsParseMu sync.Mutex

builtinPositionsMu sync.Mutex
@@ -72,8 +74,9 @@ func NewCache() *Cache {

modules: make(map[string]*ast.Module),

+ aggregateData: make(map[string][]report.Aggregate),
+
diagnosticsFile: make(map[string][]types.Diagnostic),
- diagnosticsAggregate: make(map[string][]types.Diagnostic),
diagnosticsParseErrors: make(map[string][]types.Diagnostic),

builtinPositionsFile: make(map[string]map[uint][]types.BuiltinPosition),
@@ -83,27 +86,6 @@
}
}

- func (c *Cache) GetAllDiagnosticsForURI(fileURI string) []types.Diagnostic {
- parseDiags, ok := c.GetParseErrors(fileURI)
- if ok && len(parseDiags) > 0 {
- return parseDiags
- }
-
- allDiags := make([]types.Diagnostic, 0)
-
- aggDiags, ok := c.GetAggregateDiagnostics(fileURI)
- if ok {
- allDiags = append(allDiags, aggDiags...)
- }
-
- fileDiags, ok := c.GetFileDiagnostics(fileURI)
- if ok {
- allDiags = append(allDiags, fileDiags...)
- }
-
- return allDiags
- }
-
func (c *Cache) GetAllFiles() map[string]string {
c.fileContentsMu.Lock()
defer c.fileContentsMu.Unlock()
@@ -180,6 +162,69 @@ func (c *Cache) SetModule(fileURI string, module *ast.Module) {
c.modules[fileURI] = module
}

+ // SetFileAggregates will only set aggregate data for the provided URI. Even if
+ // data for other files is provided, only the specified URI is updated.
+ func (c *Cache) SetFileAggregates(fileURI string, data map[string][]report.Aggregate) {
+ c.aggregateDataMu.Lock()
+ defer c.aggregateDataMu.Unlock()
+
+ flattenedAggregates := make([]report.Aggregate, 0)
+
+ for _, aggregates := range data {
+ for _, aggregate := range aggregates {
+ if aggregate.SourceFile() != fileURI {
+ continue
+ }
+
+ flattenedAggregates = append(flattenedAggregates, aggregate)
+ }
+ }
+
+ c.aggregateData[fileURI] = flattenedAggregates
+ }
+
+ func (c *Cache) SetAggregates(data map[string][]report.Aggregate) {
+ c.aggregateDataMu.Lock()
+ defer c.aggregateDataMu.Unlock()
+
+ // clear the state
+ c.aggregateData = make(map[string][]report.Aggregate)
+
+ for _, aggregates := range data {
+ for _, aggregate := range aggregates {
+ c.aggregateData[aggregate.SourceFile()] = append(c.aggregateData[aggregate.SourceFile()], aggregate)
+ }
+ }
+ }
+
+ // GetFileAggregates is used to get aggregate data for a given list of files.
+ // This is only used in tests to validate the cache state.
+ func (c *Cache) GetFileAggregates(fileURIs ...string) map[string][]report.Aggregate {
+ c.aggregateDataMu.Lock()
+ defer c.aggregateDataMu.Unlock()
+
+ includedFiles := make(map[string]struct{}, len(fileURIs))
+ for _, fileURI := range fileURIs {
+ includedFiles[fileURI] = struct{}{}
+ }
+
+ getAll := len(fileURIs) == 0
+
+ allAggregates := make(map[string][]report.Aggregate)
+
+ for sourceFile, aggregates := range c.aggregateData {
+ if _, included := includedFiles[sourceFile]; !included && !getAll {
+ continue
+ }
+
+ for _, aggregate := range aggregates {
+ allAggregates[aggregate.IndexKey()] = append(allAggregates[aggregate.IndexKey()], aggregate)
+ }
+ }
+
+ return allAggregates
+ }
+
func (c *Cache) GetFileDiagnostics(uri string) ([]types.Diagnostic, bool) {
c.diagnosticsFileMu.Lock()
defer c.diagnosticsFileMu.Unlock()
@@ -196,34 +241,33 @@ func (c *Cache) SetFileDiagnostics(fileURI string, diags []types.Diagnostic) {
c.diagnosticsFile[fileURI] = diags
}

- func (c *Cache) ClearFileDiagnostics() {
+ // SetFileDiagnosticsForRules will perform a partial update of the diagnostics
+ // for a file given a list of evaluated rules.
+ func (c *Cache) SetFileDiagnosticsForRules(fileURI string, rules []string, diags []types.Diagnostic) {
c.diagnosticsFileMu.Lock()
defer c.diagnosticsFileMu.Unlock()

- c.diagnosticsFile = make(map[string][]types.Diagnostic)
- }
-
- func (c *Cache) GetAggregateDiagnostics(fileURI string) ([]types.Diagnostic, bool) {
- c.diagnosticsAggregateMu.Lock()
- defer c.diagnosticsAggregateMu.Unlock()
-
- val, ok := c.diagnosticsAggregate[fileURI]
+ ruleKeys := make(map[string]struct{}, len(rules))
+ for _, rule := range rules {
+ ruleKeys[rule] = struct{}{}
+ }

- return val, ok
- }
+ preservedDiagnostics := make([]types.Diagnostic, 0)

- func (c *Cache) SetAggregateDiagnostics(fileURI string, diags []types.Diagnostic) {
- c.diagnosticsAggregateMu.Lock()
- defer c.diagnosticsAggregateMu.Unlock()
+ for _, diag := range c.diagnosticsFile[fileURI] {
+ if _, ok := ruleKeys[diag.Code]; !ok {
+ preservedDiagnostics = append(preservedDiagnostics, diag)
+ }
+ }

- c.diagnosticsAggregate[fileURI] = diags
+ c.diagnosticsFile[fileURI] = append(preservedDiagnostics, diags...)
}

- func (c *Cache) ClearAggregateDiagnostics() {
- c.diagnosticsAggregateMu.Lock()
- defer c.diagnosticsAggregateMu.Unlock()
+ func (c *Cache) ClearFileDiagnostics() {
+ c.diagnosticsFileMu.Lock()
+ defer c.diagnosticsFileMu.Unlock()

- c.diagnosticsAggregate = make(map[string][]types.Diagnostic)
+ c.diagnosticsFile = make(map[string][]types.Diagnostic)
}

func (c *Cache) GetParseErrors(uri string) ([]types.Diagnostic, bool) {
@@ -313,14 +357,14 @@ func (c *Cache) Delete(fileURI string) {
delete(c.modules, fileURI)
c.moduleMu.Unlock()

+ c.aggregateDataMu.Lock()
+ delete(c.aggregateData, fileURI)
+ c.aggregateDataMu.Unlock()
+
c.diagnosticsFileMu.Lock()
delete(c.diagnosticsFile, fileURI)
c.diagnosticsFileMu.Unlock()

- c.diagnosticsAggregateMu.Lock()
- delete(c.diagnosticsAggregate, fileURI)
- c.diagnosticsAggregateMu.Unlock()
-
c.diagnosticsParseMu.Lock()
delete(c.diagnosticsParseErrors, fileURI)
c.diagnosticsParseMu.Unlock()
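A rough usage sketch of the new cache API above. The method names and signatures are taken from this diff, but the refresh helper, its arguments, and the package it sits in are hypothetical, and regal's internal packages are only importable from inside the regal module.

// Package lsp is assumed here; any package inside the regal module works.
package lsp

import (
	"github.com/styrainc/regal/internal/lsp/cache"
	"github.com/styrainc/regal/internal/lsp/types"
	"github.com/styrainc/regal/pkg/report"
)

// refresh shows the intended call pattern after a single file changes:
// update that file's cached aggregates, then replace only the diagnostics
// produced by the rules that were actually re-evaluated.
func refresh(
	c *cache.Cache,
	uri string,
	aggregates map[string][]report.Aggregate, // fresh aggregate data from this pass
	evaluatedRules []string, // aggregate rules that were re-run for this file
	diags []types.Diagnostic, // diagnostics those rules produced
) {
	// SetFileAggregates keeps only entries whose SourceFile() matches uri,
	// so cached aggregate data from other files is left untouched.
	c.SetFileAggregates(uri, aggregates)

	// Diagnostics for rules outside evaluatedRules are preserved; only the
	// re-evaluated rules have their diagnostics swapped out.
	c.SetFileDiagnosticsForRules(uri, evaluatedRules, diags)
}

On a full workspace lint, c.SetAggregates(data) replaces the whole aggregate cache in one call instead.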