Commit 9ff9d8c

fix Makefile proto gen conditions

francoposa committed Dec 27, 2024
1 parent a3300e9 commit 9ff9d8c
Showing 7 changed files with 461 additions and 435 deletions.
6 changes: 3 additions & 3 deletions Makefile
@@ -324,11 +324,11 @@ GENERATE_FILES ?= true

 %.pb.go: %.proto
 ifeq ($(GENERATE_FILES),true)
-	if [ "$@" != "*_custom_types.proto" ]; then \
+	if case "$@" in *_custom_types.pb.go) false ;; *) true ;; esac; then \
 		protoc -I $(GOPATH)/src:./vendor/github.com/gogo/protobuf:./vendor:./$(@D):./pkg/storegateway/storepb --gogoslick_out=plugins=grpc,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,:./$(@D) ./$(patsubst %.pb.go,%.proto,$@); \
-	else \
+	else \
 		echo "Skipping $@"; \
-	fi
+	fi
 else
 	@echo "Warning: generating files has been disabled, but the following file needs to be regenerated: $@"
 	@echo "If this is unexpected, check if the last modified timestamps on $@ and $(patsubst %.pb.go,%.proto,$@) are correct."
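Why the fix works: `[ "$@" != "*_custom_types.proto" ]` is a literal string comparison, so the pattern never matched any real target (and it tested the wrong suffix anyway: the targets of this rule are .pb.go files, not .proto files). A `case` statement performs shell pattern matching, so the rewritten condition really does skip the custom-types targets. A minimal sketch of the difference, using a hypothetical file name:

    target="pkg/mimirpb/mimir_custom_types.pb.go"

    # `!=` inside `[ ]` compares strings literally; the glob never matches,
    # so the old condition always chose the "generate" branch.
    if [ "$target" != "*_custom_types.pb.go" ]; then
        echo "generate"   # always printed, even for custom-types files
    fi

    # `case` glob-matches, so custom-types targets hit the skip branch.
    if case "$target" in *_custom_types.pb.go) false ;; *) true ;; esac; then
        echo "generate"
    else
        echo "skip"       # printed for the custom-types target
    fi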
574 changes: 148 additions & 426 deletions pkg/mimirpb/mimir.pb.go

Large diffs are not rendered by default.

7 changes: 1 addition & 6 deletions pkg/mimirpb/mimir.proto
@@ -13,7 +13,7 @@ package cortexpb;
 option go_package = "mimirpb";

 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "github.com/grafana/mimir/pkg/mimirpb/mimir_custom_types.proto";
+import "mimir_custom_types.proto";

 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
@@ -66,11 +66,6 @@ message TimeSeries {
   repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
 }

-message LabelPair {
-  bytes name = 1;
-  bytes value = 2;
-}
-
 message Sample {
   // Fields order MUST match promql.FPoint so that we can cast types between them.
   int64 timestamp_ms = 2;
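Context for the removal: LabelPair now lives in mimir_custom_types.proto, which the Makefile rule above deliberately skips regenerating, its Go counterpart being maintained by hand. The import also switches to a bare file name, which resolves because the Makefile passes the target's own directory (./$(@D)) to protoc's include path. For flavor, a hand-maintained gogo-style message supplies its own wire-format methods; the sketch below is an illustrative approximation only (field layout and method bodies are assumptions, not Mimir's actual mimir_custom_types.pb.go):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // LabelPair here is a stand-in for a hand-written custom type; field
    // numbers 1 (name) and 2 (value) match the message removed above.
    type LabelPair struct {
    	Name  []byte
    	Value []byte
    }

    // Size reports the encoded length, as gogo-generated callers expect.
    func (m *LabelPair) Size() int {
    	n := 0
    	if l := len(m.Name); l > 0 {
    		n += 1 + uvarintLen(uint64(l)) + l // tag + length prefix + payload
    	}
    	if l := len(m.Value); l > 0 {
    		n += 1 + uvarintLen(uint64(l)) + l
    	}
    	return n
    }

    // Marshal emits protobuf wire format by hand: tag 0x0a is field 1 with
    // length-delimited wire type 2; tag 0x12 is field 2.
    func (m *LabelPair) Marshal() ([]byte, error) {
    	buf := make([]byte, 0, m.Size())
    	if len(m.Name) > 0 {
    		buf = append(buf, 0x0a)
    		buf = binary.AppendUvarint(buf, uint64(len(m.Name)))
    		buf = append(buf, m.Name...)
    	}
    	if len(m.Value) > 0 {
    		buf = append(buf, 0x12)
    		buf = binary.AppendUvarint(buf, uint64(len(m.Value)))
    		buf = append(buf, m.Value...)
    	}
    	return buf, nil
    }

    // uvarintLen counts the bytes a value occupies as a protobuf varint.
    func uvarintLen(x uint64) (n int) {
    	for {
    		n++
    		x >>= 7
    		if x == 0 {
    			return n
    		}
    	}
    }

    func main() {
    	b, _ := (&LabelPair{Name: []byte("job"), Value: []byte("api")}).Marshal()
    	fmt.Printf("% x\n", b) // 0a 03 6a 6f 62 12 03 61 70 69
    }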
24 changes: 24 additions & 0 deletions pkg/scheduler/queue/baseline.txt
@@ -0,0 +1,24 @@
goos: linux
goarch: amd64
pkg: github.com/grafana/mimir/pkg/scheduler/queue
cpu: AMD Ryzen 9 PRO 6950H with Radeon Graphics
BenchmarkConcurrentQueueOperations/1_tenants/10_concurrent_producers/16_concurrent_consumers-16 3375270 3582 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/10_concurrent_producers/160_concurrent_consumers-16 3158005 3817 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/10_concurrent_producers/1600_concurrent_consumers-16 2469280 4872 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/25_concurrent_producers/16_concurrent_consumers-16 3133611 3877 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/25_concurrent_producers/160_concurrent_consumers-16 2882362 4214 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/25_concurrent_producers/1600_concurrent_consumers-16 2554503 4684 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/10_concurrent_producers/16_concurrent_consumers-16 3180195 3682 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/10_concurrent_producers/160_concurrent_consumers-16 3052290 3901 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/10_concurrent_producers/1600_concurrent_consumers-16 2420862 4755 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/25_concurrent_producers/16_concurrent_consumers-16 3111976 3767 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/25_concurrent_producers/160_concurrent_consumers-16 3040722 3943 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/25_concurrent_producers/1600_concurrent_consumers-16 2566794 4633 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/10_concurrent_producers/16_concurrent_consumers-16 3222010 3926 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/10_concurrent_producers/160_concurrent_consumers-16 2796484 4410 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/10_concurrent_producers/1600_concurrent_consumers-16 2399295 4669 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/25_concurrent_producers/16_concurrent_consumers-16 3091362 3847 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/25_concurrent_producers/160_concurrent_consumers-16 2992759 3959 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/25_concurrent_producers/1600_concurrent_consumers-16 2485108 4807 ns/op
PASS
ok github.com/grafana/mimir/pkg/scheduler/queue 292.419s
24 changes: 24 additions & 0 deletions pkg/scheduler/queue/break.txt
@@ -0,0 +1,24 @@
goos: linux
goarch: amd64
pkg: github.com/grafana/mimir/pkg/scheduler/queue
cpu: AMD Ryzen 9 PRO 6950H with Radeon Graphics
BenchmarkConcurrentQueueOperations/1_tenants/10_concurrent_producers/16_concurrent_consumers-16 3375884 3474 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/10_concurrent_producers/160_concurrent_consumers-16 3243482 3666 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/10_concurrent_producers/1600_concurrent_consumers-16 2816721 4222 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/25_concurrent_producers/16_concurrent_consumers-16 3460167 3495 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/25_concurrent_producers/160_concurrent_consumers-16 3213451 3730 ns/op
BenchmarkConcurrentQueueOperations/1_tenants/25_concurrent_producers/1600_concurrent_consumers-16 2842573 4287 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/10_concurrent_producers/16_concurrent_consumers-16 3229412 3664 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/10_concurrent_producers/160_concurrent_consumers-16 3111933 3913 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/10_concurrent_producers/1600_concurrent_consumers-16 2624695 4583 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/25_concurrent_producers/16_concurrent_consumers-16 3245578 3680 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/25_concurrent_producers/160_concurrent_consumers-16 3026472 3864 ns/op
BenchmarkConcurrentQueueOperations/10_tenants/25_concurrent_producers/1600_concurrent_consumers-16 2503609 4762 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/10_concurrent_producers/16_concurrent_consumers-16 3292596 3629 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/10_concurrent_producers/160_concurrent_consumers-16 3137596 3817 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/10_concurrent_producers/1600_concurrent_consumers-16 2726665 4568 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/25_concurrent_producers/16_concurrent_consumers-16 3080842 3869 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/25_concurrent_producers/160_concurrent_consumers-16 2937258 3981 ns/op
BenchmarkConcurrentQueueOperations/1000_tenants/25_concurrent_producers/1600_concurrent_consumers-16 2483106 4968 ns/op
PASS
ok github.com/grafana/mimir/pkg/scheduler/queue 289.483s
234 changes: 234 additions & 0 deletions pkg/scheduler/queue/tree/tenant_deletion_benchmark_test.go
@@ -0,0 +1,234 @@
package tree

import (
	"fmt"
	"math/rand"
	"slices"
	"strconv"
	"testing"
)

type fullTenantDeleteFunc func(tenantNodes map[string][]*Node, tenantIDOrder []string, tenant string) (map[string][]*Node, []string)

func baseline(tenantNodes map[string][]*Node, tenantIDOrder []string, tenant string) (map[string][]*Node, []string) {
	// delete from shared tenantNodes
	for i := range tenantNodes[tenant] {
		// only ever going to have a slice of length one here for simplicity
		// and best representation of the actual case we are benchmarking;
		// no need to check if this is the dequeuedFrom node
		//if tenantNode == dequeuedFrom {
		tenantNodes[tenant] = append(tenantNodes[tenant][:i], tenantNodes[tenant][i+1:]...)
		//}
	}

	for idx, name := range tenantIDOrder {
		if name == tenant {
			tenantIDOrder[idx] = string(emptyTenantID)
		}
	}

	// clear all sequential empty elements from tenantIDOrder
	lastElementIndex := len(tenantIDOrder) - 1
	for i := lastElementIndex; i >= 0 && tenantIDOrder[i] == ""; i-- {
		tenantIDOrder = tenantIDOrder[:i]
	}
	return tenantNodes, tenantIDOrder
}

func breakEarly(tenantNodes map[string][]*Node, tenantIDOrder []string, tenant string) (map[string][]*Node, []string) {
	// delete from shared tenantNodes
	for i := range tenantNodes[tenant] {
		// only ever going to have a slice of length one here for simplicity
		// and best representation of the actual case we are benchmarking;
		// no need to check if this is the dequeuedFrom node
		//if tenantNode == dequeuedFrom {
		tenantNodes[tenant] = append(tenantNodes[tenant][:i], tenantNodes[tenant][i+1:]...)
		//}
	}

	for idx, name := range tenantIDOrder {
		if name == tenant {
			tenantIDOrder[idx] = string(emptyTenantID)
			break
		}
	}

	// clear all sequential empty elements from tenantIDOrder
	lastElementIndex := len(tenantIDOrder) - 1
	for i := lastElementIndex; i >= 0 && tenantIDOrder[i] == ""; i-- {
		tenantIDOrder = tenantIDOrder[:i]
	}
	return tenantNodes, tenantIDOrder
}

func breakEarlyShrinkOnce(tenantNodes map[string][]*Node, tenantIDOrder []string, tenant string) (map[string][]*Node, []string) {
	// delete from shared tenantNodes
	for i := range tenantNodes[tenant] {
		// only ever going to have a slice of length one here for simplicity
		// and best representation of the actual case we are benchmarking;
		// no need to check if this is the dequeuedFrom node
		//if tenantNode == dequeuedFrom {
		tenantNodes[tenant] = slices.Delete(tenantNodes[tenant], i, i+1)
		//}
	}

	for idx, name := range tenantIDOrder {
		if name == tenant {
			tenantIDOrder[idx] = string(emptyTenantID)
			break
		}
	}

	emptyTenantIDsAtEnd := 0
	for i := len(tenantIDOrder) - 1; i >= 0 && tenantIDOrder[i] == ""; i-- {
		emptyTenantIDsAtEnd++
	}
	tenantIDOrder = slices.Delete(
		tenantIDOrder,
		len(tenantIDOrder)-emptyTenantIDsAtEnd,
		len(tenantIDOrder),
	)

	return tenantNodes, tenantIDOrder
}

func breakEarlyShrinkOnceNoSlices(tenantNodes map[string][]*Node, tenantIDOrder []string, tenant string) (map[string][]*Node, []string) {
	// delete from shared tenantNodes
	for i := range tenantNodes[tenant] {
		// only ever going to have a slice of length one here for simplicity
		// and best representation of the actual case we are benchmarking;
		// no need to check if this is the dequeuedFrom node
		//if tenantNode == dequeuedFrom {
		tenantNodes[tenant] = append(tenantNodes[tenant][:i], tenantNodes[tenant][i+1:]...)
		//}
	}

	for idx, name := range tenantIDOrder {
		if name == tenant {
			tenantIDOrder[idx] = string(emptyTenantID)
			break
		}
	}

	emptyTenantIDsAtEnd := 0
	for i := len(tenantIDOrder) - 1; i >= 0 && tenantIDOrder[i] == ""; i-- {
		emptyTenantIDsAtEnd++
	}
	tenantIDOrder = tenantIDOrder[:len(tenantIDOrder)-emptyTenantIDsAtEnd]

	// return tenantNodes rather than nil so subsequent calls still pay the
	// tenantNodes deletion cost like the other variants do
	return tenantNodes, tenantIDOrder
}

func BenchmarkFullTenantDeletion(b *testing.B) {
	tenantCount := 10000

	var testCases = []struct {
		name                string
		deleteFunc          fullTenantDeleteFunc
		tenantNodes         map[string][]*Node
		tenantRotationOrder []string
		tenantDeletionOrder []string
	}{
		{"baseline", baseline, nil, nil, nil},
		{"breakEarlyShrinkOnce", breakEarlyShrinkOnce, nil, nil, nil},
		{"breakEarlyShrinkOnceNoSlices", breakEarlyShrinkOnceNoSlices, nil, nil, nil},
	}

	makeStubs := func() (tenantNodes map[string][]*Node, tenantRotationOrder, tenantDeletionOrder []string) {
		tenantNodes = make(map[string][]*Node, tenantCount)
		tenantRotationOrder = make([]string, tenantCount)

		for i := range tenantCount {
			tenantRotationOrder[i] = strconv.Itoa(i)
			tenantNodes[strconv.Itoa(i)] = append(tenantNodes[strconv.Itoa(i)], &Node{})
		}

		tenantDeletionOrder = make([]string, tenantCount)
		copy(tenantDeletionOrder, tenantRotationOrder)
		rand.Shuffle(len(tenantDeletionOrder), func(i, j int) {
			tenantDeletionOrder[i], tenantDeletionOrder[j] = tenantDeletionOrder[j], tenantDeletionOrder[i]
		})

		return tenantNodes, tenantRotationOrder, tenantDeletionOrder
	}

	for _, testCase := range testCases {
		b.Run(fmt.Sprintf("delete_tenant_func_%s", testCase.name), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				tenantNodes, tenantRotationOrder, tenantDeletionOrder := makeStubs()
				testCase.tenantNodes = tenantNodes
				testCase.tenantRotationOrder = tenantRotationOrder
				testCase.tenantDeletionOrder = tenantDeletionOrder

				tenantDeleteIdx := 0
				for len(testCase.tenantRotationOrder) > 0 {
					//fmt.Println("len(testCase.tenantRotationOrder): ", len(testCase.tenantRotationOrder))
					testCase.tenantNodes, testCase.tenantRotationOrder = testCase.deleteFunc(
						testCase.tenantNodes, testCase.tenantRotationOrder, testCase.tenantDeletionOrder[tenantDeleteIdx],
					)
					tenantDeleteIdx++
				}
			}
		})
	}
}

type makeQueuePathFunc func(component, tenant string) QueuePath

func baselineMakeQueuePath(component, tenant string) QueuePath {
	return append([]string{component}, tenant)
}

func noAppendMakeQueuePath(component, tenant string) QueuePath {
	return QueuePath{component, tenant}
}
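// Editorial note (an assumption from Go's append semantics, not from the
// commit): append([]string{component}, tenant) builds a len-1, cap-1 slice
// and then grows it to fit the tenant, typically costing a second
// allocation, while QueuePath{component, tenant} sizes its backing array
// once. That is the difference these two path funcs isolate.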

func BenchmarkMakeQueuePath(b *testing.B) {
	tenantCount := 10000

	tenants := make([]string, tenantCount)
	queueComponents := make([]string, tenantCount)
	for i := range tenantCount {
		tenants[i] = strconv.Itoa(i)
		queueComponents[i] = randAdditionalQueueDimension()
	}

	var testCases = []struct {
		name     string
		pathFunc makeQueuePathFunc
	}{
		{"baselineMakeQueuePath", baselineMakeQueuePath},
		{"noAppendMakeQueuePath", noAppendMakeQueuePath},
	}

	for _, testCase := range testCases {
		b.Run(fmt.Sprintf("queue_path_func_%s", testCase.name), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				for tenantIdx := range tenantCount {
					_ = testCase.pathFunc(
						queueComponents[tenantIdx], tenants[tenantIdx],
					)
				}
			}
		})
	}
}

const ingesterQueueDimension = "ingester"
const storeGatewayQueueDimension = "store-gateway"
const ingesterAndStoreGatewayQueueDimension = "ingester-and-store-gateway"
const unknownQueueDimension = "unknown"

var secondQueueDimensionOptions = []string{
	ingesterQueueDimension,
	storeGatewayQueueDimension,
	ingesterAndStoreGatewayQueueDimension,
	unknownQueueDimension,
}

func randAdditionalQueueDimension() string {
	// rand.Intn excludes its upper bound, so use the full length here;
	// the previous len-1 bound could never select unknownQueueDimension.
	idx := rand.Intn(len(secondQueueDimensionOptions))
	return secondQueueDimensionOptions[idx]
}
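To exercise the two new benchmarks, a run along these lines should work (standard go test flags; the package path follows the file location above):

    # -run='^$' skips unit tests so only the benchmarks execute.
    go test -run='^$' -bench='BenchmarkFullTenantDeletion|BenchmarkMakeQueuePath' \
        ./pkg/scheduler/queue/tree/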
27 changes: 27 additions & 0 deletions pkg/scheduler/queue/vs.txt
@@ -0,0 +1,27 @@
goos: linux
goarch: amd64
pkg: github.com/grafana/mimir/pkg/scheduler/queue
cpu: AMD Ryzen 9 PRO 6950H with Radeon Graphics
│ baseline.txt │ break.txt │
│ sec/op │ sec/op vs base │
ConcurrentQueueOperations/1_tenants/10_concurrent_producers/16_concurrent_consumers-16 3.582µ ± ∞ ¹ 3.474µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1_tenants/10_concurrent_producers/160_concurrent_consumers-16 3.817µ ± ∞ ¹ 3.666µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1_tenants/10_concurrent_producers/1600_concurrent_consumers-16 4.872µ ± ∞ ¹ 4.222µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1_tenants/25_concurrent_producers/16_concurrent_consumers-16 3.877µ ± ∞ ¹ 3.495µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1_tenants/25_concurrent_producers/160_concurrent_consumers-16 4.214µ ± ∞ ¹ 3.730µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1_tenants/25_concurrent_producers/1600_concurrent_consumers-16 4.684µ ± ∞ ¹ 4.287µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/10_tenants/10_concurrent_producers/16_concurrent_consumers-16 3.682µ ± ∞ ¹ 3.664µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/10_tenants/10_concurrent_producers/160_concurrent_consumers-16 3.901µ ± ∞ ¹ 3.913µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/10_tenants/10_concurrent_producers/1600_concurrent_consumers-16 4.755µ ± ∞ ¹ 4.583µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/10_tenants/25_concurrent_producers/16_concurrent_consumers-16 3.767µ ± ∞ ¹ 3.680µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/10_tenants/25_concurrent_producers/160_concurrent_consumers-16 3.943µ ± ∞ ¹ 3.864µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/10_tenants/25_concurrent_producers/1600_concurrent_consumers-16 4.633µ ± ∞ ¹ 4.762µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1000_tenants/10_concurrent_producers/16_concurrent_consumers-16 3.926µ ± ∞ ¹ 3.629µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1000_tenants/10_concurrent_producers/160_concurrent_consumers-16 4.410µ ± ∞ ¹ 3.817µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1000_tenants/10_concurrent_producers/1600_concurrent_consumers-16 4.669µ ± ∞ ¹ 4.568µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1000_tenants/25_concurrent_producers/16_concurrent_consumers-16 3.847µ ± ∞ ¹ 3.869µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1000_tenants/25_concurrent_producers/160_concurrent_consumers-16 3.959µ ± ∞ ¹ 3.981µ ± ∞ ¹ ~ (p=1.000 n=1) ²
ConcurrentQueueOperations/1000_tenants/25_concurrent_producers/1600_concurrent_consumers-16 4.807µ ± ∞ ¹ 4.968µ ± ∞ ¹ ~ (p=1.000 n=1) ²
geomean 4.164µ 3.987µ -4.26%
¹ need >= 6 samples for confidence interval at level 0.95
² need >= 4 samples to detect a difference at alpha level 0.05
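For context: this benchstat comparison was produced from the single-run baseline.txt and break.txt above, which is why every row shows ± ∞ and no significant difference; the footnotes spell out the sample counts needed. A sketch of the usual workflow for regenerating these files with usable confidence intervals (flag values are the standard recommendation, not taken from this commit):

    # before the change: >= 6 samples per benchmark for 0.95 intervals
    go test -run='^$' -bench=BenchmarkConcurrentQueueOperations -count=6 \
        ./pkg/scheduler/queue/ | tee baseline.txt
    # after the change:
    go test -run='^$' -bench=BenchmarkConcurrentQueueOperations -count=6 \
        ./pkg/scheduler/queue/ | tee break.txt
    benchstat baseline.txt break.txt > vs.txt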
