Commit 2339b4f

feat: add rate limit unit multiplier

Signed-off-by: Tobias Sommer <[email protected]>
1 parent 4537d29 commit 2339b4f

18 files changed: +484, -162 lines

README.md (+5)

@@ -245,6 +245,7 @@ descriptors:
     - name: (optional)
     unit: <see below: required>
     requests_per_unit: <see below: required>
+    unit_multiplier: <see below: optional>
     shadow_mode: (optional)
     detailed_metric: (optional)
     descriptors: (optional block)
@@ -262,11 +263,15 @@ effectively whitelisted. Otherwise, nested descriptors allow more complex matchi
 rate_limit:
   unit: <second, minute, hour, day>
   requests_per_unit: <uint>
+  unit_multiplier: <uint>
 ```
 
 The rate limit block specifies the actual rate limit that will be used when there is a match.
 Currently the service supports per second, minute, hour, and day limits. More types of limits may be added in the
 future based on user demand.
+The `unit_multiplier` allows for creating custom rate limit durations in combination with `unit`.
+This allows for rate limit durations such as 30 seconds or 5 minutes.
+A `unit_multiplier` of 0 is invalid and leaving out the field means the duration is equal to the unit (e.g. 1 minute).
 
 ### Replaces
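For illustration only (not part of the commit), a descriptor using the new field could look like the following; the domain and key are hypothetical, and with `unit: minute` plus `unit_multiplier: 5` the effective window is 5 minutes:

```yaml
# Hypothetical example config; unit_multiplier is the field added by this commit.
domain: example_domain
descriptors:
  - key: remote_address
    rate_limit:
      unit: minute
      unit_multiplier: 5
      requests_per_unit: 100
```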

src/config/config_impl.go (+37, -16)

@@ -20,7 +20,8 @@ type yamlReplaces struct {
 type YamlRateLimit struct {
     RequestsPerUnit uint32 `yaml:"requests_per_unit"`
     Unit string
-    Unlimited bool `yaml:"unlimited"`
+    UnitMultiplier *uint32 `yaml:"unit_multiplier"`
+    Unlimited bool `yaml:"unlimited"`
     Name string
     Replaces []yamlReplaces
 }
@@ -68,23 +69,26 @@ var validKeys = map[string]bool{
     "name": true,
     "replaces": true,
     "detailed_metric": true,
+    "unit_multiplier": true,
 }
 
 // Create a new rate limit config entry.
 // @param requestsPerUnit supplies the requests per unit of time for the entry.
 // @param unit supplies the unit of time for the entry.
+// @param unitMultiplier supplies the multiplier for the unit of time for the entry.
 // @param rlStats supplies the stats structure associated with the RateLimit
 // @param unlimited supplies whether the rate limit is unlimited
 // @return the new config entry.
 func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats,
-    unlimited bool, shadowMode bool, name string, replaces []string, detailedMetric bool) *RateLimit {
-
+    unlimited bool, shadowMode bool, name string, replaces []string, detailedMetric bool, unitMultiplier uint32,
+) *RateLimit {
     return &RateLimit{
         FullKey: rlStats.GetKey(),
         Stats: rlStats,
         Limit: &pb.RateLimitResponse_RateLimit{
             RequestsPerUnit: requestsPerUnit,
             Unit: unit,
+            UnitMultiplier: unitMultiplier,
         },
         Unlimited: unlimited,
         ShadowMode: shadowMode,
@@ -99,8 +103,8 @@ func (this *rateLimitDescriptor) dump() string {
     ret := ""
     if this.limit != nil {
         ret += fmt.Sprintf(
-            "%s: unit=%s requests_per_unit=%d, shadow_mode: %t\n", this.limit.FullKey,
-            this.limit.Limit.Unit.String(), this.limit.Limit.RequestsPerUnit, this.limit.ShadowMode)
+            "%s: unit=%s, unit_multiplier=%d, requests_per_unit=%d, shadow_mode: %t\n", this.limit.FullKey,
+            this.limit.Limit.Unit.String(), this.limit.Limit.UnitMultiplier, this.limit.Limit.RequestsPerUnit, this.limit.ShadowMode)
     }
     for _, descriptor := range this.descriptors {
         ret += descriptor.dump()
@@ -143,8 +147,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
     if descriptorConfig.RateLimit != nil {
         unlimited := descriptorConfig.RateLimit.Unlimited
 
-        value, present :=
-            pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)]
+        value, present := pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)]
         validUnit := present && value != int32(pb.RateLimitResponse_RateLimit_UNKNOWN)
 
         if unlimited {
@@ -159,6 +162,18 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
                 fmt.Sprintf("invalid rate limit unit '%s'", descriptorConfig.RateLimit.Unit)))
         }
 
+        var unitMultiplier uint32
+        if descriptorConfig.RateLimit.UnitMultiplier == nil {
+            unitMultiplier = 1
+        } else {
+            unitMultiplier = *descriptorConfig.RateLimit.UnitMultiplier
+            if unitMultiplier == 0 {
+                panic(newRateLimitConfigError(
+                    config.Name,
+                    "invalid unit multiplier of 0"))
+            }
+        }
+
         replaces := make([]string, len(descriptorConfig.RateLimit.Replaces))
         for i, e := range descriptorConfig.RateLimit.Replaces {
             replaces[i] = e.Name
@@ -168,10 +183,12 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
             descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value),
             statsManager.NewStats(newParentKey), unlimited, descriptorConfig.ShadowMode,
             descriptorConfig.RateLimit.Name, replaces, descriptorConfig.DetailedMetric,
+            unitMultiplier,
         )
+
         rateLimitDebugString = fmt.Sprintf(
-            " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t, shadow_mode=%t}", rateLimit.Limit.RequestsPerUnit,
-            rateLimit.Limit.Unit.String(), rateLimit.Unlimited, rateLimit.ShadowMode)
+            " ratelimit={requests_per_unit=%d, unit=%s, unit_multiplier=%d, unlimited=%t, shadow_mode=%t}", rateLimit.Limit.RequestsPerUnit,
+            rateLimit.Limit.Unit.String(), unitMultiplier, rateLimit.Unlimited, rateLimit.ShadowMode)
 
         for _, replaces := range descriptorConfig.RateLimit.Replaces {
             if replaces.Name == "" {
@@ -277,8 +294,8 @@ func (this *rateLimitConfigImpl) Dump() string {
 }
 
 func (this *rateLimitConfigImpl) GetLimit(
-    ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor) *RateLimit {
-
+    ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor,
+) *RateLimit {
     logger.Debugf("starting get limit lookup")
     var rateLimit *RateLimit = nil
     value := this.domains[domain]
@@ -300,6 +317,7 @@ func (this *rateLimitConfigImpl) GetLimit(
         "",
         []string{},
         false,
+        1,
     )
     return rateLimit
 }
@@ -352,7 +370,10 @@ func (this *rateLimitConfigImpl) GetLimit(
             descriptorsMap = nextDescriptor.descriptors
         } else {
             if rateLimit != nil && rateLimit.DetailedMetric {
-                rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit, this.statsManager.NewStats(rateLimit.FullKey), rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.Name, rateLimit.Replaces, rateLimit.DetailedMetric)
+                rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit,
+                    this.statsManager.NewStats(rateLimit.FullKey), rateLimit.Unlimited,
+                    rateLimit.ShadowMode, rateLimit.Name, rateLimit.Replaces,
+                    rateLimit.DetailedMetric, rateLimit.Limit.UnitMultiplier)
             }
 
             break
@@ -417,8 +438,8 @@ func ConfigFileContentToYaml(fileName, content string) *YamlRoot {
 // @param mergeDomainConfigs defines whether multiple configurations referencing the same domain will be merged or rejected throwing an error.
 // @return a new config.
 func NewRateLimitConfigImpl(
-    configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig {
-
+    configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool,
+) RateLimitConfig {
     ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsManager, mergeDomainConfigs}
     for _, config := range configs {
         ret.loadConfig(config)
@@ -430,8 +451,8 @@ func NewRateLimitConfigImpl(
 type rateLimitConfigLoaderImpl struct{}
 
 func (this *rateLimitConfigLoaderImpl) Load(
-    configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig {
-
+    configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool,
+) RateLimitConfig {
     return NewRateLimitConfigImpl(configs, statsManager, mergeDomainConfigs)
 }
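The parsing rule added in the hunks above is: an omitted `unit_multiplier` defaults to 1, an explicit 0 is rejected as a configuration error, and any other value is taken as-is. A minimal standalone restatement of that rule follows (simplified error handling, not the project's own `newRateLimitConfigError`):

```go
package main

import (
	"errors"
	"fmt"
)

// resolveUnitMultiplier mirrors the defaulting/validation added in config_impl.go:
// nil -> 1 (field omitted), 0 -> error, anything else -> the configured value.
func resolveUnitMultiplier(m *uint32) (uint32, error) {
	if m == nil {
		return 1, nil
	}
	if *m == 0 {
		return 0, errors.New("invalid unit multiplier of 0")
	}
	return *m, nil
}

func main() {
	five := uint32(5)
	for _, m := range []*uint32{nil, &five} {
		v, err := resolveUnitMultiplier(m)
		fmt.Println(v, err) // prints "1 <nil>" then "5 <nil>"
	}
}
```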

src/limiter/base_limiter.go (+13, -7)

@@ -33,7 +33,8 @@ type LimitInfo struct {
 }
 
 func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limitAfterIncrease uint32,
-    nearLimitThreshold uint32, overLimitThreshold uint32) *LimitInfo {
+    nearLimitThreshold uint32, overLimitThreshold uint32,
+) *LimitInfo {
     return &LimitInfo{
         limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease,
         nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold,
@@ -43,7 +44,8 @@ func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limit
 // Generates cache keys for given rate limit request. Each cache key is represented by a concatenation of
 // domain, descriptor and current timestamp.
 func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest,
-    limits []*config.RateLimit, hitsAddend uint32) []CacheKey {
+    limits []*config.RateLimit, hitsAddend uint32,
+) []CacheKey {
     assert.Assert(len(request.Descriptors) == len(limits))
     cacheKeys := make([]CacheKey, len(request.Descriptors))
     now := this.timeSource.UnixNow()
@@ -79,7 +81,8 @@ func (this *BaseRateLimiter) IsOverLimitThresholdReached(limitInfo *LimitInfo) b
 // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and
 // near the limit thresholds. Thresholds are checked in order and are mutually exclusive.
 func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo,
-    isOverLimitWithLocalCache bool, hitsAddend uint32) *pb.RateLimitResponse_DescriptorStatus {
+    isOverLimitWithLocalCache bool, hitsAddend uint32,
+) *pb.RateLimitResponse_DescriptorStatus {
     if key == "" {
         return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK,
             nil, 0)
@@ -113,7 +116,8 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
         // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start
         // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m).
         // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited.
-        err := this.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit)))
+
+        err := this.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDividerWithMultiplier(limitInfo.limit.Limit.Unit, limitInfo.limit.Limit.UnitMultiplier)))
         if err != nil {
             logger.Errorf("Failing to set local cache key: %s", key)
         }
@@ -140,7 +144,8 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
 }
 
 func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64,
-    localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager) *BaseRateLimiter {
+    localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager,
+) *BaseRateLimiter {
     return &BaseRateLimiter{
         timeSource: timeSource,
         JitterRand: jitterRand,
@@ -194,13 +199,14 @@ func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache b
 }
 
 func (this *BaseRateLimiter) generateResponseDescriptorStatus(responseCode pb.RateLimitResponse_Code,
-    limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32) *pb.RateLimitResponse_DescriptorStatus {
+    limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32,
+) *pb.RateLimitResponse_DescriptorStatus {
     if limit != nil {
         return &pb.RateLimitResponse_DescriptorStatus{
             Code: responseCode,
             CurrentLimit: limit,
             LimitRemaining: limitRemaining,
-            DurationUntilReset: utils.CalculateReset(&limit.Unit, this.timeSource),
+            DurationUntilReset: utils.CalculateReset(&limit.Unit, this.timeSource, limit.UnitMultiplier),
         }
     } else {
         return &pb.RateLimitResponse_DescriptorStatus{
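The `utils` changes themselves are among this commit's 18 files but are not shown in this excerpt. Judging purely from the call sites above, the new `UnitToDividerWithMultiplier` and the extra `CalculateReset` argument presumably behave like the sketch below; the types are simplified stand-ins and the bodies are an assumption, not the commit's actual code:

```go
package main

import "fmt"

// Unit is a simplified stand-in for pb.RateLimitResponse_RateLimit_Unit.
type Unit int

const (
	SECOND Unit = iota
	MINUTE
	HOUR
	DAY
)

// unitToDivider is the pre-existing idea: the window length of a unit in seconds.
func unitToDivider(u Unit) int64 {
	switch u {
	case SECOND:
		return 1
	case MINUTE:
		return 60
	case HOUR:
		return 3600
	default:
		return 86400
	}
}

// unitToDividerWithMultiplier presumably just scales that window by unit_multiplier.
func unitToDividerWithMultiplier(u Unit, multiplier uint32) int64 {
	return unitToDivider(u) * int64(multiplier)
}

// calculateReset presumably reports the seconds left in the current (multiplied) window.
func calculateReset(u Unit, multiplier uint32, now int64) int64 {
	sec := unitToDividerWithMultiplier(u, multiplier)
	return sec - now%sec
}

func main() {
	fmt.Println(unitToDividerWithMultiplier(MINUTE, 5)) // 300
	fmt.Println(calculateReset(MINUTE, 5, 1700000120))  // 280: the 5-minute window ends at 1700000400
}
```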

src/limiter/cache_key.go (+4, -3)

@@ -46,8 +46,8 @@ func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool {
 // @param now supplies the current unix time.
 // @return CacheKey struct.
 func (this *CacheKeyGenerator) GenerateCacheKey(
-    domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) CacheKey {
-
+    domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64,
+) CacheKey {
     if limit == nil {
         return CacheKey{
             Key: "",
@@ -70,7 +70,8 @@ func (this *CacheKeyGenerator) GenerateCacheKey(
         b.WriteByte('_')
     }
 
-    divider := utils.UnitToDivider(limit.Limit.Unit)
+    divider := utils.UnitToDividerWithMultiplier(limit.Limit.Unit, limit.Limit.UnitMultiplier)
+
     b.WriteString(strconv.FormatInt((now/divider)*divider, 10))
 
     return CacheKey{
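The key suffix `(now/divider)*divider` floors the current unix time to the start of the active window, so every request in the same window increments the same counter key. With `unit: minute` and a hypothetical `unit_multiplier: 5` the divider becomes 300, and the bucketing works out as in this small sketch:

```go
package main

import "fmt"

func main() {
	// Hypothetical 5-minute window: divider = 60 (minute) * 5 (unit_multiplier).
	const divider int64 = 300
	for _, now := range []int64{1700000100, 1700000250, 1700000399, 1700000400} {
		fmt.Println(now, "->", (now/divider)*divider)
	}
	// The first three timestamps share the 1700000100 bucket; 1700000400 starts a new one.
}
```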

src/memcached/cache_impl.go (+9, -6)

@@ -64,8 +64,8 @@ var _ limiter.RateLimitCache = (*rateLimitMemcacheImpl)(nil)
 func (this *rateLimitMemcacheImpl) DoLimit(
     ctx context.Context,
     request *pb.RateLimitRequest,
-    limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus {
-
+    limits []*config.RateLimit,
+) []*pb.RateLimitResponse_DescriptorStatus {
     logger.Debugf("starting cache lookup")
 
     // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request.
@@ -148,7 +148,8 @@ func (this *rateLimitMemcacheImpl) DoLimit(
 }
 
 func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, isOverLimitWithLocalCache []bool,
-    limits []*config.RateLimit, hitsAddend uint64) {
+    limits []*config.RateLimit, hitsAddend uint64,
+) {
     defer this.waitGroup.Done()
     for i, cacheKey := range cacheKeys {
         if cacheKey.Key == "" || isOverLimitWithLocalCache[i] {
@@ -157,7 +158,7 @@ func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, i
 
         _, err := this.client.Increment(cacheKey.Key, hitsAddend)
         if err == memcache.ErrCacheMiss {
-            expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit)
+            expirationSeconds := utils.UnitToDividerWithMultiplier(limits[i].Limit.Unit, limits[i].Limit.UnitMultiplier)
             if this.expirationJitterMaxSeconds > 0 {
                 expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds)
             }
@@ -290,7 +291,8 @@ func runAsync(task func()) {
 }
 
 func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand,
-    expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
+    expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string,
+) limiter.RateLimitCache {
     return &rateLimitMemcacheImpl{
         client: client,
         timeSource: timeSource,
@@ -303,7 +305,8 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan
 }
 
 func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand,
-    localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager) limiter.RateLimitCache {
+    localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager,
+) limiter.RateLimitCache {
     return NewRateLimitCacheImpl(
         CollectStats(newMemcacheFromSettings(s), scope.Scope("memcache")),
         timeSource,

src/redis/fixed_cache_impl.go (+5, -4)

@@ -44,8 +44,8 @@ func pipelineAppendtoGet(client Client, pipeline *Pipeline, key string, result *
 func (this *fixedRateLimitCacheImpl) DoLimit(
     ctx context.Context,
     request *pb.RateLimitRequest,
-    limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus {
-
+    limits []*config.RateLimit,
+) []*pb.RateLimitResponse_DescriptorStatus {
     logger.Debugf("starting cache lookup")
 
     // request.HitsAddend could be 0 (default value) if not specified by the caller in the RateLimit request.
@@ -152,7 +152,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit(
 
     logger.Debugf("looking up cache key: %s", cacheKey.Key)
 
-    expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit)
+    expirationSeconds := utils.UnitToDividerWithMultiplier(limits[i].Limit.Unit, limits[i].Limit.UnitMultiplier)
     if this.baseRateLimiter.ExpirationJitterMaxSeconds > 0 {
         expirationSeconds += this.baseRateLimiter.JitterRand.Int63n(this.baseRateLimiter.ExpirationJitterMaxSeconds)
     }
@@ -218,7 +218,8 @@ func (this *fixedRateLimitCacheImpl) Flush() {}
 
 func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource,
     jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager,
-    stopCacheKeyIncrementWhenOverlimit bool) limiter.RateLimitCache {
+    stopCacheKeyIncrementWhenOverlimit bool,
+) limiter.RateLimitCache {
     return &fixedRateLimitCacheImpl{
         client: client,
         perSecondClient: perSecondClient,
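In both the memcached and the Redis paths above, the counter key's TTL therefore becomes the full multiplied window, optionally padded by jitter so that many keys do not expire at the same instant. A rough sketch of that arithmetic (the jitter bound here is a hypothetical value standing in for the limiter's `expirationJitterMaxSeconds`):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const windowSeconds int64 = 300             // e.g. minute * unit_multiplier 5
	const expirationJitterMaxSeconds int64 = 60 // hypothetical jitter bound

	expiration := windowSeconds
	if expirationJitterMaxSeconds > 0 {
		// Mirrors the diff hunks: pad the TTL by a random amount to spread out expirations.
		expiration += rand.Int63n(expirationJitterMaxSeconds)
	}
	fmt.Println("counter TTL in seconds:", expiration) // somewhere in [300, 360)
}
```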

src/service/ratelimit.go (+11, -10)

@@ -138,8 +138,9 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co
             logger.Debugf("descriptor is unlimited, not passing to the cache")
         } else {
             logger.Debugf(
-                "applying limit: %d requests per %s, shadow_mode: %t",
+                "applying limit: %d requests per %d %s, shadow_mode: %t",
                 limitsToCheck[i].Limit.RequestsPerUnit,
+                limitsToCheck[i].Limit.UnitMultiplier,
                 limitsToCheck[i].Limit.Unit.String(),
                 limitsToCheck[i].ShadowMode,
             )
@@ -177,8 +178,8 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co
 const MaxUint32 = uint32(1<<32 - 1)
 
 func (this *service) shouldRateLimitWorker(
-    ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse {
-
+    ctx context.Context, request *pb.RateLimitRequest,
+) *pb.RateLimitResponse {
     checkServiceErr(request.Domain != "", "rate limit domain must not be empty")
     checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty")
 
@@ -258,18 +259,18 @@ func (this *service) rateLimitRemainingHeader(descriptor *pb.RateLimitResponse_D
 }
 
 func (this *service) rateLimitResetHeader(
-    descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue {
-
+    descriptor *pb.RateLimitResponse_DescriptorStatus,
+) *core.HeaderValue {
     return &core.HeaderValue{
         Key: this.customHeaderResetHeader,
-        Value: strconv.FormatInt(utils.CalculateReset(&descriptor.CurrentLimit.Unit, this.customHeaderClock).GetSeconds(), 10),
+        Value: strconv.FormatInt(utils.CalculateReset(&descriptor.CurrentLimit.Unit, this.customHeaderClock, descriptor.CurrentLimit.UnitMultiplier).GetSeconds(), 10),
     }
 }
 
 func (this *service) ShouldRateLimit(
     ctx context.Context,
-    request *pb.RateLimitRequest) (finalResponse *pb.RateLimitResponse, finalError error) {
-
+    request *pb.RateLimitRequest,
+) (finalResponse *pb.RateLimitResponse, finalError error) {
     // Generate trace
     _, span := tracer.Start(ctx, "ShouldRateLimit Execution",
         trace.WithAttributes(
@@ -316,8 +317,8 @@ func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool) {
 }
 
 func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitConfigProvider, statsManager stats.Manager,
-    health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool) RateLimitServiceServer {
-
+    health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool,
+) RateLimitServiceServer {
     newService := &service{
         configLock: sync.RWMutex{},
         configUpdateEvent: configProvider.ConfigUpdateEvent(),
