chore: run gofumpt (#664)
Signed-off-by: zirain <[email protected]>
zirain authored Aug 2, 2024
1 parent dfca264 commit 803b65c
Showing 12 changed files with 50 additions and 39 deletions.
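Nearly every hunk below applies the same gofumpt rule to a wrapped function signature: the last parameter gains a trailing comma, the closing parenthesis moves onto its own line before the return type, and, where one was present, the blank line between the signature and the function body is dropped. A minimal before/after sketch of that pattern on a hypothetical function (not code from this repository):

```go
package main

import "fmt"

// Before gofumpt (kept as a comment for illustration): the closing
// parenthesis and opening brace end the wrapped parameter line, and a
// blank line follows the signature.
//
//	func newLimit(requestsPerUnit uint32, unit string,
//		unlimited bool, shadowMode bool, name string) *limit {
//
//		return &limit{name: name, perUnit: requestsPerUnit}
//	}

type limit struct {
	name    string
	perUnit uint32
}

// After gofumpt: the last parameter keeps a trailing comma, the closing
// parenthesis starts its own line, and the blank line is removed.
func newLimit(requestsPerUnit uint32, unit string,
	unlimited bool, shadowMode bool, name string,
) *limit {
	return &limit{name: name, perUnit: requestsPerUnit}
}

func main() {
	fmt.Println(newLimit(10, "MINUTE", false, false, "example").name)
}
```

The diff does not show how gofumpt was invoked; running `gofumpt -w .` from the repository root is the usual way to rewrite files in place and would produce edits of this shape.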
1 change: 1 addition & 0 deletions README.md
@@ -60,6 +60,7 @@
- [Memcache](#memcache)
- [Custom headers](#custom-headers)
- [Tracing](#tracing)
- [TLS](#tls)
- [mTLS](#mtls)
- [Contact](#contact)

19 changes: 9 additions & 10 deletions src/config/config_impl.go
@@ -77,8 +77,8 @@ var validKeys = map[string]bool{
// @param unlimited supplies whether the rate limit is unlimited
// @return the new config entry.
func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats,
unlimited bool, shadowMode bool, name string, replaces []string, detailedMetric bool) *RateLimit {

unlimited bool, shadowMode bool, name string, replaces []string, detailedMetric bool,
) *RateLimit {
return &RateLimit{
FullKey: rlStats.GetKey(),
Stats: rlStats,
@@ -144,8 +144,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
if descriptorConfig.RateLimit != nil {
unlimited := descriptorConfig.RateLimit.Unlimited

value, present :=
pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)]
value, present := pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)]
validUnit := present && value != int32(pb.RateLimitResponse_RateLimit_UNKNOWN)

if unlimited {
@@ -278,8 +277,8 @@ func (this *rateLimitConfigImpl) Dump() string {
}

func (this *rateLimitConfigImpl) GetLimit(
ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor) *RateLimit {

ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor,
) *RateLimit {
logger.Debugf("starting get limit lookup")
var rateLimit *RateLimit = nil
value := this.domains[domain]
@@ -420,8 +419,8 @@ func ConfigFileContentToYaml(fileName, content string) *YamlRoot {
// @param mergeDomainConfigs defines whether multiple configurations referencing the same domain will be merged or rejected throwing an error.
// @return a new config.
func NewRateLimitConfigImpl(
configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig {

configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool,
) RateLimitConfig {
ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsManager, mergeDomainConfigs}
for _, config := range configs {
ret.loadConfig(config)
@@ -433,8 +432,8 @@ func NewRateLimitConfigImpl(
type rateLimitConfigLoaderImpl struct{}

func (this *rateLimitConfigLoaderImpl) Load(
configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig {

configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool,
) RateLimitConfig {
return NewRateLimitConfigImpl(configs, statsManager, mergeDomainConfigs)
}

15 changes: 10 additions & 5 deletions src/limiter/base_limiter.go
@@ -33,7 +33,8 @@ type LimitInfo struct {
}

func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limitAfterIncrease uint32,
nearLimitThreshold uint32, overLimitThreshold uint32) *LimitInfo {
nearLimitThreshold uint32, overLimitThreshold uint32,
) *LimitInfo {
return &LimitInfo{
limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease,
nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold,
@@ -43,7 +44,8 @@ func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limit
// Generates cache keys for given rate limit request. Each cache key is represented by a concatenation of
// domain, descriptor and current timestamp.
func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest,
limits []*config.RateLimit, hitsAddend uint32) []CacheKey {
limits []*config.RateLimit, hitsAddend uint32,
) []CacheKey {
assert.Assert(len(request.Descriptors) == len(limits))
cacheKeys := make([]CacheKey, len(request.Descriptors))
now := this.timeSource.UnixNow()
@@ -79,7 +81,8 @@ func (this *BaseRateLimiter) IsOverLimitThresholdReached(limitInfo *LimitInfo) b
// Generates response descriptor status based on cache key, over the limit with local cache, over the limit and
// near the limit thresholds. Thresholds are checked in order and are mutually exclusive.
func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo,
isOverLimitWithLocalCache bool, hitsAddend uint32) *pb.RateLimitResponse_DescriptorStatus {
isOverLimitWithLocalCache bool, hitsAddend uint32,
) *pb.RateLimitResponse_DescriptorStatus {
if key == "" {
return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK,
nil, 0)
@@ -140,7 +143,8 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
}

func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64,
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager) *BaseRateLimiter {
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager,
) *BaseRateLimiter {
return &BaseRateLimiter{
timeSource: timeSource,
JitterRand: jitterRand,
@@ -194,7 +198,8 @@ func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache b
}

func (this *BaseRateLimiter) generateResponseDescriptorStatus(responseCode pb.RateLimitResponse_Code,
limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32) *pb.RateLimitResponse_DescriptorStatus {
limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32,
) *pb.RateLimitResponse_DescriptorStatus {
if limit != nil {
return &pb.RateLimitResponse_DescriptorStatus{
Code: responseCode,
4 changes: 2 additions & 2 deletions src/limiter/cache_key.go
@@ -46,8 +46,8 @@ func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool {
// @param now supplies the current unix time.
// @return CacheKey struct.
func (this *CacheKeyGenerator) GenerateCacheKey(
domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) CacheKey {

domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64,
) CacheKey {
if limit == nil {
return CacheKey{
Key: "",
13 changes: 8 additions & 5 deletions src/memcached/cache_impl.go
@@ -66,8 +66,8 @@ var _ limiter.RateLimitCache = (*rateLimitMemcacheImpl)(nil)
func (this *rateLimitMemcacheImpl) DoLimit(
ctx context.Context,
request *pb.RateLimitRequest,
limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus {

limits []*config.RateLimit,
) []*pb.RateLimitResponse_DescriptorStatus {
logger.Debugf("starting cache lookup")

// request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request.
@@ -150,7 +150,8 @@ func (this *rateLimitMemcacheImpl) DoLimit(
}

func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, isOverLimitWithLocalCache []bool,
limits []*config.RateLimit, hitsAddend uint64) {
limits []*config.RateLimit, hitsAddend uint64,
) {
defer this.waitGroup.Done()
for i, cacheKey := range cacheKeys {
if cacheKey.Key == "" || isOverLimitWithLocalCache[i] {
@@ -301,7 +302,8 @@ func runAsync(task func()) {
}

func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand,
expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string,
) limiter.RateLimitCache {
return &rateLimitMemcacheImpl{
client: client,
timeSource: timeSource,
@@ -314,7 +316,8 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan
}

func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand,
localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager) limiter.RateLimitCache {
localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager,
) limiter.RateLimitCache {
return NewRateLimitCacheImpl(
CollectStats(newMemcacheFromSettings(s), scope.Scope("memcache")),
timeSource,
1 change: 0 additions & 1 deletion src/provider/cert_provider.go
@@ -80,7 +80,6 @@ func (p *CertProvider) setupRuntime() {
p.rootStore.ScopeWithTags("certs", p.settings.ExtraTags),
&loader.DirectoryRefresher{},
loader.IgnoreDotFiles)

if err != nil {
logger.Fatalf("Failed to set up goruntime loader: %v", err)
}
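The src/provider/cert_provider.go hunk above is the only place in this commit where gofumpt makes a pure deletion: the blank line between the goruntime loader assignment and its `if err != nil` check is removed, so the error check sits directly under the call that produced the error. A minimal sketch of that adjustment on hypothetical code (not from this repository):

```go
package main

import (
	"log"
	"os"
)

// Before gofumpt (kept as a comment for illustration): a blank line
// separates the assignment from its error check.
//
//	f, err := os.Open("example.txt")
//
//	if err != nil {
//		log.Fatalf("failed to open file: %v", err)
//	}

func main() {
	// After gofumpt: the error check immediately follows the assignment.
	f, err := os.Open("example.txt")
	if err != nil {
		log.Fatalf("failed to open file: %v", err)
	}
	defer f.Close()
	log.Printf("opened %s", f.Name())
}
```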
3 changes: 2 additions & 1 deletion src/redis/driver_impl.go
@@ -71,7 +71,8 @@ func checkError(err error) {
}

func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisType, url string, poolSize int,
pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config, healthCheckActiveConnection bool, srv server.Server) Client {
pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config, healthCheckActiveConnection bool, srv server.Server,
) Client {
maskedUrl := utils.MaskCredentialsInUrl(url)
logger.Warnf("connecting to redis on %s with pool size %d", maskedUrl, poolSize)

7 changes: 4 additions & 3 deletions src/redis/fixed_cache_impl.go
@@ -44,8 +44,8 @@ func pipelineAppendtoGet(client Client, pipeline *Pipeline, key string, result *
func (this *fixedRateLimitCacheImpl) DoLimit(
ctx context.Context,
request *pb.RateLimitRequest,
limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus {

limits []*config.RateLimit,
) []*pb.RateLimitResponse_DescriptorStatus {
logger.Debugf("starting cache lookup")

// request.HitsAddend could be 0 (default value) if not specified by the caller in the RateLimit request.
@@ -218,7 +218,8 @@ func (this *fixedRateLimitCacheImpl) Flush() {}

func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource,
jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager,
stopCacheKeyIncrementWhenOverlimit bool) limiter.RateLimitCache {
stopCacheKeyIncrementWhenOverlimit bool,
) limiter.RateLimitCache {
return &fixedRateLimitCacheImpl{
client: client,
perSecondClient: perSecondClient,
16 changes: 8 additions & 8 deletions src/service/ratelimit.go
@@ -177,8 +177,8 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co
const MaxUint32 = uint32(1<<32 - 1)

func (this *service) shouldRateLimitWorker(
ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse {

ctx context.Context, request *pb.RateLimitRequest,
) *pb.RateLimitResponse {
checkServiceErr(request.Domain != "", "rate limit domain must not be empty")
checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty")

@@ -258,8 +258,8 @@ func (this *service) rateLimitRemainingHeader(descriptor *pb.RateLimitResponse_D
}

func (this *service) rateLimitResetHeader(
descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue {

descriptor *pb.RateLimitResponse_DescriptorStatus,
) *core.HeaderValue {
return &core.HeaderValue{
Key: this.customHeaderResetHeader,
Value: strconv.FormatInt(utils.CalculateReset(&descriptor.CurrentLimit.Unit, this.customHeaderClock).GetSeconds(), 10),
@@ -268,8 +268,8 @@ func (this *service) rateLimitResetHeader(

func (this *service) ShouldRateLimit(
ctx context.Context,
request *pb.RateLimitRequest) (finalResponse *pb.RateLimitResponse, finalError error) {

request *pb.RateLimitRequest,
) (finalResponse *pb.RateLimitResponse, finalError error) {
// Generate trace
_, span := tracer.Start(ctx, "ShouldRateLimit Execution",
trace.WithAttributes(
@@ -317,8 +317,8 @@ func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool) {
}

func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitConfigProvider, statsManager stats.Manager,
health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool) RateLimitServiceServer {

health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool,
) RateLimitServiceServer {
newService := &service{
configLock: sync.RWMutex{},
configUpdateEvent: configProvider.ConfigUpdateEvent(),
3 changes: 2 additions & 1 deletion test/memcached/cache_impl_test.go
@@ -183,7 +183,8 @@ func TestMemcachedGetError(t *testing.T) {

func testLocalCacheStats(localCacheStats stats.StatGenerator, statsStore stats.Store, sink *common.TestStatSink,
expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int,
expectedEntryCount int) func(*testing.T) {
expectedEntryCount int,
) func(*testing.T) {
return func(t *testing.T) {
localCacheStats.GenerateStats()
statsStore.Flush()
3 changes: 2 additions & 1 deletion test/redis/fixed_cache_impl_test.go
@@ -146,7 +146,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {

func testLocalCacheStats(localCacheStats gostats.StatGenerator, statsStore gostats.Store, sink *common.TestStatSink,
expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int,
expectedEntryCount int) func(*testing.T) {
expectedEntryCount int,
) func(*testing.T) {
return func(t *testing.T) {
localCacheStats.GenerateStats()
statsStore.Flush()
4 changes: 2 additions & 2 deletions test/server/server_impl_test.go
@@ -26,8 +26,8 @@ func assertHttpResponse(t *testing.T,
requestBody string,
expectedStatusCode int,
expectedContentType string,
expectedResponseBody string) {

expectedResponseBody string,
) {
t.Helper()
assert := assert.New(t)

