diff --git a/cache/redis_cache.go b/cache/redis_cache.go
index 04df5097..f7f59c8f 100644
--- a/cache/redis_cache.go
+++ b/cache/redis_cache.go
@@ -34,8 +34,6 @@ const statsTimeout = 500 * time.Millisecond
 // result from redis in a temporary files before sending it to the http response
 const minTTLForRedisStreamingReader = 15 * time.Second
 
-// tmpDir temporary path to store ongoing queries results
-const tmpDir = "/tmp"
 const redisTmpFilePrefix = "chproxyRedisTmp"
 
 func newRedisCache(client redis.UniversalClient, cfg config.Cache) *redisCache {
@@ -155,7 +153,7 @@ func (r *redisCache) readResultsAboveLimit(offset int, stringKey string, metadat
 
 	// nb: it would be better to retry the flow if such a failure happened but this requires a huge refactoring of proxy.go
 	if ttl <= minTTLForRedisStreamingReader {
-		fileStream, err := newFileWriterReader(tmpDir)
+		fileStream, err := newFileWriterReader(os.TempDir())
 		if err != nil {
 			return nil, err
 		}
diff --git a/cache/redis_cache_test.go b/cache/redis_cache_test.go
index bada949c..7bb42aa7 100644
--- a/cache/redis_cache_test.go
+++ b/cache/redis_cache_test.go
@@ -236,7 +236,7 @@ func TestSmallTTLOnBigPayloadAreCacheWithFile(t *testing.T) {
 	//simulate a value almost expired
 	redis.SetTTL(key.String(), 2*time.Second)
 
-	nbFileCacheBeforeGet, err := countFilesWithPrefix(tmpDir, redisTmpFilePrefix)
+	nbFileCacheBeforeGet, err := countFilesWithPrefix(os.TempDir(), redisTmpFilePrefix)
 	if err != nil {
 		t.Fatalf("could not read directory %s", err)
 	}
@@ -245,7 +245,7 @@ func TestSmallTTLOnBigPayloadAreCacheWithFile(t *testing.T) {
 	if err != nil {
 		t.Fatalf("expected cached to have the value")
 	}
-	nbFileCacheAfterGet, err := countFilesWithPrefix(tmpDir, redisTmpFilePrefix)
+	nbFileCacheAfterGet, err := countFilesWithPrefix(os.TempDir(), redisTmpFilePrefix)
 	if err != nil {
 		t.Fatalf("could not read directory %s", err)
 	}
@@ -261,7 +261,7 @@ func TestSmallTTLOnBigPayloadAreCacheWithFile(t *testing.T) {
 		t.Fatalf("got a value different than the expected one len(value)=%d vs len(expectedValue)=%d", len(string(cachedValue)), len(expectedValue))
 	}
 	cachedData.Data.Close()
-	nbFileCacheAfterClose, err := countFilesWithPrefix(tmpDir, redisTmpFilePrefix)
+	nbFileCacheAfterClose, err := countFilesWithPrefix(os.TempDir(), redisTmpFilePrefix)
 	if err != nil {
 		t.Fatalf("could not read directory %s", err)
 	}
diff --git a/proxy.go b/proxy.go
index 24940762..15ca73ea 100644
--- a/proxy.go
+++ b/proxy.go
@@ -10,6 +10,7 @@ import (
 	"net/http"
 	"net/http/httputil"
 	"net/url"
+	"os"
 	"strconv"
 	"strings"
 	"sync"
@@ -22,9 +23,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-// tmpDir temporary path to store ongoing queries results
-const tmpDir = "/tmp"
-
 // failedTransactionPrefix prefix added to the failed reason for concurrent queries registry
 const failedTransactionPrefix = "[concurrent query failed]"
 
@@ -417,7 +415,7 @@ func (rp *reverseProxy) serveFromCache(s *scope, srw *statResponseWriter, req *h
 
 	// The response wasn't found in the cache.
 	// Request it from clickhouse.
-	tmpFileRespWriter, err := cache.NewTmpFileResponseWriter(srw, tmpDir)
+	tmpFileRespWriter, err := cache.NewTmpFileResponseWriter(srw, os.TempDir())
 	if err != nil {
 		err = fmt.Errorf("%s: %w; query: %q", s, err, q)
 		respondWith(srw, err, http.StatusInternalServerError)
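
Note (illustration, not part of the diff): the change drops the hardcoded tmpDir = "/tmp" constants and resolves the scratch directory through os.TempDir(), which on Unix returns $TMPDIR when set and falls back to /tmp, and uses the platform-specific temp location elsewhere. Below is a minimal standalone sketch of how the temp-file location is now resolved; the "chproxyRedisTmp-example" filename is a made-up illustration, while the real files are created by newFileWriterReader and cache.NewTmpFileResponseWriter using the redisTmpFilePrefix shown in the diff.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// os.TempDir() honours $TMPDIR on Unix and falls back to "/tmp",
	// so temp files follow the deployment's configuration instead of
	// a hardcoded path.
	dir := os.TempDir()
	fmt.Println("temp dir:", dir)

	// Hypothetical example path using the prefix from the diff;
	// the actual filenames are generated by the cache code itself.
	fmt.Println("example tmp file:", filepath.Join(dir, "chproxyRedisTmp-example"))
}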