diff --git a/xcache/cache.go b/xcache/cache.go deleted file mode 100644 index 7dad717..0000000 --- a/xcache/cache.go +++ /dev/null @@ -1,157 +0,0 @@ -package xcache - -import ( - "runtime" - "sync" - "time" -) - -const ( - // NoExpiration eg will never be deleted - NoExpiration time.Duration = -1 - // DefaultExpiration as configured with New() eg 5 minutes - DefaultExpiration time.Duration = 0 -) - -// Cache construct to control garbage collector -type Cache struct { - *cache -} - -type cache struct { - expiration time.Duration - items map[string]Item - mu sync.RWMutex - onEvicted func(string, interface{}) - janitor *janitor -} - -// Item is a generic interface holding the cache object -type Item struct { - Object interface{} - Expiration int64 -} - -// New returns a cache with a given default expiration and cleanup -// interval. If the expiration duration is less than one (or NoExpiration), -// the items in the cache never expire (by default) -func New(expiration, cleanupInterval time.Duration) *Cache { - - if expiration == 0 { - expiration = -1 - } - - c := &cache{ - expiration: expiration, - items: make(map[string]Item), - } - // trick ensures that the janitor routine does not keep - // C from being garbage collected. - // On garbage collection, the finalizer stops the janitor routine, - // and c will be collected. - C := &Cache{c} - if cleanupInterval > 0 { - runJanitor(c, cleanupInterval) - runtime.SetFinalizer(C, stopJanitor) - } - - return C -} - -// Set an item to the cache, replacing any existing item. If the duration is 0 -// (DefaultExpiration), the cache's default expiration time is used. If it is -1 -// (NoExpiration), the item never expires. 
-func (c *cache) Set(k string, x interface{}, d time.Duration) { - var e int64 - if d == DefaultExpiration { - d = c.expiration - } - if d > 0 { - e = time.Now().Add(d).UnixNano() - } - c.mu.Lock() - c.items[k] = Item{ - Object: x, - Expiration: e, - } - c.mu.Unlock() -} - -// Get an item from the cache or nil, and a bool indicating -// whether the key was found. -func (c *cache) Get(k string) (interface{}, bool) { - c.mu.RLock() - item, exists := c.items[k] - if !exists { - c.mu.RUnlock() - return nil, false - } - if item.Expiration > 0 { - if time.Now().UnixNano() > item.Expiration { - c.mu.RUnlock() - return nil, false - } - } - c.mu.RUnlock() - return item.Object, true -} - -// Copies all unexpired items in the cache into a new map and returns it. -func (c *cache) Items() map[string]Item { - c.mu.RLock() - defer c.mu.RUnlock() - m := make(map[string]Item, len(c.items)) - now := time.Now().UnixNano() - for k, v := range c.items { - // "Inlining" of Expired - if v.Expiration > 0 { - if now > v.Expiration { - continue - } - } - m[k] = v - } - return m -} - -// delete all expired items from the cache. -func (c *cache) deleteExpired() { - now := time.Now().UnixNano() - c.mu.Lock() - for k, v := range c.items { - if v.Expiration > 0 && now > v.Expiration { - c.Delete(k) - } - } - c.mu.Unlock() -} - -// Delete an item from the cache. Does nothing if the key is not in the cache. -func (c *cache) Delete(k string) { - c.mu.Lock() - v, evicted := c.delete(k) - c.mu.Unlock() - if evicted { - c.onEvicted(k, v) - } -} - -func (c *cache) delete(k string) (interface{}, bool) { - if c.onEvicted != nil { - if v, found := c.items[k]; found { - delete(c.items, k) - return v.Object, true - } - } - delete(c.items, k) - return nil, false -} - -// Sets an (optional) function that is called with the key and value when an -// item is evicted from the cache. (Including when it is deleted manually, but -// not when it is overwritten.) Set to nil to disable. 
-func (c *cache) OnEvicted(f func(string, interface{})) { - c.mu.Lock() - c.onEvicted = f - c.mu.Unlock() -} diff --git a/xcache/cache_test.go b/xcache/cache_test.go deleted file mode 100644 index 280dd0e..0000000 --- a/xcache/cache_test.go +++ /dev/null @@ -1,381 +0,0 @@ -package xcache - -import ( - "runtime" - "strconv" - "sync" - "testing" - "time" -) - -type TestStruct struct { - Num int - Children []*TestStruct -} - -func TestCache(t *testing.T) { - tc := New(DefaultExpiration, 0) - - a, found := tc.Get("a") - if found || a != nil { - t.Error("Getting A found value that shouldn't exist:", a) - } - - b, found := tc.Get("b") - if found || b != nil { - t.Error("Getting B found value that shouldn't exist:", b) - } - - c, found := tc.Get("c") - if found || c != nil { - t.Error("Getting C found value that shouldn't exist:", c) - } - - tc.Set("a", 1, DefaultExpiration) - tc.Set("b", "b", DefaultExpiration) - tc.Set("c", 3.5, DefaultExpiration) - - x, found := tc.Get("a") - if !found { - t.Error("a was not found while getting a2") - } - if x == nil { - t.Error("x for a is nil") - } else if a2 := x.(int); a2+2 != 3 { - t.Error("a2 (which should be 1) plus 2 does not equal 3; value:", a2) - } - - x, found = tc.Get("b") - if !found { - t.Error("b was not found while getting b2") - } - if x == nil { - t.Error("x for b is nil") - } else if b2 := x.(string); b2+"B" != "bB" { - t.Error("b2 (which should be b) plus B does not equal bB; value:", b2) - } - - x, found = tc.Get("c") - if !found { - t.Error("c was not found while getting c2") - } - if x == nil { - t.Error("x for c is nil") - } else if c2 := x.(float64); c2+1.2 != 4.7 { - t.Error("c2 (which should be 3.5) plus 1.2 does not equal 4.7; value:", c2) - } -} - -func TestDelete(t *testing.T) { - tc := New(DefaultExpiration, 0) - tc.Set("foo", "bar", DefaultExpiration) - tc.Delete("foo") - x, found := tc.Get("foo") - if found { - t.Error("foo was found, but it should have been deleted") - } - if x != nil { - t.Error("x 
is not nil:", x) - } -} - -func TestOnEvicted(t *testing.T) { - tc := New(DefaultExpiration, 0) - tc.Set("foo", 3, DefaultExpiration) - if tc.onEvicted != nil { - t.Fatal("tc.onEvicted is not nil") - } - works := false - tc.OnEvicted(func(k string, v interface{}) { - if k == "foo" && v.(int) == 3 { - works = true - } - tc.Set("bar", 4, DefaultExpiration) - }) - tc.Delete("foo") - x, _ := tc.Get("bar") - if !works { - t.Error("works bool not true") - } - if x.(int) != 4 { - t.Error("bar was not 4") - } -} - -func TestCacheTimes(t *testing.T) { - var found bool - - tc := New(50*time.Millisecond, 1*time.Millisecond) - tc.Set("a", 1, DefaultExpiration) - tc.Set("b", 2, NoExpiration) - tc.Set("c", 3, 20*time.Millisecond) - tc.Set("d", 4, 70*time.Millisecond) - - <-time.After(25 * time.Millisecond) - _, found = tc.Get("c") - if found { - t.Error("Found c when it should have been automatically deleted") - } - - <-time.After(30 * time.Millisecond) - _, found = tc.Get("a") - if found { - t.Error("Found a when it should have been automatically deleted") - } - - _, found = tc.Get("b") - if !found { - t.Error("Did not find b even though it was set to never expire") - } - - _, found = tc.Get("d") - if !found { - t.Error("Did not find d even though it was set to expire later than the default") - } - - <-time.After(20 * time.Millisecond) - _, found = tc.Get("d") - if found { - t.Error("Found d when it should have been automatically deleted (later than the default)") - } -} - -func TestStorePointerToStruct(t *testing.T) { - tc := New(DefaultExpiration, 0) - tc.Set("foo", &TestStruct{Num: 1}, DefaultExpiration) - x, found := tc.Get("foo") - if !found { - t.Fatal("*TestStruct was not found for foo") - } - foo := x.(*TestStruct) - foo.Num++ - - y, found := tc.Get("foo") - if !found { - t.Fatal("*TestStruct was not found for foo (second time)") - } - bar := y.(*TestStruct) - if bar.Num != 2 { - t.Fatal("TestStruct.Num is not 2") - } -} - -func BenchmarkCacheGetExpiring(b *testing.B) { 
- benchmarkCacheGet(b, 5*time.Minute) -} - -func BenchmarkCacheGetNotExpiring(b *testing.B) { - benchmarkCacheGet(b, NoExpiration) -} - -func benchmarkCacheGet(b *testing.B, exp time.Duration) { - b.StopTimer() - tc := New(exp, 0) - tc.Set("foo", "bar", DefaultExpiration) - b.StartTimer() - for i := 0; i < b.N; i++ { - tc.Get("foo") - } -} - -func BenchmarkRWMutexMapGet(b *testing.B) { - b.StopTimer() - m := map[string]string{ - "foo": "bar", - } - mu := sync.RWMutex{} - b.StartTimer() - for i := 0; i < b.N; i++ { - mu.RLock() - _, _ = m["foo"] - mu.RUnlock() - } -} - -func BenchmarkRWMutexInterfaceMapGetStruct(b *testing.B) { - b.StopTimer() - s := struct{ name string }{name: "foo"} - m := map[interface{}]string{ - s: "bar", - } - mu := sync.RWMutex{} - b.StartTimer() - for i := 0; i < b.N; i++ { - mu.RLock() - _, _ = m[s] - mu.RUnlock() - } -} - -func BenchmarkRWMutexInterfaceMapGetString(b *testing.B) { - b.StopTimer() - m := map[interface{}]string{ - "foo": "bar", - } - mu := sync.RWMutex{} - b.StartTimer() - for i := 0; i < b.N; i++ { - mu.RLock() - _, _ = m["foo"] - mu.RUnlock() - } -} - -func BenchmarkCacheGetConcurrentExpiring(b *testing.B) { - benchmarkCacheGetConcurrent(b, 5*time.Minute) -} - -func BenchmarkCacheGetConcurrentNotExpiring(b *testing.B) { - benchmarkCacheGetConcurrent(b, NoExpiration) -} - -func benchmarkCacheGetConcurrent(b *testing.B, exp time.Duration) { - b.StopTimer() - tc := New(exp, 0) - tc.Set("foo", "bar", DefaultExpiration) - wg := new(sync.WaitGroup) - workers := runtime.NumCPU() - each := b.N / workers - wg.Add(workers) - b.StartTimer() - for i := 0; i < workers; i++ { - go func() { - for j := 0; j < each; j++ { - tc.Get("foo") - } - wg.Done() - }() - } - wg.Wait() -} - -func BenchmarkRWMutexMapGetConcurrent(b *testing.B) { - b.StopTimer() - m := map[string]string{ - "foo": "bar", - } - mu := sync.RWMutex{} - wg := new(sync.WaitGroup) - workers := runtime.NumCPU() - each := b.N / workers - wg.Add(workers) - b.StartTimer() - for i 
:= 0; i < workers; i++ { - go func() { - for j := 0; j < each; j++ { - mu.RLock() - _, _ = m["foo"] - mu.RUnlock() - } - wg.Done() - }() - } - wg.Wait() -} - -func BenchmarkCacheGetManyConcurrentExpiring(b *testing.B) { - benchmarkCacheGetManyConcurrent(b, 5*time.Minute) -} - -func BenchmarkCacheGetManyConcurrentNotExpiring(b *testing.B) { - benchmarkCacheGetManyConcurrent(b, NoExpiration) -} - -func benchmarkCacheGetManyConcurrent(b *testing.B, exp time.Duration) { - // This is the same as BenchmarkCacheGetConcurrent, but its result - // can be compared against BenchmarkShardedCacheGetManyConcurrent - // in sharded_test.go. - b.StopTimer() - n := 10000 - tc := New(exp, 0) - keys := make([]string, n) - for i := 0; i < n; i++ { - k := "foo" + strconv.Itoa(i) - keys[i] = k - tc.Set(k, "bar", DefaultExpiration) - } - each := b.N / n - wg := new(sync.WaitGroup) - wg.Add(n) - for _, v := range keys { - go func(k string) { - for j := 0; j < each; j++ { - tc.Get(k) - } - wg.Done() - }(v) - } - b.StartTimer() - wg.Wait() -} - -func BenchmarkCacheSetExpiring(b *testing.B) { - benchmarkCacheSet(b, 5*time.Minute) -} - -func BenchmarkCacheSetNotExpiring(b *testing.B) { - benchmarkCacheSet(b, NoExpiration) -} - -func benchmarkCacheSet(b *testing.B, exp time.Duration) { - b.StopTimer() - tc := New(exp, 0) - b.StartTimer() - for i := 0; i < b.N; i++ { - tc.Set("foo", "bar", DefaultExpiration) - } -} - -func BenchmarkRWMutexMapSet(b *testing.B) { - b.StopTimer() - m := map[string]string{} - mu := sync.RWMutex{} - b.StartTimer() - for i := 0; i < b.N; i++ { - mu.Lock() - m["foo"] = "bar" - mu.Unlock() - } -} - -func BenchmarkRWMutexMapSetDelete(b *testing.B) { - b.StopTimer() - m := map[string]string{} - mu := sync.RWMutex{} - b.StartTimer() - for i := 0; i < b.N; i++ { - mu.Lock() - m["foo"] = "bar" - mu.Unlock() - mu.Lock() - delete(m, "foo") - mu.Unlock() - } -} - -func BenchmarkRWMutexMapSetDeleteSingleLock(b *testing.B) { - b.StopTimer() - m := map[string]string{} - mu := 
sync.RWMutex{} - b.StartTimer() - for i := 0; i < b.N; i++ { - mu.Lock() - m["foo"] = "bar" - delete(m, "foo") - mu.Unlock() - } -} - -func BenchmarkDeleteExpiredLoop(b *testing.B) { - b.StopTimer() - tc := New(5*time.Minute, 0) - tc.mu.Lock() - for i := 0; i < 100000; i++ { - tc.Set(strconv.Itoa(i), "bar", DefaultExpiration) - } - tc.mu.Unlock() - b.StartTimer() - for i := 0; i < b.N; i++ { - tc.deleteExpired() - } -} diff --git a/xcache/janitor.go b/xcache/janitor.go deleted file mode 100644 index 306cb5a..0000000 --- a/xcache/janitor.go +++ /dev/null @@ -1,34 +0,0 @@ -package xcache - -import "time" - -// clean up loop -type janitor struct { - interval time.Duration - stop chan bool -} - -func (j *janitor) run(c *cache) { - ticker := time.NewTicker(j.interval) - for { - select { - case <-ticker.C: - c.deleteExpired() - case <-j.stop: - ticker.Stop() - return - } - } -} - -func runJanitor(c *cache, ci time.Duration) { - c.janitor = &janitor{ - interval: ci, - stop: make(chan bool), - } - go c.janitor.run(c) -} - -func stopJanitor(c *Cache) { - c.janitor.stop <- true -} diff --git a/xfile/copy.go b/xfile/copy.go new file mode 100644 index 0000000..c83171e --- /dev/null +++ b/xfile/copy.go @@ -0,0 +1,50 @@ +package fileutil + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// Copy will copy the file from src to dst, the paths have to be absolute to +// ensure consistent behavior. 
+func Copy(src string, dst string) (err error) {
+	if !filepath.IsAbs(src) || !filepath.IsAbs(dst) {
+		return fmt.Errorf("can't copy src to dst paths not absolute between src: %s and dst: %s", src, dst)
+	}
+
+	srcStat, err := os.Stat(src)
+	if err != nil {
+		return errors.Wrap(err, "failed to copy file")
+	}
+
+	if !srcStat.Mode().IsRegular() {
+		return fmt.Errorf("failed to copy file %s not a regular file", src)
+	}
+
+	srcFile, err := os.Open(src)
+	if err != nil {
+		return errors.Wrap(err, "failed to open file to copy")
+	}
+	defer func() {
+		err = srcFile.Close()
+	}()
+
+	dstFile, err := os.Create(dst)
+	if err != nil {
+		return errors.Wrapf(err, "failed to create file to copy to for %s", src)
+	}
+	defer func() {
+		err = dstFile.Close()
+	}()
+
+	_, err = io.Copy(dstFile, srcFile)
+	if err != nil {
+		return errors.Wrapf(err, "failed to copy file src: %s dst: %s", src, dstFile.Name())
+	}
+
+	return err
+}
diff --git a/xfile/create.go b/xfile/create.go
new file mode 100644
index 0000000..31dffd4
--- /dev/null
+++ b/xfile/create.go
@@ -0,0 +1,103 @@
+package fileutil
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// Specify the number of bytes you want to be different when creating partial
+// matching files.
+const numOfDiffBytes = 4
+
+// Create will create a file in a specific dir, for the specific size. The
+// data inside of file is completely random.
+func Create(dir string, size int) (fileName string, err error) { + tmpFile, err := ioutil.TempFile(dir, "") + if err != nil { + return "", errors.Wrapf(err, "failed to create tmp file inside of %s", dir) + } + defer func() { + err = tmpFile.Close() + }() + + var fileContent = make([]byte, size) + _, err = rand.Read(fileContent) + if err != nil { + return "", errors.Wrap(err, "failed to create random data for file") + } + + _, err = tmpFile.Write(fileContent) + if err != nil { + return "", errors.Wrapf(err, "failed to write random data to file %s", tmpFile.Name()) + } + + return tmpFile.Name(), err +} + +// useful for fuzzing + +// CreatePartialMatch will create two files that have the same size and the +// first few bytes but the final 4 bytes of the file are different. The 4 extra +// bytes are included into the specified size. +func CreatePartialMatch(dir string, size int) ([]string, error) { + if !filepath.IsAbs(dir) { + return nil, fmt.Errorf("cannot append to file, path %s is not absolute", dir) + } + + // Create the identical files. 
+ originalFile, err := Create(dir, size-numOfDiffBytes) + if err != nil { + return nil, errors.Wrapf(err, "failed to create file inside %s", dir) + } + + var cpFile = fmt.Sprintf("%s_partial", originalFile) + + err = Copy(originalFile, cpFile) + if err != nil { + return nil, errors.Wrapf(err, "failed to copy file to %s", dir) + } + + err = appendToFile(originalFile, numOfDiffBytes) + if err != nil { + return nil, errors.Wrapf(err, "failed to append random data to %s", originalFile) + } + + err = appendToFile(cpFile, numOfDiffBytes) + if err != nil { + return nil, errors.Wrapf(err, "failed to append random data to %s", originalFile) + } + + return []string{originalFile, cpFile}, nil +} + +func appendToFile(path string, size int) (err error) { + if !filepath.IsAbs(path) { + return fmt.Errorf("cannot append to file, path is not absolute: %s", path) + } + + f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + return errors.Wrapf(err, "failed to open file %s", path) + } + defer func() { + err = f.Close() + }() + + var appendData = make([]byte, size) + + _, err = rand.Read(appendData) + if err != nil { + return errors.Wrapf(err, "failed to create random data for file") + } + + if _, err := f.Write(appendData); err != nil { + return errors.Wrapf(err, "failed to append data to file %s", path) + } + + return err +} diff --git a/xfile/create_test.go b/xfile/create_test.go new file mode 100644 index 0000000..81a17c8 --- /dev/null +++ b/xfile/create_test.go @@ -0,0 +1,124 @@ +package fileutil + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestCreate_Successful(t *testing.T) { + tmpDir, cleanUpFn := createTmpDir(t) + defer cleanUpFn() + + wantSize := 10 + + gotName, err := Create(tmpDir, wantSize) + if err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + + f, err := os.Stat(gotName) + if err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + + if int64(wantSize) != f.Size() { + 
t.Fatalf("Create file size does not match want size, want: %d got: %d", wantSize, f.Size()) + } +} + +func TestCreate_PathShouldBeAbs(t *testing.T) { + tmpDir, cleanUpFn := createTmpDir(t) + defer cleanUpFn() + + _, err := Create(filepath.Base(tmpDir), 10) + + if err == nil { + t.Fatal("Expected error got nil") + } +} + +func TestCreatePartialMatch_Success(t *testing.T) { + tmpDir, cleanUpFn := createTmpDir(t) + defer cleanUpFn() + + wantSize := 10 + + files, err := CreatePartialMatch(tmpDir, 10) + if err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + + if len(files) != 2 { + t.Fatalf("Got unexpected amount of files want: 2 got: %d", len(files)) + } + + // Compare the first bytes + f1, err := os.Open(files[0]) + if err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + defer f1.Close() + f1FirstBytes := make([]byte, wantSize-numOfDiffBytes) + if _, err := io.ReadFull(f1, f1FirstBytes); err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + + f2, err := os.Open(files[1]) + if err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + defer f2.Close() + f2FirstBytes := make([]byte, wantSize-numOfDiffBytes) + if _, err := io.ReadFull(f2, f2FirstBytes); err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + + if !reflect.DeepEqual(f1FirstBytes, f2FirstBytes) { + t.Fatalf("First bytes are not equal") + } + + // Compare the last bytes are different. 
+ f1LastBytes := make([]byte, numOfDiffBytes) + _, err = f1.ReadAt(f1LastBytes, int64(wantSize-numOfDiffBytes)) + if err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + + f2LastBytes := make([]byte, numOfDiffBytes) + _, err = f2.ReadAt(f2LastBytes, int64(wantSize-numOfDiffBytes)) + if err != nil { + t.Fatalf("Got unexpected error: %v", err) + } + + if reflect.DeepEqual(f1LastBytes, f2LastBytes) { + t.Fatalf("Last bytes should be different") + } +} + +func TestCreatePartialMatch_PathShouldBeAbs(t *testing.T) { + tmpDir, cleanUpFn := createTmpDir(t) + defer cleanUpFn() + + _, err := CreatePartialMatch(filepath.Base(tmpDir), 10) + if err == nil { + t.Fatalf("Expected error got nil") + } +} + +func createTmpDir(t *testing.T) (string, func()) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Failed to create tmp dir for testdata: %v", err) + } + + return tmpDir, func() { + err := os.RemoveAll(tmpDir) + if err != nil { + t.Logf("Failed to clean up test %s: %v", t.Name(), err) + } + } +} diff --git a/xmiddleware/realip.go b/xmiddleware/realip.go index 84e68e7..916ba78 100644 --- a/xmiddleware/realip.go +++ b/xmiddleware/realip.go @@ -42,17 +42,17 @@ func RealIP(h http.Handler) http.Handler { func realIP(r *http.Request) string { var ip string - // https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/x-forwarded-headers.html - if xff := r.Header.Get(xForwardedFor); xff != "" { + if xcip := r.Header.Get(xClientIP); xcip != "" { + // different proxies set different headers + ip = xcip + } else if xff := r.Header.Get(xForwardedFor); xff != "" { + // https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/x-forwarded-headers.html // in case a transmission chain is encoded in x-forwarded-for to get the origin ip on the first position (fifo) i := strings.Index(xff, ", ") if i == -1 { i = len(xff) } ip = xff[:i] - } else if xcip := r.Header.Get(xClientIP); xcip != "" { - // different proxies set different headers - ip = xcip } 
else if xrip := r.Header.Get(xRealIP); xrip != "" { ip = xrip }