From 23f4dc004b74bc642ff31670ee5967bd1a99d134 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jul 2024 12:27:42 +0200 Subject: [PATCH 01/22] ETCD-636: add automated backup side car --- pkg/cmd/backuprestore/backupnoconfig.go | 88 +++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 pkg/cmd/backuprestore/backupnoconfig.go diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go new file mode 100644 index 000000000..e5a9656ff --- /dev/null +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -0,0 +1,88 @@ +package backuprestore + +import ( + "errors" + "fmt" + "io" + "os/exec" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "k8s.io/klog/v2" +) + +type backupNoConfig struct { + snapshotExist bool + backupOptions +} + +func NewBackupNoConfigCommand(errOut io.Writer) *cobra.Command { + backupNoConf := &backupNoConfig{ + snapshotExist: false, + backupOptions: backupOptions{errOut: errOut}, + } + cmd := &cobra.Command{ + Use: "cluster-backup-no-config", + Short: "Backs up a snapshot of etcd database and static pod resources without config", + Run: func(cmd *cobra.Command, args []string) { + must := func(fn func() error) { + if err := fn(); err != nil { + if cmd.HasParent() { + klog.Fatal(err) + } + fmt.Fprint(backupNoConf.errOut, err.Error()) + } + } + + must(backupNoConf.Validate) + must(backupNoConf.Run) + }, + } + backupNoConf.AddFlags(cmd.Flags()) + return cmd +} + +func (b *backupNoConfig) AddFlags(fs *pflag.FlagSet) { + b.backupOptions.AddFlags(fs) +} + +func (b *backupNoConfig) Validate() error { + return b.Validate() +} + +func (b *backupNoConfig) Run() error { + if !b.snapshotExist { + if err := backup(&b.backupOptions); err != nil { + klog.Errorf("run: backup failed: [%v]", err) + } + b.snapshotExist = true + klog.Infof("config-dir is: %s", b.configDir) + return nil + } + + if err := b.copySnapshot(); err != nil { + klog.Errorf("run: backup failed: [%v]", err) + } + + 
return nil +} + +func (b *backupNoConfig) copySnapshot() error { + if !b.snapshotExist { + klog.Errorf("run: backup failed: [%v]", errors.New("no snapshot file exists")) + } + + src := "/var/lib/etcd/member/snap" + dst := "/var/backup/etcd/snap" + if _, err := exec.Command("cp", getCpArgs(src, dst)...).CombinedOutput(); err != nil { + klog.Errorf("run: backup failed: [%v]", err) + } + + return nil +} + +func getCpArgs(src, dst string) []string { + return strings.Split(fmt.Sprintf("--verbose --recursive --preserve --reflink=auto %s %s", src, dst), " ") +} From 6dea950462fb2c70967cf968d93de10dff609559 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jul 2024 14:18:51 +0200 Subject: [PATCH 02/22] skip recurring backups upon default annotation --- hack/defaultbackupcr.yaml | 14 ++++++++++++++ pkg/cmd/backuprestore/backupnoconfig.go | 1 + .../periodicbackupcontroller.go | 6 ++++++ 3 files changed, 21 insertions(+) create mode 100644 hack/defaultbackupcr.yaml diff --git a/hack/defaultbackupcr.yaml b/hack/defaultbackupcr.yaml new file mode 100644 index 000000000..b755d0f81 --- /dev/null +++ b/hack/defaultbackupcr.yaml @@ -0,0 +1,14 @@ +apiVersion: config.openshift.io/v1alpha1 +kind: Backup +metadata: + name: default + annotations: + default: "true" +spec: + etcd: + schedule: "20 4 * * *" + timeZone: "UTC" + retentionPolicy: + retentionType: RetentionNumber + retentionNumber: + maxNumberOfBackups: 5 diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index e5a9656ff..8c44847c8 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -15,6 +15,7 @@ import ( type backupNoConfig struct { snapshotExist bool + retention string backupOptions } diff --git a/pkg/operator/periodicbackupcontroller/periodicbackupcontroller.go b/pkg/operator/periodicbackupcontroller/periodicbackupcontroller.go index 47002712a..a42e94201 100644 --- 
a/pkg/operator/periodicbackupcontroller/periodicbackupcontroller.go +++ b/pkg/operator/periodicbackupcontroller/periodicbackupcontroller.go @@ -3,6 +3,7 @@ package periodicbackupcontroller import ( "context" "fmt" + "slices" "time" backupv1alpha1 "github.com/openshift/api/config/v1alpha1" @@ -80,6 +81,11 @@ func (c *PeriodicBackupController) sync(ctx context.Context, _ factory.SyncConte return fmt.Errorf("PeriodicBackupController could not list backup CRDs, error was: %w", err) } + // ignore reconciliation of default backup + backups.Items = slices.DeleteFunc(backups.Items, func(b backupv1alpha1.Backup) bool { + return b.Name == "default" + }) + for _, item := range backups.Items { err := reconcileCronJob(ctx, cronJobsClient, item, c.operatorImagePullSpec) if err != nil { From 3834e36f22848afdd81f1f6f4cd449402f88ab57 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jul 2024 17:42:51 +0200 Subject: [PATCH 03/22] extract backup specs --- pkg/cmd/backuprestore/backupnoconfig.go | 52 ++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 8c44847c8..9d73d82ec 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -1,10 +1,16 @@ package backuprestore import ( + "context" "errors" "fmt" + backupv1alpha1 "github.com/openshift/api/config/v1alpha1" + configversionedclientv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" "io" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/clientcmd" "os/exec" + "slices" "strings" "github.com/spf13/cobra" @@ -14,8 +20,10 @@ import ( ) type backupNoConfig struct { + kubeConfig string snapshotExist bool - retention string + schedule string + retention backupv1alpha1.RetentionPolicy backupOptions } @@ -54,6 +62,10 @@ func (b *backupNoConfig) Validate() error { } func (b *backupNoConfig) Run() error { + if 
err := b.extractBackupConfigs(); err != nil { + return err + } + if !b.snapshotExist { if err := backup(&b.backupOptions); err != nil { klog.Errorf("run: backup failed: [%v]", err) @@ -70,6 +82,44 @@ func (b *backupNoConfig) Run() error { return nil } +func (b *backupNoConfig) extractBackupConfigs() error { + kubeConfig, err := clientcmd.BuildConfigFromFlags("", b.kubeConfig) + if err != nil { + bErr := fmt.Errorf("error loading kubeconfig: %v", err) + klog.Error(bErr) + return bErr + } + + backupsClient, err := configversionedclientv1alpha1.NewForConfig(kubeConfig) + if err != nil { + bErr := fmt.Errorf("error creating etcd backups client: %v", err) + klog.Error(bErr) + return bErr + } + + backups, err := backupsClient.Backups().List(context.Background(), v1.ListOptions{}) + if err != nil { + lErr := fmt.Errorf("could not list backup CRDs, error was: [%v]", err) + klog.Error(lErr) + return lErr + } + + idx := slices.IndexFunc(backups.Items, func(backup backupv1alpha1.Backup) bool { + return backup.Name == "default" + }) + if idx == -1 { + sErr := fmt.Errorf("could not find default backup CR, found [%v]", backups.Items) + klog.Error(sErr) + return sErr + } + + defaultBackupCR := backups.Items[idx] + b.schedule = defaultBackupCR.Spec.EtcdBackupSpec.Schedule + b.retention = defaultBackupCR.Spec.EtcdBackupSpec.RetentionPolicy + + return nil +} + func (b *backupNoConfig) copySnapshot() error { if !b.snapshotExist { klog.Errorf("run: backup failed: [%v]", errors.New("no snapshot file exists")) From 4f43535a13f601fc14ad8d2abb3ed7c1830f5e89 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jul 2024 17:48:32 +0200 Subject: [PATCH 04/22] split backup procedure --- pkg/cmd/backuprestore/backupnoconfig.go | 35 ++++++++++++++++--------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 9d73d82ec..9dd40c819 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ 
b/pkg/cmd/backuprestore/backupnoconfig.go @@ -66,19 +66,6 @@ func (b *backupNoConfig) Run() error { return err } - if !b.snapshotExist { - if err := backup(&b.backupOptions); err != nil { - klog.Errorf("run: backup failed: [%v]", err) - } - b.snapshotExist = true - klog.Infof("config-dir is: %s", b.configDir) - return nil - } - - if err := b.copySnapshot(); err != nil { - klog.Errorf("run: backup failed: [%v]", err) - } - return nil } @@ -120,6 +107,28 @@ func (b *backupNoConfig) extractBackupConfigs() error { return nil } +func (b *backupNoConfig) backup() error { + // initially take backup using etcdctl + if !b.snapshotExist { + if err := backup(&b.backupOptions); err != nil { + klog.Errorf("run: backup failed: [%v]", err) + return err + } + b.snapshotExist = true + klog.Infof("config-dir is: %s", b.configDir) + return nil + } + + // only update the snapshot file + if err := b.copySnapshot(); err != nil { + sErr := fmt.Errorf("run: backup failed: [%v]", err) + klog.Error(sErr) + return sErr + } + + return nil +} + func (b *backupNoConfig) copySnapshot() error { if !b.snapshotExist { klog.Errorf("run: backup failed: [%v]", errors.New("no snapshot file exists")) From eb7ebe18827cf8638597c20349cab77feb24dae2 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jul 2024 18:30:15 +0200 Subject: [PATCH 05/22] update go mod sum --- go.mod | 12 +++++++----- go.sum | 27 ++++++++++++++++----------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index ed6eff11e..55ae5696e 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/ghodss/yaml v1.0.0 github.com/go-bindata/go-bindata v3.1.2+incompatible + github.com/go-co-op/gocron/v2 v2.8.0 github.com/google/go-cmp v0.6.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/openshift/api v0.0.0-20240527133614-ba11c1587003 @@ -19,7 +20,7 @@ require ( github.com/prometheus/common v0.44.0 github.com/spf13/cobra v1.7.0 
github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/vishvananda/netlink v1.0.0 go.etcd.io/etcd/api/v3 v3.5.10 go.etcd.io/etcd/client/pkg/v3 v3.5.10 @@ -70,14 +71,14 @@ require ( github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jonboulle/clockwork v0.2.2 // indirect + github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -90,6 +91,7 @@ require ( github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/robfig/cron v1.2.0 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect @@ -111,10 +113,10 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.21.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.6.0 // indirect + golang.org/x/sync v0.7.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect diff --git a/go.sum b/go.sum 
index 85c0e2e98..0c04ef7b1 100644 --- a/go.sum +++ b/go.sum @@ -111,6 +111,8 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE= github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-co-op/gocron/v2 v2.8.0 h1:nyD8u9nsuTaJx9z+Fem5/U+bEnKHzPNYxhrwB5gyYvU= +github.com/go-co-op/gocron/v2 v2.8.0/go.mod h1:xY7bJxGazKam1cz04EebrlP4S9q4iWdiAylMGP3jY9w= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -194,8 +196,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -238,8 +240,9 @@ github.com/inconshreveable/mousetrap 
v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -353,6 +356,8 @@ github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPH github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -401,8 +406,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify 
v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -504,8 +509,8 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -565,8 +570,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -641,8 +646,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 5f88f3cfbea55c9c555828670b97fa473d16693a Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jul 2024 18:30:37 +0200 Subject: [PATCH 06/22] add scheduling --- pkg/cmd/backuprestore/backupnoconfig.go | 33 ++++++++++++++++++++----- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 9dd40c819..44fc7350b 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -4,19 +4,21 @@ import ( "context" "errors" "fmt" - backupv1alpha1 "github.com/openshift/api/config/v1alpha1" - configversionedclientv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" "io" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/clientcmd" "os/exec" "slices" "strings" - "github.com/spf13/cobra" - "github.com/spf13/pflag" + backupv1alpha1 "github.com/openshift/api/config/v1alpha1" + configversionedclientv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" + + gcron "github.com/go-co-op/gocron/v2" + "github.com/spf13/cobra" + "github.com/spf13/pflag" ) type backupNoConfig struct { @@ -66,6 +68,7 @@ func (b *backupNoConfig) Run() error { return err } + go b.scheduleBackup() return nil } @@ -129,6 +132,24 @@ func (b *backupNoConfig) backup() error { return nil } +func (b *backupNoConfig) scheduleBackup() error { + s, _ := gcron.NewScheduler() + defer func() { _ = s.Shutdown() }() + + if _, err := s.NewJob( + gcron.CronJob( + b.schedule, + false, + ), + gcron.NewTask(b.backup()), + ); err != nil { + return err + } + + s.Start() + return nil +} + func (b *backupNoConfig) copySnapshot() error { if 
!b.snapshotExist { klog.Errorf("run: backup failed: [%v]", errors.New("no snapshot file exists")) From 0ae4b22dac76fc6d7f5008fdde741b1cd98d1b7c Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jul 2024 18:39:43 +0200 Subject: [PATCH 07/22] add vendor --- .../github.com/go-co-op/gocron/v2/.gitignore | 20 + .../go-co-op/gocron/v2/.golangci.yaml | 49 + .../gocron/v2/.pre-commit-config.yaml | 24 + .../go-co-op/gocron/v2/CODE_OF_CONDUCT.md | 73 ++ .../go-co-op/gocron/v2/CONTRIBUTING.md | 38 + vendor/github.com/go-co-op/gocron/v2/LICENSE | 21 + vendor/github.com/go-co-op/gocron/v2/Makefile | 22 + .../github.com/go-co-op/gocron/v2/README.md | 176 +++ .../github.com/go-co-op/gocron/v2/SECURITY.md | 16 + .../go-co-op/gocron/v2/distributed.go | 30 + .../github.com/go-co-op/gocron/v2/errors.go | 56 + .../github.com/go-co-op/gocron/v2/executor.go | 487 ++++++++ vendor/github.com/go-co-op/gocron/v2/job.go | 1042 +++++++++++++++++ .../github.com/go-co-op/gocron/v2/logger.go | 101 ++ .../github.com/go-co-op/gocron/v2/monitor.go | 27 + .../go-co-op/gocron/v2/scheduler.go | 861 ++++++++++++++ vendor/github.com/go-co-op/gocron/v2/util.go | 118 ++ vendor/github.com/google/uuid/.travis.yml | 9 - vendor/github.com/google/uuid/CHANGELOG.md | 41 + vendor/github.com/google/uuid/CONTRIBUTING.md | 16 + vendor/github.com/google/uuid/README.md | 10 +- vendor/github.com/google/uuid/hash.go | 6 + vendor/github.com/google/uuid/node_js.go | 2 +- vendor/github.com/google/uuid/time.go | 21 +- vendor/github.com/google/uuid/uuid.go | 89 +- vendor/github.com/google/uuid/version6.go | 56 + vendor/github.com/google/uuid/version7.go | 104 ++ .../github.com/jonboulle/clockwork/README.md | 4 +- .../jonboulle/clockwork/clockwork.go | 328 ++++-- .../github.com/jonboulle/clockwork/context.go | 25 + .../github.com/jonboulle/clockwork/ticker.go | 80 +- .../github.com/jonboulle/clockwork/timer.go | 53 + vendor/github.com/robfig/cron/v3/.gitignore | 22 + vendor/github.com/robfig/cron/v3/.travis.yml | 
1 + vendor/github.com/robfig/cron/v3/LICENSE | 21 + vendor/github.com/robfig/cron/v3/README.md | 125 ++ vendor/github.com/robfig/cron/v3/chain.go | 92 ++ .../robfig/cron/v3/constantdelay.go | 27 + vendor/github.com/robfig/cron/v3/cron.go | 355 ++++++ vendor/github.com/robfig/cron/v3/doc.go | 231 ++++ vendor/github.com/robfig/cron/v3/logger.go | 86 ++ vendor/github.com/robfig/cron/v3/option.go | 45 + vendor/github.com/robfig/cron/v3/parser.go | 434 +++++++ vendor/github.com/robfig/cron/v3/spec.go | 188 +++ .../testify/assert/assertion_compare.go | 28 +- .../assert/assertion_compare_can_convert.go | 16 - .../assert/assertion_compare_legacy.go | 16 - .../testify/assert/assertion_format.go | 32 +- .../testify/assert/assertion_forward.go | 59 +- .../stretchr/testify/assert/assertions.go | 207 ++-- .../testify/assert/http_assertions.go | 27 +- .../stretchr/testify/require/require.go | 65 +- .../testify/require/require_forward.go | 59 +- vendor/golang.org/x/exp/maps/maps.go | 94 ++ vendor/golang.org/x/exp/slices/cmp.go | 44 + vendor/golang.org/x/exp/slices/slices.go | 435 +++++-- vendor/golang.org/x/exp/slices/sort.go | 172 ++- .../slices/{zsortfunc.go => zsortanyfunc.go} | 154 +-- .../golang.org/x/exp/slices/zsortordered.go | 34 +- vendor/modules.txt | 23 +- 60 files changed, 6516 insertions(+), 581 deletions(-) create mode 100644 vendor/github.com/go-co-op/gocron/v2/.gitignore create mode 100644 vendor/github.com/go-co-op/gocron/v2/.golangci.yaml create mode 100644 vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml create mode 100644 vendor/github.com/go-co-op/gocron/v2/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-co-op/gocron/v2/CONTRIBUTING.md create mode 100644 vendor/github.com/go-co-op/gocron/v2/LICENSE create mode 100644 vendor/github.com/go-co-op/gocron/v2/Makefile create mode 100644 vendor/github.com/go-co-op/gocron/v2/README.md create mode 100644 vendor/github.com/go-co-op/gocron/v2/SECURITY.md create mode 100644 
vendor/github.com/go-co-op/gocron/v2/distributed.go create mode 100644 vendor/github.com/go-co-op/gocron/v2/errors.go create mode 100644 vendor/github.com/go-co-op/gocron/v2/executor.go create mode 100644 vendor/github.com/go-co-op/gocron/v2/job.go create mode 100644 vendor/github.com/go-co-op/gocron/v2/logger.go create mode 100644 vendor/github.com/go-co-op/gocron/v2/monitor.go create mode 100644 vendor/github.com/go-co-op/gocron/v2/scheduler.go create mode 100644 vendor/github.com/go-co-op/gocron/v2/util.go delete mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md create mode 100644 vendor/github.com/google/uuid/version6.go create mode 100644 vendor/github.com/google/uuid/version7.go create mode 100644 vendor/github.com/jonboulle/clockwork/context.go create mode 100644 vendor/github.com/jonboulle/clockwork/timer.go create mode 100644 vendor/github.com/robfig/cron/v3/.gitignore create mode 100644 vendor/github.com/robfig/cron/v3/.travis.yml create mode 100644 vendor/github.com/robfig/cron/v3/LICENSE create mode 100644 vendor/github.com/robfig/cron/v3/README.md create mode 100644 vendor/github.com/robfig/cron/v3/chain.go create mode 100644 vendor/github.com/robfig/cron/v3/constantdelay.go create mode 100644 vendor/github.com/robfig/cron/v3/cron.go create mode 100644 vendor/github.com/robfig/cron/v3/doc.go create mode 100644 vendor/github.com/robfig/cron/v3/logger.go create mode 100644 vendor/github.com/robfig/cron/v3/option.go create mode 100644 vendor/github.com/robfig/cron/v3/parser.go create mode 100644 vendor/github.com/robfig/cron/v3/spec.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go create mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/exp/slices/cmp.go rename vendor/golang.org/x/exp/slices/{zsortfunc.go => 
zsortanyfunc.go} (64%) diff --git a/vendor/github.com/go-co-op/gocron/v2/.gitignore b/vendor/github.com/go-co-op/gocron/v2/.gitignore new file mode 100644 index 000000000..6657e3cb2 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/.gitignore @@ -0,0 +1,20 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test +local_testing +coverage.out + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# IDE project files +.idea diff --git a/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml b/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml new file mode 100644 index 000000000..07878d85f --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml @@ -0,0 +1,49 @@ +run: + timeout: 5m + issues-exit-code: 1 + tests: true + skip-dirs: + - local + +issues: + max-same-issues: 100 + include: + - EXC0012 + - EXC0014 + +linters: + enable: + - bodyclose + - exportloopref + - gofumpt + - goimports + - gosec + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - typecheck + - unused + - whitespace + +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + # print lines of code with issue, default is true + print-issued-lines: true + # print linter name in the end of issue text, default is true + print-linter-name: true + # make issues output unique by line, default is true + uniq-by-line: true + # add a prefix to the output file references; default is no prefix + path-prefix: "" + # sorts results by: filepath, line and column + sort-results: true + +linters-settings: + golint: + min-confidence: 0.8 + +fix: true diff --git a/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml b/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml new file mode 100644 index 
000000000..99b237e39 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml @@ -0,0 +1,24 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict + - id: check-yaml + - id: detect-private-key + - id: end-of-file-fixer + - id: trailing-whitespace + - repo: https://github.com/golangci/golangci-lint + rev: v1.55.2 + hooks: + - id: golangci-lint + - repo: https://github.com/TekWizely/pre-commit-golang + rev: v1.0.0-rc.1 + hooks: + - id: go-fumpt + args: + - -w + - id: go-mod-tidy diff --git a/vendor/github.com/go-co-op/gocron/v2/CODE_OF_CONDUCT.md b/vendor/github.com/go-co-op/gocron/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..7d913b55b --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone. And we mean everyone! 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and kind language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team initially on Slack to coordinate private communication. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/go-co-op/gocron/v2/CONTRIBUTING.md b/vendor/github.com/go-co-op/gocron/v2/CONTRIBUTING.md new file mode 100644 index 000000000..99e1e8809 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/CONTRIBUTING.md @@ -0,0 +1,38 @@ +# Contributing to gocron + +Thank you for coming to contribute to gocron! We welcome new ideas, PRs and general feedback. 
+ +## Reporting Bugs + +If you find a bug then please let the project know by opening an issue after doing the following: + +- Do a quick search of the existing issues to make sure the bug isn't already reported +- Try and make a minimal list of steps that can reliably reproduce the bug you are experiencing +- Collect as much information as you can to help identify what the issue is (project version, configuration files, etc) + +## Suggesting Enhancements + +If you have a use case that you don't see a way to support yet, we would welcome the feedback in an issue. Before opening the issue, please consider: + +- Is this a common use case? +- Is it simple to understand? + +You can help us out by doing the following before raising a new issue: + +- Check that the feature hasn't been requested already by searching existing issues +- Try and reduce your enhancement into a single, concise and deliverable request, rather than a general idea +- Explain your own use cases as the basis of the request + +## Adding Features + +Pull requests are always welcome. However, before going through the trouble of implementing a change it's worth creating a bug or feature request issue. +This allows us to discuss the changes and make sure they are a good fit for the project. + +Please always make sure a pull request has been: + +- Unit tested with `make test` +- Linted with `make lint` + +## Writing Tests + +Tests should follow the [table driven test pattern](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go). See other tests in the code base for additional examples. 
diff --git a/vendor/github.com/go-co-op/gocron/v2/LICENSE b/vendor/github.com/go-co-op/gocron/v2/LICENSE new file mode 100644 index 000000000..3357d57d7 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014, 辣椒面 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-co-op/gocron/v2/Makefile b/vendor/github.com/go-co-op/gocron/v2/Makefile new file mode 100644 index 000000000..abaf708a9 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/Makefile @@ -0,0 +1,22 @@ +.PHONY: fmt lint test mocks test_coverage test_ci + +GO_PKGS := $(shell go list -f {{.Dir}} ./...) + +fmt: + @go list -f {{.Dir}} ./... 
| xargs -I{} gofmt -w -s {} + +lint: + @grep "^func " example_test.go | sort -c + @golangci-lint run + +test: + @go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) + +test_coverage: + @go test -race -v $(GO_FLAGS) -count=1 -coverprofile=coverage.out -covermode=atomic $(GO_PKGS) + +test_ci: + @TEST_ENV=ci go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) + +mocks: + @go generate ./... diff --git a/vendor/github.com/go-co-op/gocron/v2/README.md b/vendor/github.com/go-co-op/gocron/v2/README.md new file mode 100644 index 000000000..4a1de758e --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/README.md @@ -0,0 +1,176 @@ +# gocron: A Golang Job Scheduling Package + +[![CI State](https://github.com/go-co-op/gocron/actions/workflows/go_test.yml/badge.svg?branch=v2&event=push)](https://github.com/go-co-op/gocron/actions) +![Go Report Card](https://goreportcard.com/badge/github.com/go-co-op/gocron) [![Go Doc](https://godoc.org/github.com/go-co-op/gocron/v2?status.svg)](https://pkg.go.dev/github.com/go-co-op/gocron/v2) + +gocron is a job scheduling package which lets you run Go functions at pre-determined intervals. 
+ +If you want to chat, you can find us on Slack at +[](https://gophers.slack.com/archives/CQ7T0T1FW) + +## Quick Start + +``` +go get github.com/go-co-op/gocron/v2 +``` + +```golang +package main + +import ( + "fmt" + "time" + + "github.com/go-co-op/gocron/v2" +) + +func main() { + // create a scheduler + s, err := gocron.NewScheduler() + if err != nil { + // handle error + } + + // add a job to the scheduler + j, err := s.NewJob( + gocron.DurationJob( + 10*time.Second, + ), + gocron.NewTask( + func(a string, b int) { + // do things + }, + "hello", + 1, + ), + ) + if err != nil { + // handle error + } + // each job has a unique id + fmt.Println(j.ID()) + + // start the scheduler + s.Start() + + // block until you are ready to shut down + select { + case <-time.After(time.Minute): + } + + // when you're done, shut it down + err = s.Shutdown() + if err != nil { + // handle error + } +} +``` + +## Examples + +- [Go doc examples](https://pkg.go.dev/github.com/go-co-op/gocron/v2#pkg-examples) +- [Examples directory](examples) + +## Concepts + +- **Job**: The job encapsulates a "task", which is made up of a go function and any function parameters. The Job then + provides the scheduler with the time the job should next be scheduled to run. +- **Scheduler**: The scheduler keeps track of all the jobs and sends each job to the executor when + it is ready to be run. +- **Executor**: The executor calls the job's task and manages the complexities of different job + execution timing requirements (e.g. singletons that shouldn't overrun each other, limiting the max number of jobs running) + + +## Features + +### Job types +Jobs can be run at various intervals. +- [**Duration**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#DurationJob): +Jobs can be run at a fixed `time.Duration`. +- [**Random duration**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#DurationRandomJob): +Jobs can be run at a random `time.Duration` between a min and max. 
+- [**Cron**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#CronJob): +Jobs can be run using a crontab. +- [**Daily**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#DailyJob): +Jobs can be run every x days at specific times. +- [**Weekly**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WeeklyJob): +Jobs can be run every x weeks on specific days of the week and at specific times. +- [**Monthly**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#MonthlyJob): +Jobs can be run every x months on specific days of the month and at specific times. +- [**One time**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#OneTimeJob): +Jobs can be run at specific time(s) (either once or many times). + +### Concurrency Limits +Jobs can be limited individually or across the entire scheduler. +- [**Per job limiting with singleton mode**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithSingletonMode): +Jobs can be limited to a single concurrent execution that either reschedules (skips overlapping executions) +or queues (waits for the previous execution to finish). +- [**Per scheduler limiting with limit mode**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithLimitConcurrentJobs): +Jobs can be limited to a certain number of concurrent executions across the entire scheduler +using either reschedule (skip when the limit is met) or queue (jobs are added to a queue to +wait for the limit to be available). +- **Note:** A scheduler limit and a job limit can both be enabled. + +### Distributed instances of gocron +Multiple instances of gocron can be run. +- [**Elector**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithDistributedElector): +An elector can be used to elect a single instance of gocron to run as the primary with the +other instances checking to see if a new leader needs to be elected. + - Implementations: [go-co-op electors](https://github.com/go-co-op?q=-elector&type=all&language=&sort=) + (don't see what you need? 
request on slack to get a repo created to contribute it!) +- [**Locker**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithDistributedLocker): +A locker can be used to lock each run of a job to a single instance of gocron. +Locker can be at job or scheduler, if it is defined both at job and scheduler then locker of job will take precedence. + - Implementations: [go-co-op lockers](https://github.com/go-co-op?q=-lock&type=all&language=&sort=) + (don't see what you need? request on slack to get a repo created to contribute it!) + +### Events +Job events can trigger actions. +- [**Listeners**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithEventListeners): +Can be added to a job, with [event listeners](https://pkg.go.dev/github.com/go-co-op/gocron/v2#EventListener), +or all jobs across the +[scheduler](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithGlobalJobOptions) +to listen for job events and trigger actions. + +### Options +Many job and scheduler options are available. +- [**Job options**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#JobOption): +Job options can be set when creating a job using `NewJob`. +- [**Global job options**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithGlobalJobOptions): +Global job options can be set when creating a scheduler using `NewScheduler` +and the `WithGlobalJobOptions` option. +- [**Scheduler options**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#SchedulerOption): +Scheduler options can be set when creating a scheduler using `NewScheduler`. + +### Logging +Logs can be enabled. +- [Logger](https://pkg.go.dev/github.com/go-co-op/gocron/v2#Logger): +The Logger interface can be implemented with your desired logging library. +The provided NewLogger uses the standard library's log package. + +### Metrics +Metrics may be collected from the execution of each job. 
+- [**Monitor**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#Monitor): +A monitor can be used to collect metrics for each job from a scheduler. + - Implementations: [go-co-op monitors](https://github.com/go-co-op?q=-monitor&type=all&language=&sort=) + (don't see what you need? request on slack to get a repo created to contribute it!) + +### Testing +The gocron library is set up to enable testing. +- Mocks are provided in [the mock package](mocks) using [gomock](https://github.com/uber-go/mock). +- Time can be mocked by passing in a [FakeClock](https://pkg.go.dev/github.com/jonboulle/clockwork#FakeClock) +to [WithClock](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithClock) - +see the [example on WithClock](https://pkg.go.dev/github.com/go-co-op/gocron/v2#example-WithClock). + +## Supporters + +We appreciate the support for free and open source software! + +This project is supported by: + +- [Jetbrains](https://www.jetbrains.com/?from=gocron) +- [Sentry](https://sentry.io/welcome/) + +## Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=go-co-op/gocron&type=Date)](https://star-history.com/#go-co-op/gocron&Date) diff --git a/vendor/github.com/go-co-op/gocron/v2/SECURITY.md b/vendor/github.com/go-co-op/gocron/v2/SECURITY.md new file mode 100644 index 000000000..654a08550 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/SECURITY.md @@ -0,0 +1,16 @@ +# Security Policy + +## Supported Versions + +The current plan is to maintain version 2 as long as possible incorporating any necessary security patches. Version 1 is deprecated and will no longer be patched. 
+ +| Version | Supported | +| ------- | ------------------ | +| 1.x.x | :heavy_multiplication_x: | +| 2.x.x | :white_check_mark: | + +## Reporting a Vulnerability + +Vulnerabilities can be reported by [opening an issue](https://github.com/go-co-op/gocron/issues/new/choose) or reaching out on Slack: [](https://gophers.slack.com/archives/CQ7T0T1FW) + +We will do our best to address any vulnerabilities in an expeditious manner. diff --git a/vendor/github.com/go-co-op/gocron/v2/distributed.go b/vendor/github.com/go-co-op/gocron/v2/distributed.go new file mode 100644 index 000000000..1617c6211 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/distributed.go @@ -0,0 +1,30 @@ +//go:generate mockgen -destination=mocks/distributed.go -package=gocronmocks . Elector,Locker,Lock +package gocron + +import ( + "context" +) + +// Elector determines the leader from instances asking to be the leader. Only +// the leader runs jobs. If the leader goes down, a new leader will be elected. +type Elector interface { + // IsLeader should return nil if the job should be scheduled by the instance + // making the request and an error if the job should not be scheduled. + IsLeader(context.Context) error +} + +// Locker represents the required interface to lock jobs when running multiple schedulers. +// The lock is held for the duration of the job's run, and it is expected that the +// locker implementation handles time splay between schedulers. +// The lock key passed is the job's name - which, if not set, defaults to the +// go function's name, e.g. "pkg.myJob" for func myJob() {} in pkg +type Locker interface { + // Lock if an error is returned by lock, the job will not be scheduled. + Lock(ctx context.Context, key string) (Lock, error) +} + +// Lock represents an obtained lock. The lock is released after the execution of the job +// by the scheduler.
+type Lock interface { + Unlock(ctx context.Context) error +} diff --git a/vendor/github.com/go-co-op/gocron/v2/errors.go b/vendor/github.com/go-co-op/gocron/v2/errors.go new file mode 100644 index 000000000..53df01b1c --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/errors.go @@ -0,0 +1,56 @@ +package gocron + +import "fmt" + +// Public error definitions +var ( + ErrCronJobParse = fmt.Errorf("gocron: CronJob: crontab parse failure") + ErrDailyJobAtTimeNil = fmt.Errorf("gocron: DailyJob: atTime within atTimes must not be nil") + ErrDailyJobAtTimesNil = fmt.Errorf("gocron: DailyJob: atTimes must not be nil") + ErrDailyJobHours = fmt.Errorf("gocron: DailyJob: atTimes hours must be between 0 and 23 inclusive") + ErrDailyJobMinutesSeconds = fmt.Errorf("gocron: DailyJob: atTimes minutes and seconds must be between 0 and 59 inclusive") + ErrDurationJobIntervalZero = fmt.Errorf("gocron: DurationJob: time interval is 0") + ErrDurationRandomJobMinMax = fmt.Errorf("gocron: DurationRandomJob: minimum duration must be less than maximum duration") + ErrEventListenerFuncNil = fmt.Errorf("gocron: eventListenerFunc must not be nil") + ErrJobNotFound = fmt.Errorf("gocron: job not found") + ErrJobRunNowFailed = fmt.Errorf("gocron: Job: RunNow: scheduler unreachable") + ErrMonthlyJobDays = fmt.Errorf("gocron: MonthlyJob: daysOfTheMonth must be between 31 and -31 inclusive, and not 0") + ErrMonthlyJobAtTimeNil = fmt.Errorf("gocron: MonthlyJob: atTime within atTimes must not be nil") + ErrMonthlyJobAtTimesNil = fmt.Errorf("gocron: MonthlyJob: atTimes must not be nil") + ErrMonthlyJobDaysNil = fmt.Errorf("gocron: MonthlyJob: daysOfTheMonth must not be nil") + ErrMonthlyJobHours = fmt.Errorf("gocron: MonthlyJob: atTimes hours must be between 0 and 23 inclusive") + ErrMonthlyJobMinutesSeconds = fmt.Errorf("gocron: MonthlyJob: atTimes minutes and seconds must be between 0 and 59 inclusive") + ErrNewJobTaskNil = fmt.Errorf("gocron: NewJob: Task must not be nil") + 
ErrNewJobTaskNotFunc = fmt.Errorf("gocron: NewJob: Task.Function must be of kind reflect.Func") + ErrNewJobWrongNumberOfParameters = fmt.Errorf("gocron: NewJob: Number of provided parameters does not match expected") + ErrNewJobWrongTypeOfParameters = fmt.Errorf("gocron: NewJob: Type of provided parameters does not match expected") + ErrOneTimeJobStartDateTimePast = fmt.Errorf("gocron: OneTimeJob: start must not be in the past") + ErrStopExecutorTimedOut = fmt.Errorf("gocron: timed out waiting for executor to stop") + ErrStopJobsTimedOut = fmt.Errorf("gocron: timed out waiting for jobs to finish") + ErrStopSchedulerTimedOut = fmt.Errorf("gocron: timed out waiting for scheduler to stop") + ErrWeeklyJobAtTimeNil = fmt.Errorf("gocron: WeeklyJob: atTime within atTimes must not be nil") + ErrWeeklyJobAtTimesNil = fmt.Errorf("gocron: WeeklyJob: atTimes must not be nil") + ErrWeeklyJobDaysOfTheWeekNil = fmt.Errorf("gocron: WeeklyJob: daysOfTheWeek must not be nil") + ErrWeeklyJobHours = fmt.Errorf("gocron: WeeklyJob: atTimes hours must be between 0 and 23 inclusive") + ErrWeeklyJobMinutesSeconds = fmt.Errorf("gocron: WeeklyJob: atTimes minutes and seconds must be between 0 and 59 inclusive") + ErrPanicRecovered = fmt.Errorf("gocron: panic recovered") + ErrWithClockNil = fmt.Errorf("gocron: WithClock: clock must not be nil") + ErrWithDistributedElectorNil = fmt.Errorf("gocron: WithDistributedElector: elector must not be nil") + ErrWithDistributedLockerNil = fmt.Errorf("gocron: WithDistributedLocker: locker must not be nil") + ErrWithDistributedJobLockerNil = fmt.Errorf("gocron: WithDistributedJobLocker: locker must not be nil") + ErrWithLimitConcurrentJobsZero = fmt.Errorf("gocron: WithLimitConcurrentJobs: limit must be greater than 0") + ErrWithLocationNil = fmt.Errorf("gocron: WithLocation: location must not be nil") + ErrWithLoggerNil = fmt.Errorf("gocron: WithLogger: logger must not be nil") + ErrWithMonitorNil = fmt.Errorf("gocron: WithMonitor: monitor must not be 
nil") + ErrWithNameEmpty = fmt.Errorf("gocron: WithName: name must not be empty") + ErrWithStartDateTimePast = fmt.Errorf("gocron: WithStartDateTime: start must not be in the past") + ErrWithStopTimeoutZeroOrNegative = fmt.Errorf("gocron: WithStopTimeout: timeout must be greater than 0") +) + +// internal errors +var ( + errAtTimeNil = fmt.Errorf("errAtTimeNil") + errAtTimesNil = fmt.Errorf("errAtTimesNil") + errAtTimeHours = fmt.Errorf("errAtTimeHours") + errAtTimeMinSec = fmt.Errorf("errAtTimeMinSec") +) diff --git a/vendor/github.com/go-co-op/gocron/v2/executor.go b/vendor/github.com/go-co-op/gocron/v2/executor.go new file mode 100644 index 000000000..f3661970a --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/executor.go @@ -0,0 +1,487 @@ +package gocron + +import ( + "context" + "fmt" + "strconv" + "sync" + "time" + + "github.com/google/uuid" +) + +type executor struct { + ctx context.Context + cancel context.CancelFunc + logger Logger + stopCh chan struct{} + jobsIn chan jobIn + jobsOutForRescheduling chan uuid.UUID + jobsOutCompleted chan uuid.UUID + jobOutRequest chan jobOutRequest + stopTimeout time.Duration + done chan error + singletonRunners *sync.Map // map[uuid.UUID]singletonRunner + limitMode *limitModeConfig + elector Elector + locker Locker + monitor Monitor +} + +type jobIn struct { + id uuid.UUID + shouldSendOut bool +} + +type singletonRunner struct { + in chan jobIn + rescheduleLimiter chan struct{} +} + +type limitModeConfig struct { + started bool + mode LimitMode + limit uint + rescheduleLimiter chan struct{} + in chan jobIn + // singletonJobs is used to track singleton jobs that are running + // in the limit mode runner. This is used to prevent the same job + // from running multiple times across limit mode runners when both + // a limit mode and singleton mode are enabled. 
+ singletonJobs map[uuid.UUID]struct{} + singletonJobsMu sync.Mutex +} + +func (e *executor) start() { + e.logger.Debug("gocron: executor started") + + // creating the executor's context here as the executor + // is the only goroutine that should access this context + // any other uses within the executor should create a context + // using the executor context as parent. + e.ctx, e.cancel = context.WithCancel(context.Background()) + + // the standardJobsWg tracks + standardJobsWg := &waitGroupWithMutex{} + + singletonJobsWg := &waitGroupWithMutex{} + + limitModeJobsWg := &waitGroupWithMutex{} + + // create a fresh map for tracking singleton runners + e.singletonRunners = &sync.Map{} + + // start the for leap that is the executor + // selecting on channels for work to do + for { + select { + // job ids in are sent from 1 of 2 places: + // 1. the scheduler sends directly when jobs + // are run immediately. + // 2. sent from time.AfterFuncs in which job schedules + // are spun up by the scheduler + case jIn := <-e.jobsIn: + select { + case <-e.stopCh: + e.stop(standardJobsWg, singletonJobsWg, limitModeJobsWg) + return + default: + } + // this context is used to handle cancellation of the executor + // on requests for a job to the scheduler via requestJobCtx + ctx, cancel := context.WithCancel(e.ctx) + + if e.limitMode != nil && !e.limitMode.started { + // check if we are already running the limit mode runners + // if not, spin up the required number i.e. limit! 
+ e.limitMode.started = true + for i := e.limitMode.limit; i > 0; i-- { + limitModeJobsWg.Add(1) + go e.limitModeRunner("limitMode-"+strconv.Itoa(int(i)), e.limitMode.in, limitModeJobsWg, e.limitMode.mode, e.limitMode.rescheduleLimiter) + } + } + + // spin off into a goroutine to unblock the executor and + // allow for processing for more work + go func() { + // make sure to cancel the above context per the docs + // // Canceling this context releases resources associated with it, so code should + // // call cancel as soon as the operations running in this Context complete. + defer cancel() + + // check for limit mode - this spins up a separate runner which handles + // limiting the total number of concurrently running jobs + if e.limitMode != nil { + if e.limitMode.mode == LimitModeReschedule { + select { + // rescheduleLimiter is a channel the size of the limit + // this blocks publishing to the channel and keeps + // the executor from building up a waiting queue + // and forces rescheduling + case e.limitMode.rescheduleLimiter <- struct{}{}: + e.limitMode.in <- jIn + default: + // all runners are busy, reschedule the work for later + // which means we just skip it here and do nothing + // TODO when metrics are added, this should increment a rescheduled metric + e.sendOutForRescheduling(&jIn) + } + } else { + // since we're not using LimitModeReschedule, but instead using LimitModeWait + // we do want to queue up the work to the limit mode runners and allow them + // to work through the channel backlog. A hard limit of 1000 is in place + // at which point this call would block. 
+ // TODO when metrics are added, this should increment a wait metric + e.sendOutForRescheduling(&jIn) + e.limitMode.in <- jIn + } + } else { + // no limit mode, so we're either running a regular job or + // a job with a singleton mode + // + // get the job, so we can figure out what kind it is and how + // to execute it + j := requestJobCtx(ctx, jIn.id, e.jobOutRequest) + if j == nil { + // safety check as it'd be strange bug if this occurred + return + } + if j.singletonMode { + // for singleton mode, get the existing runner for the job + // or spin up a new one + runner := &singletonRunner{} + runnerSrc, ok := e.singletonRunners.Load(jIn.id) + if !ok { + runner.in = make(chan jobIn, 1000) + if j.singletonLimitMode == LimitModeReschedule { + runner.rescheduleLimiter = make(chan struct{}, 1) + } + e.singletonRunners.Store(jIn.id, runner) + singletonJobsWg.Add(1) + go e.singletonModeRunner("singleton-"+jIn.id.String(), runner.in, singletonJobsWg, j.singletonLimitMode, runner.rescheduleLimiter) + } else { + runner = runnerSrc.(*singletonRunner) + } + + if j.singletonLimitMode == LimitModeReschedule { + // reschedule mode uses the limiter channel to check + // for a running job and reschedules if the channel is full. + select { + case runner.rescheduleLimiter <- struct{}{}: + runner.in <- jIn + e.sendOutForRescheduling(&jIn) + default: + // runner is busy, reschedule the work for later + // which means we just skip it here and do nothing + // TODO when metrics are added, this should increment a rescheduled metric + e.sendOutForRescheduling(&jIn) + } + } else { + // wait mode, fill up that queue (buffered channel, so it's ok) + runner.in <- jIn + e.sendOutForRescheduling(&jIn) + } + } else { + select { + case <-e.stopCh: + e.stop(standardJobsWg, singletonJobsWg, limitModeJobsWg) + return + default: + } + // we've gotten to the basic / standard jobs -- + // the ones without anything special that just want + // to be run. 
Add to the WaitGroup so that + // stopping or shutting down can wait for the jobs to + // complete. + standardJobsWg.Add(1) + go func(j internalJob) { + e.runJob(j, jIn) + standardJobsWg.Done() + }(*j) + } + } + }() + case <-e.stopCh: + e.stop(standardJobsWg, singletonJobsWg, limitModeJobsWg) + return + } + } +} + +func (e *executor) sendOutForRescheduling(jIn *jobIn) { + if jIn.shouldSendOut { + select { + case e.jobsOutForRescheduling <- jIn.id: + case <-e.ctx.Done(): + return + } + } + // we need to set this to false now, because to handle + // non-limit jobs, we send out from the e.runJob function + // and in this case we don't want to send out twice. + jIn.shouldSendOut = false +} + +func (e *executor) limitModeRunner(name string, in chan jobIn, wg *waitGroupWithMutex, limitMode LimitMode, rescheduleLimiter chan struct{}) { + e.logger.Debug("gocron: limitModeRunner starting", "name", name) + for { + select { + case jIn := <-in: + select { + case <-e.ctx.Done(): + e.logger.Debug("gocron: limitModeRunner shutting down", "name", name) + wg.Done() + return + default: + } + + ctx, cancel := context.WithCancel(e.ctx) + j := requestJobCtx(ctx, jIn.id, e.jobOutRequest) + cancel() + if j != nil { + if j.singletonMode { + e.limitMode.singletonJobsMu.Lock() + _, ok := e.limitMode.singletonJobs[jIn.id] + if ok { + // this job is already running, so don't run it + // but instead reschedule it + e.limitMode.singletonJobsMu.Unlock() + if jIn.shouldSendOut { + select { + case <-e.ctx.Done(): + return + case <-j.ctx.Done(): + return + case e.jobsOutForRescheduling <- j.id: + } + } + // remove the limiter block, as this particular job + // was a singleton already running, and we want to + // allow another job to be scheduled + if limitMode == LimitModeReschedule { + <-rescheduleLimiter + } + continue + } + e.limitMode.singletonJobs[jIn.id] = struct{}{} + e.limitMode.singletonJobsMu.Unlock() + } + e.runJob(*j, jIn) + + if j.singletonMode { + e.limitMode.singletonJobsMu.Lock() + 
delete(e.limitMode.singletonJobs, jIn.id) + e.limitMode.singletonJobsMu.Unlock() + } + } + + // remove the limiter block to allow another job to be scheduled + if limitMode == LimitModeReschedule { + <-rescheduleLimiter + } + case <-e.ctx.Done(): + e.logger.Debug("limitModeRunner shutting down", "name", name) + wg.Done() + return + } + } +} + +func (e *executor) singletonModeRunner(name string, in chan jobIn, wg *waitGroupWithMutex, limitMode LimitMode, rescheduleLimiter chan struct{}) { + e.logger.Debug("gocron: singletonModeRunner starting", "name", name) + for { + select { + case jIn := <-in: + select { + case <-e.ctx.Done(): + e.logger.Debug("gocron: singletonModeRunner shutting down", "name", name) + wg.Done() + return + default: + } + + ctx, cancel := context.WithCancel(e.ctx) + j := requestJobCtx(ctx, jIn.id, e.jobOutRequest) + cancel() + if j != nil { + // need to set shouldSendOut = false here, as there is a duplicative call to sendOutForRescheduling + // inside the runJob function that needs to be skipped. sendOutForRescheduling is previously called + // when the job is sent to the singleton mode runner. 
+ jIn.shouldSendOut = false + e.runJob(*j, jIn) + } + + // remove the limiter block to allow another job to be scheduled + if limitMode == LimitModeReschedule { + <-rescheduleLimiter + } + case <-e.ctx.Done(): + e.logger.Debug("singletonModeRunner shutting down", "name", name) + wg.Done() + return + } + } +} + +func (e *executor) runJob(j internalJob, jIn jobIn) { + if j.ctx == nil { + return + } + select { + case <-e.ctx.Done(): + return + case <-j.ctx.Done(): + return + default: + } + + if e.elector != nil { + if err := e.elector.IsLeader(j.ctx); err != nil { + e.sendOutForRescheduling(&jIn) + e.incrementJobCounter(j, Skip) + return + } + } else if j.locker != nil { + lock, err := j.locker.Lock(j.ctx, j.name) + if err != nil { + _ = callJobFuncWithParams(j.afterLockError, j.id, j.name, err) + e.sendOutForRescheduling(&jIn) + e.incrementJobCounter(j, Skip) + return + } + defer func() { _ = lock.Unlock(j.ctx) }() + } else if e.locker != nil { + lock, err := e.locker.Lock(j.ctx, j.name) + if err != nil { + _ = callJobFuncWithParams(j.afterLockError, j.id, j.name, err) + e.sendOutForRescheduling(&jIn) + e.incrementJobCounter(j, Skip) + return + } + defer func() { _ = lock.Unlock(j.ctx) }() + } + _ = callJobFuncWithParams(j.beforeJobRuns, j.id, j.name) + + e.sendOutForRescheduling(&jIn) + select { + case e.jobsOutCompleted <- j.id: + case <-e.ctx.Done(): + } + + startTime := time.Now() + err := e.callJobWithRecover(j) + if e.monitor != nil { + e.monitor.RecordJobTiming(startTime, time.Now(), j.id, j.name, j.tags) + } + if err != nil { + _ = callJobFuncWithParams(j.afterJobRunsWithError, j.id, j.name, err) + e.incrementJobCounter(j, Fail) + } else { + _ = callJobFuncWithParams(j.afterJobRuns, j.id, j.name) + e.incrementJobCounter(j, Success) + } +} + +func (e *executor) callJobWithRecover(j internalJob) (err error) { + defer func() { + if recoverData := recover(); recoverData != nil { + _ = callJobFuncWithParams(j.afterJobRunsWithPanic, j.id, j.name, recoverData) + + 
// if a panic occurred, we should return an error
+ standardJobsWg.Wait() + e.logger.Debug("gocron: standard jobs completed") + waitForJobs <- struct{}{} + }() + <-waiterCtx.Done() + }() + + // wait for per job singleton limit mode runner jobs to complete + go func() { + e.logger.Debug("gocron: waiting for singleton jobs to complete") + go func() { + singletonJobsWg.Wait() + e.logger.Debug("gocron: singleton jobs completed") + waitForSingletons <- struct{}{} + }() + <-waiterCtx.Done() + }() + + // wait for limit mode runners to complete + go func() { + e.logger.Debug("gocron: waiting for limit mode jobs to complete") + go func() { + limitModeJobsWg.Wait() + e.logger.Debug("gocron: limitMode jobs completed") + waitForLimitMode <- struct{}{} + }() + <-waiterCtx.Done() + }() + + // now either wait for all the jobs to complete, + // or hit the timeout. + var count int + timeout := time.Now().Add(e.stopTimeout) + for time.Now().Before(timeout) && count < 3 { + select { + case <-waitForJobs: + count++ + case <-waitForSingletons: + count++ + case <-waitForLimitMode: + count++ + default: + } + } + if count < 3 { + e.done <- ErrStopJobsTimedOut + e.logger.Debug("gocron: executor stopped - timed out") + } else { + e.done <- nil + e.logger.Debug("gocron: executor stopped") + } + waiterCancel() + + if e.limitMode != nil { + e.limitMode.started = false + } +} diff --git a/vendor/github.com/go-co-op/gocron/v2/job.go b/vendor/github.com/go-co-op/gocron/v2/job.go new file mode 100644 index 000000000..5b0302c42 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/job.go @@ -0,0 +1,1042 @@ +//go:generate mockgen -destination=mocks/job.go -package=gocronmocks . 
Job +package gocron + +import ( + "context" + "errors" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/uuid" + "github.com/jonboulle/clockwork" + "github.com/robfig/cron/v3" + "golang.org/x/exp/slices" +) + +// internalJob stores the information needed by the scheduler +// to manage scheduling, starting and stopping the job +type internalJob struct { + ctx context.Context + cancel context.CancelFunc + id uuid.UUID + name string + tags []string + jobSchedule + + // as some jobs may queue up, it's possible to + // have multiple nextScheduled times + nextScheduled []time.Time + + lastRun time.Time + function any + parameters []any + timer clockwork.Timer + singletonMode bool + singletonLimitMode LimitMode + limitRunsTo *limitRunsTo + startTime time.Time + startImmediately bool + // event listeners + afterJobRuns func(jobID uuid.UUID, jobName string) + beforeJobRuns func(jobID uuid.UUID, jobName string) + afterJobRunsWithError func(jobID uuid.UUID, jobName string, err error) + afterJobRunsWithPanic func(jobID uuid.UUID, jobName string, recoverData any) + afterLockError func(jobID uuid.UUID, jobName string, err error) + + locker Locker +} + +// stop is used to stop the job's timer and cancel the context +// stopping the timer is critical for cleaning up jobs that are +// sleeping in a time.AfterFunc timer when the job is being stopped. +// cancelling the context keeps the executor from continuing to try +// and run the job. +func (j *internalJob) stop() { + if j.timer != nil { + j.timer.Stop() + } + j.cancel() +} + +// task stores the function and parameters +// that are actually run when the job is executed. +type task struct { + function any + parameters []any +} + +// Task defines a function that returns the task +// function and parameters. +type Task func() task + +// NewTask provides the job's task function and parameters. 
+func NewTask(function any, parameters ...any) Task { + return func() task { + return task{ + function: function, + parameters: parameters, + } + } +} + +// limitRunsTo is used for managing the number of runs +// when the user only wants the job to run a certain +// number of times and then be removed from the scheduler. +type limitRunsTo struct { + limit uint + runCount uint +} + +// ----------------------------------------------- +// ----------------------------------------------- +// --------------- Job Variants ------------------ +// ----------------------------------------------- +// ----------------------------------------------- + +// JobDefinition defines the interface that must be +// implemented to create a job from the definition. +type JobDefinition interface { + setup(j *internalJob, l *time.Location, now time.Time) error +} + +var _ JobDefinition = (*cronJobDefinition)(nil) + +type cronJobDefinition struct { + crontab string + withSeconds bool +} + +func (c cronJobDefinition) setup(j *internalJob, location *time.Location, _ time.Time) error { + var withLocation string + if strings.HasPrefix(c.crontab, "TZ=") || strings.HasPrefix(c.crontab, "CRON_TZ=") { + withLocation = c.crontab + } else { + // since the user didn't provide a timezone default to the location + // passed in by the scheduler. Default: time.Local + withLocation = fmt.Sprintf("CRON_TZ=%s %s", location.String(), c.crontab) + } + + var ( + cronSchedule cron.Schedule + err error + ) + + if c.withSeconds { + p := cron.NewParser(cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor) + cronSchedule, err = p.Parse(withLocation) + } else { + cronSchedule, err = cron.ParseStandard(withLocation) + } + if err != nil { + return errors.Join(ErrCronJobParse, err) + } + + j.jobSchedule = &cronJob{cronSchedule: cronSchedule} + return nil +} + +// CronJob defines a new job using the crontab syntax: `* * * * *`. 
+// An optional 6th field can be used at the beginning if withSeconds +// is set to true: `* * * * * *`. +// The timezone can be set on the Scheduler using WithLocation, or in the +// crontab in the form `TZ=America/Chicago * * * * *` or +// `CRON_TZ=America/Chicago * * * * *` +func CronJob(crontab string, withSeconds bool) JobDefinition { + return cronJobDefinition{ + crontab: crontab, + withSeconds: withSeconds, + } +} + +var _ JobDefinition = (*durationJobDefinition)(nil) + +type durationJobDefinition struct { + duration time.Duration +} + +func (d durationJobDefinition) setup(j *internalJob, _ *time.Location, _ time.Time) error { + if d.duration == 0 { + return ErrDurationJobIntervalZero + } + j.jobSchedule = &durationJob{duration: d.duration} + return nil +} + +// DurationJob defines a new job using time.Duration +// for the interval. +func DurationJob(duration time.Duration) JobDefinition { + return durationJobDefinition{ + duration: duration, + } +} + +var _ JobDefinition = (*durationRandomJobDefinition)(nil) + +type durationRandomJobDefinition struct { + min, max time.Duration +} + +func (d durationRandomJobDefinition) setup(j *internalJob, _ *time.Location, _ time.Time) error { + if d.min >= d.max { + return ErrDurationRandomJobMinMax + } + + j.jobSchedule = &durationRandomJob{ + min: d.min, + max: d.max, + rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec + } + return nil +} + +// DurationRandomJob defines a new job that runs on a random interval +// between the min and max duration values provided. +// +// To achieve a similar behavior as tools that use a splay/jitter technique +// consider the median value as the baseline and the difference between the +// max-median or median-min as the splay/jitter. 
+// +// For example, if you want a job to run every 5 minutes, but want to add +// up to 1 min of jitter to the interval, you could use +// DurationRandomJob(4*time.Minute, 6*time.Minute) +func DurationRandomJob(minDuration, maxDuration time.Duration) JobDefinition { + return durationRandomJobDefinition{ + min: minDuration, + max: maxDuration, + } +} + +// DailyJob runs the job on the interval of days, and at the set times. +// By default, the job will start the next available day, considering the last run to be now, +// and the time and day based on the interval and times you input. This means, if you +// select an interval greater than 1, your job by default will run X (interval) days from now +// if there are no atTimes left in the current day. You can use WithStartAt to tell the +// scheduler to start the job sooner. +func DailyJob(interval uint, atTimes AtTimes) JobDefinition { + return dailyJobDefinition{ + interval: interval, + atTimes: atTimes, + } +} + +var _ JobDefinition = (*dailyJobDefinition)(nil) + +type dailyJobDefinition struct { + interval uint + atTimes AtTimes +} + +func (d dailyJobDefinition) setup(j *internalJob, location *time.Location, _ time.Time) error { + atTimesDate, err := convertAtTimesToDateTime(d.atTimes, location) + switch { + case errors.Is(err, errAtTimesNil): + return ErrDailyJobAtTimesNil + case errors.Is(err, errAtTimeNil): + return ErrDailyJobAtTimeNil + case errors.Is(err, errAtTimeHours): + return ErrDailyJobHours + case errors.Is(err, errAtTimeMinSec): + return ErrDailyJobMinutesSeconds + } + + ds := dailyJob{ + interval: d.interval, + atTimes: atTimesDate, + } + j.jobSchedule = ds + return nil +} + +var _ JobDefinition = (*weeklyJobDefinition)(nil) + +type weeklyJobDefinition struct { + interval uint + daysOfTheWeek Weekdays + atTimes AtTimes +} + +func (w weeklyJobDefinition) setup(j *internalJob, location *time.Location, _ time.Time) error { + var ws weeklyJob + ws.interval = w.interval + + if w.daysOfTheWeek == nil { + 
return ErrWeeklyJobDaysOfTheWeekNil + } + + daysOfTheWeek := w.daysOfTheWeek() + + slices.Sort(daysOfTheWeek) + ws.daysOfWeek = daysOfTheWeek + + atTimesDate, err := convertAtTimesToDateTime(w.atTimes, location) + switch { + case errors.Is(err, errAtTimesNil): + return ErrWeeklyJobAtTimesNil + case errors.Is(err, errAtTimeNil): + return ErrWeeklyJobAtTimeNil + case errors.Is(err, errAtTimeHours): + return ErrWeeklyJobHours + case errors.Is(err, errAtTimeMinSec): + return ErrWeeklyJobMinutesSeconds + } + ws.atTimes = atTimesDate + + j.jobSchedule = ws + return nil +} + +// Weekdays defines a function that returns a list of week days. +type Weekdays func() []time.Weekday + +// NewWeekdays provide the days of the week the job should run. +func NewWeekdays(weekday time.Weekday, weekdays ...time.Weekday) Weekdays { + return func() []time.Weekday { + weekdays = append(weekdays, weekday) + return weekdays + } +} + +// WeeklyJob runs the job on the interval of weeks, on the specific days of the week +// specified, and at the set times. +// +// By default, the job will start the next available day, considering the last run to be now, +// and the time and day based on the interval, days and times you input. This means, if you +// select an interval greater than 1, your job by default will run X (interval) weeks from now +// if there are no daysOfTheWeek left in the current week. You can use WithStartAt to tell the +// scheduler to start the job sooner. 
+func WeeklyJob(interval uint, daysOfTheWeek Weekdays, atTimes AtTimes) JobDefinition { + return weeklyJobDefinition{ + interval: interval, + daysOfTheWeek: daysOfTheWeek, + atTimes: atTimes, + } +} + +var _ JobDefinition = (*monthlyJobDefinition)(nil) + +type monthlyJobDefinition struct { + interval uint + daysOfTheMonth DaysOfTheMonth + atTimes AtTimes +} + +func (m monthlyJobDefinition) setup(j *internalJob, location *time.Location, _ time.Time) error { + var ms monthlyJob + ms.interval = m.interval + + if m.daysOfTheMonth == nil { + return ErrMonthlyJobDaysNil + } + + var daysStart, daysEnd []int + for _, day := range m.daysOfTheMonth() { + if day > 31 || day == 0 || day < -31 { + return ErrMonthlyJobDays + } + if day > 0 { + daysStart = append(daysStart, day) + } else { + daysEnd = append(daysEnd, day) + } + } + daysStart = removeSliceDuplicatesInt(daysStart) + slices.Sort(daysStart) + ms.days = daysStart + + daysEnd = removeSliceDuplicatesInt(daysEnd) + slices.Sort(daysEnd) + ms.daysFromEnd = daysEnd + + atTimesDate, err := convertAtTimesToDateTime(m.atTimes, location) + switch { + case errors.Is(err, errAtTimesNil): + return ErrMonthlyJobAtTimesNil + case errors.Is(err, errAtTimeNil): + return ErrMonthlyJobAtTimeNil + case errors.Is(err, errAtTimeHours): + return ErrMonthlyJobHours + case errors.Is(err, errAtTimeMinSec): + return ErrMonthlyJobMinutesSeconds + } + ms.atTimes = atTimesDate + + j.jobSchedule = ms + return nil +} + +type days []int + +// DaysOfTheMonth defines a function that returns a list of days. +type DaysOfTheMonth func() days + +// NewDaysOfTheMonth provide the days of the month the job should +// run. The days can be positive 1 to 31 and/or negative -31 to -1. +// Negative values count backwards from the end of the month. +// For example: -1 == the last day of the month. +// +// -5 == 5 days before the end of the month. 
+func NewDaysOfTheMonth(day int, moreDays ...int) DaysOfTheMonth { + return func() days { + moreDays = append(moreDays, day) + return moreDays + } +} + +type atTime struct { + hours, minutes, seconds uint +} + +func (a atTime) time(location *time.Location) time.Time { + return time.Date(0, 0, 0, int(a.hours), int(a.minutes), int(a.seconds), 0, location) +} + +// AtTime defines a function that returns the internal atTime +type AtTime func() atTime + +// NewAtTime provide the hours, minutes and seconds at which +// the job should be run +func NewAtTime(hours, minutes, seconds uint) AtTime { + return func() atTime { + return atTime{hours: hours, minutes: minutes, seconds: seconds} + } +} + +// AtTimes define a list of AtTime +type AtTimes func() []AtTime + +// NewAtTimes provide the hours, minutes and seconds at which +// the job should be run +func NewAtTimes(atTime AtTime, atTimes ...AtTime) AtTimes { + return func() []AtTime { + atTimes = append(atTimes, atTime) + return atTimes + } +} + +// MonthlyJob runs the job on the interval of months, on the specific days of the month +// specified, and at the set times. Days of the month can be 1 to 31 or negative (-1 to -31), which +// count backwards from the end of the month. E.g. -1 is the last day of the month. +// +// If a day of the month is selected that does not exist in all months (e.g. 31st) +// any month that does not have that day will be skipped. +// +// By default, the job will start the next available day, considering the last run to be now, +// and the time and month based on the interval, days and times you input. +// This means, if you select an interval greater than 1, your job by default will run +// X (interval) months from now if there are no daysOfTheMonth left in the current month. +// You can use WithStartAt to tell the scheduler to start the job sooner. +// +// Carefully consider your configuration! 
+// - For example: an interval of 2 months on the 31st of each month, starting 12/31 +// would skip Feb, April, June, and next run would be in August. +func MonthlyJob(interval uint, daysOfTheMonth DaysOfTheMonth, atTimes AtTimes) JobDefinition { + return monthlyJobDefinition{ + interval: interval, + daysOfTheMonth: daysOfTheMonth, + atTimes: atTimes, + } +} + +var _ JobDefinition = (*oneTimeJobDefinition)(nil) + +type oneTimeJobDefinition struct { + startAt OneTimeJobStartAtOption +} + +func (o oneTimeJobDefinition) setup(j *internalJob, _ *time.Location, now time.Time) error { + sortedTimes := o.startAt(j) + slices.SortStableFunc(sortedTimes, ascendingTime) + // keep only schedules that are in the future + idx, found := slices.BinarySearchFunc(sortedTimes, now, ascendingTime) + if found { + idx++ + } + sortedTimes = sortedTimes[idx:] + if !j.startImmediately && len(sortedTimes) == 0 { + return ErrOneTimeJobStartDateTimePast + } + j.jobSchedule = oneTimeJob{sortedTimes: sortedTimes} + return nil +} + +// OneTimeJobStartAtOption defines when the one time job is run +type OneTimeJobStartAtOption func(*internalJob) []time.Time + +// OneTimeJobStartImmediately tells the scheduler to run the one time job immediately. +func OneTimeJobStartImmediately() OneTimeJobStartAtOption { + return func(j *internalJob) []time.Time { + j.startImmediately = true + return []time.Time{} + } +} + +// OneTimeJobStartDateTime sets the date & time at which the job should run. +// This datetime must be in the future (according to the scheduler clock). +func OneTimeJobStartDateTime(start time.Time) OneTimeJobStartAtOption { + return func(j *internalJob) []time.Time { + return []time.Time{start} + } +} + +// OneTimeJobStartDateTimes sets the date & times at which the job should run. +// At least one of the date/times must be in the future (according to the scheduler clock). 
+func OneTimeJobStartDateTimes(times ...time.Time) OneTimeJobStartAtOption { + return func(j *internalJob) []time.Time { + return times + } +} + +// OneTimeJob is to run a job once at a specified time and not on +// any regular schedule. +func OneTimeJob(startAt OneTimeJobStartAtOption) JobDefinition { + return oneTimeJobDefinition{ + startAt: startAt, + } +} + +// ----------------------------------------------- +// ----------------------------------------------- +// ----------------- Job Options ----------------- +// ----------------------------------------------- +// ----------------------------------------------- + +// JobOption defines the constructor for job options. +type JobOption func(*internalJob) error + +// WithDistributedJobLocker sets the locker to be used by multiple +// Scheduler instances to ensure that only one instance of each +// job is run. +func WithDistributedJobLocker(locker Locker) JobOption { + return func(j *internalJob) error { + if locker == nil { + return ErrWithDistributedJobLockerNil + } + j.locker = locker + return nil + } +} + +// WithEventListeners sets the event listeners that should be +// run for the job. +func WithEventListeners(eventListeners ...EventListener) JobOption { + return func(j *internalJob) error { + for _, eventListener := range eventListeners { + if err := eventListener(j); err != nil { + return err + } + } + return nil + } +} + +// WithLimitedRuns limits the number of executions of this job to n. +// Upon reaching the limit, the job is removed from the scheduler. +func WithLimitedRuns(limit uint) JobOption { + return func(j *internalJob) error { + j.limitRunsTo = &limitRunsTo{ + limit: limit, + runCount: 0, + } + return nil + } +} + +// WithName sets the name of the job. Name provides +// a human-readable identifier for the job. 
+func WithName(name string) JobOption { + // TODO use the name for metrics and future logging option + return func(j *internalJob) error { + if name == "" { + return ErrWithNameEmpty + } + j.name = name + return nil + } +} + +// WithSingletonMode keeps the job from running again if it is already running. +// This is useful for jobs that should not overlap, and that occasionally +// (but not consistently) run longer than the interval between job runs. +func WithSingletonMode(mode LimitMode) JobOption { + return func(j *internalJob) error { + j.singletonMode = true + j.singletonLimitMode = mode + return nil + } +} + +// WithStartAt sets the option for starting the job at +// a specific datetime. +func WithStartAt(option StartAtOption) JobOption { + return func(j *internalJob) error { + return option(j) + } +} + +// StartAtOption defines options for starting the job +type StartAtOption func(*internalJob) error + +// WithStartImmediately tells the scheduler to run the job immediately +// regardless of the type or schedule of job. After this immediate run +// the job is scheduled from this time based on the job definition. +func WithStartImmediately() StartAtOption { + return func(j *internalJob) error { + j.startImmediately = true + return nil + } +} + +// WithStartDateTime sets the first date & time at which the job should run. +// This datetime must be in the future. +func WithStartDateTime(start time.Time) StartAtOption { + return func(j *internalJob) error { + if start.IsZero() || start.Before(time.Now()) { + return ErrWithStartDateTimePast + } + j.startTime = start + return nil + } +} + +// WithTags sets the tags for the job. Tags provide +// a way to identify jobs by a set of tags and remove +// multiple jobs by tag. 
+func WithTags(tags ...string) JobOption { + return func(j *internalJob) error { + j.tags = tags + return nil + } +} + +// ----------------------------------------------- +// ----------------------------------------------- +// ------------- Job Event Listeners ------------- +// ----------------------------------------------- +// ----------------------------------------------- + +// EventListener defines the constructor for event +// listeners that can be used to listen for job events. +type EventListener func(*internalJob) error + +// BeforeJobRuns is used to listen for when a job is about to run and +// then run the provided function. +func BeforeJobRuns(eventListenerFunc func(jobID uuid.UUID, jobName string)) EventListener { + return func(j *internalJob) error { + if eventListenerFunc == nil { + return ErrEventListenerFuncNil + } + j.beforeJobRuns = eventListenerFunc + return nil + } +} + +// AfterJobRuns is used to listen for when a job has run +// without an error, and then run the provided function. +func AfterJobRuns(eventListenerFunc func(jobID uuid.UUID, jobName string)) EventListener { + return func(j *internalJob) error { + if eventListenerFunc == nil { + return ErrEventListenerFuncNil + } + j.afterJobRuns = eventListenerFunc + return nil + } +} + +// AfterJobRunsWithError is used to listen for when a job has run and +// returned an error, and then run the provided function. +func AfterJobRunsWithError(eventListenerFunc func(jobID uuid.UUID, jobName string, err error)) EventListener { + return func(j *internalJob) error { + if eventListenerFunc == nil { + return ErrEventListenerFuncNil + } + j.afterJobRunsWithError = eventListenerFunc + return nil + } +} + +// AfterJobRunsWithPanic is used to listen for when a job has run and +// returned panicked recover data, and then run the provided function. 
+func AfterJobRunsWithPanic(eventListenerFunc func(jobID uuid.UUID, jobName string, recoverData any)) EventListener { + return func(j *internalJob) error { + if eventListenerFunc == nil { + return ErrEventListenerFuncNil + } + j.afterJobRunsWithPanic = eventListenerFunc + return nil + } +} + +// AfterLockError is used to when the distributed locker returns an error and +// then run the provided function. +func AfterLockError(eventListenerFunc func(jobID uuid.UUID, jobName string, err error)) EventListener { + return func(j *internalJob) error { + if eventListenerFunc == nil { + return ErrEventListenerFuncNil + } + j.afterLockError = eventListenerFunc + return nil + } +} + +// ----------------------------------------------- +// ----------------------------------------------- +// ---------------- Job Schedules ---------------- +// ----------------------------------------------- +// ----------------------------------------------- + +type jobSchedule interface { + next(lastRun time.Time) time.Time +} + +var _ jobSchedule = (*cronJob)(nil) + +type cronJob struct { + cronSchedule cron.Schedule +} + +func (j *cronJob) next(lastRun time.Time) time.Time { + return j.cronSchedule.Next(lastRun) +} + +var _ jobSchedule = (*durationJob)(nil) + +type durationJob struct { + duration time.Duration +} + +func (j *durationJob) next(lastRun time.Time) time.Time { + return lastRun.Add(j.duration) +} + +var _ jobSchedule = (*durationRandomJob)(nil) + +type durationRandomJob struct { + min, max time.Duration + rand *rand.Rand +} + +func (j *durationRandomJob) next(lastRun time.Time) time.Time { + r := j.rand.Int63n(int64(j.max - j.min)) + return lastRun.Add(j.min + time.Duration(r)) +} + +var _ jobSchedule = (*dailyJob)(nil) + +type dailyJob struct { + interval uint + atTimes []time.Time +} + +func (d dailyJob) next(lastRun time.Time) time.Time { + firstPass := true + next := d.nextDay(lastRun, firstPass) + if !next.IsZero() { + return next + } + firstPass = false + + startNextDay := 
time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day()+int(d.interval), 0, 0, 0, lastRun.Nanosecond(), lastRun.Location()) + return d.nextDay(startNextDay, firstPass) +} + +func (d dailyJob) nextDay(lastRun time.Time, firstPass bool) time.Time { + for _, at := range d.atTimes { + // sub the at time hour/min/sec onto the lastScheduledRun's values + // to use in checks to see if we've got our next run time + atDate := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) + + if firstPass && atDate.After(lastRun) { + // checking to see if it is after i.e. greater than, + // and not greater or equal as our lastScheduledRun day/time + // will be in the loop, and we don't want to select it again + return atDate + } else if !firstPass && !atDate.Before(lastRun) { + // now that we're looking at the next day, it's ok to consider + // the same at time that was last run (as lastScheduledRun has been incremented) + return atDate + } + } + return time.Time{} +} + +var _ jobSchedule = (*weeklyJob)(nil) + +type weeklyJob struct { + interval uint + daysOfWeek []time.Weekday + atTimes []time.Time +} + +func (w weeklyJob) next(lastRun time.Time) time.Time { + firstPass := true + next := w.nextWeekDayAtTime(lastRun, firstPass) + if !next.IsZero() { + return next + } + firstPass = false + + startOfTheNextIntervalWeek := (lastRun.Day() - int(lastRun.Weekday())) + int(w.interval*7) + from := time.Date(lastRun.Year(), lastRun.Month(), startOfTheNextIntervalWeek, 0, 0, 0, 0, lastRun.Location()) + return w.nextWeekDayAtTime(from, firstPass) +} + +func (w weeklyJob) nextWeekDayAtTime(lastRun time.Time, firstPass bool) time.Time { + for _, wd := range w.daysOfWeek { + // checking if we're on the same day or later in the same week + if wd >= lastRun.Weekday() { + // weekDayDiff is used to add the correct amount to the atDate day below + weekDayDiff := wd - lastRun.Weekday() + for _, at := range w.atTimes { + 
// sub the at time hour/min/sec onto the lastScheduledRun's values + // to use in checks to see if we've got our next run time + atDate := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day()+int(weekDayDiff), at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) + + if firstPass && atDate.After(lastRun) { + // checking to see if it is after i.e. greater than, + // and not greater or equal as our lastScheduledRun day/time + // will be in the loop, and we don't want to select it again + return atDate + } else if !firstPass && !atDate.Before(lastRun) { + // now that we're looking at the next week, it's ok to consider + // the same at time that was last run (as lastScheduledRun has been incremented) + return atDate + } + } + } + } + return time.Time{} +} + +var _ jobSchedule = (*monthlyJob)(nil) + +type monthlyJob struct { + interval uint + days []int + daysFromEnd []int + atTimes []time.Time +} + +func (m monthlyJob) next(lastRun time.Time) time.Time { + daysList := make([]int, len(m.days)) + copy(daysList, m.days) + + daysFromEnd := m.handleNegativeDays(lastRun, daysList, m.daysFromEnd) + next := m.nextMonthDayAtTime(lastRun, daysFromEnd, true) + if !next.IsZero() { + return next + } + + from := time.Date(lastRun.Year(), lastRun.Month()+time.Month(m.interval), 1, 0, 0, 0, 0, lastRun.Location()) + for next.IsZero() { + daysFromEnd = m.handleNegativeDays(from, daysList, m.daysFromEnd) + next = m.nextMonthDayAtTime(from, daysFromEnd, false) + from = from.AddDate(0, int(m.interval), 0) + } + + return next +} + +func (m monthlyJob) handleNegativeDays(from time.Time, days, negativeDays []int) []int { + var out []int + // getting a list of the days from the end of the following month + // -1 == the last day of the month + firstDayNextMonth := time.Date(from.Year(), from.Month()+1, 1, 0, 0, 0, 0, from.Location()) + for _, daySub := range negativeDays { + day := firstDayNextMonth.AddDate(0, 0, daySub).Day() + out = append(out, day) + } + out = 
append(out, days...) + slices.Sort(out) + return out +} + +func (m monthlyJob) nextMonthDayAtTime(lastRun time.Time, days []int, firstPass bool) time.Time { + // find the next day in the month that should run and then check for an at time + for _, day := range days { + if day >= lastRun.Day() { + for _, at := range m.atTimes { + // sub the day, and the at time hour/min/sec onto the lastScheduledRun's values + // to use in checks to see if we've got our next run time + atDate := time.Date(lastRun.Year(), lastRun.Month(), day, at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) + + if atDate.Month() != lastRun.Month() { + // this check handles if we're setting a day not in the current month + // e.g. setting day 31 in Feb results in March 2nd + continue + } + + if firstPass && atDate.After(lastRun) { + // checking to see if it is after i.e. greater than, + // and not greater or equal as our lastScheduledRun day/time + // will be in the loop, and we don't want to select it again + return atDate + } else if !firstPass && !atDate.Before(lastRun) { + // now that we're looking at the next month, it's ok to consider + // the same at time that was lastScheduledRun (as lastScheduledRun has been incremented) + return atDate + } + } + continue + } + } + return time.Time{} +} + +var _ jobSchedule = (*oneTimeJob)(nil) + +type oneTimeJob struct { + sortedTimes []time.Time +} + +// next finds the next item in a sorted list of times using binary-search. 
+// +// example: sortedTimes: [2, 4, 6, 8] +// +// lastRun: 1 => [idx=0,found=false] => next is 2 - sorted[idx] idx=0 +// lastRun: 2 => [idx=0,found=true] => next is 4 - sorted[idx+1] idx=1 +// lastRun: 3 => [idx=1,found=false] => next is 4 - sorted[idx] idx=1 +// lastRun: 4 => [idx=1,found=true] => next is 6 - sorted[idx+1] idx=2 +// lastRun: 7 => [idx=3,found=false] => next is 8 - sorted[idx] idx=3 +// lastRun: 8 => [idx=3,found=true] => next is none +// lastRun: 9 => [idx=4,found=false] => next is none +func (o oneTimeJob) next(lastRun time.Time) time.Time { + idx, found := slices.BinarySearchFunc(o.sortedTimes, lastRun, ascendingTime) + // if found, the next run is the following index + if found { + idx++ + } + // exhausted runs + if idx >= len(o.sortedTimes) { + return time.Time{} + } + + return o.sortedTimes[idx] +} + +// ----------------------------------------------- +// ----------------------------------------------- +// ---------------- Job Interface ---------------- +// ----------------------------------------------- +// ----------------------------------------------- + +// Job provides the available methods on the job +// available to the caller. +type Job interface { + // ID returns the job's unique identifier. + ID() uuid.UUID + // LastRun returns the time of the job's last run + LastRun() (time.Time, error) + // Name returns the name defined on the job. + Name() string + // NextRun returns the time of the job's next scheduled run. + NextRun() (time.Time, error) + // NextRuns returns the requested number of calculated next run values. + NextRuns(int) ([]time.Time, error) + // RunNow runs the job once, now. This does not alter + // the existing run schedule, and will respect all job + // and scheduler limits. This means that running a job now may + // cause the job's regular interval to be rescheduled due to + // the instance being run by RunNow blocking your run limit. + RunNow() error + // Tags returns the job's string tags.
+ Tags() []string +} + +var _ Job = (*job)(nil) + +// job is the internal struct that implements +// the public interface. This is used to avoid +// leaking information the caller never needs +// to have or tinker with. +type job struct { + id uuid.UUID + name string + tags []string + jobOutRequest chan jobOutRequest + runJobRequest chan runJobRequest +} + +func (j job) ID() uuid.UUID { + return j.id +} + +func (j job) LastRun() (time.Time, error) { + ij := requestJob(j.id, j.jobOutRequest) + if ij == nil || ij.id == uuid.Nil { + return time.Time{}, ErrJobNotFound + } + return ij.lastRun, nil +} + +func (j job) Name() string { + return j.name +} + +func (j job) NextRun() (time.Time, error) { + ij := requestJob(j.id, j.jobOutRequest) + if ij == nil || ij.id == uuid.Nil { + return time.Time{}, ErrJobNotFound + } + if len(ij.nextScheduled) == 0 { + return time.Time{}, nil + } + // the first element is the next scheduled run with subsequent + // runs following after in the slice + return ij.nextScheduled[0], nil +} + +func (j job) NextRuns(count int) ([]time.Time, error) { + ij := requestJob(j.id, j.jobOutRequest) + if ij == nil || ij.id == uuid.Nil { + return nil, ErrJobNotFound + } + + lengthNextScheduled := len(ij.nextScheduled) + if lengthNextScheduled == 0 { + return nil, nil + } else if count <= lengthNextScheduled { + return ij.nextScheduled[:count], nil + } + + out := make([]time.Time, count) + for i := 0; i < count; i++ { + if i < lengthNextScheduled { + out[i] = ij.nextScheduled[i] + continue + } + + from := out[i-1] + out[i] = ij.next(from) + } + + return out, nil +} + +func (j job) Tags() []string { + return j.tags +} + +func (j job) RunNow() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + resp := make(chan error, 1) + + select { + case j.runJobRequest <- runJobRequest{ + id: j.id, + outChan: resp, + }: + case <-time.After(100 * time.Millisecond): + return ErrJobRunNowFailed + } + var err error + select { + 
case <-ctx.Done(): + return ErrJobRunNowFailed + case errReceived := <-resp: + err = errReceived + } + return err +} diff --git a/vendor/github.com/go-co-op/gocron/v2/logger.go b/vendor/github.com/go-co-op/gocron/v2/logger.go new file mode 100644 index 000000000..c8ebaf82b --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/logger.go @@ -0,0 +1,101 @@ +//go:generate mockgen -destination=mocks/logger.go -package=gocronmocks . Logger +package gocron + +import ( + "fmt" + "log" + "os" + "strings" +) + +// Logger is the interface that wraps the basic logging methods +// used by gocron. The methods are modeled after the standard +// library slog package. The default logger is a no-op logger. +// To enable logging, use one of the provided New*Logger functions +// or implement your own Logger. The actual level of Log that is logged +// is handled by the implementation. +type Logger interface { + Debug(msg string, args ...any) + Error(msg string, args ...any) + Info(msg string, args ...any) + Warn(msg string, args ...any) +} + +var _ Logger = (*noOpLogger)(nil) + +type noOpLogger struct{} + +func (l noOpLogger) Debug(_ string, _ ...any) {} +func (l noOpLogger) Error(_ string, _ ...any) {} +func (l noOpLogger) Info(_ string, _ ...any) {} +func (l noOpLogger) Warn(_ string, _ ...any) {} + +var _ Logger = (*logger)(nil) + +// LogLevel is the level of logging that should be logged +// when using the basic NewLogger. +type LogLevel int + +// The different log levels that can be used. +const ( + LogLevelError LogLevel = iota + LogLevelWarn + LogLevelInfo + LogLevelDebug +) + +type logger struct { + log *log.Logger + level LogLevel +} + +// NewLogger returns a new Logger that logs at the given level. 
+func NewLogger(level LogLevel) Logger { + l := log.New(os.Stdout, "", log.LstdFlags) + return &logger{ + log: l, + level: level, + } +} + +func (l *logger) Debug(msg string, args ...any) { + if l.level < LogLevelDebug { + return + } + l.log.Printf("DEBUG: %s%s\n", msg, logFormatArgs(args...)) +} + +func (l *logger) Error(msg string, args ...any) { + if l.level < LogLevelError { + return + } + l.log.Printf("ERROR: %s%s\n", msg, logFormatArgs(args...)) +} + +func (l *logger) Info(msg string, args ...any) { + if l.level < LogLevelInfo { + return + } + l.log.Printf("INFO: %s%s\n", msg, logFormatArgs(args...)) +} + +func (l *logger) Warn(msg string, args ...any) { + if l.level < LogLevelWarn { + return + } + l.log.Printf("WARN: %s%s\n", msg, logFormatArgs(args...)) +} + +func logFormatArgs(args ...any) string { + if len(args) == 0 { + return "" + } + if len(args)%2 != 0 { + return ", " + fmt.Sprint(args...) + } + var pairs []string + for i := 0; i < len(args); i += 2 { + pairs = append(pairs, fmt.Sprintf("%s=%v", args[i], args[i+1])) + } + return ", " + strings.Join(pairs, ", ") +} diff --git a/vendor/github.com/go-co-op/gocron/v2/monitor.go b/vendor/github.com/go-co-op/gocron/v2/monitor.go new file mode 100644 index 000000000..ecf28805f --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/monitor.go @@ -0,0 +1,27 @@ +package gocron + +import ( + "time" + + "github.com/google/uuid" +) + +// JobStatus is the status of job run that should be collected with the metric. +type JobStatus string + +// The different statuses of job that can be used. +const ( + Fail JobStatus = "fail" + Success JobStatus = "success" + Skip JobStatus = "skip" +) + +// Monitor represents the interface to collect jobs metrics. 
+type Monitor interface { + // IncrementJob will provide details about the job and expects the underlying implementation + // to handle instantiating and incrementing a value + IncrementJob(id uuid.UUID, name string, tags []string, status JobStatus) + // RecordJobTiming will provide details about the job and the timing and expects the underlying implementation + // to handle instantiating and recording the value + RecordJobTiming(startTime, endTime time.Time, id uuid.UUID, name string, tags []string) +} diff --git a/vendor/github.com/go-co-op/gocron/v2/scheduler.go b/vendor/github.com/go-co-op/gocron/v2/scheduler.go new file mode 100644 index 000000000..9540003fb --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/scheduler.go @@ -0,0 +1,861 @@ +//go:generate mockgen -destination=mocks/scheduler.go -package=gocronmocks . Scheduler +package gocron + +import ( + "context" + "reflect" + "runtime" + "time" + + "github.com/google/uuid" + "github.com/jonboulle/clockwork" + "golang.org/x/exp/slices" +) + +var _ Scheduler = (*scheduler)(nil) + +// Scheduler defines the interface for the Scheduler. +type Scheduler interface { + // Jobs returns all the jobs currently in the scheduler. + Jobs() []Job + // NewJob creates a new job in the Scheduler. The job is scheduled per the provided + // definition when the Scheduler is started. If the Scheduler is already running + // the job will be scheduled when the Scheduler is started. + NewJob(JobDefinition, Task, ...JobOption) (Job, error) + // RemoveByTags removes all jobs that have at least one of the provided tags. + RemoveByTags(...string) + // RemoveJob removes the job with the provided id. + RemoveJob(uuid.UUID) error + // Shutdown should be called when you no longer need + // the Scheduler or Job's as the Scheduler cannot + // be restarted after calling Shutdown. This is similar + // to a Close or Cleanup method and is often deferred after + // starting the scheduler. 
+ Shutdown() error + // Start begins scheduling jobs for execution based + // on each job's definition. Job's added to an already + // running scheduler will be scheduled immediately based + // on definition. Start is non-blocking. + Start() + // StopJobs stops the execution of all jobs in the scheduler. + // This can be useful in situations where jobs need to be + // paused globally and then restarted with Start(). + StopJobs() error + // Update replaces the existing Job's JobDefinition with the provided + // JobDefinition. The Job's Job.ID() remains the same. + Update(uuid.UUID, JobDefinition, Task, ...JobOption) (Job, error) + // JobsWaitingInQueue number of jobs waiting in Queue in case of LimitModeWait + // In case of LimitModeReschedule or no limit it will be always zero + JobsWaitingInQueue() int +} + +// ----------------------------------------------- +// ----------------------------------------------- +// ----------------- Scheduler ------------------- +// ----------------------------------------------- +// ----------------------------------------------- + +type scheduler struct { + shutdownCtx context.Context + shutdownCancel context.CancelFunc + exec executor + jobs map[uuid.UUID]internalJob + location *time.Location + clock clockwork.Clock + started bool + globalJobOptions []JobOption + logger Logger + + startCh chan struct{} + startedCh chan struct{} + stopCh chan struct{} + stopErrCh chan error + allJobsOutRequest chan allJobsOutRequest + jobOutRequestCh chan jobOutRequest + runJobRequestCh chan runJobRequest + newJobCh chan newJobIn + removeJobCh chan uuid.UUID + removeJobsByTagsCh chan []string +} + +type newJobIn struct { + ctx context.Context + cancel context.CancelFunc + job internalJob +} + +type jobOutRequest struct { + id uuid.UUID + outChan chan internalJob +} + +type runJobRequest struct { + id uuid.UUID + outChan chan error +} + +type allJobsOutRequest struct { + outChan chan []Job +} + +// NewScheduler creates a new Scheduler instance. 
+// The Scheduler is not started until Start() is called. +// +// NewJob will add jobs to the Scheduler, but they will not +// be scheduled until Start() is called. +func NewScheduler(options ...SchedulerOption) (Scheduler, error) { + schCtx, cancel := context.WithCancel(context.Background()) + + exec := executor{ + stopCh: make(chan struct{}), + stopTimeout: time.Second * 10, + singletonRunners: nil, + logger: &noOpLogger{}, + + jobsIn: make(chan jobIn), + jobsOutForRescheduling: make(chan uuid.UUID), + jobsOutCompleted: make(chan uuid.UUID), + jobOutRequest: make(chan jobOutRequest, 1000), + done: make(chan error), + } + + s := &scheduler{ + shutdownCtx: schCtx, + shutdownCancel: cancel, + exec: exec, + jobs: make(map[uuid.UUID]internalJob), + location: time.Local, + clock: clockwork.NewRealClock(), + logger: &noOpLogger{}, + + newJobCh: make(chan newJobIn), + removeJobCh: make(chan uuid.UUID), + removeJobsByTagsCh: make(chan []string), + startCh: make(chan struct{}), + startedCh: make(chan struct{}), + stopCh: make(chan struct{}), + stopErrCh: make(chan error, 1), + jobOutRequestCh: make(chan jobOutRequest), + runJobRequestCh: make(chan runJobRequest), + allJobsOutRequest: make(chan allJobsOutRequest), + } + + for _, option := range options { + err := option(s) + if err != nil { + return nil, err + } + } + + go func() { + s.logger.Info("gocron: new scheduler created") + for { + select { + case id := <-s.exec.jobsOutForRescheduling: + s.selectExecJobsOutForRescheduling(id) + + case id := <-s.exec.jobsOutCompleted: + s.selectExecJobsOutCompleted(id) + + case in := <-s.newJobCh: + s.selectNewJob(in) + + case id := <-s.removeJobCh: + s.selectRemoveJob(id) + + case tags := <-s.removeJobsByTagsCh: + s.selectRemoveJobsByTags(tags) + + case out := <-s.exec.jobOutRequest: + s.selectJobOutRequest(out) + + case out := <-s.jobOutRequestCh: + s.selectJobOutRequest(out) + + case out := <-s.allJobsOutRequest: + s.selectAllJobsOutRequest(out) + + case run := 
<-s.runJobRequestCh: + s.selectRunJobRequest(run) + + case <-s.startCh: + s.selectStart() + + case <-s.stopCh: + s.stopScheduler() + + case <-s.shutdownCtx.Done(): + s.stopScheduler() + return + } + } + }() + + return s, nil +} + +// ----------------------------------------------- +// ----------------------------------------------- +// --------- Scheduler Channel Methods ----------- +// ----------------------------------------------- +// ----------------------------------------------- + +// The scheduler's channel functions are broken out here +// to allow prioritizing within the select blocks. The idea +// being that we want to make sure that scheduling tasks +// are not blocked by requests from the caller for information +// about jobs. + +func (s *scheduler) stopScheduler() { + s.logger.Debug("gocron: stopping scheduler") + if s.started { + s.exec.stopCh <- struct{}{} + } + + for _, j := range s.jobs { + j.stop() + } + for id, j := range s.jobs { + <-j.ctx.Done() + + j.ctx, j.cancel = context.WithCancel(s.shutdownCtx) + s.jobs[id] = j + } + var err error + if s.started { + select { + case err = <-s.exec.done: + case <-time.After(s.exec.stopTimeout + 1*time.Second): + err = ErrStopExecutorTimedOut + } + } + s.stopErrCh <- err + s.started = false + s.logger.Debug("gocron: scheduler stopped") +} + +func (s *scheduler) selectAllJobsOutRequest(out allJobsOutRequest) { + outJobs := make([]Job, len(s.jobs)) + var counter int + for _, j := range s.jobs { + outJobs[counter] = s.jobFromInternalJob(j) + counter++ + } + slices.SortFunc(outJobs, func(a, b Job) int { + aID, bID := a.ID().String(), b.ID().String() + switch { + case aID < bID: + return -1 + case aID > bID: + return 1 + default: + return 0 + } + }) + select { + case <-s.shutdownCtx.Done(): + case out.outChan <- outJobs: + } +} + +func (s *scheduler) selectRunJobRequest(run runJobRequest) { + j, ok := s.jobs[run.id] + if !ok { + select { + case run.outChan <- ErrJobNotFound: + default: + } + } + select { + case 
<-s.shutdownCtx.Done(): + select { + case run.outChan <- ErrJobRunNowFailed: + default: + } + case s.exec.jobsIn <- jobIn{ + id: j.id, + shouldSendOut: false, + }: + select { + case run.outChan <- nil: + default: + } + } +} + +func (s *scheduler) selectRemoveJob(id uuid.UUID) { + j, ok := s.jobs[id] + if !ok { + return + } + j.stop() + delete(s.jobs, id) +} + +// Jobs coming back from the executor to the scheduler that +// need to evaluated for rescheduling. +func (s *scheduler) selectExecJobsOutForRescheduling(id uuid.UUID) { + select { + case <-s.shutdownCtx.Done(): + return + default: + } + j, ok := s.jobs[id] + if !ok { + // the job was removed while it was running, and + // so we don't need to reschedule it. + return + } + var scheduleFrom time.Time + if len(j.nextScheduled) > 0 { + // always grab the last element in the slice as that is the furthest + // out in the future and the time from which we want to calculate + // the subsequent next run time. + slices.SortStableFunc(j.nextScheduled, ascendingTime) + scheduleFrom = j.nextScheduled[len(j.nextScheduled)-1] + } + + next := j.next(scheduleFrom) + if next.IsZero() { + // the job's next function will return zero for OneTime jobs. + // since they are one time only, they do not need rescheduling. + return + } + if next.Before(s.now()) { + // in some cases the next run time can be in the past, for example: + // - the time on the machine was incorrect and has been synced with ntp + // - the machine went to sleep, and woke up some time later + // in those cases, we want to increment to the next run in the future + // and schedule the job for that time. + for next.Before(s.now()) { + next = j.next(next) + } + } + j.nextScheduled = append(j.nextScheduled, next) + j.timer = s.clock.AfterFunc(next.Sub(s.now()), func() { + // set the actual timer on the job here and listen for + // shut down events so that the job doesn't attempt to + // run if the scheduler has been shutdown. 
+ select { + case <-s.shutdownCtx.Done(): + return + case s.exec.jobsIn <- jobIn{ + id: j.id, + shouldSendOut: true, + }: + } + }) + // update the job with its new next and last run times and timer. + s.jobs[id] = j +} + +func (s *scheduler) selectExecJobsOutCompleted(id uuid.UUID) { + j, ok := s.jobs[id] + if !ok { + return + } + + // if the job has nextScheduled time in the past, + // we need to remove any that are in the past. + var newNextScheduled []time.Time + for _, t := range j.nextScheduled { + if t.Before(s.now()) { + continue + } + newNextScheduled = append(newNextScheduled, t) + } + j.nextScheduled = newNextScheduled + + // if the job has a limited number of runs set, we need to + // check how many runs have occurred and stop running this + // job if it has reached the limit. + if j.limitRunsTo != nil { + j.limitRunsTo.runCount = j.limitRunsTo.runCount + 1 + if j.limitRunsTo.runCount == j.limitRunsTo.limit { + go func() { + select { + case <-s.shutdownCtx.Done(): + return + case s.removeJobCh <- id: + } + }() + return + } + } + + j.lastRun = s.now() + s.jobs[id] = j +} + +func (s *scheduler) selectJobOutRequest(out jobOutRequest) { + if j, ok := s.jobs[out.id]; ok { + select { + case out.outChan <- j: + case <-s.shutdownCtx.Done(): + } + } + close(out.outChan) +} + +func (s *scheduler) selectNewJob(in newJobIn) { + j := in.job + if s.started { + next := j.startTime + if j.startImmediately { + next = s.now() + select { + case <-s.shutdownCtx.Done(): + case s.exec.jobsIn <- jobIn{ + id: j.id, + shouldSendOut: true, + }: + } + } else { + if next.IsZero() { + next = j.next(s.now()) + } + + id := j.id + j.timer = s.clock.AfterFunc(next.Sub(s.now()), func() { + select { + case <-s.shutdownCtx.Done(): + case s.exec.jobsIn <- jobIn{ + id: id, + shouldSendOut: true, + }: + } + }) + } + j.nextScheduled = append(j.nextScheduled, next) + } + + s.jobs[j.id] = j + in.cancel() +} + +func (s *scheduler) selectRemoveJobsByTags(tags []string) { + for _, j := range s.jobs 
{ + for _, tag := range tags { + if slices.Contains(j.tags, tag) { + j.stop() + delete(s.jobs, j.id) + break + } + } + } +} + +func (s *scheduler) selectStart() { + s.logger.Debug("gocron: scheduler starting") + go s.exec.start() + + s.started = true + for id, j := range s.jobs { + next := j.startTime + if j.startImmediately { + next = s.now() + select { + case <-s.shutdownCtx.Done(): + case s.exec.jobsIn <- jobIn{ + id: id, + shouldSendOut: true, + }: + } + } else { + if next.IsZero() { + next = j.next(s.now()) + } + + jobID := id + j.timer = s.clock.AfterFunc(next.Sub(s.now()), func() { + select { + case <-s.shutdownCtx.Done(): + case s.exec.jobsIn <- jobIn{ + id: jobID, + shouldSendOut: true, + }: + } + }) + } + j.nextScheduled = append(j.nextScheduled, next) + s.jobs[id] = j + } + select { + case <-s.shutdownCtx.Done(): + case s.startedCh <- struct{}{}: + s.logger.Info("gocron: scheduler started") + } +} + +// ----------------------------------------------- +// ----------------------------------------------- +// ------------- Scheduler Methods --------------- +// ----------------------------------------------- +// ----------------------------------------------- + +func (s *scheduler) now() time.Time { + return s.clock.Now().In(s.location) +} + +func (s *scheduler) jobFromInternalJob(in internalJob) job { + return job{ + in.id, + in.name, + slices.Clone(in.tags), + s.jobOutRequestCh, + s.runJobRequestCh, + } +} + +func (s *scheduler) Jobs() []Job { + outChan := make(chan []Job) + select { + case <-s.shutdownCtx.Done(): + case s.allJobsOutRequest <- allJobsOutRequest{outChan: outChan}: + } + + var jobs []Job + select { + case <-s.shutdownCtx.Done(): + case jobs = <-outChan: + } + + return jobs +} + +func (s *scheduler) NewJob(jobDefinition JobDefinition, task Task, options ...JobOption) (Job, error) { + return s.addOrUpdateJob(uuid.Nil, jobDefinition, task, options) +} + +func (s *scheduler) addOrUpdateJob(id uuid.UUID, definition JobDefinition, taskWrapper Task, 
options []JobOption) (Job, error) { + j := internalJob{} + if id == uuid.Nil { + j.id = uuid.New() + } else { + currentJob := requestJobCtx(s.shutdownCtx, id, s.jobOutRequestCh) + if currentJob != nil && currentJob.id != uuid.Nil { + select { + case <-s.shutdownCtx.Done(): + return nil, nil + case s.removeJobCh <- id: + <-currentJob.ctx.Done() + } + } + + j.id = id + } + + j.ctx, j.cancel = context.WithCancel(s.shutdownCtx) + + if taskWrapper == nil { + return nil, ErrNewJobTaskNil + } + + tsk := taskWrapper() + taskFunc := reflect.ValueOf(tsk.function) + for taskFunc.Kind() == reflect.Ptr { + taskFunc = taskFunc.Elem() + } + + if taskFunc.Kind() != reflect.Func { + return nil, ErrNewJobTaskNotFunc + } + + expectedParameterLength := taskFunc.Type().NumIn() + if len(tsk.parameters) != expectedParameterLength { + return nil, ErrNewJobWrongNumberOfParameters + } + + for i := 0; i < expectedParameterLength; i++ { + t1 := reflect.TypeOf(tsk.parameters[i]).Kind() + if t1 == reflect.Interface || t1 == reflect.Pointer { + t1 = reflect.TypeOf(tsk.parameters[i]).Elem().Kind() + } + t2 := reflect.New(taskFunc.Type().In(i)).Elem().Kind() + if t2 == reflect.Interface || t2 == reflect.Pointer { + t2 = reflect.Indirect(reflect.ValueOf(taskFunc.Type().In(i))).Kind() + } + if t1 != t2 { + return nil, ErrNewJobWrongTypeOfParameters + } + } + + j.name = runtime.FuncForPC(taskFunc.Pointer()).Name() + j.function = tsk.function + j.parameters = tsk.parameters + + // apply global job options + for _, option := range s.globalJobOptions { + if err := option(&j); err != nil { + return nil, err + } + } + + // apply job specific options, which take precedence + for _, option := range options { + if err := option(&j); err != nil { + return nil, err + } + } + + if err := definition.setup(&j, s.location, s.clock.Now()); err != nil { + return nil, err + } + + newJobCtx, newJobCancel := context.WithCancel(context.Background()) + select { + case <-s.shutdownCtx.Done(): + case s.newJobCh <- 
newJobIn{ + ctx: newJobCtx, + cancel: newJobCancel, + job: j, + }: + } + + select { + case <-newJobCtx.Done(): + case <-s.shutdownCtx.Done(): + } + + out := s.jobFromInternalJob(j) + return &out, nil +} + +func (s *scheduler) RemoveByTags(tags ...string) { + select { + case <-s.shutdownCtx.Done(): + case s.removeJobsByTagsCh <- tags: + } +} + +func (s *scheduler) RemoveJob(id uuid.UUID) error { + j := requestJobCtx(s.shutdownCtx, id, s.jobOutRequestCh) + if j == nil || j.id == uuid.Nil { + return ErrJobNotFound + } + select { + case <-s.shutdownCtx.Done(): + case s.removeJobCh <- id: + } + + return nil +} + +func (s *scheduler) Start() { + select { + case <-s.shutdownCtx.Done(): + case s.startCh <- struct{}{}: + <-s.startedCh + } +} + +func (s *scheduler) StopJobs() error { + select { + case <-s.shutdownCtx.Done(): + return nil + case s.stopCh <- struct{}{}: + } + select { + case err := <-s.stopErrCh: + return err + case <-time.After(s.exec.stopTimeout + 2*time.Second): + return ErrStopSchedulerTimedOut + } +} + +func (s *scheduler) Shutdown() error { + s.shutdownCancel() + select { + case err := <-s.stopErrCh: + return err + case <-time.After(s.exec.stopTimeout + 2*time.Second): + return ErrStopSchedulerTimedOut + } +} + +func (s *scheduler) Update(id uuid.UUID, jobDefinition JobDefinition, task Task, options ...JobOption) (Job, error) { + return s.addOrUpdateJob(id, jobDefinition, task, options) +} + +func (s *scheduler) JobsWaitingInQueue() int { + if s.exec.limitMode != nil && s.exec.limitMode.mode == LimitModeWait { + return len(s.exec.limitMode.in) + } + return 0 +} + +// ----------------------------------------------- +// ----------------------------------------------- +// ------------- Scheduler Options --------------- +// ----------------------------------------------- +// ----------------------------------------------- + +// SchedulerOption defines the function for setting +// options on the Scheduler. 
+type SchedulerOption func(*scheduler) error + +// WithClock sets the clock used by the Scheduler +// to the clock provided. See https://github.com/jonboulle/clockwork +func WithClock(clock clockwork.Clock) SchedulerOption { + return func(s *scheduler) error { + if clock == nil { + return ErrWithClockNil + } + s.clock = clock + return nil + } +} + +// WithDistributedElector sets the elector to be used by multiple +// Scheduler instances to determine who should be the leader. +// Only the leader runs jobs, while non-leaders wait and continue +// to check if a new leader has been elected. +func WithDistributedElector(elector Elector) SchedulerOption { + return func(s *scheduler) error { + if elector == nil { + return ErrWithDistributedElectorNil + } + s.exec.elector = elector + return nil + } +} + +// WithDistributedLocker sets the locker to be used by multiple +// Scheduler instances to ensure that only one instance of each +// job is run. +func WithDistributedLocker(locker Locker) SchedulerOption { + return func(s *scheduler) error { + if locker == nil { + return ErrWithDistributedLockerNil + } + s.exec.locker = locker + return nil + } +} + +// WithGlobalJobOptions sets JobOption's that will be applied to +// all jobs added to the scheduler. JobOption's set on the job +// itself will override if the same JobOption is set globally. +func WithGlobalJobOptions(jobOptions ...JobOption) SchedulerOption { + return func(s *scheduler) error { + s.globalJobOptions = jobOptions + return nil + } +} + +// LimitMode defines the modes used for handling jobs that reach +// the limit provided in WithLimitConcurrentJobs +type LimitMode int + +const ( + // LimitModeReschedule causes jobs reaching the limit set in + // WithLimitConcurrentJobs or WithSingletonMode to be skipped + // and rescheduled for the next run time rather than being + // queued up to wait. 
+ LimitModeReschedule = 1 + + // LimitModeWait causes jobs reaching the limit set in + // WithLimitConcurrentJobs or WithSingletonMode to wait + // in a queue until a slot becomes available to run. + // + // Note: this mode can produce unpredictable results as + // job execution order isn't guaranteed. For example, a job that + // executes frequently may pile up in the wait queue and be executed + // many times back to back when the queue opens. + // + // Warning: do not use this mode if your jobs will continue to stack + // up beyond the ability of the limit workers to keep up. An example of + // what NOT to do: + // + // s, _ := gocron.NewScheduler(gocron.WithLimitConcurrentJobs) + // s.NewJob( + // gocron.DurationJob( + // time.Second, + // Task{ + // Function: func() { + // time.Sleep(10 * time.Second) + // }, + // }, + // ), + // ) + LimitModeWait = 2 +) + +// WithLimitConcurrentJobs sets the limit and mode to be used by the +// Scheduler for limiting the number of jobs that may be running at +// a given time. +// +// Note: the limit mode selected for WithLimitConcurrentJobs takes initial +// precedence in the event you are also running a limit mode at the job level +// using WithSingletonMode. +// +// Warning: a single time consuming job can dominate your limit in the event +// you are running both the scheduler limit WithLimitConcurrentJobs(1, LimitModeWait) +// and a job limit WithSingletonMode(LimitModeReschedule). +func WithLimitConcurrentJobs(limit uint, mode LimitMode) SchedulerOption { + return func(s *scheduler) error { + if limit == 0 { + return ErrWithLimitConcurrentJobsZero + } + s.exec.limitMode = &limitModeConfig{ + mode: mode, + limit: limit, + in: make(chan jobIn, 1000), + singletonJobs: make(map[uuid.UUID]struct{}), + } + if mode == LimitModeReschedule { + s.exec.limitMode.rescheduleLimiter = make(chan struct{}, limit) + } + return nil + } +} + +// WithLocation sets the location (i.e. timezone) that the scheduler +// should operate within. 
In many systems time.Local is UTC. +// Default: time.Local +func WithLocation(location *time.Location) SchedulerOption { + return func(s *scheduler) error { + if location == nil { + return ErrWithLocationNil + } + s.location = location + return nil + } +} + +// WithLogger sets the logger to be used by the Scheduler. +func WithLogger(logger Logger) SchedulerOption { + return func(s *scheduler) error { + if logger == nil { + return ErrWithLoggerNil + } + s.logger = logger + s.exec.logger = logger + return nil + } +} + +// WithStopTimeout sets the amount of time the Scheduler should +// wait gracefully for jobs to complete before returning when +// StopJobs() or Shutdown() are called. +// Default: 10 * time.Second +func WithStopTimeout(timeout time.Duration) SchedulerOption { + return func(s *scheduler) error { + if timeout <= 0 { + return ErrWithStopTimeoutZeroOrNegative + } + s.exec.stopTimeout = timeout + return nil + } +} + +// WithMonitor sets the metrics provider to be used by the Scheduler. 
+func WithMonitor(monitor Monitor) SchedulerOption { + return func(s *scheduler) error { + if monitor == nil { + return ErrWithMonitorNil + } + s.exec.monitor = monitor + return nil + } +} diff --git a/vendor/github.com/go-co-op/gocron/v2/util.go b/vendor/github.com/go-co-op/gocron/v2/util.go new file mode 100644 index 000000000..a4e5b6fda --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/v2/util.go @@ -0,0 +1,118 @@ +package gocron + +import ( + "context" + "reflect" + "sync" + "time" + + "github.com/google/uuid" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +func callJobFuncWithParams(jobFunc any, params ...any) error { + if jobFunc == nil { + return nil + } + f := reflect.ValueOf(jobFunc) + if f.IsZero() { + return nil + } + if len(params) != f.Type().NumIn() { + return nil + } + in := make([]reflect.Value, len(params)) + for k, param := range params { + in[k] = reflect.ValueOf(param) + } + returnValues := f.Call(in) + for _, val := range returnValues { + i := val.Interface() + if err, ok := i.(error); ok { + return err + } + } + return nil +} + +func requestJob(id uuid.UUID, ch chan jobOutRequest) *internalJob { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + return requestJobCtx(ctx, id, ch) +} + +func requestJobCtx(ctx context.Context, id uuid.UUID, ch chan jobOutRequest) *internalJob { + resp := make(chan internalJob, 1) + select { + case ch <- jobOutRequest{ + id: id, + outChan: resp, + }: + case <-ctx.Done(): + return nil + } + var j internalJob + select { + case <-ctx.Done(): + return nil + case jobReceived := <-resp: + j = jobReceived + } + return &j +} + +func removeSliceDuplicatesInt(in []int) []int { + m := make(map[int]struct{}) + + for _, i := range in { + m[i] = struct{}{} + } + return maps.Keys(m) +} + +func convertAtTimesToDateTime(atTimes AtTimes, location *time.Location) ([]time.Time, error) { + if atTimes == nil { + return nil, errAtTimesNil + } + var atTimesDate []time.Time + for _, 
a := range atTimes() { + if a == nil { + return nil, errAtTimeNil + } + at := a() + if at.hours > 23 { + return nil, errAtTimeHours + } else if at.minutes > 59 || at.seconds > 59 { + return nil, errAtTimeMinSec + } + atTimesDate = append(atTimesDate, at.time(location)) + } + slices.SortStableFunc(atTimesDate, ascendingTime) + return atTimesDate, nil +} + +func ascendingTime(a, b time.Time) int { + return a.Compare(b) +} + +type waitGroupWithMutex struct { + wg sync.WaitGroup + mu sync.Mutex +} + +func (w *waitGroupWithMutex) Add(delta int) { + w.mu.Lock() + defer w.mu.Unlock() + w.wg.Add(delta) +} + +func (w *waitGroupWithMutex) Done() { + w.wg.Done() +} + +func (w *waitGroupWithMutex) Wait() { + w.mu.Lock() + defer w.mu.Unlock() + w.wg.Wait() +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60b..000000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... 
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 000000000..7ec5ac7ea --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. 
(Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f1..a502fdc51 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f9..3e9a61889 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. 
This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). ###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec..dc60082d3 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc9..b2a0bc871 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cdc..c35112927 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207aeb..5232b4867 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -180,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { @@ -292,3 +351,15 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. 
+func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 000000000..339a959a7 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. 
+func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 000000000..3167b643d --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). 
+// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. +func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. 
+// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md index cad608357..42970da80 100644 --- a/vendor/github.com/jonboulle/clockwork/README.md +++ b/vendor/github.com/jonboulle/clockwork/README.md @@ -2,9 +2,9 @@ [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#utilities) -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/jonboulle/clockwork/CI?style=flat-square)](https://github.com/jonboulle/clockwork/actions?query=workflow%3ACI) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/jonboulle/clockwork/ci.yaml?style=flat-square)](https://github.com/jonboulle/clockwork/actions?query=workflow%3ACI) [![Go Report Card](https://goreportcard.com/badge/github.com/jonboulle/clockwork?style=flat-square)](https://goreportcard.com/report/github.com/jonboulle/clockwork) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.11-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.15-61CFDD.svg?style=flat-square) [![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/jonboulle/clockwork) **A simple fake clock for Go.** diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go 
index 1018051f4..3206b36e4 100644 --- a/vendor/github.com/jonboulle/clockwork/clockwork.go +++ b/vendor/github.com/jonboulle/clockwork/clockwork.go @@ -1,30 +1,38 @@ package clockwork import ( + "context" + "sort" "sync" "time" ) -// Clock provides an interface that packages can use instead of directly -// using the time module, so that chronology-related behavior can be tested +// Clock provides an interface that packages can use instead of directly using +// the [time] module, so that chronology-related behavior can be tested. type Clock interface { After(d time.Duration) <-chan time.Time Sleep(d time.Duration) Now() time.Time Since(t time.Time) time.Duration NewTicker(d time.Duration) Ticker + NewTimer(d time.Duration) Timer + AfterFunc(d time.Duration, f func()) Timer } -// FakeClock provides an interface for a clock which can be -// manually advanced through time +// FakeClock provides an interface for a clock which can be manually advanced +// through time. +// +// FakeClock maintains a list of "waiters," which consists of all callers +// waiting on the underlying clock (i.e. Tickers and Timers including callers of +// Sleep or After). Users can call BlockUntil to block until the clock has an +// expected number of waiters. type FakeClock interface { Clock // Advance advances the FakeClock to a new point in time, ensuring any existing - // sleepers are notified appropriately before returning + // waiters are notified appropriately before returning. Advance(d time.Duration) - // BlockUntil will block until the FakeClock has the given number of - // sleepers (callers of Sleep or After) - BlockUntil(n int) + // BlockUntil blocks until the FakeClock has the given number of waiters. + BlockUntil(waiters int) } // NewRealClock returns a Clock which simply delegates calls to the actual time @@ -35,10 +43,11 @@ func NewRealClock() Clock { // NewFakeClock returns a FakeClock implementation which can be // manually advanced through time for testing. 
The initial time of the -// FakeClock will be an arbitrary non-zero time. +// FakeClock will be the current system time. +// +// Tests that require a deterministic time must use NewFakeClockAt. func NewFakeClock() FakeClock { - // use a fixture that does not fulfill Time.IsZero() - return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC)) + return NewFakeClockAt(time.Now()) } // NewFakeClockAt returns a FakeClock initialised at the given time.Time. @@ -67,129 +76,274 @@ func (rc *realClock) Since(t time.Time) time.Duration { } func (rc *realClock) NewTicker(d time.Duration) Ticker { - return &realTicker{time.NewTicker(d)} + return realTicker{time.NewTicker(d)} } -type fakeClock struct { - sleepers []*sleeper - blockers []*blocker - time time.Time +func (rc *realClock) NewTimer(d time.Duration) Timer { + return realTimer{time.NewTimer(d)} +} - l sync.RWMutex +func (rc *realClock) AfterFunc(d time.Duration, f func()) Timer { + return realTimer{time.AfterFunc(d, f)} } -// sleeper represents a caller of After or Sleep -type sleeper struct { - until time.Time - done chan time.Time +type fakeClock struct { + // l protects all attributes of the clock, including all attributes of all + // waiters and blockers. + l sync.RWMutex + waiters []expirer + blockers []*blocker + time time.Time } -// blocker represents a caller of BlockUntil +// blocker is a caller of BlockUntil. type blocker struct { count int - ch chan struct{} + + // ch is closed when the underlying clock has the specificed number of blockers. + ch chan struct{} } -// After mimics time.After; it waits for the given duration to elapse on the -// fakeClock, then sends the current time on the returned channel. 
-func (fc *fakeClock) After(d time.Duration) <-chan time.Time { - fc.l.Lock() - defer fc.l.Unlock() - now := fc.time - done := make(chan time.Time, 1) - if d.Nanoseconds() <= 0 { - // special case - trigger immediately - done <- now - } else { - // otherwise, add to the set of sleepers - s := &sleeper{ - until: now.Add(d), - done: done, - } - fc.sleepers = append(fc.sleepers, s) - // and notify any blockers - fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) - } - return done +// expirer is a timer or ticker that expires at some point in the future. +type expirer interface { + // expire the expirer at the given time, returning the desired duration until + // the next expiration, if any. + expire(now time.Time) (next *time.Duration) + + // Get and set the expiration time. + expiry() time.Time + setExpiry(time.Time) } -// notifyBlockers notifies all the blockers waiting until the -// given number of sleepers are waiting on the fakeClock. It -// returns an updated slice of blockers (i.e. those still waiting) -func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { - for _, b := range blockers { - if b.count == count { - close(b.ch) - } else { - newBlockers = append(newBlockers, b) - } - } - return +// After mimics [time.After]; it waits for the given duration to elapse on the +// fakeClock, then sends the current time on the returned channel. +func (fc *fakeClock) After(d time.Duration) <-chan time.Time { + return fc.NewTimer(d).Chan() } -// Sleep blocks until the given duration has passed on the fakeClock +// Sleep blocks until the given duration has passed on the fakeClock. 
func (fc *fakeClock) Sleep(d time.Duration) { <-fc.After(d) } -// Time returns the current time of the fakeClock +// Now returns the current time of the fakeClock func (fc *fakeClock) Now() time.Time { fc.l.RLock() - t := fc.time - fc.l.RUnlock() - return t + defer fc.l.RUnlock() + return fc.time } -// Since returns the duration that has passed since the given time on the fakeClock +// Since returns the duration that has passed since the given time on the +// fakeClock. func (fc *fakeClock) Since(t time.Time) time.Duration { return fc.Now().Sub(t) } +// NewTicker returns a Ticker that will expire only after calls to +// fakeClock.Advance() have moved the clock past the given duration. func (fc *fakeClock) NewTicker(d time.Duration) Ticker { - ft := &fakeTicker{ - c: make(chan time.Time, 1), - stop: make(chan bool, 1), - clock: fc, - period: d, + var ft *fakeTicker + ft = &fakeTicker{ + firer: newFirer(), + d: d, + reset: func(d time.Duration) { fc.set(ft, d) }, + stop: func() { fc.stop(ft) }, } - ft.runTickThread() + fc.set(ft, d) return ft } -// Advance advances fakeClock to a new point in time, ensuring channels from any -// previous invocations of After are notified appropriately before returning +// NewTimer returns a Timer that will fire only after calls to +// fakeClock.Advance() have moved the clock past the given duration. +func (fc *fakeClock) NewTimer(d time.Duration) Timer { + return fc.newTimer(d, nil) +} + +// AfterFunc mimics [time.AfterFunc]; it returns a Timer that will invoke the +// given function only after calls to fakeClock.Advance() have moved the clock +// past the given duration. +func (fc *fakeClock) AfterFunc(d time.Duration, f func()) Timer { + return fc.newTimer(d, f) +} + +// newTimer returns a new timer, using an optional afterFunc. 
+func (fc *fakeClock) newTimer(d time.Duration, afterfunc func()) *fakeTimer { + var ft *fakeTimer + ft = &fakeTimer{ + firer: newFirer(), + reset: func(d time.Duration) bool { + fc.l.Lock() + defer fc.l.Unlock() + // fc.l must be held across the calls to stopExpirer & setExpirer. + stopped := fc.stopExpirer(ft) + fc.setExpirer(ft, d) + return stopped + }, + stop: func() bool { return fc.stop(ft) }, + + afterFunc: afterfunc, + } + fc.set(ft, d) + return ft +} + +// Advance advances fakeClock to a new point in time, ensuring waiters and +// blockers are notified appropriately before returning. func (fc *fakeClock) Advance(d time.Duration) { fc.l.Lock() defer fc.l.Unlock() end := fc.time.Add(d) - var newSleepers []*sleeper - for _, s := range fc.sleepers { - if end.Sub(s.until) >= 0 { - s.done <- end - } else { - newSleepers = append(newSleepers, s) + // Expire the earliest waiter until the earliest waiter's expiration is after + // end. + // + // We don't iterate because the callback of the waiter might register a new + // waiter, so the list of waiters might change as we execute this. + for len(fc.waiters) > 0 && !end.Before(fc.waiters[0].expiry()) { + w := fc.waiters[0] + fc.waiters = fc.waiters[1:] + + // Use the waiter's expriation as the current time for this expiration. + now := w.expiry() + fc.time = now + if d := w.expire(now); d != nil { + // Set the new exipration if needed. + fc.setExpirer(w, *d) } } - fc.sleepers = newSleepers - fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) fc.time = end } -// BlockUntil will block until the fakeClock has the given number of sleepers -// (callers of Sleep or After) +// BlockUntil blocks until the fakeClock has the given number of waiters. +// +// Prefer BlockUntilContext, which offers context cancellation to prevent +// deadlock. +// +// Deprecation warning: This function might be deprecated in later versions. 
func (fc *fakeClock) BlockUntil(n int) { - fc.l.Lock() - // Fast path: current number of sleepers is what we're looking for - if len(fc.sleepers) == n { - fc.l.Unlock() + b := fc.newBlocker(n) + if b == nil { return } - // Otherwise, set up a new blocker + <-b.ch +} + +// BlockUntilContext blocks until the fakeClock has the given number of waiters +// or the context is cancelled. +func (fc *fakeClock) BlockUntilContext(ctx context.Context, n int) error { + b := fc.newBlocker(n) + if b == nil { + return nil + } + + select { + case <-b.ch: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (fc *fakeClock) newBlocker(n int) *blocker { + fc.l.Lock() + defer fc.l.Unlock() + // Fast path: we already have >= n waiters. + if len(fc.waiters) >= n { + return nil + } + // Set up a new blocker to wait for more waiters. b := &blocker{ count: n, ch: make(chan struct{}), } fc.blockers = append(fc.blockers, b) - fc.l.Unlock() - <-b.ch + return b +} + +// stop stops an expirer, returning true if the expirer was stopped. +func (fc *fakeClock) stop(e expirer) bool { + fc.l.Lock() + defer fc.l.Unlock() + return fc.stopExpirer(e) +} + +// stopExpirer stops an expirer, returning true if the expirer was stopped. +// +// The caller must hold fc.l. +func (fc *fakeClock) stopExpirer(e expirer) bool { + for i, t := range fc.waiters { + if t == e { + // Remove element, maintaining order. + copy(fc.waiters[i:], fc.waiters[i+1:]) + fc.waiters[len(fc.waiters)-1] = nil + fc.waiters = fc.waiters[:len(fc.waiters)-1] + return true + } + } + return false +} + +// set sets an expirer to expire at a future point in time. +func (fc *fakeClock) set(e expirer, d time.Duration) { + fc.l.Lock() + defer fc.l.Unlock() + fc.setExpirer(e, d) +} + +// setExpirer sets an expirer to expire at a future point in time. +// +// The caller must hold fc.l. +func (fc *fakeClock) setExpirer(e expirer, d time.Duration) { + if d.Nanoseconds() <= 0 { + // special case - trigger immediately, never reset. 
+ // + // TODO: Explain what cases this covers. + e.expire(fc.time) + return + } + // Add the expirer to the set of waiters and notify any blockers. + e.setExpiry(fc.time.Add(d)) + fc.waiters = append(fc.waiters, e) + sort.Slice(fc.waiters, func(i int, j int) bool { + return fc.waiters[i].expiry().Before(fc.waiters[j].expiry()) + }) + + // Notify blockers of our new waiter. + var blocked []*blocker + count := len(fc.waiters) + for _, b := range fc.blockers { + if b.count <= count { + close(b.ch) + continue + } + blocked = append(blocked, b) + } + fc.blockers = blocked +} + +// firer is used by fakeTimer and fakeTicker used to help implement expirer. +type firer struct { + // The channel associated with the firer, used to send expriation times. + c chan time.Time + + // The time when the firer expires. Only meaningful if the firer is currently + // one of a fakeClock's waiters. + exp time.Time +} + +func newFirer() firer { + return firer{c: make(chan time.Time, 1)} +} + +func (f *firer) Chan() <-chan time.Time { + return f.c +} + +// expiry implements expirer. +func (f *firer) expiry() time.Time { + return f.exp +} + +// setExpiry implements expirer. +func (f *firer) setExpiry(t time.Time) { + f.exp = t } diff --git a/vendor/github.com/jonboulle/clockwork/context.go b/vendor/github.com/jonboulle/clockwork/context.go new file mode 100644 index 000000000..edbb368f0 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/context.go @@ -0,0 +1,25 @@ +package clockwork + +import ( + "context" +) + +// contextKey is private to this package so we can ensure uniqueness here. This +// type identifies context values provided by this package. +type contextKey string + +// keyClock provides a clock for injecting during tests. If absent, a real clock should be used. +var keyClock = contextKey("clock") // clockwork.Clock + +// AddToContext creates a derived context that references the specified clock. 
+func AddToContext(ctx context.Context, clock Clock) context.Context { + return context.WithValue(ctx, keyClock, clock) +} + +// FromContext extracts a clock from the context. If not present, a real clock is returned. +func FromContext(ctx context.Context) Clock { + if clock, ok := ctx.Value(keyClock).(Clock); ok { + return clock + } + return NewRealClock() +} diff --git a/vendor/github.com/jonboulle/clockwork/ticker.go b/vendor/github.com/jonboulle/clockwork/ticker.go index 32b5d01e7..b68e4d777 100644 --- a/vendor/github.com/jonboulle/clockwork/ticker.go +++ b/vendor/github.com/jonboulle/clockwork/ticker.go @@ -1,72 +1,48 @@ package clockwork -import ( - "time" -) +import "time" -// Ticker provides an interface which can be used instead of directly -// using the ticker within the time module. The real-time ticker t -// provides ticks through t.C which becomes now t.Chan() to make -// this channel requirement definable in this interface. +// Ticker provides an interface which can be used instead of directly using +// [time.Ticker]. The real-time ticker t provides ticks through t.C which +// becomes t.Chan() to make this channel requirement definable in this +// interface. type Ticker interface { Chan() <-chan time.Time + Reset(d time.Duration) Stop() } type realTicker struct{ *time.Ticker } -func (rt *realTicker) Chan() <-chan time.Time { - return rt.C +func (r realTicker) Chan() <-chan time.Time { + return r.C } type fakeTicker struct { - c chan time.Time - stop chan bool - clock FakeClock - period time.Duration + firer + + // reset and stop provide the implementation of the respective exported + // functions. + reset func(d time.Duration) + stop func() + + // The duration of the ticker. 
+ d time.Duration } -func (ft *fakeTicker) Chan() <-chan time.Time { - return ft.c +func (f *fakeTicker) Reset(d time.Duration) { + f.reset(d) } -func (ft *fakeTicker) Stop() { - ft.stop <- true +func (f *fakeTicker) Stop() { + f.stop() } -// runTickThread initializes a background goroutine to send the tick time to the ticker channel -// after every period. Tick events are discarded if the underlying ticker channel does not have -// enough capacity. -func (ft *fakeTicker) runTickThread() { - nextTick := ft.clock.Now().Add(ft.period) - next := ft.clock.After(ft.period) - go func() { - for { - select { - case <-ft.stop: - return - case <-next: - // We send the time that the tick was supposed to occur at. - tick := nextTick - // Before sending the tick, we'll compute the next tick time and star the clock.After call. - now := ft.clock.Now() - // First, figure out how many periods there have been between "now" and the time we were - // supposed to have trigged, then advance over all of those. - skipTicks := (now.Sub(tick) + ft.period - 1) / ft.period - nextTick = nextTick.Add(skipTicks * ft.period) - // Now, keep advancing until we are past now. This should happen at most once. - for !nextTick.After(now) { - nextTick = nextTick.Add(ft.period) - } - // Figure out how long between now and the next scheduled tick, then wait that long. - remaining := nextTick.Sub(now) - next = ft.clock.After(remaining) - // Finally, we can actually send the tick. - select { - case ft.c <- tick: - default: - } - } - } - }() +func (f *fakeTicker) expire(now time.Time) *time.Duration { + // Never block on expiration. 
+ select { + case f.c <- now: + default: + } + return &f.d } diff --git a/vendor/github.com/jonboulle/clockwork/timer.go b/vendor/github.com/jonboulle/clockwork/timer.go new file mode 100644 index 000000000..6f928b3dd --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/timer.go @@ -0,0 +1,53 @@ +package clockwork + +import "time" + +// Timer provides an interface which can be used instead of directly using +// [time.Timer]. The real-time timer t provides events through t.C which becomes +// t.Chan() to make this channel requirement definable in this interface. +type Timer interface { + Chan() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +type realTimer struct{ *time.Timer } + +func (r realTimer) Chan() <-chan time.Time { + return r.C +} + +type fakeTimer struct { + firer + + // reset and stop provide the implmenetation of the respective exported + // functions. + reset func(d time.Duration) bool + stop func() bool + + // If present when the timer fires, the timer calls afterFunc in its own + // goroutine rather than sending the time on Chan(). + afterFunc func() +} + +func (f *fakeTimer) Reset(d time.Duration) bool { + return f.reset(d) +} + +func (f *fakeTimer) Stop() bool { + return f.stop() +} + +func (f *fakeTimer) expire(now time.Time) *time.Duration { + if f.afterFunc != nil { + go f.afterFunc() + return nil + } + + // Never block on expiration. 
+ select { + case f.c <- now: + default: + } + return nil +} diff --git a/vendor/github.com/robfig/cron/v3/.gitignore b/vendor/github.com/robfig/cron/v3/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/robfig/cron/v3/.travis.yml b/vendor/github.com/robfig/cron/v3/.travis.yml new file mode 100644 index 000000000..4f2ee4d97 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/robfig/cron/v3/LICENSE b/vendor/github.com/robfig/cron/v3/LICENSE new file mode 100644 index 000000000..3a0f627ff --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/v3/README.md b/vendor/github.com/robfig/cron/v3/README.md new file mode 100644 index 000000000..984c537c0 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/README.md @@ -0,0 +1,125 @@ +[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) +[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) + +# cron + +Cron V3 has been released! + +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. + +Refer to the documentation here: +http://godoc.org/github.com/robfig/cron + +The rest of this document describes the the advances in v3 and a list of +breaking changes for users that wish to upgrade from an earlier version. + +## Upgrading to v3 (June 2019) + +cron v3 is a major upgrade to the library that addresses all outstanding bugs, +feature requests, and rough edges. It is based on a merge of master which +contains various fixes to issues found over the years and the v2 branch which +contains some backwards-incompatible features like the ability to remove cron +jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like +the timezone support, and fixes a number of bugs. + +New features: + +- Support for Go modules. 
Callers must now import this library as + `github.com/robfig/cron/v3`, instead of `gopkg.in/...` + +- Fixed bugs: + - 0f01e6b parser: fix combining of Dow and Dom (#70) + - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157) + - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144) + - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97) + - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206) + +- Standard cron spec parsing by default (first field is "minute"), with an easy + way to opt into the seconds field (quartz-compatible). Although, note that the + year field (optional in Quartz) is not supported. + +- Extensible, key/value logging via an interface that complies with + the https://github.com/go-logr/logr project. + +- The new Chain & JobWrapper types allow you to install "interceptors" to add + cross-cutting behavior like the following: + - Recover any panics from jobs + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + - Notification when jobs are completed + +It is backwards incompatible with both v1 and v2. These updates are required: + +- The v1 branch accepted an optional seconds field at the beginning of the cron + spec. This is non-standard and has led to a lot of confusion. The new default + parser conforms to the standard as described by [the Cron wikipedia page]. 
+ + UPDATING: To retain the old behavior, construct your Cron with a custom + parser: + + // Seconds field, required + cron.New(cron.WithSeconds()) + + // Seconds field, optional + cron.New( + cron.WithParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)) + +- The Cron type now accepts functional options on construction rather than the + previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). + + UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be + updated to provide those values on construction. + +- CRON_TZ is now the recommended way to specify the timezone of a single + schedule, which is sanctioned by the specification. The legacy "TZ=" prefix + will continue to be supported since it is unambiguous and easy to do so. + + UPDATING: No update is required. + +- By default, cron will no longer recover panics in jobs that it runs. + Recovering can be surprising (see issue #192) and seems to be at odds with + typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option + has been removed to accommodate the more general JobWrapper type. + + UPDATING: To opt into panic recovery and configure the panic logger: + + cron.New(cron.WithChain( + cron.Recover(logger), // or use cron.DefaultLogger + )) + +- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was + removed, since it is duplicative with the leveled logging. + + UPDATING: Callers should use `WithLogger` and specify a logger that does not + discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`: + + cron.New( + cron.WithLogger(cron.VerbosePrintfLogger(logger))) + + +### Background - Cron spec format + +There are two cron spec formats in common usage: + +- The "standard" cron format, described on [the Cron wikipedia page] and used by + the cron Linux system utility. 
+ +- The cron format used by [the Quartz Scheduler], commonly used for scheduled + jobs in Java software + +[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron +[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html + +The original version of this package included an optional "seconds" field, which +made it incompatible with both of these formats. Now, the "standard" format is +the default format accepted, and the Quartz format is opt-in. diff --git a/vendor/github.com/robfig/cron/v3/chain.go b/vendor/github.com/robfig/cron/v3/chain.go new file mode 100644 index 000000000..9565b418e --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/chain.go @@ -0,0 +1,92 @@ +package cron + +import ( + "fmt" + "runtime" + "sync" + "time" +) + +// JobWrapper decorates the given Job with some behavior. +type JobWrapper func(Job) Job + +// Chain is a sequence of JobWrappers that decorates submitted jobs with +// cross-cutting behaviors like logging or synchronization. +type Chain struct { + wrappers []JobWrapper +} + +// NewChain returns a Chain consisting of the given JobWrappers. +func NewChain(c ...JobWrapper) Chain { + return Chain{c} +} + +// Then decorates the given job with all JobWrappers in the chain. +// +// This: +// NewChain(m1, m2, m3).Then(job) +// is equivalent to: +// m1(m2(m3(job))) +func (c Chain) Then(j Job) Job { + for i := range c.wrappers { + j = c.wrappers[len(c.wrappers)-i-1](j) + } + return j +} + +// Recover panics in wrapped jobs and log them with the provided logger. 
+func Recover(logger Logger) JobWrapper { + return func(j Job) Job { + return FuncJob(func() { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err, ok := r.(error) + if !ok { + err = fmt.Errorf("%v", r) + } + logger.Error(err, "panic", "stack", "...\n"+string(buf)) + } + }() + j.Run() + }) + } +} + +// DelayIfStillRunning serializes jobs, delaying subsequent runs until the +// previous one is complete. Jobs running after a delay of more than a minute +// have the delay logged at Info. +func DelayIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var mu sync.Mutex + return FuncJob(func() { + start := time.Now() + mu.Lock() + defer mu.Unlock() + if dur := time.Since(start); dur > time.Minute { + logger.Info("delay", "duration", dur) + } + j.Run() + }) + } +} + +// SkipIfStillRunning skips an invocation of the Job if a previous invocation is +// still running. It logs skips to the given logger at Info level. +func SkipIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var ch = make(chan struct{}, 1) + ch <- struct{}{} + return FuncJob(func() { + select { + case v := <-ch: + j.Run() + ch <- v + default: + logger.Info("skip") + } + }) + } +} diff --git a/vendor/github.com/robfig/cron/v3/constantdelay.go b/vendor/github.com/robfig/cron/v3/constantdelay.go new file mode 100644 index 000000000..cd6e7b1be --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. 
+func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/v3/cron.go b/vendor/github.com/robfig/cron/v3/cron.go new file mode 100644 index 000000000..c7e917665 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/cron.go @@ -0,0 +1,355 @@ +package cron + +import ( + "context" + "sort" + "sync" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + chain Chain + stop chan struct{} + add chan *Entry + remove chan EntryID + snapshot chan chan []Entry + running bool + logger Logger + runningMu sync.Mutex + location *time.Location + parser ScheduleParser + nextID EntryID + jobWaiter sync.WaitGroup +} + +// ScheduleParser is an interface for schedule spec parsers that return a Schedule +type ScheduleParser interface { + Parse(spec string) (Schedule, error) +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// Schedule describes a job's duty cycle. +type Schedule interface { + // Next returns the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// EntryID identifies an entry within a Cron instance +type EntryID int + +// Entry consists of a schedule and the func to execute on that schedule. 
+type Entry struct { + // ID is the cron-assigned ID of this entry, which may be used to look up a + // snapshot or remove it. + ID EntryID + + // Schedule on which this job should be run. + Schedule Schedule + + // Next time the job will run, or the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // Prev is the last time this job was run, or the zero time if never. + Prev time.Time + + // WrappedJob is the thing to run when the Schedule is activated. + WrappedJob Job + + // Job is the thing that was submitted to cron. + // It is kept around so that user code that needs to get at the job later, + // e.g. via Entries() can do so. + Job Job +} + +// Valid returns true if this is not the zero entry. +func (e Entry) Valid() bool { return e.ID != 0 } + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, modified by the given options. +// +// Available Settings +// +// Time Zone +// Description: The time zone in which schedules are interpreted +// Default: time.Local +// +// Parser +// Description: Parser converts cron spec strings into cron.Schedules. +// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron +// +// Chain +// Description: Wrap submitted jobs to customize behavior. +// Default: A chain that recovers panics and logs them to stderr. +// +// See "cron.With*" to modify the default behavior. 
+func New(opts ...Option) *Cron { + c := &Cron{ + entries: nil, + chain: NewChain(), + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan chan []Entry), + remove: make(chan EntryID), + running: false, + runningMu: sync.Mutex{}, + logger: DefaultLogger, + location: time.Local, + parser: standardParser, + } + for _, opt := range opts { + opt(c) + } + return c +} + +// FuncJob is a wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { + schedule, err := c.parser.Parse(spec) + if err != nil { + return 0, err + } + return c.Schedule(schedule, cmd), nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +// The job is wrapped with the configured Chain. +func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { + c.runningMu.Lock() + defer c.runningMu.Unlock() + c.nextID++ + entry := &Entry{ + ID: c.nextID, + Schedule: schedule, + WrappedJob: c.chain.Then(cmd), + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + } else { + c.add <- entry + } + return entry.ID +} + +// Entries returns a snapshot of the cron entries. 
+func (c *Cron) Entries() []Entry { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + replyChan := make(chan []Entry, 1) + c.snapshot <- replyChan + return <-replyChan + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Entry returns a snapshot of the given entry, or nil if it couldn't be found. +func (c *Cron) Entry(id EntryID) Entry { + for _, entry := range c.Entries() { + if id == entry.ID { + return entry + } + } + return Entry{} +} + +// Remove an entry from being run in the future. +func (c *Cron) Remove(id EntryID) { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.remove <- id + } else { + c.removeEntry(id) + } +} + +// Start the cron scheduler in its own goroutine, or no-op if already started. +func (c *Cron) Start() { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + c.runningMu.Lock() + if c.running { + c.runningMu.Unlock() + return + } + c.running = true + c.runningMu.Unlock() + c.run() +} + +// run the scheduler.. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + c.logger.Info("start") + + // Figure out the next activation times for each entry. + now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. 
+ timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + c.logger.Info("wake", "now", now) + + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + c.startJob(e.WrappedJob) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) + + case replyChan := <-c.snapshot: + replyChan <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + c.logger.Info("stop") + return + + case id := <-c.remove: + timer.Stop() + now = c.now() + c.removeEntry(id) + c.logger.Info("removed", "entry", id) + } + + break + } + } +} + +// startJob runs the given job in a new goroutine. +func (c *Cron) startJob(j Job) { + c.jobWaiter.Add(1) + go func() { + defer c.jobWaiter.Done() + j.Run() + }() +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +// A context is returned so the caller can wait for running jobs to complete. +func (c *Cron) Stop() context.Context { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.stop <- struct{}{} + c.running = false + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + c.jobWaiter.Wait() + cancel() + }() + return ctx +} + +// entrySnapshot returns a copy of the current cron entry list. 
+func (c *Cron) entrySnapshot() []Entry { + var entries = make([]Entry, len(c.entries)) + for i, e := range c.entries { + entries[i] = *e + } + return entries +} + +func (c *Cron) removeEntry(id EntryID) { + var entries []*Entry + for _, e := range c.entries { + if e.ID != id { + entries = append(entries, e) + } + } + c.entries = entries +} diff --git a/vendor/github.com/robfig/cron/v3/doc.go b/vendor/github.com/robfig/cron/v3/doc.go new file mode 100644 index 000000000..fa5d08b4d --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/doc.go @@ -0,0 +1,231 @@ +/* +Package cron implements a cron spec parser and job runner. + +Installation + +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") }) + c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 5 space-separated fields. + + Field name | Mandatory? 
| Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? + +Month and Day-of-week field values are case insensitive. "SUN", "Sun", and +"sun" are equally accepted. + +The specific interpretation of the format is based on the Cron Wikipedia page: +https://en.wikipedia.org/wiki/Cron + +Alternative Formats + +Alternative Cron expression formats support other fields like seconds. You can +implement that by creating a custom Parser as follows. + + cron.New( + cron.WithParser( + cron.NewParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))) + +Since adding Seconds is the most common modification to the standard cron spec, +cron provides a builtin function to do that, which is equivalent to the custom +parser you saw earlier, except that its seconds field is REQUIRED: + + cron.New(cron.WithSeconds()) + +That emulates Quartz, the most popular alternative Cron schedule format: +http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html + +Special Characters + +Asterisk ( * ) + +The asterisk indicates that the cron expression will match for all values of the +field; e.g., using an asterisk in the 5th field (month) would indicate every +month. + +Slash ( / ) + +Slashes are used to describe increments of ranges. For example 3-59/15 in the +1st field (minutes) would indicate the 3rd minute of the hour and every 15 +minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", +that is, an increment over the largest possible range of the field. The form +"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the +increment until the end of that specific range. It does not wrap around. 
+ +Comma ( , ) + +Commas are used to separate items of a list. For example, using "MON,WED,FRI" in +the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. + +Hyphen ( - ) + +Hyphens are used to define ranges. For example, 9-17 would indicate every +hour between 9am and 5pm inclusive. + +Question mark ( ? ) + +Question mark may be used instead of '*' for leaving either day-of-month or +day-of-week blank. + +Predefined schedules + +You may use one of several pre-defined schedules in place of a cron expression. + + Entry | Description | Equivalent To + ----- | ----------- | ------------- + @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * + @monthly | Run once a month, midnight, first of month | 0 0 1 * * + @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 + @daily (or @midnight) | Run once a day, midnight | 0 0 * * * + @hourly | Run once an hour, beginning of hour | 0 * * * * + +Intervals + +You may also schedule a job to execute at fixed intervals, starting at the time it's added +or cron is run. This is supported by formatting the cron spec like this: + + @every + +where "duration" is a string accepted by time.ParseDuration +(http://golang.org/pkg/time/#ParseDuration). + +For example, "@every 1h30m10s" would indicate a schedule that activates after +1 hour, 30 minutes, 10 seconds, and then every interval after that. + +Note: The interval does not take the job runtime into account. For example, +if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, +it will have only 2 minutes of idle time between each run. + +Time zones + +By default, all interpretation and scheduling is done in the machine's local +time zone (time.Local). 
You can specify a different time zone on construction: + + cron.New( + cron.WithLocation(time.UTC)) + +Individual cron schedules may also override the time zone they are to be +interpreted in by providing an additional space-separated field at the beginning +of the cron spec, of the form "CRON_TZ=Asia/Tokyo". + +For example: + + # Runs at 6am in time.Local + cron.New().AddFunc("0 6 * * ?", ...) + + # Runs at 6am in America/New_York + nyc, _ := time.LoadLocation("America/New_York") + c := cron.New(cron.WithLocation(nyc)) + c.AddFunc("0 6 * * ?", ...) + + # Runs at 6am in Asia/Tokyo + cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) + + # Runs at 6am in Asia/Tokyo + c := cron.New(cron.WithLocation(nyc)) + c.SetLocation("America/New_York") + c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) + +The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility. + +Be aware that jobs scheduled during daylight-savings leap-ahead transitions will +not be run! + +Job Wrappers + +A Cron runner may be configured with a chain of job wrappers to add +cross-cutting functionality to all submitted jobs. For example, they may be used +to achieve the following effects: + + - Recover any panics from jobs (activated by default) + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + +Install wrappers for all jobs added to a cron using the `cron.WithChain` option: + + cron.New(cron.WithChain( + cron.SkipIfStillRunning(logger), + )) + +Install wrappers for individual jobs by explicitly wrapping them: + + job = cron.NewChain( + cron.SkipIfStillRunning(logger), + ).Then(job) + +Thread safety + +Since the Cron service runs concurrently with the calling code, some amount of +care must be taken to ensure proper synchronization. 
+ +All cron methods are designed to be correctly synchronized as long as the caller +ensures that invocations have a clear happens-before ordering between them. + +Logging + +Cron defines a Logger interface that is a subset of the one defined in +github.com/go-logr/logr. It has two logging levels (Info and Error), and +parameters are key/value pairs. This makes it possible for cron logging to plug +into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided +to wrap the standard library *log.Logger. + +For additional insight into Cron operations, verbose logging may be activated +which will record job runs, scheduling decisions, and added or removed jobs. +Activate it with a one-off logger as follows: + + cron.New( + cron.WithLogger( + cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) + + +Implementation + +Cron entries are stored in an array, sorted by their next activation time. Cron +sleeps until the next job is due to be run. + +Upon waking: + - it runs each entry that is active on that second + - it calculates the next run times for the jobs that were run + - it re-sorts the array of entries by next activation time. + - it goes to sleep until the soonest job. +*/ +package cron diff --git a/vendor/github.com/robfig/cron/v3/logger.go b/vendor/github.com/robfig/cron/v3/logger.go new file mode 100644 index 000000000..b4efcc053 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/logger.go @@ -0,0 +1,86 @@ +package cron + +import ( + "io/ioutil" + "log" + "os" + "strings" + "time" +) + +// DefaultLogger is used by Cron if none is specified. +var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) + +// DiscardLogger can be used by callers to discard all log messages. +var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0)) + +// Logger is the interface used in this package for logging, so that any backend +// can be plugged in. 
It is a subset of the github.com/go-logr/logr interface. +type Logger interface { + // Info logs routine messages about cron's operation. + Info(msg string, keysAndValues ...interface{}) + // Error logs an error condition. + Error(err error, msg string, keysAndValues ...interface{}) +} + +// PrintfLogger wraps a Printf-based logger (such as the standard library "log") +// into an implementation of the Logger interface which logs errors only. +func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, false} +} + +// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library +// "log") into an implementation of the Logger interface which logs everything. +func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, true} +} + +type printfLogger struct { + logger interface{ Printf(string, ...interface{}) } + logInfo bool +} + +func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) { + if pl.logInfo { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)), + append([]interface{}{msg}, keysAndValues...)...) + } +} + +func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)+2), + append([]interface{}{msg, "error", err}, keysAndValues...)...) +} + +// formatString returns a logfmt-like format string for the number of +// key/values. +func formatString(numKeysAndValues int) string { + var sb strings.Builder + sb.WriteString("%s") + if numKeysAndValues > 0 { + sb.WriteString(", ") + } + for i := 0; i < numKeysAndValues/2; i++ { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString("%v=%v") + } + return sb.String() +} + +// formatTimes formats any time.Time values as RFC3339. 
+func formatTimes(keysAndValues []interface{}) []interface{} { + var formattedArgs []interface{} + for _, arg := range keysAndValues { + if t, ok := arg.(time.Time); ok { + arg = t.Format(time.RFC3339) + } + formattedArgs = append(formattedArgs, arg) + } + return formattedArgs +} diff --git a/vendor/github.com/robfig/cron/v3/option.go b/vendor/github.com/robfig/cron/v3/option.go new file mode 100644 index 000000000..09e4278e7 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/option.go @@ -0,0 +1,45 @@ +package cron + +import ( + "time" +) + +// Option represents a modification to the default behavior of a Cron. +type Option func(*Cron) + +// WithLocation overrides the timezone of the cron instance. +func WithLocation(loc *time.Location) Option { + return func(c *Cron) { + c.location = loc + } +} + +// WithSeconds overrides the parser used for interpreting job schedules to +// include a seconds field as the first one. +func WithSeconds() Option { + return WithParser(NewParser( + Second | Minute | Hour | Dom | Month | Dow | Descriptor, + )) +} + +// WithParser overrides the parser used for interpreting job schedules. +func WithParser(p ScheduleParser) Option { + return func(c *Cron) { + c.parser = p + } +} + +// WithChain specifies Job wrappers to apply to all jobs added to this cron. +// Refer to the Chain* functions in this package for provided wrappers. +func WithChain(wrappers ...JobWrapper) Option { + return func(c *Cron) { + c.chain = NewChain(wrappers...) + } +} + +// WithLogger uses the provided logger. +func WithLogger(logger Logger) Option { + return func(c *Cron) { + c.logger = logger + } +} diff --git a/vendor/github.com/robfig/cron/v3/parser.go b/vendor/github.com/robfig/cron/v3/parser.go new file mode 100644 index 000000000..3cf8879f7 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/parser.go @@ -0,0 +1,434 @@ +package cron + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Configuration options for creating a parser. 
Most options specify which +// fields should be included, while others enable features. If a field is not +// included the parser will assume a default value. These options do not change +// the order fields are parse in. +type ParseOption int + +const ( + Second ParseOption = 1 << iota // Seconds field, default 0 + SecondOptional // Optional seconds field, default 0 + Minute // Minutes field, default 0 + Hour // Hours field, default 0 + Dom // Day of month field, default * + Month // Month field, default * + Dow // Day of week field, default * + DowOptional // Optional day of week field, default * + Descriptor // Allow descriptors such as @monthly, @weekly, etc. +) + +var places = []ParseOption{ + Second, + Minute, + Hour, + Dom, + Month, + Dow, +} + +var defaults = []string{ + "0", + "0", + "0", + "*", + "*", + "*", +} + +// A custom Parser that can be configured. +type Parser struct { + options ParseOption +} + +// NewParser creates a Parser with custom options. +// +// It panics if more than one Optional is given, since it would be impossible to +// correctly infer which optional is provided or missing in general. +// +// Examples +// +// // Standard parser without descriptors +// specParser := NewParser(Minute | Hour | Dom | Month | Dow) +// sched, err := specParser.Parse("0 0 15 */3 *") +// +// // Same as above, just excludes time fields +// subsParser := NewParser(Dom | Month | Dow) +// sched, err := specParser.Parse("15 */3 *") +// +// // Same as above, just makes Dow optional +// subsParser := NewParser(Dom | Month | DowOptional) +// sched, err := specParser.Parse("15 */3") +// +func NewParser(options ParseOption) Parser { + optionals := 0 + if options&DowOptional > 0 { + optionals++ + } + if options&SecondOptional > 0 { + optionals++ + } + if optionals > 1 { + panic("multiple optionals may not be configured") + } + return Parser{options} +} + +// Parse returns a new crontab schedule representing the given spec. 
+// It returns a descriptive error if the spec is not valid. +// It accepts crontab specs and features configured by NewParser. +func (p Parser) Parse(spec string) (Schedule, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("empty spec string") + } + + // Extract timezone if present + var loc = time.Local + if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") { + var err error + i := strings.Index(spec, " ") + eq := strings.Index(spec, "=") + if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil { + return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err) + } + spec = strings.TrimSpace(spec[i:]) + } + + // Handle named schedules (descriptors), if configured + if strings.HasPrefix(spec, "@") { + if p.options&Descriptor == 0 { + return nil, fmt.Errorf("parser does not accept descriptors: %v", spec) + } + return parseDescriptor(spec, loc) + } + + // Split on whitespace. + fields := strings.Fields(spec) + + // Validate & fill in any omitted or optional fields + var err error + fields, err = normalizeFields(fields, p.options) + if err != nil { + return nil, err + } + + field := func(field string, r bounds) uint64 { + if err != nil { + return 0 + } + var bits uint64 + bits, err = getField(field, r) + return bits + } + + var ( + second = field(fields[0], seconds) + minute = field(fields[1], minutes) + hour = field(fields[2], hours) + dayofmonth = field(fields[3], dom) + month = field(fields[4], months) + dayofweek = field(fields[5], dow) + ) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + Location: loc, + }, nil +} + +// normalizeFields takes a subset set of the time fields and returns the full set +// with defaults (zeroes) populated for unset fields. +// +// As part of performing this function, it also validates that the provided +// fields are compatible with the configured options. 
+func normalizeFields(fields []string, options ParseOption) ([]string, error) { + // Validate optionals & add their field to options + optionals := 0 + if options&SecondOptional > 0 { + options |= Second + optionals++ + } + if options&DowOptional > 0 { + options |= Dow + optionals++ + } + if optionals > 1 { + return nil, fmt.Errorf("multiple optionals may not be configured") + } + + // Figure out how many fields we need + max := 0 + for _, place := range places { + if options&place > 0 { + max++ + } + } + min := max - optionals + + // Validate number of fields + if count := len(fields); count < min || count > max { + if min == max { + return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields) + } + return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields) + } + + // Populate the optional field if not provided + if min < max && len(fields) == min { + switch { + case options&DowOptional > 0: + fields = append(fields, defaults[5]) // TODO: improve access to default + case options&SecondOptional > 0: + fields = append([]string{defaults[0]}, fields...) + default: + return nil, fmt.Errorf("unknown optional field") + } + } + + // Populate all fields not part of options with their defaults + n := 0 + expandedFields := make([]string, len(places)) + copy(expandedFields, defaults) + for i, place := range places { + if options&place > 0 { + expandedFields[i] = fields[n] + n++ + } + } + return expandedFields, nil +} + +var standardParser = NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, +) + +// ParseStandard returns a new crontab schedule representing the given +// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries +// representing: minute, hour, day of month, month and day of week, in that +// order. It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Standard crontab specs, e.g. "* * * * ?" +// - Descriptors, e.g. 
"@midnight", "@every 1h30m" +func ParseStandard(standardSpec string) (Schedule, error) { + return standardParser.Parse(standardSpec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". +func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. +func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". 
+ if singleDigit { + end = r.max + } + if step > 1 { + extra = 0 + } + default: + return 0, fmt.Errorf("too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
+func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + Location: loc, + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + Location: loc, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/v3/spec.go b/vendor/github.com/robfig/cron/v3/spec.go new file mode 100644 index 000000000..fa1e241e5 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/spec.go @@ -0,0 +1,188 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. 
+type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 + + // Override location for this schedule. + Location *time.Location +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach + // + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Convert the given time into the schedule's timezone, if one is specified. + // Save the original timezone so we can convert back after we find a time. + // Note that schedules without a time zone specified (time.Local) are treated + // as local to the time provided. + origLocation := t.Location() + loc := s.Location + if loc == time.Local { + loc = t.Location() + } + if s.Location != time.Local { + t = t.In(s.Location) + } + + // Start at the earliest possible time (the upcoming second). 
+ t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. + added := false + + // If no time is found within five years, return zero. + yearLimit := t.Year() + 5 + +WRAP: + if t.Year() > yearLimit { + return time.Time{} + } + + // Find the first applicable month. + // If it's this month, then do nothing. + for 1< 12 { + t = t.Add(time.Duration(24-t.Hour()) * time.Hour) + } else { + t = t.Add(time.Duration(-t.Hour()) * time.Hour) + } + } + + if t.Day() == 1 { + goto WRAP + } + } + + for 1< 0 + dowMatch bool = 1< 0 + ) + if s.Dom&starBit > 0 || s.Dow&starBit > 0 { + return domMatch && dowMatch + } + return domMatch || dowMatch +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d..4d4b4aad6 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. - if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. 
- if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. 
-func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c79..3ddab109a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) 
} +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec5..a84e09bd4 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) 
} +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). 
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba9..0b7570f21 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. 
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. 
+ if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) 
+ } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) 
} if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) 
or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) + } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. 
-func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. 
+ lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a..861ed4b7c 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) 
} isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. 
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 63f852147..506a82f80 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. 
package require @@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf t.FailNow() } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplementsf(t, interfaceObject, object, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. 
// // assert.NotNil(t, err) @@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) 
or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b09330..eee8310a5 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) 
} -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). 
+// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 000000000..ecc0dabb7 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. 
+func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. +func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. +func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. +func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. 
+func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 000000000..fbf1934a0 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 8a237c5d6..46ceac343 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -3,23 +3,20 @@ // license that can be found in the LICENSE file. // Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). 
-// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. package slices -import "golang.org/x/exp/constraints" +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { +func Equal[S ~[]E, E comparable](s1, s2 S) bool { if len(s1) != len(s2) { return false } @@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool { return true } -// EqualFunc reports whether two slices are equal using a comparison +// EqualFunc reports whether two slices are equal using an equality // function on each pair of elements. If the lengths are different, // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { if len(s1) != len(s2) { return false } @@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { return true } -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, // until one element is not equal to the other. 
// The result of comparing the first non-matching elements is returned. // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 + if c := cmpCompare(v1, v2); c != 0 { + return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 } -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). -func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] @@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 @@ -103,9 +92,9 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. 
-func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { return i } } @@ -114,9 +103,9 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { return i } } @@ -124,37 +113,237 @@ func IndexFunc[E any](s []E, f func(E) bool) int { } // Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { +func Contains[S ~[]E, E comparable](s S, v E) bool { return Index(s, v) >= 0 } +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + // Insert inserts the values v... into s at index i, // returning the modified slice. -// In the returned slice r, r[i] == v[0]. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) 
copy(s2[i:], v) + copy(s2[i+m:], s[i:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } } // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. 
-// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - return append(s[:i], s[j:]...) + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// DeleteFunc zeroes the elements between the new length and the original length. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. 
+ copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r } // Clone returns a copy of the slice. @@ -169,50 +358,158 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. 
+// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } -// CompactFunc is like Compact but uses a comparison function. +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // Grow increases the slice's capacity, if necessary, to guarantee space for // another n elements. After Grow(n), at least n elements can be appended -// to the slice without another allocation. Grow may modify elements of the -// slice between the length and the capacity. If n is negative or too large to +// to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. 
func Grow[S ~[]E, E any](s S, n int) S { - return append(s, make(S, n)...)[:len(s)] + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. func Clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. 
+func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index c22e74bd1..f58bbc7ba 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + package slices import ( @@ -11,97 +13,159 @@ import ( ) // Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. 
-// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { n := len(x) pdqsortOrdered(x, 0, n, bits.Len(uint(n))) } -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. // -// SortFunc requires that less is a strict weak ordering. +// SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -func SortFunc[E any](x []E, less func(a, b E) bool) { +// To indicate 'uncomparable', return 0 from the function. +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) } -// SortStable sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) } // IsSorted reports whether x is sorted in ascending order. 
-func IsSorted[E constraints.Ordered](x []E) bool { +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { + if cmpLess(x[i], x[i-1]) { return false } } return true } -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { + if cmp(x[i], x[i-1]) < 0 { return false } } return true } +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. 
If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { - // search returns the leftmost position where f returns true, or len(x) if f - // returns false for all x. This is the insertion position for target in x, - // and could point to an element that's either == target or not. - pos := search(len(x), func(i int) bool { return x[i] >= target }) - if pos >= len(x) || x[pos] != target { - return pos, false - } else { - return pos, true - } -} - -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. -func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { - pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) - if pos >= len(x) || cmp(x[pos], target) != 0 { - return pos, false - } else { - return pos, true +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. 
+ i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) } -func search(n int, f func(int) bool) int { - // Define f(-1) == false and f(n) == true. - // Invariant: f(i-1) == false, f(j) == true. +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. i, j := 0, n for i < j { h := int(uint(i+j) >> 1) // avoid overflow when computing h // i ≤ h < j - if !f(h) { - i = h + 1 // preserves f(i-1) == false + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 } else { - j = h // preserves f(j) == true + j = h // preserves cmp(x[j], target) >= 0 } } - // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. - return i + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. 
+ return i, i < n && cmp(x[i], target) == 0 } type sortedHint int // hint for pdqsort when choosing the pivot @@ -125,3 +189,9 @@ func (r *xorshift) Next() uint64 { func nextPowerOfTwo(length int) uint { return 1 << bits.Len(uint(length)) } + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go similarity index 64% rename from vendor/golang.org/x/exp/slices/zsortfunc.go rename to vendor/golang.org/x/exp/slices/zsortanyfunc.go index 2a632476c..06f2c7a24 100644 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -6,28 +6,28 @@ package slices -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { data[j], data[j-1] = data[j-1], data[j] } } } -// siftDownLessFunc implements the heap property on data[lo:hi]. +// siftDownCmpFunc implements the heap property on data[lo:hi]. // first is an offset into the array where the root of the heap lies. 
-func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { root := lo for { child := 2*root + 1 if child >= hi { break } - if child+1 < hi && less(data[first+child], data[first+child+1]) { + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { child++ } - if !less(data[first+root], data[first+child]) { + if !(cmp(data[first+root], data[first+child]) < 0) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool } } -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { first := a lo := 0 hi := b - a // Build heap with greatest element at top. for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) + siftDownCmpFunc(data, i, hi, first, cmp) } // Pop elements, largest first, into end of data. for i := hi - 1; i >= 0; i-- { data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) + siftDownCmpFunc(data, lo, i, first, cmp) } } -// pdqsortLessFunc sorts data[a:b]. +// pdqsortCmpFunc sorts data[a:b]. // The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. // pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf // C++ implementation: https://github.com/orlp/pdqsort // Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ // limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
-func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { const maxInsertion = 12 var ( @@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { length := b - a if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) return } // Fall back to heapsort if too many bad choices were made. if limit == 0 { - heapSortLessFunc(data, a, b, less) + heapSortCmpFunc(data, a, b, cmp) return } // If the last partitioning was imbalanced, we need to breaking patterns. if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) + breakPatternsCmpFunc(data, a, b, cmp) limit-- } - pivot, hint := choosePivotLessFunc(data, a, b, less) + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) + reverseRangeCmpFunc(data, a, b, cmp) // The chosen pivot was pivot-a elements after the start of the array. // After reversing it is pivot-a elements before the end of the array. // The idea came from Rust's implementation. @@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { // The slice is likely already sorted. if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { + if partialInsertionSortCmpFunc(data, a, b, cmp) { return } } // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) a = mid continue } - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) wasPartitioned = alreadyPartitioned leftLen, rightLen := mid-a, b-mid balanceThreshold := length / 8 if leftLen < rightLen { wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) + pdqsortCmpFunc(data, a, mid, limit, cmp) a = mid + 1 } else { wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) + pdqsortCmpFunc(data, mid+1, b, limit, cmp) b = mid } } } -// partitionLessFunc does one quicksort partition. +// partitionCmpFunc does one quicksort partition. // Let p = data[pivot] // Moves elements in data[a:b] around, so that data[i]

=p for inewpivot. // On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) j-- for { - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) return j, false } -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. // It assumed that data[a:b] does not contain elements smaller than the data[pivot]. 
-func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !less(data[a], data[i]) { + for i <= j && !(cmp(data[a], data[i]) < 0) { i++ } - for i <= j && less(data[a], data[j]) { + for i <= j && (cmp(data[a], data[j]) < 0) { j-- } if i > j { @@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) return i } -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { const ( maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted shortestShifting = 50 // don't shift any elements on short arrays ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { + for i < b && !(cmp(data[i], data[i-1]) < 0) { i++ } @@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the greater one to the right. 
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b return false } -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns // that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { length := b - a if length >= 8 { random := xorshift(length) @@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -// choosePivotLessFunc chooses a pivot in data[a:b]. +// choosePivotCmpFunc chooses a pivot in data[a:b]. // // [0,8): chooses a static pivot. // [8,shortestNinther): uses the simple median-of-three method. // [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { +func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { const ( shortestNinther = 50 maxSwaps = 4 * 3 @@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv if l >= 8 { if l >= shortestNinther { // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) } // Find the median among i, j, k and stores it into j. 
- j = medianLessFunc(data, i, j, k, &swaps, less) + j = medianCmpFunc(data, i, j, k, &swaps, cmp) } switch swaps { @@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv } } -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { *swaps++ return b, a } return a, b } -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) return b } -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) } -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { i := a j := b - 1 for i < j { @@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { for i := 0; i < n; i++ { data[a+i], data[b+i] = data[b+i], data[a+i] } } -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { blockSize := 20 // must be > 0 a, b := 0, blockSize for b <= n { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) a = b b += blockSize } - insertionSortLessFunc(data, a, n, less) + insertionSortCmpFunc(data, a, n, cmp) for blockSize < n { a, b = 0, 2*blockSize for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) + symMergeCmpFunc(data, a, a+blockSize, b, cmp) a = b b += 2 * blockSize } if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) + symMergeCmpFunc(data, a, m, n, cmp) } blockSize *= 2 } } -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using // the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum // Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz // Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in @@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { // symMerge assumes non-degenerate arguments: a < m && m < b. 
// Having the caller check this condition eliminates many leaf recursion calls, // which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { // Avoid unnecessary recursions of symMerge // by direct insertion of data[a] into data[m:b] // if data[a:m] only contains one element. @@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := b for i < j { h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { + if cmp(data[h], data[a]) < 0 { i = h + 1 } else { j = h @@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := m for i < j { h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { + if !(cmp(data[m], data[h]) < 0) { i = h + 1 } else { j = h @@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { for start < r { c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { + if !(cmp(data[p-c], data[c]) < 0) { start = c + 1 } else { r = c @@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { end := n - start if start < m && m < end { - rotateLessFunc(data, start, m, end, less) + rotateCmpFunc(data, start, m, end, cmp) } if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) + symMergeCmpFunc(data, a, start, mid, cmp) } if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) + symMergeCmpFunc(data, mid, end, b, cmp) } } -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: // Data of the form 'x u v y' is changed to 'x v u y'. // rotate performs at most b-a many calls to data.Swap, // and it assumes non-degenerate arguments: a < m && m < b. 
-func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { i := m - a j := b - m for i != j { if i > j { - swapRangeLessFunc(data, m-i, m, j, less) + swapRangeCmpFunc(data, m-i, m, j, cmp) i -= j } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) j -= i } } // i == j - swapRangeLessFunc(data, m-i, m, i, less) + swapRangeCmpFunc(data, m-i, m, i, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go index efaa1c8b7..99b47c398 100644 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints" // insertionSortOrdered sorts data[a:b] using insertion sort. func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { data[j], data[j-1] = data[j-1], data[j] } } @@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { if child >= hi { break } - if child+1 < hi && (data[first+child] < data[first+child+1]) { + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { child++ } - if !(data[first+root] < data[first+child]) { + if !cmpLess(data[first+root], data[first+child]) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !(data[a-1] < data[pivot]) { + if a > 0 && !cmpLess(data[a-1], data[pivot]) { mid := partitionEqualOrdered(data, a, b, pivot) a = mid continue @@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo j-- for { - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !(data[a] < data[i]) { + for i <= j && !cmpLess(data[a], data[i]) { i++ } - for i <= j && (data[a] < data[j]) { + for i <= j && cmpLess(data[a], data[j]) { j-- } if i > j { @@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { + for i < b && !cmpLess(data[i], data[i-1]) { i++ } @@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the greater one to the right. 
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h // order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { + if cmpLess(data[b], data[a]) { *swaps++ return b, a } @@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := b for i < j { h := int(uint(i+j) >> 1) - if data[h] < data[a] { + if cmpLess(data[h], data[a]) { i = h + 1 } else { j = h @@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := m for i < j { h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { + if !cmpLess(data[m], data[h]) { i = h + 1 } else { j = h @@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { for start < r { c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { + if !cmpLess(data[p-c], data[c]) { start = c + 1 } else { r = c diff --git a/vendor/modules.txt b/vendor/modules.txt index 3d1b67644..e8e799303 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -52,6 +52,9 @@ github.com/ghodss/yaml ## explicit github.com/go-bindata/go-bindata github.com/go-bindata/go-bindata/go-bindata +# github.com/go-co-op/gocron/v2 v2.8.0 +## explicit; go 1.20 +github.com/go-co-op/gocron/v2 # github.com/go-logr/logr v1.4.1 ## explicit; go 1.18 github.com/go-logr/logr @@ -137,7 +140,7 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/uuid v1.3.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/gorilla/websocket v1.5.0 @@ -166,8 +169,8 @@ github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; 
go 1.18 github.com/inconshreveable/mousetrap -# github.com/jonboulle/clockwork v0.2.2 -## explicit; go 1.13 +# github.com/jonboulle/clockwork v0.4.0 +## explicit; go 1.15 github.com/jonboulle/clockwork # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 @@ -419,6 +422,9 @@ github.com/prometheus/procfs/internal/util # github.com/robfig/cron v1.2.0 ## explicit github.com/robfig/cron +# github.com/robfig/cron/v3 v3.0.1 +## explicit; go 1.12 +github.com/robfig/cron/v3 # github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -434,8 +440,8 @@ github.com/spf13/pflag # github.com/stoewer/go-strcase v1.2.0 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/stretchr/testify v1.8.4 -## explicit; go 1.20 +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/require # github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 @@ -644,9 +650,10 @@ golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/salsa20/salsa -# golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 +## explicit; go 1.20 golang.org/x/exp/constraints +golang.org/x/exp/maps golang.org/x/exp/slices # golang.org/x/net v0.23.0 ## explicit; go 1.18 @@ -662,7 +669,7 @@ golang.org/x/net/websocket ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.6.0 +# golang.org/x/sync v0.7.0 ## explicit; go 1.18 golang.org/x/sync/singleflight # golang.org/x/sys v0.18.0 From 4c8ff99cf6ab780d74836f59bd1d1dbba316ac55 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jul 2024 19:39:35 +0200 Subject: [PATCH 08/22] add prune schedule --- pkg/cmd/backuprestore/backupnoconfig.go | 46 +++++++++++++++++++++++++ pkg/cmd/prune-backups/prune.go | 14 ++++++++ 2 files changed, 60 insertions(+) diff --git 
a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 44fc7350b..64f460458 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -11,6 +11,7 @@ import ( backupv1alpha1 "github.com/openshift/api/config/v1alpha1" configversionedclientv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + prune_backups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" @@ -69,6 +70,7 @@ func (b *backupNoConfig) Run() error { } go b.scheduleBackup() + go b.scheduleBackupPrune() return nil } @@ -164,6 +166,50 @@ func (b *backupNoConfig) copySnapshot() error { return nil } +func (b *backupNoConfig) pruneBackups() error { + switch b.retention.RetentionType { + case prune_backups.RetentionTypeNone: + klog.Info("no retention policy specified") + return nil + case prune_backups.RetentionTypeNumber: + if b.retention.RetentionNumber == nil { + err := fmt.Errorf("retention policy RetentionTypeNumber requires RetentionNumberConfig") + klog.Error(err) + return err + } + return prune_backups.Retain(b.retention) + case prune_backups.RetentionTypeSize: + if b.retention.RetentionSize == nil { + err := fmt.Errorf("retention policy RetentionTypeSize requires RetentionSizeConfig") + klog.Error(err) + return err + } + return prune_backups.Retain(b.retention) + default: + err := fmt.Errorf("illegal retention policy type: [%v]", b.retention.RetentionType) + klog.Error(err) + return err + } +} + +func (b *backupNoConfig) scheduleBackupPrune() error { + s, _ := gcron.NewScheduler() + defer func() { _ = s.Shutdown() }() + + if _, err := s.NewJob( + gcron.CronJob( + b.schedule, + false, + ), + gcron.NewTask(b.pruneBackups()), + ); err != nil { + return err + } + + s.Start() + return nil +} + func getCpArgs(src, dst string) []string { return strings.Split(fmt.Sprintf("--verbose --recursive --preserve 
--reflink=auto %s %s", src, dst), " ") } diff --git a/pkg/cmd/prune-backups/prune.go b/pkg/cmd/prune-backups/prune.go index 339b1b288..154ab2b99 100644 --- a/pkg/cmd/prune-backups/prune.go +++ b/pkg/cmd/prune-backups/prune.go @@ -3,6 +3,7 @@ package prune_backups import ( goflag "flag" "fmt" + "github.com/openshift/api/config/v1alpha1" "github.com/spf13/cobra" "io/fs" "k8s.io/klog/v2" @@ -109,6 +110,19 @@ func (r *pruneOpts) Run() error { return nil } +func Retain(policy v1alpha1.RetentionPolicy) error { + switch policy.RetentionType { + case RetentionTypeNone: + klog.Infof("nothing to do, retention type is none") + return nil + case RetentionTypeNumber: + return retainByNumber(policy.RetentionNumber.MaxNumberOfBackups) + case RetentionTypeSize: + return retainBySizeGb(policy.RetentionSize.MaxSizeOfBackupsGb) + } + return nil +} + func retainBySizeGb(sizeInGb int) error { folders, err := listAllBackupFolders() if err != nil { From 9181812538efcfc2b80466262ad6aac69764d52c Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 10 Jul 2024 03:39:19 +0200 Subject: [PATCH 09/22] add unit test --- pkg/cmd/backuprestore/backupnoconfig.go | 59 ++++++++++---- pkg/cmd/backuprestore/backupnoconfig_test.go | 86 ++++++++++++++++++++ 2 files changed, 131 insertions(+), 14 deletions(-) create mode 100644 pkg/cmd/backuprestore/backupnoconfig_test.go diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 64f460458..fc19fbf56 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -10,10 +10,11 @@ import ( "strings" backupv1alpha1 "github.com/openshift/api/config/v1alpha1" - configversionedclientv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" - prune_backups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" + backupv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + prune 
"github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" @@ -65,30 +66,54 @@ func (b *backupNoConfig) Validate() error { } func (b *backupNoConfig) Run() error { - if err := b.extractBackupConfigs(); err != nil { + backupsClient, err := b.getBackupClient() + if err != nil { + return err + } + + if err = b.extractBackupSpecs(backupsClient); err != nil { return err } - go b.scheduleBackup() - go b.scheduleBackupPrune() + var errs []error + go func() { + err := b.scheduleBackup() + if err != nil { + errs = append(errs, err) + } + }() + go func() { + err := b.scheduleBackupPrune() + if err != nil { + errs = append(errs, err) + } + }() + + if len(errs) > 0 { + return kerrors.NewAggregate(errs) + } return nil } -func (b *backupNoConfig) extractBackupConfigs() error { +func (b *backupNoConfig) getBackupClient() (backupv1client.BackupsGetter, error) { kubeConfig, err := clientcmd.BuildConfigFromFlags("", b.kubeConfig) if err != nil { bErr := fmt.Errorf("error loading kubeconfig: %v", err) klog.Error(bErr) - return bErr + return nil, bErr } - backupsClient, err := configversionedclientv1alpha1.NewForConfig(kubeConfig) + backupsClient, err := backupv1client.NewForConfig(kubeConfig) if err != nil { bErr := fmt.Errorf("error creating etcd backups client: %v", err) klog.Error(bErr) - return bErr + return nil, bErr } + return backupsClient, nil +} + +func (b *backupNoConfig) extractBackupSpecs(backupsClient backupv1client.BackupsGetter) error { backups, err := backupsClient.Backups().List(context.Background(), v1.ListOptions{}) if err != nil { lErr := fmt.Errorf("could not list backup CRDs, error was: [%v]", err) @@ -96,6 +121,12 @@ func (b *backupNoConfig) extractBackupConfigs() error { return lErr } + if len(backups.Items) == 0 { + lErr := fmt.Errorf("no backup CRDs exist, found [%v]", backups) + klog.Error(lErr) + return lErr 
+ } + idx := slices.IndexFunc(backups.Items, func(backup backupv1alpha1.Backup) bool { return backup.Name == "default" }) @@ -168,23 +199,23 @@ func (b *backupNoConfig) copySnapshot() error { func (b *backupNoConfig) pruneBackups() error { switch b.retention.RetentionType { - case prune_backups.RetentionTypeNone: + case prune.RetentionTypeNone: klog.Info("no retention policy specified") return nil - case prune_backups.RetentionTypeNumber: + case prune.RetentionTypeNumber: if b.retention.RetentionNumber == nil { err := fmt.Errorf("retention policy RetentionTypeNumber requires RetentionNumberConfig") klog.Error(err) return err } - return prune_backups.Retain(b.retention) - case prune_backups.RetentionTypeSize: + return prune.Retain(b.retention) + case prune.RetentionTypeSize: if b.retention.RetentionSize == nil { err := fmt.Errorf("retention policy RetentionTypeSize requires RetentionSizeConfig") klog.Error(err) return err } - return prune_backups.Retain(b.retention) + return prune.Retain(b.retention) default: err := fmt.Errorf("illegal retention policy type: [%v]", b.retention.RetentionType) klog.Error(err) diff --git a/pkg/cmd/backuprestore/backupnoconfig_test.go b/pkg/cmd/backuprestore/backupnoconfig_test.go new file mode 100644 index 000000000..216dade96 --- /dev/null +++ b/pkg/cmd/backuprestore/backupnoconfig_test.go @@ -0,0 +1,86 @@ +package backuprestore + +import ( + "errors" + "testing" + + backupv1alpha1 "github.com/openshift/api/config/v1alpha1" + fake "github.com/openshift/client-go/config/clientset/versioned/fake" + "github.com/stretchr/testify/require" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestBackupNoConfig_extractBackupSpecs(t *testing.T) { + testCases := []struct { + name string + backupName string + schedule string + expErr error + }{ + { + name: "empty input", + backupName: "", + schedule: "", + expErr: errors.New("no backup CRDs exist, found"), + }, + { + name: "non default backup", + 
backupName: "test-backup", + schedule: "20 4 * * *", + expErr: errors.New("could not find default backup CR"), + }, + { + name: "default backup", + backupName: "default", + schedule: "10 8 * 7 *", + expErr: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // arrange + var operatorFake *fake.Clientset + backup := createBackupObject(tc.backupName, tc.schedule) + + if backup != nil { + operatorFake = fake.NewSimpleClientset([]runtime.Object{backup}...) + } else { + operatorFake = fake.NewSimpleClientset() + } + + // act + b := &backupNoConfig{} + err := b.extractBackupSpecs(operatorFake.ConfigV1alpha1()) + + // assert + if tc.expErr != nil { + require.ErrorContains(t, err, tc.expErr.Error()) + } else { + require.Equal(t, tc.expErr, err) + require.Equal(t, tc.schedule, b.schedule) + require.Equal(t, getRetentionPolicy(), b.retention) + } + }) + } +} + +func createBackupObject(backupName, schedule string) *backupv1alpha1.Backup { + if backupName == "" { + return nil + } + return &backupv1alpha1.Backup{ObjectMeta: v1.ObjectMeta{Name: backupName}, + Spec: backupv1alpha1.BackupSpec{ + EtcdBackupSpec: backupv1alpha1.EtcdBackupSpec{ + Schedule: schedule, + RetentionPolicy: getRetentionPolicy(), + TimeZone: "UTC", + PVCName: "backup-happy-path-pvc"}}} +} + +func getRetentionPolicy() backupv1alpha1.RetentionPolicy { + return backupv1alpha1.RetentionPolicy{ + RetentionType: backupv1alpha1.RetentionTypeNumber, + RetentionNumber: &backupv1alpha1.RetentionNumberConfig{MaxNumberOfBackups: 5}} +} From 371fad675adfb0b047c26022f144a390091806fb Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 10 Jul 2024 10:34:08 +0200 Subject: [PATCH 10/22] remove copy backup --- pkg/cmd/backuprestore/backupnoconfig.go | 51 +++----------------- pkg/cmd/backuprestore/backupnoconfig_test.go | 4 +- 2 files changed, 9 insertions(+), 46 deletions(-) diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 
fc19fbf56..6d13df5dc 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -2,25 +2,22 @@ package backuprestore import ( "context" - "errors" "fmt" "io" - "os/exec" "slices" - "strings" + gcron "github.com/go-co-op/gocron/v2" backupv1alpha1 "github.com/openshift/api/config/v1alpha1" backupv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" prune "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - - gcron "github.com/go-co-op/gocron/v2" - "github.com/spf13/cobra" - "github.com/spf13/pflag" ) type backupNoConfig struct { @@ -37,7 +34,7 @@ func NewBackupNoConfigCommand(errOut io.Writer) *cobra.Command { backupOptions: backupOptions{errOut: errOut}, } cmd := &cobra.Command{ - Use: "cluster-backup-no-config", + Use: "backup-server", Short: "Backs up a snapshot of etcd database and static pod resources without config", Run: func(cmd *cobra.Command, args []string) { must := func(fn func() error) { @@ -144,25 +141,7 @@ func (b *backupNoConfig) extractBackupSpecs(backupsClient backupv1client.Backups } func (b *backupNoConfig) backup() error { - // initially take backup using etcdctl - if !b.snapshotExist { - if err := backup(&b.backupOptions); err != nil { - klog.Errorf("run: backup failed: [%v]", err) - return err - } - b.snapshotExist = true - klog.Infof("config-dir is: %s", b.configDir) - return nil - } - - // only update the snapshot file - if err := b.copySnapshot(); err != nil { - sErr := fmt.Errorf("run: backup failed: [%v]", err) - klog.Error(sErr) - return sErr - } - - return nil + return backup(&b.backupOptions) } func (b *backupNoConfig) scheduleBackup() error { @@ -183,20 +162,6 @@ func (b *backupNoConfig) scheduleBackup() error { return nil } -func (b 
*backupNoConfig) copySnapshot() error { - if !b.snapshotExist { - klog.Errorf("run: backup failed: [%v]", errors.New("no snapshot file exists")) - } - - src := "/var/lib/etcd/member/snap" - dst := "/var/backup/etcd/snap" - if _, err := exec.Command("cp", getCpArgs(src, dst)...).CombinedOutput(); err != nil { - klog.Errorf("run: backup failed: [%v]", err) - } - - return nil -} - func (b *backupNoConfig) pruneBackups() error { switch b.retention.RetentionType { case prune.RetentionTypeNone: @@ -240,7 +205,3 @@ func (b *backupNoConfig) scheduleBackupPrune() error { s.Start() return nil } - -func getCpArgs(src, dst string) []string { - return strings.Split(fmt.Sprintf("--verbose --recursive --preserve --reflink=auto %s %s", src, dst), " ") -} diff --git a/pkg/cmd/backuprestore/backupnoconfig_test.go b/pkg/cmd/backuprestore/backupnoconfig_test.go index 216dade96..b35ec24b4 100644 --- a/pkg/cmd/backuprestore/backupnoconfig_test.go +++ b/pkg/cmd/backuprestore/backupnoconfig_test.go @@ -6,9 +6,11 @@ import ( backupv1alpha1 "github.com/openshift/api/config/v1alpha1" fake "github.com/openshift/client-go/config/clientset/versioned/fake" - "github.com/stretchr/testify/require" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + + "github.com/stretchr/testify/require" ) func TestBackupNoConfig_extractBackupSpecs(t *testing.T) { From e6e335a5e2962c207cfdc36ffb17a33971b9d5a3 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 10 Jul 2024 14:52:08 +0200 Subject: [PATCH 11/22] add scheduling opts --- pkg/cmd/backuprestore/backupnoconfig.go | 62 +++++++++---------------- 1 file changed, 22 insertions(+), 40 deletions(-) diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 6d13df5dc..b19341720 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -3,6 +3,7 @@ package backuprestore import ( "context" "fmt" + "github.com/google/uuid" "io" "slices" @@ -15,7 
+16,6 @@ import ( "github.com/spf13/pflag" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ) @@ -25,6 +25,7 @@ type backupNoConfig struct { snapshotExist bool schedule string retention backupv1alpha1.RetentionPolicy + scheduler gcron.Scheduler backupOptions } @@ -68,27 +69,22 @@ func (b *backupNoConfig) Run() error { return err } + b.scheduler, _ = gcron.NewScheduler( + gcron.WithLimitConcurrentJobs(1, gcron.LimitModeWait), + gcron.WithGlobalJobOptions( + gcron.WithLimitedRuns(1), + gcron.WithSingletonMode(gcron.LimitModeWait))) + defer func() { _ = b.scheduler.Shutdown() }() + if err = b.extractBackupSpecs(backupsClient); err != nil { return err } - var errs []error - go func() { - err := b.scheduleBackup() - if err != nil { - errs = append(errs, err) - } - }() - go func() { - err := b.scheduleBackupPrune() - if err != nil { - errs = append(errs, err) - } - }() - - if len(errs) > 0 { - return kerrors.NewAggregate(errs) + err = b.scheduleBackup() + if err != nil { + return err } + return nil } @@ -145,20 +141,24 @@ func (b *backupNoConfig) backup() error { } func (b *backupNoConfig) scheduleBackup() error { - s, _ := gcron.NewScheduler() - defer func() { _ = s.Shutdown() }() - - if _, err := s.NewJob( + if _, err := b.scheduler.NewJob( gcron.CronJob( b.schedule, false, ), gcron.NewTask(b.backup()), + gcron.WithEventListeners( + gcron.AfterJobRuns( + func(jobID uuid.UUID, jobName string) { + b.pruneBackups() + }, + ), + ), ); err != nil { return err } - s.Start() + b.scheduler.Start() return nil } @@ -187,21 +187,3 @@ func (b *backupNoConfig) pruneBackups() error { return err } } - -func (b *backupNoConfig) scheduleBackupPrune() error { - s, _ := gcron.NewScheduler() - defer func() { _ = s.Shutdown() }() - - if _, err := s.NewJob( - gcron.CronJob( - b.schedule, - false, - ), - gcron.NewTask(b.pruneBackups()), - ); err != nil { - return err - } - - s.Start() - return nil -} 
From 726bcbe900de036b360978f3d642b97125a43fa6 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 10 Jul 2024 15:30:32 +0200 Subject: [PATCH 12/22] expose PruneOpts --- pkg/cmd/backuprestore/backupnoconfig.go | 42 +++++++------------ pkg/cmd/prune-backups/prune.go | 56 ++++++++++++------------- pkg/cmd/prune-backups/prune_test.go | 20 ++++----- 3 files changed, 53 insertions(+), 65 deletions(-) diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index b19341720..aba534e7c 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -3,21 +3,21 @@ package backuprestore import ( "context" "fmt" - "github.com/google/uuid" "io" "slices" - gcron "github.com/go-co-op/gocron/v2" backupv1alpha1 "github.com/openshift/api/config/v1alpha1" backupv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" - prune "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" + prune_backups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" + + gcron "github.com/go-co-op/gocron/v2" + "github.com/google/uuid" + "github.com/spf13/cobra" + "github.com/spf13/pflag" ) type backupNoConfig struct { @@ -163,27 +163,15 @@ func (b *backupNoConfig) scheduleBackup() error { } func (b *backupNoConfig) pruneBackups() error { - switch b.retention.RetentionType { - case prune.RetentionTypeNone: - klog.Info("no retention policy specified") - return nil - case prune.RetentionTypeNumber: - if b.retention.RetentionNumber == nil { - err := fmt.Errorf("retention policy RetentionTypeNumber requires RetentionNumberConfig") - klog.Error(err) - return err - } - return prune.Retain(b.retention) - case prune.RetentionTypeSize: - if b.retention.RetentionSize == nil { - err := fmt.Errorf("retention policy 
RetentionTypeSize requires RetentionSizeConfig") - klog.Error(err) - return err - } - return prune.Retain(b.retention) - default: - err := fmt.Errorf("illegal retention policy type: [%v]", b.retention.RetentionType) - klog.Error(err) + opts := &prune_backups.PruneOpts{ + RetentionType: string(b.retention.RetentionType), + MaxNumberOfBackups: b.retention.RetentionNumber.MaxNumberOfBackups, + MaxSizeOfBackupsGb: b.retention.RetentionSize.MaxSizeOfBackupsGb, + } + + if err := opts.Validate(); err != nil { return err } + + return opts.Run() } diff --git a/pkg/cmd/prune-backups/prune.go b/pkg/cmd/prune-backups/prune.go index 154ab2b99..a7e3a2e6a 100644 --- a/pkg/cmd/prune-backups/prune.go +++ b/pkg/cmd/prune-backups/prune.go @@ -30,14 +30,14 @@ type backupDirStat struct { modTime time.Time } -type pruneOpts struct { - retentionType string - maxNumberOfBackups int - maxSizeOfBackupsGb int +type PruneOpts struct { + RetentionType string + MaxNumberOfBackups int + MaxSizeOfBackupsGb int } func NewPruneCommand() *cobra.Command { - opts := pruneOpts{retentionType: "None"} + opts := PruneOpts{RetentionType: "None"} cmd := &cobra.Command{ Use: "prune-backups", Short: "Prunes existing backups on the filesystem.", @@ -57,12 +57,12 @@ func NewPruneCommand() *cobra.Command { return cmd } -func (r *pruneOpts) AddFlags(cmd *cobra.Command) { +func (r *PruneOpts) AddFlags(cmd *cobra.Command) { flagSet := cmd.Flags() - flagSet.StringVar(&r.retentionType, "type", r.retentionType, "Which kind of retention to execute, can either be None, RetentionNumber or RetentionSize.") + flagSet.StringVar(&r.RetentionType, "type", r.RetentionType, "Which kind of retention to execute, can either be None, RetentionNumber or RetentionSize.") // the defaults are zero for validation, we inject the real defaults from the periodic backup controller - flagSet.IntVar(&r.maxNumberOfBackups, "maxNumberOfBackups", 0, "how many backups to keep when type=RetentionNumber") - flagSet.IntVar(&r.maxSizeOfBackupsGb, 
"maxSizeOfBackupsGb", 0, "how many gigabytes of backups to keep when type=RetentionSize") + flagSet.IntVar(&r.MaxNumberOfBackups, "MaxNumberOfBackups", 0, "how many backups to keep when type=RetentionNumber") + flagSet.IntVar(&r.MaxSizeOfBackupsGb, "MaxSizeOfBackupsGb", 0, "how many gigabytes of backups to keep when type=RetentionSize") // adding klog flags to tune verbosity better gfs := goflag.NewFlagSet("", goflag.ExitOnError) @@ -70,41 +70,41 @@ func (r *pruneOpts) AddFlags(cmd *cobra.Command) { cmd.Flags().AddGoFlagSet(gfs) } -func (r *pruneOpts) Validate() error { - if r.retentionType != RetentionTypeNone && r.retentionType != RetentionTypeNumber && r.retentionType != RetentionTypeSize { - return fmt.Errorf("unknown retention type: [%s]", r.retentionType) +func (r *PruneOpts) Validate() error { + if r.RetentionType != RetentionTypeNone && r.RetentionType != RetentionTypeNumber && r.RetentionType != RetentionTypeSize { + return fmt.Errorf("unknown retention type: [%s]", r.RetentionType) } - if r.retentionType == RetentionTypeNumber { - if r.maxNumberOfBackups < 1 { - return fmt.Errorf("unexpected amount of backups [%d] found, expected at least 1", r.maxNumberOfBackups) + if r.RetentionType == RetentionTypeNumber { + if r.MaxNumberOfBackups < 1 { + return fmt.Errorf("unexpected amount of backups [%d] found, expected at least 1", r.MaxNumberOfBackups) } - if r.maxSizeOfBackupsGb != 0 { - return fmt.Errorf("unexpected argument [maxSizeOfBackupsGb] found while using %s", RetentionTypeNumber) + if r.MaxSizeOfBackupsGb != 0 { + return fmt.Errorf("unexpected argument [MaxSizeOfBackupsGb] found while using %s", RetentionTypeNumber) } - } else if r.retentionType == RetentionTypeSize { - if r.maxSizeOfBackupsGb < 1 { - return fmt.Errorf("unexpected size of backups [%d]gb found, expected at least 1", r.maxSizeOfBackupsGb) + } else if r.RetentionType == RetentionTypeSize { + if r.MaxSizeOfBackupsGb < 1 { + return fmt.Errorf("unexpected size of backups [%d]gb found, 
expected at least 1", r.MaxSizeOfBackupsGb) } - if r.maxNumberOfBackups != 0 { - return fmt.Errorf("unexpected argument [maxNumberOfBackups] found while using %s", RetentionTypeSize) + if r.MaxNumberOfBackups != 0 { + return fmt.Errorf("unexpected argument [MaxNumberOfBackups] found while using %s", RetentionTypeSize) } } return nil } -func (r *pruneOpts) Run() error { - if r.retentionType == RetentionTypeNone { +func (r *PruneOpts) Run() error { + if r.RetentionType == RetentionTypeNone { klog.Infof("nothing to do, retention type is none") return nil - } else if r.retentionType == RetentionTypeSize { - return retainBySizeGb(r.maxSizeOfBackupsGb) - } else if r.retentionType == RetentionTypeNumber { - return retainByNumber(r.maxNumberOfBackups) + } else if r.RetentionType == RetentionTypeSize { + return retainBySizeGb(r.MaxSizeOfBackupsGb) + } else if r.RetentionType == RetentionTypeNumber { + return retainByNumber(r.MaxNumberOfBackups) } return nil diff --git a/pkg/cmd/prune-backups/prune_test.go b/pkg/cmd/prune-backups/prune_test.go index ab1894b8a..54d24a459 100644 --- a/pkg/cmd/prune-backups/prune_test.go +++ b/pkg/cmd/prune-backups/prune_test.go @@ -13,21 +13,21 @@ import ( func TestCommandValidation(t *testing.T) { testCases := map[string]struct { - opts pruneOpts + opts PruneOpts expectedErr error }{ - "none happy": {opts: pruneOpts{retentionType: RetentionTypeNone}}, - "number happy": {opts: pruneOpts{retentionType: RetentionTypeNumber, maxNumberOfBackups: 1}}, - "number zero": {opts: pruneOpts{retentionType: RetentionTypeNumber, maxNumberOfBackups: 0}, + "none happy": {opts: PruneOpts{RetentionType: RetentionTypeNone}}, + "number happy": {opts: PruneOpts{RetentionType: RetentionTypeNumber, MaxNumberOfBackups: 1}}, + "number zero": {opts: PruneOpts{RetentionType: RetentionTypeNumber, MaxNumberOfBackups: 0}, expectedErr: fmt.Errorf("unexpected amount of backups [0] found, expected at least 1")}, - "number flipped": {opts: pruneOpts{retentionType: 
RetentionTypeNumber, maxNumberOfBackups: 2, maxSizeOfBackupsGb: 25}, - expectedErr: fmt.Errorf("unexpected argument [maxSizeOfBackupsGb] found while using RetentionNumber")}, + "number flipped": {opts: PruneOpts{RetentionType: RetentionTypeNumber, MaxNumberOfBackups: 2, MaxSizeOfBackupsGb: 25}, + expectedErr: fmt.Errorf("unexpected argument [MaxSizeOfBackupsGb] found while using RetentionNumber")}, - "size happy": {opts: pruneOpts{retentionType: RetentionTypeSize, maxSizeOfBackupsGb: 1}}, - "size zero": {opts: pruneOpts{retentionType: RetentionTypeSize, maxSizeOfBackupsGb: 0}, + "size happy": {opts: PruneOpts{RetentionType: RetentionTypeSize, MaxSizeOfBackupsGb: 1}}, + "size zero": {opts: PruneOpts{RetentionType: RetentionTypeSize, MaxSizeOfBackupsGb: 0}, expectedErr: fmt.Errorf("unexpected size of backups [0]gb found, expected at least 1")}, - "size flipped": {opts: pruneOpts{retentionType: RetentionTypeSize, maxSizeOfBackupsGb: 2, maxNumberOfBackups: 25}, - expectedErr: fmt.Errorf("unexpected argument [maxNumberOfBackups] found while using RetentionSize")}, + "size flipped": {opts: PruneOpts{RetentionType: RetentionTypeSize, MaxSizeOfBackupsGb: 2, MaxNumberOfBackups: 25}, + expectedErr: fmt.Errorf("unexpected argument [MaxNumberOfBackups] found while using RetentionSize")}, } for k, v := range testCases { From b288cc0003bd3dc79de2c9c138d8bc0516a081aa Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 10 Jul 2024 15:43:58 +0200 Subject: [PATCH 13/22] add subcmd --- cmd/cluster-etcd-operator/main.go | 1 + pkg/cmd/backuprestore/backupnoconfig.go | 14 +++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/cmd/cluster-etcd-operator/main.go b/cmd/cluster-etcd-operator/main.go index d4a3763d5..852486bc9 100644 --- a/cmd/cluster-etcd-operator/main.go +++ b/cmd/cluster-etcd-operator/main.go @@ -75,6 +75,7 @@ func NewSSCSCommand(ctx context.Context) *cobra.Command { cmd.AddCommand(readyz.NewReadyzCommand()) 
cmd.AddCommand(prune_backups.NewPruneCommand()) cmd.AddCommand(requestbackup.NewRequestBackupCommand(ctx)) + cmd.AddCommand(backuprestore.NewBackupNoConfigCommand(os.Stderr)) return cmd } diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index aba534e7c..13b259a5b 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -8,9 +8,10 @@ import ( backupv1alpha1 "github.com/openshift/api/config/v1alpha1" backupv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" - prune_backups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" + prunebackups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" @@ -141,6 +142,8 @@ func (b *backupNoConfig) backup() error { } func (b *backupNoConfig) scheduleBackup() error { + var errs []error + if _, err := b.scheduler.NewJob( gcron.CronJob( b.schedule, @@ -150,20 +153,21 @@ func (b *backupNoConfig) scheduleBackup() error { gcron.WithEventListeners( gcron.AfterJobRuns( func(jobID uuid.UUID, jobName string) { - b.pruneBackups() + err := b.pruneBackups() + errs = append(errs, err) }, ), ), ); err != nil { - return err + errs = append(errs, err) } b.scheduler.Start() - return nil + return utilerrors.NewAggregate(errs) } func (b *backupNoConfig) pruneBackups() error { - opts := &prune_backups.PruneOpts{ + opts := &prunebackups.PruneOpts{ RetentionType: string(b.retention.RetentionType), MaxNumberOfBackups: b.retention.RetentionNumber.MaxNumberOfBackups, MaxSizeOfBackupsGb: b.retention.RetentionSize.MaxSizeOfBackupsGb, From c631a665abd160432f1c773ea780de3d98f75fbf Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 10 Jul 2024 16:01:03 +0200 Subject: [PATCH 14/22] bump go.mod --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/go.mod b/go.mod index 55ae5696e..7a3409cd3 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/go-co-op/gocron/v2 v2.8.0 github.com/google/go-cmp v0.6.0 + github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/openshift/api v0.0.0-20240527133614-ba11c1587003 github.com/openshift/build-machinery-go v0.0.0-20240419090851-af9c868bcf52 @@ -71,7 +72,6 @@ require ( github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect From 19d40997431cf0a3e291f71e389c86c5fa27dceb Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 11 Jul 2024 15:32:25 +0200 Subject: [PATCH 15/22] bump go mod sum --- go.mod | 3 +-- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7a3409cd3..57bddc337 100644 --- a/go.mod +++ b/go.mod @@ -5,10 +5,10 @@ go 1.22.0 toolchain go1.22.1 require ( + github.com/adhocore/gronx v1.8.1 github.com/davecgh/go-spew v1.1.1 github.com/ghodss/yaml v1.0.0 github.com/go-bindata/go-bindata v3.1.2+incompatible - github.com/go-co-op/gocron/v2 v2.8.0 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 @@ -91,7 +91,6 @@ require ( github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/robfig/cron v1.2.0 // indirect - github.com/robfig/cron/v3 v3.0.1 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect diff --git a/go.sum b/go.sum index 0c04ef7b1..4de7e8ecd 100644 --- a/go.sum +++ b/go.sum 
@@ -21,6 +21,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/adhocore/gronx v1.8.1 h1:F2mLTG5sB11z7vplwD4iydz3YCEjstSfYmCrdSm3t6A= +github.com/adhocore/gronx v1.8.1/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -111,8 +113,6 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE= github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-co-op/gocron/v2 v2.8.0 h1:nyD8u9nsuTaJx9z+Fem5/U+bEnKHzPNYxhrwB5gyYvU= -github.com/go-co-op/gocron/v2 v2.8.0/go.mod h1:xY7bJxGazKam1cz04EebrlP4S9q4iWdiAylMGP3jY9w= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -356,8 +356,6 @@ github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPH github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/robfig/cron v1.2.0 
h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= -github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= From 9987b565a83257b7a32a99f8186f409e9bed51c5 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 11 Jul 2024 15:32:52 +0200 Subject: [PATCH 16/22] use cron daemon --- pkg/cmd/backuprestore/backupnoconfig.go | 67 ++++++++++--------------- 1 file changed, 26 insertions(+), 41 deletions(-) diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 13b259a5b..a1ff55c12 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -11,30 +11,28 @@ import ( prunebackups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - gcron "github.com/go-co-op/gocron/v2" - "github.com/google/uuid" + "github.com/adhocore/gronx/pkg/tasker" "github.com/spf13/cobra" "github.com/spf13/pflag" ) type backupNoConfig struct { - kubeConfig string - snapshotExist bool - schedule string - retention backupv1alpha1.RetentionPolicy - scheduler gcron.Scheduler + kubeConfig string + schedule string + timeZone string + retention backupv1alpha1.RetentionPolicy + scheduler *tasker.Tasker backupOptions } func NewBackupNoConfigCommand(errOut io.Writer) *cobra.Command { backupNoConf := &backupNoConfig{ - snapshotExist: false, backupOptions: backupOptions{errOut: 
errOut}, } + cmd := &cobra.Command{ Use: "backup-server", Short: "Backs up a snapshot of etcd database and static pod resources without config", @@ -70,22 +68,27 @@ func (b *backupNoConfig) Run() error { return err } - b.scheduler, _ = gcron.NewScheduler( - gcron.WithLimitConcurrentJobs(1, gcron.LimitModeWait), - gcron.WithGlobalJobOptions( - gcron.WithLimitedRuns(1), - gcron.WithSingletonMode(gcron.LimitModeWait))) - defer func() { _ = b.scheduler.Shutdown() }() - if err = b.extractBackupSpecs(backupsClient); err != nil { return err } + b.scheduler = tasker.New(tasker.Option{ + Verbose: true, + Tz: b.timeZone, + }) + err = b.scheduleBackup() if err != nil { return err } + doneCh := make(chan struct{}) + go func() { + b.scheduler.Run() + doneCh <- struct{}{} + }() + + <-doneCh return nil } @@ -133,37 +136,19 @@ func (b *backupNoConfig) extractBackupSpecs(backupsClient backupv1client.Backups defaultBackupCR := backups.Items[idx] b.schedule = defaultBackupCR.Spec.EtcdBackupSpec.Schedule b.retention = defaultBackupCR.Spec.EtcdBackupSpec.RetentionPolicy + b.timeZone = defaultBackupCR.Spec.EtcdBackupSpec.TimeZone return nil } -func (b *backupNoConfig) backup() error { - return backup(&b.backupOptions) -} - func (b *backupNoConfig) scheduleBackup() error { - var errs []error - - if _, err := b.scheduler.NewJob( - gcron.CronJob( - b.schedule, - false, - ), - gcron.NewTask(b.backup()), - gcron.WithEventListeners( - gcron.AfterJobRuns( - func(jobID uuid.UUID, jobName string) { - err := b.pruneBackups() - errs = append(errs, err) - }, - ), - ), - ); err != nil { - errs = append(errs, err) - } + var err error + b.scheduler.Task(b.schedule, func(ctx context.Context) (int, error) { + err = backup(&b.backupOptions) + return 0, err + }, false) - b.scheduler.Start() - return utilerrors.NewAggregate(errs) + return err } func (b *backupNoConfig) pruneBackups() error { From 26deca1e0281402fdbd4b43030886a0142870983 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 11 Jul 2024 
15:41:43 +0200 Subject: [PATCH 17/22] add vendor --- go.mod | 2 +- .../github.com/adhocore/gronx/.editorconfig | 13 + vendor/github.com/adhocore/gronx/.gitignore | 11 + .../github.com/adhocore/gronx/.goreleaser.yml | 67 ++ vendor/github.com/adhocore/gronx/CHANGELOG.md | 119 ++ .../gocron/v2 => adhocore/gronx}/LICENSE | 2 +- vendor/github.com/adhocore/gronx/README.md | 328 ++++++ vendor/github.com/adhocore/gronx/VERSION | 1 + vendor/github.com/adhocore/gronx/batch.go | 51 + vendor/github.com/adhocore/gronx/checker.go | 132 +++ vendor/github.com/adhocore/gronx/gronx.go | 178 +++ vendor/github.com/adhocore/gronx/next.go | 195 +++ .../adhocore/gronx/pkg/tasker/README.md | 169 +++ .../adhocore/gronx/pkg/tasker/parser.go | 107 ++ .../adhocore/gronx/pkg/tasker/tasker.go | 417 +++++++ vendor/github.com/adhocore/gronx/prev.go | 57 + vendor/github.com/adhocore/gronx/validator.go | 147 +++ .../github.com/go-co-op/gocron/v2/.gitignore | 20 - .../go-co-op/gocron/v2/.golangci.yaml | 49 - .../gocron/v2/.pre-commit-config.yaml | 24 - .../go-co-op/gocron/v2/CODE_OF_CONDUCT.md | 73 -- .../go-co-op/gocron/v2/CONTRIBUTING.md | 38 - vendor/github.com/go-co-op/gocron/v2/Makefile | 22 - .../github.com/go-co-op/gocron/v2/README.md | 176 --- .../github.com/go-co-op/gocron/v2/SECURITY.md | 16 - .../go-co-op/gocron/v2/distributed.go | 30 - .../github.com/go-co-op/gocron/v2/errors.go | 56 - .../github.com/go-co-op/gocron/v2/executor.go | 487 -------- vendor/github.com/go-co-op/gocron/v2/job.go | 1042 ----------------- .../github.com/go-co-op/gocron/v2/logger.go | 101 -- .../github.com/go-co-op/gocron/v2/monitor.go | 27 - .../go-co-op/gocron/v2/scheduler.go | 861 -------------- vendor/github.com/go-co-op/gocron/v2/util.go | 118 -- vendor/github.com/robfig/cron/v3/.gitignore | 22 - vendor/github.com/robfig/cron/v3/.travis.yml | 1 - vendor/github.com/robfig/cron/v3/LICENSE | 21 - vendor/github.com/robfig/cron/v3/README.md | 125 -- vendor/github.com/robfig/cron/v3/chain.go | 92 -- 
.../robfig/cron/v3/constantdelay.go | 27 - vendor/github.com/robfig/cron/v3/cron.go | 355 ------ vendor/github.com/robfig/cron/v3/doc.go | 231 ---- vendor/github.com/robfig/cron/v3/logger.go | 86 -- vendor/github.com/robfig/cron/v3/option.go | 45 - vendor/github.com/robfig/cron/v3/parser.go | 434 ------- vendor/github.com/robfig/cron/v3/spec.go | 188 --- vendor/golang.org/x/exp/maps/maps.go | 94 -- vendor/modules.txt | 11 +- 47 files changed, 1998 insertions(+), 4870 deletions(-) create mode 100644 vendor/github.com/adhocore/gronx/.editorconfig create mode 100644 vendor/github.com/adhocore/gronx/.gitignore create mode 100644 vendor/github.com/adhocore/gronx/.goreleaser.yml create mode 100644 vendor/github.com/adhocore/gronx/CHANGELOG.md rename vendor/github.com/{go-co-op/gocron/v2 => adhocore/gronx}/LICENSE (96%) create mode 100644 vendor/github.com/adhocore/gronx/README.md create mode 100644 vendor/github.com/adhocore/gronx/VERSION create mode 100644 vendor/github.com/adhocore/gronx/batch.go create mode 100644 vendor/github.com/adhocore/gronx/checker.go create mode 100644 vendor/github.com/adhocore/gronx/gronx.go create mode 100644 vendor/github.com/adhocore/gronx/next.go create mode 100644 vendor/github.com/adhocore/gronx/pkg/tasker/README.md create mode 100644 vendor/github.com/adhocore/gronx/pkg/tasker/parser.go create mode 100644 vendor/github.com/adhocore/gronx/pkg/tasker/tasker.go create mode 100644 vendor/github.com/adhocore/gronx/prev.go create mode 100644 vendor/github.com/adhocore/gronx/validator.go delete mode 100644 vendor/github.com/go-co-op/gocron/v2/.gitignore delete mode 100644 vendor/github.com/go-co-op/gocron/v2/.golangci.yaml delete mode 100644 vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml delete mode 100644 vendor/github.com/go-co-op/gocron/v2/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-co-op/gocron/v2/CONTRIBUTING.md delete mode 100644 vendor/github.com/go-co-op/gocron/v2/Makefile delete mode 100644 
vendor/github.com/go-co-op/gocron/v2/README.md delete mode 100644 vendor/github.com/go-co-op/gocron/v2/SECURITY.md delete mode 100644 vendor/github.com/go-co-op/gocron/v2/distributed.go delete mode 100644 vendor/github.com/go-co-op/gocron/v2/errors.go delete mode 100644 vendor/github.com/go-co-op/gocron/v2/executor.go delete mode 100644 vendor/github.com/go-co-op/gocron/v2/job.go delete mode 100644 vendor/github.com/go-co-op/gocron/v2/logger.go delete mode 100644 vendor/github.com/go-co-op/gocron/v2/monitor.go delete mode 100644 vendor/github.com/go-co-op/gocron/v2/scheduler.go delete mode 100644 vendor/github.com/go-co-op/gocron/v2/util.go delete mode 100644 vendor/github.com/robfig/cron/v3/.gitignore delete mode 100644 vendor/github.com/robfig/cron/v3/.travis.yml delete mode 100644 vendor/github.com/robfig/cron/v3/LICENSE delete mode 100644 vendor/github.com/robfig/cron/v3/README.md delete mode 100644 vendor/github.com/robfig/cron/v3/chain.go delete mode 100644 vendor/github.com/robfig/cron/v3/constantdelay.go delete mode 100644 vendor/github.com/robfig/cron/v3/cron.go delete mode 100644 vendor/github.com/robfig/cron/v3/doc.go delete mode 100644 vendor/github.com/robfig/cron/v3/logger.go delete mode 100644 vendor/github.com/robfig/cron/v3/option.go delete mode 100644 vendor/github.com/robfig/cron/v3/parser.go delete mode 100644 vendor/github.com/robfig/cron/v3/spec.go delete mode 100644 vendor/golang.org/x/exp/maps/maps.go diff --git a/go.mod b/go.mod index 57bddc337..39cdc67e6 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,6 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/google/go-cmp v0.6.0 - github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/openshift/api v0.0.0-20240527133614-ba11c1587003 github.com/openshift/build-machinery-go v0.0.0-20240419090851-af9c868bcf52 @@ -72,6 +71,7 @@ require ( github.com/google/cel-go v0.17.8 // indirect 
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect diff --git a/vendor/github.com/adhocore/gronx/.editorconfig b/vendor/github.com/adhocore/gronx/.editorconfig new file mode 100644 index 000000000..e0e124aee --- /dev/null +++ b/vendor/github.com/adhocore/gronx/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.go] +indent_style = tab +tab_width = 2 diff --git a/vendor/github.com/adhocore/gronx/.gitignore b/vendor/github.com/adhocore/gronx/.gitignore new file mode 100644 index 000000000..12ae865c8 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/.gitignore @@ -0,0 +1,11 @@ +.idea/ +.DS_Store +*~ +*.out +vendor/ +dist/ +.env +bin/ +*.php +test/*.go +*.txt diff --git a/vendor/github.com/adhocore/gronx/.goreleaser.yml b/vendor/github.com/adhocore/gronx/.goreleaser.yml new file mode 100644 index 000000000..b52f6d997 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/.goreleaser.yml @@ -0,0 +1,67 @@ +project_name: tasker + +release: + prerelease: auto + name_template: "Version v{{.Version}}" + # draft: true + mode: "keep-existing" + +before: + hooks: + - go mod tidy + +builds: + - <<: &build_defaults + binary: bin/tasker + main: ./cmd/tasker + ldflags: + - -X main.Version={{.Version}} + env: + - CGO_ENABLED=0 + id: macOS + goos: [darwin] + goarch: [amd64, arm64] + + - <<: *build_defaults + id: linux + goos: [linux] + goarch: [386, arm, amd64, arm64] + + - <<: *build_defaults + id: windows + goos: [windows] + goarch: [amd64] + +archives: + - id: nix + builds: [macOS, linux] + <<: &archive_defaults + name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ 
if .Arm }}v{{ .Arm }}{{ end }}" + wrap_in_directory: true + rlcp: true + format: tar.gz + files: + - LICENSE + + - id: windows + builds: [windows] + <<: *archive_defaults + wrap_in_directory: false + format: zip + files: + - LICENSE + +checksum: + name_template: 'checksums.txt' + algorithm: sha256 + +changelog: + skip: true + use: github + sort: desc + filters: + exclude: + - '^doc:' + - '^dev:' + - '^build:' + - '^ci:' diff --git a/vendor/github.com/adhocore/gronx/CHANGELOG.md b/vendor/github.com/adhocore/gronx/CHANGELOG.md new file mode 100644 index 000000000..9224d685e --- /dev/null +++ b/vendor/github.com/adhocore/gronx/CHANGELOG.md @@ -0,0 +1,119 @@ +## [v0.2.7](https://github.com/adhocore/gronx/releases/tag/v0.2.7) (2022-06-28) + +### Miscellaneous +- **Workflow**: Run tests on 1.18x (Jitendra) +- Tests for go v1.17.x, add codecov (Jitendra) + + +## [v0.2.6](https://github.com/adhocore/gronx/releases/tag/v0.2.6) (2021-10-14) + +### Miscellaneous +- Fix 'with' languages (Jitendra Adhikari) [_a813b55_](https://github.com/adhocore/gronx/commit/a813b55) +- Init/setup github codeql (Jitendra Adhikari) [_fe2aa5a_](https://github.com/adhocore/gronx/commit/fe2aa5a) + + +## [v0.2.5](https://github.com/adhocore/gronx/releases/tag/v0.2.5) (2021-07-25) + +### Bug Fixes +- **Tasker**: The clause should be using OR (Jitendra Adhikari) [_b813b85_](https://github.com/adhocore/gronx/commit/b813b85) + + +## [v0.2.4](https://github.com/adhocore/gronx/releases/tag/v0.2.4) (2021-05-05) + +### Features +- **Pkg.tasker**: Capture cmd output in tasker logger, error in stderr (Jitendra Adhikari) [_0da0aae_](https://github.com/adhocore/gronx/commit/0da0aae) + +### Internal Refactors +- **Cmd.tasker**: Taskify is now method of tasker (Jitendra Adhikari) [_8b1373b_](https://github.com/adhocore/gronx/commit/8b1373b) + + +## [v0.2.3](https://github.com/adhocore/gronx/releases/tag/v0.2.3) (2021-05-04) + +### Bug Fixes +- **Pkg.tasker**: Sleep 100ms so abort can be bailed asap, remove dup 
msg (Jitendra Adhikari) [_d868920_](https://github.com/adhocore/gronx/commit/d868920) + +### Miscellaneous +- Allow leeway period at the end (Jitendra Adhikari) [_5ebf923_](https://github.com/adhocore/gronx/commit/5ebf923) + + +## [v0.2.2](https://github.com/adhocore/gronx/releases/tag/v0.2.2) (2021-05-03) + +### Bug Fixes +- **Pkg.tasker**: DoRun checks if timed out before run (Jitendra Adhikari) [_f27a657_](https://github.com/adhocore/gronx/commit/f27a657) + +### Internal Refactors +- **Pkg.tasker**: Use dateFormat var, update final tick phrase (Jitendra Adhikari) [_fad0271_](https://github.com/adhocore/gronx/commit/fad0271) + + +## [v0.2.1](https://github.com/adhocore/gronx/releases/tag/v0.2.1) (2021-05-02) + +### Bug Fixes +- **Pkg.tasker**: Deprecate sleep dur if next tick timeout (Jitendra Adhikari) [_3de45a1_](https://github.com/adhocore/gronx/commit/3de45a1) + + +## [v0.2.0](https://github.com/adhocore/gronx/releases/tag/v0.2.0) (2021-05-02) + +### Features +- **Cmd.tasker**: Add tasker for standalone usage as task daemon (Jitendra Adhikari) [_0d99409_](https://github.com/adhocore/gronx/commit/0d99409) +- **Pkg.tasker**: Add parser for tasker pkg (Jitendra Adhikari) [_e7f1811_](https://github.com/adhocore/gronx/commit/e7f1811) +- **Pkg.tasker**: Add tasker pkg (Jitendra Adhikari) [_a57b1c4_](https://github.com/adhocore/gronx/commit/a57b1c4) + +### Bug Fixes +- **Pkg.tasker**: Use log.New() instead (Jitendra Adhikari) [_0cf2c07_](https://github.com/adhocore/gronx/commit/0cf2c07) +- **Validator**: This check is not really required (Jitendra Adhikari) [_c3d75e3_](https://github.com/adhocore/gronx/commit/c3d75e3) + +### Internal Refactors +- **Gronx**: Add public methods for internal usage, expose spaceRe (Jitendra Adhikari) [_94eb20b_](https://github.com/adhocore/gronx/commit/94eb20b) + +### Miscellaneous +- **Pkg.tasker**: Use file perms as octal (Jitendra Adhikari) [_83f258d_](https://github.com/adhocore/gronx/commit/83f258d) +- **Workflow**: Include all 
tests in action (Jitendra Adhikari) [_7328cbf_](https://github.com/adhocore/gronx/commit/7328cbf) + +### Documentations +- Add task mangager and tasker docs/usages (Jitendra Adhikari) [_e77aa5f_](https://github.com/adhocore/gronx/commit/e77aa5f) + + +## [v0.1.4](https://github.com/adhocore/gronx/releases/tag/v0.1.4) (2021-04-25) + +### Miscellaneous +- **Mod**: 1.13 is okay too (Jitendra Adhikari) [_6c328e7_](https://github.com/adhocore/gronx/commit/6c328e7) +- Try go 1.13.x (Jitendra Adhikari) [_b017ec4_](https://github.com/adhocore/gronx/commit/b017ec4) + +### Documentations +- Practical usage (Jitendra Adhikari) [_9572e61_](https://github.com/adhocore/gronx/commit/9572e61) + + +## [v0.1.3](https://github.com/adhocore/gronx/releases/tag/v0.1.3) (2021-04-22) + +### Internal Refactors +- **Checker**: Preserve error, for pos 2 & 4 bail only on due or err (Jitendra Adhikari) [_39a9cd5_](https://github.com/adhocore/gronx/commit/39a9cd5) +- **Validator**: Do not discard error from strconv (Jitendra Adhikari) [_3b0f444_](https://github.com/adhocore/gronx/commit/3b0f444) + + +## [v0.1.2](https://github.com/adhocore/gronx/releases/tag/v0.1.2) (2021-04-21) + +### Features +- Add IsValid() (Jitendra Adhikari) [_150687b_](https://github.com/adhocore/gronx/commit/150687b) + +### Documentations +- IsValid usage (Jitendra Adhikari) [_b747116_](https://github.com/adhocore/gronx/commit/b747116) + + +## [v0.1.1](https://github.com/adhocore/gronx/releases/tag/v0.1.1) (2021-04-21) + +### Features +- Add main gronx api (Jitendra Adhikari) [_1b3b108_](https://github.com/adhocore/gronx/commit/1b3b108) +- Add cron segment checker (Jitendra Adhikari) [_a56be7c_](https://github.com/adhocore/gronx/commit/a56be7c) +- Add validator (Jitendra Adhikari) [_455a024_](https://github.com/adhocore/gronx/commit/455a024) + +### Miscellaneous +- **Workflow**: Update actions (Jitendra Adhikari) [_8b54cc3_](https://github.com/adhocore/gronx/commit/8b54cc3) +- Init module (Jitendra Adhikari) 
[_bada37d_](https://github.com/adhocore/gronx/commit/bada37d) +- Add license (Jitendra Adhikari) [_5f20b96_](https://github.com/adhocore/gronx/commit/5f20b96) +- **Gh**: Add meta files (Jitendra Adhikari) [_35a1310_](https://github.com/adhocore/gronx/commit/35a1310) +- **Workflow**: Add lint/test actions (Jitendra Adhikari) [_884d5cb_](https://github.com/adhocore/gronx/commit/884d5cb) +- Add editorconfig (Jitendra Adhikari) [_8b75494_](https://github.com/adhocore/gronx/commit/8b75494) + +### Documentations +- On cron expressions (Jitendra Adhikari) [_547fd72_](https://github.com/adhocore/gronx/commit/547fd72) +- Add readme (Jitendra Adhikari) [_3955e88_](https://github.com/adhocore/gronx/commit/3955e88) diff --git a/vendor/github.com/go-co-op/gocron/v2/LICENSE b/vendor/github.com/adhocore/gronx/LICENSE similarity index 96% rename from vendor/github.com/go-co-op/gocron/v2/LICENSE rename to vendor/github.com/adhocore/gronx/LICENSE index 3357d57d7..f11487644 100644 --- a/vendor/github.com/go-co-op/gocron/v2/LICENSE +++ b/vendor/github.com/adhocore/gronx/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2014, 辣椒面 +Copyright (c) 2021-2099 Jitendra Adhikari Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/adhocore/gronx/README.md b/vendor/github.com/adhocore/gronx/README.md new file mode 100644 index 000000000..0b3b78f5c --- /dev/null +++ b/vendor/github.com/adhocore/gronx/README.md @@ -0,0 +1,328 @@ +# adhocore/gronx + +[![Latest Version](https://img.shields.io/github/release/adhocore/gronx.svg?style=flat-square)](https://github.com/adhocore/gronx/releases) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE) +[![Go Report](https://goreportcard.com/badge/github.com/adhocore/gronx)](https://goreportcard.com/report/github.com/adhocore/gronx) 
+[![Test](https://github.com/adhocore/gronx/actions/workflows/test-action.yml/badge.svg)](https://github.com/adhocore/gronx/actions/workflows/test-action.yml) +[![Lint](https://github.com/adhocore/gronx/actions/workflows/lint-action.yml/badge.svg)](https://github.com/adhocore/gronx/actions/workflows/lint-action.yml) +[![Codecov](https://img.shields.io/codecov/c/github/adhocore/gronx/main.svg?style=flat-square)](https://codecov.io/gh/adhocore/gronx) +[![Support](https://img.shields.io/static/v1?label=Support&message=%E2%9D%A4&logo=GitHub)](https://github.com/sponsors/adhocore) +[![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/intent/tweet?text=Lightweight+fast+and+deps+free+cron+expression+parser+for+Golang&url=https://github.com/adhocore/gronx&hashtags=go,golang,parser,cron,cronexpr,cronparser) + +`gronx` is Golang [cron expression](#cron-expression) parser ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr) with task runner +and daemon that supports crontab like task list file. Use it programatically in Golang or as standalone binary instead of crond. If that's not enough, you can use gronx to find the next (`NextTick()`) or previous (`PrevTick()`) run time of an expression from any arbitrary point of time. + +- Zero dependency. +- Very **fast** because it bails early in case a segment doesn't match. +- Built in crontab like daemon. +- Supports time granularity of Seconds. + +Find gronx in [pkg.go.dev](https://pkg.go.dev/github.com/adhocore/gronx). 
+ +## Installation + +```sh +go get -u github.com/adhocore/gronx +``` + +## Usage + +```go +import ( + "time" + + "github.com/adhocore/gronx" +) + +gron := gronx.New() +expr := "* * * * *" + +// check if expr is even valid, returns bool +gron.IsValid(expr) // true + +// check if expr is due for current time, returns bool and error +gron.IsDue(expr) // true|false, nil + +// check if expr is due for given time +gron.IsDue(expr, time.Date(2021, time.April, 1, 1, 1, 0, 0, time.UTC)) // true|false, nil +``` + +### Batch Due Check + +If you have multiple cron expressions to check due on same reference time use `BatchDue()`: +```go +gron := gronx.New() +exprs := []string{"* * * * *", "0 */5 * * * *"} + +// gives []gronx.Expr{} array, each item has Due flag and Err enountered. +dues := gron.BatchDue(exprs) + +for _, expr := range dues { + if expr.Err != nil { + // Handle err + } else if expr.Due { + // Handle due + } +} + +// Or with given time +ref := time.Now() +gron.BatchDue(exprs, ref) +``` + +### Next Tick + +To find out when is the cron due next (in near future): +```go +allowCurrent = true // includes current time as well +nextTime, err := gronx.NextTick(expr, allowCurrent) // gives time.Time, error + +// OR, next tick after certain reference time +refTime = time.Date(2022, time.November, 1, 1, 1, 0, 0, time.UTC) +allowCurrent = false // excludes the ref time +nextTime, err := gronx.NextTickAfter(expr, refTime, allowCurrent) // gives time.Time, error +``` + +### Prev Tick + +To find out when was the cron due previously (in near past): +```go +allowCurrent = true // includes current time as well +prevTime, err := gronx.PrevTick(expr, allowCurrent) // gives time.Time, error + +// OR, prev tick before certain reference time +refTime = time.Date(2022, time.November, 1, 1, 1, 0, 0, time.UTC) +allowCurrent = false // excludes the ref time +nextTime, err := gronx.PrevTickBefore(expr, refTime, allowCurrent) // gives time.Time, error +``` + +> The working of `PrevTick*()` 
and `NextTick*()` are mostly the same except the direction. +> They differ in lookback or lookahead. + +### Standalone Daemon + +In a more practical level, you would use this tool to manage and invoke jobs in app itself and not +mess around with `crontab` for each and every new tasks/jobs. + +In crontab just put one entry with `* * * * *` which points to your Go entry point that uses this tool. +Then in that entry point you would invoke different tasks if the corresponding Cron expr is due. +Simple map structure would work for this. + +Check the section below for more sophisticated way of managing tasks automatically using `gronx` daemon called `tasker`. + +--- +### Go Tasker + +Tasker is a task manager that can be programatically used in Golang applications. It runs as a daemon and invokes tasks scheduled with cron expression: +```go +package main + +import ( + "context" + "time" + + "github.com/adhocore/gronx/pkg/tasker" +) + +func main() { + taskr := tasker.New(tasker.Option{ + Verbose: true, + // optional: defaults to local + Tz: "Asia/Bangkok", + // optional: defaults to stderr log stream + Out: "/full/path/to/output-file", + }) + + // add task to run every minute + taskr.Task("* * * * *", func(ctx context.Context) (int, error) { + // do something ... + + // then return exit code and error, for eg: if everything okay + return 0, nil + }).Task("*/5 * * * *", func(ctx context.Context) (int, error) { // every 5 minutes + // you can also log the output to Out file as configured in Option above: + taskr.Log.Printf("done something in %d s", 2) + + return 0, nil + }) + + // run task without overlap, set concurrent flag to false: + concurrent := false + taskr.Task("* * * * * *", , tasker.Taskify("sleep 2", tasker.Option{}), concurrent) + + // every 10 minute with arbitrary command + taskr.Task("@10minutes", taskr.Taskify("command --option val -- args", tasker.Option{Shell: "/bin/sh -c"})) + + // ... 
add more tasks + + // optionally if you want tasker to stop after 2 hour, pass the duration with Until(): + taskr.Until(2 * time.Hour) + + // finally run the tasker, it ticks sharply on every minute and runs all the tasks due on that time! + // it exits gracefully when ctrl+c is received making sure pending tasks are completed. + taskr.Run() +} +``` + +#### Concurrency + +By default the tasks can run concurrently i.e if previous run is still not finished +but it is now due again, it will run again. +If you want to run only one instance of a task at a time, set concurrent flag to false: + +```go +taskr := tasker.New(tasker.Option{}) + +concurrent := false +expr, task := "* * * * * *", tasker.Taskify("php -r 'sleep(2);'") +taskr.Task(expr, task, concurrent) +``` + +### Task Daemon + +It can also be used as standalone task daemon instead of programmatic usage for Golang application. + +First, just install tasker command: +```sh +go install github.com/adhocore/gronx/cmd/tasker@latest +``` + +Or you can also download latest prebuilt binary from [release](https://github.com/adhocore/gronx/releases/latest) for platform of your choice. + +Then prepare a taskfile ([example](./tests/../test/taskfile.txt)) in crontab format +(or can even point to existing crontab). +> `user` is not supported: it is just cron expr followed by the command. + +Finally run the task daemon like so +``` +tasker -file path/to/taskfile +``` +> You can pass more options to control the behavior of task daemon, see below. 
+ +#### Tasker command options: + +```txt +-file string + The task file in crontab format +-out string + The fullpath to file where output from tasks are sent to +-shell string + The shell to use for running tasks (default "/usr/bin/bash") +-tz string + The timezone to use for tasks (default "Local") +-until int + The timeout for task daemon in minutes +-verbose + The verbose mode outputs as much as possible +``` + +Examples: +```sh +tasker -verbose -file path/to/taskfile -until 120 # run until next 120min (i.e 2hour) with all feedbacks echoed back +tasker -verbose -file path/to/taskfile -out path/to/output # with all feedbacks echoed to the output file +tasker -tz America/New_York -file path/to/taskfile -shell zsh # run all tasks using zsh shell based on NY timezone +``` + +> File extension of taskfile for (`-file` option) does not matter: can be any or none. +> The directory for outfile (`-out` option) must exist, file is created by task daemon. + +> Same timezone applies for all tasks currently and it might support overriding timezone per task in future release. + +#### Notes on Windows + +In Windows if it doesn't find `bash.exe` or `git-bash.exe` it will use `powershell`. +`powershell` may not be compatible with Unix flavored commands. Also to note: +you can't do chaining with `cmd1 && cmd2` but rather `cmd1 ; cmd2`. + +--- +### Cron Expression + +A complete cron expression consists of 7 segments viz: +``` + +``` + +However only 5 will do and this is most commonly used. 5 segments are interpreted as: +``` + +``` +in which case a default value of 0 is prepended for `` position. + +In a 6 segments expression, if 6th segment matches `` (i.e 4 digits at least) it will be interpreted as: +``` + +``` +and a default value of 0 is prepended for `` position. + +For each segments you can have **multiple choices** separated by comma: +> Eg: `0 0,30 * * * *` means either 0th or 30th minute. 
+ +To specify **range of values** you can use dash: +> Eg: `0 10-15 * * * *` means 10th, 11th, 12th, 13th, 14th and 15th minute. + +To specify **range of step** you can combine a dash and slash: +> Eg: `0 10-15/2 * * * *` means every 2 minutes between 10 and 15 i.e 10th, 12th and 14th minute. + +For the `` and `` segment, there are additional [**modifiers**](#modifiers) (optional). + +And if you want, you can mix the multiple choices, ranges and steps in a single expression: +> `0 5,12-20/4,55 * * * *` matches if any one of `5` or `12-20/4` or `55` matches the minute. + +### Real Abbreviations + +You can use real abbreviations (3 chars) for month and week days. eg: `JAN`, `dec`, `fri`, `SUN` + +### Tags + +Following tags are available and they are converted to real cron expressions before parsing: + +- *@yearly* or *@annually* - every year +- *@monthly* - every month +- *@daily* - every day +- *@weekly* - every week +- *@hourly* - every hour +- *@5minutes* - every 5 minutes +- *@10minutes* - every 10 minutes +- *@15minutes* - every 15 minutes +- *@30minutes* - every 30 minutes +- *@always* - every minute +- *@everysecond* - every second + +> For BC reasons, `@always` still means every minute for now, in future release it may mean every seconds instead. 
+ +```go +// Use tags like so: +gron.IsDue("@hourly") +gron.IsDue("@5minutes") +``` + +### Modifiers + +Following modifiers supported + +- *Day of Month / 3rd of 5 segments / 4th of 6+ segments:* + - `L` stands for last day of month (eg: `L` could mean 29th for February in leap year) + - `W` stands for closest week day (eg: `10W` is closest week days (MON-FRI) to 10th date) +- *Day of Week / 5th of 5 segments / 6th of 6+ segments:* + - `L` stands for last weekday of month (eg: `2L` is last tuesday) + - `#` stands for nth day of week in the month (eg: `1#2` is second monday) + +--- +## License + +> © [MIT](./LICENSE) | 2021-2099, Jitendra Adhikari + +## Credits + +This project is ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr) and +release managed by [please](https://github.com/adhocore/please). + +--- +### Other projects + +My other golang projects you might find interesting and useful: + +- [**urlsh**](https://github.com/adhocore/urlsh) - URL shortener and bookmarker service with UI, API, Cache, Hits Counter and forwarder using postgres and redis in backend, bulma in frontend; has [web](https://urlssh.xyz) and cli client +- [**fast**](https://github.com/adhocore/fast) - Check your internet speed with ease and comfort right from the terminal +- [**goic**](https://github.com/adhocore/goic) - Go Open ID Connect, is OpenID connect client library for Golang, supports the Authorization Code Flow of OpenID Connect specification. +- [**chin**](https://github.com/adhocore/chin) - A Go lang command line tool to show a spinner as user waits for some long running jobs to finish. 
diff --git a/vendor/github.com/adhocore/gronx/VERSION b/vendor/github.com/adhocore/gronx/VERSION new file mode 100644 index 000000000..34707cbb1 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/VERSION @@ -0,0 +1 @@ +v0.2.7 diff --git a/vendor/github.com/adhocore/gronx/batch.go b/vendor/github.com/adhocore/gronx/batch.go new file mode 100644 index 000000000..63d85ec2d --- /dev/null +++ b/vendor/github.com/adhocore/gronx/batch.go @@ -0,0 +1,51 @@ +package gronx + +import ( + "strings" + "time" +) + +// Expr represents an item in array for batch check +type Expr struct { + Expr string + Due bool + Err error +} + +// BatchDue checks if multiple expressions are due for given time (or now). +// It returns []Expr with filled in Due and Err values. +func (g *Gronx) BatchDue(exprs []string, ref ...time.Time) []Expr { + ref = append(ref, time.Now()) + g.C.SetRef(ref[0]) + + var segs []string + + cache, batch := map[string]Expr{}, make([]Expr, len(exprs)) + for i := range exprs { + batch[i].Expr = exprs[i] + segs, batch[i].Err = Segments(exprs[i]) + key := strings.Join(segs, " ") + if batch[i].Err != nil { + cache[key] = batch[i] + continue + } + + if c, ok := cache[key]; ok { + batch[i] = c + batch[i].Expr = exprs[i] + continue + } + + due := true + for pos, seg := range segs { + if seg != "*" && seg != "?" { + if due, batch[i].Err = g.C.CheckDue(seg, pos); !due || batch[i].Err != nil { + break + } + } + } + batch[i].Due = due + cache[key] = batch[i] + } + return batch +} diff --git a/vendor/github.com/adhocore/gronx/checker.go b/vendor/github.com/adhocore/gronx/checker.go new file mode 100644 index 000000000..8ce1d9d90 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/checker.go @@ -0,0 +1,132 @@ +package gronx + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Checker is interface for cron segment due check. 
+type Checker interface { + GetRef() time.Time + SetRef(ref time.Time) + CheckDue(segment string, pos int) (bool, error) +} + +// SegmentChecker is factory implementation of Checker. +type SegmentChecker struct { + ref time.Time +} + +// GetRef returns the current reference time +func (c *SegmentChecker) GetRef() time.Time { + return c.ref +} + +// SetRef sets the reference time for which to check if a cron expression is due. +func (c *SegmentChecker) SetRef(ref time.Time) { + c.ref = ref +} + +// CheckDue checks if the cron segment at given position is due. +// It returns bool or error if any. +func (c *SegmentChecker) CheckDue(segment string, pos int) (due bool, err error) { + ref, last := c.GetRef(), -1 + val, loc := valueByPos(ref, pos), ref.Location() + isMonthDay, isWeekDay := pos == 3, pos == 5 + + for _, offset := range strings.Split(segment, ",") { + mod := (isMonthDay || isWeekDay) && strings.ContainsAny(offset, "LW#") + if due, err = c.isOffsetDue(offset, val, pos); due || (!mod && err != nil) { + return + } + if !mod { + continue + } + if last == -1 { + last = time.Date(ref.Year(), ref.Month(), 1, 0, 0, 0, 0, loc).AddDate(0, 1, 0).Add(-time.Second).Day() + } + if isMonthDay { + due, err = isValidMonthDay(offset, last, ref) + } else if isWeekDay { + due, err = isValidWeekDay(offset, last, ref) + } + if due || err != nil { + return due, err + } + } + + return false, nil +} + +func (c *SegmentChecker) isOffsetDue(offset string, val, pos int) (bool, error) { + if offset == "*" || offset == "?" 
{ + return true, nil + } + + bounds, isWeekDay := boundsByPos(pos), pos == 5 + if strings.Contains(offset, "/") { + return inStep(val, offset, bounds) + } + if strings.Contains(offset, "-") { + if isWeekDay { + offset = strings.Replace(offset, "7-", "0-", 1) + } + return inRange(val, offset, bounds) + } + + nval, err := strconv.Atoi(offset) + if err != nil { + return false, err + } + + if nval < bounds[0] || nval > bounds[1] { + return false, fmt.Errorf("segment#%d: '%s' out of bounds(%d, %d)", pos, offset, bounds[0], bounds[1]) + } + + if !isWeekDay && (val == 0 || nval == 0) { + return nval == 0 && val == 0, nil + } + + return nval == val || (isWeekDay && nval == 7 && val == 0), nil +} + +func valueByPos(ref time.Time, pos int) (val int) { + switch pos { + case 0: + val = ref.Second() + case 1: + val = ref.Minute() + case 2: + val = ref.Hour() + case 3: + val = ref.Day() + case 4: + val = int(ref.Month()) + case 5: + val = int(ref.Weekday()) + case 6: + val = ref.Year() + } + return +} + +func boundsByPos(pos int) (bounds []int) { + bounds = []int{0, 0} + switch pos { + case 0, 1: + bounds = []int{0, 59} + case 2: + bounds = []int{0, 23} + case 3: + bounds = []int{1, 31} + case 4: + bounds = []int{1, 12} + case 5: + bounds = []int{0, 7} + case 6: + bounds = []int{0, 9999} + } + return +} diff --git a/vendor/github.com/adhocore/gronx/gronx.go b/vendor/github.com/adhocore/gronx/gronx.go new file mode 100644 index 000000000..958d8fb11 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/gronx.go @@ -0,0 +1,178 @@ +package gronx + +import ( + "errors" + "regexp" + "strings" + "time" +) + +var literals = strings.NewReplacer( + "SUN", "0", "MON", "1", "TUE", "2", "WED", "3", "THU", "4", "FRI", "5", "SAT", "6", + "JAN", "1", "FEB", "2", "MAR", "3", "APR", "4", "MAY", "5", "JUN", "6", "JUL", "7", + "AUG", "8", "SEP", "9", "OCT", "10", "NOV", "11", "DEC", "12", +) + +var expressions = map[string]string{ + "@yearly": "0 0 1 1 *", + "@annually": "0 0 1 1 *", + "@monthly": 
"0 0 1 * *", + "@weekly": "0 0 * * 0", + "@daily": "0 0 * * *", + "@hourly": "0 * * * *", + "@always": "* * * * *", + "@5minutes": "*/5 * * * *", + "@10minutes": "*/10 * * * *", + "@15minutes": "*/15 * * * *", + "@30minutes": "0,30 * * * *", + + "@everysecond": "* * * * * *", +} + +// AddTag adds a new custom tag representing given expr +func AddTag(tag, expr string) error { + _, ok := expressions[tag] + if ok { + return errors.New("conflict tag") + } + + segs, err := Segments(expr) + if err != nil { + return err + } + expr = strings.Join(segs, " ") + + expressions[tag] = expr + return nil +} + +// SpaceRe is regex for whitespace. +var SpaceRe = regexp.MustCompile(`\s+`) +var yearRe = regexp.MustCompile(`\d{4}`) + +func normalize(expr string) []string { + expr = strings.Trim(expr, " \t") + if e, ok := expressions[strings.ToLower(expr)]; ok { + expr = e + } + + expr = SpaceRe.ReplaceAllString(expr, " ") + expr = literals.Replace(strings.ToUpper(expr)) + + return strings.Split(strings.ReplaceAll(expr, " ", " "), " ") +} + +// Gronx is the main program. +type Gronx struct { + C Checker +} + +// New initializes Gronx with factory defaults. +func New() *Gronx { + return &Gronx{&SegmentChecker{}} +} + +// IsDue checks if cron expression is due for given reference time (or now). +// It returns bool or error if any. +func (g *Gronx) IsDue(expr string, ref ...time.Time) (bool, error) { + ref = append(ref, time.Now()) + g.C.SetRef(ref[0]) + + segs, err := Segments(expr) + if err != nil { + return false, err + } + + return g.SegmentsDue(segs) +} + +func (g *Gronx) isDue(expr string, ref time.Time) bool { + due, err := g.IsDue(expr, ref) + return err == nil && due +} + +// Segments splits expr into array array of cron parts. +// If expression contains 5 parts or 6th part is year like, it prepends a second. +// It returns array or error. 
+func Segments(expr string) ([]string, error) { + segs := normalize(expr) + slen := len(segs) + if slen < 5 || slen > 7 { + return []string{}, errors.New("expr should contain 5-7 segments separated by space") + } + + // Prepend second if required + prepend := slen == 5 || (slen == 6 && yearRe.MatchString(segs[5])) + if prepend { + segs = append([]string{"0"}, segs...) + } + + return segs, nil +} + +// SegmentsDue checks if all cron parts are due. +// It returns bool. You should use IsDue(expr) instead. +func (g *Gronx) SegmentsDue(segs []string) (bool, error) { + skipMonthDayCheck := false + for i := 0; i < len(segs); i++ { + pos := len(segs) - 1 - i + seg := segs[pos] + isMonthDay, isWeekday := pos == 3, pos == 5 + + if seg == "*" || seg == "?" { + continue + } + + if isMonthDay && skipMonthDayCheck { + continue + } + + if isWeekday { + monthDaySeg := segs[3] + intersect := strings.Index(seg, "*/") == 0 || strings.Index(monthDaySeg, "*") == 0 || monthDaySeg == "?" + + if !intersect { + due, err := g.C.CheckDue(seg, pos) + if err != nil { + return false, err + } + + monthDayDue, err := g.C.CheckDue(monthDaySeg, 3) + if due || monthDayDue { + skipMonthDayCheck = true + continue + } + + if err != nil { + return false, err + } + } + } + + if due, err := g.C.CheckDue(seg, pos); !due { + return due, err + } + } + + return true, nil +} + +// checker for validity +var checker = &SegmentChecker{ref: time.Now()} + +// IsValid checks if cron expression is valid. +// It returns bool. 
+func (g *Gronx) IsValid(expr string) bool { + segs, err := Segments(expr) + if err != nil { + return false + } + + for pos, seg := range segs { + if _, err := checker.CheckDue(seg, pos); err != nil { + return false + } + } + + return true +} diff --git a/vendor/github.com/adhocore/gronx/next.go b/vendor/github.com/adhocore/gronx/next.go new file mode 100644 index 000000000..3643374a5 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/next.go @@ -0,0 +1,195 @@ +package gronx + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" +) + +// CronDateFormat is Y-m-d H:i (seconds are not significant) +const CronDateFormat = "2006-01-02 15:04" + +// FullDateFormat is Y-m-d H:i:s (with seconds) +const FullDateFormat = "2006-01-02 15:04:05" + +// NextTick gives next run time from now +func NextTick(expr string, inclRefTime bool) (time.Time, error) { + return NextTickAfter(expr, time.Now(), inclRefTime) +} + +// NextTickAfter gives next run time from the provided time.Time +func NextTickAfter(expr string, start time.Time, inclRefTime bool) (time.Time, error) { + gron, next := New(), start.Truncate(time.Second) + due, err := gron.IsDue(expr, start) + if err != nil || (due && inclRefTime) { + return start, err + } + + segments, _ := Segments(expr) + if len(segments) > 6 && isUnreachableYear(segments[6], next, inclRefTime, false) { + return next, fmt.Errorf("unreachable year segment: %s", segments[6]) + } + + next, err = loop(gron, segments, next, inclRefTime, false) + // Ignore superfluous err + if err != nil && gron.isDue(expr, next) { + err = nil + } + return next, err +} + +func loop(gron *Gronx, segments []string, start time.Time, incl bool, reverse bool) (next time.Time, err error) { + iter, next, bumped := 500, start, false +over: + for iter > 0 { + iter-- + skipMonthDayForIter := false + for i := 0; i < len(segments); i++ { + pos := len(segments) - 1 - i + seg := segments[pos] + isMonthDay, isWeekday := pos == 3, pos == 5 + + if seg == "*" || seg 
== "?" { + continue + } + + if !isWeekday { + if isMonthDay && skipMonthDayForIter { + continue + } + if next, bumped, err = bumpUntilDue(gron.C, seg, pos, next, reverse); bumped { + goto over + } + continue + } + // From here we process the weekday segment in case it is neither * nor ? + + monthDaySeg := segments[3] + intersect := strings.Index(seg, "*/") == 0 || strings.Index(monthDaySeg, "*") == 0 || monthDaySeg == "?" + + nextForWeekDay := next + nextForWeekDay, bumped, err = bumpUntilDue(gron.C, seg, pos, nextForWeekDay, reverse) + if !bumped { + // Weekday seg is specific and next is already at right weekday, so no need to process month day if union case + next = nextForWeekDay + if !intersect { + skipMonthDayForIter = true + } + continue + } + // Weekday was bumped, so we need to check for month day + + if intersect { + // We need intersection so we keep bumped weekday and go over + next = nextForWeekDay + goto over + } + // Month day seg is specific and a number/list/range, so we need to check and keep the closest to next + + nextForMonthDay := next + nextForMonthDay, bumped, err = bumpUntilDue(gron.C, monthDaySeg, 3, nextForMonthDay, reverse) + + monthDayIsClosestToNextThanWeekDay := reverse && nextForMonthDay.After(nextForWeekDay) || + !reverse && nextForMonthDay.Before(nextForWeekDay) + + if monthDayIsClosestToNextThanWeekDay { + next = nextForMonthDay + if !bumped { + // Month day seg is specific and next is already at right month day, we can continue + skipMonthDayForIter = true + continue + } + } else { + next = nextForWeekDay + } + goto over + } + + if !incl && next.Format(FullDateFormat) == start.Format(FullDateFormat) { + delta := time.Second + if reverse { + delta = -time.Second + } + next = next.Add(delta) + continue + } + return + } + return start, errors.New("tried so hard") +} + +var dashRe = regexp.MustCompile(`/.*$`) + +func isUnreachableYear(year string, ref time.Time, incl bool, reverse bool) bool { + if year == "*" || year == "?" 
{ + return false + } + + edge, inc := ref.Year(), 1 + if !incl { + if reverse { + inc = -1 + } + edge += inc + } + for _, offset := range strings.Split(year, ",") { + if strings.Index(offset, "*/") == 0 || strings.Index(offset, "0/") == 0 { + return false + } + for _, part := range strings.Split(dashRe.ReplaceAllString(offset, ""), "-") { + val, err := strconv.Atoi(part) + if err != nil || (!reverse && val >= edge) || (reverse && val < edge) { + return false + } + } + } + return true +} + +var limit = map[int]int{0: 60, 1: 60, 2: 24, 3: 31, 4: 12, 5: 366, 6: 100} + +func bumpUntilDue(c Checker, segment string, pos int, ref time.Time, reverse bool) (time.Time, bool, error) { + // + iter := limit[pos] + for iter > 0 { + c.SetRef(ref) + if ok, _ := c.CheckDue(segment, pos); ok { + return ref, iter != limit[pos], nil + } + if reverse { + ref = bumpReverse(ref, pos) + } else { + ref = bump(ref, pos) + } + iter-- + } + return ref, false, errors.New("tried so hard") +} + +func bump(ref time.Time, pos int) time.Time { + loc := ref.Location() + + switch pos { + case 0: + ref = ref.Add(time.Second) + case 1: + minTime := ref.Add(time.Minute) + ref = time.Date(minTime.Year(), minTime.Month(), minTime.Day(), minTime.Hour(), minTime.Minute(), 0, 0, loc) + case 2: + hTime := ref.Add(time.Hour) + ref = time.Date(hTime.Year(), hTime.Month(), hTime.Day(), hTime.Hour(), 0, 0, 0, loc) + case 3, 5: + dTime := ref.AddDate(0, 0, 1) + ref = time.Date(dTime.Year(), dTime.Month(), dTime.Day(), 0, 0, 0, 0, loc) + case 4: + ref = time.Date(ref.Year(), ref.Month(), 1, 0, 0, 0, 0, loc) + ref = ref.AddDate(0, 1, 0) + case 6: + yTime := ref.AddDate(1, 0, 0) + ref = time.Date(yTime.Year(), 1, 1, 0, 0, 0, 0, loc) + } + return ref +} diff --git a/vendor/github.com/adhocore/gronx/pkg/tasker/README.md b/vendor/github.com/adhocore/gronx/pkg/tasker/README.md new file mode 100644 index 000000000..b6ef3ccc5 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/pkg/tasker/README.md @@ -0,0 +1,169 @@ +# 
adhocore/gronx/pkg/tasker + +[![Latest Version](https://img.shields.io/github/release/adhocore/gronx.svg?style=flat-square)](https://github.com/adhocore/gronx/releases) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE) +[![Go Report](https://goreportcard.com/badge/github.com/adhocore/gronx)](https://goreportcard.com/report/github.com/adhocore/gronx) +[![Test](https://github.com/adhocore/gronx/actions/workflows/test-action.yml/badge.svg)](https://github.com/adhocore/gronx/actions/workflows/test-action.yml) +[![Donate](https://img.shields.io/badge/donate-paypal-blue.svg?style=flat-square)](https://www.paypal.me/ji10/50usd) +[![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/intent/tweet?text=Lightweight+fast+and+deps+free+cron+expression+parser+for+Golang&url=https://github.com/adhocore/gronx&hashtags=go,golang,parser,cron,cronexpr,cronparser) + + +`tasker` is cron expression based task scheduler and/or daemon for programamtic usage in Golang (tested on v1.13 and above) or independent standalone usage. + +## Installation + +```sh +go get -u github.com/adhocore/gronx/cmd/tasker +``` +--- +## Usage +### Go Tasker + +Tasker is a task manager that can be programatically used in Golang applications. +It runs as a daemon and and invokes tasks scheduled with cron expression: + +```go +package main + +import ( + "context" + "time" + + "github.com/adhocore/gronx/pkg/tasker" +) + +func main() { + taskr := tasker.New(tasker.Option{ + Verbose: true, + // optional: defaults to local + Tz: "Asia/Bangkok", + // optional: defaults to stderr log stream + Out: "/full/path/to/output-file", + }) + + // add task to run every minute + taskr.Task("* * * * *", func(ctx context.Context) (int, error) { + // do something ... 
+ + // then return exit code and error, for eg: if everything okay + return 0, nil + }).Task("*/5 * * * *", func(ctx context.Context) (int, error) { // every 5 minutes + // you can also log the output to Out file as configured in Option above: + taskr.Log.Printf("done something in %d s", 2) + + return 0, nil + }) + + // run task without overlap, set concurrent flag to false: + concurrent := false + taskr.Task("* * * * * *", , tasker.Taskify("sleep 2", tasker.Option{}), concurrent) + + // every 10 minute with arbitrary command + taskr.Task("@10minutes", taskr.Taskify("command --option val -- args", tasker.Option{Shell: "/bin/sh -c"})) + + // ... add more tasks + + // optionally if you want tasker to stop after 2 hour, pass the duration with Until(): + taskr.Until(2 * time.Hour) + + // finally run the tasker, it ticks sharply on every minute and runs all the tasks due on that time! + // it exits gracefully when ctrl+c is received making sure pending tasks are completed. + taskr.Run() +} +``` + +#### Concurrency + +By default the tasks can run concurrently i.e if previous run is still not finished +but it is now due again, it will run again. +If you want to run only one instance of a task at a time, set concurrent flag to false: + +```go +taskr := tasker.New(tasker.Option{}) + +concurrent := false +expr, task := "* * * * * *", tasker.Taskify("php -r 'sleep(2);'") +taskr.Task(expr, task, concurrent) +``` + +### Task Daemon +It can also be used as standalone task daemon instead of programmatic usage for Golang application. + +First, just install tasker command: +```sh +go install github.com/adhocore/gronx/cmd/tasker@latest +``` + +Or you can also download latest prebuilt binary from [release](https://github.com/adhocore/gronx/releases/latest) for platform of your choice. + +Then prepare a taskfile ([example](https://github.com/adhocore/gronx/blob/main/test/taskfile.txt)) in crontab format +(or can even point to existing crontab). 
+> `user` is not supported: it is just cron expr followed by the command. + +Finally run the task daemon like so +``` +tasker -file path/to/taskfile +``` + +#### Version + +```sh +tasker -v +``` + +> You can pass more options to control the behavior of task daemon, see below. + +#### Tasker command options: +```txt +-file string + The task file in crontab format +-out string + The fullpath to file where output from tasks are sent to +-shell string + The shell to use for running tasks (default "/usr/bin/bash") +-tz string + The timezone to use for tasks (default "Local") +-until int + The timeout for task daemon in minutes +-verbose + The verbose mode outputs as much as possible +``` + +Examples: +```sh +tasker -verbose -file path/to/taskfile -until 120 # run until next 120min (i.e 2hour) with all feedbacks echoed back +tasker -verbose -file path/to/taskfile -out path/to/output # with all feedbacks echoed to the output file +tasker -tz America/New_York -file path/to/taskfile -shell zsh # run all tasks using zsh shell based on NY timezone +``` + +> File extension of taskfile for (`-file` option) does not matter: can be any or none. +> The directory for outfile (`-out` option) must exist, file is created by task daemon. + +> Same timezone applies for all tasks currently and it might support overriding timezone per task in future release. + +#### Notes on Windows +In Windows if it doesn't find `bash.exe` or `git-bash.exe` it will use `powershell`. +`powershell` may not be compatible with Unix flavored commands. Also to note: +you can't do chaining with `cmd1 && cmd2` but rather `cmd1 ; cmd2`. + +--- +## Understanding Cron Expression + +Checkout [gronx](https://github.com/adhocore/gronx#cron-expression) docs on cron expression. 
+ +--- +## License + +> © [MIT](https://github.com/adhocore/gronx/blob/main/LICENSE) | 2021-2099, Jitendra Adhikari + +## Credits + +This project is ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr) and +release managed by [please](https://github.com/adhocore/please). + +--- +### Other projects +My other golang projects you might find interesting and useful: + +- [**urlsh**](https://github.com/adhocore/urlsh) - URL shortener and bookmarker service with UI, API, Cache, Hits Counter and forwarder using postgres and redis in backend, bulma in frontend; has [web](https://urlssh.xyz) and cli client +- [**fast**](https://github.com/adhocore/fast) - Check your internet speed with ease and comfort right from the terminal diff --git a/vendor/github.com/adhocore/gronx/pkg/tasker/parser.go b/vendor/github.com/adhocore/gronx/pkg/tasker/parser.go new file mode 100644 index 000000000..198793776 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/pkg/tasker/parser.go @@ -0,0 +1,107 @@ +package tasker + +import ( + "bufio" + "log" + "os" + "regexp" + "strings" + + "github.com/adhocore/gronx" +) + +// MustParseTaskfile either parses taskfile from given Option. +// It fails hard in case any error. 
+func MustParseTaskfile(opts Option) []Task { + file, err := os.Open(opts.File) + if err != nil { + log.Printf("[parser] can't open file: %s", opts.File) + exit(1) + } + defer file.Close() + + lines := []string{} + scan := bufio.NewScanner(file) + for scan.Scan() { + ln := strings.TrimLeft(scan.Text(), " \t") + // Skip empty or comment + if ln != "" && ln[0] != '#' { + lines = append(lines, ln) + } + } + + if err := scan.Err(); err != nil { + if len(lines) == 0 { + log.Printf("[parser] error reading taskfile: %v", err) + exit(1) + } + + log.Println(err) + } + + return linesToTasks(lines) +} + +// var cronRe = regexp.MustCompile(`^((?:[^\s]+\s+){5,6}(?:\d{4})?)(?:\s+)?(.*)`) +var aliasRe = regexp.MustCompile(`^(@(?:annually|yearly|monthly|weekly|daily|hourly|5minutes|10minutes|15minutes|30minutes|always|everysecond))(?:\s+)?(.*)`) +var segRe = regexp.MustCompile(`(?i),|/\d+$|^\d+-\d+$|^([0-7]|sun|mon|tue|wed|thu|fri|sat)(L|W|#\d)?$|-([0-7]|sun|mon|tue|wed|thu|fri|sat)$|\d{4}`) + +func linesToTasks(lines []string) []Task { + var tasks []Task + + gron := gronx.New() + for _, line := range lines { + var match []string + if line[0] == '@' { + match = aliasRe.FindStringSubmatch(line) + } else { + match = parseLine(line) + } + + if len(match) > 2 && gron.IsValid(match[1]) { + tasks = append(tasks, Task{strings.Trim(match[1], " \t"), match[2]}) + continue + } + + log.Printf("[parser] can't parse cron expr: %s", line) + } + + return tasks +} + +func parseLine(line string) (match []string) { + wasWs, expr, cmd := false, "", "" + i, nseg, llen := 0, 0, len(line)-1 + match = append(match, line) + + for ; i < llen && nseg <= 7; i++ { + isWs := strings.ContainsAny(line[i:i+1], "\t ") + if nseg >= 5 { + seg, ws := "", line[i-1:i] + for i < llen && !strings.ContainsAny(line[i:i+1], "\t ") { + i, seg = i+1, seg+line[i:i+1] + } + if isCronPart(seg) { + expr, nseg = expr+ws+seg, nseg+1 + } else if seg != "" { + cmd += seg + break + } + } else { + expr += line[i : i+1] + } + if isWs 
&& !wasWs { + nseg++ + } + wasWs = isWs + } + cmd += line[i:] + if nseg >= 5 && strings.TrimSpace(cmd) != "" { + match = append(match, expr, cmd) + } + return +} + +func isCronPart(seg string) bool { + return seg != "" && seg[0] != '/' && (seg[0] == '*' || seg[0] == '?' || segRe.MatchString(seg)) +} diff --git a/vendor/github.com/adhocore/gronx/pkg/tasker/tasker.go b/vendor/github.com/adhocore/gronx/pkg/tasker/tasker.go new file mode 100644 index 000000000..75bdd4cc4 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/pkg/tasker/tasker.go @@ -0,0 +1,417 @@ +package tasker + +import ( + "context" + "fmt" + "log" + "os" + "os/exec" + "os/signal" + "path/filepath" + "reflect" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/adhocore/gronx" +) + +// Option is the config options for Tasker. +type Option struct { + File string + Tz string + Shell string + Out string + Until int64 + Verbose bool +} + +// TaskFunc is the actual task handler. +type TaskFunc func(ctx context.Context) (int, error) + +// Task wraps a cron expr and its' command. +type Task struct { + Expr string + Cmd string +} + +// Tasker is the task manager. +type Tasker struct { + Log *log.Logger + loc *time.Location + gron *gronx.Gronx + wg sync.WaitGroup + until time.Time + exprs map[string][]string + tasks map[string]TaskFunc + mutex map[string]uint32 + abort bool + timeout bool + verbose bool + ctx context.Context + ctxCancel context.CancelFunc +} + +type result struct { + ref string + code int + err error +} + +var exit = os.Exit + +// New inits a task manager. +// It returns Tasker. 
+func New(opt Option) *Tasker { + gron := gronx.New() + tasks := make(map[string]TaskFunc) + exprs := make(map[string][]string) + + if opt.Tz == "" { + opt.Tz = "Local" + } + + loc, err := time.LoadLocation(opt.Tz) + if err != nil { + log.Printf("invalid tz location: %s", opt.Tz) + exit(1) + } + + logger := log.New(os.Stderr, "", log.LstdFlags) + if opt.Out != "" { + if _, err := os.Stat(filepath.Dir(opt.Out)); err != nil { + log.Printf("output dir does not exist: %s", filepath.Base(opt.Out)) + exit(1) + } + + file, err := os.OpenFile(opt.Out, os.O_CREATE|os.O_WRONLY, 0777) + if err != nil { + log.Printf("can't open output file: %s", opt.Out) + exit(1) + } + + logger = log.New(file, "", log.LstdFlags) + } + + return &Tasker{Log: logger, loc: loc, gron: gron, exprs: exprs, tasks: tasks, verbose: opt.Verbose} +} + +// WithContext adds a parent context to the Tasker struct +// and begins the abort when Done is received +func (t *Tasker) WithContext(ctx context.Context) *Tasker { + t.ctx, t.ctxCancel = context.WithCancel(ctx) + return t +} + +func (t *Tasker) ctxDone() { + <-t.ctx.Done() + if t.verbose { + t.Log.Printf("[tasker] received signal on context.Done, aborting") + } + t.abort = true +} + +// Taskify creates TaskFunc out of plain command wrt given options. +func (t *Tasker) Taskify(cmd string, opt Option) TaskFunc { + sh := Shell(opt.Shell) + + return func(ctx context.Context) (int, error) { + buf := strings.Builder{} + exc := exec.Command(sh[0], sh[1], cmd) + exc.Stderr = &buf + exc.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} + + if t.Log.Writer() != exc.Stderr { + exc.Stdout = t.Log.Writer() + } + + err := exc.Run() + if err == nil { + return 0, nil + } + + for _, ln := range strings.Split(strings.TrimRight(buf.String(), "\r\n"), "\n") { + log.Println(ln) + } + + code := 1 + if exErr, ok := err.(*exec.ExitError); ok { + code = exErr.ExitCode() + } + + return code, err + } +} + +// Shell gives a pair of shell and arg. +// It returns array of string. 
+func Shell(shell ...string) []string { + if os.PathSeparator == '\\' { + shell = append(shell, "git-bash.exe -c", "bash.exe -c", "powershell.exe -Command") + } else { + shell = append(shell, "bash -c", "sh -c", "zsh -c") + } + + for _, sh := range shell { + arg := "-c" + cmd := strings.Split(sh, " -") + if len(cmd) > 1 { + arg = "-" + cmd[1] + } + if exc, err := exec.LookPath(cmd[0]); err == nil { + return []string{exc, arg} + } + } + + return []string{"/bin/sh", "-c"} +} + +const taskIDFormat = "[%s][#%d]" + +// Task appends new task handler for given cron expr. +// It returns Tasker (itself) for fluency and bails if expr is invalid. +func (t *Tasker) Task(expr string, task TaskFunc, concurrent ...bool) *Tasker { + segs, err := gronx.Segments(expr) + if err != nil { + log.Fatalf("invalid cron expr: %+v", err) + } + + concurrent = append(concurrent, true) + old, expr := gronx.SpaceRe.ReplaceAllString(expr, " "), strings.Join(segs, " ") + if _, ok := t.exprs[expr]; !ok { + if !t.gron.IsValid(expr) { + log.Fatalf("invalid cron expr: %+v", err) + } + + t.exprs[expr] = []string{} + } + + ref := fmt.Sprintf(taskIDFormat, old, len(t.exprs[expr])+1) + + t.exprs[expr] = append(t.exprs[expr], ref) + t.tasks[ref] = task + + if !concurrent[0] { + if len(t.mutex) == 0 { + t.mutex = map[string]uint32{} + } + t.mutex[ref] = 0 + } + + return t +} + +// Until sets the cutoff time until which the tasker runs. +// It returns itself for fluency. +func (t *Tasker) Until(until interface{}) *Tasker { + switch until := until.(type) { + case time.Duration: + t.until = t.now().Add(until) + case time.Time: + t.until = until + default: + log.Printf("until must be time.Duration or time.Time, got: %v", reflect.TypeOf(until)) + exit(1) + } + + return t +} + +func (t *Tasker) now() time.Time { + return time.Now().In(t.loc) +} + +// Run runs the task manager. 
+func (t *Tasker) Run() { + t.doSetup() + + first := true + for !t.abort && !t.timeout { + ref, willTime := t.tickTimer(first) + if t.timeout || t.abort { + break + } + + tasks := make(map[string]TaskFunc) + t.gron.C.SetRef(ref) + for expr, refs := range t.exprs { + if due, _ := t.gron.SegmentsDue(strings.Split(expr, " ")); !due { + continue + } + + for _, ref := range refs { + tasks[ref] = t.tasks[ref] + } + } + + if len(tasks) > 0 { + t.runTasks(tasks) + } + + first = false + t.timeout = willTime + } + + t.wait() +} + +// Stop the task manager. +func (t *Tasker) Stop() { + t.abort = true +} + +var dateFormat = "2006/01/02 15:04:05" + +func (t *Tasker) doSetup() { + if len(t.tasks) == 0 { + t.Log.Fatal("[tasker] no tasks available") + } + if !t.until.IsZero() && t.verbose { + if t.until.Before(t.now()) { + log.Fatalf("[tasker] timeout must be in future") + } + t.Log.Printf("[tasker] final tick on or before %s", t.until.Format(dateFormat)) + } + + // If we have seconds precision tickSec should be 1 + for expr := range t.exprs { + if expr[0:2] != "0 " { + tickSec = 1 + break + } + } + if t.ctx != nil { + go t.ctxDone() + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGTERM) + + go func() { + <-sig + t.abort = true + }() +} + +var tickSec = 60 + +func (t *Tasker) tickTimer(first bool) (time.Time, bool) { + now, timed, willTime := t.now(), !t.until.IsZero(), false + if t.timeout || t.abort { + return now, willTime + } + + wait := tickSec - now.Second()%tickSec + if !first && wait == 0 { + wait = tickSec + } + + if wait < 1 || wait > tickSec { + return now, willTime + } + + next := now.Add(time.Duration(wait) * time.Second) + willTime = timed && next.After(t.until) + if t.verbose && !willTime { + t.Log.Printf("[tasker] next tick on %s", next.Format(dateFormat)) + } + + if willTime { + next = now.Add(time.Duration(tickSec) - now.Sub(t.until)) + } + for !t.abort && !t.timeout && t.now().Before(next) { + time.Sleep(100 * 
time.Millisecond) + } + + t.timeout = timed && next.After(t.until) + + return next, willTime +} + +func (t *Tasker) runTasks(tasks map[string]TaskFunc) { + if t.verbose { + if t.abort { + t.Log.Println("[tasker] completing pending tasks") + } else { + t.Log.Printf("[tasker] running %d due tasks\n", len(tasks)) + } + } + + ctx := context.Background() + if t.ctx != nil { + ctx = t.ctx + } + + for ref, task := range tasks { + if !t.canRun(ref) { + continue + } + + t.wg.Add(1) + rc := make(chan result) + + go t.doRun(ctx, ref, task, rc) + go t.doOut(rc) + } +} + +func (t *Tasker) canRun(ref string) bool { + lock, ok := t.mutex[ref] + if !ok { + return true + } + if atomic.CompareAndSwapUint32(&lock, 0, 1) { + t.mutex[ref] = 1 + return true + } + return false +} + +func (t *Tasker) doRun(ctx context.Context, ref string, task TaskFunc, rc chan result) { + defer t.wg.Done() + if t.abort || t.timeout { + return + } + + if t.verbose { + t.Log.Printf("[tasker] task %s running\n", ref) + } + + code, err := task(ctx) + if lock, ok := t.mutex[ref]; ok { + atomic.StoreUint32(&lock, 0) + t.mutex[ref] = 0 + } + + rc <- result{ref, code, err} +} + +func (t *Tasker) doOut(rc chan result) { + res := <-rc + if res.err != nil { + t.Log.Printf("[tasker] task %s errored %v", res.ref, res.err) + } + + if t.verbose { + if res.code == 0 { + t.Log.Printf("[tasker] task %s ran successfully", res.ref) + } else { + t.Log.Printf("[tasker] task %s returned error code: %d", res.ref, res.code) + } + } +} + +func (t *Tasker) wait() { + if !t.abort { + t.Log.Println("[tasker] timed out, waiting tasks to complete") + } else { + t.Log.Println("[tasker] interrupted, waiting tasks to complete") + } + + t.wg.Wait() + + // Allow a leeway period + time.Sleep(100 * time.Microsecond) +} diff --git a/vendor/github.com/adhocore/gronx/prev.go b/vendor/github.com/adhocore/gronx/prev.go new file mode 100644 index 000000000..cc2fc9420 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/prev.go @@ -0,0 +1,57 @@ 
+package gronx + +import ( + "fmt" + "time" +) + +// PrevTick gives previous run time before now +func PrevTick(expr string, inclRefTime bool) (time.Time, error) { + return PrevTickBefore(expr, time.Now(), inclRefTime) +} + +// PrevTickBefore gives previous run time before given reference time +func PrevTickBefore(expr string, start time.Time, inclRefTime bool) (time.Time, error) { + gron, prev := New(), start.Truncate(time.Second) + due, err := gron.IsDue(expr, start) + if err != nil || (due && inclRefTime) { + return prev, err + } + + segments, _ := Segments(expr) + if len(segments) > 6 && isUnreachableYear(segments[6], prev, inclRefTime, true) { + return prev, fmt.Errorf("unreachable year segment: %s", segments[6]) + } + + prev, err = loop(gron, segments, prev, inclRefTime, true) + // Ignore superfluous err + if err != nil && gron.isDue(expr, prev) { + err = nil + } + return prev, err +} + +func bumpReverse(ref time.Time, pos int) time.Time { + loc := ref.Location() + + switch pos { + case 0: + ref = ref.Add(-time.Second) + case 1: + minTime := ref.Add(-time.Minute) + ref = time.Date(minTime.Year(), minTime.Month(), minTime.Day(), minTime.Hour(), minTime.Minute(), 59, 0, loc) + case 2: + hTime := ref.Add(-time.Hour) + ref = time.Date(hTime.Year(), hTime.Month(), hTime.Day(), hTime.Hour(), 59, 59, 0, loc) + case 3, 5: + dTime := ref.AddDate(0, 0, -1) + ref = time.Date(dTime.Year(), dTime.Month(), dTime.Day(), 23, 59, 59, 0, loc) + case 4: + ref = time.Date(ref.Year(), ref.Month(), 1, 0, 0, 0, 0, loc) + ref = ref.Add(-time.Second) + case 6: + yTime := ref.AddDate(-1, 0, 0) + ref = time.Date(yTime.Year(), 12, 31, 23, 59, 59, 0, loc) + } + return ref +} diff --git a/vendor/github.com/adhocore/gronx/validator.go b/vendor/github.com/adhocore/gronx/validator.go new file mode 100644 index 000000000..62c3a3630 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/validator.go @@ -0,0 +1,147 @@ +package gronx + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" 
+) + +func inStep(val int, s string, bounds []int) (bool, error) { + parts := strings.Split(s, "/") + step, err := strconv.Atoi(parts[1]) + if err != nil { + return false, err + } + if step == 0 { + return false, errors.New("step can't be 0") + } + + if strings.Index(s, "*/") == 0 { + return (val-bounds[0])%step == 0, nil + } + if strings.Index(s, "0/") == 0 { + return val%step == 0, nil + } + + sub, end := strings.Split(parts[0], "-"), val + start, err := strconv.Atoi(sub[0]) + if err != nil { + return false, err + } + + if len(sub) > 1 { + end, err = strconv.Atoi(sub[1]) + if err != nil { + return false, err + } + } + + if (len(sub) > 1 && end < start) || start < bounds[0] || end > bounds[1] { + return false, fmt.Errorf("step '%s' out of bounds(%d, %d)", parts[0], bounds[0], bounds[1]) + } + + return inStepRange(val, start, end, step), nil +} + +func inRange(val int, s string, bounds []int) (bool, error) { + parts := strings.Split(s, "-") + start, err := strconv.Atoi(parts[0]) + if err != nil { + return false, err + } + + end, err := strconv.Atoi(parts[1]) + if err != nil { + return false, err + } + + if end < start || start < bounds[0] || end > bounds[1] { + return false, fmt.Errorf("range '%s' out of bounds(%d, %d)", s, bounds[0], bounds[1]) + } + + return start <= val && val <= end, nil +} + +func inStepRange(val, start, end, step int) bool { + for i := start; i <= end && i <= val; i += step { + if i == val { + return true + } + } + return false +} + +func isValidMonthDay(val string, last int, ref time.Time) (valid bool, err error) { + day, loc := ref.Day(), ref.Location() + if val == "L" { + return day == last, nil + } + + pos := strings.Index(val, "W") + if pos < 1 { + return false, errors.New("invalid offset value: " + val) + } + + nval, err := strconv.Atoi(val[0:pos]) + if err != nil { + return false, err + } + + for _, i := range []int{0, -1, 1, -2, 2} { + incr := i + nval + if incr > 0 && incr <= last { + iref := time.Date(ref.Year(), ref.Month(), incr, 
ref.Hour(), ref.Minute(), ref.Second(), 0, loc) + week := int(iref.Weekday()) + + if week > 0 && week < 6 && iref.Month() == ref.Month() { + valid = day == iref.Day() + break + } + } + } + + return valid, nil +} + +func isValidWeekDay(val string, last int, ref time.Time) (bool, error) { + loc := ref.Location() + + if pos := strings.Index(val, "L"); pos > 0 { + nval, err := strconv.Atoi(val[0:pos]) + if err != nil { + return false, err + } + + for i := 0; i < 7; i++ { + day := last - i + dref := time.Date(ref.Year(), ref.Month(), day, ref.Hour(), ref.Minute(), ref.Second(), 0, loc) + if int(dref.Weekday()) == nval%7 { + return ref.Day() == day, nil + } + } + } + + pos := strings.Index(val, "#") + parts := strings.Split(strings.ReplaceAll(val, "7#", "0#"), "#") + if pos < 1 || len(parts) < 2 { + return false, errors.New("invalid offset value: " + val) + } + + day, err := strconv.Atoi(parts[0]) + if err != nil { + return false, err + } + + nth, err := strconv.Atoi(parts[1]) + if err != nil { + return false, err + } + + if day < 0 || day > 7 || nth < 1 || nth > 5 || int(ref.Weekday()) != day { + return false, nil + } + + return (ref.Day()-1)/7 == nth-1, nil +} diff --git a/vendor/github.com/go-co-op/gocron/v2/.gitignore b/vendor/github.com/go-co-op/gocron/v2/.gitignore deleted file mode 100644 index 6657e3cb2..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test -local_testing -coverage.out - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -vendor/ - -# IDE project files -.idea diff --git a/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml b/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml deleted file mode 100644 index 07878d85f..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml +++ 
/dev/null @@ -1,49 +0,0 @@ -run: - timeout: 5m - issues-exit-code: 1 - tests: true - skip-dirs: - - local - -issues: - max-same-issues: 100 - include: - - EXC0012 - - EXC0014 - -linters: - enable: - - bodyclose - - exportloopref - - gofumpt - - goimports - - gosec - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - typecheck - - unused - - whitespace - -output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: colored-line-number - # print lines of code with issue, default is true - print-issued-lines: true - # print linter name in the end of issue text, default is true - print-linter-name: true - # make issues output unique by line, default is true - uniq-by-line: true - # add a prefix to the output file references; default is no prefix - path-prefix: "" - # sorts results by: filepath, line and column - sort-results: true - -linters-settings: - golint: - min-confidence: 0.8 - -fix: true diff --git a/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml b/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml deleted file mode 100644 index 99b237e39..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 - hooks: - - id: check-added-large-files - - id: check-case-conflict - - id: check-merge-conflict - - id: check-yaml - - id: detect-private-key - - id: end-of-file-fixer - - id: trailing-whitespace - - repo: https://github.com/golangci/golangci-lint - rev: v1.55.2 - hooks: - - id: golangci-lint - - repo: https://github.com/TekWizely/pre-commit-golang - rev: v1.0.0-rc.1 - hooks: - - id: go-fumpt - args: - - -w - - id: go-mod-tidy diff --git a/vendor/github.com/go-co-op/gocron/v2/CODE_OF_CONDUCT.md 
b/vendor/github.com/go-co-op/gocron/v2/CODE_OF_CONDUCT.md deleted file mode 100644 index 7d913b55b..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,73 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone. And we mean everyone! - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and kind language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. 
- -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team initially on Slack to coordinate private communication. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/go-co-op/gocron/v2/CONTRIBUTING.md b/vendor/github.com/go-co-op/gocron/v2/CONTRIBUTING.md deleted file mode 100644 index 99e1e8809..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/CONTRIBUTING.md +++ /dev/null @@ -1,38 +0,0 @@ -# Contributing to gocron - -Thank you for coming to contribute to gocron! We welcome new ideas, PRs and general feedback. 
- -## Reporting Bugs - -If you find a bug then please let the project know by opening an issue after doing the following: - -- Do a quick search of the existing issues to make sure the bug isn't already reported -- Try and make a minimal list of steps that can reliably reproduce the bug you are experiencing -- Collect as much information as you can to help identify what the issue is (project version, configuration files, etc) - -## Suggesting Enhancements - -If you have a use case that you don't see a way to support yet, we would welcome the feedback in an issue. Before opening the issue, please consider: - -- Is this a common use case? -- Is it simple to understand? - -You can help us out by doing the following before raising a new issue: - -- Check that the feature hasn't been requested already by searching existing issues -- Try and reduce your enhancement into a single, concise and deliverable request, rather than a general idea -- Explain your own use cases as the basis of the request - -## Adding Features - -Pull requests are always welcome. However, before going through the trouble of implementing a change it's worth creating a bug or feature request issue. -This allows us to discuss the changes and make sure they are a good fit for the project. - -Please always make sure a pull request has been: - -- Unit tested with `make test` -- Linted with `make lint` - -## Writing Tests - -Tests should follow the [table driven test pattern](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go). See other tests in the code base for additional examples. diff --git a/vendor/github.com/go-co-op/gocron/v2/Makefile b/vendor/github.com/go-co-op/gocron/v2/Makefile deleted file mode 100644 index abaf708a9..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -.PHONY: fmt lint test mocks test_coverage test_ci - -GO_PKGS := $(shell go list -f {{.Dir}} ./...) - -fmt: - @go list -f {{.Dir}} ./... 
| xargs -I{} gofmt -w -s {} - -lint: - @grep "^func " example_test.go | sort -c - @golangci-lint run - -test: - @go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) - -test_coverage: - @go test -race -v $(GO_FLAGS) -count=1 -coverprofile=coverage.out -covermode=atomic $(GO_PKGS) - -test_ci: - @TEST_ENV=ci go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) - -mocks: - @go generate ./... diff --git a/vendor/github.com/go-co-op/gocron/v2/README.md b/vendor/github.com/go-co-op/gocron/v2/README.md deleted file mode 100644 index 4a1de758e..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# gocron: A Golang Job Scheduling Package - -[![CI State](https://github.com/go-co-op/gocron/actions/workflows/go_test.yml/badge.svg?branch=v2&event=push)](https://github.com/go-co-op/gocron/actions) -![Go Report Card](https://goreportcard.com/badge/github.com/go-co-op/gocron) [![Go Doc](https://godoc.org/github.com/go-co-op/gocron/v2?status.svg)](https://pkg.go.dev/github.com/go-co-op/gocron/v2) - -gocron is a job scheduling package which lets you run Go functions at pre-determined intervals. 
- -If you want to chat, you can find us on Slack at -[](https://gophers.slack.com/archives/CQ7T0T1FW) - -## Quick Start - -``` -go get github.com/go-co-op/gocron/v2 -``` - -```golang -package main - -import ( - "fmt" - "time" - - "github.com/go-co-op/gocron/v2" -) - -func main() { - // create a scheduler - s, err := gocron.NewScheduler() - if err != nil { - // handle error - } - - // add a job to the scheduler - j, err := s.NewJob( - gocron.DurationJob( - 10*time.Second, - ), - gocron.NewTask( - func(a string, b int) { - // do things - }, - "hello", - 1, - ), - ) - if err != nil { - // handle error - } - // each job has a unique id - fmt.Println(j.ID()) - - // start the scheduler - s.Start() - - // block until you are ready to shut down - select { - case <-time.After(time.Minute): - } - - // when you're done, shut it down - err = s.Shutdown() - if err != nil { - // handle error - } -} -``` - -## Examples - -- [Go doc examples](https://pkg.go.dev/github.com/go-co-op/gocron/v2#pkg-examples) -- [Examples directory](examples) - -## Concepts - -- **Job**: The job encapsulates a "task", which is made up of a go function and any function parameters. The Job then - provides the scheduler with the time the job should next be scheduled to run. -- **Scheduler**: The scheduler keeps track of all the jobs and sends each job to the executor when - it is ready to be run. -- **Executor**: The executor calls the job's task and manages the complexities of different job - execution timing requirements (e.g. singletons that shouldn't overrun each other, limiting the max number of jobs running) - - -## Features - -### Job types -Jobs can be run at various intervals. -- [**Duration**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#DurationJob): -Jobs can be run at a fixed `time.Duration`. -- [**Random duration**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#DurationRandomJob): -Jobs can be run at a random `time.Duration` between a min and max. 
-- [**Cron**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#CronJob): -Jobs can be run using a crontab. -- [**Daily**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#DailyJob): -Jobs can be run every x days at specific times. -- [**Weekly**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WeeklyJob): -Jobs can be run every x weeks on specific days of the week and at specific times. -- [**Monthly**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#MonthlyJob): -Jobs can be run every x months on specific days of the month and at specific times. -- [**One time**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#OneTimeJob): -Jobs can be run at specific time(s) (either once or many times). - -### Concurrency Limits -Jobs can be limited individually or across the entire scheduler. -- [**Per job limiting with singleton mode**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithSingletonMode): -Jobs can be limited to a single concurrent execution that either reschedules (skips overlapping executions) -or queues (waits for the previous execution to finish). -- [**Per scheduler limiting with limit mode**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithLimitConcurrentJobs): -Jobs can be limited to a certain number of concurrent executions across the entire scheduler -using either reschedule (skip when the limit is met) or queue (jobs are added to a queue to -wait for the limit to be available). -- **Note:** A scheduler limit and a job limit can both be enabled. - -### Distributed instances of gocron -Multiple instances of gocron can be run. -- [**Elector**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithDistributedElector): -An elector can be used to elect a single instance of gocron to run as the primary with the -other instances checking to see if a new leader needs to be elected. - - Implementations: [go-co-op electors](https://github.com/go-co-op?q=-elector&type=all&language=&sort=) - (don't see what you need? 
request on slack to get a repo created to contribute it!) -- [**Locker**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithDistributedLocker): -A locker can be used to lock each run of a job to a single instance of gocron. -Locker can be at job or scheduler, if it is defined both at job and scheduler then locker of job will take precedence. - - Implementations: [go-co-op lockers](https://github.com/go-co-op?q=-lock&type=all&language=&sort=) - (don't see what you need? request on slack to get a repo created to contribute it!) - -### Events -Job events can trigger actions. -- [**Listeners**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithEventListeners): -Can be added to a job, with [event listeners](https://pkg.go.dev/github.com/go-co-op/gocron/v2#EventListener), -or all jobs across the -[scheduler](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithGlobalJobOptions) -to listen for job events and trigger actions. - -### Options -Many job and scheduler options are available. -- [**Job options**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#JobOption): -Job options can be set when creating a job using `NewJob`. -- [**Global job options**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithGlobalJobOptions): -Global job options can be set when creating a scheduler using `NewScheduler` -and the `WithGlobalJobOptions` option. -- [**Scheduler options**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#SchedulerOption): -Scheduler options can be set when creating a scheduler using `NewScheduler`. - -### Logging -Logs can be enabled. -- [Logger](https://pkg.go.dev/github.com/go-co-op/gocron/v2#Logger): -The Logger interface can be implemented with your desired logging library. -The provided NewLogger uses the standard library's log package. - -### Metrics -Metrics may be collected from the execution of each job. 
-- [**Monitor**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#Monitor): -A monitor can be used to collect metrics for each job from a scheduler. - - Implementations: [go-co-op monitors](https://github.com/go-co-op?q=-monitor&type=all&language=&sort=) - (don't see what you need? request on slack to get a repo created to contribute it!) - -### Testing -The gocron library is set up to enable testing. -- Mocks are provided in [the mock package](mocks) using [gomock](https://github.com/uber-go/mock). -- Time can be mocked by passing in a [FakeClock](https://pkg.go.dev/github.com/jonboulle/clockwork#FakeClock) -to [WithClock](https://pkg.go.dev/github.com/go-co-op/gocron/v2#WithClock) - -see the [example on WithClock](https://pkg.go.dev/github.com/go-co-op/gocron/v2#example-WithClock). - -## Supporters - -We appreciate the support for free and open source software! - -This project is supported by: - -- [Jetbrains](https://www.jetbrains.com/?from=gocron) -- [Sentry](https://sentry.io/welcome/) - -## Star History - -[![Star History Chart](https://api.star-history.com/svg?repos=go-co-op/gocron&type=Date)](https://star-history.com/#go-co-op/gocron&Date) diff --git a/vendor/github.com/go-co-op/gocron/v2/SECURITY.md b/vendor/github.com/go-co-op/gocron/v2/SECURITY.md deleted file mode 100644 index 654a08550..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/SECURITY.md +++ /dev/null @@ -1,16 +0,0 @@ -# Security Policy - -## Supported Versions - -The current plan is to maintain version 2 as long as possible incorporating any necessary security patches. Version 1 is deprecated and will no longer be patched. 
- -| Version | Supported | -| ------- | ------------------ | -| 1.x.x | :heavy_multiplication_x: | -| 2.x.x | :white_check_mark: | - -## Reporting a Vulnerability - -Vulnerabilities can be reported by [opening an issue](https://github.com/go-co-op/gocron/issues/new/choose) or reaching out on Slack: [](https://gophers.slack.com/archives/CQ7T0T1FW) - -We will do our best to addrerss any vulnerabilities in an expeditious manner. diff --git a/vendor/github.com/go-co-op/gocron/v2/distributed.go b/vendor/github.com/go-co-op/gocron/v2/distributed.go deleted file mode 100644 index 1617c6211..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/distributed.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:generate mockgen -destination=mocks/distributed.go -package=gocronmocks . Elector,Locker,Lock -package gocron - -import ( - "context" -) - -// Elector determines the leader from instances asking to be the leader. Only -// the leader runs jobs. If the leader goes down, a new leader will be elected. -type Elector interface { - // IsLeader should return nil if the job should be scheduled by the instance - // making the request and an error if the job should not be scheduled. - IsLeader(context.Context) error -} - -// Locker represents the required interface to lock jobs when running multiple schedulers. -// The lock is held for the duration of the job's run, and it is expected that the -// locker implementation handles time splay between schedulers. -// The lock key passed is the job's name - which, if not set, defaults to the -// go function's name, e.g. "pkg.myJob" for func myJob() {} in pkg -type Locker interface { - // Lock if an error is returned by lock, the job will not be scheduled. - Lock(ctx context.Context, key string) (Lock, error) -} - -// Lock represents an obtained lock. The lock is released after the execution of the job -// by the scheduler. 
-type Lock interface { - Unlock(ctx context.Context) error -} diff --git a/vendor/github.com/go-co-op/gocron/v2/errors.go b/vendor/github.com/go-co-op/gocron/v2/errors.go deleted file mode 100644 index 53df01b1c..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/errors.go +++ /dev/null @@ -1,56 +0,0 @@ -package gocron - -import "fmt" - -// Public error definitions -var ( - ErrCronJobParse = fmt.Errorf("gocron: CronJob: crontab parse failure") - ErrDailyJobAtTimeNil = fmt.Errorf("gocron: DailyJob: atTime within atTimes must not be nil") - ErrDailyJobAtTimesNil = fmt.Errorf("gocron: DailyJob: atTimes must not be nil") - ErrDailyJobHours = fmt.Errorf("gocron: DailyJob: atTimes hours must be between 0 and 23 inclusive") - ErrDailyJobMinutesSeconds = fmt.Errorf("gocron: DailyJob: atTimes minutes and seconds must be between 0 and 59 inclusive") - ErrDurationJobIntervalZero = fmt.Errorf("gocron: DurationJob: time interval is 0") - ErrDurationRandomJobMinMax = fmt.Errorf("gocron: DurationRandomJob: minimum duration must be less than maximum duration") - ErrEventListenerFuncNil = fmt.Errorf("gocron: eventListenerFunc must not be nil") - ErrJobNotFound = fmt.Errorf("gocron: job not found") - ErrJobRunNowFailed = fmt.Errorf("gocron: Job: RunNow: scheduler unreachable") - ErrMonthlyJobDays = fmt.Errorf("gocron: MonthlyJob: daysOfTheMonth must be between 31 and -31 inclusive, and not 0") - ErrMonthlyJobAtTimeNil = fmt.Errorf("gocron: MonthlyJob: atTime within atTimes must not be nil") - ErrMonthlyJobAtTimesNil = fmt.Errorf("gocron: MonthlyJob: atTimes must not be nil") - ErrMonthlyJobDaysNil = fmt.Errorf("gocron: MonthlyJob: daysOfTheMonth must not be nil") - ErrMonthlyJobHours = fmt.Errorf("gocron: MonthlyJob: atTimes hours must be between 0 and 23 inclusive") - ErrMonthlyJobMinutesSeconds = fmt.Errorf("gocron: MonthlyJob: atTimes minutes and seconds must be between 0 and 59 inclusive") - ErrNewJobTaskNil = fmt.Errorf("gocron: NewJob: Task must not be nil") - 
ErrNewJobTaskNotFunc = fmt.Errorf("gocron: NewJob: Task.Function must be of kind reflect.Func") - ErrNewJobWrongNumberOfParameters = fmt.Errorf("gocron: NewJob: Number of provided parameters does not match expected") - ErrNewJobWrongTypeOfParameters = fmt.Errorf("gocron: NewJob: Type of provided parameters does not match expected") - ErrOneTimeJobStartDateTimePast = fmt.Errorf("gocron: OneTimeJob: start must not be in the past") - ErrStopExecutorTimedOut = fmt.Errorf("gocron: timed out waiting for executor to stop") - ErrStopJobsTimedOut = fmt.Errorf("gocron: timed out waiting for jobs to finish") - ErrStopSchedulerTimedOut = fmt.Errorf("gocron: timed out waiting for scheduler to stop") - ErrWeeklyJobAtTimeNil = fmt.Errorf("gocron: WeeklyJob: atTime within atTimes must not be nil") - ErrWeeklyJobAtTimesNil = fmt.Errorf("gocron: WeeklyJob: atTimes must not be nil") - ErrWeeklyJobDaysOfTheWeekNil = fmt.Errorf("gocron: WeeklyJob: daysOfTheWeek must not be nil") - ErrWeeklyJobHours = fmt.Errorf("gocron: WeeklyJob: atTimes hours must be between 0 and 23 inclusive") - ErrWeeklyJobMinutesSeconds = fmt.Errorf("gocron: WeeklyJob: atTimes minutes and seconds must be between 0 and 59 inclusive") - ErrPanicRecovered = fmt.Errorf("gocron: panic recovered") - ErrWithClockNil = fmt.Errorf("gocron: WithClock: clock must not be nil") - ErrWithDistributedElectorNil = fmt.Errorf("gocron: WithDistributedElector: elector must not be nil") - ErrWithDistributedLockerNil = fmt.Errorf("gocron: WithDistributedLocker: locker must not be nil") - ErrWithDistributedJobLockerNil = fmt.Errorf("gocron: WithDistributedJobLocker: locker must not be nil") - ErrWithLimitConcurrentJobsZero = fmt.Errorf("gocron: WithLimitConcurrentJobs: limit must be greater than 0") - ErrWithLocationNil = fmt.Errorf("gocron: WithLocation: location must not be nil") - ErrWithLoggerNil = fmt.Errorf("gocron: WithLogger: logger must not be nil") - ErrWithMonitorNil = fmt.Errorf("gocron: WithMonitor: monitor must not be 
nil") - ErrWithNameEmpty = fmt.Errorf("gocron: WithName: name must not be empty") - ErrWithStartDateTimePast = fmt.Errorf("gocron: WithStartDateTime: start must not be in the past") - ErrWithStopTimeoutZeroOrNegative = fmt.Errorf("gocron: WithStopTimeout: timeout must be greater than 0") -) - -// internal errors -var ( - errAtTimeNil = fmt.Errorf("errAtTimeNil") - errAtTimesNil = fmt.Errorf("errAtTimesNil") - errAtTimeHours = fmt.Errorf("errAtTimeHours") - errAtTimeMinSec = fmt.Errorf("errAtTimeMinSec") -) diff --git a/vendor/github.com/go-co-op/gocron/v2/executor.go b/vendor/github.com/go-co-op/gocron/v2/executor.go deleted file mode 100644 index f3661970a..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/executor.go +++ /dev/null @@ -1,487 +0,0 @@ -package gocron - -import ( - "context" - "fmt" - "strconv" - "sync" - "time" - - "github.com/google/uuid" -) - -type executor struct { - ctx context.Context - cancel context.CancelFunc - logger Logger - stopCh chan struct{} - jobsIn chan jobIn - jobsOutForRescheduling chan uuid.UUID - jobsOutCompleted chan uuid.UUID - jobOutRequest chan jobOutRequest - stopTimeout time.Duration - done chan error - singletonRunners *sync.Map // map[uuid.UUID]singletonRunner - limitMode *limitModeConfig - elector Elector - locker Locker - monitor Monitor -} - -type jobIn struct { - id uuid.UUID - shouldSendOut bool -} - -type singletonRunner struct { - in chan jobIn - rescheduleLimiter chan struct{} -} - -type limitModeConfig struct { - started bool - mode LimitMode - limit uint - rescheduleLimiter chan struct{} - in chan jobIn - // singletonJobs is used to track singleton jobs that are running - // in the limit mode runner. This is used to prevent the same job - // from running multiple times across limit mode runners when both - // a limit mode and singleton mode are enabled. 
- singletonJobs map[uuid.UUID]struct{} - singletonJobsMu sync.Mutex -} - -func (e *executor) start() { - e.logger.Debug("gocron: executor started") - - // creating the executor's context here as the executor - // is the only goroutine that should access this context - // any other uses within the executor should create a context - // using the executor context as parent. - e.ctx, e.cancel = context.WithCancel(context.Background()) - - // the standardJobsWg tracks - standardJobsWg := &waitGroupWithMutex{} - - singletonJobsWg := &waitGroupWithMutex{} - - limitModeJobsWg := &waitGroupWithMutex{} - - // create a fresh map for tracking singleton runners - e.singletonRunners = &sync.Map{} - - // start the for leap that is the executor - // selecting on channels for work to do - for { - select { - // job ids in are sent from 1 of 2 places: - // 1. the scheduler sends directly when jobs - // are run immediately. - // 2. sent from time.AfterFuncs in which job schedules - // are spun up by the scheduler - case jIn := <-e.jobsIn: - select { - case <-e.stopCh: - e.stop(standardJobsWg, singletonJobsWg, limitModeJobsWg) - return - default: - } - // this context is used to handle cancellation of the executor - // on requests for a job to the scheduler via requestJobCtx - ctx, cancel := context.WithCancel(e.ctx) - - if e.limitMode != nil && !e.limitMode.started { - // check if we are already running the limit mode runners - // if not, spin up the required number i.e. limit! 
- e.limitMode.started = true - for i := e.limitMode.limit; i > 0; i-- { - limitModeJobsWg.Add(1) - go e.limitModeRunner("limitMode-"+strconv.Itoa(int(i)), e.limitMode.in, limitModeJobsWg, e.limitMode.mode, e.limitMode.rescheduleLimiter) - } - } - - // spin off into a goroutine to unblock the executor and - // allow for processing for more work - go func() { - // make sure to cancel the above context per the docs - // // Canceling this context releases resources associated with it, so code should - // // call cancel as soon as the operations running in this Context complete. - defer cancel() - - // check for limit mode - this spins up a separate runner which handles - // limiting the total number of concurrently running jobs - if e.limitMode != nil { - if e.limitMode.mode == LimitModeReschedule { - select { - // rescheduleLimiter is a channel the size of the limit - // this blocks publishing to the channel and keeps - // the executor from building up a waiting queue - // and forces rescheduling - case e.limitMode.rescheduleLimiter <- struct{}{}: - e.limitMode.in <- jIn - default: - // all runners are busy, reschedule the work for later - // which means we just skip it here and do nothing - // TODO when metrics are added, this should increment a rescheduled metric - e.sendOutForRescheduling(&jIn) - } - } else { - // since we're not using LimitModeReschedule, but instead using LimitModeWait - // we do want to queue up the work to the limit mode runners and allow them - // to work through the channel backlog. A hard limit of 1000 is in place - // at which point this call would block. 
- // TODO when metrics are added, this should increment a wait metric - e.sendOutForRescheduling(&jIn) - e.limitMode.in <- jIn - } - } else { - // no limit mode, so we're either running a regular job or - // a job with a singleton mode - // - // get the job, so we can figure out what kind it is and how - // to execute it - j := requestJobCtx(ctx, jIn.id, e.jobOutRequest) - if j == nil { - // safety check as it'd be strange bug if this occurred - return - } - if j.singletonMode { - // for singleton mode, get the existing runner for the job - // or spin up a new one - runner := &singletonRunner{} - runnerSrc, ok := e.singletonRunners.Load(jIn.id) - if !ok { - runner.in = make(chan jobIn, 1000) - if j.singletonLimitMode == LimitModeReschedule { - runner.rescheduleLimiter = make(chan struct{}, 1) - } - e.singletonRunners.Store(jIn.id, runner) - singletonJobsWg.Add(1) - go e.singletonModeRunner("singleton-"+jIn.id.String(), runner.in, singletonJobsWg, j.singletonLimitMode, runner.rescheduleLimiter) - } else { - runner = runnerSrc.(*singletonRunner) - } - - if j.singletonLimitMode == LimitModeReschedule { - // reschedule mode uses the limiter channel to check - // for a running job and reschedules if the channel is full. - select { - case runner.rescheduleLimiter <- struct{}{}: - runner.in <- jIn - e.sendOutForRescheduling(&jIn) - default: - // runner is busy, reschedule the work for later - // which means we just skip it here and do nothing - // TODO when metrics are added, this should increment a rescheduled metric - e.sendOutForRescheduling(&jIn) - } - } else { - // wait mode, fill up that queue (buffered channel, so it's ok) - runner.in <- jIn - e.sendOutForRescheduling(&jIn) - } - } else { - select { - case <-e.stopCh: - e.stop(standardJobsWg, singletonJobsWg, limitModeJobsWg) - return - default: - } - // we've gotten to the basic / standard jobs -- - // the ones without anything special that just want - // to be run. 
Add to the WaitGroup so that - // stopping or shutting down can wait for the jobs to - // complete. - standardJobsWg.Add(1) - go func(j internalJob) { - e.runJob(j, jIn) - standardJobsWg.Done() - }(*j) - } - } - }() - case <-e.stopCh: - e.stop(standardJobsWg, singletonJobsWg, limitModeJobsWg) - return - } - } -} - -func (e *executor) sendOutForRescheduling(jIn *jobIn) { - if jIn.shouldSendOut { - select { - case e.jobsOutForRescheduling <- jIn.id: - case <-e.ctx.Done(): - return - } - } - // we need to set this to false now, because to handle - // non-limit jobs, we send out from the e.runJob function - // and in this case we don't want to send out twice. - jIn.shouldSendOut = false -} - -func (e *executor) limitModeRunner(name string, in chan jobIn, wg *waitGroupWithMutex, limitMode LimitMode, rescheduleLimiter chan struct{}) { - e.logger.Debug("gocron: limitModeRunner starting", "name", name) - for { - select { - case jIn := <-in: - select { - case <-e.ctx.Done(): - e.logger.Debug("gocron: limitModeRunner shutting down", "name", name) - wg.Done() - return - default: - } - - ctx, cancel := context.WithCancel(e.ctx) - j := requestJobCtx(ctx, jIn.id, e.jobOutRequest) - cancel() - if j != nil { - if j.singletonMode { - e.limitMode.singletonJobsMu.Lock() - _, ok := e.limitMode.singletonJobs[jIn.id] - if ok { - // this job is already running, so don't run it - // but instead reschedule it - e.limitMode.singletonJobsMu.Unlock() - if jIn.shouldSendOut { - select { - case <-e.ctx.Done(): - return - case <-j.ctx.Done(): - return - case e.jobsOutForRescheduling <- j.id: - } - } - // remove the limiter block, as this particular job - // was a singleton already running, and we want to - // allow another job to be scheduled - if limitMode == LimitModeReschedule { - <-rescheduleLimiter - } - continue - } - e.limitMode.singletonJobs[jIn.id] = struct{}{} - e.limitMode.singletonJobsMu.Unlock() - } - e.runJob(*j, jIn) - - if j.singletonMode { - e.limitMode.singletonJobsMu.Lock() - 
delete(e.limitMode.singletonJobs, jIn.id) - e.limitMode.singletonJobsMu.Unlock() - } - } - - // remove the limiter block to allow another job to be scheduled - if limitMode == LimitModeReschedule { - <-rescheduleLimiter - } - case <-e.ctx.Done(): - e.logger.Debug("limitModeRunner shutting down", "name", name) - wg.Done() - return - } - } -} - -func (e *executor) singletonModeRunner(name string, in chan jobIn, wg *waitGroupWithMutex, limitMode LimitMode, rescheduleLimiter chan struct{}) { - e.logger.Debug("gocron: singletonModeRunner starting", "name", name) - for { - select { - case jIn := <-in: - select { - case <-e.ctx.Done(): - e.logger.Debug("gocron: singletonModeRunner shutting down", "name", name) - wg.Done() - return - default: - } - - ctx, cancel := context.WithCancel(e.ctx) - j := requestJobCtx(ctx, jIn.id, e.jobOutRequest) - cancel() - if j != nil { - // need to set shouldSendOut = false here, as there is a duplicative call to sendOutForRescheduling - // inside the runJob function that needs to be skipped. sendOutForRescheduling is previously called - // when the job is sent to the singleton mode runner. 
- jIn.shouldSendOut = false - e.runJob(*j, jIn) - } - - // remove the limiter block to allow another job to be scheduled - if limitMode == LimitModeReschedule { - <-rescheduleLimiter - } - case <-e.ctx.Done(): - e.logger.Debug("singletonModeRunner shutting down", "name", name) - wg.Done() - return - } - } -} - -func (e *executor) runJob(j internalJob, jIn jobIn) { - if j.ctx == nil { - return - } - select { - case <-e.ctx.Done(): - return - case <-j.ctx.Done(): - return - default: - } - - if e.elector != nil { - if err := e.elector.IsLeader(j.ctx); err != nil { - e.sendOutForRescheduling(&jIn) - e.incrementJobCounter(j, Skip) - return - } - } else if j.locker != nil { - lock, err := j.locker.Lock(j.ctx, j.name) - if err != nil { - _ = callJobFuncWithParams(j.afterLockError, j.id, j.name, err) - e.sendOutForRescheduling(&jIn) - e.incrementJobCounter(j, Skip) - return - } - defer func() { _ = lock.Unlock(j.ctx) }() - } else if e.locker != nil { - lock, err := e.locker.Lock(j.ctx, j.name) - if err != nil { - _ = callJobFuncWithParams(j.afterLockError, j.id, j.name, err) - e.sendOutForRescheduling(&jIn) - e.incrementJobCounter(j, Skip) - return - } - defer func() { _ = lock.Unlock(j.ctx) }() - } - _ = callJobFuncWithParams(j.beforeJobRuns, j.id, j.name) - - e.sendOutForRescheduling(&jIn) - select { - case e.jobsOutCompleted <- j.id: - case <-e.ctx.Done(): - } - - startTime := time.Now() - err := e.callJobWithRecover(j) - if e.monitor != nil { - e.monitor.RecordJobTiming(startTime, time.Now(), j.id, j.name, j.tags) - } - if err != nil { - _ = callJobFuncWithParams(j.afterJobRunsWithError, j.id, j.name, err) - e.incrementJobCounter(j, Fail) - } else { - _ = callJobFuncWithParams(j.afterJobRuns, j.id, j.name) - e.incrementJobCounter(j, Success) - } -} - -func (e *executor) callJobWithRecover(j internalJob) (err error) { - defer func() { - if recoverData := recover(); recoverData != nil { - _ = callJobFuncWithParams(j.afterJobRunsWithPanic, j.id, j.name, recoverData) - - 
// if panic is occurred, we should return an error - err = fmt.Errorf("%w from %v", ErrPanicRecovered, recoverData) - } - }() - - return callJobFuncWithParams(j.function, j.parameters...) -} - -func (e *executor) incrementJobCounter(j internalJob, status JobStatus) { - if e.monitor != nil { - e.monitor.IncrementJob(j.id, j.name, j.tags, status) - } -} - -func (e *executor) stop(standardJobsWg, singletonJobsWg, limitModeJobsWg *waitGroupWithMutex) { - e.logger.Debug("gocron: stopping executor") - // we've been asked to stop. This is either because the scheduler has been told - // to stop all jobs or the scheduler has been asked to completely shutdown. - // - // cancel tells all the functions to stop their work and send in a done response - e.cancel() - - // the wait for job channels are used to report back whether we successfully waited - // for all jobs to complete or if we hit the configured timeout. - waitForJobs := make(chan struct{}, 1) - waitForSingletons := make(chan struct{}, 1) - waitForLimitMode := make(chan struct{}, 1) - - // the waiter context is used to cancel the functions waiting on jobs. - // this is done to avoid goroutine leaks. - waiterCtx, waiterCancel := context.WithCancel(context.Background()) - - // wait for standard jobs to complete - go func() { - e.logger.Debug("gocron: waiting for standard jobs to complete") - go func() { - // this is done in a separate goroutine, so we aren't - // blocked by the WaitGroup's Wait call in the event - // that the waiter context is cancelled. - // This particular goroutine could leak in the event that - // some long-running standard job doesn't complete. 
- standardJobsWg.Wait() - e.logger.Debug("gocron: standard jobs completed") - waitForJobs <- struct{}{} - }() - <-waiterCtx.Done() - }() - - // wait for per job singleton limit mode runner jobs to complete - go func() { - e.logger.Debug("gocron: waiting for singleton jobs to complete") - go func() { - singletonJobsWg.Wait() - e.logger.Debug("gocron: singleton jobs completed") - waitForSingletons <- struct{}{} - }() - <-waiterCtx.Done() - }() - - // wait for limit mode runners to complete - go func() { - e.logger.Debug("gocron: waiting for limit mode jobs to complete") - go func() { - limitModeJobsWg.Wait() - e.logger.Debug("gocron: limitMode jobs completed") - waitForLimitMode <- struct{}{} - }() - <-waiterCtx.Done() - }() - - // now either wait for all the jobs to complete, - // or hit the timeout. - var count int - timeout := time.Now().Add(e.stopTimeout) - for time.Now().Before(timeout) && count < 3 { - select { - case <-waitForJobs: - count++ - case <-waitForSingletons: - count++ - case <-waitForLimitMode: - count++ - default: - } - } - if count < 3 { - e.done <- ErrStopJobsTimedOut - e.logger.Debug("gocron: executor stopped - timed out") - } else { - e.done <- nil - e.logger.Debug("gocron: executor stopped") - } - waiterCancel() - - if e.limitMode != nil { - e.limitMode.started = false - } -} diff --git a/vendor/github.com/go-co-op/gocron/v2/job.go b/vendor/github.com/go-co-op/gocron/v2/job.go deleted file mode 100644 index 5b0302c42..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/job.go +++ /dev/null @@ -1,1042 +0,0 @@ -//go:generate mockgen -destination=mocks/job.go -package=gocronmocks . 
Job -package gocron - -import ( - "context" - "errors" - "fmt" - "math/rand" - "strings" - "time" - - "github.com/google/uuid" - "github.com/jonboulle/clockwork" - "github.com/robfig/cron/v3" - "golang.org/x/exp/slices" -) - -// internalJob stores the information needed by the scheduler -// to manage scheduling, starting and stopping the job -type internalJob struct { - ctx context.Context - cancel context.CancelFunc - id uuid.UUID - name string - tags []string - jobSchedule - - // as some jobs may queue up, it's possible to - // have multiple nextScheduled times - nextScheduled []time.Time - - lastRun time.Time - function any - parameters []any - timer clockwork.Timer - singletonMode bool - singletonLimitMode LimitMode - limitRunsTo *limitRunsTo - startTime time.Time - startImmediately bool - // event listeners - afterJobRuns func(jobID uuid.UUID, jobName string) - beforeJobRuns func(jobID uuid.UUID, jobName string) - afterJobRunsWithError func(jobID uuid.UUID, jobName string, err error) - afterJobRunsWithPanic func(jobID uuid.UUID, jobName string, recoverData any) - afterLockError func(jobID uuid.UUID, jobName string, err error) - - locker Locker -} - -// stop is used to stop the job's timer and cancel the context -// stopping the timer is critical for cleaning up jobs that are -// sleeping in a time.AfterFunc timer when the job is being stopped. -// cancelling the context keeps the executor from continuing to try -// and run the job. -func (j *internalJob) stop() { - if j.timer != nil { - j.timer.Stop() - } - j.cancel() -} - -// task stores the function and parameters -// that are actually run when the job is executed. -type task struct { - function any - parameters []any -} - -// Task defines a function that returns the task -// function and parameters. -type Task func() task - -// NewTask provides the job's task function and parameters. 
-func NewTask(function any, parameters ...any) Task { - return func() task { - return task{ - function: function, - parameters: parameters, - } - } -} - -// limitRunsTo is used for managing the number of runs -// when the user only wants the job to run a certain -// number of times and then be removed from the scheduler. -type limitRunsTo struct { - limit uint - runCount uint -} - -// ----------------------------------------------- -// ----------------------------------------------- -// --------------- Job Variants ------------------ -// ----------------------------------------------- -// ----------------------------------------------- - -// JobDefinition defines the interface that must be -// implemented to create a job from the definition. -type JobDefinition interface { - setup(j *internalJob, l *time.Location, now time.Time) error -} - -var _ JobDefinition = (*cronJobDefinition)(nil) - -type cronJobDefinition struct { - crontab string - withSeconds bool -} - -func (c cronJobDefinition) setup(j *internalJob, location *time.Location, _ time.Time) error { - var withLocation string - if strings.HasPrefix(c.crontab, "TZ=") || strings.HasPrefix(c.crontab, "CRON_TZ=") { - withLocation = c.crontab - } else { - // since the user didn't provide a timezone default to the location - // passed in by the scheduler. Default: time.Local - withLocation = fmt.Sprintf("CRON_TZ=%s %s", location.String(), c.crontab) - } - - var ( - cronSchedule cron.Schedule - err error - ) - - if c.withSeconds { - p := cron.NewParser(cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor) - cronSchedule, err = p.Parse(withLocation) - } else { - cronSchedule, err = cron.ParseStandard(withLocation) - } - if err != nil { - return errors.Join(ErrCronJobParse, err) - } - - j.jobSchedule = &cronJob{cronSchedule: cronSchedule} - return nil -} - -// CronJob defines a new job using the crontab syntax: `* * * * *`. 
-// An optional 6th field can be used at the beginning if withSeconds -// is set to true: `* * * * * *`. -// The timezone can be set on the Scheduler using WithLocation, or in the -// crontab in the form `TZ=America/Chicago * * * * *` or -// `CRON_TZ=America/Chicago * * * * *` -func CronJob(crontab string, withSeconds bool) JobDefinition { - return cronJobDefinition{ - crontab: crontab, - withSeconds: withSeconds, - } -} - -var _ JobDefinition = (*durationJobDefinition)(nil) - -type durationJobDefinition struct { - duration time.Duration -} - -func (d durationJobDefinition) setup(j *internalJob, _ *time.Location, _ time.Time) error { - if d.duration == 0 { - return ErrDurationJobIntervalZero - } - j.jobSchedule = &durationJob{duration: d.duration} - return nil -} - -// DurationJob defines a new job using time.Duration -// for the interval. -func DurationJob(duration time.Duration) JobDefinition { - return durationJobDefinition{ - duration: duration, - } -} - -var _ JobDefinition = (*durationRandomJobDefinition)(nil) - -type durationRandomJobDefinition struct { - min, max time.Duration -} - -func (d durationRandomJobDefinition) setup(j *internalJob, _ *time.Location, _ time.Time) error { - if d.min >= d.max { - return ErrDurationRandomJobMinMax - } - - j.jobSchedule = &durationRandomJob{ - min: d.min, - max: d.max, - rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec - } - return nil -} - -// DurationRandomJob defines a new job that runs on a random interval -// between the min and max duration values provided. -// -// To achieve a similar behavior as tools that use a splay/jitter technique -// consider the median value as the baseline and the difference between the -// max-median or median-min as the splay/jitter. 
-// -// For example, if you want a job to run every 5 minutes, but want to add -// up to 1 min of jitter to the interval, you could use -// DurationRandomJob(4*time.Minute, 6*time.Minute) -func DurationRandomJob(minDuration, maxDuration time.Duration) JobDefinition { - return durationRandomJobDefinition{ - min: minDuration, - max: maxDuration, - } -} - -// DailyJob runs the job on the interval of days, and at the set times. -// By default, the job will start the next available day, considering the last run to be now, -// and the time and day based on the interval and times you input. This means, if you -// select an interval greater than 1, your job by default will run X (interval) days from now -// if there are no atTimes left in the current day. You can use WithStartAt to tell the -// scheduler to start the job sooner. -func DailyJob(interval uint, atTimes AtTimes) JobDefinition { - return dailyJobDefinition{ - interval: interval, - atTimes: atTimes, - } -} - -var _ JobDefinition = (*dailyJobDefinition)(nil) - -type dailyJobDefinition struct { - interval uint - atTimes AtTimes -} - -func (d dailyJobDefinition) setup(j *internalJob, location *time.Location, _ time.Time) error { - atTimesDate, err := convertAtTimesToDateTime(d.atTimes, location) - switch { - case errors.Is(err, errAtTimesNil): - return ErrDailyJobAtTimesNil - case errors.Is(err, errAtTimeNil): - return ErrDailyJobAtTimeNil - case errors.Is(err, errAtTimeHours): - return ErrDailyJobHours - case errors.Is(err, errAtTimeMinSec): - return ErrDailyJobMinutesSeconds - } - - ds := dailyJob{ - interval: d.interval, - atTimes: atTimesDate, - } - j.jobSchedule = ds - return nil -} - -var _ JobDefinition = (*weeklyJobDefinition)(nil) - -type weeklyJobDefinition struct { - interval uint - daysOfTheWeek Weekdays - atTimes AtTimes -} - -func (w weeklyJobDefinition) setup(j *internalJob, location *time.Location, _ time.Time) error { - var ws weeklyJob - ws.interval = w.interval - - if w.daysOfTheWeek == nil { - 
return ErrWeeklyJobDaysOfTheWeekNil - } - - daysOfTheWeek := w.daysOfTheWeek() - - slices.Sort(daysOfTheWeek) - ws.daysOfWeek = daysOfTheWeek - - atTimesDate, err := convertAtTimesToDateTime(w.atTimes, location) - switch { - case errors.Is(err, errAtTimesNil): - return ErrWeeklyJobAtTimesNil - case errors.Is(err, errAtTimeNil): - return ErrWeeklyJobAtTimeNil - case errors.Is(err, errAtTimeHours): - return ErrWeeklyJobHours - case errors.Is(err, errAtTimeMinSec): - return ErrWeeklyJobMinutesSeconds - } - ws.atTimes = atTimesDate - - j.jobSchedule = ws - return nil -} - -// Weekdays defines a function that returns a list of week days. -type Weekdays func() []time.Weekday - -// NewWeekdays provide the days of the week the job should run. -func NewWeekdays(weekday time.Weekday, weekdays ...time.Weekday) Weekdays { - return func() []time.Weekday { - weekdays = append(weekdays, weekday) - return weekdays - } -} - -// WeeklyJob runs the job on the interval of weeks, on the specific days of the week -// specified, and at the set times. -// -// By default, the job will start the next available day, considering the last run to be now, -// and the time and day based on the interval, days and times you input. This means, if you -// select an interval greater than 1, your job by default will run X (interval) weeks from now -// if there are no daysOfTheWeek left in the current week. You can use WithStartAt to tell the -// scheduler to start the job sooner. 
-func WeeklyJob(interval uint, daysOfTheWeek Weekdays, atTimes AtTimes) JobDefinition { - return weeklyJobDefinition{ - interval: interval, - daysOfTheWeek: daysOfTheWeek, - atTimes: atTimes, - } -} - -var _ JobDefinition = (*monthlyJobDefinition)(nil) - -type monthlyJobDefinition struct { - interval uint - daysOfTheMonth DaysOfTheMonth - atTimes AtTimes -} - -func (m monthlyJobDefinition) setup(j *internalJob, location *time.Location, _ time.Time) error { - var ms monthlyJob - ms.interval = m.interval - - if m.daysOfTheMonth == nil { - return ErrMonthlyJobDaysNil - } - - var daysStart, daysEnd []int - for _, day := range m.daysOfTheMonth() { - if day > 31 || day == 0 || day < -31 { - return ErrMonthlyJobDays - } - if day > 0 { - daysStart = append(daysStart, day) - } else { - daysEnd = append(daysEnd, day) - } - } - daysStart = removeSliceDuplicatesInt(daysStart) - slices.Sort(daysStart) - ms.days = daysStart - - daysEnd = removeSliceDuplicatesInt(daysEnd) - slices.Sort(daysEnd) - ms.daysFromEnd = daysEnd - - atTimesDate, err := convertAtTimesToDateTime(m.atTimes, location) - switch { - case errors.Is(err, errAtTimesNil): - return ErrMonthlyJobAtTimesNil - case errors.Is(err, errAtTimeNil): - return ErrMonthlyJobAtTimeNil - case errors.Is(err, errAtTimeHours): - return ErrMonthlyJobHours - case errors.Is(err, errAtTimeMinSec): - return ErrMonthlyJobMinutesSeconds - } - ms.atTimes = atTimesDate - - j.jobSchedule = ms - return nil -} - -type days []int - -// DaysOfTheMonth defines a function that returns a list of days. -type DaysOfTheMonth func() days - -// NewDaysOfTheMonth provide the days of the month the job should -// run. The days can be positive 1 to 31 and/or negative -31 to -1. -// Negative values count backwards from the end of the month. -// For example: -1 == the last day of the month. -// -// -5 == 5 days before the end of the month. 
-func NewDaysOfTheMonth(day int, moreDays ...int) DaysOfTheMonth { - return func() days { - moreDays = append(moreDays, day) - return moreDays - } -} - -type atTime struct { - hours, minutes, seconds uint -} - -func (a atTime) time(location *time.Location) time.Time { - return time.Date(0, 0, 0, int(a.hours), int(a.minutes), int(a.seconds), 0, location) -} - -// AtTime defines a function that returns the internal atTime -type AtTime func() atTime - -// NewAtTime provide the hours, minutes and seconds at which -// the job should be run -func NewAtTime(hours, minutes, seconds uint) AtTime { - return func() atTime { - return atTime{hours: hours, minutes: minutes, seconds: seconds} - } -} - -// AtTimes define a list of AtTime -type AtTimes func() []AtTime - -// NewAtTimes provide the hours, minutes and seconds at which -// the job should be run -func NewAtTimes(atTime AtTime, atTimes ...AtTime) AtTimes { - return func() []AtTime { - atTimes = append(atTimes, atTime) - return atTimes - } -} - -// MonthlyJob runs the job on the interval of months, on the specific days of the month -// specified, and at the set times. Days of the month can be 1 to 31 or negative (-1 to -31), which -// count backwards from the end of the month. E.g. -1 is the last day of the month. -// -// If a day of the month is selected that does not exist in all months (e.g. 31st) -// any month that does not have that day will be skipped. -// -// By default, the job will start the next available day, considering the last run to be now, -// and the time and month based on the interval, days and times you input. -// This means, if you select an interval greater than 1, your job by default will run -// X (interval) months from now if there are no daysOfTheMonth left in the current month. -// You can use WithStartAt to tell the scheduler to start the job sooner. -// -// Carefully consider your configuration! 
-// - For example: an interval of 2 months on the 31st of each month, starting 12/31 -// would skip Feb, April, June, and next run would be in August. -func MonthlyJob(interval uint, daysOfTheMonth DaysOfTheMonth, atTimes AtTimes) JobDefinition { - return monthlyJobDefinition{ - interval: interval, - daysOfTheMonth: daysOfTheMonth, - atTimes: atTimes, - } -} - -var _ JobDefinition = (*oneTimeJobDefinition)(nil) - -type oneTimeJobDefinition struct { - startAt OneTimeJobStartAtOption -} - -func (o oneTimeJobDefinition) setup(j *internalJob, _ *time.Location, now time.Time) error { - sortedTimes := o.startAt(j) - slices.SortStableFunc(sortedTimes, ascendingTime) - // keep only schedules that are in the future - idx, found := slices.BinarySearchFunc(sortedTimes, now, ascendingTime) - if found { - idx++ - } - sortedTimes = sortedTimes[idx:] - if !j.startImmediately && len(sortedTimes) == 0 { - return ErrOneTimeJobStartDateTimePast - } - j.jobSchedule = oneTimeJob{sortedTimes: sortedTimes} - return nil -} - -// OneTimeJobStartAtOption defines when the one time job is run -type OneTimeJobStartAtOption func(*internalJob) []time.Time - -// OneTimeJobStartImmediately tells the scheduler to run the one time job immediately. -func OneTimeJobStartImmediately() OneTimeJobStartAtOption { - return func(j *internalJob) []time.Time { - j.startImmediately = true - return []time.Time{} - } -} - -// OneTimeJobStartDateTime sets the date & time at which the job should run. -// This datetime must be in the future (according to the scheduler clock). -func OneTimeJobStartDateTime(start time.Time) OneTimeJobStartAtOption { - return func(j *internalJob) []time.Time { - return []time.Time{start} - } -} - -// OneTimeJobStartDateTimes sets the date & times at which the job should run. -// At least one of the date/times must be in the future (according to the scheduler clock). 
-func OneTimeJobStartDateTimes(times ...time.Time) OneTimeJobStartAtOption { - return func(j *internalJob) []time.Time { - return times - } -} - -// OneTimeJob is to run a job once at a specified time and not on -// any regular schedule. -func OneTimeJob(startAt OneTimeJobStartAtOption) JobDefinition { - return oneTimeJobDefinition{ - startAt: startAt, - } -} - -// ----------------------------------------------- -// ----------------------------------------------- -// ----------------- Job Options ----------------- -// ----------------------------------------------- -// ----------------------------------------------- - -// JobOption defines the constructor for job options. -type JobOption func(*internalJob) error - -// WithDistributedJobLocker sets the locker to be used by multiple -// Scheduler instances to ensure that only one instance of each -// job is run. -func WithDistributedJobLocker(locker Locker) JobOption { - return func(j *internalJob) error { - if locker == nil { - return ErrWithDistributedJobLockerNil - } - j.locker = locker - return nil - } -} - -// WithEventListeners sets the event listeners that should be -// run for the job. -func WithEventListeners(eventListeners ...EventListener) JobOption { - return func(j *internalJob) error { - for _, eventListener := range eventListeners { - if err := eventListener(j); err != nil { - return err - } - } - return nil - } -} - -// WithLimitedRuns limits the number of executions of this job to n. -// Upon reaching the limit, the job is removed from the scheduler. -func WithLimitedRuns(limit uint) JobOption { - return func(j *internalJob) error { - j.limitRunsTo = &limitRunsTo{ - limit: limit, - runCount: 0, - } - return nil - } -} - -// WithName sets the name of the job. Name provides -// a human-readable identifier for the job. 
-func WithName(name string) JobOption { - // TODO use the name for metrics and future logging option - return func(j *internalJob) error { - if name == "" { - return ErrWithNameEmpty - } - j.name = name - return nil - } -} - -// WithSingletonMode keeps the job from running again if it is already running. -// This is useful for jobs that should not overlap, and that occasionally -// (but not consistently) run longer than the interval between job runs. -func WithSingletonMode(mode LimitMode) JobOption { - return func(j *internalJob) error { - j.singletonMode = true - j.singletonLimitMode = mode - return nil - } -} - -// WithStartAt sets the option for starting the job at -// a specific datetime. -func WithStartAt(option StartAtOption) JobOption { - return func(j *internalJob) error { - return option(j) - } -} - -// StartAtOption defines options for starting the job -type StartAtOption func(*internalJob) error - -// WithStartImmediately tells the scheduler to run the job immediately -// regardless of the type or schedule of job. After this immediate run -// the job is scheduled from this time based on the job definition. -func WithStartImmediately() StartAtOption { - return func(j *internalJob) error { - j.startImmediately = true - return nil - } -} - -// WithStartDateTime sets the first date & time at which the job should run. -// This datetime must be in the future. -func WithStartDateTime(start time.Time) StartAtOption { - return func(j *internalJob) error { - if start.IsZero() || start.Before(time.Now()) { - return ErrWithStartDateTimePast - } - j.startTime = start - return nil - } -} - -// WithTags sets the tags for the job. Tags provide -// a way to identify jobs by a set of tags and remove -// multiple jobs by tag. 
-func WithTags(tags ...string) JobOption { - return func(j *internalJob) error { - j.tags = tags - return nil - } -} - -// ----------------------------------------------- -// ----------------------------------------------- -// ------------- Job Event Listeners ------------- -// ----------------------------------------------- -// ----------------------------------------------- - -// EventListener defines the constructor for event -// listeners that can be used to listen for job events. -type EventListener func(*internalJob) error - -// BeforeJobRuns is used to listen for when a job is about to run and -// then run the provided function. -func BeforeJobRuns(eventListenerFunc func(jobID uuid.UUID, jobName string)) EventListener { - return func(j *internalJob) error { - if eventListenerFunc == nil { - return ErrEventListenerFuncNil - } - j.beforeJobRuns = eventListenerFunc - return nil - } -} - -// AfterJobRuns is used to listen for when a job has run -// without an error, and then run the provided function. -func AfterJobRuns(eventListenerFunc func(jobID uuid.UUID, jobName string)) EventListener { - return func(j *internalJob) error { - if eventListenerFunc == nil { - return ErrEventListenerFuncNil - } - j.afterJobRuns = eventListenerFunc - return nil - } -} - -// AfterJobRunsWithError is used to listen for when a job has run and -// returned an error, and then run the provided function. -func AfterJobRunsWithError(eventListenerFunc func(jobID uuid.UUID, jobName string, err error)) EventListener { - return func(j *internalJob) error { - if eventListenerFunc == nil { - return ErrEventListenerFuncNil - } - j.afterJobRunsWithError = eventListenerFunc - return nil - } -} - -// AfterJobRunsWithPanic is used to listen for when a job has run and -// returned panicked recover data, and then run the provided function. 
-func AfterJobRunsWithPanic(eventListenerFunc func(jobID uuid.UUID, jobName string, recoverData any)) EventListener { - return func(j *internalJob) error { - if eventListenerFunc == nil { - return ErrEventListenerFuncNil - } - j.afterJobRunsWithPanic = eventListenerFunc - return nil - } -} - -// AfterLockError is used to when the distributed locker returns an error and -// then run the provided function. -func AfterLockError(eventListenerFunc func(jobID uuid.UUID, jobName string, err error)) EventListener { - return func(j *internalJob) error { - if eventListenerFunc == nil { - return ErrEventListenerFuncNil - } - j.afterLockError = eventListenerFunc - return nil - } -} - -// ----------------------------------------------- -// ----------------------------------------------- -// ---------------- Job Schedules ---------------- -// ----------------------------------------------- -// ----------------------------------------------- - -type jobSchedule interface { - next(lastRun time.Time) time.Time -} - -var _ jobSchedule = (*cronJob)(nil) - -type cronJob struct { - cronSchedule cron.Schedule -} - -func (j *cronJob) next(lastRun time.Time) time.Time { - return j.cronSchedule.Next(lastRun) -} - -var _ jobSchedule = (*durationJob)(nil) - -type durationJob struct { - duration time.Duration -} - -func (j *durationJob) next(lastRun time.Time) time.Time { - return lastRun.Add(j.duration) -} - -var _ jobSchedule = (*durationRandomJob)(nil) - -type durationRandomJob struct { - min, max time.Duration - rand *rand.Rand -} - -func (j *durationRandomJob) next(lastRun time.Time) time.Time { - r := j.rand.Int63n(int64(j.max - j.min)) - return lastRun.Add(j.min + time.Duration(r)) -} - -var _ jobSchedule = (*dailyJob)(nil) - -type dailyJob struct { - interval uint - atTimes []time.Time -} - -func (d dailyJob) next(lastRun time.Time) time.Time { - firstPass := true - next := d.nextDay(lastRun, firstPass) - if !next.IsZero() { - return next - } - firstPass = false - - startNextDay := 
time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day()+int(d.interval), 0, 0, 0, lastRun.Nanosecond(), lastRun.Location()) - return d.nextDay(startNextDay, firstPass) -} - -func (d dailyJob) nextDay(lastRun time.Time, firstPass bool) time.Time { - for _, at := range d.atTimes { - // sub the at time hour/min/sec onto the lastScheduledRun's values - // to use in checks to see if we've got our next run time - atDate := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) - - if firstPass && atDate.After(lastRun) { - // checking to see if it is after i.e. greater than, - // and not greater or equal as our lastScheduledRun day/time - // will be in the loop, and we don't want to select it again - return atDate - } else if !firstPass && !atDate.Before(lastRun) { - // now that we're looking at the next day, it's ok to consider - // the same at time that was last run (as lastScheduledRun has been incremented) - return atDate - } - } - return time.Time{} -} - -var _ jobSchedule = (*weeklyJob)(nil) - -type weeklyJob struct { - interval uint - daysOfWeek []time.Weekday - atTimes []time.Time -} - -func (w weeklyJob) next(lastRun time.Time) time.Time { - firstPass := true - next := w.nextWeekDayAtTime(lastRun, firstPass) - if !next.IsZero() { - return next - } - firstPass = false - - startOfTheNextIntervalWeek := (lastRun.Day() - int(lastRun.Weekday())) + int(w.interval*7) - from := time.Date(lastRun.Year(), lastRun.Month(), startOfTheNextIntervalWeek, 0, 0, 0, 0, lastRun.Location()) - return w.nextWeekDayAtTime(from, firstPass) -} - -func (w weeklyJob) nextWeekDayAtTime(lastRun time.Time, firstPass bool) time.Time { - for _, wd := range w.daysOfWeek { - // checking if we're on the same day or later in the same week - if wd >= lastRun.Weekday() { - // weekDayDiff is used to add the correct amount to the atDate day below - weekDayDiff := wd - lastRun.Weekday() - for _, at := range w.atTimes { - 
// sub the at time hour/min/sec onto the lastScheduledRun's values - // to use in checks to see if we've got our next run time - atDate := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day()+int(weekDayDiff), at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) - - if firstPass && atDate.After(lastRun) { - // checking to see if it is after i.e. greater than, - // and not greater or equal as our lastScheduledRun day/time - // will be in the loop, and we don't want to select it again - return atDate - } else if !firstPass && !atDate.Before(lastRun) { - // now that we're looking at the next week, it's ok to consider - // the same at time that was last run (as lastScheduledRun has been incremented) - return atDate - } - } - } - } - return time.Time{} -} - -var _ jobSchedule = (*monthlyJob)(nil) - -type monthlyJob struct { - interval uint - days []int - daysFromEnd []int - atTimes []time.Time -} - -func (m monthlyJob) next(lastRun time.Time) time.Time { - daysList := make([]int, len(m.days)) - copy(daysList, m.days) - - daysFromEnd := m.handleNegativeDays(lastRun, daysList, m.daysFromEnd) - next := m.nextMonthDayAtTime(lastRun, daysFromEnd, true) - if !next.IsZero() { - return next - } - - from := time.Date(lastRun.Year(), lastRun.Month()+time.Month(m.interval), 1, 0, 0, 0, 0, lastRun.Location()) - for next.IsZero() { - daysFromEnd = m.handleNegativeDays(from, daysList, m.daysFromEnd) - next = m.nextMonthDayAtTime(from, daysFromEnd, false) - from = from.AddDate(0, int(m.interval), 0) - } - - return next -} - -func (m monthlyJob) handleNegativeDays(from time.Time, days, negativeDays []int) []int { - var out []int - // getting a list of the days from the end of the following month - // -1 == the last day of the month - firstDayNextMonth := time.Date(from.Year(), from.Month()+1, 1, 0, 0, 0, 0, from.Location()) - for _, daySub := range negativeDays { - day := firstDayNextMonth.AddDate(0, 0, daySub).Day() - out = append(out, day) - } - out = 
append(out, days...) - slices.Sort(out) - return out -} - -func (m monthlyJob) nextMonthDayAtTime(lastRun time.Time, days []int, firstPass bool) time.Time { - // find the next day in the month that should run and then check for an at time - for _, day := range days { - if day >= lastRun.Day() { - for _, at := range m.atTimes { - // sub the day, and the at time hour/min/sec onto the lastScheduledRun's values - // to use in checks to see if we've got our next run time - atDate := time.Date(lastRun.Year(), lastRun.Month(), day, at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) - - if atDate.Month() != lastRun.Month() { - // this check handles if we're setting a day not in the current month - // e.g. setting day 31 in Feb results in March 2nd - continue - } - - if firstPass && atDate.After(lastRun) { - // checking to see if it is after i.e. greater than, - // and not greater or equal as our lastScheduledRun day/time - // will be in the loop, and we don't want to select it again - return atDate - } else if !firstPass && !atDate.Before(lastRun) { - // now that we're looking at the next month, it's ok to consider - // the same at time that was lastScheduledRun (as lastScheduledRun has been incremented) - return atDate - } - } - continue - } - } - return time.Time{} -} - -var _ jobSchedule = (*oneTimeJob)(nil) - -type oneTimeJob struct { - sortedTimes []time.Time -} - -// next finds the next item in a sorted list of times using binary-search. 
-// -// example: sortedTimes: [2, 4, 6, 8] -// -// lastRun: 1 => [idx=0,found=false] => next is 2 - sorted[idx] idx=0 -// lastRun: 2 => [idx=0,found=true] => next is 4 - sorted[idx+1] idx=1 -// lastRun: 3 => [idx=1,found=false] => next is 4 - sorted[idx] idx=1 -// lastRun: 4 => [idx=1,found=true] => next is 6 - sorted[idx+1] idx=2 -// lastRun: 7 => [idx=3,found=false] => next is 8 - sorted[idx] idx=3 -// lastRun: 8 => [idx=3,found=found] => next is none -// lastRun: 9 => [idx=3,found=found] => next is none -func (o oneTimeJob) next(lastRun time.Time) time.Time { - idx, found := slices.BinarySearchFunc(o.sortedTimes, lastRun, ascendingTime) - // if found, the next run is the following index - if found { - idx++ - } - // exhausted runs - if idx >= len(o.sortedTimes) { - return time.Time{} - } - - return o.sortedTimes[idx] -} - -// ----------------------------------------------- -// ----------------------------------------------- -// ---------------- Job Interface ---------------- -// ----------------------------------------------- -// ----------------------------------------------- - -// Job provides the available methods on the job -// available to the caller. -type Job interface { - // ID returns the job's unique identifier. - ID() uuid.UUID - // LastRun returns the time of the job's last run - LastRun() (time.Time, error) - // Name returns the name defined on the job. - Name() string - // NextRun returns the time of the job's next scheduled run. - NextRun() (time.Time, error) - // NextRuns returns the requested number of calculated next run values. - NextRuns(int) ([]time.Time, error) - // RunNow runs the job once, now. This does not alter - // the existing run schedule, and will respect all job - // and scheduler limits. This means that running a job now may - // cause the job's regular interval to be rescheduled due to - // the instance being run by RunNow blocking your run limit. - RunNow() error - // Tags returns the job's string tags. 
- Tags() []string -} - -var _ Job = (*job)(nil) - -// job is the internal struct that implements -// the public interface. This is used to avoid -// leaking information the caller never needs -// to have or tinker with. -type job struct { - id uuid.UUID - name string - tags []string - jobOutRequest chan jobOutRequest - runJobRequest chan runJobRequest -} - -func (j job) ID() uuid.UUID { - return j.id -} - -func (j job) LastRun() (time.Time, error) { - ij := requestJob(j.id, j.jobOutRequest) - if ij == nil || ij.id == uuid.Nil { - return time.Time{}, ErrJobNotFound - } - return ij.lastRun, nil -} - -func (j job) Name() string { - return j.name -} - -func (j job) NextRun() (time.Time, error) { - ij := requestJob(j.id, j.jobOutRequest) - if ij == nil || ij.id == uuid.Nil { - return time.Time{}, ErrJobNotFound - } - if len(ij.nextScheduled) == 0 { - return time.Time{}, nil - } - // the first element is the next scheduled run with subsequent - // runs following after in the slice - return ij.nextScheduled[0], nil -} - -func (j job) NextRuns(count int) ([]time.Time, error) { - ij := requestJob(j.id, j.jobOutRequest) - if ij == nil || ij.id == uuid.Nil { - return nil, ErrJobNotFound - } - - lengthNextScheduled := len(ij.nextScheduled) - if lengthNextScheduled == 0 { - return nil, nil - } else if count <= lengthNextScheduled { - return ij.nextScheduled[:count], nil - } - - out := make([]time.Time, count) - for i := 0; i < count; i++ { - if i < lengthNextScheduled { - out[i] = ij.nextScheduled[i] - continue - } - - from := out[i-1] - out[i] = ij.next(from) - } - - return out, nil -} - -func (j job) Tags() []string { - return j.tags -} - -func (j job) RunNow() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - resp := make(chan error, 1) - - select { - case j.runJobRequest <- runJobRequest{ - id: j.id, - outChan: resp, - }: - case <-time.After(100 * time.Millisecond): - return ErrJobRunNowFailed - } - var err error - select { - 
case <-ctx.Done(): - return ErrJobRunNowFailed - case errReceived := <-resp: - err = errReceived - } - return err -} diff --git a/vendor/github.com/go-co-op/gocron/v2/logger.go b/vendor/github.com/go-co-op/gocron/v2/logger.go deleted file mode 100644 index c8ebaf82b..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/logger.go +++ /dev/null @@ -1,101 +0,0 @@ -//go:generate mockgen -destination=mocks/logger.go -package=gocronmocks . Logger -package gocron - -import ( - "fmt" - "log" - "os" - "strings" -) - -// Logger is the interface that wraps the basic logging methods -// used by gocron. The methods are modeled after the standard -// library slog package. The default logger is a no-op logger. -// To enable logging, use one of the provided New*Logger functions -// or implement your own Logger. The actual level of Log that is logged -// is handled by the implementation. -type Logger interface { - Debug(msg string, args ...any) - Error(msg string, args ...any) - Info(msg string, args ...any) - Warn(msg string, args ...any) -} - -var _ Logger = (*noOpLogger)(nil) - -type noOpLogger struct{} - -func (l noOpLogger) Debug(_ string, _ ...any) {} -func (l noOpLogger) Error(_ string, _ ...any) {} -func (l noOpLogger) Info(_ string, _ ...any) {} -func (l noOpLogger) Warn(_ string, _ ...any) {} - -var _ Logger = (*logger)(nil) - -// LogLevel is the level of logging that should be logged -// when using the basic NewLogger. -type LogLevel int - -// The different log levels that can be used. -const ( - LogLevelError LogLevel = iota - LogLevelWarn - LogLevelInfo - LogLevelDebug -) - -type logger struct { - log *log.Logger - level LogLevel -} - -// NewLogger returns a new Logger that logs at the given level. 
-func NewLogger(level LogLevel) Logger { - l := log.New(os.Stdout, "", log.LstdFlags) - return &logger{ - log: l, - level: level, - } -} - -func (l *logger) Debug(msg string, args ...any) { - if l.level < LogLevelDebug { - return - } - l.log.Printf("DEBUG: %s%s\n", msg, logFormatArgs(args...)) -} - -func (l *logger) Error(msg string, args ...any) { - if l.level < LogLevelError { - return - } - l.log.Printf("ERROR: %s%s\n", msg, logFormatArgs(args...)) -} - -func (l *logger) Info(msg string, args ...any) { - if l.level < LogLevelInfo { - return - } - l.log.Printf("INFO: %s%s\n", msg, logFormatArgs(args...)) -} - -func (l *logger) Warn(msg string, args ...any) { - if l.level < LogLevelWarn { - return - } - l.log.Printf("WARN: %s%s\n", msg, logFormatArgs(args...)) -} - -func logFormatArgs(args ...any) string { - if len(args) == 0 { - return "" - } - if len(args)%2 != 0 { - return ", " + fmt.Sprint(args...) - } - var pairs []string - for i := 0; i < len(args); i += 2 { - pairs = append(pairs, fmt.Sprintf("%s=%v", args[i], args[i+1])) - } - return ", " + strings.Join(pairs, ", ") -} diff --git a/vendor/github.com/go-co-op/gocron/v2/monitor.go b/vendor/github.com/go-co-op/gocron/v2/monitor.go deleted file mode 100644 index ecf28805f..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/monitor.go +++ /dev/null @@ -1,27 +0,0 @@ -package gocron - -import ( - "time" - - "github.com/google/uuid" -) - -// JobStatus is the status of job run that should be collected with the metric. -type JobStatus string - -// The different statuses of job that can be used. -const ( - Fail JobStatus = "fail" - Success JobStatus = "success" - Skip JobStatus = "skip" -) - -// Monitor represents the interface to collect jobs metrics. 
-type Monitor interface { - // IncrementJob will provide details about the job and expects the underlying implementation - // to handle instantiating and incrementing a value - IncrementJob(id uuid.UUID, name string, tags []string, status JobStatus) - // RecordJobTiming will provide details about the job and the timing and expects the underlying implementation - // to handle instantiating and recording the value - RecordJobTiming(startTime, endTime time.Time, id uuid.UUID, name string, tags []string) -} diff --git a/vendor/github.com/go-co-op/gocron/v2/scheduler.go b/vendor/github.com/go-co-op/gocron/v2/scheduler.go deleted file mode 100644 index 9540003fb..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/scheduler.go +++ /dev/null @@ -1,861 +0,0 @@ -//go:generate mockgen -destination=mocks/scheduler.go -package=gocronmocks . Scheduler -package gocron - -import ( - "context" - "reflect" - "runtime" - "time" - - "github.com/google/uuid" - "github.com/jonboulle/clockwork" - "golang.org/x/exp/slices" -) - -var _ Scheduler = (*scheduler)(nil) - -// Scheduler defines the interface for the Scheduler. -type Scheduler interface { - // Jobs returns all the jobs currently in the scheduler. - Jobs() []Job - // NewJob creates a new job in the Scheduler. The job is scheduled per the provided - // definition when the Scheduler is started. If the Scheduler is already running - // the job will be scheduled when the Scheduler is started. - NewJob(JobDefinition, Task, ...JobOption) (Job, error) - // RemoveByTags removes all jobs that have at least one of the provided tags. - RemoveByTags(...string) - // RemoveJob removes the job with the provided id. - RemoveJob(uuid.UUID) error - // Shutdown should be called when you no longer need - // the Scheduler or Job's as the Scheduler cannot - // be restarted after calling Shutdown. This is similar - // to a Close or Cleanup method and is often deferred after - // starting the scheduler. 
- Shutdown() error - // Start begins scheduling jobs for execution based - // on each job's definition. Job's added to an already - // running scheduler will be scheduled immediately based - // on definition. Start is non-blocking. - Start() - // StopJobs stops the execution of all jobs in the scheduler. - // This can be useful in situations where jobs need to be - // paused globally and then restarted with Start(). - StopJobs() error - // Update replaces the existing Job's JobDefinition with the provided - // JobDefinition. The Job's Job.ID() remains the same. - Update(uuid.UUID, JobDefinition, Task, ...JobOption) (Job, error) - // JobsWaitingInQueue number of jobs waiting in Queue in case of LimitModeWait - // In case of LimitModeReschedule or no limit it will be always zero - JobsWaitingInQueue() int -} - -// ----------------------------------------------- -// ----------------------------------------------- -// ----------------- Scheduler ------------------- -// ----------------------------------------------- -// ----------------------------------------------- - -type scheduler struct { - shutdownCtx context.Context - shutdownCancel context.CancelFunc - exec executor - jobs map[uuid.UUID]internalJob - location *time.Location - clock clockwork.Clock - started bool - globalJobOptions []JobOption - logger Logger - - startCh chan struct{} - startedCh chan struct{} - stopCh chan struct{} - stopErrCh chan error - allJobsOutRequest chan allJobsOutRequest - jobOutRequestCh chan jobOutRequest - runJobRequestCh chan runJobRequest - newJobCh chan newJobIn - removeJobCh chan uuid.UUID - removeJobsByTagsCh chan []string -} - -type newJobIn struct { - ctx context.Context - cancel context.CancelFunc - job internalJob -} - -type jobOutRequest struct { - id uuid.UUID - outChan chan internalJob -} - -type runJobRequest struct { - id uuid.UUID - outChan chan error -} - -type allJobsOutRequest struct { - outChan chan []Job -} - -// NewScheduler creates a new Scheduler instance. 
-// The Scheduler is not started until Start() is called. -// -// NewJob will add jobs to the Scheduler, but they will not -// be scheduled until Start() is called. -func NewScheduler(options ...SchedulerOption) (Scheduler, error) { - schCtx, cancel := context.WithCancel(context.Background()) - - exec := executor{ - stopCh: make(chan struct{}), - stopTimeout: time.Second * 10, - singletonRunners: nil, - logger: &noOpLogger{}, - - jobsIn: make(chan jobIn), - jobsOutForRescheduling: make(chan uuid.UUID), - jobsOutCompleted: make(chan uuid.UUID), - jobOutRequest: make(chan jobOutRequest, 1000), - done: make(chan error), - } - - s := &scheduler{ - shutdownCtx: schCtx, - shutdownCancel: cancel, - exec: exec, - jobs: make(map[uuid.UUID]internalJob), - location: time.Local, - clock: clockwork.NewRealClock(), - logger: &noOpLogger{}, - - newJobCh: make(chan newJobIn), - removeJobCh: make(chan uuid.UUID), - removeJobsByTagsCh: make(chan []string), - startCh: make(chan struct{}), - startedCh: make(chan struct{}), - stopCh: make(chan struct{}), - stopErrCh: make(chan error, 1), - jobOutRequestCh: make(chan jobOutRequest), - runJobRequestCh: make(chan runJobRequest), - allJobsOutRequest: make(chan allJobsOutRequest), - } - - for _, option := range options { - err := option(s) - if err != nil { - return nil, err - } - } - - go func() { - s.logger.Info("gocron: new scheduler created") - for { - select { - case id := <-s.exec.jobsOutForRescheduling: - s.selectExecJobsOutForRescheduling(id) - - case id := <-s.exec.jobsOutCompleted: - s.selectExecJobsOutCompleted(id) - - case in := <-s.newJobCh: - s.selectNewJob(in) - - case id := <-s.removeJobCh: - s.selectRemoveJob(id) - - case tags := <-s.removeJobsByTagsCh: - s.selectRemoveJobsByTags(tags) - - case out := <-s.exec.jobOutRequest: - s.selectJobOutRequest(out) - - case out := <-s.jobOutRequestCh: - s.selectJobOutRequest(out) - - case out := <-s.allJobsOutRequest: - s.selectAllJobsOutRequest(out) - - case run := 
<-s.runJobRequestCh: - s.selectRunJobRequest(run) - - case <-s.startCh: - s.selectStart() - - case <-s.stopCh: - s.stopScheduler() - - case <-s.shutdownCtx.Done(): - s.stopScheduler() - return - } - } - }() - - return s, nil -} - -// ----------------------------------------------- -// ----------------------------------------------- -// --------- Scheduler Channel Methods ----------- -// ----------------------------------------------- -// ----------------------------------------------- - -// The scheduler's channel functions are broken out here -// to allow prioritizing within the select blocks. The idea -// being that we want to make sure that scheduling tasks -// are not blocked by requests from the caller for information -// about jobs. - -func (s *scheduler) stopScheduler() { - s.logger.Debug("gocron: stopping scheduler") - if s.started { - s.exec.stopCh <- struct{}{} - } - - for _, j := range s.jobs { - j.stop() - } - for id, j := range s.jobs { - <-j.ctx.Done() - - j.ctx, j.cancel = context.WithCancel(s.shutdownCtx) - s.jobs[id] = j - } - var err error - if s.started { - select { - case err = <-s.exec.done: - case <-time.After(s.exec.stopTimeout + 1*time.Second): - err = ErrStopExecutorTimedOut - } - } - s.stopErrCh <- err - s.started = false - s.logger.Debug("gocron: scheduler stopped") -} - -func (s *scheduler) selectAllJobsOutRequest(out allJobsOutRequest) { - outJobs := make([]Job, len(s.jobs)) - var counter int - for _, j := range s.jobs { - outJobs[counter] = s.jobFromInternalJob(j) - counter++ - } - slices.SortFunc(outJobs, func(a, b Job) int { - aID, bID := a.ID().String(), b.ID().String() - switch { - case aID < bID: - return -1 - case aID > bID: - return 1 - default: - return 0 - } - }) - select { - case <-s.shutdownCtx.Done(): - case out.outChan <- outJobs: - } -} - -func (s *scheduler) selectRunJobRequest(run runJobRequest) { - j, ok := s.jobs[run.id] - if !ok { - select { - case run.outChan <- ErrJobNotFound: - default: - } - } - select { - case 
<-s.shutdownCtx.Done(): - select { - case run.outChan <- ErrJobRunNowFailed: - default: - } - case s.exec.jobsIn <- jobIn{ - id: j.id, - shouldSendOut: false, - }: - select { - case run.outChan <- nil: - default: - } - } -} - -func (s *scheduler) selectRemoveJob(id uuid.UUID) { - j, ok := s.jobs[id] - if !ok { - return - } - j.stop() - delete(s.jobs, id) -} - -// Jobs coming back from the executor to the scheduler that -// need to evaluated for rescheduling. -func (s *scheduler) selectExecJobsOutForRescheduling(id uuid.UUID) { - select { - case <-s.shutdownCtx.Done(): - return - default: - } - j, ok := s.jobs[id] - if !ok { - // the job was removed while it was running, and - // so we don't need to reschedule it. - return - } - var scheduleFrom time.Time - if len(j.nextScheduled) > 0 { - // always grab the last element in the slice as that is the furthest - // out in the future and the time from which we want to calculate - // the subsequent next run time. - slices.SortStableFunc(j.nextScheduled, ascendingTime) - scheduleFrom = j.nextScheduled[len(j.nextScheduled)-1] - } - - next := j.next(scheduleFrom) - if next.IsZero() { - // the job's next function will return zero for OneTime jobs. - // since they are one time only, they do not need rescheduling. - return - } - if next.Before(s.now()) { - // in some cases the next run time can be in the past, for example: - // - the time on the machine was incorrect and has been synced with ntp - // - the machine went to sleep, and woke up some time later - // in those cases, we want to increment to the next run in the future - // and schedule the job for that time. - for next.Before(s.now()) { - next = j.next(next) - } - } - j.nextScheduled = append(j.nextScheduled, next) - j.timer = s.clock.AfterFunc(next.Sub(s.now()), func() { - // set the actual timer on the job here and listen for - // shut down events so that the job doesn't attempt to - // run if the scheduler has been shutdown. 
- select { - case <-s.shutdownCtx.Done(): - return - case s.exec.jobsIn <- jobIn{ - id: j.id, - shouldSendOut: true, - }: - } - }) - // update the job with its new next and last run times and timer. - s.jobs[id] = j -} - -func (s *scheduler) selectExecJobsOutCompleted(id uuid.UUID) { - j, ok := s.jobs[id] - if !ok { - return - } - - // if the job has nextScheduled time in the past, - // we need to remove any that are in the past. - var newNextScheduled []time.Time - for _, t := range j.nextScheduled { - if t.Before(s.now()) { - continue - } - newNextScheduled = append(newNextScheduled, t) - } - j.nextScheduled = newNextScheduled - - // if the job has a limited number of runs set, we need to - // check how many runs have occurred and stop running this - // job if it has reached the limit. - if j.limitRunsTo != nil { - j.limitRunsTo.runCount = j.limitRunsTo.runCount + 1 - if j.limitRunsTo.runCount == j.limitRunsTo.limit { - go func() { - select { - case <-s.shutdownCtx.Done(): - return - case s.removeJobCh <- id: - } - }() - return - } - } - - j.lastRun = s.now() - s.jobs[id] = j -} - -func (s *scheduler) selectJobOutRequest(out jobOutRequest) { - if j, ok := s.jobs[out.id]; ok { - select { - case out.outChan <- j: - case <-s.shutdownCtx.Done(): - } - } - close(out.outChan) -} - -func (s *scheduler) selectNewJob(in newJobIn) { - j := in.job - if s.started { - next := j.startTime - if j.startImmediately { - next = s.now() - select { - case <-s.shutdownCtx.Done(): - case s.exec.jobsIn <- jobIn{ - id: j.id, - shouldSendOut: true, - }: - } - } else { - if next.IsZero() { - next = j.next(s.now()) - } - - id := j.id - j.timer = s.clock.AfterFunc(next.Sub(s.now()), func() { - select { - case <-s.shutdownCtx.Done(): - case s.exec.jobsIn <- jobIn{ - id: id, - shouldSendOut: true, - }: - } - }) - } - j.nextScheduled = append(j.nextScheduled, next) - } - - s.jobs[j.id] = j - in.cancel() -} - -func (s *scheduler) selectRemoveJobsByTags(tags []string) { - for _, j := range s.jobs 
{ - for _, tag := range tags { - if slices.Contains(j.tags, tag) { - j.stop() - delete(s.jobs, j.id) - break - } - } - } -} - -func (s *scheduler) selectStart() { - s.logger.Debug("gocron: scheduler starting") - go s.exec.start() - - s.started = true - for id, j := range s.jobs { - next := j.startTime - if j.startImmediately { - next = s.now() - select { - case <-s.shutdownCtx.Done(): - case s.exec.jobsIn <- jobIn{ - id: id, - shouldSendOut: true, - }: - } - } else { - if next.IsZero() { - next = j.next(s.now()) - } - - jobID := id - j.timer = s.clock.AfterFunc(next.Sub(s.now()), func() { - select { - case <-s.shutdownCtx.Done(): - case s.exec.jobsIn <- jobIn{ - id: jobID, - shouldSendOut: true, - }: - } - }) - } - j.nextScheduled = append(j.nextScheduled, next) - s.jobs[id] = j - } - select { - case <-s.shutdownCtx.Done(): - case s.startedCh <- struct{}{}: - s.logger.Info("gocron: scheduler started") - } -} - -// ----------------------------------------------- -// ----------------------------------------------- -// ------------- Scheduler Methods --------------- -// ----------------------------------------------- -// ----------------------------------------------- - -func (s *scheduler) now() time.Time { - return s.clock.Now().In(s.location) -} - -func (s *scheduler) jobFromInternalJob(in internalJob) job { - return job{ - in.id, - in.name, - slices.Clone(in.tags), - s.jobOutRequestCh, - s.runJobRequestCh, - } -} - -func (s *scheduler) Jobs() []Job { - outChan := make(chan []Job) - select { - case <-s.shutdownCtx.Done(): - case s.allJobsOutRequest <- allJobsOutRequest{outChan: outChan}: - } - - var jobs []Job - select { - case <-s.shutdownCtx.Done(): - case jobs = <-outChan: - } - - return jobs -} - -func (s *scheduler) NewJob(jobDefinition JobDefinition, task Task, options ...JobOption) (Job, error) { - return s.addOrUpdateJob(uuid.Nil, jobDefinition, task, options) -} - -func (s *scheduler) addOrUpdateJob(id uuid.UUID, definition JobDefinition, taskWrapper Task, 
options []JobOption) (Job, error) { - j := internalJob{} - if id == uuid.Nil { - j.id = uuid.New() - } else { - currentJob := requestJobCtx(s.shutdownCtx, id, s.jobOutRequestCh) - if currentJob != nil && currentJob.id != uuid.Nil { - select { - case <-s.shutdownCtx.Done(): - return nil, nil - case s.removeJobCh <- id: - <-currentJob.ctx.Done() - } - } - - j.id = id - } - - j.ctx, j.cancel = context.WithCancel(s.shutdownCtx) - - if taskWrapper == nil { - return nil, ErrNewJobTaskNil - } - - tsk := taskWrapper() - taskFunc := reflect.ValueOf(tsk.function) - for taskFunc.Kind() == reflect.Ptr { - taskFunc = taskFunc.Elem() - } - - if taskFunc.Kind() != reflect.Func { - return nil, ErrNewJobTaskNotFunc - } - - expectedParameterLength := taskFunc.Type().NumIn() - if len(tsk.parameters) != expectedParameterLength { - return nil, ErrNewJobWrongNumberOfParameters - } - - for i := 0; i < expectedParameterLength; i++ { - t1 := reflect.TypeOf(tsk.parameters[i]).Kind() - if t1 == reflect.Interface || t1 == reflect.Pointer { - t1 = reflect.TypeOf(tsk.parameters[i]).Elem().Kind() - } - t2 := reflect.New(taskFunc.Type().In(i)).Elem().Kind() - if t2 == reflect.Interface || t2 == reflect.Pointer { - t2 = reflect.Indirect(reflect.ValueOf(taskFunc.Type().In(i))).Kind() - } - if t1 != t2 { - return nil, ErrNewJobWrongTypeOfParameters - } - } - - j.name = runtime.FuncForPC(taskFunc.Pointer()).Name() - j.function = tsk.function - j.parameters = tsk.parameters - - // apply global job options - for _, option := range s.globalJobOptions { - if err := option(&j); err != nil { - return nil, err - } - } - - // apply job specific options, which take precedence - for _, option := range options { - if err := option(&j); err != nil { - return nil, err - } - } - - if err := definition.setup(&j, s.location, s.clock.Now()); err != nil { - return nil, err - } - - newJobCtx, newJobCancel := context.WithCancel(context.Background()) - select { - case <-s.shutdownCtx.Done(): - case s.newJobCh <- 
newJobIn{ - ctx: newJobCtx, - cancel: newJobCancel, - job: j, - }: - } - - select { - case <-newJobCtx.Done(): - case <-s.shutdownCtx.Done(): - } - - out := s.jobFromInternalJob(j) - return &out, nil -} - -func (s *scheduler) RemoveByTags(tags ...string) { - select { - case <-s.shutdownCtx.Done(): - case s.removeJobsByTagsCh <- tags: - } -} - -func (s *scheduler) RemoveJob(id uuid.UUID) error { - j := requestJobCtx(s.shutdownCtx, id, s.jobOutRequestCh) - if j == nil || j.id == uuid.Nil { - return ErrJobNotFound - } - select { - case <-s.shutdownCtx.Done(): - case s.removeJobCh <- id: - } - - return nil -} - -func (s *scheduler) Start() { - select { - case <-s.shutdownCtx.Done(): - case s.startCh <- struct{}{}: - <-s.startedCh - } -} - -func (s *scheduler) StopJobs() error { - select { - case <-s.shutdownCtx.Done(): - return nil - case s.stopCh <- struct{}{}: - } - select { - case err := <-s.stopErrCh: - return err - case <-time.After(s.exec.stopTimeout + 2*time.Second): - return ErrStopSchedulerTimedOut - } -} - -func (s *scheduler) Shutdown() error { - s.shutdownCancel() - select { - case err := <-s.stopErrCh: - return err - case <-time.After(s.exec.stopTimeout + 2*time.Second): - return ErrStopSchedulerTimedOut - } -} - -func (s *scheduler) Update(id uuid.UUID, jobDefinition JobDefinition, task Task, options ...JobOption) (Job, error) { - return s.addOrUpdateJob(id, jobDefinition, task, options) -} - -func (s *scheduler) JobsWaitingInQueue() int { - if s.exec.limitMode != nil && s.exec.limitMode.mode == LimitModeWait { - return len(s.exec.limitMode.in) - } - return 0 -} - -// ----------------------------------------------- -// ----------------------------------------------- -// ------------- Scheduler Options --------------- -// ----------------------------------------------- -// ----------------------------------------------- - -// SchedulerOption defines the function for setting -// options on the Scheduler. 
-type SchedulerOption func(*scheduler) error - -// WithClock sets the clock used by the Scheduler -// to the clock provided. See https://github.com/jonboulle/clockwork -func WithClock(clock clockwork.Clock) SchedulerOption { - return func(s *scheduler) error { - if clock == nil { - return ErrWithClockNil - } - s.clock = clock - return nil - } -} - -// WithDistributedElector sets the elector to be used by multiple -// Scheduler instances to determine who should be the leader. -// Only the leader runs jobs, while non-leaders wait and continue -// to check if a new leader has been elected. -func WithDistributedElector(elector Elector) SchedulerOption { - return func(s *scheduler) error { - if elector == nil { - return ErrWithDistributedElectorNil - } - s.exec.elector = elector - return nil - } -} - -// WithDistributedLocker sets the locker to be used by multiple -// Scheduler instances to ensure that only one instance of each -// job is run. -func WithDistributedLocker(locker Locker) SchedulerOption { - return func(s *scheduler) error { - if locker == nil { - return ErrWithDistributedLockerNil - } - s.exec.locker = locker - return nil - } -} - -// WithGlobalJobOptions sets JobOption's that will be applied to -// all jobs added to the scheduler. JobOption's set on the job -// itself will override if the same JobOption is set globally. -func WithGlobalJobOptions(jobOptions ...JobOption) SchedulerOption { - return func(s *scheduler) error { - s.globalJobOptions = jobOptions - return nil - } -} - -// LimitMode defines the modes used for handling jobs that reach -// the limit provided in WithLimitConcurrentJobs -type LimitMode int - -const ( - // LimitModeReschedule causes jobs reaching the limit set in - // WithLimitConcurrentJobs or WithSingletonMode to be skipped - // and rescheduled for the next run time rather than being - // queued up to wait. 
- LimitModeReschedule = 1 - - // LimitModeWait causes jobs reaching the limit set in - // WithLimitConcurrentJobs or WithSingletonMode to wait - // in a queue until a slot becomes available to run. - // - // Note: this mode can produce unpredictable results as - // job execution order isn't guaranteed. For example, a job that - // executes frequently may pile up in the wait queue and be executed - // many times back to back when the queue opens. - // - // Warning: do not use this mode if your jobs will continue to stack - // up beyond the ability of the limit workers to keep up. An example of - // what NOT to do: - // - // s, _ := gocron.NewScheduler(gocron.WithLimitConcurrentJobs) - // s.NewJob( - // gocron.DurationJob( - // time.Second, - // Task{ - // Function: func() { - // time.Sleep(10 * time.Second) - // }, - // }, - // ), - // ) - LimitModeWait = 2 -) - -// WithLimitConcurrentJobs sets the limit and mode to be used by the -// Scheduler for limiting the number of jobs that may be running at -// a given time. -// -// Note: the limit mode selected for WithLimitConcurrentJobs takes initial -// precedence in the event you are also running a limit mode at the job level -// using WithSingletonMode. -// -// Warning: a single time consuming job can dominate your limit in the event -// you are running both the scheduler limit WithLimitConcurrentJobs(1, LimitModeWait) -// and a job limit WithSingletonMode(LimitModeReschedule). -func WithLimitConcurrentJobs(limit uint, mode LimitMode) SchedulerOption { - return func(s *scheduler) error { - if limit == 0 { - return ErrWithLimitConcurrentJobsZero - } - s.exec.limitMode = &limitModeConfig{ - mode: mode, - limit: limit, - in: make(chan jobIn, 1000), - singletonJobs: make(map[uuid.UUID]struct{}), - } - if mode == LimitModeReschedule { - s.exec.limitMode.rescheduleLimiter = make(chan struct{}, limit) - } - return nil - } -} - -// WithLocation sets the location (i.e. timezone) that the scheduler -// should operate within. 
In many systems time.Local is UTC. -// Default: time.Local -func WithLocation(location *time.Location) SchedulerOption { - return func(s *scheduler) error { - if location == nil { - return ErrWithLocationNil - } - s.location = location - return nil - } -} - -// WithLogger sets the logger to be used by the Scheduler. -func WithLogger(logger Logger) SchedulerOption { - return func(s *scheduler) error { - if logger == nil { - return ErrWithLoggerNil - } - s.logger = logger - s.exec.logger = logger - return nil - } -} - -// WithStopTimeout sets the amount of time the Scheduler should -// wait gracefully for jobs to complete before returning when -// StopJobs() or Shutdown() are called. -// Default: 10 * time.Second -func WithStopTimeout(timeout time.Duration) SchedulerOption { - return func(s *scheduler) error { - if timeout <= 0 { - return ErrWithStopTimeoutZeroOrNegative - } - s.exec.stopTimeout = timeout - return nil - } -} - -// WithMonitor sets the metrics provider to be used by the Scheduler. 
-func WithMonitor(monitor Monitor) SchedulerOption { - return func(s *scheduler) error { - if monitor == nil { - return ErrWithMonitorNil - } - s.exec.monitor = monitor - return nil - } -} diff --git a/vendor/github.com/go-co-op/gocron/v2/util.go b/vendor/github.com/go-co-op/gocron/v2/util.go deleted file mode 100644 index a4e5b6fda..000000000 --- a/vendor/github.com/go-co-op/gocron/v2/util.go +++ /dev/null @@ -1,118 +0,0 @@ -package gocron - -import ( - "context" - "reflect" - "sync" - "time" - - "github.com/google/uuid" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" -) - -func callJobFuncWithParams(jobFunc any, params ...any) error { - if jobFunc == nil { - return nil - } - f := reflect.ValueOf(jobFunc) - if f.IsZero() { - return nil - } - if len(params) != f.Type().NumIn() { - return nil - } - in := make([]reflect.Value, len(params)) - for k, param := range params { - in[k] = reflect.ValueOf(param) - } - returnValues := f.Call(in) - for _, val := range returnValues { - i := val.Interface() - if err, ok := i.(error); ok { - return err - } - } - return nil -} - -func requestJob(id uuid.UUID, ch chan jobOutRequest) *internalJob { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - return requestJobCtx(ctx, id, ch) -} - -func requestJobCtx(ctx context.Context, id uuid.UUID, ch chan jobOutRequest) *internalJob { - resp := make(chan internalJob, 1) - select { - case ch <- jobOutRequest{ - id: id, - outChan: resp, - }: - case <-ctx.Done(): - return nil - } - var j internalJob - select { - case <-ctx.Done(): - return nil - case jobReceived := <-resp: - j = jobReceived - } - return &j -} - -func removeSliceDuplicatesInt(in []int) []int { - m := make(map[int]struct{}) - - for _, i := range in { - m[i] = struct{}{} - } - return maps.Keys(m) -} - -func convertAtTimesToDateTime(atTimes AtTimes, location *time.Location) ([]time.Time, error) { - if atTimes == nil { - return nil, errAtTimesNil - } - var atTimesDate []time.Time - for 
_, a := range atTimes() { - if a == nil { - return nil, errAtTimeNil - } - at := a() - if at.hours > 23 { - return nil, errAtTimeHours - } else if at.minutes > 59 || at.seconds > 59 { - return nil, errAtTimeMinSec - } - atTimesDate = append(atTimesDate, at.time(location)) - } - slices.SortStableFunc(atTimesDate, ascendingTime) - return atTimesDate, nil -} - -func ascendingTime(a, b time.Time) int { - return a.Compare(b) -} - -type waitGroupWithMutex struct { - wg sync.WaitGroup - mu sync.Mutex -} - -func (w *waitGroupWithMutex) Add(delta int) { - w.mu.Lock() - defer w.mu.Unlock() - w.wg.Add(delta) -} - -func (w *waitGroupWithMutex) Done() { - w.wg.Done() -} - -func (w *waitGroupWithMutex) Wait() { - w.mu.Lock() - defer w.mu.Unlock() - w.wg.Wait() -} diff --git a/vendor/github.com/robfig/cron/v3/.gitignore b/vendor/github.com/robfig/cron/v3/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/robfig/cron/v3/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/robfig/cron/v3/.travis.yml b/vendor/github.com/robfig/cron/v3/.travis.yml deleted file mode 100644 index 4f2ee4d97..000000000 --- a/vendor/github.com/robfig/cron/v3/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/robfig/cron/v3/LICENSE b/vendor/github.com/robfig/cron/v3/LICENSE deleted file mode 100644 index 3a0f627ff..000000000 --- a/vendor/github.com/robfig/cron/v3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (C) 2012 Rob Figueiredo -All Rights Reserved. 
- -MIT LICENSE - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/v3/README.md b/vendor/github.com/robfig/cron/v3/README.md deleted file mode 100644 index 984c537c0..000000000 --- a/vendor/github.com/robfig/cron/v3/README.md +++ /dev/null @@ -1,125 +0,0 @@ -[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) -[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) - -# cron - -Cron V3 has been released! - -To download the specific tagged release, run: - - go get github.com/robfig/cron/v3@v3.0.0 - -Import it in your program as: - - import "github.com/robfig/cron/v3" - -It requires Go 1.11 or later due to usage of Go Modules. - -Refer to the documentation here: -http://godoc.org/github.com/robfig/cron - -The rest of this document describes the the advances in v3 and a list of -breaking changes for users that wish to upgrade from an earlier version. 
- -## Upgrading to v3 (June 2019) - -cron v3 is a major upgrade to the library that addresses all outstanding bugs, -feature requests, and rough edges. It is based on a merge of master which -contains various fixes to issues found over the years and the v2 branch which -contains some backwards-incompatible features like the ability to remove cron -jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like -the timezone support, and fixes a number of bugs. - -New features: - -- Support for Go modules. Callers must now import this library as - `github.com/robfig/cron/v3`, instead of `gopkg.in/...` - -- Fixed bugs: - - 0f01e6b parser: fix combining of Dow and Dom (#70) - - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157) - - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144) - - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97) - - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206) - -- Standard cron spec parsing by default (first field is "minute"), with an easy - way to opt into the seconds field (quartz-compatible). Although, note that the - year field (optional in Quartz) is not supported. - -- Extensible, key/value logging via an interface that complies with - the https://github.com/go-logr/logr project. - -- The new Chain & JobWrapper types allow you to install "interceptors" to add - cross-cutting behavior like the following: - - Recover any panics from jobs - - Delay a job's execution if the previous run hasn't completed yet - - Skip a job's execution if the previous run hasn't completed yet - - Log each job's invocations - - Notification when jobs are completed - -It is backwards incompatible with both v1 and v2. These updates are required: - -- The v1 branch accepted an optional seconds field at the beginning of the cron - spec. This is non-standard and has led to a lot of confusion. 
The new default - parser conforms to the standard as described by [the Cron wikipedia page]. - - UPDATING: To retain the old behavior, construct your Cron with a custom - parser: - - // Seconds field, required - cron.New(cron.WithSeconds()) - - // Seconds field, optional - cron.New( - cron.WithParser( - cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)) - -- The Cron type now accepts functional options on construction rather than the - previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). - - UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be - updated to provide those values on construction. - -- CRON_TZ is now the recommended way to specify the timezone of a single - schedule, which is sanctioned by the specification. The legacy "TZ=" prefix - will continue to be supported since it is unambiguous and easy to do so. - - UPDATING: No update is required. - -- By default, cron will no longer recover panics in jobs that it runs. - Recovering can be surprising (see issue #192) and seems to be at odds with - typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option - has been removed to accommodate the more general JobWrapper type. - - UPDATING: To opt into panic recovery and configure the panic logger: - - cron.New(cron.WithChain( - cron.Recover(logger), // or use cron.DefaultLogger - )) - -- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was - removed, since it is duplicative with the leveled logging. - - UPDATING: Callers should use `WithLogger` and specify a logger that does not - discard `Info` logs. 
For convenience, one is provided that wraps `*log.Logger`: - - cron.New( - cron.WithLogger(cron.VerbosePrintfLogger(logger))) - - -### Background - Cron spec format - -There are two cron spec formats in common usage: - -- The "standard" cron format, described on [the Cron wikipedia page] and used by - the cron Linux system utility. - -- The cron format used by [the Quartz Scheduler], commonly used for scheduled - jobs in Java software - -[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron -[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html - -The original version of this package included an optional "seconds" field, which -made it incompatible with both of these formats. Now, the "standard" format is -the default format accepted, and the Quartz format is opt-in. diff --git a/vendor/github.com/robfig/cron/v3/chain.go b/vendor/github.com/robfig/cron/v3/chain.go deleted file mode 100644 index 9565b418e..000000000 --- a/vendor/github.com/robfig/cron/v3/chain.go +++ /dev/null @@ -1,92 +0,0 @@ -package cron - -import ( - "fmt" - "runtime" - "sync" - "time" -) - -// JobWrapper decorates the given Job with some behavior. -type JobWrapper func(Job) Job - -// Chain is a sequence of JobWrappers that decorates submitted jobs with -// cross-cutting behaviors like logging or synchronization. -type Chain struct { - wrappers []JobWrapper -} - -// NewChain returns a Chain consisting of the given JobWrappers. -func NewChain(c ...JobWrapper) Chain { - return Chain{c} -} - -// Then decorates the given job with all JobWrappers in the chain. -// -// This: -// NewChain(m1, m2, m3).Then(job) -// is equivalent to: -// m1(m2(m3(job))) -func (c Chain) Then(j Job) Job { - for i := range c.wrappers { - j = c.wrappers[len(c.wrappers)-i-1](j) - } - return j -} - -// Recover panics in wrapped jobs and log them with the provided logger. 
-func Recover(logger Logger) JobWrapper { - return func(j Job) Job { - return FuncJob(func() { - defer func() { - if r := recover(); r != nil { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - err, ok := r.(error) - if !ok { - err = fmt.Errorf("%v", r) - } - logger.Error(err, "panic", "stack", "...\n"+string(buf)) - } - }() - j.Run() - }) - } -} - -// DelayIfStillRunning serializes jobs, delaying subsequent runs until the -// previous one is complete. Jobs running after a delay of more than a minute -// have the delay logged at Info. -func DelayIfStillRunning(logger Logger) JobWrapper { - return func(j Job) Job { - var mu sync.Mutex - return FuncJob(func() { - start := time.Now() - mu.Lock() - defer mu.Unlock() - if dur := time.Since(start); dur > time.Minute { - logger.Info("delay", "duration", dur) - } - j.Run() - }) - } -} - -// SkipIfStillRunning skips an invocation of the Job if a previous invocation is -// still running. It logs skips to the given logger at Info level. -func SkipIfStillRunning(logger Logger) JobWrapper { - return func(j Job) Job { - var ch = make(chan struct{}, 1) - ch <- struct{}{} - return FuncJob(func() { - select { - case v := <-ch: - j.Run() - ch <- v - default: - logger.Info("skip") - } - }) - } -} diff --git a/vendor/github.com/robfig/cron/v3/constantdelay.go b/vendor/github.com/robfig/cron/v3/constantdelay.go deleted file mode 100644 index cd6e7b1be..000000000 --- a/vendor/github.com/robfig/cron/v3/constantdelay.go +++ /dev/null @@ -1,27 +0,0 @@ -package cron - -import "time" - -// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". -// It does not support jobs more frequent than once a second. -type ConstantDelaySchedule struct { - Delay time.Duration -} - -// Every returns a crontab Schedule that activates once every duration. -// Delays of less than a second are not supported (will round up to 1 second). -// Any fields less than a Second are truncated. 
-func Every(duration time.Duration) ConstantDelaySchedule { - if duration < time.Second { - duration = time.Second - } - return ConstantDelaySchedule{ - Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, - } -} - -// Next returns the next time this should be run. -// This rounds so that the next activation time will be on the second. -func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { - return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) -} diff --git a/vendor/github.com/robfig/cron/v3/cron.go b/vendor/github.com/robfig/cron/v3/cron.go deleted file mode 100644 index c7e917665..000000000 --- a/vendor/github.com/robfig/cron/v3/cron.go +++ /dev/null @@ -1,355 +0,0 @@ -package cron - -import ( - "context" - "sort" - "sync" - "time" -) - -// Cron keeps track of any number of entries, invoking the associated func as -// specified by the schedule. It may be started, stopped, and the entries may -// be inspected while running. -type Cron struct { - entries []*Entry - chain Chain - stop chan struct{} - add chan *Entry - remove chan EntryID - snapshot chan chan []Entry - running bool - logger Logger - runningMu sync.Mutex - location *time.Location - parser ScheduleParser - nextID EntryID - jobWaiter sync.WaitGroup -} - -// ScheduleParser is an interface for schedule spec parsers that return a Schedule -type ScheduleParser interface { - Parse(spec string) (Schedule, error) -} - -// Job is an interface for submitted cron jobs. -type Job interface { - Run() -} - -// Schedule describes a job's duty cycle. -type Schedule interface { - // Next returns the next activation time, later than the given time. - // Next is invoked initially, and then each time the job is run. - Next(time.Time) time.Time -} - -// EntryID identifies an entry within a Cron instance -type EntryID int - -// Entry consists of a schedule and the func to execute on that schedule. 
-type Entry struct { - // ID is the cron-assigned ID of this entry, which may be used to look up a - // snapshot or remove it. - ID EntryID - - // Schedule on which this job should be run. - Schedule Schedule - - // Next time the job will run, or the zero time if Cron has not been - // started or this entry's schedule is unsatisfiable - Next time.Time - - // Prev is the last time this job was run, or the zero time if never. - Prev time.Time - - // WrappedJob is the thing to run when the Schedule is activated. - WrappedJob Job - - // Job is the thing that was submitted to cron. - // It is kept around so that user code that needs to get at the job later, - // e.g. via Entries() can do so. - Job Job -} - -// Valid returns true if this is not the zero entry. -func (e Entry) Valid() bool { return e.ID != 0 } - -// byTime is a wrapper for sorting the entry array by time -// (with zero time at the end). -type byTime []*Entry - -func (s byTime) Len() int { return len(s) } -func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byTime) Less(i, j int) bool { - // Two zero times should return false. - // Otherwise, zero is "greater" than any other time. - // (To sort it at the end of the list.) - if s[i].Next.IsZero() { - return false - } - if s[j].Next.IsZero() { - return true - } - return s[i].Next.Before(s[j].Next) -} - -// New returns a new Cron job runner, modified by the given options. -// -// Available Settings -// -// Time Zone -// Description: The time zone in which schedules are interpreted -// Default: time.Local -// -// Parser -// Description: Parser converts cron spec strings into cron.Schedules. -// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron -// -// Chain -// Description: Wrap submitted jobs to customize behavior. -// Default: A chain that recovers panics and logs them to stderr. -// -// See "cron.With*" to modify the default behavior. 
-func New(opts ...Option) *Cron { - c := &Cron{ - entries: nil, - chain: NewChain(), - add: make(chan *Entry), - stop: make(chan struct{}), - snapshot: make(chan chan []Entry), - remove: make(chan EntryID), - running: false, - runningMu: sync.Mutex{}, - logger: DefaultLogger, - location: time.Local, - parser: standardParser, - } - for _, opt := range opts { - opt(c) - } - return c -} - -// FuncJob is a wrapper that turns a func() into a cron.Job -type FuncJob func() - -func (f FuncJob) Run() { f() } - -// AddFunc adds a func to the Cron to be run on the given schedule. -// The spec is parsed using the time zone of this Cron instance as the default. -// An opaque ID is returned that can be used to later remove it. -func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { - return c.AddJob(spec, FuncJob(cmd)) -} - -// AddJob adds a Job to the Cron to be run on the given schedule. -// The spec is parsed using the time zone of this Cron instance as the default. -// An opaque ID is returned that can be used to later remove it. -func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { - schedule, err := c.parser.Parse(spec) - if err != nil { - return 0, err - } - return c.Schedule(schedule, cmd), nil -} - -// Schedule adds a Job to the Cron to be run on the given schedule. -// The job is wrapped with the configured Chain. -func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { - c.runningMu.Lock() - defer c.runningMu.Unlock() - c.nextID++ - entry := &Entry{ - ID: c.nextID, - Schedule: schedule, - WrappedJob: c.chain.Then(cmd), - Job: cmd, - } - if !c.running { - c.entries = append(c.entries, entry) - } else { - c.add <- entry - } - return entry.ID -} - -// Entries returns a snapshot of the cron entries. 
-func (c *Cron) Entries() []Entry { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - replyChan := make(chan []Entry, 1) - c.snapshot <- replyChan - return <-replyChan - } - return c.entrySnapshot() -} - -// Location gets the time zone location -func (c *Cron) Location() *time.Location { - return c.location -} - -// Entry returns a snapshot of the given entry, or nil if it couldn't be found. -func (c *Cron) Entry(id EntryID) Entry { - for _, entry := range c.Entries() { - if id == entry.ID { - return entry - } - } - return Entry{} -} - -// Remove an entry from being run in the future. -func (c *Cron) Remove(id EntryID) { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - c.remove <- id - } else { - c.removeEntry(id) - } -} - -// Start the cron scheduler in its own goroutine, or no-op if already started. -func (c *Cron) Start() { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - return - } - c.running = true - go c.run() -} - -// Run the cron scheduler, or no-op if already running. -func (c *Cron) Run() { - c.runningMu.Lock() - if c.running { - c.runningMu.Unlock() - return - } - c.running = true - c.runningMu.Unlock() - c.run() -} - -// run the scheduler.. this is private just due to the need to synchronize -// access to the 'running' state variable. -func (c *Cron) run() { - c.logger.Info("start") - - // Figure out the next activation times for each entry. - now := c.now() - for _, entry := range c.entries { - entry.Next = entry.Schedule.Next(now) - c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next) - } - - for { - // Determine the next entry to run. - sort.Sort(byTime(c.entries)) - - var timer *time.Timer - if len(c.entries) == 0 || c.entries[0].Next.IsZero() { - // If there are no entries yet, just sleep - it still handles new entries - // and stop requests. 
- timer = time.NewTimer(100000 * time.Hour) - } else { - timer = time.NewTimer(c.entries[0].Next.Sub(now)) - } - - for { - select { - case now = <-timer.C: - now = now.In(c.location) - c.logger.Info("wake", "now", now) - - // Run every entry whose next time was less than now - for _, e := range c.entries { - if e.Next.After(now) || e.Next.IsZero() { - break - } - c.startJob(e.WrappedJob) - e.Prev = e.Next - e.Next = e.Schedule.Next(now) - c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next) - } - - case newEntry := <-c.add: - timer.Stop() - now = c.now() - newEntry.Next = newEntry.Schedule.Next(now) - c.entries = append(c.entries, newEntry) - c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) - - case replyChan := <-c.snapshot: - replyChan <- c.entrySnapshot() - continue - - case <-c.stop: - timer.Stop() - c.logger.Info("stop") - return - - case id := <-c.remove: - timer.Stop() - now = c.now() - c.removeEntry(id) - c.logger.Info("removed", "entry", id) - } - - break - } - } -} - -// startJob runs the given job in a new goroutine. -func (c *Cron) startJob(j Job) { - c.jobWaiter.Add(1) - go func() { - defer c.jobWaiter.Done() - j.Run() - }() -} - -// now returns current time in c location -func (c *Cron) now() time.Time { - return time.Now().In(c.location) -} - -// Stop stops the cron scheduler if it is running; otherwise it does nothing. -// A context is returned so the caller can wait for running jobs to complete. -func (c *Cron) Stop() context.Context { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - c.stop <- struct{}{} - c.running = false - } - ctx, cancel := context.WithCancel(context.Background()) - go func() { - c.jobWaiter.Wait() - cancel() - }() - return ctx -} - -// entrySnapshot returns a copy of the current cron entry list. 
-func (c *Cron) entrySnapshot() []Entry { - var entries = make([]Entry, len(c.entries)) - for i, e := range c.entries { - entries[i] = *e - } - return entries -} - -func (c *Cron) removeEntry(id EntryID) { - var entries []*Entry - for _, e := range c.entries { - if e.ID != id { - entries = append(entries, e) - } - } - c.entries = entries -} diff --git a/vendor/github.com/robfig/cron/v3/doc.go b/vendor/github.com/robfig/cron/v3/doc.go deleted file mode 100644 index fa5d08b4d..000000000 --- a/vendor/github.com/robfig/cron/v3/doc.go +++ /dev/null @@ -1,231 +0,0 @@ -/* -Package cron implements a cron spec parser and job runner. - -Installation - -To download the specific tagged release, run: - - go get github.com/robfig/cron/v3@v3.0.0 - -Import it in your program as: - - import "github.com/robfig/cron/v3" - -It requires Go 1.11 or later due to usage of Go Modules. - -Usage - -Callers may register Funcs to be invoked on a given schedule. Cron will run -them in their own goroutines. - - c := cron.New() - c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") }) - c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") }) - c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") }) - c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") }) - c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") }) - c.Start() - .. - // Funcs are invoked in their own goroutine, asynchronously. - ... - // Funcs may also be added to a running Cron - c.AddFunc("@daily", func() { fmt.Println("Every day") }) - .. - // Inspect the cron job entries' next and previous run times. - inspect(c.Entries()) - .. - c.Stop() // Stop the scheduler (does not stop any jobs already running). - -CRON Expression Format - -A cron expression represents a set of times, using 5 space-separated fields. - - Field name | Mandatory? 
| Allowed values | Allowed special characters - ---------- | ---------- | -------------- | -------------------------- - Minutes | Yes | 0-59 | * / , - - Hours | Yes | 0-23 | * / , - - Day of month | Yes | 1-31 | * / , - ? - Month | Yes | 1-12 or JAN-DEC | * / , - - Day of week | Yes | 0-6 or SUN-SAT | * / , - ? - -Month and Day-of-week field values are case insensitive. "SUN", "Sun", and -"sun" are equally accepted. - -The specific interpretation of the format is based on the Cron Wikipedia page: -https://en.wikipedia.org/wiki/Cron - -Alternative Formats - -Alternative Cron expression formats support other fields like seconds. You can -implement that by creating a custom Parser as follows. - - cron.New( - cron.WithParser( - cron.NewParser( - cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))) - -Since adding Seconds is the most common modification to the standard cron spec, -cron provides a builtin function to do that, which is equivalent to the custom -parser you saw earlier, except that its seconds field is REQUIRED: - - cron.New(cron.WithSeconds()) - -That emulates Quartz, the most popular alternative Cron schedule format: -http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html - -Special Characters - -Asterisk ( * ) - -The asterisk indicates that the cron expression will match for all values of the -field; e.g., using an asterisk in the 5th field (month) would indicate every -month. - -Slash ( / ) - -Slashes are used to describe increments of ranges. For example 3-59/15 in the -1st field (minutes) would indicate the 3rd minute of the hour and every 15 -minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", -that is, an increment over the largest possible range of the field. The form -"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the -increment until the end of that specific range. It does not wrap around. 
- -Comma ( , ) - -Commas are used to separate items of a list. For example, using "MON,WED,FRI" in -the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. - -Hyphen ( - ) - -Hyphens are used to define ranges. For example, 9-17 would indicate every -hour between 9am and 5pm inclusive. - -Question mark ( ? ) - -Question mark may be used instead of '*' for leaving either day-of-month or -day-of-week blank. - -Predefined schedules - -You may use one of several pre-defined schedules in place of a cron expression. - - Entry | Description | Equivalent To - ----- | ----------- | ------------- - @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * - @monthly | Run once a month, midnight, first of month | 0 0 1 * * - @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 - @daily (or @midnight) | Run once a day, midnight | 0 0 * * * - @hourly | Run once an hour, beginning of hour | 0 * * * * - -Intervals - -You may also schedule a job to execute at fixed intervals, starting at the time it's added -or cron is run. This is supported by formatting the cron spec like this: - - @every - -where "duration" is a string accepted by time.ParseDuration -(http://golang.org/pkg/time/#ParseDuration). - -For example, "@every 1h30m10s" would indicate a schedule that activates after -1 hour, 30 minutes, 10 seconds, and then every interval after that. - -Note: The interval does not take the job runtime into account. For example, -if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, -it will have only 2 minutes of idle time between each run. - -Time zones - -By default, all interpretation and scheduling is done in the machine's local -time zone (time.Local). 
You can specify a different time zone on construction: - - cron.New( - cron.WithLocation(time.UTC)) - -Individual cron schedules may also override the time zone they are to be -interpreted in by providing an additional space-separated field at the beginning -of the cron spec, of the form "CRON_TZ=Asia/Tokyo". - -For example: - - # Runs at 6am in time.Local - cron.New().AddFunc("0 6 * * ?", ...) - - # Runs at 6am in America/New_York - nyc, _ := time.LoadLocation("America/New_York") - c := cron.New(cron.WithLocation(nyc)) - c.AddFunc("0 6 * * ?", ...) - - # Runs at 6am in Asia/Tokyo - cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) - - # Runs at 6am in Asia/Tokyo - c := cron.New(cron.WithLocation(nyc)) - c.SetLocation("America/New_York") - c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) - -The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility. - -Be aware that jobs scheduled during daylight-savings leap-ahead transitions will -not be run! - -Job Wrappers - -A Cron runner may be configured with a chain of job wrappers to add -cross-cutting functionality to all submitted jobs. For example, they may be used -to achieve the following effects: - - - Recover any panics from jobs (activated by default) - - Delay a job's execution if the previous run hasn't completed yet - - Skip a job's execution if the previous run hasn't completed yet - - Log each job's invocations - -Install wrappers for all jobs added to a cron using the `cron.WithChain` option: - - cron.New(cron.WithChain( - cron.SkipIfStillRunning(logger), - )) - -Install wrappers for individual jobs by explicitly wrapping them: - - job = cron.NewChain( - cron.SkipIfStillRunning(logger), - ).Then(job) - -Thread safety - -Since the Cron service runs concurrently with the calling code, some amount of -care must be taken to ensure proper synchronization. 
- -All cron methods are designed to be correctly synchronized as long as the caller -ensures that invocations have a clear happens-before ordering between them. - -Logging - -Cron defines a Logger interface that is a subset of the one defined in -github.com/go-logr/logr. It has two logging levels (Info and Error), and -parameters are key/value pairs. This makes it possible for cron logging to plug -into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided -to wrap the standard library *log.Logger. - -For additional insight into Cron operations, verbose logging may be activated -which will record job runs, scheduling decisions, and added or removed jobs. -Activate it with a one-off logger as follows: - - cron.New( - cron.WithLogger( - cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) - - -Implementation - -Cron entries are stored in an array, sorted by their next activation time. Cron -sleeps until the next job is due to be run. - -Upon waking: - - it runs each entry that is active on that second - - it calculates the next run times for the jobs that were run - - it re-sorts the array of entries by next activation time. - - it goes to sleep until the soonest job. -*/ -package cron diff --git a/vendor/github.com/robfig/cron/v3/logger.go b/vendor/github.com/robfig/cron/v3/logger.go deleted file mode 100644 index b4efcc053..000000000 --- a/vendor/github.com/robfig/cron/v3/logger.go +++ /dev/null @@ -1,86 +0,0 @@ -package cron - -import ( - "io/ioutil" - "log" - "os" - "strings" - "time" -) - -// DefaultLogger is used by Cron if none is specified. -var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) - -// DiscardLogger can be used by callers to discard all log messages. -var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0)) - -// Logger is the interface used in this package for logging, so that any backend -// can be plugged in. 
It is a subset of the github.com/go-logr/logr interface. -type Logger interface { - // Info logs routine messages about cron's operation. - Info(msg string, keysAndValues ...interface{}) - // Error logs an error condition. - Error(err error, msg string, keysAndValues ...interface{}) -} - -// PrintfLogger wraps a Printf-based logger (such as the standard library "log") -// into an implementation of the Logger interface which logs errors only. -func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { - return printfLogger{l, false} -} - -// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library -// "log") into an implementation of the Logger interface which logs everything. -func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { - return printfLogger{l, true} -} - -type printfLogger struct { - logger interface{ Printf(string, ...interface{}) } - logInfo bool -} - -func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) { - if pl.logInfo { - keysAndValues = formatTimes(keysAndValues) - pl.logger.Printf( - formatString(len(keysAndValues)), - append([]interface{}{msg}, keysAndValues...)...) - } -} - -func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) { - keysAndValues = formatTimes(keysAndValues) - pl.logger.Printf( - formatString(len(keysAndValues)+2), - append([]interface{}{msg, "error", err}, keysAndValues...)...) -} - -// formatString returns a logfmt-like format string for the number of -// key/values. -func formatString(numKeysAndValues int) string { - var sb strings.Builder - sb.WriteString("%s") - if numKeysAndValues > 0 { - sb.WriteString(", ") - } - for i := 0; i < numKeysAndValues/2; i++ { - if i > 0 { - sb.WriteString(", ") - } - sb.WriteString("%v=%v") - } - return sb.String() -} - -// formatTimes formats any time.Time values as RFC3339. 
-func formatTimes(keysAndValues []interface{}) []interface{} { - var formattedArgs []interface{} - for _, arg := range keysAndValues { - if t, ok := arg.(time.Time); ok { - arg = t.Format(time.RFC3339) - } - formattedArgs = append(formattedArgs, arg) - } - return formattedArgs -} diff --git a/vendor/github.com/robfig/cron/v3/option.go b/vendor/github.com/robfig/cron/v3/option.go deleted file mode 100644 index 09e4278e7..000000000 --- a/vendor/github.com/robfig/cron/v3/option.go +++ /dev/null @@ -1,45 +0,0 @@ -package cron - -import ( - "time" -) - -// Option represents a modification to the default behavior of a Cron. -type Option func(*Cron) - -// WithLocation overrides the timezone of the cron instance. -func WithLocation(loc *time.Location) Option { - return func(c *Cron) { - c.location = loc - } -} - -// WithSeconds overrides the parser used for interpreting job schedules to -// include a seconds field as the first one. -func WithSeconds() Option { - return WithParser(NewParser( - Second | Minute | Hour | Dom | Month | Dow | Descriptor, - )) -} - -// WithParser overrides the parser used for interpreting job schedules. -func WithParser(p ScheduleParser) Option { - return func(c *Cron) { - c.parser = p - } -} - -// WithChain specifies Job wrappers to apply to all jobs added to this cron. -// Refer to the Chain* functions in this package for provided wrappers. -func WithChain(wrappers ...JobWrapper) Option { - return func(c *Cron) { - c.chain = NewChain(wrappers...) - } -} - -// WithLogger uses the provided logger. -func WithLogger(logger Logger) Option { - return func(c *Cron) { - c.logger = logger - } -} diff --git a/vendor/github.com/robfig/cron/v3/parser.go b/vendor/github.com/robfig/cron/v3/parser.go deleted file mode 100644 index 3cf8879f7..000000000 --- a/vendor/github.com/robfig/cron/v3/parser.go +++ /dev/null @@ -1,434 +0,0 @@ -package cron - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// Configuration options for creating a parser. 
Most options specify which -// fields should be included, while others enable features. If a field is not -// included the parser will assume a default value. These options do not change -// the order fields are parse in. -type ParseOption int - -const ( - Second ParseOption = 1 << iota // Seconds field, default 0 - SecondOptional // Optional seconds field, default 0 - Minute // Minutes field, default 0 - Hour // Hours field, default 0 - Dom // Day of month field, default * - Month // Month field, default * - Dow // Day of week field, default * - DowOptional // Optional day of week field, default * - Descriptor // Allow descriptors such as @monthly, @weekly, etc. -) - -var places = []ParseOption{ - Second, - Minute, - Hour, - Dom, - Month, - Dow, -} - -var defaults = []string{ - "0", - "0", - "0", - "*", - "*", - "*", -} - -// A custom Parser that can be configured. -type Parser struct { - options ParseOption -} - -// NewParser creates a Parser with custom options. -// -// It panics if more than one Optional is given, since it would be impossible to -// correctly infer which optional is provided or missing in general. -// -// Examples -// -// // Standard parser without descriptors -// specParser := NewParser(Minute | Hour | Dom | Month | Dow) -// sched, err := specParser.Parse("0 0 15 */3 *") -// -// // Same as above, just excludes time fields -// subsParser := NewParser(Dom | Month | Dow) -// sched, err := specParser.Parse("15 */3 *") -// -// // Same as above, just makes Dow optional -// subsParser := NewParser(Dom | Month | DowOptional) -// sched, err := specParser.Parse("15 */3") -// -func NewParser(options ParseOption) Parser { - optionals := 0 - if options&DowOptional > 0 { - optionals++ - } - if options&SecondOptional > 0 { - optionals++ - } - if optionals > 1 { - panic("multiple optionals may not be configured") - } - return Parser{options} -} - -// Parse returns a new crontab schedule representing the given spec. 
-// It returns a descriptive error if the spec is not valid. -// It accepts crontab specs and features configured by NewParser. -func (p Parser) Parse(spec string) (Schedule, error) { - if len(spec) == 0 { - return nil, fmt.Errorf("empty spec string") - } - - // Extract timezone if present - var loc = time.Local - if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") { - var err error - i := strings.Index(spec, " ") - eq := strings.Index(spec, "=") - if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil { - return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err) - } - spec = strings.TrimSpace(spec[i:]) - } - - // Handle named schedules (descriptors), if configured - if strings.HasPrefix(spec, "@") { - if p.options&Descriptor == 0 { - return nil, fmt.Errorf("parser does not accept descriptors: %v", spec) - } - return parseDescriptor(spec, loc) - } - - // Split on whitespace. - fields := strings.Fields(spec) - - // Validate & fill in any omitted or optional fields - var err error - fields, err = normalizeFields(fields, p.options) - if err != nil { - return nil, err - } - - field := func(field string, r bounds) uint64 { - if err != nil { - return 0 - } - var bits uint64 - bits, err = getField(field, r) - return bits - } - - var ( - second = field(fields[0], seconds) - minute = field(fields[1], minutes) - hour = field(fields[2], hours) - dayofmonth = field(fields[3], dom) - month = field(fields[4], months) - dayofweek = field(fields[5], dow) - ) - if err != nil { - return nil, err - } - - return &SpecSchedule{ - Second: second, - Minute: minute, - Hour: hour, - Dom: dayofmonth, - Month: month, - Dow: dayofweek, - Location: loc, - }, nil -} - -// normalizeFields takes a subset set of the time fields and returns the full set -// with defaults (zeroes) populated for unset fields. -// -// As part of performing this function, it also validates that the provided -// fields are compatible with the configured options. 
-func normalizeFields(fields []string, options ParseOption) ([]string, error) { - // Validate optionals & add their field to options - optionals := 0 - if options&SecondOptional > 0 { - options |= Second - optionals++ - } - if options&DowOptional > 0 { - options |= Dow - optionals++ - } - if optionals > 1 { - return nil, fmt.Errorf("multiple optionals may not be configured") - } - - // Figure out how many fields we need - max := 0 - for _, place := range places { - if options&place > 0 { - max++ - } - } - min := max - optionals - - // Validate number of fields - if count := len(fields); count < min || count > max { - if min == max { - return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields) - } - return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields) - } - - // Populate the optional field if not provided - if min < max && len(fields) == min { - switch { - case options&DowOptional > 0: - fields = append(fields, defaults[5]) // TODO: improve access to default - case options&SecondOptional > 0: - fields = append([]string{defaults[0]}, fields...) - default: - return nil, fmt.Errorf("unknown optional field") - } - } - - // Populate all fields not part of options with their defaults - n := 0 - expandedFields := make([]string, len(places)) - copy(expandedFields, defaults) - for i, place := range places { - if options&place > 0 { - expandedFields[i] = fields[n] - n++ - } - } - return expandedFields, nil -} - -var standardParser = NewParser( - Minute | Hour | Dom | Month | Dow | Descriptor, -) - -// ParseStandard returns a new crontab schedule representing the given -// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries -// representing: minute, hour, day of month, month and day of week, in that -// order. It returns a descriptive error if the spec is not valid. -// -// It accepts -// - Standard crontab specs, e.g. "* * * * ?" -// - Descriptors, e.g. 
"@midnight", "@every 1h30m" -func ParseStandard(standardSpec string) (Schedule, error) { - return standardParser.Parse(standardSpec) -} - -// getField returns an Int with the bits set representing all of the times that -// the field represents or error parsing field value. A "field" is a comma-separated -// list of "ranges". -func getField(field string, r bounds) (uint64, error) { - var bits uint64 - ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) - for _, expr := range ranges { - bit, err := getRange(expr, r) - if err != nil { - return bits, err - } - bits |= bit - } - return bits, nil -} - -// getRange returns the bits indicated by the given expression: -// number | number "-" number [ "/" number ] -// or error parsing range. -func getRange(expr string, r bounds) (uint64, error) { - var ( - start, end, step uint - rangeAndStep = strings.Split(expr, "/") - lowAndHigh = strings.Split(rangeAndStep[0], "-") - singleDigit = len(lowAndHigh) == 1 - err error - ) - - var extra uint64 - if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { - start = r.min - end = r.max - extra = starBit - } else { - start, err = parseIntOrName(lowAndHigh[0], r.names) - if err != nil { - return 0, err - } - switch len(lowAndHigh) { - case 1: - end = start - case 2: - end, err = parseIntOrName(lowAndHigh[1], r.names) - if err != nil { - return 0, err - } - default: - return 0, fmt.Errorf("too many hyphens: %s", expr) - } - } - - switch len(rangeAndStep) { - case 1: - step = 1 - case 2: - step, err = mustParseInt(rangeAndStep[1]) - if err != nil { - return 0, err - } - - // Special handling: "N/step" means "N-max/step". 
- if singleDigit { - end = r.max - } - if step > 1 { - extra = 0 - } - default: - return 0, fmt.Errorf("too many slashes: %s", expr) - } - - if start < r.min { - return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr) - } - if end > r.max { - return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr) - } - if start > end { - return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr) - } - if step == 0 { - return 0, fmt.Errorf("step of range should be a positive number: %s", expr) - } - - return getBits(start, end, step) | extra, nil -} - -// parseIntOrName returns the (possibly-named) integer contained in expr. -func parseIntOrName(expr string, names map[string]uint) (uint, error) { - if names != nil { - if namedInt, ok := names[strings.ToLower(expr)]; ok { - return namedInt, nil - } - } - return mustParseInt(expr) -} - -// mustParseInt parses the given expression as an int or returns an error. -func mustParseInt(expr string) (uint, error) { - num, err := strconv.Atoi(expr) - if err != nil { - return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err) - } - if num < 0 { - return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr) - } - - return uint(num), nil -} - -// getBits sets all bits in the range [min, max], modulo the given step size. -func getBits(min, max, step uint) uint64 { - var bits uint64 - - // If step is 1, use shifts. - if step == 1 { - return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) - } - - // Else, use a simple loop. - for i := min; i <= max; i += step { - bits |= 1 << i - } - return bits -} - -// all returns all bits within the given bounds. (plus the star bit) -func all(r bounds) uint64 { - return getBits(r.min, r.max, 1) | starBit -} - -// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
-func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) { - switch descriptor { - case "@yearly", "@annually": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: 1 << months.min, - Dow: all(dow), - Location: loc, - }, nil - - case "@monthly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: all(months), - Dow: all(dow), - Location: loc, - }, nil - - case "@weekly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: all(dom), - Month: all(months), - Dow: 1 << dow.min, - Location: loc, - }, nil - - case "@daily", "@midnight": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: all(dom), - Month: all(months), - Dow: all(dow), - Location: loc, - }, nil - - case "@hourly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: all(hours), - Dom: all(dom), - Month: all(months), - Dow: all(dow), - Location: loc, - }, nil - - } - - const every = "@every " - if strings.HasPrefix(descriptor, every) { - duration, err := time.ParseDuration(descriptor[len(every):]) - if err != nil { - return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err) - } - return Every(duration), nil - } - - return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor) -} diff --git a/vendor/github.com/robfig/cron/v3/spec.go b/vendor/github.com/robfig/cron/v3/spec.go deleted file mode 100644 index fa1e241e5..000000000 --- a/vendor/github.com/robfig/cron/v3/spec.go +++ /dev/null @@ -1,188 +0,0 @@ -package cron - -import "time" - -// SpecSchedule specifies a duty cycle (to the second granularity), based on a -// traditional crontab specification. It is computed initially and stored as bit sets. 
-type SpecSchedule struct { - Second, Minute, Hour, Dom, Month, Dow uint64 - - // Override location for this schedule. - Location *time.Location -} - -// bounds provides a range of acceptable values (plus a map of name to value). -type bounds struct { - min, max uint - names map[string]uint -} - -// The bounds for each field. -var ( - seconds = bounds{0, 59, nil} - minutes = bounds{0, 59, nil} - hours = bounds{0, 23, nil} - dom = bounds{1, 31, nil} - months = bounds{1, 12, map[string]uint{ - "jan": 1, - "feb": 2, - "mar": 3, - "apr": 4, - "may": 5, - "jun": 6, - "jul": 7, - "aug": 8, - "sep": 9, - "oct": 10, - "nov": 11, - "dec": 12, - }} - dow = bounds{0, 6, map[string]uint{ - "sun": 0, - "mon": 1, - "tue": 2, - "wed": 3, - "thu": 4, - "fri": 5, - "sat": 6, - }} -) - -const ( - // Set the top bit if a star was included in the expression. - starBit = 1 << 63 -) - -// Next returns the next time this schedule is activated, greater than the given -// time. If no time can be found to satisfy the schedule, return the zero time. -func (s *SpecSchedule) Next(t time.Time) time.Time { - // General approach - // - // For Month, Day, Hour, Minute, Second: - // Check if the time value matches. If yes, continue to the next field. - // If the field doesn't match the schedule, then increment the field until it matches. - // While incrementing the field, a wrap-around brings it back to the beginning - // of the field list (since it is necessary to re-verify previous field - // values) - - // Convert the given time into the schedule's timezone, if one is specified. - // Save the original timezone so we can convert back after we find a time. - // Note that schedules without a time zone specified (time.Local) are treated - // as local to the time provided. - origLocation := t.Location() - loc := s.Location - if loc == time.Local { - loc = t.Location() - } - if s.Location != time.Local { - t = t.In(s.Location) - } - - // Start at the earliest possible time (the upcoming second). 
- t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) - - // This flag indicates whether a field has been incremented. - added := false - - // If no time is found within five years, return zero. - yearLimit := t.Year() + 5 - -WRAP: - if t.Year() > yearLimit { - return time.Time{} - } - - // Find the first applicable month. - // If it's this month, then do nothing. - for 1< 12 { - t = t.Add(time.Duration(24-t.Hour()) * time.Hour) - } else { - t = t.Add(time.Duration(-t.Hour()) * time.Hour) - } - } - - if t.Day() == 1 { - goto WRAP - } - } - - for 1< 0 - dowMatch bool = 1< 0 - ) - if s.Dom&starBit > 0 || s.Dow&starBit > 0 { - return domMatch && dowMatch - } - return domMatch || dowMatch -} diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go deleted file mode 100644 index ecc0dabb7..000000000 --- a/vendor/golang.org/x/exp/maps/maps.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package maps defines various functions useful with maps of any type. -package maps - -// Keys returns the keys of the map m. -// The keys will be in an indeterminate order. -func Keys[M ~map[K]V, K comparable, V any](m M) []K { - r := make([]K, 0, len(m)) - for k := range m { - r = append(r, k) - } - return r -} - -// Values returns the values of the map m. -// The values will be in an indeterminate order. -func Values[M ~map[K]V, K comparable, V any](m M) []V { - r := make([]V, 0, len(m)) - for _, v := range m { - r = append(r, v) - } - return r -} - -// Equal reports whether two maps contain the same key/value pairs. -// Values are compared using ==. 
-func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || v1 != v2 { - return false - } - } - return true -} - -// EqualFunc is like Equal, but compares values using eq. -// Keys are still compared with ==. -func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || !eq(v1, v2) { - return false - } - } - return true -} - -// Clear removes all entries from m, leaving it empty. -func Clear[M ~map[K]V, K comparable, V any](m M) { - for k := range m { - delete(m, k) - } -} - -// Clone returns a copy of m. This is a shallow clone: -// the new keys and values are set using ordinary assignment. -func Clone[M ~map[K]V, K comparable, V any](m M) M { - // Preserve nil in case it matters. - if m == nil { - return nil - } - r := make(M, len(m)) - for k, v := range m { - r[k] = v - } - return r -} - -// Copy copies all key/value pairs in src adding them to dst. -// When a key in src is already present in dst, -// the value in dst will be overwritten by the value associated -// with the key in src. -func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { - for k, v := range src { - dst[k] = v - } -} - -// DeleteFunc deletes any key/value pairs from m for which del returns true. 
-func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { - for k, v := range m { - if del(k, v) { - delete(m, k) - } - } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index e8e799303..55a942a11 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,10 @@ # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler +# github.com/adhocore/gronx v1.8.1 +## explicit; go 1.13 +github.com/adhocore/gronx +github.com/adhocore/gronx/pkg/tasker # github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df ## explicit; go 1.18 github.com/antlr/antlr4/runtime/Go/antlr/v4 @@ -52,9 +56,6 @@ github.com/ghodss/yaml ## explicit github.com/go-bindata/go-bindata github.com/go-bindata/go-bindata/go-bindata -# github.com/go-co-op/gocron/v2 v2.8.0 -## explicit; go 1.20 -github.com/go-co-op/gocron/v2 # github.com/go-logr/logr v1.4.1 ## explicit; go 1.18 github.com/go-logr/logr @@ -422,9 +423,6 @@ github.com/prometheus/procfs/internal/util # github.com/robfig/cron v1.2.0 ## explicit github.com/robfig/cron -# github.com/robfig/cron/v3 v3.0.1 -## explicit; go 1.12 -github.com/robfig/cron/v3 # github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -653,7 +651,6 @@ golang.org/x/crypto/salsa20/salsa # golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 ## explicit; go 1.20 golang.org/x/exp/constraints -golang.org/x/exp/maps golang.org/x/exp/slices # golang.org/x/net v0.23.0 ## explicit; go 1.18 From a9606111b0749944a2012c727c773232368e3ccc Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 11 Jul 2024 17:25:32 +0200 Subject: [PATCH 18/22] add sidecar container --- bindata/etcd/pod.yaml | 36 +++++++++++++++++++++++++ pkg/cmd/backuprestore/backupnoconfig.go | 2 +- pkg/operator/etcd_assets/bindata.go | 36 +++++++++++++++++++++++++ 3 files changed, 73 insertions(+), 1 deletion(-) diff --git a/bindata/etcd/pod.yaml b/bindata/etcd/pod.yaml index 1b94d519f..5b17bc605 
100644 --- a/bindata/etcd/pod.yaml +++ b/bindata/etcd/pod.yaml @@ -311,6 +311,36 @@ ${COMPUTED_ENV_VARS} name: log-dir - mountPath: /etc/kubernetes/static-pod-certs name: cert-dir + - name: etcd-backup-noconfig + image: ${OPERATOR_IMAGE} + imagePullPolicy: IfNotPresent + terminationMessagePolicy: FallbackToLogsOnError + command: + - /bin/sh + - -c + - | + #!/bin/sh + set -euo pipefail + exec nice -n -18 cluster-etcd-operator backup-server \ + --endpoints=https://localhost:2379 \ + --config-dir=/etc/kubernetes \ + --data-dir=/var/lib/etcd \ + --backup-dir=/var/backup/etcd + securityContext: + privileged: true + resources: + requests: + memory: 50Mi + cpu: 10m + env: +${COMPUTED_ENV_VARS} + volumeMounts: + - mountPath: /var/lib/etcd + name: data-dir + - mountPath: /var/backup/etcd + name: backup-dir + - mountPath: /etc/kubernetes + name: config-dir hostNetwork: true priorityClassName: system-node-critical tolerations: @@ -335,3 +365,9 @@ ${COMPUTED_ENV_VARS} - hostPath: path: /var/log/etcd name: log-dir + - hostPath: + path: /var/backup/etcd + name: backup-dir + - hostPath: + path: /etc/kubernetes + name: config-dir diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index a1ff55c12..0ff868c72 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -59,7 +59,7 @@ func (b *backupNoConfig) AddFlags(fs *pflag.FlagSet) { } func (b *backupNoConfig) Validate() error { - return b.Validate() + return b.backupOptions.Validate() } func (b *backupNoConfig) Run() error { diff --git a/pkg/operator/etcd_assets/bindata.go b/pkg/operator/etcd_assets/bindata.go index f8e67500d..bc28fa60d 100644 --- a/pkg/operator/etcd_assets/bindata.go +++ b/pkg/operator/etcd_assets/bindata.go @@ -1227,6 +1227,36 @@ ${COMPUTED_ENV_VARS} name: log-dir - mountPath: /etc/kubernetes/static-pod-certs name: cert-dir + - name: etcd-backup-noconfig + image: ${OPERATOR_IMAGE} + imagePullPolicy: IfNotPresent + 
terminationMessagePolicy: FallbackToLogsOnError + command: + - /bin/sh + - -c + - | + #!/bin/sh + set -euo pipefail + exec nice -n -18 cluster-etcd-operator backup-server \ + --endpoints=https://localhost:2379 \ + --config-dir=/etc/kubernetes \ + --data-dir=/var/lib/etcd \ + --backup-dir=/var/backup/etcd + securityContext: + privileged: true + resources: + requests: + memory: 50Mi + cpu: 10m + env: +${COMPUTED_ENV_VARS} + volumeMounts: + - mountPath: /var/lib/etcd + name: data-dir + - mountPath: /var/backup/etcd + name: backup-dir + - mountPath: /etc/kubernetes + name: config-dir hostNetwork: true priorityClassName: system-node-critical tolerations: @@ -1251,6 +1281,12 @@ ${COMPUTED_ENV_VARS} - hostPath: path: /var/log/etcd name: log-dir + - hostPath: + path: /var/backup/etcd + name: backup-dir + - hostPath: + path: /etc/kubernetes + name: config-dir `) func etcdPodYamlBytes() ([]byte, error) { From a7d9e5cad54b26830f8266c2864a0fa6baa83a87 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Fri, 12 Jul 2024 22:57:02 +0200 Subject: [PATCH 19/22] use in-cluster config --- bindata/etcd/pod.yaml | 29 +++++++++++++++++++++++++ pkg/cmd/backuprestore/backupnoconfig.go | 17 +++++++-------- pkg/operator/etcd_assets/bindata.go | 29 +++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 9 deletions(-) diff --git a/bindata/etcd/pod.yaml b/bindata/etcd/pod.yaml index 5b17bc605..9935ba9c1 100644 --- a/bindata/etcd/pod.yaml +++ b/bindata/etcd/pod.yaml @@ -12,6 +12,7 @@ metadata: etcd: "true" revision: "REVISION" spec: + serviceAccountName: etcd-pod initContainers: - name: setup image: ${IMAGE} @@ -341,6 +342,11 @@ ${COMPUTED_ENV_VARS} name: backup-dir - mountPath: /etc/kubernetes name: config-dir + - mountPath: /etc/kubernetes/static-pod-certs + name: cert-dir + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true hostNetwork: true priorityClassName: system-node-critical tolerations: @@ -371,3 +377,26 @@ ${COMPUTED_ENV_VARS} 
- hostPath: path: /etc/kubernetes name: config-dir + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3600 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace + - configMap: + items: + - key: service-ca.crt + path: service-ca.crt + name: openshift-service-ca.crt diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 0ff868c72..beb1ac031 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "k8s.io/client-go/rest" "slices" backupv1alpha1 "github.com/openshift/api/config/v1alpha1" @@ -11,7 +12,6 @@ import ( prunebackups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" "github.com/adhocore/gronx/pkg/tasker" @@ -20,11 +20,10 @@ import ( ) type backupNoConfig struct { - kubeConfig string - schedule string - timeZone string - retention backupv1alpha1.RetentionPolicy - scheduler *tasker.Tasker + schedule string + timeZone string + retention backupv1alpha1.RetentionPolicy + scheduler *tasker.Tasker backupOptions } @@ -93,14 +92,14 @@ func (b *backupNoConfig) Run() error { } func (b *backupNoConfig) getBackupClient() (backupv1client.BackupsGetter, error) { - kubeConfig, err := clientcmd.BuildConfigFromFlags("", b.kubeConfig) + config, err := rest.InClusterConfig() if err != nil { - bErr := fmt.Errorf("error loading kubeconfig: %v", err) + bErr := fmt.Errorf("error loading in-cluster kube client config: %v", err) klog.Error(bErr) return nil, bErr } - backupsClient, err := backupv1client.NewForConfig(kubeConfig) + backupsClient, err := backupv1client.NewForConfig(config) if err != nil { bErr := fmt.Errorf("error 
creating etcd backups client: %v", err) klog.Error(bErr) diff --git a/pkg/operator/etcd_assets/bindata.go b/pkg/operator/etcd_assets/bindata.go index bc28fa60d..241381456 100644 --- a/pkg/operator/etcd_assets/bindata.go +++ b/pkg/operator/etcd_assets/bindata.go @@ -928,6 +928,7 @@ metadata: etcd: "true" revision: "REVISION" spec: + serviceAccountName: etcd-pod initContainers: - name: setup image: ${IMAGE} @@ -1257,6 +1258,11 @@ ${COMPUTED_ENV_VARS} name: backup-dir - mountPath: /etc/kubernetes name: config-dir + - mountPath: /etc/kubernetes/static-pod-certs + name: cert-dir + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true hostNetwork: true priorityClassName: system-node-critical tolerations: @@ -1287,6 +1293,29 @@ ${COMPUTED_ENV_VARS} - hostPath: path: /etc/kubernetes name: config-dir + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3600 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace + - configMap: + items: + - key: service-ca.crt + path: service-ca.crt + name: openshift-service-ca.crt `) func etcdPodYamlBytes() ([]byte, error) { From 53c0a49da4854818458cae9dc85f72e98175cb1c Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 16 Jul 2024 19:58:06 +0200 Subject: [PATCH 20/22] remove kube-api-access cm --- bindata/etcd/pod.yaml | 24 ------------------------ pkg/cmd/prune-backups/prune.go | 14 -------------- pkg/operator/etcd_assets/bindata.go | 24 ------------------------ 3 files changed, 62 deletions(-) diff --git a/bindata/etcd/pod.yaml b/bindata/etcd/pod.yaml index 9935ba9c1..2df278d04 100644 --- a/bindata/etcd/pod.yaml +++ b/bindata/etcd/pod.yaml @@ -12,7 +12,6 @@ metadata: etcd: "true" revision: "REVISION" spec: - serviceAccountName: etcd-pod initContainers: - name: setup image: ${IMAGE} 
@@ -377,26 +376,3 @@ ${COMPUTED_ENV_VARS} - hostPath: path: /etc/kubernetes name: config-dir - - name: kube-api-access - projected: - defaultMode: 420 - sources: - - serviceAccountToken: - expirationSeconds: 3600 - path: token - - configMap: - items: - - key: ca.crt - path: ca.crt - name: kube-root-ca.crt - - downwardAPI: - items: - - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - path: namespace - - configMap: - items: - - key: service-ca.crt - path: service-ca.crt - name: openshift-service-ca.crt diff --git a/pkg/cmd/prune-backups/prune.go b/pkg/cmd/prune-backups/prune.go index a7e3a2e6a..aeecd316f 100644 --- a/pkg/cmd/prune-backups/prune.go +++ b/pkg/cmd/prune-backups/prune.go @@ -3,7 +3,6 @@ package prune_backups import ( goflag "flag" "fmt" - "github.com/openshift/api/config/v1alpha1" "github.com/spf13/cobra" "io/fs" "k8s.io/klog/v2" @@ -110,19 +109,6 @@ func (r *PruneOpts) Run() error { return nil } -func Retain(policy v1alpha1.RetentionPolicy) error { - switch policy.RetentionType { - case RetentionTypeNone: - klog.Infof("nothing to do, retention type is none") - return nil - case RetentionTypeNumber: - return retainByNumber(policy.RetentionNumber.MaxNumberOfBackups) - case RetentionTypeSize: - return retainBySizeGb(policy.RetentionSize.MaxSizeOfBackupsGb) - } - return nil -} - func retainBySizeGb(sizeInGb int) error { folders, err := listAllBackupFolders() if err != nil { diff --git a/pkg/operator/etcd_assets/bindata.go b/pkg/operator/etcd_assets/bindata.go index 241381456..2e9988a55 100644 --- a/pkg/operator/etcd_assets/bindata.go +++ b/pkg/operator/etcd_assets/bindata.go @@ -928,7 +928,6 @@ metadata: etcd: "true" revision: "REVISION" spec: - serviceAccountName: etcd-pod initContainers: - name: setup image: ${IMAGE} @@ -1293,29 +1292,6 @@ ${COMPUTED_ENV_VARS} - hostPath: path: /etc/kubernetes name: config-dir - - name: kube-api-access - projected: - defaultMode: 420 - sources: - - serviceAccountToken: - expirationSeconds: 3600 - path: token - 
- configMap: - items: - - key: ca.crt - path: ca.crt - name: kube-root-ca.crt - - downwardAPI: - items: - - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - path: namespace - - configMap: - items: - - key: service-ca.crt - path: service-ca.crt - name: openshift-service-ca.crt `) func etcdPodYamlBytes() ([]byte, error) { From 4c9ccc5c158ca5caf3d3c96ae9053c39c7d4b54b Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 16 Jul 2024 20:25:45 +0200 Subject: [PATCH 21/22] use client cert access apiserver --- bindata/etcd/backup-server-kubeconfig.yaml | 26 ++++ bindata/etcd/pod.yaml | 4 +- pkg/cmd/backuprestore/backupnoconfig.go | 18 +-- pkg/operator/etcd_assets/bindata.go | 134 ++++++++++++++------- pkg/operator/starter.go | 3 + 5 files changed, 130 insertions(+), 55 deletions(-) create mode 100644 bindata/etcd/backup-server-kubeconfig.yaml diff --git a/bindata/etcd/backup-server-kubeconfig.yaml b/bindata/etcd/backup-server-kubeconfig.yaml new file mode 100644 index 000000000..685bf6300 --- /dev/null +++ b/bindata/etcd/backup-server-kubeconfig.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: backup-server-kubeconfig + namespace: openshift-etcd +data: + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-server-ca/ca-bundle.crt + server: https://localhost:6443 + name: loopback + contexts: + - context: + cluster: loopback + user: backup-server + name: backup-server + current-context: backup-server + kind: Config + preferences: {} + users: + - name: backup-server + user: + client-certificate: /etc/kubernetes/static-pod-certs/secrets/backup-server-client-cert-key/tls.crt + client-key: /etc/kubernetes/static-pod-certs/secrets/backup-server-client-cert-key/tls.key diff --git a/bindata/etcd/pod.yaml b/bindata/etcd/pod.yaml index 2df278d04..1503c1a16 100644 --- a/bindata/etcd/pod.yaml +++ b/bindata/etcd/pod.yaml @@ -322,6 +322,7 @@ 
${COMPUTED_ENV_VARS} #!/bin/sh set -euo pipefail exec nice -n -18 cluster-etcd-operator backup-server \ + --kubeConfig=/etc/kubernetes/static-pod-certs/configmaps/etcd-backup-server/kubeconfig \ --endpoints=https://localhost:2379 \ --config-dir=/etc/kubernetes \ --data-dir=/var/lib/etcd \ @@ -343,9 +344,6 @@ ${COMPUTED_ENV_VARS} name: config-dir - mountPath: /etc/kubernetes/static-pod-certs name: cert-dir - - mountPath: /var/run/secrets/kubernetes.io/serviceaccount - name: kube-api-access - readOnly: true hostNetwork: true priorityClassName: system-node-critical tolerations: diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index beb1ac031..4bdcf5e70 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "k8s.io/client-go/rest" "slices" backupv1alpha1 "github.com/openshift/api/config/v1alpha1" @@ -12,6 +11,7 @@ import ( prunebackups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" "github.com/adhocore/gronx/pkg/tasker" @@ -20,10 +20,11 @@ import ( ) type backupNoConfig struct { - schedule string - timeZone string - retention backupv1alpha1.RetentionPolicy - scheduler *tasker.Tasker + kubeConfig string + schedule string + timeZone string + retention backupv1alpha1.RetentionPolicy + scheduler *tasker.Tasker backupOptions } @@ -54,6 +55,9 @@ func NewBackupNoConfigCommand(errOut io.Writer) *cobra.Command { } func (b *backupNoConfig) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&b.kubeConfig, "kubeConfig", "", "kubeConfig specifies the config to be used by the cmd for accessing the api server") + cobra.MarkFlagRequired(fs, "kubeConfig") + b.backupOptions.AddFlags(fs) } @@ -92,9 +96,9 @@ func (b *backupNoConfig) Run() error { } func (b *backupNoConfig) getBackupClient() (backupv1client.BackupsGetter, error) { - config, err 
:= rest.InClusterConfig() + config, err := clientcmd.BuildConfigFromFlags("", b.kubeConfig) if err != nil { - bErr := fmt.Errorf("error loading in-cluster kube client config: %v", err) + bErr := fmt.Errorf("error loading kubeconfig: %v", err) klog.Error(bErr) return nil, bErr } diff --git a/pkg/operator/etcd_assets/bindata.go b/pkg/operator/etcd_assets/bindata.go index 2e9988a55..113b5b393 100644 --- a/pkg/operator/etcd_assets/bindata.go +++ b/pkg/operator/etcd_assets/bindata.go @@ -1,5 +1,6 @@ // Code generated for package etcd_assets by go-bindata DO NOT EDIT. (@generated) // sources: +// bindata/etcd/backup-server-kubeconfig.yaml // bindata/etcd/backups-cr.yaml // bindata/etcd/backups-crb.yaml // bindata/etcd/backups-sa.yaml @@ -74,6 +75,49 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } +var _etcdBackupServerKubeconfigYaml = []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: backup-server-kubeconfig + namespace: openshift-etcd +data: + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-server-ca/ca-bundle.crt + server: https://localhost:6443 + name: loopback + contexts: + - context: + cluster: loopback + user: backup-server + name: backup-server + current-context: backup-server + kind: Config + preferences: {} + users: + - name: backup-server + user: + client-certificate: /etc/kubernetes/static-pod-certs/secrets/backup-server-client-cert-key/tls.crt + client-key: /etc/kubernetes/static-pod-certs/secrets/backup-server-client-cert-key/tls.key +`) + +func etcdBackupServerKubeconfigYamlBytes() ([]byte, error) { + return _etcdBackupServerKubeconfigYaml, nil +} + +func etcdBackupServerKubeconfigYaml() (*asset, error) { + bytes, err := etcdBackupServerKubeconfigYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "etcd/backup-server-kubeconfig.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := 
&asset{bytes: bytes, info: info} + return a, nil +} + var _etcdBackupsCrYaml = []byte(`kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -1238,6 +1282,7 @@ ${COMPUTED_ENV_VARS} #!/bin/sh set -euo pipefail exec nice -n -18 cluster-etcd-operator backup-server \ + --kubeConfig=/etc/kubernetes/static-pod-certs/configmaps/etcd-backup-server/kubeconfig \ --endpoints=https://localhost:2379 \ --config-dir=/etc/kubernetes \ --data-dir=/var/lib/etcd \ @@ -1259,9 +1304,6 @@ ${COMPUTED_ENV_VARS} name: config-dir - mountPath: /etc/kubernetes/static-pod-certs name: cert-dir - - mountPath: /var/run/secrets/kubernetes.io/serviceaccount - name: kube-api-access - readOnly: true hostNetwork: true priorityClassName: system-node-critical tolerations: @@ -1767,27 +1809,28 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "etcd/backups-cr.yaml": etcdBackupsCrYaml, - "etcd/backups-crb.yaml": etcdBackupsCrbYaml, - "etcd/backups-sa.yaml": etcdBackupsSaYaml, - "etcd/cluster-backup-cronjob.yaml": etcdClusterBackupCronjobYaml, - "etcd/cluster-backup-job.yaml": etcdClusterBackupJobYaml, - "etcd/cluster-backup.sh": etcdClusterBackupSh, - "etcd/cluster-restore.sh": etcdClusterRestoreSh, - "etcd/cm.yaml": etcdCmYaml, - "etcd/etcd-common-tools": etcdEtcdCommonTools, - "etcd/minimal-sm.yaml": etcdMinimalSmYaml, - "etcd/ns.yaml": etcdNsYaml, - "etcd/pod-cm.yaml": etcdPodCmYaml, - "etcd/pod.yaml": etcdPodYaml, - "etcd/prometheus-role.yaml": etcdPrometheusRoleYaml, - "etcd/prometheus-rolebinding.yaml": etcdPrometheusRolebindingYaml, - "etcd/restore-pod-cm.yaml": etcdRestorePodCmYaml, - "etcd/restore-pod.yaml": etcdRestorePodYaml, - "etcd/sa.yaml": etcdSaYaml, - "etcd/scripts-cm.yaml": etcdScriptsCmYaml, - "etcd/sm.yaml": etcdSmYaml, - "etcd/svc.yaml": etcdSvcYaml, + "etcd/backup-server-kubeconfig.yaml": etcdBackupServerKubeconfigYaml, + "etcd/backups-cr.yaml": 
etcdBackupsCrYaml, + "etcd/backups-crb.yaml": etcdBackupsCrbYaml, + "etcd/backups-sa.yaml": etcdBackupsSaYaml, + "etcd/cluster-backup-cronjob.yaml": etcdClusterBackupCronjobYaml, + "etcd/cluster-backup-job.yaml": etcdClusterBackupJobYaml, + "etcd/cluster-backup.sh": etcdClusterBackupSh, + "etcd/cluster-restore.sh": etcdClusterRestoreSh, + "etcd/cm.yaml": etcdCmYaml, + "etcd/etcd-common-tools": etcdEtcdCommonTools, + "etcd/minimal-sm.yaml": etcdMinimalSmYaml, + "etcd/ns.yaml": etcdNsYaml, + "etcd/pod-cm.yaml": etcdPodCmYaml, + "etcd/pod.yaml": etcdPodYaml, + "etcd/prometheus-role.yaml": etcdPrometheusRoleYaml, + "etcd/prometheus-rolebinding.yaml": etcdPrometheusRolebindingYaml, + "etcd/restore-pod-cm.yaml": etcdRestorePodCmYaml, + "etcd/restore-pod.yaml": etcdRestorePodYaml, + "etcd/sa.yaml": etcdSaYaml, + "etcd/scripts-cm.yaml": etcdScriptsCmYaml, + "etcd/sm.yaml": etcdSmYaml, + "etcd/svc.yaml": etcdSvcYaml, } // AssetDir returns the file names below a certain @@ -1834,27 +1877,28 @@ type bintree struct { var _bintree = &bintree{nil, map[string]*bintree{ "etcd": {nil, map[string]*bintree{ - "backups-cr.yaml": {etcdBackupsCrYaml, map[string]*bintree{}}, - "backups-crb.yaml": {etcdBackupsCrbYaml, map[string]*bintree{}}, - "backups-sa.yaml": {etcdBackupsSaYaml, map[string]*bintree{}}, - "cluster-backup-cronjob.yaml": {etcdClusterBackupCronjobYaml, map[string]*bintree{}}, - "cluster-backup-job.yaml": {etcdClusterBackupJobYaml, map[string]*bintree{}}, - "cluster-backup.sh": {etcdClusterBackupSh, map[string]*bintree{}}, - "cluster-restore.sh": {etcdClusterRestoreSh, map[string]*bintree{}}, - "cm.yaml": {etcdCmYaml, map[string]*bintree{}}, - "etcd-common-tools": {etcdEtcdCommonTools, map[string]*bintree{}}, - "minimal-sm.yaml": {etcdMinimalSmYaml, map[string]*bintree{}}, - "ns.yaml": {etcdNsYaml, map[string]*bintree{}}, - "pod-cm.yaml": {etcdPodCmYaml, map[string]*bintree{}}, - "pod.yaml": {etcdPodYaml, map[string]*bintree{}}, - "prometheus-role.yaml": 
{etcdPrometheusRoleYaml, map[string]*bintree{}}, - "prometheus-rolebinding.yaml": {etcdPrometheusRolebindingYaml, map[string]*bintree{}}, - "restore-pod-cm.yaml": {etcdRestorePodCmYaml, map[string]*bintree{}}, - "restore-pod.yaml": {etcdRestorePodYaml, map[string]*bintree{}}, - "sa.yaml": {etcdSaYaml, map[string]*bintree{}}, - "scripts-cm.yaml": {etcdScriptsCmYaml, map[string]*bintree{}}, - "sm.yaml": {etcdSmYaml, map[string]*bintree{}}, - "svc.yaml": {etcdSvcYaml, map[string]*bintree{}}, + "backup-server-kubeconfig.yaml": {etcdBackupServerKubeconfigYaml, map[string]*bintree{}}, + "backups-cr.yaml": {etcdBackupsCrYaml, map[string]*bintree{}}, + "backups-crb.yaml": {etcdBackupsCrbYaml, map[string]*bintree{}}, + "backups-sa.yaml": {etcdBackupsSaYaml, map[string]*bintree{}}, + "cluster-backup-cronjob.yaml": {etcdClusterBackupCronjobYaml, map[string]*bintree{}}, + "cluster-backup-job.yaml": {etcdClusterBackupJobYaml, map[string]*bintree{}}, + "cluster-backup.sh": {etcdClusterBackupSh, map[string]*bintree{}}, + "cluster-restore.sh": {etcdClusterRestoreSh, map[string]*bintree{}}, + "cm.yaml": {etcdCmYaml, map[string]*bintree{}}, + "etcd-common-tools": {etcdEtcdCommonTools, map[string]*bintree{}}, + "minimal-sm.yaml": {etcdMinimalSmYaml, map[string]*bintree{}}, + "ns.yaml": {etcdNsYaml, map[string]*bintree{}}, + "pod-cm.yaml": {etcdPodCmYaml, map[string]*bintree{}}, + "pod.yaml": {etcdPodYaml, map[string]*bintree{}}, + "prometheus-role.yaml": {etcdPrometheusRoleYaml, map[string]*bintree{}}, + "prometheus-rolebinding.yaml": {etcdPrometheusRolebindingYaml, map[string]*bintree{}}, + "restore-pod-cm.yaml": {etcdRestorePodCmYaml, map[string]*bintree{}}, + "restore-pod.yaml": {etcdRestorePodYaml, map[string]*bintree{}}, + "sa.yaml": {etcdSaYaml, map[string]*bintree{}}, + "scripts-cm.yaml": {etcdScriptsCmYaml, map[string]*bintree{}}, + "sm.yaml": {etcdSmYaml, map[string]*bintree{}}, + "svc.yaml": {etcdSvcYaml, map[string]*bintree{}}, }}, }} diff --git a/pkg/operator/starter.go 
b/pkg/operator/starter.go index 5acd88a36..c66fcf60d 100644 --- a/pkg/operator/starter.go +++ b/pkg/operator/starter.go @@ -221,6 +221,7 @@ func RunOperator(ctx context.Context, controllerContext *controllercmd.Controlle "etcd/backups-sa.yaml", "etcd/backups-cr.yaml", "etcd/backups-crb.yaml", + "etcd/backup-server-kubeconfig.yaml", }, (&resourceapply.ClientHolder{}).WithKubernetes(kubeClient).WithDynamicClient(dynamicClient), operatorClient, @@ -629,6 +630,8 @@ var CertConfigMaps = []installer.UnrevisionedResource{ {Name: "restore-etcd-pod"}, {Name: "etcd-scripts"}, {Name: "etcd-all-bundles"}, + // kubeconfig for backup-server + {Name: "backup-server-kubeconfig"}, } var CertSecrets = []installer.UnrevisionedResource{ From b4f991927bcc652948db38f8640d19d47372307b Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 22 Jul 2024 23:21:43 +0200 Subject: [PATCH 22/22] reconcile default backup CR --- bindata/etcd/backup-server-kubeconfig.yaml | 26 --- bindata/etcd/pod.yaml | 33 ---- pkg/cmd/backuprestore/backupnoconfig.go | 93 +--------- pkg/cmd/backuprestore/backupnoconfig_test.go | 88 ---------- pkg/operator/etcd_assets/bindata.go | 163 +++++------------- .../periodicbackupcontroller.go | 151 ++++++++++++++-- pkg/operator/starter.go | 3 - 7 files changed, 189 insertions(+), 368 deletions(-) delete mode 100644 bindata/etcd/backup-server-kubeconfig.yaml delete mode 100644 pkg/cmd/backuprestore/backupnoconfig_test.go diff --git a/bindata/etcd/backup-server-kubeconfig.yaml b/bindata/etcd/backup-server-kubeconfig.yaml deleted file mode 100644 index 685bf6300..000000000 --- a/bindata/etcd/backup-server-kubeconfig.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: backup-server-kubeconfig - namespace: openshift-etcd -data: - kubeconfig: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority: /etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-server-ca/ca-bundle.crt - server: https://localhost:6443 - name: 
loopback - contexts: - - context: - cluster: loopback - user: backup-server - name: backup-server - current-context: backup-server - kind: Config - preferences: {} - users: - - name: backup-server - user: - client-certificate: /etc/kubernetes/static-pod-certs/secrets/backup-server-client-cert-key/tls.crt - client-key: /etc/kubernetes/static-pod-certs/secrets/backup-server-client-cert-key/tls.key diff --git a/bindata/etcd/pod.yaml b/bindata/etcd/pod.yaml index 1503c1a16..31ef7b713 100644 --- a/bindata/etcd/pod.yaml +++ b/bindata/etcd/pod.yaml @@ -311,39 +311,6 @@ ${COMPUTED_ENV_VARS} name: log-dir - mountPath: /etc/kubernetes/static-pod-certs name: cert-dir - - name: etcd-backup-noconfig - image: ${OPERATOR_IMAGE} - imagePullPolicy: IfNotPresent - terminationMessagePolicy: FallbackToLogsOnError - command: - - /bin/sh - - -c - - | - #!/bin/sh - set -euo pipefail - exec nice -n -18 cluster-etcd-operator backup-server \ - --kubeConfig=/etc/kubernetes/static-pod-certs/configmaps/etcd-backup-server/kubeconfig \ - --endpoints=https://localhost:2379 \ - --config-dir=/etc/kubernetes \ - --data-dir=/var/lib/etcd \ - --backup-dir=/var/backup/etcd - securityContext: - privileged: true - resources: - requests: - memory: 50Mi - cpu: 10m - env: -${COMPUTED_ENV_VARS} - volumeMounts: - - mountPath: /var/lib/etcd - name: data-dir - - mountPath: /var/backup/etcd - name: backup-dir - - mountPath: /etc/kubernetes - name: config-dir - - mountPath: /etc/kubernetes/static-pod-certs - name: cert-dir hostNetwork: true priorityClassName: system-node-critical tolerations: diff --git a/pkg/cmd/backuprestore/backupnoconfig.go b/pkg/cmd/backuprestore/backupnoconfig.go index 4bdcf5e70..9f9133872 100644 --- a/pkg/cmd/backuprestore/backupnoconfig.go +++ b/pkg/cmd/backuprestore/backupnoconfig.go @@ -4,14 +4,7 @@ import ( "context" "fmt" "io" - "slices" - backupv1alpha1 "github.com/openshift/api/config/v1alpha1" - backupv1client 
"github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" - prunebackups "github.com/openshift/cluster-etcd-operator/pkg/cmd/prune-backups" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" "github.com/adhocore/gronx/pkg/tasker" @@ -20,11 +13,9 @@ import ( ) type backupNoConfig struct { - kubeConfig string - schedule string - timeZone string - retention backupv1alpha1.RetentionPolicy - scheduler *tasker.Tasker + schedule string + timeZone string + scheduler *tasker.Tasker backupOptions } @@ -55,8 +46,8 @@ func NewBackupNoConfigCommand(errOut io.Writer) *cobra.Command { } func (b *backupNoConfig) AddFlags(fs *pflag.FlagSet) { - fs.StringVar(&b.kubeConfig, "kubeConfig", "", "kubeConfig specifies the config to be used by the cmd for accessing the api server") - cobra.MarkFlagRequired(fs, "kubeConfig") + fs.StringVar(&b.schedule, "schedule", "", "schedule specifies the cron schedule to run the backup") + fs.StringVar(&b.timeZone, "timezone", "", "timezone specifies the timezone of the cron schedule to run the backup") b.backupOptions.AddFlags(fs) } @@ -66,21 +57,12 @@ func (b *backupNoConfig) Validate() error { } func (b *backupNoConfig) Run() error { - backupsClient, err := b.getBackupClient() - if err != nil { - return err - } - - if err = b.extractBackupSpecs(backupsClient); err != nil { - return err - } - b.scheduler = tasker.New(tasker.Option{ Verbose: true, Tz: b.timeZone, }) - err = b.scheduleBackup() + err := b.scheduleBackup() if err != nil { return err } @@ -95,55 +77,6 @@ func (b *backupNoConfig) Run() error { return nil } -func (b *backupNoConfig) getBackupClient() (backupv1client.BackupsGetter, error) { - config, err := clientcmd.BuildConfigFromFlags("", b.kubeConfig) - if err != nil { - bErr := fmt.Errorf("error loading kubeconfig: %v", err) - klog.Error(bErr) - return nil, bErr - } - - backupsClient, err := backupv1client.NewForConfig(config) - if err != nil { - bErr := 
fmt.Errorf("error creating etcd backups client: %v", err) - klog.Error(bErr) - return nil, bErr - } - - return backupsClient, nil -} - -func (b *backupNoConfig) extractBackupSpecs(backupsClient backupv1client.BackupsGetter) error { - backups, err := backupsClient.Backups().List(context.Background(), v1.ListOptions{}) - if err != nil { - lErr := fmt.Errorf("could not list backup CRDs, error was: [%v]", err) - klog.Error(lErr) - return lErr - } - - if len(backups.Items) == 0 { - lErr := fmt.Errorf("no backup CRDs exist, found [%v]", backups) - klog.Error(lErr) - return lErr - } - - idx := slices.IndexFunc(backups.Items, func(backup backupv1alpha1.Backup) bool { - return backup.Name == "default" - }) - if idx == -1 { - sErr := fmt.Errorf("could not find default backup CR, found [%v]", backups.Items) - klog.Error(sErr) - return sErr - } - - defaultBackupCR := backups.Items[idx] - b.schedule = defaultBackupCR.Spec.EtcdBackupSpec.Schedule - b.retention = defaultBackupCR.Spec.EtcdBackupSpec.RetentionPolicy - b.timeZone = defaultBackupCR.Spec.EtcdBackupSpec.TimeZone - - return nil -} - func (b *backupNoConfig) scheduleBackup() error { var err error b.scheduler.Task(b.schedule, func(ctx context.Context) (int, error) { @@ -153,17 +86,3 @@ func (b *backupNoConfig) scheduleBackup() error { return err } - -func (b *backupNoConfig) pruneBackups() error { - opts := &prunebackups.PruneOpts{ - RetentionType: string(b.retention.RetentionType), - MaxNumberOfBackups: b.retention.RetentionNumber.MaxNumberOfBackups, - MaxSizeOfBackupsGb: b.retention.RetentionSize.MaxSizeOfBackupsGb, - } - - if err := opts.Validate(); err != nil { - return err - } - - return opts.Run() -} diff --git a/pkg/cmd/backuprestore/backupnoconfig_test.go b/pkg/cmd/backuprestore/backupnoconfig_test.go deleted file mode 100644 index b35ec24b4..000000000 --- a/pkg/cmd/backuprestore/backupnoconfig_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package backuprestore - -import ( - "errors" - "testing" - - backupv1alpha1 
"github.com/openshift/api/config/v1alpha1" - fake "github.com/openshift/client-go/config/clientset/versioned/fake" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/stretchr/testify/require" -) - -func TestBackupNoConfig_extractBackupSpecs(t *testing.T) { - testCases := []struct { - name string - backupName string - schedule string - expErr error - }{ - { - name: "empty input", - backupName: "", - schedule: "", - expErr: errors.New("no backup CRDs exist, found"), - }, - { - name: "non default backup", - backupName: "test-backup", - schedule: "20 4 * * *", - expErr: errors.New("could not find default backup CR"), - }, - { - name: "default backup", - backupName: "default", - schedule: "10 8 * 7 *", - expErr: nil, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // arrange - var operatorFake *fake.Clientset - backup := createBackupObject(tc.backupName, tc.schedule) - - if backup != nil { - operatorFake = fake.NewSimpleClientset([]runtime.Object{backup}...) 
- } else { - operatorFake = fake.NewSimpleClientset() - } - - // act - b := &backupNoConfig{} - err := b.extractBackupSpecs(operatorFake.ConfigV1alpha1()) - - // assert - if tc.expErr != nil { - require.ErrorContains(t, err, tc.expErr.Error()) - } else { - require.Equal(t, tc.expErr, err) - require.Equal(t, tc.schedule, b.schedule) - require.Equal(t, getRetentionPolicy(), b.retention) - } - }) - } -} - -func createBackupObject(backupName, schedule string) *backupv1alpha1.Backup { - if backupName == "" { - return nil - } - return &backupv1alpha1.Backup{ObjectMeta: v1.ObjectMeta{Name: backupName}, - Spec: backupv1alpha1.BackupSpec{ - EtcdBackupSpec: backupv1alpha1.EtcdBackupSpec{ - Schedule: schedule, - RetentionPolicy: getRetentionPolicy(), - TimeZone: "UTC", - PVCName: "backup-happy-path-pvc"}}} -} - -func getRetentionPolicy() backupv1alpha1.RetentionPolicy { - return backupv1alpha1.RetentionPolicy{ - RetentionType: backupv1alpha1.RetentionTypeNumber, - RetentionNumber: &backupv1alpha1.RetentionNumberConfig{MaxNumberOfBackups: 5}} -} diff --git a/pkg/operator/etcd_assets/bindata.go b/pkg/operator/etcd_assets/bindata.go index 113b5b393..8ad973ee7 100644 --- a/pkg/operator/etcd_assets/bindata.go +++ b/pkg/operator/etcd_assets/bindata.go @@ -1,6 +1,5 @@ // Code generated for package etcd_assets by go-bindata DO NOT EDIT. 
(@generated) // sources: -// bindata/etcd/backup-server-kubeconfig.yaml // bindata/etcd/backups-cr.yaml // bindata/etcd/backups-crb.yaml // bindata/etcd/backups-sa.yaml @@ -75,49 +74,6 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _etcdBackupServerKubeconfigYaml = []byte(`apiVersion: v1 -kind: ConfigMap -metadata: - name: backup-server-kubeconfig - namespace: openshift-etcd -data: - kubeconfig: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority: /etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-server-ca/ca-bundle.crt - server: https://localhost:6443 - name: loopback - contexts: - - context: - cluster: loopback - user: backup-server - name: backup-server - current-context: backup-server - kind: Config - preferences: {} - users: - - name: backup-server - user: - client-certificate: /etc/kubernetes/static-pod-certs/secrets/backup-server-client-cert-key/tls.crt - client-key: /etc/kubernetes/static-pod-certs/secrets/backup-server-client-cert-key/tls.key -`) - -func etcdBackupServerKubeconfigYamlBytes() ([]byte, error) { - return _etcdBackupServerKubeconfigYaml, nil -} - -func etcdBackupServerKubeconfigYaml() (*asset, error) { - bytes, err := etcdBackupServerKubeconfigYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "etcd/backup-server-kubeconfig.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - var _etcdBackupsCrYaml = []byte(`kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -1271,39 +1227,6 @@ ${COMPUTED_ENV_VARS} name: log-dir - mountPath: /etc/kubernetes/static-pod-certs name: cert-dir - - name: etcd-backup-noconfig - image: ${OPERATOR_IMAGE} - imagePullPolicy: IfNotPresent - terminationMessagePolicy: FallbackToLogsOnError - command: - - /bin/sh - - -c - - | - #!/bin/sh - set -euo pipefail - exec nice -n -18 cluster-etcd-operator backup-server \ - 
--kubeConfig=/etc/kubernetes/static-pod-certs/configmaps/etcd-backup-server/kubeconfig \ - --endpoints=https://localhost:2379 \ - --config-dir=/etc/kubernetes \ - --data-dir=/var/lib/etcd \ - --backup-dir=/var/backup/etcd - securityContext: - privileged: true - resources: - requests: - memory: 50Mi - cpu: 10m - env: -${COMPUTED_ENV_VARS} - volumeMounts: - - mountPath: /var/lib/etcd - name: data-dir - - mountPath: /var/backup/etcd - name: backup-dir - - mountPath: /etc/kubernetes - name: config-dir - - mountPath: /etc/kubernetes/static-pod-certs - name: cert-dir hostNetwork: true priorityClassName: system-node-critical tolerations: @@ -1809,28 +1732,27 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "etcd/backup-server-kubeconfig.yaml": etcdBackupServerKubeconfigYaml, - "etcd/backups-cr.yaml": etcdBackupsCrYaml, - "etcd/backups-crb.yaml": etcdBackupsCrbYaml, - "etcd/backups-sa.yaml": etcdBackupsSaYaml, - "etcd/cluster-backup-cronjob.yaml": etcdClusterBackupCronjobYaml, - "etcd/cluster-backup-job.yaml": etcdClusterBackupJobYaml, - "etcd/cluster-backup.sh": etcdClusterBackupSh, - "etcd/cluster-restore.sh": etcdClusterRestoreSh, - "etcd/cm.yaml": etcdCmYaml, - "etcd/etcd-common-tools": etcdEtcdCommonTools, - "etcd/minimal-sm.yaml": etcdMinimalSmYaml, - "etcd/ns.yaml": etcdNsYaml, - "etcd/pod-cm.yaml": etcdPodCmYaml, - "etcd/pod.yaml": etcdPodYaml, - "etcd/prometheus-role.yaml": etcdPrometheusRoleYaml, - "etcd/prometheus-rolebinding.yaml": etcdPrometheusRolebindingYaml, - "etcd/restore-pod-cm.yaml": etcdRestorePodCmYaml, - "etcd/restore-pod.yaml": etcdRestorePodYaml, - "etcd/sa.yaml": etcdSaYaml, - "etcd/scripts-cm.yaml": etcdScriptsCmYaml, - "etcd/sm.yaml": etcdSmYaml, - "etcd/svc.yaml": etcdSvcYaml, + "etcd/backups-cr.yaml": etcdBackupsCrYaml, + "etcd/backups-crb.yaml": etcdBackupsCrbYaml, + "etcd/backups-sa.yaml": etcdBackupsSaYaml, + 
"etcd/cluster-backup-cronjob.yaml": etcdClusterBackupCronjobYaml, + "etcd/cluster-backup-job.yaml": etcdClusterBackupJobYaml, + "etcd/cluster-backup.sh": etcdClusterBackupSh, + "etcd/cluster-restore.sh": etcdClusterRestoreSh, + "etcd/cm.yaml": etcdCmYaml, + "etcd/etcd-common-tools": etcdEtcdCommonTools, + "etcd/minimal-sm.yaml": etcdMinimalSmYaml, + "etcd/ns.yaml": etcdNsYaml, + "etcd/pod-cm.yaml": etcdPodCmYaml, + "etcd/pod.yaml": etcdPodYaml, + "etcd/prometheus-role.yaml": etcdPrometheusRoleYaml, + "etcd/prometheus-rolebinding.yaml": etcdPrometheusRolebindingYaml, + "etcd/restore-pod-cm.yaml": etcdRestorePodCmYaml, + "etcd/restore-pod.yaml": etcdRestorePodYaml, + "etcd/sa.yaml": etcdSaYaml, + "etcd/scripts-cm.yaml": etcdScriptsCmYaml, + "etcd/sm.yaml": etcdSmYaml, + "etcd/svc.yaml": etcdSvcYaml, } // AssetDir returns the file names below a certain @@ -1877,28 +1799,27 @@ type bintree struct { var _bintree = &bintree{nil, map[string]*bintree{ "etcd": {nil, map[string]*bintree{ - "backup-server-kubeconfig.yaml": {etcdBackupServerKubeconfigYaml, map[string]*bintree{}}, - "backups-cr.yaml": {etcdBackupsCrYaml, map[string]*bintree{}}, - "backups-crb.yaml": {etcdBackupsCrbYaml, map[string]*bintree{}}, - "backups-sa.yaml": {etcdBackupsSaYaml, map[string]*bintree{}}, - "cluster-backup-cronjob.yaml": {etcdClusterBackupCronjobYaml, map[string]*bintree{}}, - "cluster-backup-job.yaml": {etcdClusterBackupJobYaml, map[string]*bintree{}}, - "cluster-backup.sh": {etcdClusterBackupSh, map[string]*bintree{}}, - "cluster-restore.sh": {etcdClusterRestoreSh, map[string]*bintree{}}, - "cm.yaml": {etcdCmYaml, map[string]*bintree{}}, - "etcd-common-tools": {etcdEtcdCommonTools, map[string]*bintree{}}, - "minimal-sm.yaml": {etcdMinimalSmYaml, map[string]*bintree{}}, - "ns.yaml": {etcdNsYaml, map[string]*bintree{}}, - "pod-cm.yaml": {etcdPodCmYaml, map[string]*bintree{}}, - "pod.yaml": {etcdPodYaml, map[string]*bintree{}}, - "prometheus-role.yaml": {etcdPrometheusRoleYaml, 
map[string]*bintree{}}, - "prometheus-rolebinding.yaml": {etcdPrometheusRolebindingYaml, map[string]*bintree{}}, - "restore-pod-cm.yaml": {etcdRestorePodCmYaml, map[string]*bintree{}}, - "restore-pod.yaml": {etcdRestorePodYaml, map[string]*bintree{}}, - "sa.yaml": {etcdSaYaml, map[string]*bintree{}}, - "scripts-cm.yaml": {etcdScriptsCmYaml, map[string]*bintree{}}, - "sm.yaml": {etcdSmYaml, map[string]*bintree{}}, - "svc.yaml": {etcdSvcYaml, map[string]*bintree{}}, + "backups-cr.yaml": {etcdBackupsCrYaml, map[string]*bintree{}}, + "backups-crb.yaml": {etcdBackupsCrbYaml, map[string]*bintree{}}, + "backups-sa.yaml": {etcdBackupsSaYaml, map[string]*bintree{}}, + "cluster-backup-cronjob.yaml": {etcdClusterBackupCronjobYaml, map[string]*bintree{}}, + "cluster-backup-job.yaml": {etcdClusterBackupJobYaml, map[string]*bintree{}}, + "cluster-backup.sh": {etcdClusterBackupSh, map[string]*bintree{}}, + "cluster-restore.sh": {etcdClusterRestoreSh, map[string]*bintree{}}, + "cm.yaml": {etcdCmYaml, map[string]*bintree{}}, + "etcd-common-tools": {etcdEtcdCommonTools, map[string]*bintree{}}, + "minimal-sm.yaml": {etcdMinimalSmYaml, map[string]*bintree{}}, + "ns.yaml": {etcdNsYaml, map[string]*bintree{}}, + "pod-cm.yaml": {etcdPodCmYaml, map[string]*bintree{}}, + "pod.yaml": {etcdPodYaml, map[string]*bintree{}}, + "prometheus-role.yaml": {etcdPrometheusRoleYaml, map[string]*bintree{}}, + "prometheus-rolebinding.yaml": {etcdPrometheusRolebindingYaml, map[string]*bintree{}}, + "restore-pod-cm.yaml": {etcdRestorePodCmYaml, map[string]*bintree{}}, + "restore-pod.yaml": {etcdRestorePodYaml, map[string]*bintree{}}, + "sa.yaml": {etcdSaYaml, map[string]*bintree{}}, + "scripts-cm.yaml": {etcdScriptsCmYaml, map[string]*bintree{}}, + "sm.yaml": {etcdSmYaml, map[string]*bintree{}}, + "svc.yaml": {etcdSvcYaml, map[string]*bintree{}}, }}, }} diff --git a/pkg/operator/periodicbackupcontroller/periodicbackupcontroller.go b/pkg/operator/periodicbackupcontroller/periodicbackupcontroller.go index 
a42e94201..7850ca1ba 100644 --- a/pkg/operator/periodicbackupcontroller/periodicbackupcontroller.go +++ b/pkg/operator/periodicbackupcontroller/periodicbackupcontroller.go @@ -11,25 +11,29 @@ import ( backupv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" "github.com/openshift/cluster-etcd-operator/pkg/backuphelpers" "github.com/openshift/cluster-etcd-operator/pkg/operator/etcd_assets" + "github.com/openshift/cluster-etcd-operator/pkg/operator/health" "github.com/openshift/cluster-etcd-operator/pkg/operator/operatorclient" + "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" "k8s.io/klog/v2" - - "github.com/openshift/cluster-etcd-operator/pkg/operator/health" - "github.com/openshift/library-go/pkg/controller/factory" - "github.com/openshift/library-go/pkg/operator/events" - "github.com/openshift/library-go/pkg/operator/v1helpers" - "k8s.io/client-go/kubernetes" ) -const backupJobLabel = "backup-name" +const ( + backupJobLabel = "backup-name" + defaultBackupServerContainerName = "etcd-backup-noconfig" +) type PeriodicBackupController struct { operatorClient v1helpers.OperatorClient @@ -37,6 +41,7 @@ type PeriodicBackupController struct { kubeClient kubernetes.Interface operatorImagePullSpec string featureGateAccessor featuregates.FeatureGateAccess + defaultBackupRunning bool } func NewPeriodicBackupController( @@ -81,11 +86,40 @@ func (c *PeriodicBackupController) sync(ctx 
context.Context, _ factory.SyncConte return fmt.Errorf("PeriodicBackupController could not list backup CRDs, error was: %w", err) } - // ignore reconciliation of default backup - backups.Items = slices.DeleteFunc(backups.Items, func(b backupv1alpha1.Backup) bool { - return b.Name == "default" + // reconciliation of default backup + // (1) default backup CR exists && flag == true ( ignore ) + // (2) default backup CR no exists && flag == true ( remove container ) + // (3) default backup CR exists && flag == false ( deploy container ) + // (4) default backup CR no exists && flag == false ( ignore ) + idx := slices.IndexFunc(backups.Items, func(backup backupv1alpha1.Backup) bool { + return backup.Name == "default" }) + if idx == -1 && c.defaultBackupRunning { // case (2) + _, err = removeBackupServerContainer() + if err != nil { + rErr := fmt.Errorf("could not remove default backup container: [%v]", err) + klog.Error(rErr) + return rErr + } + c.defaultBackupRunning = false + klog.V(4).Info("default backup container removed successfully") + //TODO: redeploy etcd pods + + } else if idx > -1 && !c.defaultBackupRunning { // case (3) + _, err = runBackupServerContainer(backups.Items[idx], c.operatorImagePullSpec) + if err != nil { + rErr := fmt.Errorf("could not run default backup container: [%v]", err) + klog.Error(rErr) + return rErr + } + c.defaultBackupRunning = true + klog.V(4).Info("default backup container run successfully") + //TODO: redeploy etcd pods + // TODO: run retention + // TODO: update statusConditions + } + for _, item := range backups.Items { err := reconcileCronJob(ctx, cronJobsClient, item, c.operatorImagePullSpec) if err != nil { @@ -266,3 +300,100 @@ func newCronJob() (*batchv1.CronJob, error) { return obj.(*batchv1.CronJob), nil } + +func runBackupServerContainer(defaultBackup backupv1alpha1.Backup, operatorImagePullSpec string) (*corev1.Pod, error) { + etcdPod, err := decodeEtcdPodManifest() + if err != nil { + return nil, err + } + + if exist := 
slices.ContainsFunc(etcdPod.Spec.Containers, func(c corev1.Container) bool { + return c.Name == defaultBackupServerContainerName + }); exist { + return etcdPod, nil + } + + requiredVolumes := map[string]string{ + "data-dir": "/var/lib/etcd", + "config-dir": "/etc/kubernetes", + "backup-dir": " /var/backup/etcd", + } + + for k := range requiredVolumes { + if exist := slices.ContainsFunc(etcdPod.Spec.Volumes, func(v corev1.Volume) bool { + return v.Name == k + }); !exist { + return nil, fmt.Errorf("could not find required volume '%v' ", k) + } + } + + backupServerContainer := corev1.Container{} + backupServerContainer.Name = defaultBackupServerContainerName + backupServerContainer.Image = operatorImagePullSpec + backupServerContainer.ImagePullPolicy = corev1.PullAlways + backupServerContainer.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError + backupServerContainer.VolumeMounts = mapToMounts(requiredVolumes) + backupServerContainer.Args = mapToArgs(requiredVolumes) + backupServerContainer.Args = append([]string{"backup-server", + fmt.Sprintf("--%s=%s", "schedule", defaultBackup.Spec.EtcdBackupSpec.Schedule), + fmt.Sprintf("--%s=%s", "timezone", defaultBackup.Spec.EtcdBackupSpec.TimeZone), + }, backupServerContainer.Args...) + + etcdPod.Spec.Containers = append(etcdPod.Spec.Containers, backupServerContainer) + return etcdPod, nil +} + +func removeBackupServerContainer() (*corev1.Pod, error) { + etcdPod, err := decodeEtcdPodManifest() + if err != nil { + return nil, err + } + + idx := slices.IndexFunc(etcdPod.Spec.Containers, func(c corev1.Container) bool { + return c.Name == defaultBackupServerContainerName + }) + if idx == -1 { + return etcdPod, nil + } + + etcdPod.Spec.Containers = append(etcdPod.Spec.Containers[:idx], etcdPod.Spec.Containers[idx+1:]...) 
+ return etcdPod, nil +} + +func decodeEtcdPodManifest() (*corev1.Pod, error) { + scheme := runtime.NewScheme() + codec := serializer.NewCodecFactory(scheme) + utilruntime.Must(corev1.AddToScheme(scheme)) + etcdPodBytes := etcd_assets.MustAsset("etcd/pod.yaml") + + obj, err := runtime.Decode(codec.UniversalDecoder(corev1.SchemeGroupVersion), etcdPodBytes) + if err != nil { + return nil, fmt.Errorf("failed to decode etcd pod manifest: %v", err) + } + + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("unsupported type: etcdStaticPodManifest is not type *corev1.Pod but %T", obj) + } + + return pod, nil +} + +func mapToArgs(m map[string]string) []string { + var args []string + for k, v := range m { + args = append(args, fmt.Sprintf("--%s=%s", k, v)) + } + return args +} + +func mapToMounts(m map[string]string) []corev1.VolumeMount { + var mounts []corev1.VolumeMount + for k, v := range m { + mounts = append(mounts, corev1.VolumeMount{ + Name: k, + MountPath: v, + }) + } + return mounts +} diff --git a/pkg/operator/starter.go b/pkg/operator/starter.go index c66fcf60d..5acd88a36 100644 --- a/pkg/operator/starter.go +++ b/pkg/operator/starter.go @@ -221,7 +221,6 @@ func RunOperator(ctx context.Context, controllerContext *controllercmd.Controlle "etcd/backups-sa.yaml", "etcd/backups-cr.yaml", "etcd/backups-crb.yaml", - "etcd/backup-server-kubeconfig.yaml", }, (&resourceapply.ClientHolder{}).WithKubernetes(kubeClient).WithDynamicClient(dynamicClient), operatorClient, @@ -630,8 +629,6 @@ var CertConfigMaps = []installer.UnrevisionedResource{ {Name: "restore-etcd-pod"}, {Name: "etcd-scripts"}, {Name: "etcd-all-bundles"}, - // kubeconfig for backup-server - {Name: "backup-server-kubeconfig"}, } var CertSecrets = []installer.UnrevisionedResource{