diff --git a/examples/demo/demo.go b/examples/demo/demo.go index 4c82841957c..1d0ae13d8fc 100644 --- a/examples/demo/demo.go +++ b/examples/demo/demo.go @@ -84,13 +84,15 @@ func runCluster() { } env, err := vttest.NewLocalTestEnv(12345) if err != nil { - log.Exitf("Error: %v", err) + log.Error(fmt.Sprintf("Error: %v", err)) + os.Exit(1) } cluster.Env = env err = cluster.Setup() if err != nil { cluster.TearDown() - log.Exitf("Error: %v", err) + log.Error(fmt.Sprintf("Error: %v", err)) + os.Exit(1) } } @@ -189,7 +191,7 @@ func execQuery(conn *mysql.Conn, key, query, keyspace, shard string, response ma "title": title, "error": err.Error(), } - log.Errorf("error: %v", err) + log.Error(fmt.Sprintf("error: %v", err)) return } response[key] = resultToMap(title, qr) @@ -207,7 +209,7 @@ func resultToMap(title string, qr *sqltypes.Result) map[string]any { if value.Type() == sqltypes.VarBinary { bytes, err := value.ToBytes() if err != nil { - log.Errorf("Error converting value to bytes: %v", err) + log.Error(fmt.Sprintf("Error converting value to bytes: %v", err)) return nil } srow = append(srow, hex.EncodeToString(bytes)) @@ -230,7 +232,7 @@ func streamQuerylog(port int) (<-chan string, error) { request := fmt.Sprintf("http://localhost:%d/debug/querylog", port) resp, err := http.Get(request) if err != nil { - log.Errorf("Error reading stream: %v: %v", request, err) + log.Error(fmt.Sprintf("Error reading stream: %v: %v", request, err)) return nil, err } ch := make(chan string, 100) @@ -239,7 +241,7 @@ func streamQuerylog(port int) (<-chan string, error) { for { str, err := buffered.ReadString('\n') if err != nil { - log.Errorf("Error reading stream: %v: %v", request, err) + log.Error(fmt.Sprintf("Error reading stream: %v: %v", request, err)) close(ch) resp.Body.Close() return diff --git a/go/acl/acl.go b/go/acl/acl.go index eb68a188472..7e260a9452a 100644 --- a/go/acl/acl.go +++ b/go/acl/acl.go @@ -38,6 +38,7 @@ package acl import ( "fmt" "net/http" + "os" "sync" "github.com/spf13/pflag" @@ -80,7 +81,8 @@ func RegisterFlags(fs *pflag.FlagSet) { // functions when needed. func RegisterPolicy(name string, policy Policy) { if _, ok := policies[name]; ok { - log.Fatalf("policy %s is already registered", name) + log.Error(fmt.Sprintf("policy %s is already registered", name)) + os.Exit(1) } policies[name] = policy } @@ -95,7 +97,7 @@ func savePolicy() { currentPolicy = policy return } - log.Warningf("security-policy %q not found; using fallback policy (deny-all)", securityPolicy) + log.Warn(fmt.Sprintf("security-policy %q not found; using fallback policy (deny-all)", securityPolicy)) currentPolicy = denyAllPolicy{} } diff --git a/go/cmd/mysqlctl/mysqlctl.go b/go/cmd/mysqlctl/mysqlctl.go index 846d80dd47f..1dd7014e403 100644 --- a/go/cmd/mysqlctl/mysqlctl.go +++ b/go/cmd/mysqlctl/mysqlctl.go @@ -18,6 +18,9 @@ limitations under the License. 
package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/mysqlctl/command" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/utils" @@ -27,6 +30,7 @@ func main() { command.Root.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := command.Root.Execute(); err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/mysqlctld/cli/mysqlctld.go b/go/cmd/mysqlctld/cli/mysqlctld.go index e4f63b9c57f..e32d38b74af 100644 --- a/go/cmd/mysqlctld/cli/mysqlctld.go +++ b/go/cmd/mysqlctld/cli/mysqlctld.go @@ -119,7 +119,7 @@ func run(cmd *cobra.Command, args []string) error { mycnfFile := mysqlctl.MycnfFile(tabletUID) if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) { // Generate my.cnf from scratch and use it to find mysqld. - log.Infof("mycnf file (%s) doesn't exist, initializing", mycnfFile) + log.Info(fmt.Sprintf("mycnf file (%s) doesn't exist, initializing", mycnfFile)) var err error mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort, collationEnv) @@ -135,7 +135,7 @@ func run(cmd *cobra.Command, args []string) error { } } else { // There ought to be an existing my.cnf, so use it to find mysqld. - log.Infof("mycnf file (%s) already exists, starting without init", mycnfFile) + log.Info(fmt.Sprintf("mycnf file (%s) already exists, starting without init", mycnfFile)) var err error mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) @@ -158,7 +158,7 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to start mysqld: %w", err) } } else { - log.Infof("found interrupted restore, not starting mysqld") + log.Info("found interrupted restore, not starting mysqld") } } cancel() @@ -167,11 +167,11 @@ func run(cmd *cobra.Command, args []string) error { // Take mysqld down with us on SIGTERM before entering lame duck. servenv.OnTermSync(func() { - log.Infof("mysqlctl received SIGTERM, shutting down mysqld first") + log.Info("mysqlctl received SIGTERM, shutting down mysqld first") ctx, cancel := context.WithTimeout(cmd.Context(), shutdownWaitTime+10*time.Second) defer cancel() if err := mysqld.Shutdown(ctx, cnf, true, shutdownWaitTime); err != nil { - log.Errorf("failed to shutdown mysqld: %v", err) + log.Error(fmt.Sprintf("failed to shutdown mysqld: %v", err)) } }) @@ -184,9 +184,9 @@ func run(cmd *cobra.Command, args []string) error { select { case <-mysqldTerminated: - log.Infof("mysqld shut down on its own, exiting mysqlctld") + log.Info("mysqld shut down on its own, exiting mysqlctld") case <-mysqlctldTerminated: - log.Infof("mysqlctld shut down gracefully") + log.Info("mysqlctld shut down gracefully") } return nil diff --git a/go/cmd/mysqlctld/mysqlctld.go b/go/cmd/mysqlctld/mysqlctld.go index 5bdfc7a84b8..81caf76c393 100644 --- a/go/cmd/mysqlctld/mysqlctld.go +++ b/go/cmd/mysqlctld/mysqlctld.go @@ -20,6 +20,9 @@ limitations under the License. package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/mysqlctld/cli" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/utils" @@ -29,6 +32,7 @@ func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/topo2topo/topo2topo.go b/go/cmd/topo2topo/topo2topo.go index f9019460898..eb55420b19b 100644 --- a/go/cmd/topo2topo/topo2topo.go +++ b/go/cmd/topo2topo/topo2topo.go @@ -17,6 +17,9 @@ limitations under the License. 
package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/topo2topo/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" @@ -28,6 +31,7 @@ func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Exitf("%s", err) + log.Error(fmt.Sprintf("%s", err)) + os.Exit(1) } } diff --git a/go/cmd/vtadmin/main.go b/go/cmd/vtadmin/main.go index 4f4ac271865..b6de8ad5935 100644 --- a/go/cmd/vtadmin/main.go +++ b/go/cmd/vtadmin/main.go @@ -18,7 +18,9 @@ package main import ( "flag" + "fmt" "io" + "os" "time" "github.com/spf13/cobra" @@ -78,7 +80,8 @@ var ( // a log.Fatal call with the given args. func fatal(args ...any) { trace.LogErrorsWhenClosing(traceCloser) - log.Fatal(args...) + log.Error(fmt.Sprint(args...)) + os.Exit(1) } // startTracing checks the value of --tracer and then starts tracing, populating @@ -86,12 +89,12 @@ func fatal(args ...any) { func startTracing(cmd *cobra.Command) { tracer, err := cmd.Flags().GetString("tracer") if err != nil { - log.Warningf("not starting tracer; err: %s", err) + log.Warn(fmt.Sprintf("not starting tracer; err: %s", err)) return } if tracer == "" || tracer == "noop" { - log.Warningf("starting tracing with noop tracer") + log.Warn("starting tracing with noop tracer") } traceCloser = trace.StartTracing("vtadmin") @@ -136,7 +139,7 @@ func run(cmd *cobra.Command, args []string) { } if cacheRefreshKey == "" { - log.Warningf("no cache-refresh-key set; forcing cache refreshes will not be possible") + log.Warn("no cache-refresh-key set; forcing cache refreshes will not be possible") } cache.SetCacheRefreshKey(cacheRefreshKey) @@ -228,7 +231,8 @@ func main() { rootCmd.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := rootCmd.Execute(); err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/vtbackup/cli/vtbackup.go b/go/cmd/vtbackup/cli/vtbackup.go index a8bbadd87ba..137f69ca2d9 100644 --- a/go/cmd/vtbackup/cli/vtbackup.go +++ b/go/cmd/vtbackup/cli/vtbackup.go @@ -270,7 +270,7 @@ func run(cc *cobra.Command, args []string) error { defer logutil.Flush() if minRetentionCount < 1 { - log.Errorf("min_retention_count must be at least 1 to allow restores to succeed") + log.Error("min_retention_count must be at least 1 to allow restores to succeed") exit.Return(1) } @@ -314,7 +314,7 @@ func run(cc *cobra.Command, args []string) error { } if keepAliveTimeout > 0 { - log.Infof("Backup was successful, waiting %s before exiting (or until context expires).", keepAliveTimeout) + log.Info(fmt.Sprintf("Backup was successful, waiting %s before exiting (or until context expires).", keepAliveTimeout)) select { case <-time.After(keepAliveTimeout): case <-ctx.Done(): @@ -345,9 +345,9 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac // accumulate garbage (and run out of disk space) if it's restarted. 
tabletDir := mysqlctl.TabletDir(tabletAlias.Uid) defer func() { - log.Infof("Removing temporary tablet directory: %v", tabletDir) + log.Info(fmt.Sprintf("Removing temporary tablet directory: %v", tabletDir)) if err := os.RemoveAll(tabletDir); err != nil { - log.Warningf("Failed to remove temporary tablet directory: %v", err) + log.Warn(fmt.Sprintf("Failed to remove temporary tablet directory: %v", err)) } }() @@ -363,7 +363,7 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac cancelBackgroundCtx() }() mysqld.OnTerm(func() { - log.Warning("Cancelling vtbackup as MySQL has terminated") + log.Warn("Cancelling vtbackup as MySQL has terminated") cancelCtx() cancelBackgroundCtx() }) @@ -382,7 +382,7 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(backgroundCtx, mysqlShutdownTimeout+10*time.Second) defer mysqlShutdownCancel() if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false, mysqlShutdownTimeout); err != nil { - log.Errorf("failed to shutdown mysqld: %v", err) + log.Error(fmt.Sprintf("failed to shutdown mysqld: %v", err)) } }() @@ -460,7 +460,7 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac phase.Set(phaseNameRestoreLastBackup, int64(1)) defer phase.Set(phaseNameRestoreLastBackup, int64(0)) backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard) - log.Infof("Restoring latest backup from directory %v", backupDir) + log.Info(fmt.Sprintf("Restoring latest backup from directory %v", backupDir)) restoreAt := time.Now() params := mysqlctl.RestoreParams{ Cnf: mycnf, @@ -481,7 +481,7 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac case nil: // if err is nil, we expect backupManifest to be non-nil restorePos = backupManifest.Position - log.Infof("Successfully restored from backup at replication position %v", restorePos) + log.Info(fmt.Sprintf("Successfully restored from backup at replication position %v", restorePos)) case mysqlctl.ErrNoBackup: // There is no backup found, but we may be taking the initial backup of a shard if !allowFirstBackup { @@ -501,7 +501,7 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac disabledRedoLog := false if disableRedoLog { if err := mysqld.DisableRedoLog(ctx); err != nil { - log.Warningf("Error disabling redo logging: %v", err) + log.Warn(fmt.Sprintf("Error disabling redo logging: %v", err)) } else { disabledRedoLog = true } @@ -547,7 +547,7 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac return fmt.Errorf("can't get the primary replication position after all retries: %v", err) } - log.Infof("takeBackup: primary position is: %s", primaryPos.String()) + log.Info("takeBackup: primary position is: " + primaryPos.String()) // Remember the time when we fetched the primary position, not when we caught // up to it, so the timestamp on our backup is honest (assuming we make it @@ -582,13 +582,13 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac status, statusErr = mysqld.ReplicationStatus(ctx) if statusErr != nil { lastErr.Record(statusErr) - log.Warningf("Error getting replication status: %v", statusErr) + log.Warn(fmt.Sprintf("Error getting replication status: %v", statusErr)) continue } if status.Position.AtLeast(primaryPos) { // We're caught up on replication to at least the point the primary // was at when this vtbackup run started. 
- log.Infof("Replication caught up to %v after %v", status.Position, time.Since(waitStartTime)) + log.Info(fmt.Sprintf("Replication caught up to %v after %v", status.Position, time.Since(waitStartTime))) deprecatedDurationByPhase.Set("CatchUpReplication", int64(time.Since(waitStartTime).Seconds())) break } @@ -601,12 +601,12 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac } if !status.Healthy() { errStr := "Replication has stopped before backup could be taken. Trying to restart replication." - log.Warning(errStr) + log.Warn(errStr) lastErr.Record(errors.New(strings.ToLower(errStr))) phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStopped}, 1) if err := startReplication(ctx, mysqld, topoServer); err != nil { - log.Warningf("Failed to restart replication: %v", err) + log.Warn(fmt.Sprintf("Failed to restart replication: %v", err)) } } else { phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStopped}, 0) @@ -624,7 +624,7 @@ func takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, bac if statusErr != nil { return fmt.Errorf("can't get replication status: %v", err) } - log.Infof("Replication caught up to %v", status.Position) + log.Info(fmt.Sprintf("Replication caught up to %v", status.Position)) if !status.Position.AtLeast(primaryPos) && status.Position.Equal(restorePos) { return fmt.Errorf("not taking backup: replication did not make any progress from restore point: %v", restorePos) } @@ -758,11 +758,11 @@ func retryOnError(ctx context.Context, fn func() error) error { if err == nil { return nil } - log.Errorf("Waiting %v to retry after error: %v", waitTime, err) + log.Error(fmt.Sprintf("Waiting %v to retry after error: %v", waitTime, err)) select { case <-ctx.Done(): - log.Errorf("Not retrying after error: %v", ctx.Err()) + log.Error(fmt.Sprintf("Not retrying after error: %v", ctx.Err())) return ctx.Err() case <-time.After(waitTime): waitTime *= 2 @@ -781,7 +781,7 @@ func pruneBackups(ctx context.Context, backupStorage backupstorage.BackupStorage } numBackups := len(backups) if numBackups <= minRetentionCount { - log.Infof("Found %v backups. Not pruning any since this is within the min_retention_count of %v.", numBackups, minRetentionCount) + log.Info(fmt.Sprintf("Found %v backups. Not pruning any since this is within the min_retention_count of %v.", numBackups, minRetentionCount)) return nil } // We have more than the minimum retention count, so we could afford to @@ -794,18 +794,18 @@ func pruneBackups(ctx context.Context, backupStorage backupstorage.BackupStorage } if time.Since(backupTime) < minRetentionTime { // The oldest remaining backup is not old enough to prune. - log.Infof("Oldest backup taken at %v has not reached min_retention_time of %v. Nothing left to prune.", backupTime, minRetentionTime) + log.Info(fmt.Sprintf("Oldest backup taken at %v has not reached min_retention_time of %v. Nothing left to prune.", backupTime, minRetentionTime)) break } // Remove the backup. - log.Infof("Removing old backup %v from %v, since it's older than min_retention_time of %v", backup.Name(), backupDir, minRetentionTime) + log.Info(fmt.Sprintf("Removing old backup %v from %v, since it's older than min_retention_time of %v", backup.Name(), backupDir, minRetentionTime)) if err := backupStorage.RemoveBackup(ctx, backupDir, backup.Name()); err != nil { return fmt.Errorf("couldn't remove backup %v from %v: %v", backup.Name(), backupDir, err) } // We successfully removed one backup. 
Can we afford to prune any more? numBackups-- if numBackups == minRetentionCount { - log.Infof("Successfully pruned backup count to min_retention_count of %v.", minRetentionCount) + log.Info(fmt.Sprintf("Successfully pruned backup count to min_retention_count of %v.", minRetentionCount)) break } } @@ -837,7 +837,7 @@ func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage ba if initialBackup { // Check if any backups for the shard already exist in this backup storage location. if lastBackup != nil { - log.Infof("At least one complete backup already exists, so there's no need to seed an empty backup. Doing nothing.") + log.Info("At least one complete backup already exists, so there's no need to seed an empty backup. Doing nothing.") return false, nil } @@ -861,17 +861,17 @@ func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage ba return false, fmt.Errorf("refusing to upload initial backup of empty database: the shard %v/%v already has at least one tablet that may be serving (%v); you must take a backup from a live tablet instead", initKeyspace, initShard, tabletAlias) } } - log.Infof("Shard %v/%v exists but has no serving tablets.", initKeyspace, initShard) + log.Info(fmt.Sprintf("Shard %v/%v exists but has no serving tablets.", initKeyspace, initShard)) case topo.IsErrType(shardErr, topo.NoNode): // The shard doesn't exist, so we know no tablets are running. - log.Infof("Shard %v/%v doesn't exist; assuming it has no serving tablets.", initKeyspace, initShard) + log.Info(fmt.Sprintf("Shard %v/%v doesn't exist; assuming it has no serving tablets.", initKeyspace, initShard)) default: // If we encounter any other error, we don't know for sure whether // the shard exists, so it's not safe to continue. return false, fmt.Errorf("failed to check whether shard %v/%v exists before doing initial backup: %v", initKeyspace, initShard, err) } - log.Infof("Shard %v/%v has no existing backups. Creating initial backup.", initKeyspace, initShard) + log.Info(fmt.Sprintf("Shard %v/%v has no existing backups. Creating initial backup.", initKeyspace, initShard)) return true, nil } @@ -898,11 +898,11 @@ func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage ba } if elapsedTime := time.Since(lastBackupTime); elapsedTime < minBackupInterval { // It hasn't been long enough yet. - log.Infof("Skipping backup since only %v has elapsed since the last backup at %v, which is less than the min_backup_interval of %v.", elapsedTime, lastBackupTime, minBackupInterval) + log.Info(fmt.Sprintf("Skipping backup since only %v has elapsed since the last backup at %v, which is less than the min_backup_interval of %v.", elapsedTime, lastBackupTime, minBackupInterval)) return false, nil } // It has been long enough. - log.Infof("The last backup was taken at %v, which is older than the min_backup_interval of %v.", lastBackupTime, minBackupInterval) + log.Info(fmt.Sprintf("The last backup was taken at %v, which is older than the min_backup_interval of %v.", lastBackupTime, minBackupInterval)) return true, nil } @@ -917,7 +917,7 @@ func lastCompleteBackup(ctx context.Context, backups []backupstorage.BackupHandl // which is written at the end after all files are uploaded. 
backup := backups[i] if err := checkBackupComplete(ctx, backup); err != nil { - log.Warningf("Ignoring backup %v because it's incomplete: %v", backup.Name(), err) + log.Warn(fmt.Sprintf("Ignoring backup %v because it's incomplete: %v", backup.Name(), err)) continue } return backup @@ -932,6 +932,6 @@ func checkBackupComplete(ctx context.Context, backup backupstorage.BackupHandle) return fmt.Errorf("can't get backup MANIFEST: %v", err) } - log.Infof("Found complete backup %v taken at position %v", backup.Name(), manifest.Position.String()) + log.Info(fmt.Sprintf("Found complete backup %v taken at position %v", backup.Name(), manifest.Position.String())) return nil } diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go index a69afd8dc97..1dd1d1cea75 100644 --- a/go/cmd/vtbackup/vtbackup.go +++ b/go/cmd/vtbackup/vtbackup.go @@ -17,6 +17,8 @@ limitations under the License. package main import ( + "fmt" + "vitess.io/vitess/go/cmd/vtbackup/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" @@ -28,7 +30,7 @@ func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) exit.Return(1) } } diff --git a/go/cmd/vtbench/vtbench.go b/go/cmd/vtbench/vtbench.go index 75e221d6fe5..3f62b584eb0 100644 --- a/go/cmd/vtbench/vtbench.go +++ b/go/cmd/vtbench/vtbench.go @@ -17,6 +17,9 @@ limitations under the License. package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/vtbench/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" @@ -28,6 +31,7 @@ func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/vtclient/cli/vtclient.go b/go/cmd/vtclient/cli/vtclient.go index d31555d2abc..912ad498504 100644 --- a/go/cmd/vtclient/cli/vtclient.go +++ b/go/cmd/vtclient/cli/vtclient.go @@ -195,7 +195,7 @@ func _run(cmd *cobra.Command, args []string) (*results, error) { return nil, fmt.Errorf("client error: %w", err) } - log.Infof("Sending the query...") + log.Info("Sending the query...") ctx, cancel := context.WithTimeout(cmd.Context(), timeout) defer cancel() diff --git a/go/cmd/vtclient/vtclient.go b/go/cmd/vtclient/vtclient.go index 5df798dfd6d..6db3e44af65 100644 --- a/go/cmd/vtclient/vtclient.go +++ b/go/cmd/vtclient/vtclient.go @@ -17,6 +17,9 @@ limitations under the License. 
package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/vtclient/cli" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/utils" @@ -27,6 +30,7 @@ func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/vtcombo/cli/main.go b/go/cmd/vtcombo/cli/main.go index e49fee14dc1..11a86b14b24 100644 --- a/go/cmd/vtcombo/cli/main.go +++ b/go/cmd/vtcombo/cli/main.go @@ -187,7 +187,8 @@ func run(cmd *cobra.Command, args []string) (err error) { TruncateErrLen: servenv.TruncateErrLen, }) if err != nil { - log.Fatalf("unable to initialize env: %v", err) + log.Error(fmt.Sprintf("unable to initialize env: %v", err)) + os.Exit(1) } ctx, cancel := context.WithCancel(cmd.Context()) @@ -321,7 +322,8 @@ func run(cmd *cobra.Command, args []string) (err error) { } if len(tabletTypes) == 0 { - log.Exitf("tablet-types-to-wait should contain at least one serving tablet type") + log.Error("tablet-types-to-wait should contain at least one serving tablet type") + os.Exit(1) } } else { tabletTypes = append(tabletTypes, topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY) diff --git a/go/cmd/vtcombo/cli/vschema_watcher.go b/go/cmd/vtcombo/cli/vschema_watcher.go index 63192a3cdda..4534a70007f 100644 --- a/go/cmd/vtcombo/cli/vschema_watcher.go +++ b/go/cmd/vtcombo/cli/vschema_watcher.go @@ -19,6 +19,7 @@ package cli import ( "context" "encoding/json" + "fmt" "os" "path" @@ -32,7 +33,8 @@ import ( func startVschemaWatcher(ctx context.Context, vschemaPersistenceDir string, ts *topo.Server) { // Create the directory if it doesn't exist. if err := createDirectoryIfNotExists(vschemaPersistenceDir); err != nil { - log.Fatalf("Unable to create vschema persistence directory %v: %v", vschemaPersistenceDir, err) + log.Error(fmt.Sprintf("Unable to create vschema persistence directory %v: %v", vschemaPersistenceDir, err)) + os.Exit(1) } // If there are keyspace files, load them. @@ -40,7 +42,8 @@ func startVschemaWatcher(ctx context.Context, vschemaPersistenceDir string, ts * // Rebuild the SrvVSchema object in case we loaded vschema from file if err := ts.RebuildSrvVSchema(ctx, tpb.Cells); err != nil { - log.Fatalf("RebuildSrvVSchema failed: %v", err) + log.Error(fmt.Sprintf("RebuildSrvVSchema failed: %v", err)) + os.Exit(1) } // Now watch for changes in the SrvVSchema object and persist them to disk. 
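For orientation between these hunks: the shape applied throughout this diff replaces the exiting formatted helpers (log.Fatalf / log.Exitf) with a plain-message call followed by an explicit os.Exit(1), and the formatted variants (log.Infof / log.Warningf / log.Errorf) with log.Info / log.Warn / log.Error wrapping fmt.Sprintf. The sketch below is illustrative only, assuming nothing beyond the Info/Error(string) signatures the new hunks already call; the loadVSchemaFile helper and its path argument are hypothetical and not part of this change.

package main

import (
	"fmt"
	"os"

	"vitess.io/vitess/go/vt/log"
)

// loadVSchemaFile is a hypothetical helper showing the migrated call shape:
// formatted messages go through fmt.Sprintf, and fatal paths become an
// explicit log.Error followed by os.Exit(1).
func loadVSchemaFile(path string) []byte {
	data, err := os.ReadFile(path)
	if err != nil {
		// Old style: log.Fatalf("Unable to read keyspace file %v: %v", path, err)
		log.Error(fmt.Sprintf("Unable to read keyspace file %v: %v", path, err))
		os.Exit(1)
	}
	log.Info(fmt.Sprintf("Loaded keyspace file %v (%d bytes)", path, len(data)))
	return data
}

func main() {
	_ = loadVSchemaFile("keyspace.json")
}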
@@ -53,7 +56,8 @@ func loadKeyspacesFromDir(ctx context.Context, dir string, ts *topo.Server) { if _, err := os.Stat(ksFile); err == nil { jsonData, err := os.ReadFile(ksFile) if err != nil { - log.Fatalf("Unable to read keyspace file %v: %v", ksFile, err) + log.Error(fmt.Sprintf("Unable to read keyspace file %v: %v", ksFile, err)) + os.Exit(1) } ksvs := &topo.KeyspaceVSchemaInfo{ @@ -62,15 +66,17 @@ func loadKeyspacesFromDir(ctx context.Context, dir string, ts *topo.Server) { } err = json.Unmarshal(jsonData, ksvs.Keyspace) if err != nil { - log.Fatalf("Unable to parse keyspace file %v: %v", ksFile, err) + log.Error(fmt.Sprintf("Unable to parse keyspace file %v: %v", ksFile, err)) + os.Exit(1) } _, err = vindexes.BuildKeyspace(ksvs.Keyspace, env.Parser()) if err != nil { - log.Fatalf("Invalid keyspace definition: %v", err) + log.Error(fmt.Sprintf("Invalid keyspace definition: %v", err)) + os.Exit(1) } ts.SaveVSchema(ctx, ksvs) - log.Infof("Loaded keyspace %v from %v\n", ks.Name, ksFile) + log.Info(fmt.Sprintf("Loaded keyspace %v from %v\n", ks.Name, ksFile)) } } } @@ -78,17 +84,19 @@ func loadKeyspacesFromDir(ctx context.Context, dir string, ts *topo.Server) { func watchSrvVSchema(ctx context.Context, ts *topo.Server, cell string) { data, ch, err := ts.WatchSrvVSchema(ctx, tpb.Cells[0]) if err != nil { - log.Fatalf("WatchSrvVSchema failed: %v", err) + log.Error(fmt.Sprintf("WatchSrvVSchema failed: %v", err)) + os.Exit(1) } if data.Err != nil { - log.Fatalf("WatchSrvVSchema could not retrieve initial vschema: %v", data.Err) + log.Error(fmt.Sprintf("WatchSrvVSchema could not retrieve initial vschema: %v", data.Err)) + os.Exit(1) } persistNewSrvVSchema(data.Value) for update := range ch { if update.Err != nil { - log.Errorf("WatchSrvVSchema returned an error: %v", update.Err) + log.Error(fmt.Sprintf("WatchSrvVSchema returned an error: %v", update.Err)) } else { persistNewSrvVSchema(update.Value) } @@ -99,15 +107,15 @@ func persistNewSrvVSchema(srvVSchema *vschemapb.SrvVSchema) { for ksName, ks := range srvVSchema.Keyspaces { jsonBytes, err := json.MarshalIndent(ks, "", " ") if err != nil { - log.Errorf("Error marshaling keyspace: %v", err) + log.Error(fmt.Sprintf("Error marshaling keyspace: %v", err)) continue } err = os.WriteFile(path.Join(vschemaPersistenceDir, ksName+".json"), jsonBytes, 0o644) if err != nil { - log.Errorf("Error writing keyspace file: %v", err) + log.Error(fmt.Sprintf("Error writing keyspace file: %v", err)) } - log.Infof("Persisted keyspace %v to %v", ksName, vschemaPersistenceDir) + log.Info(fmt.Sprintf("Persisted keyspace %v to %v", ksName, vschemaPersistenceDir)) } } diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go index 8736714b605..29a6f272fac 100644 --- a/go/cmd/vtcombo/main.go +++ b/go/cmd/vtcombo/main.go @@ -22,6 +22,8 @@ limitations under the License. 
package main import ( + "fmt" + "vitess.io/vitess/go/cmd/vtcombo/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" @@ -33,7 +35,7 @@ func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) exit.Return(1) } } diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go index 5564cfa91a0..d5d8b342533 100644 --- a/go/cmd/vtctl/vtctl.go +++ b/go/cmd/vtctl/vtctl.go @@ -139,7 +139,8 @@ func main() { TruncateErrLen: servenv.TruncateErrLen, }) if err != nil { - log.Fatalf("cannot initialize sql parser: %v", err) + log.Error(fmt.Sprintf("cannot initialize sql parser: %v", err)) + os.Exit(1) } // (TODO:ajm188) . @@ -172,7 +173,7 @@ func main() { os.Args = append([]string{"vtctldclient"}, args[1:]...) if err := command.Root.ExecuteContext(ctx); err != nil { - log.Errorf("action failed: %v %v", action, err) + log.Error(fmt.Sprintf("action failed: %v %v", action, err)) exit.Return(255) } case strings.EqualFold(action, "LegacyVtctlCommand"): @@ -181,7 +182,7 @@ func main() { args = args[1:] fallthrough default: - log.Warningf("WARNING: vtctl should only be used for VDiff v1 workflows. Please use VDiff v2 and consider using vtctldclient for all other commands.") + log.Warn("WARNING: vtctl should only be used for VDiff v1 workflows. Please use VDiff v2 and consider using vtctldclient for all other commands.") wr := wrangler.New(env, logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) if args[0] == "--" { @@ -199,7 +200,7 @@ func main() { case nil: // keep going default: - log.Errorf("action failed: %v %v", action, err) + log.Error(fmt.Sprintf("action failed: %v %v", action, err)) exit.Return(255) } } diff --git a/go/cmd/vtctl/vtctl_unix.go b/go/cmd/vtctl/vtctl_unix.go index c65b1d9978d..a7c2ae0a7d4 100644 --- a/go/cmd/vtctl/vtctl_unix.go +++ b/go/cmd/vtctl/vtctl_unix.go @@ -19,6 +19,7 @@ limitations under the License. 
package main import ( + "fmt" "log/syslog" "vitess.io/vitess/go/vt/log" @@ -28,6 +29,6 @@ func logSyslog(msg string) { if syslogger, err := syslog.New(syslog.LOG_INFO, "vtctl "); err == nil { syslogger.Info(msg) //nolint:errcheck } else { - log.Warningf("cannot connect to syslog: %v", err) + log.Warn(fmt.Sprintf("cannot connect to syslog: %v", err)) } } diff --git a/go/cmd/vtctl/vtctl_windows.go b/go/cmd/vtctl/vtctl_windows.go index 63c5cceb63b..37ebad1f398 100644 --- a/go/cmd/vtctl/vtctl_windows.go +++ b/go/cmd/vtctl/vtctl_windows.go @@ -23,5 +23,5 @@ import ( ) func logSyslog(msg string) { - log.Warningf("windows does not have syslog support") + log.Warn("windows does not have syslog support") } diff --git a/go/cmd/vtctlclient/main.go b/go/cmd/vtctlclient/main.go index 872728cd4c7..0358e5dbe6f 100644 --- a/go/cmd/vtctlclient/main.go +++ b/go/cmd/vtctlclient/main.go @@ -86,7 +86,7 @@ func main() { // We can't do much without a --server flag if server == "" { - log.Error(errors.New("please specify --server to specify the vtctld server to connect to")) + log.Error(fmt.Sprint(errors.New("please specify --server to specify the vtctld server to connect to"))) os.Exit(1) } @@ -105,7 +105,7 @@ func main() { errStr := strings.ReplaceAll(err.Error(), "remote error: ", "") fmt.Printf("%s Error: %s\n", args[0], errStr) - log.Error(err) + log.Error(fmt.Sprint(err)) os.Exit(1) } } diff --git a/go/cmd/vtctld/cli/schema.go b/go/cmd/vtctld/cli/schema.go index 60004b9782d..2dcfae122e8 100644 --- a/go/cmd/vtctld/cli/schema.go +++ b/go/cmd/vtctld/cli/schema.go @@ -18,6 +18,8 @@ package cli import ( "context" + "fmt" + "os" "time" "vitess.io/vitess/go/timer" @@ -58,7 +60,8 @@ func initSchema(ctx context.Context) { timer := timer.NewTimer(interval) controllerFactory, err := schemamanager.GetControllerFactory(schemaChangeController) if err != nil { - log.Fatalf("unable to get a controller factory, error: %v", err) + log.Error(fmt.Sprintf("unable to get a controller factory, error: %v", err)) + os.Exit(1) } timer.Start(func() { @@ -67,7 +70,7 @@ func initSchema(ctx context.Context) { schemamanager.SchemaChangeUser: schemaChangeUser, }) if err != nil { - log.Errorf("failed to get controller, error: %v", err) + log.Error(fmt.Sprintf("failed to get controller, error: %v", err)) return } wr := wrangler.New(env, logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) @@ -77,7 +80,7 @@ func initSchema(ctx context.Context) { schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0, env.Parser()), ) if err != nil { - log.Errorf("Schema change failed, error: %v", err) + log.Error(fmt.Sprintf("Schema change failed, error: %v", err)) } }) servenv.OnClose(func() { timer.Stop() }) diff --git a/go/cmd/vtctld/main.go b/go/cmd/vtctld/main.go index 06329733fdd..34acba81e54 100644 --- a/go/cmd/vtctld/main.go +++ b/go/cmd/vtctld/main.go @@ -17,6 +17,9 @@ limitations under the License. 
package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/vtctld/cli" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/utils" @@ -25,6 +28,7 @@ import ( func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/vtctldclient/command/legacy_shim.go b/go/cmd/vtctldclient/command/legacy_shim.go index a443a5026e1..15490138068 100644 --- a/go/cmd/vtctldclient/command/legacy_shim.go +++ b/go/cmd/vtctldclient/command/legacy_shim.go @@ -92,7 +92,7 @@ func runLegacyCommand(ctx context.Context, args []string) error { errStr := strings.ReplaceAll(err.Error(), "remote error: ", "") fmt.Printf("%s Error: %s\n", flag.Arg(0), errStr) - log.Error(err) + log.Error(fmt.Sprint(err)) } return err diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go index 0043c6f7aa9..c308ac2834e 100644 --- a/go/cmd/vtctldclient/command/reparents.go +++ b/go/cmd/vtctldclient/command/reparents.go @@ -186,7 +186,7 @@ func commandInitShardPrimary(cmd *cobra.Command, args []string) error { } for _, event := range resp.Events { - log.Infof("%v", event) + log.Info(fmt.Sprintf("%v", event)) } return err diff --git a/go/cmd/vtctldclient/main.go b/go/cmd/vtctldclient/main.go index 8ee11ff0753..493dd1798d3 100644 --- a/go/cmd/vtctldclient/main.go +++ b/go/cmd/vtctldclient/main.go @@ -18,6 +18,7 @@ package main import ( "flag" + "fmt" "vitess.io/vitess/go/acl" "vitess.io/vitess/go/cmd/vtctldclient/command" @@ -59,7 +60,7 @@ func main() { command.Root.SetGlobalNormalizationFunc(flagUtils.NormalizeUnderscoresToDashes) // back to your regularly scheduled cobra programming if err := command.Root.Execute(); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) exit.Return(1) } } diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go index 085565704a7..038a07d9e80 100644 --- a/go/cmd/vtgate/vtgate.go +++ b/go/cmd/vtgate/vtgate.go @@ -17,6 +17,9 @@ limitations under the License. package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/vtgate/cli" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/utils" @@ -25,6 +28,7 @@ import ( func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/vtgateclienttest/main.go b/go/cmd/vtgateclienttest/main.go index 76d64dd46e2..82de6458219 100644 --- a/go/cmd/vtgateclienttest/main.go +++ b/go/cmd/vtgateclienttest/main.go @@ -17,6 +17,9 @@ limitations under the License. 
package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/vtgateclienttest/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" @@ -28,6 +31,7 @@ func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Exitf("%s", err) + log.Error(fmt.Sprintf("%s", err)) + os.Exit(1) } } diff --git a/go/cmd/vtgateclienttest/services/terminal.go b/go/cmd/vtgateclienttest/services/terminal.go index e6a853ae306..1d6c68453a3 100644 --- a/go/cmd/vtgateclienttest/services/terminal.go +++ b/go/cmd/vtgateclienttest/services/terminal.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "os" "vitess.io/vitess/go/vt/vtgate/vtgateservice" @@ -52,7 +53,8 @@ func (c *terminalClient) Execute( prepared bool, ) (*vtgatepb.Session, *sqltypes.Result, error) { if sql == "quit://" { - log.Fatal("Received quit:// query. Going down.") + log.Error("Received quit:// query. Going down.") + os.Exit(1) } return session, nil, errTerminal } @@ -60,7 +62,8 @@ func (c *terminalClient) Execute( func (c *terminalClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { if len(sqlList) == 1 { if sqlList[0] == "quit://" { - log.Fatal("Received quit:// query. Going down.") + log.Error("Received quit:// query. Going down.") + os.Exit(1) } } return session, nil, errTerminal @@ -92,7 +95,7 @@ func (c *terminalClient) VStream(ctx context.Context, tabletType topodatapb.Tabl func (c *terminalClient) HandlePanic(err *error) { if x := recover(); x != nil { - log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) + log.Error(fmt.Sprintf("Uncaught panic:\n%v\n%s", x, tb.Stack(4))) *err = fmt.Errorf("uncaught panic: %v", x) } } diff --git a/go/cmd/vtorc/cli/cli.go b/go/cmd/vtorc/cli/cli.go index 94f3e275b3e..7e65121c155 100644 --- a/go/cmd/vtorc/cli/cli.go +++ b/go/cmd/vtorc/cli/cli.go @@ -17,6 +17,9 @@ limitations under the License. package cli import ( + "fmt" + "os" + "github.com/spf13/cobra" "vitess.io/vitess/go/acl" @@ -58,9 +61,10 @@ func run(cmd *cobra.Command, args []string) { config.MarkConfigurationLoaded() // Log final config values to debug if something goes wrong. 
- log.Infof("Running with Configuration - %v", debug.AllSettings()) + log.Info(fmt.Sprintf("Running with Configuration - %v", debug.AllSettings())) if err := server.StartVTOrcDiscovery(); err != nil { - log.Fatalf("Failed to start vtorc: %+v", err) + log.Error(fmt.Sprintf("Failed to start vtorc: %+v", err)) + os.Exit(1) } server.RegisterVTOrcAPIEndpoints() diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go index b4d12d41950..56b0fd65716 100644 --- a/go/cmd/vtorc/main.go +++ b/go/cmd/vtorc/main.go @@ -17,6 +17,9 @@ package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/vtorc/cli" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/utils" @@ -28,6 +31,7 @@ func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go index 8201c34393b..a5aef29f2a6 100644 --- a/go/cmd/vttablet/cli/cli.go +++ b/go/cmd/vttablet/cli/cli.go @@ -206,7 +206,7 @@ func initConfig(tabletAlias *topodatapb.TabletAlias, collationEnv *collations.En } } gotBytes, _ := yaml2.Marshal(config) - log.Infof("Loaded config file %s successfully:\n%s", tabletConfig, gotBytes) + log.Info(fmt.Sprintf("Loaded config file %s successfully:\n%s", tabletConfig, gotBytes)) var ( mycnf *mysqlctl.Mycnf diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index ad65dd54990..b71f353b934 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -18,6 +18,9 @@ limitations under the License. package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/vttablet/cli" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/utils" @@ -26,6 +29,7 @@ import ( func main() { cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/vttestserver/cli/main.go b/go/cmd/vttestserver/cli/main.go index fd7287a38c7..7d6eb634627 100644 --- a/go/cmd/vttestserver/cli/main.go +++ b/go/cmd/vttestserver/cli/main.go @@ -20,6 +20,7 @@ package cli import ( "encoding/json" "errors" + "fmt" "os" "os/signal" "path" @@ -321,8 +322,8 @@ func runCluster() (cluster vttest.LocalCluster, err error) { return } - log.Infof("Starting local cluster...") - log.Infof("config: %#v", config) + log.Info("Starting local cluster...") + log.Info(fmt.Sprintf("config: %#v", config)) cluster = vttest.LocalCluster{ Config: config, Env: env, diff --git a/go/cmd/vttestserver/cli/main_test.go b/go/cmd/vttestserver/cli/main_test.go index fee457d9640..10696d0439c 100644 --- a/go/cmd/vttestserver/cli/main_test.go +++ b/go/cmd/vttestserver/cli/main_test.go @@ -278,11 +278,11 @@ func TestExternalTopoServerConsul(t *testing.T) { defer func() { // Alerts command did not run successful if err := cmd.Process.Kill(); err != nil { - log.Errorf("cmd process kill has an error: %v", err) + log.Error(fmt.Sprintf("cmd process kill has an error: %v", err)) } // Alerts command did not run successful if err := cmd.Wait(); err != nil { - log.Errorf("cmd process wait has an error: %v", err) + log.Error(fmt.Sprintf("cmd process wait has an error: %v", err)) } }() diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go index 08a7d247784..96427e2e171 100644 --- a/go/cmd/vttestserver/main.go +++ b/go/cmd/vttestserver/main.go @@ -17,6 +17,9 @@ limitations under the License. 
package main import ( + "fmt" + "os" + "vitess.io/vitess/go/cmd/vttestserver/cli" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/utils" @@ -26,6 +29,7 @@ func main() { cmd := cli.New() cmd.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cmd.Execute(); err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } diff --git a/go/cmd/zk/command/cat.go b/go/cmd/zk/command/cat.go index 1087c508058..b5aff942305 100644 --- a/go/cmd/zk/command/cat.go +++ b/go/cmd/zk/command/cat.go @@ -66,7 +66,7 @@ func commandCat(cmd *cobra.Command, args []string) error { if err != nil { hasError = true if !catArgs.Force || err != zk.ErrNoNode { - log.Warningf("cat: cannot access %v: %v", zkPath, err) + log.Warn(fmt.Sprintf("cat: cannot access %v: %v", zkPath, err)) } continue } @@ -78,7 +78,7 @@ func commandCat(cmd *cobra.Command, args []string) error { if catArgs.DecodeProto { decoded, err = topo.DecodeContent(zkPath, data, false) if err != nil { - log.Warningf("cat: cannot proto decode %v: %v", zkPath, err) + log.Warn(fmt.Sprintf("cat: cannot proto decode %v: %v", zkPath, err)) decoded = string(data) } } else { diff --git a/go/cmd/zk/command/chmod.go b/go/cmd/zk/command/chmod.go index 4c5db9c077c..399241e2b5f 100644 --- a/go/cmd/zk/command/chmod.go +++ b/go/cmd/zk/command/chmod.go @@ -66,7 +66,7 @@ func commandChmod(cmd *cobra.Command, args []string) error { aclv, _, err := fs.Conn.GetACL(cmd.Context(), zkPath) if err != nil { hasError = true - log.Warningf("chmod: cannot set access %v: %v", zkPath, err) + log.Warn(fmt.Sprintf("chmod: cannot set access %v: %v", zkPath, err)) continue } if addPerms { @@ -77,7 +77,7 @@ func commandChmod(cmd *cobra.Command, args []string) error { err = fs.Conn.SetACL(cmd.Context(), zkPath, aclv, -1) if err != nil { hasError = true - log.Warningf("chmod: cannot set access %v: %v", zkPath, err) + log.Warn(fmt.Sprintf("chmod: cannot set access %v: %v", zkPath, err)) continue } } diff --git a/go/cmd/zk/command/edit.go b/go/cmd/zk/command/edit.go index 90348161502..9244081ab21 100644 --- a/go/cmd/zk/command/edit.go +++ b/go/cmd/zk/command/edit.go @@ -50,7 +50,7 @@ func commandEdit(cmd *cobra.Command, args []string) error { data, stat, err := fs.Conn.Get(cmd.Context(), zkPath) if err != nil { if !editArgs.Force || err != zk.ErrNoNode { - log.Warningf("edit: cannot access %v: %v", zkPath, err) + log.Warn(fmt.Sprintf("edit: cannot access %v: %v", zkPath, err)) } return fmt.Errorf("edit: cannot access %v: %v", zkPath, err) } diff --git a/go/cmd/zk/command/ls.go b/go/cmd/zk/command/ls.go index 60245d07974..761a73d1deb 100644 --- a/go/cmd/zk/command/ls.go +++ b/go/cmd/zk/command/ls.go @@ -88,7 +88,7 @@ func commandLs(cmd *cobra.Command, args []string) error { if err != nil { hasError = true if !lsArgs.Force || err != zk.ErrNoNode { - log.Warningf("ls: cannot access %v: %v", zkPath, err) + log.Warn(fmt.Sprintf("ls: cannot access %v: %v", zkPath, err)) } } @@ -114,7 +114,7 @@ func commandLs(cmd *cobra.Command, args []string) error { _, stat, err := fs.Conn.Exists(cmd.Context(), localPath) if err != nil { if !lsArgs.Force || err != zk.ErrNoNode { - log.Warningf("ls: cannot access: %v: %v", localPath, err) + log.Warn(fmt.Sprintf("ls: cannot access: %v: %v", localPath, err)) } } else { stats[i] = stat diff --git a/go/cmd/zk/command/rm.go b/go/cmd/zk/command/rm.go index eabe5ed8db8..2ae708f686e 100644 --- a/go/cmd/zk/command/rm.go +++ b/go/cmd/zk/command/rm.go @@ -79,7 +79,7 @@ func commandRm(cmd *cobra.Command, args []string) error { } if err != nil 
&& (!rmArgs.Force || err != zk.ErrNoNode) { hasError = true - log.Warningf("rm: cannot delete %v: %v", zkPath, err) + log.Warn(fmt.Sprintf("rm: cannot delete %v: %v", zkPath, err)) } } if hasError { diff --git a/go/cmd/zk/command/stat.go b/go/cmd/zk/command/stat.go index 08653fc945b..3690f92a98a 100644 --- a/go/cmd/zk/command/stat.go +++ b/go/cmd/zk/command/stat.go @@ -60,7 +60,7 @@ func commandStat(cmd *cobra.Command, args []string) error { if err != nil { hasError = true if !statArgs.Force || err != zk.ErrNoNode { - log.Warningf("stat: cannot access %v: %v", zkPath, err) + log.Warn(fmt.Sprintf("stat: cannot access %v: %v", zkPath, err)) } continue } diff --git a/go/cmd/zk/command/watch.go b/go/cmd/zk/command/watch.go index 7d6de784718..ab18c4cf89f 100644 --- a/go/cmd/zk/command/watch.go +++ b/go/cmd/zk/command/watch.go @@ -52,13 +52,13 @@ func commandWatch(cmd *cobra.Command, args []string) error { case <-cmd.Context().Done(): return nil case event := <-eventChan: - log.Infof("watch: event %v: %v", event.Path, event) + log.Info(fmt.Sprintf("watch: event %v: %v", event.Path, event)) if event.Type == zk.EventNodeDataChanged { data, stat, watch, err := fs.Conn.GetW(cmd.Context(), event.Path) if err != nil { return fmt.Errorf("ERROR: failed to watch %v", err) } - log.Infof("watch: %v %v\n", event.Path, stat) + log.Info(fmt.Sprintf("watch: %v %v\n", event.Path, stat)) println(data) go func() { eventChan <- <-watch @@ -66,7 +66,7 @@ func commandWatch(cmd *cobra.Command, args []string) error { } else if event.State == zk.StateDisconnected { return nil } else if event.Type == zk.EventNodeDeleted { - log.Infof("watch: %v deleted\n", event.Path) + log.Info(fmt.Sprintf("watch: %v deleted\n", event.Path)) } else { // Most likely a session event - try t _, _, watch, err := fs.Conn.GetW(cmd.Context(), event.Path) diff --git a/go/cmd/zk/zkcmd.go b/go/cmd/zk/zkcmd.go index e1fe11c65cd..d1ab04d2636 100644 --- a/go/cmd/zk/zkcmd.go +++ b/go/cmd/zk/zkcmd.go @@ -18,6 +18,7 @@ package main import ( "context" + "fmt" "os" "os/signal" @@ -42,7 +43,7 @@ func main() { command.Root.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) // Run the command. if err := command.Root.ExecuteContext(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) exit.Return(1) } } diff --git a/go/cmd/zkctl/zkctl.go b/go/cmd/zkctl/zkctl.go index a86ba25db44..8c37a155cb8 100644 --- a/go/cmd/zkctl/zkctl.go +++ b/go/cmd/zkctl/zkctl.go @@ -17,6 +17,8 @@ limitations under the License. 
package main import ( + "fmt" + "vitess.io/vitess/go/cmd/zkctl/command" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" @@ -28,7 +30,7 @@ func main() { command.Root.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := command.Root.Execute(); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) exit.Return(1) } } diff --git a/go/cmd/zkctld/cli/zkctld.go b/go/cmd/zkctld/cli/zkctld.go index 5ac3520868e..3865c8a240a 100644 --- a/go/cmd/zkctld/cli/zkctld.go +++ b/go/cmd/zkctld/cli/zkctld.go @@ -71,25 +71,25 @@ func run(cmd *cobra.Command, args []string) error { zkd := zkctl.NewZkd(zkConfig) if zkd.Inited() { - log.Infof("already initialized, starting without init...") + log.Info("already initialized, starting without init...") if err := zkd.Start(); err != nil { return fmt.Errorf("failed start: %v", err) } } else { - log.Infof("initializing...") + log.Info("initializing...") if err := zkd.Init(); err != nil { return fmt.Errorf("failed init: %v", err) } } - log.Infof("waiting for signal or server shutdown...") + log.Info("waiting for signal or server shutdown...") sig := make(chan os.Signal, 1) signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) select { case <-zkd.Done(): - log.Infof("server shut down on its own") + log.Info("server shut down on its own") case <-sig: - log.Infof("signal received, shutting down server") + log.Info("signal received, shutting down server") // Action to perform if there is an error if err := zkd.Shutdown(); err != nil { diff --git a/go/cmd/zkctld/zkctld.go b/go/cmd/zkctld/zkctld.go index cf745337c45..3f073b78071 100644 --- a/go/cmd/zkctld/zkctld.go +++ b/go/cmd/zkctld/zkctld.go @@ -20,6 +20,8 @@ limitations under the License. package main import ( + "fmt" + "vitess.io/vitess/go/cmd/zkctld/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" @@ -30,7 +32,7 @@ func main() { defer exit.Recover() cli.Main.SetGlobalNormalizationFunc(utils.NormalizeUnderscoresToDashes) if err := cli.Main.Execute(); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) exit.Return(1) } } diff --git a/go/event/syslogger/fake_logger.go b/go/event/syslogger/fake_logger.go index 852ca2a72a6..9e952786251 100644 --- a/go/event/syslogger/fake_logger.go +++ b/go/event/syslogger/fake_logger.go @@ -17,7 +17,8 @@ limitations under the License. package syslogger import ( - "fmt" + "log/slog" + "strings" "vitess.io/vitess/go/vt/log" ) @@ -27,59 +28,50 @@ type loggerMsg struct { level string } type testLogger struct { - logs []loggerMsg - savedInfof func(format string, args ...any) - savedWarningf func(format string, args ...any) - savedErrorf func(format string, args ...any) + handler *log.CaptureHandler + restore func() } func NewTestLogger() *testLogger { - tl := &testLogger{ - savedInfof: log.Infof, - savedWarningf: log.Warningf, - savedErrorf: log.Errorf, + handler := log.NewCaptureHandler() + restore := log.SetLogger(slog.New(handler)) + + return &testLogger{ + handler: handler, + restore: restore, } - log.Infof = tl.recordInfof - log.Warningf = tl.recordWarningf - log.Errorf = tl.recordErrorf - return tl } func (tl *testLogger) Close() { - log.Infof = tl.savedInfof - log.Warningf = tl.savedWarningf - log.Errorf = tl.savedErrorf -} - -func (tl *testLogger) recordInfof(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - tl.logs = append(tl.logs, loggerMsg{msg, "INFO"}) - tl.savedInfof(msg) -} - -func (tl *testLogger) recordWarningf(format string, args ...any) { - msg := fmt.Sprintf(format, args...) 
- tl.logs = append(tl.logs, loggerMsg{msg, "WARNING"}) - tl.savedWarningf(msg) -} - -func (tl *testLogger) recordErrorf(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - tl.logs = append(tl.logs, loggerMsg{msg, "ERROR"}) - tl.savedErrorf(msg) + if tl.restore != nil { + tl.restore() + } } func (tl *testLogger) getLog() loggerMsg { - if len(tl.logs) > 0 { - return tl.logs[len(tl.logs)-1] + record, ok := tl.handler.Last() + if ok { + return loggerMsg{ + msg: record.Message, + level: formatLevel(record.Level), + } } return loggerMsg{"no logs!", "ERROR"} } func (tl *testLogger) GetAllLogs() []string { var logs []string - for _, l := range tl.logs { - logs = append(logs, l.level+":"+l.msg) + for _, record := range tl.handler.Records() { + level := formatLevel(record.Level) + logs = append(logs, level+":"+record.Message) } return logs } + +func formatLevel(level slog.Level) string { + if level == slog.LevelWarn { + return "WARNING" + } + + return strings.ToUpper(level.String()) +} diff --git a/go/event/syslogger/fake_logger_test.go b/go/event/syslogger/fake_logger_test.go index df4a8f8294e..ff6450ddef2 100644 --- a/go/event/syslogger/fake_logger_test.go +++ b/go/event/syslogger/fake_logger_test.go @@ -20,10 +20,13 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/log" ) func TestGetLogsForNoLogs(t *testing.T) { tl := NewTestLogger() + defer tl.Close() errLoggerMsg := tl.getLog() want := loggerMsg{ @@ -36,9 +39,11 @@ func TestGetLogsForNoLogs(t *testing.T) { func TestGetAllLogs(t *testing.T) { tl := NewTestLogger() - tl.recordInfof("Test info log") - tl.recordErrorf("Test error log") - tl.recordWarningf("Test warning log") + defer tl.Close() + + log.Info("Test info log") + log.Error("Test error log") + log.Warn("Test warning log") want := []string{"INFO:Test info log", "ERROR:Test error log", "WARNING:Test warning log"} loggerMsgs := tl.GetAllLogs() diff --git a/go/event/syslogger/syslogger.go b/go/event/syslogger/syslogger.go index 234f2b5a712..1d2b86a1fb8 100644 --- a/go/event/syslogger/syslogger.go +++ b/go/event/syslogger/syslogger.go @@ -94,55 +94,55 @@ func listener(ev Syslogger) { if writer != nil { err = writer.Emerg(msg) } else { - log.Errorf(msg) + log.Error(msg) } case syslog.LOG_ALERT: if writer != nil { err = writer.Alert(msg) } else { - log.Errorf(msg) + log.Error(msg) } case syslog.LOG_CRIT: if writer != nil { err = writer.Crit(msg) } else { - log.Errorf(msg) + log.Error(msg) } case syslog.LOG_ERR: if writer != nil { err = writer.Err(msg) } else { - log.Errorf(msg) + log.Error(msg) } case syslog.LOG_WARNING: if writer != nil { err = writer.Warning(msg) } else { - log.Warningf(msg) + log.Warn(msg) } case syslog.LOG_NOTICE: if writer != nil { err = writer.Notice(msg) } else { - log.Infof(msg) + log.Info(msg) } case syslog.LOG_INFO: if writer != nil { err = writer.Info(msg) } else { - log.Infof(msg) + log.Info(msg) } case syslog.LOG_DEBUG: if writer != nil { err = writer.Debug(msg) } else { - log.Infof(msg) + log.Info(msg) } default: err = fmt.Errorf("invalid syslog severity: %v", sev) } if err != nil { - log.Errorf("can't write syslog event: %v", err) + log.Error(fmt.Sprintf("can't write syslog event: %v", err)) } } @@ -168,7 +168,7 @@ func initSyslog() { var err error writer, err = syslog.New(syslog.LOG_INFO|syslog.LOG_USER, os.Args[0]) if err != nil { - log.Errorf("can't connect to syslog: %v", err.Error()) + log.Error(fmt.Sprintf("can't connect to syslog: %v", err.Error())) writer = nil } diff --git a/go/exit/exit.go 
b/go/exit/exit.go index 3d0f49e4591..1fa5166c487 100644 --- a/go/exit/exit.go +++ b/go/exit/exit.go @@ -48,6 +48,7 @@ called from main(). See Recover() and Return() for more details. package exit import ( + "fmt" "os" "vitess.io/vitess/go/tb" @@ -84,7 +85,7 @@ func doRecover(err any, recoverAll bool) { exitFunc(int(code)) default: if recoverAll { - log.Errorf("panic: %v", tb.Errorf("%v", err)) + log.Error(fmt.Sprintf("panic: %v", tb.Errorf("%v", err))) exitFunc(255) } else { panic(err) diff --git a/go/flags/endtoend/mysqlctl.txt b/go/flags/endtoend/mysqlctl.txt index e252218fd95..549dcb00295 100644 --- a/go/flags/endtoend/mysqlctl.txt +++ b/go/flags/endtoend/mysqlctl.txt @@ -65,7 +65,9 @@ Flags: --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt index d5c977ce411..c95975c9a7b 100644 --- a/go/flags/endtoend/mysqlctld.txt +++ b/go/flags/endtoend/mysqlctld.txt @@ -94,7 +94,9 @@ Flags: --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/topo2topo.txt b/go/flags/endtoend/topo2topo.txt index 1586e4cae12..9b0805f9840 100644 --- a/go/flags/endtoend/topo2topo.txt +++ b/go/flags/endtoend/topo2topo.txt @@ -28,7 +28,9 @@ Flags: --keep-logs duration keep logs for this long (using ctime) (zero to keep forever) --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtaclcheck.txt b/go/flags/endtoend/vtaclcheck.txt index 9253aec9b01..09f0c3adffa 100644 --- a/go/flags/endtoend/vtaclcheck.txt +++ b/go/flags/endtoend/vtaclcheck.txt @@ -16,7 +16,9 @@ Flags: --keep-logs duration keep logs for this long (using ctime) (zero to keep forever) --keep-logs-by-mtime duration keep logs for this long (using 
mtime) (zero to keep forever) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt index 87bb94fd43c..2c0ef09fb75 100644 --- a/go/flags/endtoend/vtbackup.txt +++ b/go/flags/endtoend/vtbackup.txt @@ -163,7 +163,9 @@ Flags: --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --lock-timeout duration Maximum time to wait when attempting to acquire a lock from the topo server (default 45s) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtbench.txt b/go/flags/endtoend/vtbench.txt index f1449e468e1..66a429e52c7 100644 --- a/go/flags/endtoend/vtbench.txt +++ b/go/flags/endtoend/vtbench.txt @@ -77,7 +77,9 @@ Flags: --keep-logs duration keep logs for this long (using ctime) (zero to keep forever) --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtclient.txt b/go/flags/endtoend/vtclient.txt index b79139b1aad..2b2ea888ae6 100644 --- a/go/flags/endtoend/vtclient.txt +++ b/go/flags/endtoend/vtclient.txt @@ -40,7 +40,9 @@ Flags: --keep-logs duration keep logs for this long (using ctime) (zero to keep forever) --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt index edfb1058bf4..8275f9e1f31 100644 --- a/go/flags/endtoend/vtcombo.txt +++ b/go/flags/endtoend/vtcombo.txt @@ -191,8 +191,10 @@ Flags: --lock-tables-timeout duration How long to keep the table locked before timing out (default 1m0s) --lock-timeout duration Maximum time to 
wait when attempting to acquire a lock from the topo server (default 45s) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-queries-to-file string Enable query logging to the specified file --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtctlclient.txt b/go/flags/endtoend/vtctlclient.txt index ac6b76fcf42..ad3439847c9 100644 --- a/go/flags/endtoend/vtctlclient.txt +++ b/go/flags/endtoend/vtctlclient.txt @@ -25,7 +25,9 @@ Usage of vtctlclient: --keep-logs duration keep logs for this long (using ctime) (zero to keep forever) --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_link string If non-empty, add symbolic links in this directory to the log files diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt index 5f3706db4da..56062a6e76e 100644 --- a/go/flags/endtoend/vtctld.txt +++ b/go/flags/endtoend/vtctld.txt @@ -95,7 +95,9 @@ Flags: --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --lock-timeout duration Maximum time to wait when attempting to acquire a lock from the topo server (default 45s) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtctldclient.txt b/go/flags/endtoend/vtctldclient.txt index 97214868b41..be5012a3129 100644 --- a/go/flags/endtoend/vtctldclient.txt +++ b/go/flags/endtoend/vtctldclient.txt @@ -134,7 +134,9 @@ Flags: -h, --help help for vtctldclient --keep-logs duration keep logs for this long (using ctime) (zero to keep forever) --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_link string If non-empty, add symbolic links in this directory to the log files diff --git a/go/flags/endtoend/vtexplain.txt b/go/flags/endtoend/vtexplain.txt index 5c2aeab0ffa..c36883a9dcb 100644 --- 
a/go/flags/endtoend/vtexplain.txt +++ b/go/flags/endtoend/vtexplain.txt @@ -55,7 +55,9 @@ Flags: --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt index 2e2cc5b5cd6..35832f8efab 100644 --- a/go/flags/endtoend/vtgate.txt +++ b/go/flags/endtoend/vtgate.txt @@ -113,8 +113,10 @@ Flags: --lock-heartbeat-time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s) --lock-timeout duration Maximum time to wait when attempting to acquire a lock from the topo server (default 45s) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-queries-to-file string Enable query logging to the specified file --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtgateclienttest.txt b/go/flags/endtoend/vtgateclienttest.txt index 1e7c624939e..e0c4f5b6933 100644 --- a/go/flags/endtoend/vtgateclienttest.txt +++ b/go/flags/endtoend/vtgateclienttest.txt @@ -49,7 +49,9 @@ Flags: --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt index fd125a2f878..8a789fa5a66 100644 --- a/go/flags/endtoend/vtorc.txt +++ b/go/flags/endtoend/vtorc.txt @@ -56,7 +56,9 @@ Flags: --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --lock-timeout duration Maximum time to wait when attempting to acquire a lock from the topo server (default 45s) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging 
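A hedged usage sketch, not taken from the generated help text above: assuming the two new flags behave as their descriptions state, an operator who wants structured output limited to warn- and error-level entries could start the binary as, for example,

    vtorc --log-structured --log-level=warn <remaining flags unchanged>

Only --log-structured and --log-level come from this change; every other flag listed in these help files is untouched, and the concrete encoding used by structured mode is assumed to be defined in the logging package rather than in the help text shown here.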
--log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index d9a27b62ea6..0c4d77a522f 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -215,9 +215,11 @@ Flags: --lock-tables-timeout duration How long to keep the table locked before timing out (default 1m0s) --lock-timeout duration Maximum time to wait when attempting to acquire a lock from the topo server (default 45s) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-queries Enable query logging to syslog. --log-queries-to-file string Enable query logging to the specified file --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt index d41a6b2c2bc..2415eb10b1f 100644 --- a/go/flags/endtoend/vttestserver.txt +++ b/go/flags/endtoend/vttestserver.txt @@ -82,7 +82,9 @@ Flags: --keyspaces strings Comma separated list of keyspaces (default [test_keyspace]) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --log-err-stacks log stack traces for errors + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/flags/endtoend/zk.txt b/go/flags/endtoend/zk.txt index 91d0c053f63..44ee23d9487 100644 --- a/go/flags/endtoend/zk.txt +++ b/go/flags/endtoend/zk.txt @@ -33,7 +33,9 @@ Flags: -h, --help help for zk --keep-logs duration keep logs for this long (using ctime) (zero to keep forever) --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log-level string minimum structured logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --purge-logs-interval duration how often try to remove old logs (default 1h0m0s) --security-policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --server string server(s) to connect to diff --git a/go/flags/endtoend/zkctl.txt b/go/flags/endtoend/zkctl.txt index 178d552fd8f..a7140603825 100644 --- a/go/flags/endtoend/zkctl.txt +++ b/go/flags/endtoend/zkctl.txt @@ -23,7 +23,9 @@ Flags: --keep-logs duration keep logs for this long (using ctime) (zero to keep forever) --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --log-err-stacks log stack traces for errors + --log-level string minimum structured 
logging level: info, warn, debug, or error (default "info") --log-rotate-max-size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --log-structured enable structured logging --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files diff --git a/go/mysql/auth_server.go b/go/mysql/auth_server.go index 15304c1ac9d..43d4519f047 100644 --- a/go/mysql/auth_server.go +++ b/go/mysql/auth_server.go @@ -23,7 +23,9 @@ import ( "crypto/sha256" "crypto/subtle" "encoding/hex" + "fmt" "net" + "os" "sync" "vitess.io/vitess/go/mysql/sqlerror" @@ -577,7 +579,8 @@ func RegisterAuthServer(name string, authServer AuthServer) { mu.Lock() defer mu.Unlock() if _, ok := authServers[name]; ok { - log.Fatalf("AuthServer named %v already exists", name) + log.Error(fmt.Sprintf("AuthServer named %v already exists", name)) + os.Exit(1) } authServers[name] = authServer } @@ -588,7 +591,8 @@ func GetAuthServer(name string) AuthServer { defer mu.Unlock() authServer, ok := authServers[name] if !ok { - log.Exitf("no AuthServer name %v registered", name) + log.Error(fmt.Sprintf("no AuthServer name %v registered", name)) + os.Exit(1) } return authServer } diff --git a/go/mysql/auth_server_clientcert.go b/go/mysql/auth_server_clientcert.go index 02e82611e14..4362a1eb886 100644 --- a/go/mysql/auth_server_clientcert.go +++ b/go/mysql/auth_server_clientcert.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "net" + "os" "vitess.io/vitess/go/vt/log" ) @@ -37,7 +38,8 @@ func InitAuthServerClientCert(clientcertAuthMethod string, caValue string) { return } if clientcertAuthMethod != string(MysqlClearPassword) && clientcertAuthMethod != string(MysqlDialog) { - log.Exitf("Invalid mysql_clientcert_auth_method value: only support mysql_clear_password or dialog") + log.Error("Invalid mysql_clientcert_auth_method value: only support mysql_clear_password or dialog") + os.Exit(1) } ascc := newAuthServerClientCert(clientcertAuthMethod) @@ -56,7 +58,8 @@ func newAuthServerClientCert(clientcertAuthMethod string) *AuthServerClientCert case MysqlDialog: authMethod = NewMysqlDialogAuthMethod(ascc, ascc, "") default: - log.Exitf("Invalid mysql_clientcert_auth_method value: only support mysql_clear_password or dialog") + log.Error("Invalid mysql_clientcert_auth_method value: only support mysql_clear_password or dialog") + os.Exit(1) } ascc.methods = []AuthMethod{authMethod} diff --git a/go/mysql/auth_server_static.go b/go/mysql/auth_server_static.go index 03d6a70026c..717f9c6302f 100644 --- a/go/mysql/auth_server_static.go +++ b/go/mysql/auth_server_static.go @@ -20,6 +20,7 @@ import ( "bytes" "crypto/subtle" "encoding/json" + "fmt" "net" "os" "os/signal" @@ -82,12 +83,13 @@ func InitAuthServerStatic(mysqlAuthServerStaticFile, mysqlAuthServerStaticString // Check parameters. if mysqlAuthServerStaticFile == "" && mysqlAuthServerStaticString == "" { // Not configured, nothing to do. - log.Infof("Not configuring AuthServerStatic, as mysql_auth_server_static_file and mysql_auth_server_static_string are empty") + log.Info("Not configuring AuthServerStatic, as mysql_auth_server_static_file and mysql_auth_server_static_string are empty") return } if mysqlAuthServerStaticFile != "" && mysqlAuthServerStaticString != "" { // Both parameters specified, can only use one. 
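// A minimal illustrative sketch, not taken from this patch: the mechanical
// rewrite applied throughout the Go hunks in this diff. The old glog-style
// helpers (Infof/Warningf/Errorf/Exitf/Fatalf) accepted printf arguments and,
// for Exitf/Fatalf, terminated the process; the structured entry points used
// in the + lines appear to take a plain message string, so formatting moves
// into fmt.Sprintf and termination becomes an explicit os.Exit(1). slog below
// is only a stand-in for vitess.io/vitess/go/vt/log, whose implementation is
// not shown in this section.
package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	file := "auth_static.json" // illustrative path, not from the patch
	if _, err := os.ReadFile(file); err != nil {
		// Before: log.Exitf("Failed to populate entries from file: %v", file)
		// After:  log.Error(fmt.Sprintf(...)) followed by an explicit exit.
		slog.Error(fmt.Sprintf("Failed to populate entries from file: %v", file))
		os.Exit(1)
	}
	slog.Info("auth entries loaded")
}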
- log.Exitf("Both mysql_auth_server_static_file and mysql_auth_server_static_string specified, can only use one.") + log.Error("Both mysql_auth_server_static_file and mysql_auth_server_static_string specified, can only use one.") + os.Exit(1) } // Create and register auth server. @@ -101,7 +103,8 @@ func InitAuthServerStatic(mysqlAuthServerStaticFile, mysqlAuthServerStaticString func RegisterAuthServerStaticFromParams(file, jsonConfig string, reloadInterval time.Duration) { authServerStatic := NewAuthServerStatic(file, jsonConfig, reloadInterval) if len(authServerStatic.entries) <= 0 { - log.Exitf("Failed to populate entries from file: %v", file) + log.Error(fmt.Sprintf("Failed to populate entries from file: %v", file)) + os.Exit(1) } RegisterAuthServer("static", authServerStatic) } @@ -260,7 +263,7 @@ func (a *AuthServerStatic) reload() { if a.file != "" { data, err := os.ReadFile(a.file) if err != nil { - log.Errorf("Failed to read mysql_auth_server_static_file file: %v", err) + log.Error(fmt.Sprintf("Failed to read mysql_auth_server_static_file file: %v", err)) return } jsonBytes = data @@ -268,7 +271,7 @@ func (a *AuthServerStatic) reload() { entries := make(map[string][]*AuthServerStaticEntry) if err := ParseConfig(jsonBytes, &entries); err != nil { - log.Errorf("Error parsing auth server config: %v", err) + log.Error(fmt.Sprintf("Error parsing auth server config: %v", err)) return } @@ -343,7 +346,7 @@ func parseLegacyConfig(jsonBytes []byte, config *map[string][]*AuthServerStaticE if err := decoder.Decode(&legacyConfig); err != nil { return err } - log.Warningf("Config parsed using legacy configuration. Please update to the latest format: {\"user\":[{\"Password\": \"xxx\"}, ...]}") + log.Warn("Config parsed using legacy configuration. Please update to the latest format: {\"user\":[{\"Password\": \"xxx\"}, ...]}") for key, value := range legacyConfig { (*config)[key] = append((*config)[key], value) } diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 1fdb0a453a5..d11680ea12e 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -275,7 +275,7 @@ func newServerConn(conn net.Conn, listener *Listener) *Conn { enabledKeepAlive := false if tcpConn, ok := conn.(*net.TCPConn); ok { if err := setTcpConnProperties(tcpConn, listener.connKeepAlivePeriod); err != nil { - log.Errorf("error in setting tcp properties: %v", err) + log.Error(fmt.Sprintf("error in setting tcp properties: %v", err)) } else { enabledKeepAlive = true } @@ -829,7 +829,7 @@ func (c *Conn) WriteErrorAndLog(format string, args ...any) bool { func (c *Conn) writeErrorAndLog(errorCode sqlerror.ErrorCode, sqlState string, format string, args ...any) bool { if err := c.writeErrorPacket(errorCode, sqlState, format, args...); err != nil { - log.Errorf("Error writing error to %s: %v", c, err) + log.Error(fmt.Sprintf("Error writing error to %s: %v", c, err)) return false } return true @@ -838,7 +838,7 @@ func (c *Conn) writeErrorAndLog(errorCode sqlerror.ErrorCode, sqlState string, f func (c *Conn) writeErrorPacketFromErrorAndLog(err error) bool { werr := c.writeErrorPacketFromError(err) if werr != nil { - log.Errorf("Error writing error to %s: %v", c, werr) + log.Error(fmt.Sprintf("Error writing error to %s: %v", c, werr)) return false } return true @@ -896,7 +896,7 @@ func (c *Conn) handleNextCommand(handler Handler) bool { if err != nil { // Don't log EOF errors. They cause too much spam. 
if err != io.EOF && !strings.Contains(err.Error(), "use of closed network connection") { - log.Errorf("Error reading packet from %s: %v", c, err) + log.Error(fmt.Sprintf("Error reading packet from %s: %v", c, err)) } return false } @@ -955,7 +955,7 @@ func (c *Conn) handleNextCommand(handler Handler) bool { case ComRegisterReplica: return c.handleComRegisterReplica(handler, data) default: - log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data) + log.Error(fmt.Sprintf("Got unhandled packet (default) from %s, returning error: %v", c, data)) c.recycleReadPacket() if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]) { return false @@ -970,7 +970,7 @@ func (c *Conn) handleComRegisterReplica(handler Handler, data []byte) (kontinue replicaHost, replicaPort, replicaUser, replicaPassword, err := c.parseComRegisterReplica(data) if err != nil { - log.Errorf("conn %v: parseComRegisterReplica failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: parseComRegisterReplica failed: %v", c.ID(), err)) return false } if err := handler.ComRegisterReplica(c, replicaHost, replicaPort, replicaUser, replicaPassword); err != nil { @@ -990,14 +990,14 @@ func (c *Conn) handleComBinlogDump(handler Handler, data []byte) (kontinue bool) c.startWriterBuffering() defer func() { if err := c.endWriterBuffering(); err != nil { - log.Errorf("conn %v: flush() failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: flush() failed: %v", c.ID(), err)) kontinue = false } }() logfile, binlogPos, err := c.parseComBinlogDump(data) if err != nil { - log.Errorf("conn %v: parseComBinlogDumpGTID failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: parseComBinlogDumpGTID failed: %v", c.ID(), err)) return false } if err := handler.ComBinlogDump(c, logfile, binlogPos); err != nil { @@ -1014,14 +1014,14 @@ func (c *Conn) handleComBinlogDumpGTID(handler Handler, data []byte) (kontinue b c.startWriterBuffering() defer func() { if err := c.endWriterBuffering(); err != nil { - log.Errorf("conn %v: flush() failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: flush() failed: %v", c.ID(), err)) kontinue = false } }() logFile, logPos, position, err := c.parseComBinlogDumpGTID(data) if err != nil { - log.Errorf("conn %v: parseComBinlogDumpGTID failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: parseComBinlogDumpGTID failed: %v", c.ID(), err)) return false } if err := handler.ComBinlogDumpGTID(c, logFile, logPos, position.GTIDSet); err != nil { @@ -1047,7 +1047,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { stmtID, ok := c.parseComStmtReset(data) c.recycleReadPacket() if !ok { - log.Error("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data) + log.Error(fmt.Sprintf("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data)) if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } @@ -1055,7 +1055,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { prepare, ok := c.PrepareData[stmtID] if !ok { - log.Error("Commands were executed in an improper order from client %v, packet: %v", c.ConnectionID, data) + log.Error(fmt.Sprintf("Commands were executed in an improper order from client %v, packet: %v", c.ConnectionID, data)) if !c.writeErrorAndLog(sqlerror.CRCommandsOutOfSync, sqlerror.SSNetError, "commands were executed in an improper order: %v", data) { return false } @@ -1068,7 +1068,7 @@ 
func (c *Conn) handleComStmtReset(data []byte) bool { } if err := c.writeOKPacket(&PacketOK{statusFlags: c.StatusFlags}); err != nil { - log.Error("Error writing ComStmtReset OK packet to client %v: %v", c.ConnectionID, err) + log.Error(fmt.Sprintf("Error writing ComStmtReset OK packet to client %v: %v", c.ConnectionID, err)) return false } return true @@ -1108,7 +1108,7 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool c.startWriterBuffering() defer func() { if err := c.endWriterBuffering(); err != nil { - log.Errorf("conn %v: flush() failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: flush() failed: %v", c.ID(), err)) kontinue = false } }() @@ -1175,7 +1175,7 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool if err != nil { // We can't send an error in the middle of a stream. // All we can do is abort the send, which will cause a 2013. - log.Errorf("Error in the middle of a stream to %s: %v", c, err) + log.Error(fmt.Sprintf("Error in the middle of a stream to %s: %v", c, err)) return false } @@ -1184,7 +1184,7 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool // was a read operation. if !sendFinished { if err := c.writeEndResult(false, 0, 0, handler.WarningCount(c)); err != nil { - log.Errorf("Error writing result to %s: %v", c, err) + log.Error(fmt.Sprintf("Error writing result to %s: %v", c, err)) return false } } @@ -1198,7 +1198,7 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { c.startWriterBuffering() defer func() { if err := c.endWriterBuffering(); err != nil { - log.Errorf("conn %v: flush() failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: flush() failed: %v", c.ID(), err)) kontinue = false } }() @@ -1209,11 +1209,11 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { if c.Capabilities&CapabilityClientMultiStatements != 0 { queries, err := handler.Env().Parser().SplitStatementToPieces(query) if err != nil { - log.Errorf("Conn %v: Error splitting query: %v", c, err) + log.Error(fmt.Sprintf("Conn %v: Error splitting query: %v", c, err)) return c.writeErrorPacketFromErrorAndLog(err) } if len(queries) != 1 { - log.Errorf("Conn %v: can not prepare multiple statements", c) + log.Error(fmt.Sprintf("Conn %v: can not prepare multiple statements", c)) return c.writeErrorPacketFromErrorAndLog(err) } query = queries[0] @@ -1236,7 +1236,7 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { c.PrepareData[c.StatementID] = prepare if err := c.writePrepare(fld, prepare); err != nil { - log.Error("Error writing prepare data to client %v: %v", c.ConnectionID, err) + log.Error(fmt.Sprintf("Error writing prepare data to client %v: %v", c.ConnectionID, err)) return false } return true @@ -1252,17 +1252,17 @@ func (c *Conn) handleComSetOption(data []byte) bool { case 1: c.Capabilities &^= CapabilityClientMultiStatements default: - log.Errorf("Got unhandled packet (ComSetOption default) from client %v, returning error: %v", c.ConnectionID, data) + log.Error(fmt.Sprintf("Got unhandled packet (ComSetOption default) from client %v, returning error: %v", c.ConnectionID, data)) if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } } if err := c.writeEndResult(false, 0, 0, 0); err != nil { - log.Errorf("Error writeEndResult error %v ", err) + log.Error(fmt.Sprintf("Error writeEndResult error %v ", err)) return false } } else { 
- log.Errorf("Got unhandled packet (ComSetOption else) from client %v, returning error: %v", c.ConnectionID, data) + log.Error(fmt.Sprintf("Got unhandled packet (ComSetOption else) from client %v, returning error: %v", c.ConnectionID, data)) if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } @@ -1279,7 +1279,7 @@ func (c *Conn) handleComPing() bool { } } else { if err := c.writeOKPacket(&PacketOK{statusFlags: c.StatusFlags}); err != nil { - log.Errorf("Error writing ComPing result to %s: %v", c, err) + log.Error(fmt.Sprintf("Error writing ComPing result to %s: %v", c, err)) return false } } @@ -1293,7 +1293,7 @@ func (c *Conn) handleComQueryMulti(handler Handler, data []byte) (kontinue bool) c.startWriterBuffering() defer func() { if err := c.endWriterBuffering(); err != nil { - log.Errorf("conn %v: flush() failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: flush() failed: %v", c.ID(), err)) kontinue = false } }() @@ -1334,7 +1334,7 @@ func (c *Conn) execQueryMulti(query string, handler Handler) execResult { // If we haven't sent a last packet yet, we should send the end result packet. if firstPacket && needsEndPacket { if err := c.writeEndResult(true, 0, 0, handler.WarningCount(c)); err != nil { - log.Errorf("Error writing result to %s: %v", c, err) + log.Error(fmt.Sprintf("Error writing result to %s: %v", c, err)) return err } } @@ -1405,14 +1405,14 @@ func (c *Conn) execQueryMulti(query string, handler Handler) execResult { if err != nil { // We can't send an error in the middle of a stream. // All we can do is abort the send, which will cause a 2013. - log.Errorf("Error in the middle of a stream to %s: %v", c, err) + log.Error(fmt.Sprintf("Error in the middle of a stream to %s: %v", c, err)) return connErr } // If we haven't sent the final packet for the last query, we should send that too. if needsEndPacket { if err := c.writeEndResult(false, 0, 0, handler.WarningCount(c)); err != nil { - log.Errorf("Error writing result to %s: %v", c, err) + log.Error(fmt.Sprintf("Error writing result to %s: %v", c, err)) return connErr } } @@ -1426,7 +1426,7 @@ func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { c.startWriterBuffering() defer func() { if err := c.endWriterBuffering(); err != nil { - log.Errorf("conn %v: flush() failed: %v", c.ID(), err) + log.Error(fmt.Sprintf("conn %v: flush() failed: %v", c.ID(), err)) kontinue = false } }() @@ -1440,7 +1440,7 @@ func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { if c.Capabilities&CapabilityClientMultiStatements != 0 { queries, err = handler.Env().Parser().SplitStatementToPieces(query) if err != nil { - log.Errorf("Conn %v: Error splitting query: %v", c, err) + log.Error(fmt.Sprintf("Conn %v: Error splitting query: %v", c, err)) return c.writeErrorPacketFromErrorAndLog(err) } } else { @@ -1525,7 +1525,7 @@ func (c *Conn) execQuery(query string, handler Handler, more bool) execResult { if err != nil { // We can't send an error in the middle of a stream. // All we can do is abort the send, which will cause a 2013. - log.Errorf("Error in the middle of a stream to %s: %v", c, err) + log.Error(fmt.Sprintf("Error in the middle of a stream to %s: %v", c, err)) return connErr } @@ -1534,7 +1534,7 @@ func (c *Conn) execQuery(query string, handler Handler, more bool) execResult { // was a read operation. 
if !sendFinished { if err := c.writeEndResult(more, 0, 0, handler.WarningCount(c)); err != nil { - log.Errorf("Error writing result to %s: %v", c, err) + log.Error(fmt.Sprintf("Error writing result to %s: %v", c, err)) return connErr } } diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go index 39c1a2582f8..cca65a46ea6 100644 --- a/go/mysql/fakesqldb/server.go +++ b/go/mysql/fakesqldb/server.go @@ -411,7 +411,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R // log error if err := callback(&sqltypes.Result{}); err != nil { - log.Errorf("callback failed : %v", err) + log.Error(fmt.Sprintf("callback failed : %v", err)) } return nil } @@ -424,7 +424,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R // log error if err := callback(&sqltypes.Result{}); err != nil { - log.Errorf("callback failed : %v", err) + log.Error(fmt.Sprintf("callback failed : %v", err)) } return nil } @@ -469,7 +469,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R parser := sqlparser.NewTestParser() err = fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v", parser.TruncateForUI(query), db.name) - log.Errorf("Query not found: %s", parser.TruncateForUI(query)) + log.Error("Query not found: " + parser.TruncateForUI(query)) return err } diff --git a/go/mysql/ldapauthserver/auth_server_ldap.go b/go/mysql/ldapauthserver/auth_server_ldap.go index d518ddd986f..5e430d87481 100644 --- a/go/mysql/ldapauthserver/auth_server_ldap.go +++ b/go/mysql/ldapauthserver/auth_server_ldap.go @@ -49,16 +49,17 @@ type AuthServerLdap struct { // Init is public so it can be called from plugin_auth_ldap.go (go/cmd/vtgate) func Init(ldapAuthConfigFile, ldapAuthConfigString, ldapAuthMethod string) { if ldapAuthConfigFile == "" && ldapAuthConfigString == "" { - log.Infof("Not configuring AuthServerLdap because mysql_ldap_auth_config_file and mysql_ldap_auth_config_string are empty") + log.Info("Not configuring AuthServerLdap because mysql_ldap_auth_config_file and mysql_ldap_auth_config_string are empty") return } if ldapAuthConfigFile != "" && ldapAuthConfigString != "" { - log.Infof("Both mysql_ldap_auth_config_file and mysql_ldap_auth_config_string are non-empty, can only use one.") + log.Info("Both mysql_ldap_auth_config_file and mysql_ldap_auth_config_string are non-empty, can only use one.") return } if ldapAuthMethod != string(mysql.MysqlClearPassword) && ldapAuthMethod != string(mysql.MysqlDialog) { - log.Exitf("Invalid mysql_ldap_auth_method value: only support mysql_clear_password or dialog") + log.Error("Invalid mysql_ldap_auth_method value: only support mysql_clear_password or dialog") + os.Exit(1) } ldapAuthServer := &AuthServerLdap{ Client: &ClientImpl{}, @@ -70,11 +71,13 @@ func Init(ldapAuthConfigFile, ldapAuthConfigString, ldapAuthMethod string) { var err error data, err = os.ReadFile(ldapAuthConfigFile) if err != nil { - log.Exitf("Failed to read mysql_ldap_auth_config_file: %v", err) + log.Error(fmt.Sprintf("Failed to read mysql_ldap_auth_config_file: %v", err)) + os.Exit(1) } } if err := json.Unmarshal(data, ldapAuthServer); err != nil { - log.Exitf("Error parsing AuthServerLdap config: %v", err) + log.Error(fmt.Sprintf("Error parsing AuthServerLdap config: %v", err)) + os.Exit(1) } var authMethod mysql.AuthMethod @@ -84,7 +87,8 @@ func Init(ldapAuthConfigFile, ldapAuthConfigString, ldapAuthMethod string) { case mysql.MysqlDialog: authMethod = mysql.NewMysqlDialogAuthMethod(ldapAuthServer, 
ldapAuthServer, "") default: - log.Exitf("Invalid mysql_ldap_auth_method value: only support mysql_clear_password or dialog") + log.Error("Invalid mysql_ldap_auth_method value: only support mysql_clear_password or dialog") + os.Exit(1) } ldapAuthServer.methods = []mysql.AuthMethod{authMethod} @@ -177,13 +181,13 @@ func (lud *LdapUserData) update() { lud.Unlock() err := lud.asl.Connect("tcp", &lud.asl.ServerConfig) if err != nil { - log.Errorf("Error updating LDAP user data: %v", err) + log.Error(fmt.Sprintf("Error updating LDAP user data: %v", err)) return } defer lud.asl.Close() // after the error check groups, err := lud.asl.getGroups(lud.username) if err != nil { - log.Errorf("Error updating LDAP user data: %v", err) + log.Error(fmt.Sprintf("Error updating LDAP user data: %v", err)) return } lud.Lock() diff --git a/go/mysql/replication/primary_status.go b/go/mysql/replication/primary_status.go index 220fce3cfde..f715d51fde5 100644 --- a/go/mysql/replication/primary_status.go +++ b/go/mysql/replication/primary_status.go @@ -65,7 +65,7 @@ func ParsePrimaryStatus(fields map[string]string) PrimaryStatus { var err error status.FilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, fileExecPosStr)) if err != nil { - log.Warningf("Error parsing GTID set %s:%s: %v", file, fileExecPosStr, err) + log.Warn(fmt.Sprintf("Error parsing GTID set %s:%s: %v", file, fileExecPosStr, err)) } } diff --git a/go/mysql/replication/replication_status.go b/go/mysql/replication/replication_status.go index 22e238c9e2b..4b00ad605a9 100644 --- a/go/mysql/replication/replication_status.go +++ b/go/mysql/replication/replication_status.go @@ -363,7 +363,7 @@ func ParseReplicationStatus(fields map[string]string, replica bool) ReplicationS if file != "" && executedPosStr != "" { status.FilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, executedPosStr)) if err != nil { - log.Warningf("Error parsing GTID set %s:%s: %v", file, executedPosStr, err) + log.Warn(fmt.Sprintf("Error parsing GTID set %s:%s: %v", file, executedPosStr, err)) } } @@ -372,7 +372,7 @@ func ParseReplicationStatus(fields map[string]string, replica bool) ReplicationS if file != "" && readPosStr != "" { status.RelayLogSourceBinlogEquivalentPosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, readPosStr)) if err != nil { - log.Warningf("Error parsing GTID set %s:%s: %v", file, readPosStr, err) + log.Warn(fmt.Sprintf("Error parsing GTID set %s:%s: %v", file, readPosStr, err)) } } @@ -381,7 +381,7 @@ func ParseReplicationStatus(fields map[string]string, replica bool) ReplicationS if file != "" && relayPosStr != "" { status.RelayLogFilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, relayPosStr)) if err != nil { - log.Warningf("Error parsing GTID set %s:%s: %v", file, relayPosStr, err) + log.Warn(fmt.Sprintf("Error parsing GTID set %s:%s: %v", file, relayPosStr, err)) } } return status diff --git a/go/mysql/server.go b/go/mysql/server.go index 59f77237fc8..f261e59ed2c 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -19,6 +19,7 @@ package mysql import ( "context" "crypto/tls" + "fmt" "io" "net" "strings" @@ -358,7 +359,7 @@ func (l *Listener) Accept() { if l.PreHandleFunc != nil { conn, err = l.PreHandleFunc(ctx, conn, connectionID) if err != nil { - log.Errorf("mysql_server pre hook: %s", err) + log.Error(fmt.Sprintf("mysql_server pre hook: %s", err)) return } } @@ -380,7 +381,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti // 
Catch panics, and close the connection in any case. defer func() { if x := recover(); x != nil { - log.Errorf("mysql_server caught panic:\n%v\n%s", x, tb.Stack(4)) + log.Error(fmt.Sprintf("mysql_server caught panic:\n%v\n%s", x, tb.Stack(4))) } // We call endWriterBuffering here in case there's a premature return after // startWriterBuffering is called @@ -404,7 +405,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti serverAuthPluginData, err := c.writeHandshakeV10(l.ServerVersion, l.authServer, uint8(l.charset), l.TLSConfig.Load() != nil) if err != nil { if err != io.EOF { - log.Errorf("Cannot send HandshakeV10 packet to %s: %v", c, err) + log.Error(fmt.Sprintf("Cannot send HandshakeV10 packet to %s: %v", c, err)) } return } @@ -415,13 +416,13 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti if err != nil { // Don't log EOF errors. They cause too much spam, same as main read loop. if err != io.EOF { - log.Infof("Cannot read client handshake response from %s: %v, it may not be a valid MySQL client", c, err) + log.Info(fmt.Sprintf("Cannot read client handshake response from %s: %v, it may not be a valid MySQL client", c, err)) } return } user, clientAuthMethod, clientAuthResponse, err := l.parseClientHandshakePacket(c, true, response) if err != nil { - log.Errorf("Cannot parse client handshake response from %s: %v", c, err) + log.Error(fmt.Sprintf("Cannot parse client handshake response from %s: %v", c, err)) return } @@ -431,14 +432,14 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti // SSL was enabled. We need to re-read the auth packet. response, err = c.readEphemeralPacket() if err != nil { - log.Errorf("Cannot read post-SSL client handshake response from %s: %v", c, err) + log.Error(fmt.Sprintf("Cannot read post-SSL client handshake response from %s: %v", c, err)) return } // Returns copies of the data, so we can recycle the buffer. 
user, clientAuthMethod, clientAuthResponse, err = l.parseClientHandshakePacket(c, false, response) if err != nil { - log.Errorf("Cannot parse post-SSL client handshake response from %s: %v", c, err) + log.Error(fmt.Sprintf("Cannot parse post-SSL client handshake response from %s: %v", c, err)) return } c.recycleReadPacket() @@ -495,18 +496,18 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti serverAuthPluginData, err = negotiatedAuthMethod.AuthPluginData() if err != nil { - log.Errorf("Error generating auth switch packet for %s: %v", c, err) + log.Error(fmt.Sprintf("Error generating auth switch packet for %s: %v", c, err)) return } if err := c.writeAuthSwitchRequest(string(negotiatedAuthMethod.Name()), serverAuthPluginData); err != nil { - log.Errorf("Error writing auth switch packet for %s: %v", c, err) + log.Error(fmt.Sprintf("Error writing auth switch packet for %s: %v", c, err)) return } clientAuthResponse, err = c.readEphemeralPacket() if err != nil { - log.Errorf("Error reading auth switch response for %s: %v", c, err) + log.Error(fmt.Sprintf("Error reading auth switch response for %s: %v", c, err)) return } c.recycleReadPacket() @@ -514,7 +515,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti userData, err := negotiatedAuthMethod.HandleAuthPluginData(c, user, serverAuthPluginData, clientAuthResponse, conn.RemoteAddr()) if err != nil { - log.Warningf("Error authenticating user %s using: %s", user, negotiatedAuthMethod.Name()) + log.Warn(fmt.Sprintf("Error authenticating user %s using: %s", user, negotiatedAuthMethod.Name())) c.writeErrorPacketFromError(err) return } @@ -540,7 +541,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti // Negotiation worked, send OK packet. if err := c.writeOKPacket(&PacketOK{statusFlags: c.StatusFlags}); err != nil { - log.Errorf("Cannot write OK packet to %s: %v", c, err) + log.Error(fmt.Sprintf("Cannot write OK packet to %s: %v", c, err)) return } @@ -551,7 +552,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti connectTime := time.Since(acceptTime).Nanoseconds() if threshold := l.SlowConnectWarnThreshold.Load(); threshold != 0 && connectTime > threshold { connSlow.Add(1) - log.Warningf("Slow connection from %s: %v", c, connectTime) + log.Warn(fmt.Sprintf("Slow connection from %s: %v", c, connectTime)) } // Tell our handler that we're finished handshake and are ready to @@ -816,7 +817,7 @@ func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []by if clientFlags&CapabilityClientConnAttr != 0 { clientAttributes, _, err := parseConnAttrs(data, pos) if err != nil { - log.Warningf("Decode connection attributes send by the client: %v", err) + log.Warn(fmt.Sprintf("Decode connection attributes send by the client: %v", err)) } c.Attributes = clientAttributes diff --git a/go/mysql/vault/auth_server_vault.go b/go/mysql/vault/auth_server_vault.go index 76608c42443..8ed7971249c 100644 --- a/go/mysql/vault/auth_server_vault.go +++ b/go/mysql/vault/auth_server_vault.go @@ -55,11 +55,12 @@ type AuthServerVault struct { func InitAuthServerVault(vaultAddr string, vaultTimeout time.Duration, vaultCACert, vaultPath string, vaultCacheTTL time.Duration, vaultTokenFile, vaultRoleID, vaultRoleSecretIDFile, vaultRoleMountPoint string) { // Check critical parameters. 
if vaultAddr == "" { - log.Infof("Not configuring AuthServerVault, as --mysql_auth_vault_addr is empty.") + log.Info("Not configuring AuthServerVault, as --mysql_auth_vault_addr is empty.") return } if vaultPath == "" { - log.Exitf("If using Vault auth server, --mysql_auth_vault_path is required.") + log.Error("If using Vault auth server, --mysql_auth_vault_path is required.") + os.Exit(1) } registerAuthServerVault(vaultAddr, vaultTimeout, vaultCACert, vaultPath, vaultCacheTTL, vaultTokenFile, vaultRoleID, vaultRoleSecretIDFile, vaultRoleMountPoint) @@ -68,7 +69,8 @@ func InitAuthServerVault(vaultAddr string, vaultTimeout time.Duration, vaultCACe func registerAuthServerVault(addr string, timeout time.Duration, caCertPath string, path string, ttl time.Duration, tokenFilePath string, roleID string, secretIDPath string, roleMountPoint string) { authServerVault, err := newAuthServerVault(addr, timeout, caCertPath, path, ttl, tokenFilePath, roleID, secretIDPath, roleMountPoint) if err != nil { - log.Exitf("%s", err) + log.Error(fmt.Sprintf("%s", err)) + os.Exit(1) } mysql.RegisterAuthServer("vault", authServerVault) } @@ -117,7 +119,7 @@ func newAuthServerVault(addr string, timeout time.Duration, caCertPath string, p client, err := vaultapi.NewClient(config) if err != nil || client == nil { - log.Errorf("Error in vault client initialization, will retry: %v", err) + log.Error(fmt.Sprintf("Error in vault client initialization, will retry: %v", err)) } a := &AuthServerVault{ @@ -222,7 +224,7 @@ func (a *AuthServerVault) reloadVault() error { return errors.New("vtgate credentials from Vault empty! Not updating previously cached values") } - log.Infof("reloadVault(): success. Client status: %s", a.vaultClient.GetStatus()) + log.Info("reloadVault(): success. Client status: " + a.vaultClient.GetStatus()) a.mu.Lock() a.entries = entries a.mu.Unlock() @@ -240,14 +242,14 @@ func (a *AuthServerVault) installSignalHandlers() { for range a.sigChan { err := a.reloadVault() if err != nil { - log.Errorf("%s", err) + log.Error(fmt.Sprintf("%s", err)) } } }() } func (a *AuthServerVault) close() { - log.Warningf("Closing AuthServerVault instance.") + log.Warn("Closing AuthServerVault instance.") a.mu.Lock() defer a.mu.Unlock() if a.vaultCacheExpireTicker != nil { @@ -267,7 +269,7 @@ func readFromFile(filePath string) (string, error) { } fileBytes, err := os.ReadFile(filePath) if err != nil { - log.Errorf("Could not read file: %s", filePath) + log.Error("Could not read file: " + filePath) return "", err } return strings.TrimSpace(string(fileBytes)), nil diff --git a/go/pools/refresh_pool.go b/go/pools/refresh_pool.go index 7f56e5fbf9c..e953e0ab384 100644 --- a/go/pools/refresh_pool.go +++ b/go/pools/refresh_pool.go @@ -17,6 +17,7 @@ limitations under the License. 
package pools import ( + "fmt" "sync" "time" @@ -72,7 +73,7 @@ func (pr *poolRefresh) startRefreshTicker() { case <-pr.refreshTicker.C: val, err := pr.refreshCheck() if err != nil { - log.Info(err) + log.Info(fmt.Sprint(err)) } if val { go pr.pool.reopen() diff --git a/go/pools/resource_pool.go b/go/pools/resource_pool.go index 763b99df11f..9b081f43f46 100644 --- a/go/pools/resource_pool.go +++ b/go/pools/resource_pool.go @@ -176,7 +176,7 @@ func (rp *ResourcePool) reopen() { rp.reopenMutex.Lock() // Avoid race, since we can refresh asynchronously defer rp.reopenMutex.Unlock() capacity := int(rp.capacity.Load()) - log.Infof("Draining and reopening resource pool with capacity %d by request", capacity) + log.Info(fmt.Sprintf("Draining and reopening resource pool with capacity %d by request", capacity)) rp.Close() _ = rp.SetCapacity(capacity) if rp.idleTimer != nil { diff --git a/go/pools/smartconnpool/benchmarking/legacy/refresh_pool.go b/go/pools/smartconnpool/benchmarking/legacy/refresh_pool.go index 77fdc6a2ada..f7f4dba8136 100644 --- a/go/pools/smartconnpool/benchmarking/legacy/refresh_pool.go +++ b/go/pools/smartconnpool/benchmarking/legacy/refresh_pool.go @@ -17,6 +17,7 @@ limitations under the License. package legacy import ( + "fmt" "sync" "time" @@ -72,7 +73,7 @@ func (pr *poolRefresh) startRefreshTicker() { case <-pr.refreshTicker.C: val, err := pr.refreshCheck() if err != nil { - log.Info(err) + log.Info(fmt.Sprint(err)) } if val { go pr.pool.reopen() diff --git a/go/pools/smartconnpool/benchmarking/legacy/resource_pool.go b/go/pools/smartconnpool/benchmarking/legacy/resource_pool.go index 902ec718b31..0008791e4ac 100644 --- a/go/pools/smartconnpool/benchmarking/legacy/resource_pool.go +++ b/go/pools/smartconnpool/benchmarking/legacy/resource_pool.go @@ -226,7 +226,7 @@ func (rp *ResourcePool) reopen() { rp.reopenMutex.Lock() // Avoid race, since we can refresh asynchronously defer rp.reopenMutex.Unlock() capacity := int(rp.capacity.Load()) - log.Infof("Draining and reopening resource pool with capacity %d by request", capacity) + log.Info(fmt.Sprintf("Draining and reopening resource pool with capacity %d by request", capacity)) rp.Close() _ = rp.SetCapacity(capacity) if rp.idleTimer != nil { diff --git a/go/pools/smartconnpool/pool.go b/go/pools/smartconnpool/pool.go index 0b936052781..1146ec984d4 100644 --- a/go/pools/smartconnpool/pool.go +++ b/go/pools/smartconnpool/pool.go @@ -18,6 +18,7 @@ package smartconnpool import ( "context" + "fmt" "math/rand/v2" "sync" "sync/atomic" @@ -234,7 +235,7 @@ func (pool *ConnPool[C]) open() { pool.runWorker(closeChan, refreshInterval, func(_ time.Time) bool { refresh, err := pool.config.refresh() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if refresh { go pool.reopen() @@ -267,7 +268,7 @@ func (pool *ConnPool[C]) Close() { defer cancel() if err := pool.CloseWithContext(ctx); err != nil { - log.Errorf("failed to close pool %q: %v", pool.Name, err) + log.Error(fmt.Sprintf("failed to close pool %q: %v", pool.Name, err)) } } @@ -310,7 +311,7 @@ func (pool *ConnPool[C]) reopen() { // all the existing connections, as they're now connected to a stale MySQL // instance. 
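// A minimal illustrative sketch, not taken from this patch: why the pool and
// refresh call sites change log.Info(err) / log.Error(err) into
// log.Info(fmt.Sprint(err)) / log.Error(fmt.Sprint(err)). The printf-style
// helpers accepted arbitrary values, while a string-typed structured API needs
// an explicit conversion; for a non-nil error, fmt.Sprint(err) yields the same
// text as err.Error(). slog below is only a stand-in for the project's log
// package.
package main

import (
	"errors"
	"fmt"
	"log/slog"
)

func main() {
	err := errors.New("connection refresh failed") // illustrative error, not from the patch
	// Before: log.Info(err)
	// After:  log.Info(fmt.Sprint(err))
	slog.Info(fmt.Sprint(err))
}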
if err := pool.setCapacity(ctx, 0); err != nil { - log.Errorf("failed to reopen pool %q: %v", pool.Name, err) + log.Error(fmt.Sprintf("failed to reopen pool %q: %v", pool.Name, err)) } // the second call to setCapacity cannot fail because it's only increasing the number diff --git a/go/stats/export.go b/go/stats/export.go index e9f1f4eae1c..94865c21519 100644 --- a/go/stats/export.go +++ b/go/stats/export.go @@ -32,6 +32,7 @@ import ( "context" "expvar" "fmt" + "os" "strconv" "strings" "sync" @@ -232,7 +233,8 @@ func RegisterPushBackend(name string, backend PushBackend) { pushBackendsLock.Lock() defer pushBackendsLock.Unlock() if _, ok := pushBackends[name]; ok { - log.Fatalf("PushBackend %s already exists; can't register the same name multiple times", name) + log.Error(fmt.Sprintf("PushBackend %s already exists; can't register the same name multiple times", name)) + os.Exit(1) } pushBackends[name] = backend if name == statsBackend { @@ -254,7 +256,7 @@ func emitToBackend(emitPeriod *time.Duration) { for range ticker.C { if err := pushAll(); err != nil { // TODO(aaijazi): This might cause log spam... - log.Warningf("Pushing stats to backend %v failed: %v", statsBackend, err) + log.Warn(fmt.Sprintf("Pushing stats to backend %v failed: %v", statsBackend, err)) } } } diff --git a/go/stats/opentsdb/init.go b/go/stats/opentsdb/init.go index 2b1ceb7679f..f426c9b134d 100644 --- a/go/stats/opentsdb/init.go +++ b/go/stats/opentsdb/init.go @@ -40,7 +40,7 @@ func Init(prefix string) { log.Info("Initializing opentsdb backend...") backend, err := InitWithoutServenv(prefix) if err != nil { - log.Infof("Failed to initialize singleton opentsdb backend: %v", err) + log.Info(fmt.Sprintf("Failed to initialize singleton opentsdb backend: %v", err)) } else { singletonBackend = backend log.Info("Initialized opentsdb backend.") diff --git a/go/stats/prometheusbackend/collectors.go b/go/stats/prometheusbackend/collectors.go index 839d1a93725..ce328e4db08 100644 --- a/go/stats/prometheusbackend/collectors.go +++ b/go/stats/prometheusbackend/collectors.go @@ -17,6 +17,7 @@ limitations under the License. package prometheusbackend import ( + "fmt" "strings" "github.com/prometheus/client_golang/prometheus" @@ -56,7 +57,7 @@ func (mc *metricFuncCollector) Describe(ch chan<- *prometheus.Desc) { func (mc *metricFuncCollector) Collect(ch chan<- prometheus.Metric) { metric, err := prometheus.NewConstMetric(mc.desc, mc.vt, float64(mc.f())) if err != nil { - log.Errorf("Error adding metric: %s", mc.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", mc.desc)) } else { ch <- metric } @@ -93,7 +94,7 @@ func (c *countersWithSingleLabelCollector) Collect(ch chan<- prometheus.Metric) for tag, val := range c.counters.Counts() { metric, err := prometheus.NewConstMetric(c.desc, c.vt, float64(val), tag) if err != nil { - log.Errorf("Error adding metric: %s", c.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", c.desc)) } else { ch <- metric } @@ -131,7 +132,7 @@ func (g *gaugesWithSingleLabelCollector) Collect(ch chan<- prometheus.Metric) { for tag, val := range g.gauges.Counts() { metric, err := prometheus.NewConstMetric(g.desc, g.vt, float64(val), tag) if err != nil { - log.Errorf("Error adding metric: %s", g.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", g.desc)) } else { ch <- metric } @@ -168,7 +169,7 @@ func (c *metricWithMultiLabelsCollector) Collect(ch chan<- prometheus.Metric) { value := float64(val) metric, err := prometheus.NewConstMetric(c.desc, prometheus.CounterValue, value, labelValues...) 
if err != nil { - log.Errorf("Error adding metric: %s", c.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", c.desc)) } else { ch <- metric } @@ -205,7 +206,7 @@ func (c *gaugesWithMultiLabelsCollector) Collect(ch chan<- prometheus.Metric) { value := float64(val) metric, err := prometheus.NewConstMetric(c.desc, prometheus.GaugeValue, value, labelValues...) if err != nil { - log.Errorf("Error adding metric: %s", c.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", c.desc)) } else { ch <- metric } @@ -244,7 +245,7 @@ func (c *metricsFuncWithMultiLabelsCollector) Collect(ch chan<- prometheus.Metri value := float64(val) metric, err := prometheus.NewConstMetric(c.desc, c.vt, value, labelValues...) if err != nil { - log.Errorf("Error adding metric: %s", c.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", c.desc)) } else { ch <- metric } @@ -290,7 +291,7 @@ func (c *timingsCollector) Collect(ch chan<- prometheus.Metric) { makeCumulativeBuckets(c.cutoffs, his.Buckets()), cat) if err != nil { - log.Errorf("Error adding metric: %s", c.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", c.desc)) } else { ch <- metric } @@ -349,7 +350,7 @@ func (c *multiTimingsCollector) Collect(ch chan<- prometheus.Metric) { makeCumulativeBuckets(c.cutoffs, his.Buckets()), labelValues...) if err != nil { - log.Errorf("Error adding metric: %s", c.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", c.desc)) } else { ch <- metric } @@ -393,7 +394,7 @@ func (c *histogramCollector) Collect(ch chan<- prometheus.Metric) { float64(c.h.Total()), makeCumulativeBuckets(c.cutoffs, c.h.Buckets())) if err != nil { - log.Errorf("Error adding metric: %s", c.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", c.desc)) } else { ch <- metric } @@ -428,7 +429,7 @@ func (c *stringMapFuncWithMultiLabelsCollector) Collect(ch chan<- prometheus.Met labelValues := append(strings.Split(lvs, "."), val) metric, err := prometheus.NewConstMetric(c.desc, prometheus.GaugeValue, 1.0, labelValues...) if err != nil { - log.Errorf("Error adding metric: %s", c.desc) + log.Error(fmt.Sprintf("Error adding metric: %s", c.desc)) } else { ch <- metric } diff --git a/go/stats/prometheusbackend/prometheusbackend.go b/go/stats/prometheusbackend/prometheusbackend.go index 39f875380a3..8eae29d7b28 100644 --- a/go/stats/prometheusbackend/prometheusbackend.go +++ b/go/stats/prometheusbackend/prometheusbackend.go @@ -18,6 +18,8 @@ package prometheusbackend import ( "expvar" + "fmt" + "os" "strings" "github.com/prometheus/client_golang/prometheus" @@ -89,7 +91,8 @@ func (be PromBackend) publishPrometheusMetric(name string, v expvar.Var) { // Silently ignore these types since they don't make sense to // export to Prometheus' data model. default: - log.Fatalf("prometheus: Metric type %T (seen for variable: %s) is not covered by type switch. Add it there and to all other plugins which register a NewVarHook.", st, name) + log.Error(fmt.Sprintf("prometheus: Metric type %T (seen for variable: %s) is not covered by type switch. Add it there and to all other plugins which register a NewVarHook.", st, name)) + os.Exit(1) } } diff --git a/go/stats/statsd/statsd.go b/go/stats/statsd/statsd.go index 6101d570bbe..1ff449e8da9 100644 --- a/go/stats/statsd/statsd.go +++ b/go/stats/statsd/statsd.go @@ -91,7 +91,7 @@ func InitWithoutServenv(namespace string) { } statsdC, err := statsd.New(statsdAddress, opts...) 
if err != nil { - log.Errorf("Failed to create statsd client %v", err) + log.Error(fmt.Sprintf("Failed to create statsd client %v", err)) return } sb.namespace = namespace @@ -101,12 +101,12 @@ func InitWithoutServenv(namespace string) { stats.RegisterTimerHook(func(statsName, name string, value int64, timings *stats.Timings) { tags := makeLabels(strings.Split(timings.Label(), "."), name) if err := statsdC.TimeInMilliseconds(statsName, float64(value), tags, sb.sampleRate); err != nil { - log.Errorf("Fail to TimeInMilliseconds %v: %v", statsName, err) + log.Error(fmt.Sprintf("Fail to TimeInMilliseconds %v: %v", statsName, err)) } }) stats.RegisterHistogramHook(func(statsName string, val int64) { if err := statsdC.Histogram(statsName, float64(val), []string{}, sb.sampleRate); err != nil { - log.Errorf("Fail to Histogram for %v: %v", statsName, err) + log.Error(fmt.Sprintf("Fail to Histogram for %v: %v", statsName, err)) } }) } @@ -116,74 +116,74 @@ func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { switch v := kv.Value.(type) { case *stats.Counter: if err := sb.statsdClient.Count(k, v.Get(), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add Counter %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add Counter %v for key %v", v, k)) } case *stats.Gauge: if err := sb.statsdClient.Gauge(k, float64(v.Get()), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add Gauge %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add Gauge %v for key %v", v, k)) } case *stats.GaugeFloat64: if err := sb.statsdClient.Gauge(k, v.Get(), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add GaugeFloat64 %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add GaugeFloat64 %v for key %v", v, k)) } case *stats.GaugeFunc: if err := sb.statsdClient.Gauge(k, float64(v.F()), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add GaugeFunc %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add GaugeFunc %v for key %v", v, k)) } case *stats.CounterFunc: if err := sb.statsdClient.Gauge(k, float64(v.F()), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add CounterFunc %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add CounterFunc %v for key %v", v, k)) } case *stats.CounterDuration: if err := sb.statsdClient.TimeInMilliseconds(k, float64(v.Get().Milliseconds()), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add CounterDuration %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add CounterDuration %v for key %v", v, k)) } case *stats.CounterDurationFunc: if err := sb.statsdClient.TimeInMilliseconds(k, float64(v.F().Milliseconds()), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add CounterDuration %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add CounterDuration %v for key %v", v, k)) } case *stats.GaugeDuration: if err := sb.statsdClient.TimeInMilliseconds(k, float64(v.Get().Milliseconds()), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add GaugeDuration %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add GaugeDuration %v for key %v", v, k)) } case *stats.GaugeDurationFunc: if err := sb.statsdClient.TimeInMilliseconds(k, float64(v.F().Milliseconds()), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add GaugeDuration %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add GaugeDuration %v for key %v", v, k)) } case *stats.CountersWithSingleLabel: for labelVal, val := range v.Counts() { if err := sb.statsdClient.Count(k, val, makeLabel(v.Label(), labelVal), 
sb.sampleRate); err != nil { - log.Errorf("Failed to add CountersWithSingleLabel %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add CountersWithSingleLabel %v for key %v", v, k)) } } case *stats.CountersWithMultiLabels: for labelVals, val := range v.Counts() { if err := sb.statsdClient.Count(k, val, makeLabels(v.Labels(), labelVals), sb.sampleRate); err != nil { - log.Errorf("Failed to add CountersFuncWithMultiLabels %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add CountersFuncWithMultiLabels %v for key %v", v, k)) } } case *stats.CountersFuncWithMultiLabels: for labelVals, val := range v.Counts() { if err := sb.statsdClient.Count(k, val, makeLabels(v.Labels(), labelVals), sb.sampleRate); err != nil { - log.Errorf("Failed to add CountersFuncWithMultiLabels %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add CountersFuncWithMultiLabels %v for key %v", v, k)) } } case *stats.GaugesWithMultiLabels: for labelVals, val := range v.Counts() { if err := sb.statsdClient.Gauge(k, float64(val), makeLabels(v.Labels(), labelVals), sb.sampleRate); err != nil { - log.Errorf("Failed to add GaugesWithMultiLabels %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add GaugesWithMultiLabels %v for key %v", v, k)) } } case *stats.GaugesFuncWithMultiLabels: for labelVals, val := range v.Counts() { if err := sb.statsdClient.Gauge(k, float64(val), makeLabels(v.Labels(), labelVals), sb.sampleRate); err != nil { - log.Errorf("Failed to add GaugesFuncWithMultiLabels %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add GaugesFuncWithMultiLabels %v for key %v", v, k)) } } case *stats.GaugesWithSingleLabel: for labelVal, val := range v.Counts() { if err := sb.statsdClient.Gauge(k, float64(val), makeLabel(v.Label(), labelVal), sb.sampleRate); err != nil { - log.Errorf("Failed to add GaugesWithSingleLabel %v for key %v", v, k) + log.Error(fmt.Sprintf("Failed to add GaugesWithSingleLabel %v for key %v", v, k)) } } case *stats.Timings, *stats.MultiTimings, *stats.Histogram: @@ -201,7 +201,7 @@ func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { if ok { memstatsKey := "memstats." 
+ k if err := sb.statsdClient.Gauge(memstatsKey, memstatsVal, []string{}, sb.sampleRate); err != nil { - log.Errorf("Failed to export %v %v", k, v) + log.Error(fmt.Sprintf("Failed to export %v %v", k, v)) } } } @@ -211,7 +211,7 @@ func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { buildGitRecOnce.Do(func() { checksum := crc32.ChecksumIEEE([]byte(v.Get())) if err := sb.statsdClient.Gauge(k, float64(checksum), []string{}, sb.sampleRate); err != nil { - log.Errorf("Failed to export %v %v", k, v) + log.Error(fmt.Sprintf("Failed to export %v %v", k, v)) } }) } @@ -219,7 +219,7 @@ func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { stats.StringFunc, stats.StringMapFunc: // Silently ignore metrics that does not make sense to be exported to statsd default: - log.Warningf("Silently ignore metrics with key %v [%T]", k, kv.Value) + log.Warn(fmt.Sprintf("Silently ignore metrics with key %v [%T]", k, kv.Value)) } } diff --git a/go/streamlog/streamlog.go b/go/streamlog/streamlog.go index c5f9442bc0d..721c0d28dc3 100644 --- a/go/streamlog/streamlog.go +++ b/go/streamlog/streamlog.go @@ -221,7 +221,7 @@ func (logger *StreamLogger[T]) ServeLogs(url string, logf LogFormatter) { w.(http.Flusher).Flush() } }) - log.Infof("Streaming logs from %s at %v.", logger.Name(), url) + log.Info(fmt.Sprintf("Streaming logs from %s at %v.", logger.Name(), url)) } // LogToFile starts logging to the specified file path and will reopen the diff --git a/go/streamlog/streamlog_windows.go b/go/streamlog/streamlog_windows.go index ef69058b97c..762dcef7c48 100644 --- a/go/streamlog/streamlog_windows.go +++ b/go/streamlog/streamlog_windows.go @@ -25,5 +25,5 @@ import ( ) func setupRotate(ch chan os.Signal) { - log.Warningf("signal based log rotation is not supported on Windows") + log.Warn("signal based log rotation is not supported on Windows") } diff --git a/go/test/endtoend/backup/s3/s3_builtin_test.go b/go/test/endtoend/backup/s3/s3_builtin_test.go index 4f93a749153..19094dc3b5c 100644 --- a/go/test/endtoend/backup/s3/s3_builtin_test.go +++ b/go/test/endtoend/backup/s3/s3_builtin_test.go @@ -18,8 +18,8 @@ package s3 import ( "context" + "fmt" "io" - "log" "os" "os/exec" "path" @@ -34,6 +34,7 @@ import ( "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/backupstats" @@ -59,16 +60,19 @@ func TestMain(m *testing.M) { f := func() int { minioPath, err := exec.LookPath("minio") if err != nil { - log.Fatalf("minio binary not found: %v", err) + log.Error(fmt.Sprintf("minio binary not found: %v", err)) + os.Exit(1) } dataDir, err := os.MkdirTemp("", "") if err != nil { - log.Fatalf("could not create temporary directory: %v", err) + log.Error(fmt.Sprintf("could not create temporary directory: %v", err)) + os.Exit(1) } err = os.MkdirAll(dataDir, 0o755) if err != nil { - log.Fatalf("failed to create MinIO data directory: %v", err) + log.Error(fmt.Sprintf("failed to create MinIO data directory: %v", err)) + os.Exit(1) } cmd := exec.Command(minioPath, "server", dataDir, "--console-address", ":9001") @@ -77,7 +81,8 @@ func TestMain(m *testing.M) { err = cmd.Start() if err != nil { - log.Fatalf("failed to start MinIO: %v", err) + log.Error(fmt.Sprintf("failed to start MinIO: %v", err)) + os.Exit(1) } defer func() { cmd.Process.Kill() @@ -92,13 +97,15 @@ func TestMain(m *testing.M) { client, err := minio.New("localhost:9000", accessKey, secretKey, false) if err != nil { - 
log.Fatalf("failed to create MinIO client: %v", err) + log.Error(fmt.Sprintf("failed to create MinIO client: %v", err)) + os.Exit(1) } waitForMinio(client) err = client.MakeBucket(bucketName, region) if err != nil { - log.Fatalf("failed to create test bucket: %v", err) + log.Error(fmt.Sprintf("failed to create test bucket: %v", err)) + os.Exit(1) } // Same env variables that are used between AWS S3 and Minio @@ -122,7 +129,8 @@ func waitForMinio(client *minio.Client) { } time.Sleep(1 * time.Second) } - log.Fatalf("MinIO server did not become ready in time") + log.Error("MinIO server did not become ready in time") + os.Exit(1) } func checkEnvForS3(t *testing.T) { diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index b7815046bf4..7bedc3aa6e0 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -192,9 +192,9 @@ func firstBackupTest(t *testing.T, removeBackup bool) { cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 1) // backup the replica - log.Infof("taking backup %s", time.Now()) + log.Info(fmt.Sprintf("taking backup %s", time.Now())) dataPointReader := vtBackup(t, false, true, true) - log.Infof("done taking backup %s", time.Now()) + log.Info(fmt.Sprintf("done taking backup %s", time.Now())) // check that the backup shows up in the listing verifyBackupCount(t, shardKsName, len(backups)+1) @@ -259,7 +259,7 @@ func startVtBackup(t *testing.T, initialBackup bool, restartBeforeBackup, disabl go verifyDisableEnableRedoLogs(ctx, t, mysqlSocket.Name()) } - log.Infof("starting backup tablet %s", time.Now()) + log.Info(fmt.Sprintf("starting backup tablet %s", time.Now())) err = localCluster.StartVtbackup(newInitDBFile, initialBackup, keyspaceName, shardName, cell, extraArgs...) if err != nil { return nil, err @@ -335,7 +335,7 @@ func initTablets(t *testing.T, startTablet bool, initShardPrimary bool) { func restore(t *testing.T, tablet *cluster.Vttablet, tabletType string, waitForState string) { // Erase mysql/tablet dir, then start tablet with restore enabled. 
- log.Infof("restoring tablet %s", time.Now()) + log.Info(fmt.Sprintf("restoring tablet %s", time.Now())) resetTabletDirectory(t, *tablet, true) // Start tablets diff --git a/go/test/endtoend/clone/clone_test.go b/go/test/endtoend/clone/clone_test.go index df6782a9ce2..0177348a46b 100644 --- a/go/test/endtoend/clone/clone_test.go +++ b/go/test/endtoend/clone/clone_test.go @@ -53,25 +53,25 @@ func TestMain(m *testing.M) { // Check MySQL version first - skip entire test suite if not supported versionStr, err := mysqlctl.GetVersionString() if err != nil { - log.Infof("Skipping clone tests: unable to get MySQL version: %v", err) + log.Info(fmt.Sprintf("Skipping clone tests: unable to get MySQL version: %v", err)) return 0 } - log.Infof("Detected MySQL version: %s", versionStr) + log.Info("Detected MySQL version: " + versionStr) flavor, version, err := mysqlctl.ParseVersionString(versionStr) if err != nil { - log.Infof("Skipping clone tests: unable to parse MySQL version: %v", err) + log.Info(fmt.Sprintf("Skipping clone tests: unable to parse MySQL version: %v", err)) return 0 } - log.Infof("Parsed flavor: %v, version: %d.%d.%d", flavor, version.Major, version.Minor, version.Patch) + log.Info(fmt.Sprintf("Parsed flavor: %v, version: %d.%d.%d", flavor, version.Major, version.Minor, version.Patch)) // Clone is only supported on MySQL 8.0.17+ if flavor != mysqlctl.FlavorMySQL && flavor != mysqlctl.FlavorPercona { - log.Infof("Skipping clone tests: MySQL CLONE requires MySQL or Percona, got flavor: %v", flavor) + log.Info(fmt.Sprintf("Skipping clone tests: MySQL CLONE requires MySQL or Percona, got flavor: %v", flavor)) return 0 } if version.Major < 8 || (version.Major == 8 && version.Minor == 0 && version.Patch < 17) { - log.Infof("Skipping clone tests: MySQL CLONE requires version 8.0.17+, got: %d.%d.%d", version.Major, version.Minor, version.Patch) + log.Info(fmt.Sprintf("Skipping clone tests: MySQL CLONE requires version 8.0.17+, got: %d.%d.%d", version.Major, version.Minor, version.Patch)) return 0 } @@ -79,19 +79,19 @@ func TestMain(m *testing.M) { cleanVersion := fmt.Sprintf("%d.%d.%d", version.Major, version.Minor, version.Patch) capableOf := mysql.ServerVersionCapableOf(cleanVersion) if capableOf == nil { - log.Infof("Skipping clone tests: unable to get capability checker for version %s", cleanVersion) + log.Info("Skipping clone tests: unable to get capability checker for version " + cleanVersion) return 0 } hasClone, err := capableOf(capabilities.MySQLClonePluginFlavorCapability) if err != nil || !hasClone { - log.Infof("Skipping clone tests: MySQL version %s does not support CLONE plugin", cleanVersion) + log.Info(fmt.Sprintf("Skipping clone tests: MySQL version %s does not support CLONE plugin", cleanVersion)) return 0 } - log.Infof("MySQL version %s supports CLONE plugin, proceeding with tests", cleanVersion) + log.Info(fmt.Sprintf("MySQL version %s supports CLONE plugin, proceeding with tests", cleanVersion)) // Setup EXTRA_MY_CNF for clone plugin if err := setupExtraMyCnf(); err != nil { - log.Errorf("Failed to setup extra MySQL config: %v", err) + log.Error(fmt.Sprintf("Failed to setup extra MySQL config: %v", err)) return 1 } @@ -100,13 +100,13 @@ func TestMain(m *testing.M) { // Start topo server if err := clusterInstance.StartTopo(); err != nil { - log.Errorf("Failed to start topo: %v", err) + log.Error(fmt.Sprintf("Failed to start topo: %v", err)) return 1 } // Initialize cluster with 2 tablets for clone testing if err := initClusterForClone(); err != nil { - log.Errorf("Failed 
to init cluster: %v", err) + log.Error(fmt.Sprintf("Failed to init cluster: %v", err)) return 1 } @@ -115,7 +115,7 @@ func TestMain(m *testing.M) { for _, tablet := range []*cluster.Vttablet{donorTablet, recipientTablet} { if tablet != nil { if err := tablet.MysqlctlProcess.Stop(); err != nil { - log.Errorf("Failed to stop MySQL for tablet %d: %v", tablet.TabletUID, err) + log.Error(fmt.Sprintf("Failed to stop MySQL for tablet %d: %v", tablet.TabletUID, err)) } } } @@ -146,7 +146,7 @@ func setupExtraMyCnf() error { } } - log.Infof("Set EXTRA_MY_CNF to include clone plugin: %s", os.Getenv("EXTRA_MY_CNF")) + log.Info("Set EXTRA_MY_CNF to include clone plugin: " + os.Getenv("EXTRA_MY_CNF")) return nil } @@ -157,7 +157,7 @@ func initClusterForClone() error { if err != nil { return fmt.Errorf("failed to create init DB file: %v", err) } - log.Infof("Created combined init file at: %s", initDBWithClone) + log.Info("Created combined init file at: " + initDBWithClone) var mysqlCtlProcessList []*exec.Cmd @@ -208,7 +208,7 @@ func initClusterForClone() error { return fmt.Errorf("MySQL process failed to start: %v", err) } } - log.Infof("MySQL processes started successfully") + log.Info("MySQL processes started successfully") // Note: We intentionally do NOT register tablets with shards/keyspaces // because we only start MySQL processes (not vttablets). The standard diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index 097a3c04c1b..e37465cb16d 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -244,7 +244,7 @@ func (cluster *LocalProcessCluster) StartTopo() (err error) { cluster.TmpDirectory = path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/tmp_%d", cluster.GetAndReservePort())) cluster.TopoProcess = *TopoProcessInstance(cluster.TopoPort, cluster.GetAndReservePort(), cluster.Hostname, *topoFlavor, "global") - log.Infof("Starting topo server %v on port: %d", *topoFlavor, cluster.TopoPort) + log.Info(fmt.Sprintf("Starting topo server %v on port: %d", *topoFlavor, cluster.TopoPort)) if err = cluster.TopoProcess.Setup(*topoFlavor, cluster); err != nil { log.Error(err.Error()) return @@ -265,7 +265,7 @@ func (cluster *LocalProcessCluster) StartTopo() (err error) { cluster.VtctldProcess = *VtctldProcessInstance(cluster.GetAndReservePort(), cluster.GetAndReservePort(), cluster.TopoProcess.Port, cluster.Hostname, cluster.TmpDirectory) - log.Infof("Starting vtctld server on port: %d", cluster.VtctldProcess.Port) + log.Info(fmt.Sprintf("Starting vtctld server on port: %d", cluster.VtctldProcess.Port)) cluster.VtctldHTTPPort = cluster.VtctldProcess.Port if err = cluster.VtctldProcess.Setup(cluster.Cell, cluster.VtctldExtraArgs...); err != nil { log.Error(err.Error()) @@ -275,7 +275,7 @@ func (cluster *LocalProcessCluster) StartTopo() (err error) { cluster.VtctldClientProcess = *cluster.NewVtctldClientProcessInstance("localhost", cluster.VtctldProcess.GrpcPort, cluster.TmpDirectory) if !cluster.ReusingVTDATAROOT { if err = cluster.VtctldClientProcess.AddCellInfo(cluster.Cell); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return } cluster.VtctldClientProcess.LogDir = cluster.TmpDirectory @@ -394,7 +394,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames totalTabletsRequired = totalTabletsRequired + 1 // + 1 for rdonly } - log.Infof("Starting keyspace: %v", keyspace.Name) + log.Info(fmt.Sprintf("Starting keyspace: %v", keyspace.Name)) if 
keyspace.SidecarDBName == "" { keyspace.SidecarDBName = sidecar.DefaultName } @@ -421,7 +421,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames // Apply Schema SQL if keyspace.SchemaSQL != "" { if err = cluster.VtctldClientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { - log.Errorf("error applying schema: %v, %v", keyspace.SchemaSQL, err) + log.Error(fmt.Sprintf("error applying schema: %v, %v", keyspace.SchemaSQL, err)) return } } @@ -429,16 +429,16 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames // Apply VSchema if keyspace.VSchema != "" { if err = cluster.VtctldClientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { - log.Errorf("error applying vschema: %v, %v", keyspace.VSchema, err) + log.Error(fmt.Sprintf("error applying vschema: %v, %v", keyspace.VSchema, err)) return } } - log.Infof("Done creating keyspace: %v ", keyspace.Name) + log.Info(fmt.Sprintf("Done creating keyspace: %v ", keyspace.Name)) err = cluster.StartVTOrc(vtorcCell, keyspace.Name) if err != nil { - log.Errorf("Error starting VTOrc - %v", err) + log.Error(fmt.Sprintf("Error starting VTOrc - %v", err)) return err } } @@ -450,7 +450,7 @@ func (cluster *LocalProcessCluster) AddShard(keyspaceName string, shardName stri shard := &Shard{ Name: shardName, } - log.Infof("Starting shard: %v", shardName) + log.Info(fmt.Sprintf("Starting shard: %v", shardName)) var mysqlctlProcessList []*exec.Cmd for i := range totalTabletsRequired { // instantiate vttablet object with reserved ports @@ -470,7 +470,7 @@ func (cluster *LocalProcessCluster) AddShard(keyspaceName string, shardName stri tablet.Type = "rdonly" } // Start Mysqlctl process - log.Infof("Starting mysqlctl for table uid %d, mysql port %d", tablet.TabletUID, tablet.MySQLPort) + log.Info(fmt.Sprintf("Starting mysqlctl for table uid %d, mysql port %d", tablet.TabletUID, tablet.MySQLPort)) mysqlctlProcess, err := MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory, !cluster.ReusingVTDATAROOT) if err != nil { return nil, err @@ -484,7 +484,7 @@ func (cluster *LocalProcessCluster) AddShard(keyspaceName string, shardName stri tablet.MysqlctlProcess = *mysqlctlProcess proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { - log.Errorf("error starting mysqlctl process: %v, %v", tablet.MysqlctldProcess, err) + log.Error(fmt.Sprintf("error starting mysqlctl process: %v, %v", tablet.MysqlctldProcess, err)) return nil, err } mysqlctlProcessList = append(mysqlctlProcessList, proc) @@ -528,22 +528,22 @@ func (cluster *LocalProcessCluster) AddShard(keyspaceName string, shardName stri // wait till all mysqlctl is instantiated for _, proc := range mysqlctlProcessList { if err := proc.Wait(); err != nil { - log.Errorf("unable to start mysql process %v: %v", proc, err) + log.Error(fmt.Sprintf("unable to start mysql process %v: %v", proc, err)) return nil, err } } for _, tablet := range shard.Vttablets { - log.Infof("Starting vttablet for tablet uid %d, grpc port %d", tablet.TabletUID, tablet.GrpcPort) + log.Info(fmt.Sprintf("Starting vttablet for tablet uid %d, grpc port %d", tablet.TabletUID, tablet.GrpcPort)) if err := tablet.VttabletProcess.Setup(); err != nil { - log.Errorf("error starting vttablet for tablet uid %d, grpc port %d: %v", tablet.TabletUID, tablet.GrpcPort, err) + log.Error(fmt.Sprintf("error starting vttablet for tablet uid %d, grpc port %d: %v", tablet.TabletUID, tablet.GrpcPort, err)) return nil, err } } // Make 
first tablet as primary if err := cluster.VtctldClientProcess.InitializeShard(keyspaceName, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { - log.Errorf("error running InitializeShard on keyspace %v, shard %v: %v", keyspaceName, shardName, err) + log.Error(fmt.Sprintf("error running InitializeShard on keyspace %v, shard %v: %v", keyspaceName, shardName, err)) return nil, err } return shard, nil @@ -567,7 +567,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard totalTabletsRequired = totalTabletsRequired + 1 // + 1 for rdonly } - log.Infof("Starting keyspace: %v", keyspace.Name) + log.Info(fmt.Sprintf("Starting keyspace: %v", keyspace.Name)) if keyspace.SidecarDBName == "" { keyspace.SidecarDBName = sidecar.DefaultName } @@ -578,7 +578,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard shard := &Shard{ Name: shardName, } - log.Infof("Starting shard: %v", shardName) + log.Info(fmt.Sprintf("Starting shard: %v", shardName)) mysqlctlProcessList = []*exec.Cmd{} for i := 0; i < totalTabletsRequired; i++ { // instantiate vttablet object with reserved ports @@ -597,7 +597,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard tablet.Type = "rdonly" } // Start Mysqlctl process - log.Infof("Starting mysqlctl for table uid %d, mysql port %d", tablet.TabletUID, tablet.MySQLPort) + log.Info(fmt.Sprintf("Starting mysqlctl for table uid %d, mysql port %d", tablet.TabletUID, tablet.MySQLPort)) mysqlctlProcess, err := MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory, !cluster.ReusingVTDATAROOT) if err != nil { return err @@ -605,7 +605,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard tablet.MysqlctlProcess = *mysqlctlProcess proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { - log.Errorf("error starting mysqlctl process: %v, %v", tablet.MysqlctldProcess, err) + log.Error(fmt.Sprintf("error starting mysqlctl process: %v, %v", tablet.MysqlctldProcess, err)) return err } mysqlctlProcessList = append(mysqlctlProcessList, proc) @@ -643,29 +643,29 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // wait till all mysqlctl is instantiated for _, proc := range mysqlctlProcessList { if err = proc.Wait(); err != nil { - log.Errorf("unable to start mysql process %v: %v", proc, err) + log.Error(fmt.Sprintf("unable to start mysql process %v: %v", proc, err)) return err } } for _, tablet := range shard.Vttablets { if !cluster.ReusingVTDATAROOT { if _, err = tablet.VttabletProcess.QueryTablet("create database vt_"+keyspace.Name, keyspace.Name, false); err != nil { - log.Errorf("error creating database for keyspace %v: %v", keyspace.Name, err) + log.Error(fmt.Sprintf("error creating database for keyspace %v: %v", keyspace.Name, err)) return } } - log.Infof("Starting vttablet for tablet uid %d, grpc port %d", tablet.TabletUID, tablet.GrpcPort) + log.Info(fmt.Sprintf("Starting vttablet for tablet uid %d, grpc port %d", tablet.TabletUID, tablet.GrpcPort)) if err = tablet.VttabletProcess.Setup(); err != nil { - log.Errorf("error starting vttablet for tablet uid %d, grpc port %d: %v", tablet.TabletUID, tablet.GrpcPort, err) + log.Error(fmt.Sprintf("error starting vttablet for tablet uid %d, grpc port %d: %v", tablet.TabletUID, tablet.GrpcPort, err)) return } } // Make first tablet as primary if err = cluster.VtctldClientProcess.InitShardPrimary(keyspace.Name, shardName, 
cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { - log.Errorf("error running ISM on keyspace %v, shard %v: %v", keyspace.Name, shardName, err) + log.Error(fmt.Sprintf("error running ISM on keyspace %v, shard %v: %v", keyspace.Name, shardName, err)) return } keyspace.Shards = append(keyspace.Shards, *shard) @@ -685,7 +685,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // Apply Schema SQL if keyspace.SchemaSQL != "" { if err = cluster.VtctldClientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { - log.Errorf("error applying schema: %v, %v", keyspace.SchemaSQL, err) + log.Error(fmt.Sprintf("error applying schema: %v, %v", keyspace.SchemaSQL, err)) return } } @@ -693,12 +693,12 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // Apply VSchema if keyspace.VSchema != "" { if err = cluster.VtctldClientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { - log.Errorf("error applying vschema: %v, %v", keyspace.VSchema, err) + log.Error(fmt.Sprintf("error applying vschema: %v, %v", keyspace.VSchema, err)) return } } - log.Infof("Done creating keyspace: %v ", keyspace.Name) + log.Info(fmt.Sprintf("Done creating keyspace: %v ", keyspace.Name)) return } @@ -707,7 +707,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // This does not start any process and user have to explicitly start all // the required services (ex topo, vtgate, mysql and vttablet) func (cluster *LocalProcessCluster) SetupCluster(keyspace *Keyspace, shards []Shard) (err error) { - log.Infof("Starting keyspace: %v", keyspace.Name) + log.Info(fmt.Sprintf("Starting keyspace: %v", keyspace.Name)) if keyspace.SidecarDBName == "" { keyspace.SidecarDBName = sidecar.DefaultName @@ -717,7 +717,7 @@ func (cluster *LocalProcessCluster) SetupCluster(keyspace *Keyspace, shards []Sh // Create Keyspace err = cluster.VtctldClientProcess.CreateKeyspace(keyspace.Name, keyspace.SidecarDBName, keyspace.DurabilityPolicy) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return } } @@ -763,7 +763,7 @@ func (cluster *LocalProcessCluster) SetupCluster(keyspace *Keyspace, shards []Sh cluster.Keyspaces = append(cluster.Keyspaces, *keyspace) } - log.Infof("Done launching keyspace: %v", keyspace.Name) + log.Info(fmt.Sprintf("Done launching keyspace: %v", keyspace.Name)) return err } @@ -774,8 +774,8 @@ func (cluster *LocalProcessCluster) StartVtgate() (err error) { } vtgateInstance := *cluster.NewVtgateInstance() cluster.VtgateProcess = vtgateInstance - log.Infof("Starting vtgate on port %d", vtgateInstance.Port) - log.Infof("Vtgate started, connect to mysql using : mysql -h 127.0.0.1 -P %d", cluster.VtgateMySQLPort) + log.Info(fmt.Sprintf("Starting vtgate on port %d", vtgateInstance.Port)) + log.Info(fmt.Sprintf("Vtgate started, connect to mysql using : mysql -h 127.0.0.1 -P %d", cluster.VtgateMySQLPort)) return cluster.VtgateProcess.Setup() } @@ -816,11 +816,12 @@ func NewBareCluster(cell string, hostname string) *LocalProcessCluster { } else { err = createDirectory(cluster.CurrentVTDATAROOT, 0o700) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } _ = os.Setenv("VTDATAROOT", cluster.CurrentVTDATAROOT) - log.Infof("Created cluster on %s. ReusingVTDATAROOT=%v", cluster.CurrentVTDATAROOT, cluster.ReusingVTDATAROOT) + log.Info(fmt.Sprintf("Created cluster on %s. 
ReusingVTDATAROOT=%v", cluster.CurrentVTDATAROOT, cluster.ReusingVTDATAROOT)) return cluster } @@ -831,7 +832,7 @@ func NewCluster(cell string, hostname string) *LocalProcessCluster { err := cluster.populateVersionInfo() if err != nil { - log.Errorf("Error populating version information - %v", err) + log.Error(fmt.Sprintf("Error populating version information - %v", err)) } return cluster } @@ -870,12 +871,12 @@ func GetMajorVersion(binaryName string) (int, error) { func (cluster *LocalProcessCluster) RestartVtgate() (err error) { err = cluster.VtgateProcess.TearDown() if err != nil { - log.Errorf("error stopping vtgate %v: %v", cluster.VtgateProcess, err) + log.Error(fmt.Sprintf("error stopping vtgate %v: %v", cluster.VtgateProcess, err)) return } err = cluster.StartVtgate() if err != nil { - log.Errorf("error starting vtgate %v: %v", cluster.VtgateProcess, err) + log.Error(fmt.Sprintf("error starting vtgate %v: %v", cluster.VtgateProcess, err)) return } return err @@ -1069,17 +1070,17 @@ func (cluster *LocalProcessCluster) Teardown() { cluster.CancelFunc() } if err := cluster.VtgateProcess.TearDown(); err != nil { - log.Errorf("Error in vtgate teardown: %v", err) + log.Error(fmt.Sprintf("Error in vtgate teardown: %v", err)) } for _, vtorcProcess := range cluster.VTOrcProcesses { if err := vtorcProcess.TearDown(); err != nil { - log.Errorf("Error in vtorc teardown: %v", err) + log.Error(fmt.Sprintf("Error in vtorc teardown: %v", err)) } } if err := cluster.VtadminProcess.TearDown(); err != nil { - log.Errorf("Error in vtadmin teardown: %v", err) + log.Error(fmt.Sprintf("Error in vtadmin teardown: %v", err)) } var mysqlctlProcessList []*exec.Cmd @@ -1089,7 +1090,7 @@ func (cluster *LocalProcessCluster) Teardown() { for _, tablet := range shard.Vttablets { if tablet.MysqlctlProcess.TabletUID > 0 { if proc, err := tablet.MysqlctlProcess.StopProcess(); err != nil { - log.Errorf("Error in mysqlctl teardown: %v", err) + log.Error(fmt.Sprintf("Error in mysqlctl teardown: %v", err)) } else { mysqlctlProcessList = append(mysqlctlProcessList, proc) mysqlctlTabletUIDs = append(mysqlctlTabletUIDs, tablet.MysqlctlProcess.TabletUID) @@ -1097,12 +1098,12 @@ func (cluster *LocalProcessCluster) Teardown() { } if tablet.MysqlctldProcess.TabletUID > 0 { if err := tablet.MysqlctldProcess.Stop(); err != nil { - log.Errorf("Error in mysqlctl teardown: %v", err) + log.Error(fmt.Sprintf("Error in mysqlctl teardown: %v", err)) } } if err := tablet.VttabletProcess.TearDown(); err != nil { - log.Errorf("Error in vttablet teardown: %v", err) + log.Error(fmt.Sprintf("Error in vttablet teardown: %v", err)) } } } @@ -1115,11 +1116,11 @@ func (cluster *LocalProcessCluster) Teardown() { cluster.waitForMySQLProcessToExit(mysqlctlProcessList, mysqlctlTabletUIDs) if err := cluster.VtctldProcess.TearDown(); err != nil { - log.Errorf("Error in vtctld teardown: %v", err) + log.Error(fmt.Sprintf("Error in vtctld teardown: %v", err)) } if err := cluster.TopoProcess.TearDown(cluster.Cell, cluster.OriginalVTDATAROOT, cluster.CurrentVTDATAROOT, *keepData, *topoFlavor); err != nil { - log.Errorf("Error in topo server teardown: %v", err) + log.Error(fmt.Sprintf("Error in topo server teardown: %v", err)) } // reset the VTDATAROOT path. 
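Note on the conversion pattern: the hunks above and below all apply the same mechanical change — log.Infof/Warningf/Errorf become log.Info/Warn/Error wrapping fmt.Sprintf, and log.Fatalf/log.Fatal become log.Error followed by an explicit os.Exit(1). A minimal sketch of a compatibility helper that could factor out that boilerplate, assuming only the plain-string log.Error/log.Info/log.Warn calls already used in this diff (the logcompat package and its function names are hypothetical, not part of this change):

package logcompat // hypothetical helper package, not part of this change

import (
	"fmt"
	"os"

	"vitess.io/vitess/go/vt/log"
)

// Errorf formats its arguments like the old log.Errorf and forwards the
// result to the plain-string log.Error used throughout this change.
func Errorf(format string, args ...any) {
	log.Error(fmt.Sprintf(format, args...))
}

// Fatalf mirrors the old log.Fatalf behavior: log at error level, then exit
// non-zero. As with log.Fatalf, os.Exit skips deferred functions.
func Fatalf(format string, args ...any) {
	log.Error(fmt.Sprintf(format, args...))
	os.Exit(1)
}

With such a helper, a call site like log.Error(fmt.Sprintf("error applying schema: %v, %v", keyspace.SchemaSQL, err)) would shrink back to a single Errorf call; whether that indirection is wanted is a design choice, since spelling out fmt.Sprintf at each call site keeps every caller on the new plain-string API.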
@@ -1147,7 +1148,7 @@ func (cluster *LocalProcessCluster) waitForMySQLProcessToExit(mysqlctlProcessLis if err == nil { return } - log.Errorf("Error in mysqlctl teardown wait: %v", err) + log.Error(fmt.Sprintf("Error in mysqlctl teardown wait: %v", err)) break } pidFile := path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/mysql.pid", tabletUID)) @@ -1159,12 +1160,12 @@ func (cluster *LocalProcessCluster) waitForMySQLProcessToExit(mysqlctlProcessLis } pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) if err != nil { - log.Errorf("Error in conversion to integer: %v", err) + log.Error(fmt.Sprintf("Error in conversion to integer: %v", err)) return } err = syscallutil.Kill(pid, syscall.SIGKILL) if err != nil { - log.Errorf("Error in killing process: %v", err) + log.Error(fmt.Sprintf("Error in killing process: %v", err)) } }(cmd, mysqlctlTabletUIDs[i]) } @@ -1202,14 +1203,14 @@ func (cluster *LocalProcessCluster) GetAndReservePort() int { } for { cluster.nextPortForProcess = cluster.nextPortForProcess + 1 - log.Infof("Attempting to reserve port: %v", cluster.nextPortForProcess) + log.Info(fmt.Sprintf("Attempting to reserve port: %v", cluster.nextPortForProcess)) ln, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(cluster.nextPortForProcess))) if err != nil { - log.Errorf("Can't listen on port %v: %s, trying next port", cluster.nextPortForProcess, err) + log.Error(fmt.Sprintf("Can't listen on port %v: %s, trying next port", cluster.nextPortForProcess, err)) continue } - log.Infof("Port %v is available, reserving..", cluster.nextPortForProcess) + log.Info(fmt.Sprintf("Port %v is available, reserving..", cluster.nextPortForProcess)) ln.Close() break } @@ -1325,7 +1326,7 @@ func (cluster *LocalProcessCluster) NewVTOrcProcess(config VTOrcConfiguration, c func (cluster *LocalProcessCluster) NewVtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory string) *VtctldClientProcess { version, err := GetMajorVersion("vtctldclient") if err != nil { - log.Warningf("failed to get major vtctldclient version; interop with CLI changes for VEP-4 may not work: %v", err) + log.Warn(fmt.Sprintf("failed to get major vtctldclient version; interop with CLI changes for VEP-4 may not work: %v", err)) } base := VtProcessInstance("vtctldclient", "vtctldclient", cluster.TopoProcess.Port, cluster.Hostname) @@ -1425,9 +1426,9 @@ func (cluster *LocalProcessCluster) PrintMysqlctlLogFiles() { files, _ := os.ReadDir(logDir) for _, fileInfo := range files { if !fileInfo.IsDir() && strings.Contains(fileInfo.Name(), "mysqlctl") { - log.Errorf("Printing the log file - " + fileInfo.Name()) + log.Error("Printing the log file - " + fileInfo.Name()) logOut, _ := os.ReadFile(path.Join(logDir, fileInfo.Name())) - log.Errorf(string(logOut)) + log.Error(string(logOut)) } } } diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index 0e9a15a7ec2..bdad3dc95a5 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -490,7 +490,7 @@ func PrintFiles(t *testing.T, dir string, files ...string) { directories = directories[1:] entries, err := os.ReadDir(dir) if err != nil { - log.Errorf("Couldn't read directory - %v", dir) + log.Error(fmt.Sprintf("Couldn't read directory - %v", dir)) continue } for _, entry := range entries { @@ -516,8 +516,8 @@ func PrintFiles(t *testing.T, dir string, files ...string) { // Read and print the file. 
res, err := os.ReadFile(name) require.NoError(t, err) - log.Errorf("READING FILE - %v", name) - log.Errorf("%v", string(res)) + log.Error(fmt.Sprintf("READING FILE - %v", name)) + log.Error(string(res)) } } } diff --git a/go/test/endtoend/cluster/mysqlctl_process.go b/go/test/endtoend/cluster/mysqlctl_process.go index 29c1d435c77..7ea46ba9703 100644 --- a/go/test/endtoend/cluster/mysqlctl_process.go +++ b/go/test/endtoend/cluster/mysqlctl_process.go @@ -136,7 +136,7 @@ func (mysqlctl *MysqlctlProcess) startProcess(init bool) (*exec.Cmd, error) { extraMyCNF := path.Join(sslPath, "ssl.cnf") fout, err := os.Create(extraMyCNF) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return nil, err } @@ -168,15 +168,15 @@ ssl_key={{.ServerKey}} } tmpProcess.Env = append(tmpProcess.Env, os.Environ()...) tmpProcess.Env = append(tmpProcess.Env, DefaultVttestEnv) - log.Infof("Starting mysqlctl with command: %v", tmpProcess.Args) + log.Info(fmt.Sprintf("Starting mysqlctl with command: %v", tmpProcess.Args)) return tmpProcess, tmpProcess.Start() } // Stop executes mysqlctl command to stop mysql instance and kills the mysql instance // if it doesn't shutdown in 30 seconds. func (mysqlctl *MysqlctlProcess) Stop() (err error) { - log.Infof("Shutting down MySQL: %d", mysqlctl.TabletUID) - defer log.Infof("MySQL shutdown complete: %d", mysqlctl.TabletUID) + log.Info(fmt.Sprintf("Shutting down MySQL: %d", mysqlctl.TabletUID)) + defer log.Info(fmt.Sprintf("MySQL shutdown complete: %d", mysqlctl.TabletUID)) tmpProcess, err := mysqlctl.StopProcess() if err != nil { return err @@ -268,7 +268,7 @@ func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirect version, err := GetMajorVersion("mysqlctl") if err != nil { - log.Warningf("failed to get major mysqlctl version; backwards-compatibility for CLI changes may not work: %s", err) + log.Warn(fmt.Sprintf("failed to get major mysqlctl version; backwards-compatibility for CLI changes may not work: %s", err)) } mysqlctl := &MysqlctlProcess{ Name: "mysqlctl", @@ -342,7 +342,7 @@ func (mysqlctl *MysqlctlProcess) ExecuteCommandWithOutput(args ...string) (resul mysqlctl.Binary, args..., ) - log.Info(fmt.Sprintf("Executing mysqlctl with arguments %v", strings.Join(tmpProcess.Args, " "))) + log.Info("Executing mysqlctl with arguments " + strings.Join(tmpProcess.Args, " ")) resultByte, err := tmpProcess.CombinedOutput() return string(resultByte), err } diff --git a/go/test/endtoend/cluster/mysqlctld_process.go b/go/test/endtoend/cluster/mysqlctld_process.go index c8f421105b9..a43af4c200b 100644 --- a/go/test/endtoend/cluster/mysqlctld_process.go +++ b/go/test/endtoend/cluster/mysqlctld_process.go @@ -100,12 +100,12 @@ func (mysqlctld *MysqlctldProcess) Start() error { err := os.MkdirAll(mysqlctld.LogDirectory, 0o755) if err != nil { - log.Errorf("Failed to create directory for mysqlctld logs: %v", err) + log.Error(fmt.Sprintf("Failed to create directory for mysqlctld logs: %v", err)) return err } errFile, err := os.Create(path.Join(mysqlctld.LogDirectory, "mysqlctld-stderr.txt")) if err != nil { - log.Errorf("Failed to create directory for mysqlctld stderr: %v", err) + log.Error(fmt.Sprintf("Failed to create directory for mysqlctld stderr: %v", err)) } tempProcess.Stderr = errFile @@ -115,7 +115,7 @@ func (mysqlctld *MysqlctldProcess) Start() error { tempProcess.Stderr = os.Stderr mysqlctld.ErrorLog = errFile.Name() - log.Infof("%v", strings.Join(tempProcess.Args, " ")) + log.Info(strings.Join(tempProcess.Args, " ")) err = tempProcess.Start() if 
err != nil { @@ -130,9 +130,9 @@ func (mysqlctld *MysqlctldProcess) Start() error { if !mysqlctld.exitSignalReceived { errBytes, ferr := os.ReadFile(mysqlctld.ErrorLog) if ferr == nil { - log.Errorf("mysqlctld error log contents:\n%s", string(errBytes)) + log.Error("mysqlctld error log contents:\n" + string(errBytes)) } else { - log.Errorf("Failed to read the mysqlctld error log file %q: %v", mysqlctld.ErrorLog, ferr) + log.Error(fmt.Sprintf("Failed to read the mysqlctld error log file %q: %v", mysqlctld.ErrorLog, ferr)) } fmt.Printf("mysqlctld stopped unexpectedly, tabletUID %v, mysql port %v, PID %v\n", mysqlctld.TabletUID, mysqlctld.MySQLPort, mysqlctld.process.Process.Pid) } diff --git a/go/test/endtoend/cluster/topo_process.go b/go/test/endtoend/cluster/topo_process.go index 80070f6ca47..936773a9094 100644 --- a/go/test/endtoend/cluster/topo_process.go +++ b/go/test/endtoend/cluster/topo_process.go @@ -115,7 +115,7 @@ func (topo *TopoProcess) SetupEtcd() (err error) { topo.proc.Env = append(topo.proc.Env, os.Environ()...) topo.proc.Env = append(topo.proc.Env, DefaultVttestEnv) - log.Infof("Starting etcd with command: %v", strings.Join(topo.proc.Args, " ")) + log.Info(fmt.Sprintf("Starting etcd with command: %v", strings.Join(topo.proc.Args, " "))) err = topo.proc.Start() if err != nil { @@ -145,9 +145,9 @@ func (topo *TopoProcess) SetupEtcd() (err error) { case err := <-topo.exit: errBytes, ferr := os.ReadFile(topo.ErrorLog) if ferr == nil { - log.Errorf("%s error log contents:\n%s", topo.Binary, string(errBytes)) + log.Error(fmt.Sprintf("%s error log contents:\n%s", topo.Binary, string(errBytes))) } else { - log.Errorf("Failed to read the %s error log file %q: %v", topo.Binary, topo.ErrorLog, ferr) + log.Error(fmt.Sprintf("Failed to read the %s error log file %q: %v", topo.Binary, topo.ErrorLog, ferr)) } return fmt.Errorf("process '%s' exited prematurely (err: %s)", topo.Binary, err) default: @@ -177,18 +177,18 @@ func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) error { err = os.MkdirAll(topo.LogDirectory, 0o755) if err != nil { - log.Errorf("Failed to create log directory for zookeeper: %v", err) + log.Error(fmt.Sprintf("Failed to create log directory for zookeeper: %v", err)) return err } errFile, err := os.Create(path.Join(topo.LogDirectory, "topo-stderr.txt")) if err != nil { - log.Errorf("Failed to create file for zookeeper stderr: %v", err) + log.Error(fmt.Sprintf("Failed to create file for zookeeper stderr: %v", err)) return err } topo.proc.Stderr = errFile topo.proc.Env = append(topo.proc.Env, os.Environ()...) 
- log.Infof("Starting zookeeper with args %v", strings.Join(topo.proc.Args, " ")) + log.Info(fmt.Sprintf("Starting zookeeper with args %v", strings.Join(topo.proc.Args, " "))) return topo.proc.Run() } @@ -215,12 +215,12 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { err = os.MkdirAll(topo.LogDirectory, os.ModePerm) if err != nil { - log.Errorf("Failed to create directory for consul logs: %v", err) + log.Error(fmt.Sprintf("Failed to create directory for consul logs: %v", err)) return } err = os.MkdirAll(topo.DataDirectory, os.ModePerm) if err != nil { - log.Errorf("Failed to create directory for consul data: %v", err) + log.Error(fmt.Sprintf("Failed to create directory for consul data: %v", err)) return } @@ -229,7 +229,7 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { logFile := path.Join(topo.LogDirectory, "/consul.log") _, err = os.Create(logFile) if err != nil { - log.Errorf("Failed to create file for consul logs: %v", err) + log.Error(fmt.Sprintf("Failed to create file for consul logs: %v", err)) return } @@ -267,14 +267,14 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { errFile, err := os.Create(path.Join(topo.LogDirectory, "topo-stderr.txt")) if err != nil { - log.Errorf("Failed to create file for consul stderr: %v", err) + log.Error(fmt.Sprintf("Failed to create file for consul stderr: %v", err)) return } topo.proc.Stderr = errFile topo.proc.Env = append(topo.proc.Env, os.Environ()...) - log.Errorf("Starting consul with args %v", strings.Join(topo.proc.Args, " ")) + log.Error(fmt.Sprintf("Starting consul with args %v", strings.Join(topo.proc.Args, " "))) err = topo.proc.Start() if err != nil { return @@ -314,7 +314,7 @@ func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoo case *clientv3.Client: _ = cli.Close() default: - log.Errorf("Unknown topo client type %T", cli) + log.Error(fmt.Sprintf("Unknown topo client type %T", cli)) } } @@ -380,10 +380,10 @@ func (topo *TopoProcess) IsHealthy() bool { func (topo *TopoProcess) removeTopoDirectories(Cell string) { if err := topo.ManageTopoDir("rmdir", "/vitess/global"); err != nil { - log.Errorf("Failed to remove global topo directory: %v", err) + log.Error(fmt.Sprintf("Failed to remove global topo directory: %v", err)) } if err := topo.ManageTopoDir("rmdir", "/vitess/"+Cell); err != nil { - log.Errorf("Failed to remove local topo directory: %v", err) + log.Error(fmt.Sprintf("Failed to remove local topo directory: %v", err)) } } diff --git a/go/test/endtoend/cluster/vtadmin_process.go b/go/test/endtoend/cluster/vtadmin_process.go index 1c8bdc91194..bfcc8c09124 100644 --- a/go/test/endtoend/cluster/vtadmin_process.go +++ b/go/test/endtoend/cluster/vtadmin_process.go @@ -58,7 +58,7 @@ func (vp *VtAdminProcess) Setup() (err error) { timeNow := time.Now().UnixNano() err = os.MkdirAll(vp.LogDir, 0o755) if err != nil { - log.Errorf("cannot create log directory for vtadmin: %v", err) + log.Error(fmt.Sprintf("cannot create log directory for vtadmin: %v", err)) return err } rbacFile, err := vp.CreateAndWriteFile("rbac", `rules: @@ -127,7 +127,7 @@ func (vp *VtAdminProcess) Setup() (err error) { logFile := fmt.Sprintf("vtadmin-stderr-%d.txt", timeNow) errFile, err := os.Create(path.Join(vp.LogDir, logFile)) if err != nil { - log.Errorf("cannot create error log file for vtadmin: %v", err) + log.Error(fmt.Sprintf("cannot create error log file for vtadmin: %v", err)) return err } vp.proc.Stderr = errFile @@ -135,7 
+135,7 @@ func (vp *VtAdminProcess) Setup() (err error) { vp.proc.Env = append(vp.proc.Env, os.Environ()...) vp.proc.Env = append(vp.proc.Env, DefaultVttestEnv) - log.Infof("Running vtadmin with command: %v", strings.Join(vp.proc.Args, " ")) + log.Info(fmt.Sprintf("Running vtadmin with command: %v", strings.Join(vp.proc.Args, " "))) err = vp.proc.Start() if err != nil { @@ -147,9 +147,9 @@ func (vp *VtAdminProcess) Setup() (err error) { if vp.proc != nil { exitErr := vp.proc.Wait() if exitErr != nil { - log.Errorf("vtadmin process exited with error: %v", exitErr) + log.Error(fmt.Sprintf("vtadmin process exited with error: %v", exitErr)) data, _ := os.ReadFile(logFile) - log.Errorf("vtadmin stderr - %s", string(data)) + log.Error("vtadmin stderr - " + string(data)) } vp.exit <- exitErr close(vp.exit) @@ -164,7 +164,7 @@ func (vp *VtAdminProcess) CreateAndWriteFile(prefix string, content string, exte timeNow := time.Now().UnixNano() file, err := os.Create(path.Join(vp.LogDir, fmt.Sprintf("%s-%d.%s", prefix, timeNow, extension))) if err != nil { - log.Errorf("cannot create file for vtadmin: %v", err) + log.Error(fmt.Sprintf("cannot create file for vtadmin: %v", err)) return "", err } diff --git a/go/test/endtoend/cluster/vtbackup_process.go b/go/test/endtoend/cluster/vtbackup_process.go index 7cc0102ba95..e9eef1ee0ef 100644 --- a/go/test/endtoend/cluster/vtbackup_process.go +++ b/go/test/endtoend/cluster/vtbackup_process.go @@ -104,7 +104,7 @@ func (vtbackup *VtbackupProcess) Setup() (err error) { vtbackup.proc.Env = append(vtbackup.proc.Env, os.Environ()...) vtbackup.proc.Env = append(vtbackup.proc.Env, DefaultVttestEnv) - log.Infof("Running vtbackup with args: %v", strings.Join(vtbackup.proc.Args, " ")) + log.Info(fmt.Sprintf("Running vtbackup with args: %v", strings.Join(vtbackup.proc.Args, " "))) err = vtbackup.proc.Run() if err != nil { diff --git a/go/test/endtoend/cluster/vtctld_process.go b/go/test/endtoend/cluster/vtctld_process.go index 3c5554dd507..9da201d08f3 100644 --- a/go/test/endtoend/cluster/vtctld_process.go +++ b/go/test/endtoend/cluster/vtctld_process.go @@ -82,12 +82,12 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error) err = os.MkdirAll(vtctld.LogDir, 0o755) if err != nil { - log.Errorf("cannot create log directory for vtctld: %v", err) + log.Error(fmt.Sprintf("cannot create log directory for vtctld: %v", err)) return err } errFile, err := os.Create(path.Join(vtctld.LogDir, "vtctld-stderr.txt")) if err != nil { - log.Errorf("cannot create error log file for vtctld: %v", err) + log.Error(fmt.Sprintf("cannot create error log file for vtctld: %v", err)) return err } vtctld.proc.Stderr = errFile @@ -96,7 +96,7 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error) vtctld.proc.Env = append(vtctld.proc.Env, os.Environ()...) 
vtctld.proc.Env = append(vtctld.proc.Env, DefaultVttestEnv) - log.Infof("Starting vtctld with command: %v", strings.Join(vtctld.proc.Args, " ")) + log.Info(fmt.Sprintf("Starting vtctld with command: %v", strings.Join(vtctld.proc.Args, " "))) err = vtctld.proc.Start() if err != nil { @@ -118,9 +118,9 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error) case err := <-vtctld.exit: errBytes, ferr := os.ReadFile(vtctld.ErrorLog) if ferr == nil { - log.Errorf("vtctld error log contents:\n%s", string(errBytes)) + log.Error("vtctld error log contents:\n" + string(errBytes)) } else { - log.Errorf("Failed to read the vtctld error log file %q: %v", vtctld.ErrorLog, ferr) + log.Error(fmt.Sprintf("Failed to read the vtctld error log file %q: %v", vtctld.ErrorLog, ferr)) } return fmt.Errorf("process '%s' exited prematurely (err: %s)", vtctld.Name, err) default: diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index f21c91dfe6d..35675c36bbf 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -51,7 +51,7 @@ type VtctldClientProcess struct { func VtctldClientProcessInstance(grpcPort int, topoPort int, hostname string, tmpDirectory string) *VtctldClientProcess { version, err := GetMajorVersion("vtctld") // `vtctldclient` does not have a --version flag, so we assume both vtctl/vtctldclient have the same version if err != nil { - log.Warningf("failed to get major vtctldclient version; interop with CLI changes for VEP-4 may not work: %s", err) + log.Warn(fmt.Sprintf("failed to get major vtctldclient version; interop with CLI changes for VEP-4 may not work: %s", err)) } base := VtProcessInstance("vtctldclient", "vtctldclient", topoPort, hostname) @@ -69,7 +69,7 @@ func (vtctldclient *VtctldClientProcess) ExecuteCommand(args ...string) (err err output, err := vtctldclient.ExecuteCommandWithOutput(args...) 
if output != "" { if err != nil { - log.Errorf("Output:\n%v", output) + log.Error(fmt.Sprintf("Output:\n%v", output)) } } return err @@ -104,7 +104,7 @@ func (vtctldclient *VtctldClientProcess) ExecuteCommandWithOutput(args ...string ) msg := binlogplayer.LimitString(strings.Join(tmpProcess.Args, " "), 256) // limit log line length if !vtctldclient.Quiet { - log.Infof("Executing vtctldclient with command: %v (attempt %d of %d)", msg, i+1, retries) + log.Info(fmt.Sprintf("Executing vtctldclient with command: %v (attempt %d of %d)", msg, i+1, retries)) } resultByte, err = tmpProcess.CombinedOutput() resultStr = string(resultByte) @@ -229,7 +229,7 @@ func (vtctldclient *VtctldClientProcess) PlannedReparentShard(Keyspace string, S "--wait-replicas-timeout", "30s", ) if err != nil { - log.Errorf("error in PlannedReparentShard output %s, err %s", output, err.Error()) + log.Error(fmt.Sprintf("error in PlannedReparentShard output %s, err %s", output, err.Error())) } return err } @@ -242,7 +242,7 @@ func (vtctldclient *VtctldClientProcess) InitializeShard(keyspace string, shard "--wait-replicas-timeout", "31s", "--new-primary", fmt.Sprintf("%s-%d", cell, uid)) if err != nil { - log.Errorf("error in PlannedReparentShard output %s, err %s", output, err.Error()) + log.Error(fmt.Sprintf("error in PlannedReparentShard output %s, err %s", output, err.Error())) } return err } @@ -255,7 +255,7 @@ func (vtctldclient *VtctldClientProcess) InitShardPrimary(keyspace string, shard fmt.Sprintf("%s/%s", keyspace, shard), fmt.Sprintf("%s-%d", cell, uid)) if err != nil { - log.Errorf("error in InitShardPrimary output %s, err %s", output, err.Error()) + log.Error(fmt.Sprintf("error in InitShardPrimary output %s, err %s", output, err.Error())) } return err } @@ -272,7 +272,7 @@ func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sid } output, err = vtctldclient.ExecuteCommandWithOutput(args...) if err != nil { - log.Errorf("CreateKeyspace returned err: %s, output: %s", err, output) + log.Error(fmt.Sprintf("CreateKeyspace returned err: %s, output: %s", err, output)) } return err } diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go index bbfbbab61b7..298c2c9c893 100644 --- a/go/test/endtoend/cluster/vtgate_process.go +++ b/go/test/endtoend/cluster/vtgate_process.go @@ -183,7 +183,7 @@ func (vtgate *VtgateProcess) Setup() (err error) { } configFile, err := os.Create(vtgate.ConfigFile) if err != nil { - log.Errorf("cannot create config file for vtgate: %v", err) + log.Error(fmt.Sprintf("cannot create config file for vtgate: %v", err)) return err } _, err = configFile.WriteString(vtgate.Config.ToJSONString()) @@ -225,7 +225,7 @@ func (vtgate *VtgateProcess) Setup() (err error) { errFile, err := os.Create(path.Join(vtgate.LogDir, "vtgate-stderr.txt")) if err != nil { - log.Errorf("cannot create error log file for vtgate: %v", err) + log.Error(fmt.Sprintf("cannot create error log file for vtgate: %v", err)) return err } vtgate.proc.Stderr = errFile @@ -234,7 +234,7 @@ func (vtgate *VtgateProcess) Setup() (err error) { vtgate.proc.Env = append(vtgate.proc.Env, os.Environ()...) 
vtgate.proc.Env = append(vtgate.proc.Env, DefaultVttestEnv) - log.Infof("Running vtgate with command: %v", strings.Join(vtgate.proc.Args, " ")) + log.Info(fmt.Sprintf("Running vtgate with command: %v", strings.Join(vtgate.proc.Args, " "))) err = vtgate.proc.Start() if err != nil { @@ -257,9 +257,9 @@ func (vtgate *VtgateProcess) Setup() (err error) { case err := <-vtgate.exit: errBytes, ferr := os.ReadFile(vtgate.ErrorLog) if ferr == nil { - log.Errorf("vtgate error log contents:\n%s", string(errBytes)) + log.Error("vtgate error log contents:\n" + string(errBytes)) } else { - log.Errorf("Failed to read the vtgate error log file %q: %v", vtgate.ErrorLog, ferr) + log.Error(fmt.Sprintf("Failed to read the vtgate error log file %q: %v", vtgate.ErrorLog, ferr)) } return fmt.Errorf("process '%s' exited prematurely (err: %s)", vtgate.Name, err) default: @@ -314,8 +314,7 @@ func (vtgate *VtgateProcess) GetStatusForTabletOfShard(name string, endPointsCou // WaitForStatusOfTabletInShard function waits till status of a tablet in shard is 1 // endPointsCount: how many endpoints to wait for func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string, endPointsCount int, timeout time.Duration) error { - log.Infof("Waiting for healthy status of %d %s tablets in cell %s", - endPointsCount, name, vtgate.Cell) + log.Info(fmt.Sprintf("Waiting for healthy status of %d %s tablets in cell %s", endPointsCount, name, vtgate.Cell)) deadline := time.Now().Add(timeout) for time.Now().Before(deadline) { if vtgate.GetStatusForTabletOfShard(name, endPointsCount) { diff --git a/go/test/endtoend/cluster/vtorc_process.go b/go/test/endtoend/cluster/vtorc_process.go index 1e74fab634b..3917859bf18 100644 --- a/go/test/endtoend/cluster/vtorc_process.go +++ b/go/test/endtoend/cluster/vtorc_process.go @@ -96,12 +96,12 @@ func (orc *VTOrcProcess) Setup() (err error) { timeNow := time.Now().UnixNano() err = os.MkdirAll(orc.LogDir, 0o755) if err != nil { - log.Errorf("cannot create log directory for vtorc: %v", err) + log.Error(fmt.Sprintf("cannot create log directory for vtorc: %v", err)) return err } configFile, err := os.Create(path.Join(orc.LogDir, fmt.Sprintf("orc-config-%d.json", timeNow))) if err != nil { - log.Errorf("cannot create config file for vtorc: %v", err) + log.Error(fmt.Sprintf("cannot create config file for vtorc: %v", err)) return err } orc.ConfigPath = configFile.Name() @@ -110,7 +110,7 @@ func (orc *VTOrcProcess) Setup() (err error) { if !orc.NoOverride { orc.Config.addValuesToCheckOverride() } - log.Errorf("configuration - %v", orc.Config.ToJSONString()) + log.Error(fmt.Sprintf("configuration - %v", orc.Config.ToJSONString())) _, err = configFile.WriteString(orc.Config.ToJSONString()) if err != nil { return err @@ -164,7 +164,7 @@ func (orc *VTOrcProcess) Setup() (err error) { } errFile, err := os.Create(path.Join(orc.LogDir, orc.LogFileName)) if err != nil { - log.Errorf("cannot create error log file for vtorc: %v", err) + log.Error(fmt.Sprintf("cannot create error log file for vtorc: %v", err)) return err } orc.proc.Stderr = errFile @@ -172,7 +172,7 @@ func (orc *VTOrcProcess) Setup() (err error) { orc.proc.Env = append(orc.proc.Env, os.Environ()...) 
orc.proc.Env = append(orc.proc.Env, DefaultVttestEnv) - log.Infof("Running vtorc with command: %v", strings.Join(orc.proc.Args, " ")) + log.Info(fmt.Sprintf("Running vtorc with command: %v", strings.Join(orc.proc.Args, " "))) err = orc.proc.Start() if err != nil { diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go index 0c5c4c336b1..5f8815019d1 100644 --- a/go/test/endtoend/cluster/vttablet_process.go +++ b/go/test/endtoend/cluster/vttablet_process.go @@ -149,7 +149,7 @@ func (vttablet *VttabletProcess) Setup() (err error) { vttablet.proc.Env = append(vttablet.proc.Env, os.Environ()...) vttablet.proc.Env = append(vttablet.proc.Env, DefaultVttestEnv) - log.Infof("Running vttablet with command: %v", strings.Join(vttablet.proc.Args, " ")) + log.Info(fmt.Sprintf("Running vttablet with command: %v", strings.Join(vttablet.proc.Args, " "))) err = vttablet.proc.Start() if err != nil { @@ -178,7 +178,7 @@ func (vttablet *VttabletProcess) Setup() (err error) { if err = vttablet.WaitForTabletStatuses(servingStatus); err != nil { errFileContent, _ := os.ReadFile(fname) if errFileContent != nil { - log.Infof("vttablet error:\n%s\n", string(errFileContent)) + log.Info(fmt.Sprintf("vttablet error:\n%s\n", string(errFileContent))) } return fmt.Errorf("process '%s' timed out after 10s (err: %s)", vttablet.Name, err) } @@ -325,9 +325,9 @@ func (vttablet *VttabletProcess) WaitForTabletStatusesForTimeout(expectedStatuse case err := <-vttablet.exit: errBytes, ferr := os.ReadFile(vttablet.ErrorLog) if ferr == nil { - log.Errorf("vttablet error log contents:\n%s", string(errBytes)) + log.Error("vttablet error log contents:\n" + string(errBytes)) } else { - log.Errorf("Failed to read the vttablet error log file %q: %v", vttablet.ErrorLog, ferr) + log.Error(fmt.Sprintf("Failed to read the vttablet error log file %q: %v", vttablet.ErrorLog, ferr)) } return fmt.Errorf("process '%s' exited prematurely (err: %s)", vttablet.Name, err) default: @@ -519,7 +519,7 @@ func (vttablet *VttabletProcess) QueryTabletMultiple(queries []string, keyspace defer conn.Close() for _, query := range queries { - log.Infof("Executing query %s (on %s)", query, vttablet.Name) + log.Info(fmt.Sprintf("Executing query %s (on %s)", query, vttablet.Name)) _, err := executeQuery(conn, query) if err != nil { return err @@ -586,7 +586,7 @@ func executeQuery(dbConn *mysql.Conn, query string) (*sqltypes.Result, error) { for i := range retries { if i > 0 { // We only audit from 2nd attempt and onwards, otherwise this is just too verbose. - log.Infof("Executing query %s (attempt %d of %d)", query, (i + 1), retries) + log.Info(fmt.Sprintf("Executing query %s (attempt %d of %d)", query, (i + 1), retries)) } result, err = dbConn.ExecuteFetch(query, 10000, true) if err == nil { @@ -606,7 +606,7 @@ func executeMultiQuery(dbConn *mysql.Conn, query string) (err error) { for i := range retries { if i > 0 { // We only audit from 2nd attempt and onwards, otherwise this is just too verbose. 
- log.Infof("Executing query %s (attempt %d of %d)", query, (i + 1), retries) + log.Info(fmt.Sprintf("Executing query %s (attempt %d of %d)", query, (i + 1), retries)) } err = dbConn.ExecuteFetchMultiDrain(query) if err == nil { @@ -667,7 +667,7 @@ func (vttablet *VttabletProcess) WaitForVReplicationToCatchup(t testing.TB, work for ind, query := range queries { waitDuration := 500 * time.Millisecond for duration > 0 { - log.Infof("Executing query %s on %s", query, vttablet.TabletPath) + log.Info(fmt.Sprintf("Executing query %s on %s", query, vttablet.TabletPath)) lastChecked = time.Now() qr, err := executeQuery(conn, query) if err != nil { @@ -676,7 +676,7 @@ func (vttablet *VttabletProcess) WaitForVReplicationToCatchup(t testing.TB, work if qr != nil && qr.Rows != nil && len(qr.Rows) > 0 && fmt.Sprintf("%v", qr.Rows[0]) == string(results[ind]) { break } else { - log.Infof("In WaitForVReplicationToCatchup: %s %+v", query, qr.Rows) + log.Info(fmt.Sprintf("In WaitForVReplicationToCatchup: %s %+v", query, qr.Rows)) } time.Sleep(waitDuration) duration -= waitDuration @@ -685,7 +685,7 @@ func (vttablet *VttabletProcess) WaitForVReplicationToCatchup(t testing.TB, work t.Fatalf("WaitForVReplicationToCatchup timed out for workflow %s, keyspace %s", workflow, database) } } - log.Infof("WaitForVReplicationToCatchup succeeded at %v", lastChecked) + log.Info(fmt.Sprintf("WaitForVReplicationToCatchup succeeded at %v", lastChecked)) } // BulkLoad performs a bulk load of rows into a given vttablet. @@ -696,7 +696,7 @@ func (vttablet *VttabletProcess) BulkLoad(t testing.TB, db, table string, bulkIn } defer os.Remove(tmpbulk.Name()) - log.Infof("create temporary file for bulk loading %q", tmpbulk.Name()) + log.Info(fmt.Sprintf("create temporary file for bulk loading %q", tmpbulk.Name())) bufStart := time.Now() bulkBuffer := bufio.NewWriter(tmpbulk) @@ -705,7 +705,7 @@ func (vttablet *VttabletProcess) BulkLoad(t testing.TB, db, table string, bulkIn pos, _ := tmpbulk.Seek(0, 1) bufFinish := time.Now() - log.Infof("bulk loading %d bytes from %q...", pos, tmpbulk.Name()) + log.Info(fmt.Sprintf("bulk loading %d bytes from %q...", pos, tmpbulk.Name())) if err := tmpbulk.Close(); err != nil { t.Fatal(err) @@ -724,8 +724,7 @@ func (vttablet *VttabletProcess) BulkLoad(t testing.TB, db, table string, bulkIn } end := time.Now() - log.Infof("bulk insert successful (write tmp file = %v, mysql bulk load = %v, total = %v", - bufFinish.Sub(bufStart), end.Sub(bufFinish), end.Sub(bufStart)) + log.Info(fmt.Sprintf("bulk insert successful (write tmp file = %v, mysql bulk load = %v, total = %v", bufFinish.Sub(bufStart), end.Sub(bufFinish), end.Sub(bufStart))) } // IsShutdown returns whether a vttablet is shutdown or not diff --git a/go/test/endtoend/clustertest/add_keyspace_test.go b/go/test/endtoend/clustertest/add_keyspace_test.go index ed85e481d10..d2fac993e41 100644 --- a/go/test/endtoend/clustertest/add_keyspace_test.go +++ b/go/test/endtoend/clustertest/add_keyspace_test.go @@ -58,7 +58,7 @@ primary key (id) func TestAddKeyspace(t *testing.T) { cell := clusterInstance.Cell if err := clusterInstance.StartKeyspace(*testKeyspace, []string{"-80", "80-"}, 0, false, cell); err != nil { - log.Errorf("failed to AddKeyspace %v: %v", *testKeyspace, err) + log.Error(fmt.Sprintf("failed to AddKeyspace %v: %v", *testKeyspace, err)) t.Fatal(err) } // Restart vtgate process diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go index 030fb5cda38..8e78695b5ba 100644 --- 
a/go/test/endtoend/clustertest/vtctld_test.go +++ b/go/test/endtoend/clustertest/vtctld_test.go @@ -119,7 +119,7 @@ func testTabletStatus(t *testing.T) { respByte, err := io.ReadAll(resp.Body) require.NoError(t, err) result := string(respByte) - log.Infof("Tablet status response: %v", result) + log.Info(fmt.Sprintf("Tablet status response: %v", result)) assert.True(t, strings.Contains(result, `/debug/health`)) assert.True(t, strings.Contains(result, ``)) } diff --git a/go/test/endtoend/docker/vttestserver.go b/go/test/endtoend/docker/vttestserver.go index b0285aa34ad..47ea1fa09e4 100644 --- a/go/test/endtoend/docker/vttestserver.go +++ b/go/test/endtoend/docker/vttestserver.go @@ -57,7 +57,7 @@ func (v *vttestserver) teardown() { cmd := exec.Command("docker", "rm", "--force", "vttestserver-end2end-test") err := cmd.Run() if err != nil { - log.Errorf("docker teardown failed :- %s", err.Error()) + log.Error("docker teardown failed :- " + err.Error()) } } diff --git a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go index f0e598d39f7..b845168e727 100644 --- a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go +++ b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go @@ -397,7 +397,7 @@ func clusterSetUp(t *testing.T) (int, error) { } func createIntermediateCA(ca string, serial string, name string, commonName string) error { - log.Infof("Creating intermediate signed cert and key %s", commonName) + log.Info("Creating intermediate signed cert and key " + commonName) tmpProcess := exec.Command( "vttlstest", "CreateIntermediateCA", @@ -410,7 +410,7 @@ func createIntermediateCA(ca string, serial string, name string, commonName stri } func createSignedCert(ca string, serial string, name string, commonName string) error { - log.Infof("Creating signed cert and key %s", commonName) + log.Info("Creating signed cert and key " + commonName) tmpProcess := exec.Command( "vttlstest", "CreateSignedCert", diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go index 10ba6f82925..a59df4e9928 100644 --- a/go/test/endtoend/messaging/message_test.go +++ b/go/test/endtoend/messaging/message_test.go @@ -596,7 +596,7 @@ func (stream *VTGateStream) MessageStream(ks, shard string, keyRange *topodatapb for { qr, err := resultStream.Recv() if err != nil { - log.Infof("Message stream ended: %v", err) + log.Info(fmt.Sprintf("Message stream ended: %v", err)) return } diff --git a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go index 748b5f80d64..d004be58479 100644 --- a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go +++ b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go @@ -519,7 +519,7 @@ func generateDelete(t *testing.T, conn *mysql.Conn) error { } func runSingleConnection(ctx context.Context, t *testing.T, sleepInterval time.Duration) { - log.Infof("Running single connection") + log.Info("Running single connection") conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) defer conn.Close() @@ -545,7 +545,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, sleepInterval time.D } select { case <-ctx.Done(): - log.Infof("Terminating single connection") + log.Info("Terminating single connection") return case <-ticker.C: } @@ -565,7 +565,7 @@ func runMultipleConnections(ctx context.Context, t *testing.T) { 
singleConnectionSleepIntervalNanoseconds := float64(baseSleepInterval.Nanoseconds()) * sleepModifier sleepInterval := time.Duration(int64(singleConnectionSleepIntervalNanoseconds)) - log.Infof("Running multiple connections: maxConcurrency=%v, sleep interval=%v", maxConcurrency, sleepInterval) + log.Info(fmt.Sprintf("Running multiple connections: maxConcurrency=%v, sleep interval=%v", maxConcurrency, sleepInterval)) var wg sync.WaitGroup for range maxConcurrency { wg.Go(func() { @@ -573,12 +573,12 @@ func runMultipleConnections(ctx context.Context, t *testing.T) { }) } wg.Wait() - log.Infof("Running multiple connections: done") + log.Info("Running multiple connections: done") } func initTable(t *testing.T) { - log.Infof("initTable begin") - defer log.Infof("initTable complete") + log.Info("initTable begin") + defer log.Info("initTable complete") ctx := t.Context() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index 6bdcdb64828..49d2e229c37 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -1358,7 +1358,7 @@ func generateDelete(t *testing.T, conn *mysql.Conn) error { } func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { - log.Infof("Running single connection") + log.Info("Running single connection") conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) defer conn.Close() @@ -1370,7 +1370,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { for { if atomic.LoadInt64(done) == 1 { - log.Infof("Terminating single connection") + log.Info("Terminating single connection") return } switch rand.Int32N(3) { @@ -1387,7 +1387,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { } func runMultipleConnections(ctx context.Context, t *testing.T) { - log.Infof("Running multiple connections") + log.Info("Running multiple connections") require.True(t, checkTable(t, tableName, true)) var done int64 @@ -1399,14 +1399,14 @@ func runMultipleConnections(ctx context.Context, t *testing.T) { } <-ctx.Done() atomic.StoreInt64(&done, 1) - log.Infof("Running multiple connections: done") + log.Info("Running multiple connections: done") wg.Wait() - log.Infof("All connections cancelled") + log.Info("All connections cancelled") } func initTable(t *testing.T) { - log.Infof("initTable begin") - defer log.Infof("initTable complete") + log.Info("initTable begin") + defer log.Info("initTable complete") ctx := t.Context() conn, err := mysql.Connect(ctx, &vtParams) @@ -1432,7 +1432,7 @@ func testSelectTableMetrics(t *testing.T) { writeMetrics.mu.Lock() defer writeMetrics.mu.Unlock() - log.Infof("%s", writeMetrics.String()) + log.Info(writeMetrics.String()) ctx := t.Context() conn, err := mysql.Connect(ctx, &vtParams) @@ -1444,7 +1444,7 @@ func testSelectTableMetrics(t *testing.T) { row := rs.Named().Row() require.NotNil(t, row) - log.Infof("testSelectTableMetrics, row: %v", row) + log.Info(fmt.Sprintf("testSelectTableMetrics, row: %v", row)) numRows := row.AsInt64("num_rows", 0) sumUpdates := row.AsInt64("sum_updates", 0) diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 598202a7fc4..e210de17ce7 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ 
b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -2342,8 +2342,8 @@ func testDeclarative(t *testing.T) { } initTable := func(t *testing.T) { - log.Infof("initTable begin") - defer log.Infof("initTable complete") + log.Info("initTable begin") + defer log.Info("initTable complete") ctx := t.Context() conn, err := mysql.Connect(ctx, &vtParams) @@ -2369,7 +2369,7 @@ func testDeclarative(t *testing.T) { writeMetrics.mu.Lock() defer writeMetrics.mu.Unlock() - log.Infof("%s", writeMetrics.String()) + log.Info(writeMetrics.String()) ctx := t.Context() conn, err := mysql.Connect(ctx, &vtParams) @@ -2381,7 +2381,7 @@ func testDeclarative(t *testing.T) { row := rs.Named().Row() require.NotNil(t, row) - log.Infof("testSelectTableMetrics, row: %v", row) + log.Info(fmt.Sprintf("testSelectTableMetrics, row: %v", row)) numRows := row.AsInt64("num_rows", 0) sumUpdates := row.AsInt64("sum_updates", 0) diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go index 9368d08f3c2..23ba0047cda 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -487,7 +487,7 @@ func generateDelete(t *testing.T, conn *mysql.Conn) error { } func runSingleConnection(ctx context.Context, t *testing.T, sleepInterval time.Duration) { - log.Infof("Running single connection") + log.Info("Running single connection") conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) defer conn.Close() @@ -511,7 +511,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, sleepInterval time.D } select { case <-ctx.Done(): - log.Infof("Terminating single connection") + log.Info("Terminating single connection") return case <-ticker.C: } @@ -531,7 +531,7 @@ func runMultipleConnections(ctx context.Context, t *testing.T) { singleConnectionSleepIntervalNanoseconds := float64(baseSleepInterval.Nanoseconds()) * sleepModifier sleepInterval := time.Duration(int64(singleConnectionSleepIntervalNanoseconds)) - log.Infof("Running multiple connections: maxConcurrency=%v, sleep interval=%v", maxConcurrency, sleepInterval) + log.Info(fmt.Sprintf("Running multiple connections: maxConcurrency=%v, sleep interval=%v", maxConcurrency, sleepInterval)) var wg sync.WaitGroup for range maxConcurrency { wg.Go(func() { @@ -539,12 +539,12 @@ func runMultipleConnections(ctx context.Context, t *testing.T) { }) } wg.Wait() - log.Infof("Running multiple connections: done") + log.Info("Running multiple connections: done") } func initTable(t *testing.T) { - log.Infof("initTable begin") - defer log.Infof("initTable complete") + log.Info("initTable begin") + defer log.Info("initTable complete") t.Run("cancel pending migrations", func(t *testing.T) { cancelQuery := "alter vitess_migration cancel all" @@ -588,7 +588,7 @@ func testSelectTableMetrics(t *testing.T) { fmt.Printf("# max op_order in table: %d\n", maxOpOrder) } - log.Infof("%s", writeMetrics.String()) + log.Info(writeMetrics.String()) ctx := t.Context() conn, err := mysql.Connect(ctx, &vtParams) @@ -600,7 +600,7 @@ func testSelectTableMetrics(t *testing.T) { row := rs.Named().Row() require.NotNil(t, row) - log.Infof("testSelectTableMetrics, row: %v", row) + log.Info(fmt.Sprintf("testSelectTableMetrics, row: %v", row)) numRows := row.AsInt64("num_rows", 0) sumUpdates := row.AsInt64("sum_updates", 0) assert.NotZero(t, numRows) diff --git 
a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go index e0df2f68309..647d99369e0 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go @@ -673,7 +673,7 @@ func generateDelete(t *testing.T, conn *mysql.Conn) error { } func runSingleConnection(ctx context.Context, t *testing.T, autoIncInsert bool, done *int64) { - log.Infof("Running single connection") + log.Info("Running single connection") conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) defer conn.Close() @@ -689,7 +689,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, autoIncInsert bool, defer periodicRest.Stop() for { if atomic.LoadInt64(done) == 1 { - log.Infof("Terminating single connection") + log.Info("Terminating single connection") return } switch rand.Int32N(3) { @@ -730,7 +730,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, autoIncInsert bool, } func runMultipleConnections(ctx context.Context, t *testing.T, autoIncInsert bool) { - log.Infof("Running multiple connections") + log.Info("Running multiple connections") var done int64 var wg sync.WaitGroup for range maxConcurrency { @@ -740,14 +740,14 @@ func runMultipleConnections(ctx context.Context, t *testing.T, autoIncInsert boo } <-ctx.Done() atomic.StoreInt64(&done, 1) - log.Infof("Running multiple connections: done") + log.Info("Running multiple connections: done") wg.Wait() - log.Infof("All connections cancelled") + log.Info("All connections cancelled") } func initTable(t *testing.T) { - log.Infof("initTable begin") - defer log.Infof("initTable complete") + log.Info("initTable begin") + defer log.Info("initTable complete") ctx := t.Context() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go index 98972d24b56..588a0e84d4e 100644 --- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -18,6 +18,7 @@ package emergencyreparent import ( "context" + "fmt" "os/exec" "sync" "testing" @@ -46,14 +47,14 @@ func TestTrivialERS(t *testing.T) { // is down, without issue for i := 1; i <= 4; i++ { out, err := utils.Ers(clusterInstance, nil, "60s", "30s") - log.Infof("ERS loop %d. EmergencyReparentShard Output: %v", i, out) + log.Info(fmt.Sprintf("ERS loop %d. EmergencyReparentShard Output: %v", i, out)) require.NoError(t, err) time.Sleep(5 * time.Second) } // We should do the same for vtctl binary for i := 1; i <= 4; i++ { out, err := utils.ErsWithVtctldClient(clusterInstance) - log.Infof("ERS-vtctldclient loop %d. EmergencyReparentShard Output: %v", i, out) + log.Info(fmt.Sprintf("ERS-vtctldclient loop %d. EmergencyReparentShard Output: %v", i, out)) require.NoError(t, err) time.Sleep(5 * time.Second) } @@ -119,7 +120,7 @@ func TestReparentDownPrimary(t *testing.T) { // Run forced reparent operation, this should now proceed unimpeded. out, err := utils.Ers(clusterInstance, tablets[1], "60s", "30s") - log.Infof("EmergencyReparentShard Output: %v", out) + log.Info(fmt.Sprintf("EmergencyReparentShard Output: %v", out)) require.NoError(t, err) // Check that old primary tablet is left around for human intervention. 
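TestTrivialERS above repeats the reparent call several times with a pause between attempts, logging each run's output as a single preformatted string. A hedged sketch of that loop using only the standard library; runERS is a placeholder for utils.Ers, not the real helper:

package main

import (
	"fmt"
	"log"
	"time"
)

// runERS stands in for the test helper that invokes EmergencyReparentShard
// and returns the command output.
func runERS(attempt int) (string, error) {
	return fmt.Sprintf("reparent attempt %d ok", attempt), nil
}

func main() {
	for i := 1; i <= 4; i++ {
		out, err := runERS(i)
		log.Print(fmt.Sprintf("ERS loop %d. EmergencyReparentShard Output: %v", i, out))
		if err != nil {
			log.Print(fmt.Sprintf("ERS loop %d failed: %v", i, err))
			return
		}
		time.Sleep(100 * time.Millisecond) // the real test sleeps 5s between loops
	}
}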
@@ -538,7 +539,7 @@ func TestERSForInitialization(t *testing.T) { var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { - log.Infof("Starting MySql for tablet %v", tablet.Alias) + log.Info(fmt.Sprintf("Starting MySql for tablet %v", tablet.Alias)) proc, err := tablet.MysqlctlProcess.StartProcess() require.NoError(t, err) mysqlCtlProcessList = append(mysqlCtlProcessList, proc) diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index 61cf8a94993..7078eb6370e 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -281,7 +281,7 @@ func TestSemiSyncBlockDueToDisruption(t *testing.T) { // runCommandWithSudo(t, "pfctl", "-e") runCommandWithSudo(t, "pfctl", "-f", "/etc/pf.conf") rules := runCommandWithSudo(t, "pfctl", "-s", "rules") - log.Errorf("Rules enforced - %v", rules) + log.Error(fmt.Sprintf("Rules enforced - %v", rules)) // Start a write that will be blocked by the primary waiting for semi-sync ACKs ch := make(chan any) @@ -304,7 +304,7 @@ func TestSemiSyncBlockDueToDisruption(t *testing.T) { case <-time.After(1 * time.Second): str, isPresent := tablets[0].VttabletProcess.GetVars()["SemiSyncMonitorWritesBlocked"] if isPresent { - log.Errorf("SemiSyncMonitorWritesBlocked - %v", str) + log.Error(fmt.Sprintf("SemiSyncMonitorWritesBlocked - %v", str)) } } } @@ -322,7 +322,7 @@ func TestSemiSyncBlockDueToDisruption(t *testing.T) { case <-time.After(30 * time.Second): t.Errorf("Timed out waiting for semi-sync to be unblocked") case <-ch: - log.Errorf("Woohoo, write finished!") + log.Error("Woohoo, write finished!") } } diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index be17e1ed440..d50274a33c2 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -237,7 +237,7 @@ func TestReparentFromOutsideWithNoPrimary(t *testing.T) { // FIXME: @Deepthi: is this needed, since we teardown the cluster, does this achieve any additional test coverage? // We will have to restart mysql to avoid hanging/locks due to external Reparent for _, tablet := range tablets { - log.Infof("Restarting MySql for tablet %v", tablet.Alias) + log.Info(fmt.Sprintf("Restarting MySql for tablet %v", tablet.Alias)) err := tablet.MysqlctlProcess.Stop() require.NoError(t, err) tablet.MysqlctlProcess.InitMysql = false diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 3b5a1be3c1f..6601160cbd2 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -151,7 +151,7 @@ func TeardownCluster(clusterInstance *cluster.LocalProcessCluster) { // We're running in the CI, so free up disk space for any // subsequent tests. 
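The teardown hunks treat cleanup failures as log-and-continue rather than fatal. A minimal sketch of that best-effort removal, assuming only the standard library; the directory name is illustrative:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// cleanupDataroot removes a scratch directory and only logs on failure,
// mirroring the best-effort teardown in the cluster test utilities.
func cleanupDataroot(root string) {
	if err := os.RemoveAll(root); err != nil {
		log.Print(fmt.Sprintf("Failed to remove previously used VTDATAROOT (%s): %v", root, err))
		return
	}
	log.Print(fmt.Sprintf("Deleted vtdataroot %q", root))
}

func main() {
	dir := filepath.Join(os.TempDir(), "vtdataroot-example") // example path only
	_ = os.MkdirAll(dir, 0o755)
	cleanupDataroot(dir)
}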
if err := os.RemoveAll(usedRoot); err != nil { - log.Errorf("Failed to remove previously used VTDATAROOT (%s): %v", usedRoot, err) + log.Error(fmt.Sprintf("Failed to remove previously used VTDATAROOT (%s): %v", usedRoot, err)) } } @@ -207,7 +207,7 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { - log.Infof("Starting MySql for tablet %v", tablet.Alias) + log.Info(fmt.Sprintf("Starting MySql for tablet %v", tablet.Alias)) proc, err := tablet.MysqlctlProcess.StartProcess() require.NoError(t, err, "Error starting start mysql") mysqlCtlProcessList = append(mysqlCtlProcessList, proc) @@ -291,7 +291,7 @@ func StartNewVTTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster clusterInstance.DefaultCharset) tablet.VttabletProcess.SupportsBackup = supportsBackup - log.Infof("Starting MySql for tablet %v", tablet.Alias) + log.Info(fmt.Sprintf("Starting MySql for tablet %v", tablet.Alias)) proc, err := tablet.MysqlctlProcess.StartProcess() require.NoError(t, err, "Error starting start mysql") if err := proc.Wait(); err != nil { @@ -644,9 +644,9 @@ func GetShardReplicationPositions(t *testing.T, clusterInstance *cluster.LocalPr strArray = strArray[:len(strArray)-1] // Truncate slice, remove empty line } if doPrint { - log.Infof("Positions:") + log.Info("Positions:") for _, pos := range strArray { - log.Infof("\t%s", pos) + log.Info("\t" + pos) } } return strArray diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index cefcc1644eb..41d009e6c64 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -91,21 +91,21 @@ func (c *threadParams) threadRun(wg *sync.WaitGroup, vtParams *mysql.ConnParams) conn, err := mysql.Connect(context.Background(), vtParams) if err != nil { - log.Errorf("error connecting to mysql with params %v: %v", vtParams, err) + log.Error(fmt.Sprintf("error connecting to mysql with params %v: %v", vtParams, err)) } defer conn.Close() if c.reservedConn { _, err = conn.ExecuteFetch("set default_week_format = 1", 1000, true) if err != nil { c.errors = append(c.errors, err) - log.Errorf("error setting default_week_format: %v", err) + log.Error(fmt.Sprintf("error setting default_week_format: %v", err)) } } for !c.quit { err = c.executeFunction(c, conn) if err != nil { c.errors = append(c.errors, err) - log.Errorf("error executing function %s: %v", c.typ, err) + log.Error(fmt.Sprintf("error executing function %s: %v", c.typ, err)) } c.rpcs++ // If notifications are requested, check if we already executed the @@ -144,13 +144,13 @@ func readExecute(c *threadParams, conn *mysql.Conn) error { } qr, err := conn.ExecuteFetch(fmt.Sprintf("SELECT %s FROM buffer WHERE id = %d", sel, criticalReadRowID), 1000, true) if err != nil { - log.Errorf("select attempt #%d, failed with err: %v", attempt, err) + log.Error(fmt.Sprintf("select attempt #%d, failed with err: %v", attempt, err)) // For a reserved connection, read query can fail as it does not go through the gateway and // goes to tablet directly and later is directed to use Gateway if the error is caused due to cluster failover operation. 
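readExecute and updateExecute in the buffer helpers tolerate exactly one internal error before treating failures as fatal. A self-contained sketch of that allowance counter; the reader type and its fields are simplified placeholders for threadParams:

package main

import (
	"errors"
	"fmt"
	"log"
)

// reader tolerates a single internal error before propagating failures,
// like the reserved-connection branch in readExecute.
type reader struct {
	internalErrs int
}

func (r *reader) read(fn func() error) error {
	if err := fn(); err != nil {
		r.internalErrs++
		if r.internalErrs > 1 {
			log.Print(fmt.Sprintf("More Read Errors: %d", r.internalErrs))
			return err
		}
		log.Print("This is okay once because we do not support buffering it.")
		return nil
	}
	return nil
}

func main() {
	r := &reader{}
	fail := func() error { return errors.New("failover in progress") }
	fmt.Println(r.read(fail)) // first failure: tolerated, prints <nil>
	fmt.Println(r.read(fail)) // second failure: returned to the caller
}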
if c.reservedConn { c.internalErrs++ if c.internalErrs > 1 { - log.Errorf("More Read Errors: %d", c.internalErrs) + log.Error(fmt.Sprintf("More Read Errors: %d", c.internalErrs)) return err } log.Error("This is okay once because we do not support buffering it.") @@ -159,7 +159,7 @@ func readExecute(c *threadParams, conn *mysql.Conn) error { return err } - log.Infof("select attempt #%d, rows: %d", attempt, len(qr.Rows)) + log.Info(fmt.Sprintf("select attempt #%d, rows: %d", attempt, len(qr.Rows))) return nil } @@ -181,31 +181,31 @@ func updateExecute(c *threadParams, conn *mysql.Conn) error { time.Sleep(dur) if err == nil { - log.Infof("update attempt #%d affected %v rows", attempt, result.RowsAffected) + log.Info(fmt.Sprintf("update attempt #%d affected %v rows", attempt, result.RowsAffected)) _, err = conn.ExecuteFetch("commit", 1000, true) if err != nil { - log.Errorf("UPDATE #%d failed during COMMIT, err: %v", attempt, err) + log.Error(fmt.Sprintf("UPDATE #%d failed during COMMIT, err: %v", attempt, err)) _, errRollback := conn.ExecuteFetch("rollback", 1000, true) if errRollback != nil { - log.Errorf("Error in rollback #%d: %v", attempt, errRollback) + log.Error(fmt.Sprintf("Error in rollback #%d: %v", attempt, errRollback)) } c.internalErrs++ if c.internalErrs > 1 { - log.Errorf("More Commit Errors: %d", c.internalErrs) + log.Error(fmt.Sprintf("More Commit Errors: %d", c.internalErrs)) return err } log.Error("This is okay once because we do not support buffering it.") } return nil } - log.Errorf("UPDATE #%d failed with err: %v", attempt, err) + log.Error(fmt.Sprintf("UPDATE #%d failed with err: %v", attempt, err)) _, errRollback := conn.ExecuteFetch("rollback", 1000, true) if errRollback != nil { - log.Errorf("Error in rollback #%d: %v", attempt, errRollback) + log.Error(fmt.Sprintf("Error in rollback #%d: %v", attempt, errRollback)) } c.internalErrs++ if c.internalErrs > 1 { - log.Errorf("More Rollback Errors: %d", c.internalErrs) + log.Error(fmt.Sprintf("More Rollback Errors: %d", c.internalErrs)) return err } log.Error("This is okay once because we do not support buffering it.") diff --git a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go index 5cc27a552f7..0614c5cc3cd 100644 --- a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go @@ -58,7 +58,7 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro minUnavailabilityInS := 1.0 if duration.Seconds() < minUnavailabilityInS { w := minUnavailabilityInS - duration.Seconds() - log.Infof("Waiting for %.1f seconds because the failover was too fast (took only %.3f seconds)", w, duration.Seconds()) + log.Info(fmt.Sprintf("Waiting for %.1f seconds because the failover was too fast (took only %.3f seconds)", w, duration.Seconds())) time.Sleep(time.Duration(w) * time.Second) } diff --git a/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go index fa7d6c034ae..ede18476ca8 100644 --- a/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go @@ -53,10 +53,10 @@ func waitForLowLag(t *testing.T, clusterInstance *cluster.LocalProcessCluster, k require.NoError(t, err, output) if lagSeconds <= acceptableLagSeconds { - log.Infof("waitForLowLag acceptable for 
workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds) + log.Info(fmt.Sprintf("waitForLowLag acceptable for workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds)) break } else { - log.Infof("waitForLowLag too high for workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds) + log.Info(fmt.Sprintf("waitForLowLag too high for workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds)) } time.Sleep(waitDuration) duration -= waitDuration diff --git a/go/test/endtoend/throttler/util.go b/go/test/endtoend/throttler/util.go index 145eed3f703..f1e22785008 100644 --- a/go/test/endtoend/throttler/util.go +++ b/go/test/endtoend/throttler/util.go @@ -355,14 +355,14 @@ func WaitForThrottlerStatusEnabled(t *testing.T, vtctldProcess *cluster.VtctldCl class := strings.ToLower(gjson.Get(tabletBody, "0.Class").String()) value := strings.ToLower(gjson.Get(tabletBody, "0.Value").String()) if class == "unhappy" && strings.Contains(value, "not serving") { - log.Infof("tablet %s is Not Serving, so ignoring throttler status as the throttler will not be Opened", tablet.Alias) + log.Info(fmt.Sprintf("tablet %s is Not Serving, so ignoring throttler status as the throttler will not be Opened", tablet.Alias)) return } status, err := GetThrottlerStatus(vtctldProcess, tablet) good := func() bool { if err != nil { - log.Errorf("GetThrottlerStatus failed: %v", err) + log.Error(fmt.Sprintf("GetThrottlerStatus failed: %v", err)) return false } if status.IsEnabled != enabled { @@ -425,7 +425,7 @@ func WaitForThrottledApp(t *testing.T, vtctldProcess *cluster.VtctldClientProces class := strings.ToLower(gjson.Get(tabletBody, "0.Class").String()) value := strings.ToLower(gjson.Get(tabletBody, "0.Value").String()) if class == "unhappy" && strings.Contains(value, "not serving") { - log.Infof("tablet %s is Not Serving, so ignoring throttler status as the throttler will not be Opened", tablet.Alias) + log.Info(fmt.Sprintf("tablet %s is Not Serving, so ignoring throttler status as the throttler will not be Opened", tablet.Alias)) return } select { @@ -478,12 +478,12 @@ func WaitForCheckThrottlerResult(t *testing.T, vtctldProcess *cluster.VtctldClie func getHTTPBody(url string) string { resp, err := http.Get(url) if err != nil { - log.Infof("http Get returns %+v", err) + log.Info(fmt.Sprintf("http Get returns %+v", err)) return "" } defer resp.Body.Close() if resp.StatusCode != 200 { - log.Infof("http Get returns status %d", resp.StatusCode) + log.Info(fmt.Sprintf("http Get returns status %d", resp.StatusCode)) return "" } respByte, _ := io.ReadAll(resp.Body) diff --git a/go/test/endtoend/topotest/consul/main_test.go b/go/test/endtoend/topotest/consul/main_test.go index 6b61aa4c633..b5f7e3e4270 100644 --- a/go/test/endtoend/topotest/consul/main_test.go +++ b/go/test/endtoend/topotest/consul/main_test.go @@ -81,13 +81,15 @@ func TestMain(m *testing.M) { VSchema: VSchema, } if err := clusterInstance.StartUnshardedKeyspace(*Keyspace, 0, false, clusterInstance.Cell); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } // Start vtgate if err := clusterInstance.StartVtgate(); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } diff --git a/go/test/endtoend/topotest/etcd2/main_test.go b/go/test/endtoend/topotest/etcd2/main_test.go index f17532e65f5..4807a2f3ee8 100644 --- a/go/test/endtoend/topotest/etcd2/main_test.go +++ b/go/test/endtoend/topotest/etcd2/main_test.go @@ -82,13 +82,15 @@ 
func TestMain(m *testing.M) { VSchema: VSchema, } if err := clusterInstance.StartUnshardedKeyspace(*Keyspace, 0, false, clusterInstance.Cell); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } // Start vtgate if err := clusterInstance.StartVtgate(); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } diff --git a/go/test/endtoend/topotest/zk2/main_test.go b/go/test/endtoend/topotest/zk2/main_test.go index 5f95b78c507..6f153f95981 100644 --- a/go/test/endtoend/topotest/zk2/main_test.go +++ b/go/test/endtoend/topotest/zk2/main_test.go @@ -81,13 +81,15 @@ func TestMain(m *testing.M) { VSchema: VSchema, } if err := clusterInstance.StartUnshardedKeyspace(*Keyspace, 0, false, clusterInstance.Cell); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } // Start vtgate if err := clusterInstance.StartVtgate(); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } diff --git a/go/test/endtoend/transaction/twopc/fuzz/fuzzer_test.go b/go/test/endtoend/transaction/twopc/fuzz/fuzzer_test.go index 69165c98e2a..1ee6e8cb518 100644 --- a/go/test/endtoend/transaction/twopc/fuzz/fuzzer_test.go +++ b/go/test/endtoend/transaction/twopc/fuzz/fuzzer_test.go @@ -150,7 +150,7 @@ func TestTwoPCFuzzTest(t *testing.T) { // Verify that all the transactions run were actually atomic and no data issues have occurred. fz.verifyTransactionsWereAtomic(t) - log.Errorf("Verification complete. All good!") + log.Error("Verification complete. All good!") }) } } @@ -451,10 +451,10 @@ func prs(t *testing.T) { shard := shards[rand.IntN(len(shards))] vttablets := shard.Vttablets newPrimary := vttablets[rand.IntN(len(vttablets))] - log.Errorf("Running PRS for - %v/%v with new primary - %v", keyspaceName, shard.Name, newPrimary.Alias) + log.Error(fmt.Sprintf("Running PRS for - %v/%v with new primary - %v", keyspaceName, shard.Name, newPrimary.Alias)) err := clusterInstance.VtctldClientProcess.PlannedReparentShard(keyspaceName, shard.Name, newPrimary.Alias) if err != nil { - log.Errorf("error running PRS - %v", err) + log.Error(fmt.Sprintf("error running PRS - %v", err)) } } @@ -463,10 +463,10 @@ func ers(t *testing.T) { shard := shards[rand.IntN(len(shards))] vttablets := shard.Vttablets newPrimary := vttablets[rand.IntN(len(vttablets))] - log.Errorf("Running ERS for - %v/%v with new primary - %v", keyspaceName, shard.Name, newPrimary.Alias) + log.Error(fmt.Sprintf("Running ERS for - %v/%v with new primary - %v", keyspaceName, shard.Name, newPrimary.Alias)) _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("EmergencyReparentShard", fmt.Sprintf("%s/%s", keyspaceName, shard.Name), "--new-primary", newPrimary.Alias) if err != nil { - log.Errorf("error running ERS - %v", err) + log.Error(fmt.Sprintf("error running ERS - %v", err)) } } @@ -475,10 +475,10 @@ func vttabletRestarts(t *testing.T) { shard := shards[rand.IntN(len(shards))] vttablets := shard.Vttablets tablet := vttablets[rand.IntN(len(vttablets))] - log.Errorf("Restarting vttablet for - %v/%v - %v", keyspaceName, shard.Name, tablet.Alias) + log.Error(fmt.Sprintf("Restarting vttablet for - %v/%v - %v", keyspaceName, shard.Name, tablet.Alias)) err := tablet.VttabletProcess.TearDown() if err != nil { - log.Errorf("error stopping vttablet - %v", err) + log.Error(fmt.Sprintf("error stopping vttablet - %v", err)) return } tablet.VttabletProcess.ServingStatus = "SERVING" @@ -489,7 +489,7 @@ func vttabletRestarts(t *testing.T) { } 
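The TestMain hunks for the consul, etcd2, and zk2 topo tests all replace log.Fatal with an explicit error log followed by os.Exit(1). A standalone sketch of that shape; startCluster is a hypothetical stand-in for the StartUnshardedKeyspace / StartVtgate calls:

package main

import (
	"errors"
	"fmt"
	"log"
	"os"
)

// startCluster stands in for the cluster setup calls made in the topo
// test mains; here it always fails so the error path is exercised.
func startCluster() error {
	return errors.New("example: keyspace failed to start")
}

func main() {
	if err := startCluster(); err != nil {
		// Log the error, then exit explicitly instead of relying on log.Fatal.
		log.Print(fmt.Sprint(err))
		os.Exit(1)
	}
	fmt.Println("cluster started")
}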
// Sometimes vttablets fail to connect to the topo server due to a minor blip there. // We don't want to fail the test, so we retry setting up the vttablet. - log.Errorf("error restarting vttablet - %v", err) + log.Error(fmt.Sprintf("error restarting vttablet - %v", err)) time.Sleep(1 * time.Second) } } @@ -533,12 +533,12 @@ func moveTablesFuzzer(t *testing.T) { err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, VSchema) require.NoError(t, err) } - log.Errorf("MoveTables from - %v to %v", srcKeyspace, targetKeyspace) + log.Error(fmt.Sprintf("MoveTables from - %v to %v", srcKeyspace, targetKeyspace)) mtw := cluster.NewMoveTables(t, clusterInstance, workflow, targetKeyspace, srcKeyspace, "twopc_fuzzer_update", []string{topodatapb.TabletType_REPLICA.String()}) // Initiate MoveTables for twopc_fuzzer_update. output, err := mtw.Create() if err != nil { - log.Errorf("error creating MoveTables - %v, output - %v", err, output) + log.Error(fmt.Sprintf("error creating MoveTables - %v, output - %v", err, output)) return } moveTablesCount++ @@ -562,7 +562,7 @@ func reshardFuzzer(t *testing.T) { srcShards = "40-80,80-" targetShards = "40-" } - log.Errorf("Reshard from - \"%v\" to \"%v\"", srcShards, targetShards) + log.Error(fmt.Sprintf("Reshard from - \"%v\" to \"%v\"", srcShards, targetShards)) twopcutil.AddShards(t, clusterInstance, keyspaceName, strings.Split(targetShards, ",")) err := twopcutil.RunReshard(t, clusterInstance, "TestTwoPCFuzzTest", keyspaceName, srcShards, targetShards) require.NoError(t, err) @@ -573,7 +573,7 @@ func mysqlRestarts(t *testing.T) { shard := shards[rand.IntN(len(shards))] vttablets := shard.Vttablets tablet := vttablets[rand.IntN(len(vttablets))] - log.Errorf("Restarting MySQL for - %v/%v tablet - %v", keyspaceName, shard.Name, tablet.Alias) + log.Error(fmt.Sprintf("Restarting MySQL for - %v/%v tablet - %v", keyspaceName, shard.Name, tablet.Alias)) pidFile := path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/mysql.pid", tablet.TabletUID)) pidBytes, err := os.ReadFile(pidFile) if err != nil { @@ -583,11 +583,11 @@ func mysqlRestarts(t *testing.T) { } pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) if err != nil { - log.Errorf("Error in conversion to integer: %v", err) + log.Error(fmt.Sprintf("Error in conversion to integer: %v", err)) return } err = syscallutil.Kill(pid, syscall.SIGKILL) if err != nil { - log.Errorf("Error in killing process: %v", err) + log.Error(fmt.Sprintf("Error in killing process: %v", err)) } } diff --git a/go/test/endtoend/transaction/twopc/stress/stress_test.go b/go/test/endtoend/transaction/twopc/stress/stress_test.go index e767dbd8618..d87f6437189 100644 --- a/go/test/endtoend/transaction/twopc/stress/stress_test.go +++ b/go/test/endtoend/transaction/twopc/stress/stress_test.go @@ -333,7 +333,7 @@ func mysqlRestartShard3(t *testing.T) error { shard := clusterInstance.Keyspaces[0].Shards[2] vttablets := shard.Vttablets tablet := vttablets[0] - log.Errorf("Restarting MySQL for - %v/%v tablet - %v", keyspaceName, shard.Name, tablet.Alias) + log.Error(fmt.Sprintf("Restarting MySQL for - %v/%v tablet - %v", keyspaceName, shard.Name, tablet.Alias)) pidFile := path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/mysql.pid", tablet.TabletUID)) pidBytes, err := os.ReadFile(pidFile) if err != nil { diff --git a/go/test/endtoend/transaction/twopc/utils/utils.go b/go/test/endtoend/transaction/twopc/utils/utils.go index b545629f198..d8dbd92918a 100644 --- a/go/test/endtoend/transaction/twopc/utils/utils.go +++ 
b/go/test/endtoend/transaction/twopc/utils/utils.go @@ -56,14 +56,14 @@ func ClearOutTable(t testing.TB, vtParams mysql.ConnParams, tableName string) { } conn, err := mysql.Connect(ctx, &vtParams) if err != nil { - log.Errorf("Error in connection - %v\n", err) + log.Error(fmt.Sprintf("Error in connection - %v\n", err)) time.Sleep(100 * time.Millisecond) continue } res, err := conn.ExecuteFetch(fmt.Sprintf("SELECT count(*) FROM %v", tableName), 1, false) if err != nil { - log.Errorf("Error in selecting - %v\n", err) + log.Error(fmt.Sprintf("Error in selecting - %v\n", err)) conn.Close() time.Sleep(100 * time.Millisecond) continue @@ -79,7 +79,7 @@ func ClearOutTable(t testing.TB, vtParams mysql.ConnParams, tableName string) { _, err = conn.ExecuteFetch(fmt.Sprintf("DELETE FROM %v LIMIT 10000", tableName), 10000, false) conn.Close() if err != nil { - log.Errorf("Error in cleanup deletion - %v\n", err) + log.Error(fmt.Sprintf("Error in cleanup deletion - %v\n", err)) time.Sleep(100 * time.Millisecond) continue } @@ -109,7 +109,7 @@ func RunMultiShardCommitWithDelay(t *testing.T, conn *mysql.Conn, commitDelayTim wg.Go(func() { _, err := utils.ExecAllowError(t, conn, "commit") if err != nil { - log.Errorf("Error in commit - %v", err) + log.Error(fmt.Sprintf("Error in commit - %v", err)) } }) } diff --git a/go/test/endtoend/vault/vault_server.go b/go/test/endtoend/vault/vault_server.go index 624c94cfc03..6356846507f 100644 --- a/go/test/endtoend/vault/vault_server.go +++ b/go/test/endtoend/vault/vault_server.go @@ -62,14 +62,14 @@ func (vs *Server) start() error { vs.execPath = path.Join(os.Getenv("EXTRA_BIN"), vaultExecutableName) fileStat, err := os.Stat(vs.execPath) if err != nil || fileStat.Size() != vaultDownloadSize { - log.Warningf("Downloading Vault binary to: %v", vs.execPath) + log.Warn(fmt.Sprintf("Downloading Vault binary to: %v", vs.execPath)) err := downloadExecFile(vs.execPath, vaultDownloadSource) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } } else { - log.Warningf("Vault binary already present at %v , not re-downloading", vs.execPath) + log.Warn(fmt.Sprintf("Vault binary already present at %v , not re-downloading", vs.execPath)) } // Create Vault log directory @@ -77,7 +77,7 @@ func (vs *Server) start() error { if _, err := os.Stat(vs.logDir); os.IsNotExist(err) { err := os.Mkdir(vs.logDir, 0o700) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } } @@ -92,7 +92,7 @@ func (vs *Server) start() error { newHclFile := path.Join(vs.logDir, vaultConfigFileName) err = os.WriteFile(newHclFile, hcl, 0o700) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } @@ -104,7 +104,7 @@ func (vs *Server) start() error { logFile, err := os.Create(path.Join(vs.logDir, "log.txt")) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } vs.proc.Stderr = logFile @@ -112,7 +112,7 @@ func (vs *Server) start() error { vs.proc.Env = append(vs.proc.Env, os.Environ()...) 
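ClearOutTable above loops until the table is empty: connect, count, delete a bounded batch, and on any error log, sleep briefly, and retry. A compact sketch of that loop with the database calls stubbed out; countRows and deleteBatch are placeholders, not the mysql package API:

package main

import (
	"fmt"
	"log"
	"time"
)

var remaining = 25

// countRows and deleteBatch stand in for the SELECT count(*) and the
// bounded DELETE used by ClearOutTable.
func countRows() (int, error) { return remaining, nil }

func deleteBatch(limit int) error {
	if remaining < limit {
		remaining = 0
	} else {
		remaining -= limit
	}
	return nil
}

func main() {
	for {
		n, err := countRows()
		if err != nil {
			log.Print(fmt.Sprintf("Error in selecting - %v", err))
			time.Sleep(100 * time.Millisecond)
			continue
		}
		if n == 0 {
			return
		}
		if err := deleteBatch(10); err != nil {
			log.Print(fmt.Sprintf("Error in cleanup deletion - %v", err))
			time.Sleep(100 * time.Millisecond)
		}
	}
}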
- log.Infof("Running Vault server with command: %v", strings.Join(vs.proc.Args, " ")) + log.Info(fmt.Sprintf("Running Vault server with command: %v", strings.Join(vs.proc.Args, " "))) err = vs.proc.Start() if err != nil { diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go index 6be291b8462..809dd4262e5 100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -183,17 +183,17 @@ func setupVaultServer(t *testing.T, vs *Server) (string, string) { setup.Stdout = logFile setup.Env = append(setup.Env, os.Environ()...) - log.Infof("Running Vault setup command: %v", strings.Join(setup.Args, " ")) + log.Info(fmt.Sprintf("Running Vault setup command: %v", strings.Join(setup.Args, " "))) err := setup.Start() if err != nil { - log.Errorf("Error during Vault setup: %v", err) + log.Error(fmt.Sprintf("Error during Vault setup: %v", err)) } setup.Wait() var secretID, roleID string file, err := os.Open(logFilePath) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } defer file.Close() @@ -206,7 +206,7 @@ func setupVaultServer(t *testing.T, vs *Server) (string, string) { } } if err := scanner.Err(); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return roleID, secretID diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index aa556ca43aa..c3816a3c593 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -432,10 +432,10 @@ func (vc *VitessCluster) CleanupDataroot(t *testing.T, recreate bool) { retries := 3 for i := 1; i <= retries; i++ { if err = os.RemoveAll(dir); err == nil { - log.Infof("Deleted vtdataroot %q", dir) + log.Info(fmt.Sprintf("Deleted vtdataroot %q", dir)) break } - log.Errorf("Failed to delete vtdataroot (attempt %d of %d) %q: %v", i, retries, dir, err) + log.Error(fmt.Sprintf("Failed to delete vtdataroot (attempt %d of %d) %q: %v", i, retries, dir, err)) time.Sleep(1 * time.Second) } require.NoError(t, err) @@ -457,7 +457,7 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, err := vc.VtctldClient.CreateKeyspace(keyspace.Name, keyspace.SidecarDBName, "") require.NoError(t, err) - log.Infof("Applying throttler config for keyspace %s", keyspace.Name) + log.Info("Applying throttler config for keyspace " + keyspace.Name) req := &vtctldatapb.UpdateThrottlerConfigRequest{Enable: true, Threshold: throttlerConfig.Threshold, CustomQuery: throttlerConfig.Query} res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, keyspace.Name, req, nil, nil) require.NoError(t, err, res) @@ -474,7 +474,7 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, cellsToWatch += cellsToWatchSb466.String() for _, cell := range cells { if len(cell.Vtgates) == 0 { - log.Infof("Starting vtgate") + log.Info("Starting vtgate") vc.StartVtgate(t, cell, cellsToWatch) } } @@ -570,7 +570,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } shardNames := strings.Split(names, ",") - log.Infof("Addshards got %d shards with %+v", len(shardNames), shardNames) + log.Info(fmt.Sprintf("Addshards got %d shards with %+v", len(shardNames), shardNames)) isSharded := len(shardNames) > 1 primaryTabletUID := 0 for ind, shardName := range shardNames { @@ -578,9 +578,9 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa tabletIndex := 0 shard := &Shard{Name: shardName, IsSharded: isSharded, 
Tablets: make(map[string]*Tablet, 1)} if _, ok := keyspace.Shards[shardName]; ok { - log.Infof("Shard %s already exists, not adding", shardName) + log.Info(fmt.Sprintf("Shard %s already exists, not adding", shardName)) } else { - log.Infof("Adding Shard %s", shardName) + log.Info("Adding Shard " + shardName) if err := vc.VtctldClient.ExecuteCommand("CreateShard", keyspace.Name+"/"+shardName); err != nil { t.Fatalf("CreateShard command failed with %+v\n", err) } @@ -591,7 +591,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa tablets := make([]*Tablet, 0) if i == 0 { // only add primary tablet for first cell, so first time CreateShard is called - log.Infof("Adding Primary tablet") + log.Info("Adding Primary tablet") primary, proc, err := vc.AddTablet(t, cell, keyspace, shard, "replica", tabletID+tabletIndex) require.NoError(t, err) require.NotNil(t, primary) @@ -603,7 +603,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } for range numReplicas { - log.Infof("Adding Replica tablet") + log.Info("Adding Replica tablet") tablet, proc, err := vc.AddTablet(t, cell, keyspace, shard, "replica", tabletID+tabletIndex) require.NoError(t, err) require.NotNil(t, tablet) @@ -614,7 +614,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa // Only create RDONLY tablets in the default cell if cell.Name == cluster.DefaultCell { for range numRdonly { - log.Infof("Adding RdOnly tablet") + log.Info("Adding RdOnly tablet") tablet, proc, err := vc.AddTablet(t, cell, keyspace, shard, "rdonly", tabletID+tabletIndex) require.NoError(t, err) require.NotNil(t, tablet) @@ -625,7 +625,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } for ind, proc := range dbProcesses { - log.Infof("Waiting for mysql process for tablet %s", tablets[ind].Name) + log.Info("Waiting for mysql process for tablet " + tablets[ind].Name) if err := proc.Wait(); err != nil { // Retry starting the database process before giving up. t.Logf("%v :: Unable to start mysql server for %v. Will cleanup files and processes, then retry...", err, tablets[ind].Vttablet) @@ -634,7 +634,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa // want to use as that is the most common problem. 
tablets[ind].DbServer.Stop() if _, err = exec.Command("fuser", "-n", "tcp", "-k", strconv.Itoa(tablets[ind].DbServer.MySQLPort)).Output(); err != nil { - log.Errorf("Failed to kill process listening on port %d: %v", tablets[ind].DbServer.MySQLPort, err) + log.Error(fmt.Sprintf("Failed to kill process listening on port %d: %v", tablets[ind].DbServer.MySQLPort, err)) } // Sleep for the kernel's TCP TIME_WAIT timeout to avoid the // port already in use error, which is the common cause for @@ -649,16 +649,16 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa mysqlctlLog := path.Join(vtdataroot, "/tmp/mysqlctl.INFO") logBytes, ferr := os.ReadFile(mysqlctlLog) if ferr == nil { - log.Errorf("mysqlctl log contents:\n%s", string(logBytes)) + log.Error("mysqlctl log contents:\n" + string(logBytes)) } else { - log.Errorf("Failed to read the mysqlctl log file %q: %v", mysqlctlLog, ferr) + log.Error(fmt.Sprintf("Failed to read the mysqlctl log file %q: %v", mysqlctlLog, ferr)) } mysqldLog := path.Join(vtdataroot, fmt.Sprintf("/vt_%010d/error.log", tablets[ind].Vttablet.TabletUID)) logBytes, ferr = os.ReadFile(mysqldLog) if ferr == nil { - log.Errorf("mysqld error log contents:\n%s", string(logBytes)) + log.Error("mysqld error log contents:\n" + string(logBytes)) } else { - log.Errorf("Failed to read the mysqld error log file %q: %v", mysqldLog, ferr) + log.Error(fmt.Sprintf("Failed to read the mysqld error log file %q: %v", mysqldLog, ferr)) } output, _ := dbcmd.CombinedOutput() t.Fatalf("%v :: Unable to start mysql server for %v; Output: %s", err, @@ -667,7 +667,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } } for ind, tablet := range tablets { - log.Infof("Running Setup() for vttablet %s", tablets[ind].Name) + log.Info("Running Setup() for vttablet " + tablets[ind].Name) err := tablet.Vttablet.Setup() require.NoError(t, err) // Set time_zone to UTC for all tablets. Without this it fails locally on some MacOS setups. 
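When a tablet's mysqld fails to come up, AddShards dumps the mysqlctl and mysqld logs as single Error strings before failing the test. A hedged sketch of that dump-on-failure helper; the file paths are examples, not the cluster's real layout:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// dumpLog logs a file's contents on a best-effort basis, the way AddShards
// surfaces mysqlctl.INFO and the mysqld error log when startup fails.
func dumpLog(name, path string) {
	contents, err := os.ReadFile(path)
	if err != nil {
		log.Print(fmt.Sprintf("Failed to read the %s log file %q: %v", name, path, err))
		return
	}
	log.Print(name + " log contents:\n" + string(contents))
}

func main() {
	dir := os.TempDir()
	example := filepath.Join(dir, "mysqlctl.INFO") // illustrative path only
	_ = os.WriteFile(example, []byte("example log line\n"), 0o644)
	dumpLog("mysqlctl", example)
	dumpLog("mysqld error", filepath.Join(dir, "does-not-exist.log"))
}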
@@ -679,10 +679,10 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } } require.NotEqual(t, 0, primaryTabletUID, "Should have created a primary tablet") - log.Infof("InitializeShard and make %d primary", primaryTabletUID) + log.Info(fmt.Sprintf("InitializeShard and make %d primary", primaryTabletUID)) require.NoError(t, vc.VtctldClient.InitializeShard(keyspace.Name, shardName, cells[0].Name, primaryTabletUID)) - log.Infof("Finished creating shard %s", shard.Name) + log.Info("Finished creating shard " + shard.Name) } for _, shard := range shardNames { require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, keyspace.Name, shard)) @@ -720,7 +720,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa err := vc.VtctldClient.ExecuteCommand("RebuildKeyspaceGraph", keyspace.Name) require.NoError(t, err) - log.Infof("Waiting for throttler config to be applied on all shards") + log.Info("Waiting for throttler config to be applied on all shards") for _, shardName := range shardNames { shard := keyspace.Shards[shardName] for _, tablet := range shard.Tablets { @@ -728,11 +728,11 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa Alias: tablet.Name, HTTPPort: tablet.Vttablet.Port, } - log.Infof("+ Waiting for throttler config to be applied on %s, type=%v", tablet.Name, tablet.Vttablet.TabletType) + log.Info(fmt.Sprintf("+ Waiting for throttler config to be applied on %s, type=%v", tablet.Name, tablet.Vttablet.TabletType)) throttler.WaitForThrottlerStatusEnabled(t, vc.VtctldClient, clusterTablet, true, nil, time.Minute) } } - log.Infof("Throttler config applied on all shards") + log.Info("Throttler config applied on all shards") return nil } @@ -741,10 +741,10 @@ func (vc *VitessCluster) DeleteShard(t testing.TB, cellName string, ksName strin shard := vc.Cells[cellName].Keyspaces[ksName].Shards[shardName] require.NotNil(t, shard) for _, tab := range shard.Tablets { - log.Infof("Shutting down tablet %s", tab.Name) + log.Info("Shutting down tablet " + tab.Name) tab.Vttablet.TearDown() } - log.Infof("Deleting Shard %s", shardName) + log.Info("Deleting Shard " + shardName) // TODO how can we avoid the use of even_if_serving? 
if output, err := vc.VtctldClient.ExecuteCommandWithOutput("DeleteShard", "--recursive", "--even-if-serving", ksName+"/"+shardName); err != nil { t.Fatalf("DeleteShard command failed with error %+v and output %s\n", err, output) @@ -782,9 +782,9 @@ func (vc *VitessCluster) teardown() { for _, cell := range vc.Cells { for _, vtgate := range cell.Vtgates { if err := vtgate.TearDown(); err != nil { - log.Errorf("Error in vtgate teardown - %s", err.Error()) + log.Error("Error in vtgate teardown - " + err.Error()) } else { - log.Infof("vtgate teardown successful") + log.Info("vtgate teardown successful") } } } @@ -800,22 +800,22 @@ func (vc *VitessCluster) teardown() { _ = vc.TearDownKeyspace(keyspace) } if err := vc.Vtctld.TearDown(); err != nil { - log.Infof("Error stopping Vtctld: %s", err.Error()) + log.Info("Error stopping Vtctld: " + err.Error()) } else { log.Info("Successfully stopped vtctld") } for _, cell := range vc.Cells { if err := vc.Topo.TearDown(cell.Name, originalVtdataroot, vtdataroot, false, "etcd2"); err != nil { - log.Infof("Error in etcd teardown - %s", err.Error()) + log.Info("Error in etcd teardown - " + err.Error()) } else { - log.Infof("Successfully tore down topo %s", vc.Topo.Name) + log.Info("Successfully tore down topo " + vc.Topo.Name) } } if vc.VTOrcProcess != nil { if err := vc.VTOrcProcess.TearDown(); err != nil { - log.Infof("Error stopping VTOrc: %s", err.Error()) + log.Info("Error stopping VTOrc: " + err.Error()) } } } @@ -827,15 +827,15 @@ func (vc *VitessCluster) TearDownKeyspace(ks *Keyspace) error { eg.Go(func() error { if tablet.DbServer != nil && tablet.DbServer.TabletUID > 0 { if err := tablet.DbServer.Stop(); err != nil { - log.Infof("Error stopping mysql process associated with vttablet %s: %v", tablet.Name, err) + log.Info(fmt.Sprintf("Error stopping mysql process associated with vttablet %s: %v", tablet.Name, err)) return err } } if err := tablet.Vttablet.TearDown(); err != nil { - log.Infof("Error stopping vttablet %s: %v", tablet.Name, err) + log.Info(fmt.Sprintf("Error stopping vttablet %s: %v", tablet.Name, err)) return err } else { - log.Infof("Successfully stopped vttablet %s", tablet.Name) + log.Info("Successfully stopped vttablet " + tablet.Name) } return nil }) @@ -847,7 +847,7 @@ func (vc *VitessCluster) TearDownKeyspace(ks *Keyspace) error { func (vc *VitessCluster) DeleteKeyspace(t testing.TB, ksName string) { out, err := vc.VtctldClient.ExecuteCommandWithOutput("DeleteKeyspace", ksName, "--recursive") if err != nil { - log.Error("DeleteKeyspace failed with error: , output: %s", err, out) + log.Error(fmt.Sprintf("DeleteKeyspace failed with error: %v, output: %s", err, out)) } require.NoError(t, err) } @@ -864,9 +864,9 @@ func (vc *VitessCluster) TearDown() { }() select { case <-done: - log.Infof("TearDown() was successful") + log.Info("TearDown() was successful") case <-time.After(1 * time.Minute): - log.Infof("TearDown() timed out") + log.Info("TearDown() timed out") } // some processes seem to hang around for a bit time.Sleep(5 * time.Second) @@ -879,7 +879,7 @@ func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName for _, shard := range keyspace.Shards { for _, tablet := range shard.Tablets { if tablet.Vttablet.GetTabletStatus() == "SERVING" && (tabletType == "" || strings.EqualFold(tablet.Vttablet.GetTabletType(), tabletType)) { - log.Infof("Serving status of tablet %s is %s, %s", tablet.Name, tablet.Vttablet.ServingStatus, tablet.Vttablet.GetTabletStatus()) + log.Info(fmt.Sprintf("Serving status of tablet 
%s is %s, %s", tablet.Name, tablet.Vttablet.ServingStatus, tablet.Vttablet.GetTabletStatus())) tablets[tablet.Name] = tablet.Vttablet } } @@ -928,14 +928,14 @@ func (vc *VitessCluster) startQuery(t *testing.T, query string) (func(t *testing commit := func(t *testing.T) { _, err = conn.ExecuteFetch("commit", 1000, false) - log.Infof("startQuery:commit:err: %+v", err) + log.Info(fmt.Sprintf("startQuery:commit:err: %+v", err)) conn.Close() - log.Infof("startQuery:after closing connection") + log.Info("startQuery:after closing connection") } rollback := func(t *testing.T) { defer conn.Close() _, err = conn.ExecuteFetch("rollback", 1000, false) - log.Infof("startQuery:rollback:err: %+v", err) + log.Info(fmt.Sprintf("startQuery:rollback:err: %+v", err)) } return commit, rollback } diff --git a/go/test/endtoend/vreplication/fk_ext_load_generator_test.go b/go/test/endtoend/vreplication/fk_ext_load_generator_test.go index 46659da18c1..e2e4048abb6 100644 --- a/go/test/endtoend/vreplication/fk_ext_load_generator_test.go +++ b/go/test/endtoend/vreplication/fk_ext_load_generator_test.go @@ -211,7 +211,7 @@ func (lg *SimpleLoadGenerator) execQueryWithRetry(query string) (*sqltypes.Resul default: } if lg.runCtx != nil && lg.runCtx.Err() != nil { - log.Infof("Load generator run context done, query never completed: %q", query) + log.Info(fmt.Sprintf("Load generator run context done, query never completed: %q", query)) errCh <- errors.New("load generator stopped") return } @@ -246,7 +246,7 @@ func (lg *SimpleLoadGenerator) execQueryWithRetry(query string) (*sqltypes.Resul case qr := <-qrCh: return qr, nil case err := <-errCh: - log.Infof("query %q failed with error %v", query, err) + log.Info(fmt.Sprintf("query %q failed with error %v", query, err)) return nil, err } } @@ -254,7 +254,7 @@ func (lg *SimpleLoadGenerator) execQueryWithRetry(query string) (*sqltypes.Resul func (lg *SimpleLoadGenerator) Load() error { lg.state = LoadGeneratorStateLoading defer func() { lg.state = LoadGeneratorStateStopped }() - log.Infof("Inserting initial FK data") + log.Info("Inserting initial FK data") queries := []string{ "insert into parent values(1, 'parent1'), (2, 'parent2');", "insert into child values(1, 1, 'child11'), (2, 1, 'child21'), (3, 2, 'child32');", @@ -263,20 +263,20 @@ func (lg *SimpleLoadGenerator) Load() error { _, err := lg.exec(query) require.NoError(lg.vc.t, err) } - log.Infof("Done inserting initial FK data") + log.Info("Done inserting initial FK data") return nil } func (lg *SimpleLoadGenerator) Start() error { if lg.state == LoadGeneratorStateRunning { - log.Infof("Load generator already running") + log.Info("Load generator already running") return nil } lg.state = LoadGeneratorStateRunning go func() { defer func() { lg.state = LoadGeneratorStateStopped - log.Infof("Load generator stopped") + log.Info("Load generator stopped") }() lg.runCtx, lg.runCtxCancel = context.WithCancel(lg.ctx) defer func() { @@ -285,19 +285,19 @@ func (lg *SimpleLoadGenerator) Start() error { }() t := lg.vc.t var err error - log.Infof("Load generator starting") + log.Info("Load generator starting") for i := 0; ; i++ { if i%1000 == 0 { // Log occasionally to show that the test is still running. 
- log.Infof("Load simulation iteration %d", i) + log.Info(fmt.Sprintf("Load simulation iteration %d", i)) } select { case <-lg.ctx.Done(): - log.Infof("Load generator context done") + log.Info("Load generator context done") lg.ch <- true return case <-lg.runCtx.Done(): - log.Infof("Load generator run context done") + log.Info("Load generator run context done") lg.ch <- true return default: @@ -320,22 +320,22 @@ func (lg *SimpleLoadGenerator) Start() error { func (lg *SimpleLoadGenerator) Stop() error { if lg.state == LoadGeneratorStateStopped { - log.Infof("Load generator already stopped") + log.Info("Load generator already stopped") return nil } if lg.runCtx != nil && lg.runCtxCancel != nil { - log.Infof("Canceling load generator") + log.Info("Canceling load generator") lg.runCtxCancel() } // Wait for ch to be closed or we hit a timeout. timeout := vdiffTimeout select { case <-lg.ch: - log.Infof("Load generator stopped") + log.Info("Load generator stopped") lg.state = LoadGeneratorStateStopped return nil case <-time.After(timeout): - log.Infof("Timed out waiting for load generator to stop") + log.Info("Timed out waiting for load generator to stop") return errors.New("timed out waiting for load generator to stop") } } @@ -495,7 +495,7 @@ func waitForColumn(t *testing.T, vtgateProcess *cluster.VtgateProcess, ks, tbl, break } if colName, exists := colDef["name"]; exists && colName == col { - log.Infof("Found column '%s' in table '%s' for keyspace '%s'", col, tbl, ks) + log.Info(fmt.Sprintf("Found column '%s' in table '%s' for keyspace '%s'", col, tbl, ks)) return nil } } diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go index f51618cff35..d5d899cac09 100644 --- a/go/test/endtoend/vreplication/fk_ext_test.go +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -213,10 +213,9 @@ func checkRowCounts(t *testing.T, keyspace string, sourceShards, targetShards [] count, _ = getCount(tab, "child") targetChildCount += count } - log.Infof("Source parent count: %d, child count: %d, target parent count: %d, child count: %d.", - sourceParentCount, sourceChildCount, targetParentCount, targetChildCount) + log.Info(fmt.Sprintf("Source parent count: %d, child count: %d, target parent count: %d, child count: %d.", sourceParentCount, sourceChildCount, targetParentCount, targetChildCount)) if sourceParentCount != targetParentCount || sourceChildCount != targetChildCount { - log.Infof("Row counts do not match for keyspace %s, source shards: %v, target shards: %v", keyspace, sourceShards, targetShards) + log.Info(fmt.Sprintf("Row counts do not match for keyspace %s, source shards: %v, target shards: %v", keyspace, sourceShards, targetShards)) return false } return true @@ -226,7 +225,7 @@ func checkRowCounts(t *testing.T, keyspace string, sourceShards, targetShards [] // it is another check to ensure that both tables have the same number of rows in the source and target shards after load generation // has stopped. 
func compareRowCounts(t *testing.T, keyspace string, sourceShards, targetShards []string) error { - log.Infof("Comparing row counts for keyspace %s, source shards: %v, target shards: %v", keyspace, sourceShards, targetShards) + log.Info(fmt.Sprintf("Comparing row counts for keyspace %s, source shards: %v, target shards: %v", keyspace, sourceShards, targetShards)) lg.Stop() defer lg.Start() if err := waitForCondition("load generator to stop", func() bool { return lg.State() == LoadGeneratorStateStopped }, 10*time.Second); err != nil { @@ -288,8 +287,7 @@ func areRowCountsEqual(t *testing.T) bool { childRowCount := getRowCount(t, vtgateConn, "target2.child") parentCopyRowCount := getRowCount(t, vtgateConn, "target1.parent_copy") childCopyRowCount := getRowCount(t, vtgateConn, "target1.child_copy") - log.Infof("Post-materialize row counts are parent: %d, child: %d, parent_copy: %d, child_copy: %d", - parentRowCount, childRowCount, parentCopyRowCount, childCopyRowCount) + log.Info(fmt.Sprintf("Post-materialize row counts are parent: %d, child: %d, parent_copy: %d, child_copy: %d", parentRowCount, childRowCount, parentCopyRowCount, childCopyRowCount)) if parentRowCount != parentCopyRowCount || childRowCount != childCopyRowCount { return false } diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 002f47450c7..972e4ef31ac 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -159,7 +159,7 @@ func TestFKWorkflow(t *testing.T) { } mt.SwitchReadsAndWrites() - log.Infof("Switch traffic done") + log.Info("Switch traffic done") if withLoad { ctx, cancel = context.WithCancel(context.Background()) @@ -192,9 +192,9 @@ func insertInitialFKData(t *testing.T) { sourceKeyspace := "fksource" shard := "0" db := fmt.Sprintf("%s:%s", sourceKeyspace, shard) - log.Infof("Inserting initial FK data") + log.Info("Inserting initial FK data") execMultipleQueries(t, vtgateConn, db, initialFKData) - log.Infof("Done inserting initial FK data") + log.Info("Done inserting initial FK data") type tableCounts struct { name string @@ -243,7 +243,7 @@ func (ls *fkLoadSimulator) simulateLoad() { var err error for i := 0; ; i++ { if i%1000 == 0 { - log.Infof("Load simulation iteration %d", i) + log.Info(fmt.Sprintf("Load simulation iteration %d", i)) } select { case <-ls.ctx.Done(): diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index d11cf17b2c9..d9c46668410 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -99,7 +99,7 @@ func execQueryWithRetry(t *testing.T, conn *mysql.Conn, query string, timeout ti require.FailNow(t, fmt.Sprintf("query %q did not succeed before the timeout of %s; last seen result: %v", query, timeout, qr)) case <-ticker.C: - log.Infof("query %q failed with error %v, retrying in %ds", query, err, defaultTick) + log.Info(fmt.Sprintf("query %q failed with error %v, retrying in %ds", query, err, defaultTick)) } } } @@ -107,7 +107,7 @@ func execQueryWithRetry(t *testing.T, conn *mysql.Conn, query string, timeout ti func execQuery(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { qr, err := conn.ExecuteFetch(query, 1000, false) if err != nil { - log.Errorf("Error executing query: %s: %v", query, err) + log.Error(fmt.Sprintf("Error executing query: %s: %v", query, err)) } require.NoError(t, err) return qr @@ -265,7 +265,7 @@ func waitForRowCountInTablet(t *testing.T, vttablet 
*cluster.VttabletProcess, da got := row.AsInt64("c", 0) require.LessOrEqual(t, got, want) if got == want { - log.Infof("waitForRowCountInTablet: found %d rows in table %s on tablet %s", want, table, vttablet.Name) + log.Info(fmt.Sprintf("waitForRowCountInTablet: found %d rows in table %s on tablet %s", want, table, vttablet.Name)) return } select { @@ -358,7 +358,7 @@ func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wa done := false timer := time.NewTimer(workflowStateTimeout) defer timer.Stop() - log.Infof("Waiting for workflow %q to fully reach %q state", ksWorkflow, wantState) + log.Info(fmt.Sprintf("Waiting for workflow %q to fully reach %q state", ksWorkflow, wantState)) for { output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", keyspace, "show", "--workflow", workflow, "--compact", "--include-logs=false") require.NoError(t, err, output) @@ -394,7 +394,7 @@ func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wa return true }) if done { - log.Infof("Workflow %q has fully reached the desired state of %q", ksWorkflow, wantState) + log.Info(fmt.Sprintf("Workflow %q has fully reached the desired state of %q", ksWorkflow, wantState)) return } select { @@ -644,7 +644,7 @@ func getDebugVar(t *testing.T, port int, varPath []string) (string, error) { var val []byte var err error url := fmt.Sprintf("http://localhost:%d/debug/vars", port) - log.Infof("url: %s, varPath: %s", url, strings.Join(varPath, ":")) + log.Info(fmt.Sprintf("url: %s, varPath: %s", url, strings.Join(varPath, ":"))) body := getHTTPBody(t, url) val, _, _, err = jsonparser.Get(body, varPath...) require.NoError(t, err) @@ -686,7 +686,7 @@ func confirmWorkflowHasCopiedNoData(t *testing.T, defaultTargetKs, workflow stri // compact, and easy to compare results for tests. func getShardRoutingRules(t *testing.T) string { output, err := osExec(t, "vtctldclient", []string{"--server", getVtctldGRPCURL(), "GetShardRoutingRules"}) - log.Infof("GetShardRoutingRules err: %+v, output: %+v", err, output) + log.Info(fmt.Sprintf("GetShardRoutingRules err: %+v, output: %+v", err, output)) require.Nilf(t, err, output) require.NotNil(t, output) @@ -829,7 +829,7 @@ func (lg *loadGenerator) stop() { // Wait for buffering to stop and additional records to be inserted by start // after traffic is switched. 
time.Sleep(loadTestBufferingWindowDuration * 2) - log.Infof("Canceling load") + log.Info("Canceling load") lg.cancel() lg.wg.Wait() } @@ -840,22 +840,20 @@ func (lg *loadGenerator) start() { var connectionCount atomic.Int64 var id int64 - log.Infof("loadGenerator: starting") + log.Info("loadGenerator: starting") queryTemplate := "insert into loadtest(id, name) values (%d, 'name-%d')" var totalQueries, successfulQueries int64 var deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors int64 lg.wg.Add(1) defer func() { defer lg.wg.Done() - log.Infof("loadGenerator: totalQueries: %d, successfulQueries: %d, deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", - totalQueries, successfulQueries, deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors) + log.Info(fmt.Sprintf("loadGenerator: totalQueries: %d, successfulQueries: %d, deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", totalQueries, successfulQueries, deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors)) }() for { select { case <-lg.ctx.Done(): - log.Infof("loadGenerator: context cancelled") - log.Infof("loadGenerator: deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", - deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors) + log.Info("loadGenerator: context cancelled") + log.Info(fmt.Sprintf("loadGenerator: deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors)) require.Equal(t, int64(0), deniedErrors) require.Equal(t, int64(0), otherErrors) require.Equal(t, int64(0), reshardedErrors) @@ -939,12 +937,12 @@ func (lg *loadGenerator) waitForCount(want int64) { func appendToQueryLog(msg string) { file, err := os.OpenFile(queryLog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { - log.Errorf("Error opening query log file: %v", err) + log.Error(fmt.Sprintf("Error opening query log file: %v", err)) return } defer file.Close() if _, err := file.WriteString(msg + "\n"); err != nil { - log.Errorf("Error writing to query log file: %v", err) + log.Error(fmt.Sprintf("Error writing to query log file: %v", err)) } } diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go index 00f76182561..1fff7ea3857 100644 --- a/go/test/endtoend/vreplication/initial_data_test.go +++ b/go/test/endtoend/vreplication/initial_data_test.go @@ -31,13 +31,13 @@ func insertInitialData(t *testing.T) { t.Run("insertInitialData", func(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() - log.Infof("Inserting initial data") + log.Info("Inserting initial data") lines, _ := os.ReadFile("unsharded_init_data.sql") execMultipleQueries(t, vtgateConn, defaultSourceKs+":0", string(lines)) execVtgateQuery(t, vtgateConn, defaultSourceKs+":0", "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") execVtgateQuery(t, vtgateConn, defaultSourceKs+":0", "insert into order_seq(id, next_id, cache) values(0, 100, 100);") execVtgateQuery(t, vtgateConn, defaultSourceKs+":0", "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") - log.Infof("Done inserting initial data") + log.Info("Done inserting initial data") waitForRowCount(t, vtgateConn, defaultSourceKs+":0", "product", 2) 
waitForRowCount(t, vtgateConn, defaultSourceKs+":0", "customer", 3) @@ -47,10 +47,10 @@ func insertInitialData(t *testing.T) { insertJSONValues(t) insertLargeTransactionForChunkTesting(t, vtgateConn, defaultSourceKs+":0", 50000) - log.Infof("Inserted large transaction for chunking tests") + log.Info("Inserted large transaction for chunking tests") execVtgateQuery(t, vtgateConn, defaultSourceKs, "delete from customer where cid >= 50000 and cid < 50100") - log.Infof("Cleaned up chunk testing rows from source keyspace") + log.Info("Cleaned up chunk testing rows from source keyspace") }) } diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index e324f7dcb28..7aa11a4eb4e 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -47,8 +47,8 @@ func TestMoveTablesBuffering(t *testing.T) { tstWorkflowReverseReadsAndWrites(t) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) } - log.Infof("SwitchWrites done") + log.Info("SwitchWrites done") lg.stop() - log.Infof("TestMoveTablesBuffering: done") + log.Info("TestMoveTablesBuffering: done") } diff --git a/go/test/endtoend/vreplication/multi_tenant_test.go b/go/test/endtoend/vreplication/multi_tenant_test.go index e2c3d2952c7..1ced9ebb6ad 100644 --- a/go/test/endtoend/vreplication/multi_tenant_test.go +++ b/go/test/endtoend/vreplication/multi_tenant_test.go @@ -257,7 +257,7 @@ func TestMultiTenantSimple(t *testing.T) { lastIndex = insertRows(lastIndex, targetKeyspace) actualRowsInserted := getRowCount(t, vtgateConn, fmt.Sprintf("%s.%s", targetKeyspace, "t1")) - log.Infof("Migration completed, total rows in target: %d", actualRowsInserted) + log.Info(fmt.Sprintf("Migration completed, total rows in target: %d", actualRowsInserted)) require.Equal(t, lastIndex, int64(actualRowsInserted)) t.Run("Test ApplyKeyspaceRoutingRules", func(t *testing.T) { @@ -406,7 +406,7 @@ func TestMultiTenantSharded(t *testing.T) { actualRowsInserted := getRowCount(t, vtgateConn, fmt.Sprintf("%s.%s", targetKeyspace, "t1")) require.Equal(t, lastIndex, int64(actualRowsInserted)) require.Equal(t, lastIndex, int64(getRowCount(t, vtgateConn, fmt.Sprintf("%s.%s", targetKeyspace, "t1")))) - log.Infof("Migration completed, total rows in target: %d", actualRowsInserted) + log.Info(fmt.Sprintf("Migration completed, total rows in target: %d", actualRowsInserted)) } func confirmBothReadsAndWritesSwitched(t *testing.T) { @@ -491,7 +491,7 @@ func TestMultiTenantComplex(t *testing.T) { totalRowsInserted := totalRowsInsertedPerTenant * numTenants totalActualRowsInserted := getRowCount(t, vtgateConn, fmt.Sprintf("%s.%s", mtm.targetKeyspace, "t1")) require.Equal(t, totalRowsInserted, totalActualRowsInserted) - log.Infof("Migration completed, total rows inserted in target: %d", totalActualRowsInserted) + log.Info(fmt.Sprintf("Migration completed, total rows inserted in target: %d", totalActualRowsInserted)) }) } @@ -562,7 +562,7 @@ func getInitialTabletIdForTenant(tenantId int64) int { } func (mtm *multiTenantMigration) setup(tenantId int64) { - log.Infof("Creating MoveTables for tenant %d", tenantId) + log.Info(fmt.Sprintf("Creating MoveTables for tenant %d", tenantId)) mtm.setLastID(tenantId, 0) sourceKeyspace := getSourceKeyspace(tenantId) _, err := vc.AddKeyspace(mtm.t, []*Cell{vc.Cells["zone1"]}, sourceKeyspace, "0", stVSchema, stSchema, diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go 
b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index d4078c8d2a4..a3394457e6b 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -483,14 +483,14 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { // Confirm shard targeting works before we switch any traffic. // Everything should be routed to the source keyspace (customer). - log.Infof("Testing reverse route (target->source) for shard being switched") + log.Info("Testing reverse route (target->source) for shard being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") - log.Infof("Testing reverse route (target->source) for shard NOT being switched") + log.Info("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) @@ -579,17 +579,17 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { currentCustomerCount = getCustomerCount(t, "") t.Run("Switch sequence traffic forward and reverse and validate workflows still exist and sequence routing works", func(t *testing.T) { wfSeq.switchTraffic() - log.Infof("SwitchTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + log.Info("SwitchTraffic was successful for workflow seqTgt.seq, with output " + lastOutput) insertCustomers(t) wfSeq.reverseTraffic() - log.Infof("ReverseTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + log.Info("ReverseTraffic was successful for workflow seqTgt.seq, with output " + lastOutput) insertCustomers(t) wfSeq.switchTraffic() - log.Infof("SwitchTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + log.Info("SwitchTraffic was successful for workflow seqTgt.seq, with output " + lastOutput) insertCustomers(t) diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 090d2933488..0c76060bcc5 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -238,14 +238,14 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { // Confirm shard targeting works before we switch any traffic. // Everything should be routed to the source keyspace (customer). 
- log.Infof("Testing reverse route (target->source) for shard being switched") + log.Info("Testing reverse route (target->source) for shard being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), fmt.Sprintf("target: %s.80-.primary", sourceKeyspace), "Query was routed to the target before any SwitchTraffic") - log.Infof("Testing reverse route (target->source) for shard NOT being switched") + log.Info("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 3549f99c847..dfba9da2e7a 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -372,7 +372,7 @@ func getVtctldGRPCURL() string { func applyShardRoutingRules(t *testing.T, rules string) { output, err := osExec(t, "vtctldclient", []string{"--server", getVtctldGRPCURL(), "ApplyShardRoutingRules", "--rules", rules}) - log.Infof("ApplyShardRoutingRules err: %+v, output: %+v", err, output) + log.Info(fmt.Sprintf("ApplyShardRoutingRules err: %+v, output: %+v", err, output)) require.NoError(t, err, output) require.NotNil(t, output) } diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index ec53f14539f..376df23eaeb 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -155,7 +155,7 @@ func TestMoveTablesTZ(t *testing.T) { hoursBehind = 8 } // extra logging, so that we can spot any issues in CI test runs - log.Infof("times are %s, %s, hours behind %d", dt2a, dt2b, hoursBehind) + log.Info(fmt.Sprintf("times are %s, %s, hours behind %d", dt2a, dt2b, hoursBehind)) require.Equal(t, hoursBehind*3600, targetUTCTUnix-sourceUSPacific) } diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 01b4af3b51b..651b1c934e5 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -260,7 +260,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, // rows for each second in the diff duration, depending on the test host vCPU count. perSecondCount := int64(math.Min(float64(perVCpuCount*int64(runtime.NumCPU())), 1000000)) totalRowsToCreate := seconds * perSecondCount - log.Infof("Test host has %d vCPUs. Generating %d rows in the customer table to test --max-diff-duration", runtime.NumCPU(), totalRowsToCreate) + log.Info(fmt.Sprintf("Test host has %d vCPUs. 
Generating %d rows in the customer table to test --max-diff-duration", runtime.NumCPU(), totalRowsToCreate)) for i := int64(0); i < totalRowsToCreate; i += chunkSize { generateMoreCustomers(t, tc.sourceKs, chunkSize) } diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index 9da5c4e245a..2a5806c0ac9 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -109,7 +109,7 @@ func waitForVDiff2ToComplete(t *testing.T, ksWorkflow, cells, uuid string, compl case <-ch: return info case <-time.After(vdiffTimeout): - log.Errorf("VDiff never completed for UUID %s. Latest output: %s", uuid, jsonStr) + log.Error(fmt.Sprintf("VDiff never completed for UUID %s. Latest output: %s", uuid, jsonStr)) require.FailNow(t, "VDiff never completed for UUID "+uuid) return nil } @@ -146,7 +146,7 @@ func doVtctldclientVDiff(t *testing.T, keyspace, workflow, cells string, want *e require.False(t, info.HasMismatch, "vdiff results: %+v", info) } if strings.Contains(t.Name(), "AcrossDBVersions") { - log.Errorf("VDiff resume cannot be guaranteed between major MySQL versions due to implied collation differences, skipping resume test...") + log.Error("VDiff resume cannot be guaranteed between major MySQL versions due to implied collation differences, skipping resume test...") return } }) @@ -173,7 +173,7 @@ func performVDiff2Action(t *testing.T, ksWorkflow, cells, action, actionArg stri } output, err = execVDiffWithRetry(t, expectError, args) - log.Infof("vdiff output: %+v (err: %+v)", output, err) + log.Info(fmt.Sprintf("vdiff output: %+v (err: %+v)", output, err)) if !expectError { require.NoError(t, err) ouuid := gjson.Get(output, "UUID").String() @@ -204,7 +204,7 @@ type vdiffResult struct { // execVDiffWithRetry will ignore transient errors that can occur during workflow state changes. func execVDiffWithRetry(t *testing.T, expectError bool, args []string) (string, error) { - log.Infof("Executing vdiff with retry with args: %+v", args) + log.Info(fmt.Sprintf("Executing vdiff with retry with args: %+v", args)) ctx, cancel := context.WithTimeout(context.Background(), vdiffRetryTimeout*3) defer cancel() vdiffResultCh := make(chan vdiffResult) @@ -212,7 +212,7 @@ func execVDiffWithRetry(t *testing.T, expectError bool, args []string) (string, var output string var err error retry := false - log.Infof("vdiff attempt: args=%+v", args) + log.Info(fmt.Sprintf("vdiff attempt: args=%+v", args)) for { select { case <-ctx.Done(): @@ -226,16 +226,16 @@ func execVDiffWithRetry(t *testing.T, expectError bool, args []string) (string, time.Sleep(vdiffRetryInterval) } retry = false - log.Infof("Calling vtctldclient with args: %+v", args) + log.Info(fmt.Sprintf("Calling vtctldclient with args: %+v", args)) output, err = vc.VtctldClient.ExecuteCommandWithOutput(args...) 
- log.Infof("vtctldclient finished: err=%v output=%q", err, output) + log.Info(fmt.Sprintf("vtctldclient finished: err=%v output=%q", err, output)) if err != nil { if expectError { result := vdiffResult{output: output, err: err} vdiffResultCh <- result return } - log.Infof("vdiff error: %s", err) + log.Info(fmt.Sprintf("vdiff error: %s", err)) if isVDiffRetryable(err.Error()) { retry = true } else { @@ -287,7 +287,7 @@ func encodeString(in string) string { func generateMoreCustomers(t *testing.T, keyspace string, numCustomers int64) { vtgateConn, closeConn := getVTGateConn() defer closeConn() - log.Infof("Generating more test data with an additional %d customers", numCustomers) + log.Info(fmt.Sprintf("Generating more test data with an additional %d customers", numCustomers)) res := execVtgateQuery(t, vtgateConn, keyspace, "select max(cid) from customer") startingID, _ := res.Rows[0][0].ToInt64() insert := strings.Builder{} diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index 4ca439adc2c..f1ab057d60a 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -57,7 +57,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { for { select { case <-loadCtx.Done(): - log.Infof("load cancelled") + log.Info("load cancelled") return default: index += 1 @@ -111,7 +111,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { // confirm that show all shows the correct workflow and only that workflow. output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--format", "json", "--workflow", "wf1", "--target-keyspace", defaultTargetKs, "show", "all") require.NoError(t, err) - log.Infof("VDiff output: %s", output) + log.Info("VDiff output: " + output) count := gjson.Get(output, "..#").Int() wf := gjson.Get(output, "0.Workflow").String() ksName := gjson.Get(output, "0.Keyspace").String() diff --git a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go index 3db9bcd68b3..76fb0b41b2a 100644 --- a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go +++ b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go @@ -163,7 +163,7 @@ func populate(ctx context.Context, t *testing.T, done chan bool, insertTemplate, for { select { case <-ctx.Done(): - log.Infof("load cancelled") + log.Info("load cancelled") return default: query := fmt.Sprintf(insertTemplate, id, id, id) diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index bdba2461437..dae6afa386c 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -678,13 +678,13 @@ func testVStreamCellFlag(t *testing.T) { switch err { case nil: if len(events) > 0 { - log.Infof("received %d events", len(events)) + log.Info(fmt.Sprintf("received %d events", len(events))) rowsReceived = true } case io.EOF: - log.Infof("stream ended without data") + log.Info("stream ended without data") default: - log.Infof("%s:: remote error: %v", time.Now(), err) + log.Info(fmt.Sprintf("%s:: remote error: %v", time.Now(), err)) } }) wg.Wait() @@ -790,11 +790,11 @@ func testVStreamFrom(t *testing.T, vtgate *cluster.VtgateProcess, table string, gotRows, err := streamConn.FetchNext(nil) require.NoError(t, err) - log.Infof("QR1:%v\n", gotRows) + log.Info(fmt.Sprintf("QR1:%v\n", gotRows)) gotRows, err = 
streamConn.FetchNext(nil) require.NoError(t, err) - log.Infof("QR2:%+v\n", gotRows) + log.Info(fmt.Sprintf("QR2:%+v\n", gotRows)) ch <- true }() @@ -1181,11 +1181,11 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou for _, tab := range tablets { if strings.Contains(targetShards, ","+tab.Shard+",") { targetTablets = append(targetTablets, tab) - log.Infof("Waiting for vrepl to catch up on %s since it IS a target shard", tab.Shard) + log.Info(fmt.Sprintf("Waiting for vrepl to catch up on %s since it IS a target shard", tab.Shard)) catchup(t, tab, workflow, "Reshard") } else { sourceTablets = append(sourceTablets, tab) - log.Infof("Not waiting for vrepl to catch up on %s since it is NOT a target shard", tab.Shard) + log.Info(fmt.Sprintf("Not waiting for vrepl to catch up on %s since it is NOT a target shard", tab.Shard)) continue } } @@ -1567,10 +1567,10 @@ func waitForLowLag(t *testing.T, keyspace, workflow string) { require.NoError(t, err) if lagSeconds <= acceptableLagSeconds { - log.Infof("waitForLowLag acceptable for workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds) + log.Info(fmt.Sprintf("waitForLowLag acceptable for workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds)) break } else { - log.Infof("waitForLowLag too high for workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds) + log.Info(fmt.Sprintf("waitForLowLag too high for workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds)) } time.Sleep(waitDuration) duration -= waitDuration @@ -1655,8 +1655,7 @@ func reshardAction(t *testing.T, action, workflow, keyspaceName, sourceShards, t args = append(args, extraFlags...) output, err := vc.VtctldClient.ExecuteCommandWithOutput(args...) 
if output != "" { - log.Infof("Output of vtctldclient Reshard %s for %s workflow:\n++++++\n%s\n--------\n", - action, workflow, output) + log.Info(fmt.Sprintf("Output of vtctldclient Reshard %s for %s workflow:\n++++++\n%s\n--------\n", action, workflow, output)) } if err != nil { t.Fatalf("Reshard %s command failed with %+v\nOutput: %s", action, err, output) @@ -1777,7 +1776,7 @@ func testSwitchTrafficPermissionChecks(t *testing.T, workflowType, sourceKeyspac applyPrivileges := func(query string) { for _, shard := range sourceShards { primary := vc.getPrimaryTablet(t, sourceKeyspace, shard) - log.Infof("Running permission query on %s: %s", primary.Name, query) + log.Info(fmt.Sprintf("Running permission query on %s: %s", primary.Name, query)) _, err := primary.QueryTablet(query, primary.Keyspace, false) require.NoError(t, err) } @@ -1986,7 +1985,7 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { // Temporary code: print lots of info for debugging occasional flaky failures in customer reshard in CI for multicell test debug := true if debug { - log.Infof("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow) + log.Info(fmt.Sprintf("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow)) ksShards := []string{defaultSourceKs + "/0", defaultTargetKs + "/-80", defaultTargetKs + "/80-"} printShardPositions(vc, ksShards) defaultCell := vc.Cells[vc.CellNames[0]] @@ -2005,11 +2004,10 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { for _, query := range queries { qr, err := tab.QueryTablet(query, "", false) require.NoError(t, err) - log.Infof("\nTablet:%s.%s.%s.%d\nQuery: %s\n%+v\n", - tab.Cell, tab.Keyspace, tab.Shard, tab.TabletUID, query, qr.Rows) + log.Info(fmt.Sprintf("\nTablet:%s.%s.%s.%d\nQuery: %s\n%+v\n", tab.Cell, tab.Keyspace, tab.Shard, tab.TabletUID, query, qr.Rows)) } } - log.Infof("------------------- END Extra debug info %s SwitchWrites %s", msg, ksWorkflow) + log.Info(fmt.Sprintf("------------------- END Extra debug info %s SwitchWrites %s", msg, ksWorkflow)) } } diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index aa49e305738..f4a5807c98c 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -57,11 +57,11 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { insertData := func() { timer := time.NewTimer(extendedTimeout) defer timer.Stop() - log.Infof("Inserting data into customer") + log.Info("Inserting data into customer") cid := startCid for { if !initialDataInserted && cid > warmupRowCount { - log.Infof("Done inserting initial data into customer") + log.Info("Done inserting initial data into customer") initialDataInserted = true ch <- true } @@ -72,14 +72,14 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { _, _ = vtgateConn.ExecuteFetch(query, 10000, false) select { case <-timer.C: - log.Infof("Done inserting data into customer") + log.Info("Done inserting data into customer") return default: } } } go func() { - log.Infof("Starting to vstream from replica") + log.Info("Starting to vstream from replica") vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ Keyspace: "product", @@ -106,9 +106,9 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { require.NoError(t, err) _, err = reader.Recv() require.NoError(t, err) - log.Infof("About to sleep in vstreaming to block the vstream Recv() channel") + log.Info("About to 
sleep in vstreaming to block the vstream Recv() channel") time.Sleep(extendedTimeout) - log.Infof("Done vstreaming") + log.Info("Done vstreaming") }() go insertData() @@ -118,10 +118,10 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { numApplyVSchema := 0 timer := time.NewTimer(extendedTimeout) defer timer.Stop() - log.Infof("Started ApplyVSchema") + log.Info("Started ApplyVSchema") for { if err := vc.VtctldClient.ExecuteCommand("ApplyVSchema", "--vschema={}", "product"); err != nil { - log.Errorf("ApplyVSchema command failed with %+v\n", err) + log.Error(fmt.Sprintf("ApplyVSchema command failed with %+v\n", err)) return } numApplyVSchema++ @@ -130,7 +130,7 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { } select { case <-timer.C: - log.Infof("Done ApplyVSchema") + log.Info("Done ApplyVSchema") ch <- true return default: diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 557ab374f47..b0a5fb4c395 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "io" + "os" "strings" "sync" "sync/atomic" @@ -55,7 +56,8 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { ctx := t.Context() vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ @@ -131,9 +133,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { } } case io.EOF: - log.Infof("Stream Ended") + log.Info("Stream Ended") default: - log.Infof("%s:: remote error: %v", time.Now(), err) + log.Info(fmt.Sprintf("%s:: remote error: %v", time.Now(), err)) } if done.Load() { @@ -232,7 +234,8 @@ func testVStreamWithFailover(t *testing.T, failover bool) { ctx := t.Context() vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ @@ -300,9 +303,9 @@ func testVStreamWithFailover(t *testing.T, failover bool) { } } case io.EOF: - log.Infof("Stream Ended") + log.Info("Stream Ended") default: - log.Infof("%s:: remote error: %v", time.Now(), err) + log.Info(fmt.Sprintf("%s:: remote error: %v", time.Now(), err)) } if done.Load() { @@ -325,7 +328,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { insertMu.Lock() output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", defaultSourceKs+"/0", "--new-primary=zone1-101") insertMu.Unlock() - log.Infof("output of first PRS is %s", output) + log.Info("output of first PRS is " + output) require.NoError(t, err) } case 2: @@ -333,7 +336,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { insertMu.Lock() output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", defaultSourceKs+"/0", "--new-primary=zone1-100") insertMu.Unlock() - log.Infof("output of second PRS is %s", output) + log.Info("output of second PRS is " + output) require.NoError(t, err) } time.Sleep(100 * time.Millisecond) @@ -409,7 +412,7 @@ func insertRow(keyspace, table string, id int) { vtgateConn.ExecuteFetch("begin", 1000, false) _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (name) values ('%s%d')", table, table, id), 1000, false) if err != nil { - log.Errorf("error inserting row %d: %v", id, err) 
+ log.Error(fmt.Sprintf("error inserting row %d: %v", id, err)) } vtgateConn.ExecuteFetch("commit", 1000, false) } @@ -447,7 +450,8 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID ctx := t.Context() vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ @@ -524,10 +528,10 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID } } case io.EOF: - log.Infof("Stream Ended") + log.Info("Stream Ended") done = true default: - log.Infof("%s:: remote error: %v", time.Now(), err) + log.Info(fmt.Sprintf("%s:: remote error: %v", time.Now(), err)) numErrors++ if numErrors > 10 { // if vtgate is continuously unavailable error the test return @@ -591,7 +595,8 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven ctx := t.Context() vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ @@ -664,10 +669,10 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven } } case io.EOF: - log.Infof("Stream Ended") + log.Info("Stream Ended") done = true default: - log.Errorf("Returned err %v", err) + log.Error(fmt.Sprintf("Returned err %v", err)) done = true } if done { @@ -692,7 +697,7 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven break } } - log.Infof("ne=%v", ne) + log.Info(fmt.Sprintf("ne=%v", ne)) // The number of row events streamed by the VStream API should match the number of rows inserted. // This is important for sharded tables, where we need to ensure that no row events are missed during the resharding process. 
@@ -1217,10 +1222,10 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n } } case io.EOF: - log.Infof("Stream Ended") + log.Info("Stream Ended") done = true default: - log.Errorf("remote error: %v", err) + log.Error(fmt.Sprintf("remote error: %v", err)) done = true } } @@ -1285,7 +1290,7 @@ func TestVStreamHeartbeats(t *testing.T) { require.Equalf(t, 1, gotNumFieldEvents[k], "incorrect number of field events for table %s, got %d", k, gotNumFieldEvents[k]) } require.GreaterOrEqual(t, gotNumRowEvents["heartbeat"], tc.expectedHeartbeats, "incorrect number of heartbeat events received") - log.Infof("Total number of heartbeat events received: %v", gotNumRowEvents["heartbeat"]) + log.Info(fmt.Sprintf("Total number of heartbeat events received: %v", gotNumRowEvents["heartbeat"])) delete(gotNumRowEvents, "heartbeat") require.Equal(t, expectedNumRowEvents, gotNumRowEvents) }) diff --git a/go/test/endtoend/vreplication/wrappers_test.go b/go/test/endtoend/vreplication/wrappers_test.go index 2ca1b3bb724..cd9c517e1fb 100644 --- a/go/test/endtoend/vreplication/wrappers_test.go +++ b/go/test/endtoend/vreplication/wrappers_test.go @@ -101,7 +101,7 @@ func newMoveTables(vc *VitessCluster, mt *moveTablesWorkflow, flavor workflowFla default: panic("unreachable") } - log.Infof("Using moveTables flavor: %s", mt2.Flavor()) + log.Info("Using moveTables flavor: " + mt2.Flavor()) return mt2 } @@ -250,7 +250,7 @@ func newReshard(vc *VitessCluster, rs *reshardWorkflow, flavor workflowFlavor) i default: panic("unreachable") } - log.Infof("Using reshard flavor: %s", rs2.Flavor()) + log.Info("Using reshard flavor: " + rs2.Flavor()) return rs2 } diff --git a/go/test/endtoend/vtcombo/recreate/recreate_test.go b/go/test/endtoend/vtcombo/recreate/recreate_test.go index 3e7a70ac00f..dacb1e0509f 100644 --- a/go/test/endtoend/vtcombo/recreate/recreate_test.go +++ b/go/test/endtoend/vtcombo/recreate/recreate_test.go @@ -86,7 +86,7 @@ func TestMain(m *testing.M) { return m.Run(), nil }() if err != nil { - log.Errorf("top level error: %v\n", err) + log.Error(fmt.Sprintf("top level error: %v\n", err)) os.Exit(1) } else { os.Exit(exitcode) @@ -133,7 +133,7 @@ func getMySQLConnectionCount(ctx context.Context, session *vtgateconn.VTGateSess func assertTabletsPresent(t *testing.T) { tmpCmd := exec.Command("vtctldclient", "--server", grpcAddress, "GetTablets", "--cell", "test") - log.Infof("Running vtctldclient with command: %v", tmpCmd.Args) + log.Info(fmt.Sprintf("Running vtctldclient with command: %v", tmpCmd.Args)) output, err := tmpCmd.CombinedOutput() require.Nil(t, err) diff --git a/go/test/endtoend/vtcombo/vttest_sample_test.go b/go/test/endtoend/vtcombo/vttest_sample_test.go index 4a21face5ae..a99f027fa10 100644 --- a/go/test/endtoend/vtcombo/vttest_sample_test.go +++ b/go/test/endtoend/vtcombo/vttest_sample_test.go @@ -108,7 +108,7 @@ func TestMain(m *testing.M) { return m.Run(), nil }() if err != nil { - log.Errorf("top level error: %v\n", err) + log.Error(fmt.Sprintf("top level error: %v\n", err)) os.Exit(1) } else { os.Exit(exitcode) @@ -165,7 +165,7 @@ func TestStandalone(t *testing.T) { func assertVSchemaExists(t *testing.T, grpcAddress string) { tmpCmd := exec.Command("vtctldclient", "--server", grpcAddress, "--compact", "GetVSchema", "routed") - log.Infof("Running vtctldclient with command: %v", tmpCmd.Args) + log.Info(fmt.Sprintf("Running vtctldclient with command: %v", tmpCmd.Args)) output, err := tmpCmd.CombinedOutput() require.NoError(t, err, fmt.Sprintf("Output:\n%v", 
string(output))) @@ -240,7 +240,7 @@ func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateCo func assertTabletsPresent(t *testing.T) { tmpCmd := exec.Command("vtctldclient", "--server", grpcAddress, "GetTablets", "--cell", "test") - log.Infof("Running vtctldclient with command: %v", tmpCmd.Args) + log.Info(fmt.Sprintf("Running vtctldclient with command: %v", tmpCmd.Args)) output, err := tmpCmd.CombinedOutput() require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go b/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go index adcd96f4022..4cfd27bae96 100644 --- a/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go +++ b/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go @@ -750,10 +750,10 @@ func TestFkFuzzTest(t *testing.T) { // We encountered an error while running the fuzzer. Let's print out the information! if fz.firstFailureInfo != nil { - log.Errorf("Failing query - %v", fz.firstFailureInfo.queryToFail) + log.Error(fmt.Sprintf("Failing query - %v", fz.firstFailureInfo.queryToFail)) for idx, table := range fkTables { - log.Errorf("MySQL data for %v -\n%v", table, fz.firstFailureInfo.mysqlState[idx].Rows) - log.Errorf("Vitess data for %v -\n%v", table, fz.firstFailureInfo.vitessState[idx].Rows) + log.Error(fmt.Sprintf("MySQL data for %v -\n%v", table, fz.firstFailureInfo.mysqlState[idx].Rows)) + log.Error(fmt.Sprintf("Vitess data for %v -\n%v", table, fz.firstFailureInfo.vitessState[idx].Rows)) } } diff --git a/go/test/endtoend/vtgate/foreignkey/fk_test.go b/go/test/endtoend/vtgate/foreignkey/fk_test.go index 52e36b662a2..b2d05372051 100644 --- a/go/test/endtoend/vtgate/foreignkey/fk_test.go +++ b/go/test/endtoend/vtgate/foreignkey/fk_test.go @@ -1234,18 +1234,18 @@ func TestFkOneCase(t *testing.T) { for _, query := range queries { if strings.HasPrefix(query, "vexplain") { res := utils.Exec(t, mcmp.VtConn, query) - log.Errorf("Query %v, Result - %v", query, res.Rows) + log.Error(fmt.Sprintf("Query %v, Result - %v", query, res.Rows)) continue } _, _ = mcmp.ExecAllowAndCompareError(query, utils.CompareOptions{}) if t.Failed() { - log.Errorf("Query failed - %v", query) + log.Error(fmt.Sprintf("Query failed - %v", query)) break } } vitessData := collectFkTablesState(mcmp.VtConn) for idx, table := range fkTables { - log.Errorf("Vitess data for %v -\n%v", table, vitessData[idx].Rows) + log.Error(fmt.Sprintf("Vitess data for %v -\n%v", table, vitessData[idx].Rows)) } // ensure Vitess database has some data. This ensures not all the commands failed. 
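The hunks before and after this point all apply the same mechanical rewrite: formatted helpers (log.Infof, log.Warningf, log.Errorf) become single-string calls with the formatting moved into fmt.Sprintf, and fatal variants (log.Fatal, log.Fatalf, log.Exit) are split into log.Error followed by an explicit os.Exit(1). A minimal self-contained sketch of that mapping follows; the Info/Warn/Error stubs only stand in for vitess.io/vitess/go/vt/log so the example compiles on its own, and openConfig is a hypothetical helper used just to produce an error value.

package main

import (
	"fmt"
	"os"
)

// Stand-ins for the single-string logging API this diff targets.
func Info(msg string)  { fmt.Fprintln(os.Stderr, "I "+msg) }
func Warn(msg string)  { fmt.Fprintln(os.Stderr, "W "+msg) }
func Error(msg string) { fmt.Fprintln(os.Stderr, "E "+msg) }

// openConfig is a hypothetical helper used only to produce an error.
func openConfig(path string) error {
	_, err := os.Open(path)
	return err
}

func main() {
	path := "/tmp/does-not-exist.cnf"

	// Old: log.Infof("loading config %s", path)
	Info(fmt.Sprintf("loading config %s", path))

	// Old: log.Warningf("config %s missing, using defaults", path)
	Warn(fmt.Sprintf("config %s missing, using defaults", path))

	if err := openConfig(path); err != nil {
		// Old: log.Fatalf("cannot open config: %v", err)
		// New: log the error, then exit explicitly so control flow stays visible.
		Error(fmt.Sprintf("cannot open config: %v", err))
		os.Exit(1)
	}
}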
diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index fc542d6a0cc..f1f639bd353 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -266,6 +266,6 @@ func setupExtraMyConfig() error { return fmt.Errorf("failed to set EXTRA_MY_CNF environment variable: %v", err) } - log.Infof("Set EXTRA_MY_CNF to: %s", configPath) + log.Info("Set EXTRA_MY_CNF to: " + configPath) return nil } diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index 6049d1e9fdf..c607f3fca67 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -1155,7 +1155,7 @@ func generateDelete(t *testing.T, tableName string, conn *mysql.Conn) error { } func runSingleConnection(ctx context.Context, t *testing.T, tableName string, tcase *testCase, sleepInterval time.Duration) { - log.Infof("Running single connection on %s", tableName) + log.Info("Running single connection on " + tableName) conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) defer conn.Close() @@ -1178,7 +1178,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, tableName string, tc } select { case <-ctx.Done(): - log.Infof("Terminating single connection") + log.Info("Terminating single connection") return case <-ticker.C: } @@ -1187,8 +1187,8 @@ func runSingleConnection(ctx context.Context, t *testing.T, tableName string, tc // populateTables randomly populates all test tables. This is done sequentially. func populateTables(t *testing.T, tcase *testCase) { - log.Infof("initTable begin") - defer log.Infof("initTable complete") + log.Info("initTable begin") + defer log.Info("initTable complete") ctx := t.Context() conn, err := mysql.Connect(ctx, &vtParams) @@ -1345,13 +1345,13 @@ func testSelectTableMetrics( writeMetrics[tableName].mu.Lock() defer writeMetrics[tableName].mu.Unlock() - log.Infof("%s %s", tableName, writeMetrics[tableName].String()) + log.Info(fmt.Sprintf("%s %s", tableName, writeMetrics[tableName].String())) rs := queryTablet(t, tablet, fmt.Sprintf(selectCountRowsStatement, tableName), "") row := rs.Named().Row() require.NotNil(t, row) - log.Infof("testSelectTableMetrics, row: %v", row) + log.Info(fmt.Sprintf("testSelectTableMetrics, row: %v", row)) numRows := row.AsInt64("num_rows", 0) sumUpdates := row.AsInt64("sum_updates", 0) assert.NotZero(t, numRows) diff --git a/go/test/endtoend/vtgate/godriver/main_test.go b/go/test/endtoend/vtgate/godriver/main_test.go index dca9c508fc7..609ab1d42d3 100644 --- a/go/test/endtoend/vtgate/godriver/main_test.go +++ b/go/test/endtoend/vtgate/godriver/main_test.go @@ -108,14 +108,16 @@ func TestMain(m *testing.M) { "--queryserver-config-transaction-timeout", "3s", } if err := clusterInstance.StartKeyspace(*Keyspace, []string{"-80", "80-"}, 1, false, clusterInstance.Cell); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } // Start vtgate clusterInstance.VtGateExtraArgs = []string{utils.GetFlagVariantForTests("--warn-sharded-only") + "=true"} if err := clusterInstance.StartVtgate(); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } diff --git a/go/test/endtoend/vtgate/queries/random/query_gen.go b/go/test/endtoend/vtgate/queries/random/query_gen.go index dfdc4ee5325..63bcccd586b 100644 --- 
a/go/test/endtoend/vtgate/queries/random/query_gen.go +++ b/go/test/endtoend/vtgate/queries/random/query_gen.go @@ -19,6 +19,7 @@ package random import ( "fmt" "math/rand/v2" + "os" "slices" "vitess.io/vitess/go/slice" @@ -83,7 +84,8 @@ func newQueryGenerator(genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAg func newSelectGenerator(genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *selectGenerator { if maxTables <= 0 { - log.Fatalf("maxTables must be at least 1, currently %d\n", maxTables) + log.Error(fmt.Sprintf("maxTables must be at least 1, currently %d\n", maxTables)) + os.Exit(1) } return &selectGenerator{ @@ -386,7 +388,8 @@ func (sg *selectGenerator) createTablesAndJoin() ([]tableT, bool) { func (sg *selectGenerator) createJoin(tables []tableT) { n := len(sg.sel.From) if len(tables) != n+1 { - log.Fatalf("sel has %d tables and tables has %d tables", len(sg.sel.From), n) + log.Error(fmt.Sprintf("sel has %d tables and tables has %d tables", len(sg.sel.From), n)) + os.Exit(1) } joinPredicate := sqlparser.AndExpressions(sg.createJoinPredicates(tables)...) @@ -399,7 +402,8 @@ func (sg *selectGenerator) createJoin(tables []tableT) { // tables should have at least two elements func (sg *selectGenerator) createJoinPredicates(tables []tableT) []sqlparser.Expr { if len(tables) < 2 { - log.Fatalf("tables has %d elements, needs at least 2", len(tables)) + log.Error(fmt.Sprintf("tables has %d elements, needs at least 2", len(tables))) + os.Exit(1) } exprGenerators := []sqlparser.ExprGenerator{&tables[len(tables)-2], &tables[len(tables)-1]} @@ -440,7 +444,8 @@ func (sg *selectGenerator) createGroupBy(tables []tableT) (grouping []column) { // aliasGroupingColumns randomly aliases the grouping columns in the SelectExprs func (sg *selectGenerator) aliasGroupingColumns(grouping []column) []column { if len(grouping) != len(sg.sel.SelectExprs.Exprs) { - log.Fatalf("grouping (length: %d) and sg.sel.SelectExprs (length: %d) should have the same length at this point", len(grouping), len(sg.sel.SelectExprs.Exprs)) + log.Error(fmt.Sprintf("grouping (length: %d) and sg.sel.SelectExprs (length: %d) should have the same length at this point", len(grouping), len(sg.sel.SelectExprs.Exprs))) + os.Exit(1) } for i := range grouping { @@ -531,7 +536,8 @@ func (sg *selectGenerator) createHavingPredicates(grouping []column) { // returns between minExprs and maxExprs random expressions using generators func (sg *selectGenerator) createRandomExprs(minExprs, maxExprs int, generators ...sqlparser.ExprGenerator) (predicates []sqlparser.Expr) { if minExprs > maxExprs { - log.Fatalf("minExprs is greater than maxExprs; minExprs: %d, maxExprs: %d\n", minExprs, maxExprs) + log.Error(fmt.Sprintf("minExprs is greater than maxExprs; minExprs: %d, maxExprs: %d\n", minExprs, maxExprs)) + os.Exit(1) } else if maxExprs <= 0 { return } diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index 41f9f53dad7..5e702d35309 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -179,14 +179,16 @@ func TestMain(m *testing.M) { } clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "3s", "--queryserver-config-max-result-size", "30"} if err := clusterInstance.StartUnshardedKeyspace(*Keyspace, 0, false, clusterInstance.Cell); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } // Start vtgate 
clusterInstance.VtGateExtraArgs = []string{vtutils.GetFlagVariantForTests("--warn-sharded-only") + "=true"} if err := clusterInstance.StartVtgate(); err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } @@ -197,14 +199,16 @@ func TestMain(m *testing.M) { } conn, err := mysql.Connect(context.Background(), &vtParams) if err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } defer conn.Close() err = runCreateProcedures(conn) if err != nil { - log.Fatal(err.Error()) + log.Error(err.Error()) + os.Exit(1) return 1 } diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index fe768051a2d..a63387cebff 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -567,7 +567,7 @@ func TestSemiSync(t *testing.T) { utils.IsPrimarySemiSyncSetupCorrectly(t, primary, "ON") { return } - log.Warningf("semi sync settings not fixed yet") + log.Warn("semi sync settings not fixed yet") time.Sleep(1 * time.Second) } } diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index 0d04b8f325c..3083267068b 100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -146,7 +146,7 @@ func createVttablets(clusterInstance *cluster.LocalProcessCluster, cellInfos []* // Start MySql var mysqlCtlProcessList []*exec.Cmd for _, tablet := range shard0.Vttablets { - log.Infof("Starting MySql for tablet %v", tablet.Alias) + log.Info(fmt.Sprintf("Starting MySql for tablet %v", tablet.Alias)) proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { return err @@ -259,7 +259,7 @@ func StopVTOrcs(t *testing.T, clusterInfo *VTOrcClusterInfo) { // Stop vtorc for _, vtorcProcess := range clusterInfo.ClusterInstance.VTOrcProcesses { if err := vtorcProcess.TearDown(); err != nil { - log.Errorf("Error in vtorc teardown: %v", err) + log.Error(fmt.Sprintf("Error in vtorc teardown: %v", err)) } } clusterInfo.ClusterInstance.VTOrcProcesses = nil @@ -372,7 +372,7 @@ func ShardPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, keyspace *c require.NoError(t, err) if si.Shard.PrimaryAlias == nil { - log.Warningf("Shard %v/%v has no primary yet, sleep for 1 second\n", keyspace.Name, shard.Name) + log.Warn(fmt.Sprintf("Shard %v/%v has no primary yet, sleep for 1 second\n", keyspace.Name, shard.Name)) time.Sleep(time.Second) continue } @@ -396,7 +396,7 @@ func CheckPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, tablet *clu tabletInfo, err := clusterInfo.ClusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) if topodatapb.TabletType_PRIMARY != tabletInfo.GetType() { - log.Warningf("Tablet %v is not primary yet, sleep for 1 second\n", tablet.Alias) + log.Warn(fmt.Sprintf("Tablet %v is not primary yet, sleep for 1 second\n", tablet.Alias)) time.Sleep(time.Second) continue } @@ -407,13 +407,13 @@ func CheckPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, tablet *clu streamHealthResponse := shrs[0] if checkServing && !streamHealthResponse.GetServing() { - log.Warningf("Tablet %v is not serving in health stream yet, sleep for 1 second\n", tablet.Alias) + log.Warn(fmt.Sprintf("Tablet %v is not serving in health stream yet, sleep for 1 second\n", tablet.Alias)) time.Sleep(time.Second) continue } tabletType := streamHealthResponse.GetTarget().GetTabletType() if tabletType != topodatapb.TabletType_PRIMARY { - log.Warningf("Tablet %v is not primary in 
health stream yet, sleep for 1 second\n", tablet.Alias) + log.Warn(fmt.Sprintf("Tablet %v is not primary in health stream yet, sleep for 1 second\n", tablet.Alias)) time.Sleep(time.Second) continue } @@ -441,7 +441,7 @@ func CheckReplication(t *testing.T, clusterInfo *VTOrcClusterInfo, primary *clus default: _, err := RunSQL(t, sqlSchema, primary, "") if err != nil { - log.Warningf("create table failed on primary - %v, will retry", err) + log.Warn(fmt.Sprintf("create table failed on primary - %v, will retry", err)) time.Sleep(100 * time.Millisecond) break } @@ -463,7 +463,7 @@ func VerifyWritesSucceed(t *testing.T, clusterInfo *VTOrcClusterInfo, primary *c func confirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*cluster.Vttablet, timeToWait time.Duration, valueToInsert int) { t.Helper() - log.Infof("Insert data into primary and check that it is replicated to replica") + log.Info("Insert data into primary and check that it is replicated to replica") // insert data into the new primary, check the connected replica work insertSQL := fmt.Sprintf("insert into vt_insert_test(id, msg) values (%d, 'test %d')", valueToInsert, valueToInsert) _, err := RunSQL(t, insertSQL, primary, "vt_ks") @@ -484,7 +484,7 @@ func confirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*clu } } if err != nil { - log.Warningf("waiting for replication - error received - %v, will retry", err) + log.Warn(fmt.Sprintf("waiting for replication - error received - %v, will retry", err)) time.Sleep(300 * time.Millisecond) break } @@ -548,7 +548,7 @@ func validateTopology(t *testing.T, clusterInfo *VTOrcClusterInfo, pingTablets b output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate") } if err != nil { - log.Warningf("Validate failed, retrying, output - %s", output) + log.Warn("Validate failed, retrying, output - " + output) time.Sleep(100 * time.Millisecond) break } @@ -570,9 +570,9 @@ func validateTopology(t *testing.T, clusterInfo *VTOrcClusterInfo, pingTablets b // KillTablets is used to kill the tablets func KillTablets(vttablets []*cluster.Vttablet) { for _, tablet := range vttablets { - log.Infof("Shutting down MySQL for %v", tablet.Alias) + log.Info(fmt.Sprintf("Shutting down MySQL for %v", tablet.Alias)) _ = tablet.MysqlctlProcess.Stop() - log.Infof("Calling TearDown on tablet %v", tablet.Alias) + log.Info(fmt.Sprintf("Calling TearDown on tablet %v", tablet.Alias)) _ = tablet.VttabletProcess.TearDown() } } @@ -714,7 +714,7 @@ func CheckSourcePort(t *testing.T, replica *cluster.Vttablet, source *cluster.Vt require.NoError(t, err) if len(res.Rows) != 1 { - log.Warningf("no replication status yet, will retry") + log.Warn("no replication status yet, will retry") break } @@ -727,7 +727,7 @@ func CheckSourcePort(t *testing.T, replica *cluster.Vttablet, source *cluster.Vt } } } - log.Warningf("source port not set correctly yet, will retry") + log.Warn("source port not set correctly yet, will retry") } time.Sleep(300 * time.Millisecond) } @@ -747,7 +747,7 @@ func CheckHeartbeatInterval(t *testing.T, replica *cluster.Vttablet, heartbeatIn require.NoError(t, err) if len(res.Rows) != 1 { - log.Warningf("no replication configuration yet, will retry") + log.Warn("no replication configuration yet, will retry") break } @@ -758,11 +758,11 @@ func CheckHeartbeatInterval(t *testing.T, replica *cluster.Vttablet, heartbeatIn if readVal == heartbeatInterval { return } else { - log.Warningf("heartbeat interval set to - %v", readVal) + 
log.Warn(fmt.Sprintf("heartbeat interval set to - %v", readVal)) } } } - log.Warningf("heartbeat interval not set correctly yet, will retry") + log.Warn("heartbeat interval not set correctly yet, will retry") } time.Sleep(300 * time.Millisecond) } @@ -831,7 +831,7 @@ func SetupNewClusterSemiSync(t *testing.T) *VTOrcClusterInfo { var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { - log.Infof("Starting MySql for tablet %v", tablet.Alias) + log.Info(fmt.Sprintf("Starting MySql for tablet %v", tablet.Alias)) proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { require.NoError(t, err, "Error starting start mysql: %v", err) @@ -905,7 +905,7 @@ func AddSemiSyncKeyspace(t *testing.T, clusterInfo *VTOrcClusterInfo) { var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInfo.ClusterInstance.Keyspaces[1].Shards { for _, tablet := range shard.Vttablets { - log.Infof("Starting MySql for tablet %v", tablet.Alias) + log.Info(fmt.Sprintf("Starting MySql for tablet %v", tablet.Alias)) proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { require.NoError(t, err, "Error starting start mysql: %v", err) @@ -1153,18 +1153,18 @@ func PrintVTOrcLogsOnFailure(t *testing.T, clusterInstance *cluster.LocalProcess return } - log.Errorf("Printing VTOrc logs") + log.Error("Printing VTOrc logs") for _, vtorc := range clusterInstance.VTOrcProcesses { if vtorc == nil || vtorc.LogFileName == "" { continue } filePath := path.Join(vtorc.LogDir, vtorc.LogFileName) - log.Errorf("Printing file - %s", filePath) + log.Error("Printing file - " + filePath) content, err := os.ReadFile(filePath) if err != nil { - log.Errorf("Error while reading the file - %v", err) + log.Error(fmt.Sprintf("Error while reading the file - %v", err)) } - log.Errorf("%s", string(content)) + log.Error(string(content)) } } diff --git a/go/trace/logger.go b/go/trace/logger.go index 8fefe67a4c4..f83d2e74030 100644 --- a/go/trace/logger.go +++ b/go/trace/logger.go @@ -16,17 +16,21 @@ limitations under the License. package trace -import "vitess.io/vitess/go/vt/log" +import ( + "fmt" + + "vitess.io/vitess/go/vt/log" +) // traceLogger wraps the standard vitess log package to satisfy the datadog and // jaeger logger interfaces. type traceLogger struct{} // Log is part of the ddtrace.Logger interface. Datadog only ever logs errors. -func (*traceLogger) Log(msg string) { log.Errorf(msg) } +func (*traceLogger) Log(msg string) { log.Error(msg) } // Error is part of the jaeger.Logger interface. -func (*traceLogger) Error(msg string) { log.Errorf(msg) } +func (*traceLogger) Error(msg string) { log.Error(msg) } // Infof is part of the jaeger.Logger interface. -func (*traceLogger) Infof(msg string, args ...any) { log.Infof(msg, args...) } +func (*traceLogger) Infof(msg string, args ...any) { log.Info(fmt.Sprintf(msg, args...)) } diff --git a/go/trace/plugin_jaeger.go b/go/trace/plugin_jaeger.go index b6a96ee3f39..d738ee57c65 100644 --- a/go/trace/plugin_jaeger.go +++ b/go/trace/plugin_jaeger.go @@ -17,6 +17,7 @@ limitations under the License. 
package trace import ( + "fmt" "io" "github.com/opentracing/opentracing-go" @@ -102,17 +103,17 @@ func newJagerTracerFromEnv(serviceName string) (tracingService, io.Closer, error if host := agentHost.Get(); host != "" { cfg.Reporter.LocalAgentHostPort = host } - log.Infof("Tracing to: %v as %v", cfg.Reporter.LocalAgentHostPort, cfg.ServiceName) + log.Info(fmt.Sprintf("Tracing to: %v as %v", cfg.Reporter.LocalAgentHostPort, cfg.ServiceName)) cfg.Sampler.Param = samplingRate.Get() cfg.Sampler.Type = samplingType.Get() - log.Infof("Tracing sampler type %v (param: %v)", cfg.Sampler.Type, cfg.Sampler.Param) + log.Info(fmt.Sprintf("Tracing sampler type %v (param: %v)", cfg.Sampler.Type, cfg.Sampler.Param)) var opts []config.Option if enableLogging.Get() { opts = append(opts, config.Logger(&traceLogger{})) } else if cfg.Reporter.LogSpans { - log.Warningf("JAEGER_REPORTER_LOG_SPANS was set, but --tracing-enable-logging was not; spans will not be logged") + log.Warn("JAEGER_REPORTER_LOG_SPANS was set, but --tracing-enable-logging was not; spans will not be logged") } tracer, closer, err := cfg.NewTracer(opts...) diff --git a/go/trace/trace.go b/go/trace/trace.go index e68de9499f5..abd9a10225e 100644 --- a/go/trace/trace.go +++ b/go/trace/trace.go @@ -167,13 +167,13 @@ func StartTracing(serviceName string) io.Closer { tracer, closer, err := factory(serviceName) if err != nil { - log.Error(vterrors.Wrapf(err, "failed to create a %s tracer", tracingBackend)) + log.Error(fmt.Sprint(vterrors.Wrapf(err, "failed to create a %s tracer", tracingBackend))) return &nilCloser{} } currentTracer = tracer if tracingBackend != "noop" { - log.Infof("successfully started tracing with [%s]", tracingBackend) + log.Info(fmt.Sprintf("successfully started tracing with [%s]", tracingBackend)) } return closer @@ -185,7 +185,7 @@ func fail(serviceName string) io.Closer { options = append(options, k) } altStr := strings.Join(options, ", ") - log.Errorf("no such [%s] tracing service found. alternatives are: %v", serviceName, altStr) + log.Error(fmt.Sprintf("no such [%s] tracing service found. alternatives are: %v", serviceName, altStr)) return &nilCloser{} } diff --git a/go/trace/utils.go b/go/trace/utils.go index c88bb625035..13ab072b8da 100644 --- a/go/trace/utils.go +++ b/go/trace/utils.go @@ -17,6 +17,7 @@ limitations under the License. package trace import ( + "fmt" "io" "vitess.io/vitess/go/vt/log" @@ -27,7 +28,7 @@ func LogErrorsWhenClosing(in io.Closer) func() { return func() { err := in.Close() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } } } diff --git a/go/viperutil/internal/log/log.go b/go/viperutil/internal/log/log.go index b201eadd408..839110b4a79 100644 --- a/go/viperutil/internal/log/log.go +++ b/go/viperutil/internal/log/log.go @@ -21,6 +21,9 @@ viper's jww log. package log import ( + "fmt" + "os" + jww "github.com/spf13/jwalterweatherman" "vitess.io/vitess/go/vt/log" @@ -47,12 +50,36 @@ var ( // DEBUG logs to viper's DEBUG level, and nothing to vitess logs. DEBUG = jwwlog(jww.DEBUG, nil) // INFO logs to viper and vitess at INFO levels. - INFO = jwwlog(jww.INFO, log.Infof) + INFO = jwwlog(jww.INFO, infof) // WARN logs to viper and vitess at WARN/WARNING levels. - WARN = jwwlog(jww.WARN, log.Warningf) + WARN = jwwlog(jww.WARN, warnf) // ERROR logs to viper and vitess at ERROR levels. - ERROR = jwwlog(jww.ERROR, log.Errorf) + ERROR = jwwlog(jww.ERROR, errorf) // CRITICAL logs to viper at CRITICAL level, and then fatally logs to // vitess, exiting the process. 
- CRITICAL = jwwlog(jww.CRITICAL, log.Fatalf) + CRITICAL = jwwlog(jww.CRITICAL, criticalf) ) + +// infof formats an info message and emits it through the structured logger. +func infof(format string, args ...any) { + log.Info(fmt.Sprintf(format, args...)) +} + +// warnf formats a warning message and emits it through the structured logger. +func warnf(format string, args ...any) { + log.Warn(fmt.Sprintf(format, args...)) +} + +// errorf formats an error message and emits it through the structured logger. +func errorf(format string, args ...any) { + log.Error(fmt.Sprintf(format, args...)) +} + +// criticalf formats an error message and terminates the process. +// +// It mirrors the behavior of log.Fatalf, but uses log.Error and os.Exit +// explicitly so callers do not depend on the removed Fatalf wrapper. +func criticalf(format string, args ...any) { + log.Error(fmt.Sprintf(format, args...)) + os.Exit(1) +} diff --git a/go/vt/binlog/binlog_connection.go b/go/vt/binlog/binlog_connection.go index 9062be6cefd..cfac6653e44 100644 --- a/go/vt/binlog/binlog_connection.go +++ b/go/vt/binlog/binlog_connection.go @@ -75,7 +75,7 @@ func NewBinlogConnection(cp dbconfigs.Connector) (*BinlogConnection, error) { cp: cp, serverID: serverIDPool.Get(), } - log.Infof("new binlog connection: serverID=%d", bc.serverID) + log.Info(fmt.Sprintf("new binlog connection: serverID=%d", bc.serverID)) return bc, nil } @@ -121,9 +121,9 @@ func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (rep func (bc *BinlogConnection) StartBinlogDumpFromPosition(ctx context.Context, binlogFilename string, startPos replication.Position) (<-chan mysql.BinlogEvent, <-chan error, error) { ctx, bc.cancel = context.WithCancel(ctx) - log.Infof("sending binlog dump command: startPos=%v, serverID=%v", startPos, bc.serverID) + log.Info(fmt.Sprintf("sending binlog dump command: startPos=%v, serverID=%v", startPos, bc.serverID)) if err := bc.SendBinlogDumpCommand(bc.serverID, binlogFilename, startPos); err != nil { - log.Errorf("couldn't send binlog dump command: %v", err) + log.Error(fmt.Sprintf("couldn't send binlog dump command: %v", err)) return nil, nil, err } @@ -158,10 +158,10 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) (chan mysql.Binlog // CRServerLost = Lost connection to MySQL server during query // This is not necessarily an error. It could just be that we closed // the connection from outside. - log.Infof("connection closed during binlog stream (possibly intentional): %v", err) + log.Info(fmt.Sprintf("connection closed during binlog stream (possibly intentional): %v", err)) return } - log.Errorf("read error while streaming binlog events: %v", err) + log.Error(fmt.Sprintf("read error while streaming binlog events: %v", err)) return } @@ -248,7 +248,7 @@ func (bc *BinlogConnection) findFileBeforeTimestamp(ctx context.Context, timesta } } - log.Errorf("couldn't find an old enough binlog to match timestamp >= %v (looked at %v files)", timestamp, len(binlogs.Rows)) + log.Error(fmt.Sprintf("couldn't find an old enough binlog to match timestamp >= %v (looked at %v files)", timestamp, len(binlogs.Rows))) return "", ErrBinlogUnavailable } @@ -285,7 +285,7 @@ func (bc *BinlogConnection) getBinlogTimeStamp(filename string) (blTimestamp int // The ID for the binlog connection is recycled back into the pool.
func (bc *BinlogConnection) Close() { if bc.Conn != nil { - log.Infof("closing binlog socket to unblock reads") + log.Info("closing binlog socket to unblock reads") bc.Conn.Close() // bc.cancel is set at the beginning of the StartBinlogDump* @@ -293,13 +293,13 @@ func (bc *BinlogConnection) Close() { // Note we also may error out before adding 1 to bc.wg, // but then the Wait() still works. if bc.cancel != nil { - log.Infof("waiting for binlog dump thread to end") + log.Info("waiting for binlog dump thread to end") bc.cancel() bc.wg.Wait() bc.cancel = nil } - log.Infof("closing binlog MySQL client with serverID %v. Will recycle ID.", bc.serverID) + log.Info(fmt.Sprintf("closing binlog MySQL client with serverID %v. Will recycle ID.", bc.serverID)) bc.Conn = nil serverIDPool.Put(bc.serverID) } diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index f4949d405f0..992709ee585 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -183,7 +183,7 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) { if err != nil && err != ErrBinlogUnavailable { err = fmt.Errorf("stream error @ (including the GTID we failed to process) %v: %v", stopPos, err) } - log.Infof("stream ended @ %v, err = %v", stopPos, err) + log.Info(fmt.Sprintf("stream ended @ %v, err = %v", stopPos, err)) }() if bls.conn, err = NewBinlogConnection(bls.cp); err != nil { @@ -203,7 +203,7 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) { if err != nil { return fmt.Errorf("can't get charset to check binlog stream: %v", err) } - log.Infof("binlog stream client charset = %v, server charset = %v", bls.clientCharset, cs) + log.Info(fmt.Sprintf("binlog stream client charset = %v, server charset = %v", bls.clientCharset, cs)) if !proto.Equal(cs, bls.clientCharset) { return fmt.Errorf("binlog stream client charset (%v) doesn't match server (%v)", bls.clientCharset, cs) } @@ -264,7 +264,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog begin := func() { if statements != nil { // If this happened, it would be a legitimate error. - log.Errorf("BEGIN in binlog stream while still in another transaction; dropping %d statements: %v", len(statements), statements) + log.Error(fmt.Sprintf("BEGIN in binlog stream while still in another transaction; dropping %d statements: %v", len(statements), statements)) binlogStreamerErrors.Add("ParseEvents", 1) } statements = make([]FullBinlogStatement, 0, 10) @@ -299,13 +299,13 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog case ev, ok = <-events: if !ok { // events channel has been closed, which means the connection died. 
- log.Infof("reached end of binlog event stream") + log.Info("reached end of binlog event stream") return pos, ErrServerEOF } case err = <-errs: return pos, err case <-ctx.Done(): - log.Infof("stopping early due to binlog Streamer service shutdown or client disconnect") + log.Info("stopping early due to binlog Streamer service shutdown or client disconnect") return pos, ctx.Err() } @@ -615,7 +615,7 @@ func (bls *Streamer) appendInserts(statements []FullBinlogStatement, tce *tableC keyspaceIDCell, pkValues, err := writeValuesAsSQL(sql, tce, rows, i, tce.pkNames != nil) if err != nil { - log.Warningf("writeValuesAsSQL(%v) failed: %v", i, err) + log.Warn(fmt.Sprintf("writeValuesAsSQL(%v) failed: %v", i, err)) continue } @@ -625,7 +625,7 @@ func (bls *Streamer) appendInserts(statements []FullBinlogStatement, tce *tableC var err error ksid, err = tce.resolver.keyspaceID(keyspaceIDCell) if err != nil { - log.Warningf("resolver(%v) failed: %v", err) + log.Warn(fmt.Sprintf("resolver(%v) failed: %v", keyspaceIDCell, err)) } } @@ -651,14 +651,14 @@ func (bls *Streamer) appendUpdates(statements []FullBinlogStatement, tce *tableC keyspaceIDCell, pkValues, err := writeValuesAsSQL(sql, tce, rows, i, tce.pkNames != nil) if err != nil { - log.Warningf("writeValuesAsSQL(%v) failed: %v", i, err) + log.Warn(fmt.Sprintf("writeValuesAsSQL(%v) failed: %v", i, err)) continue } sql.WriteString(" WHERE ") if _, _, err := writeIdentifiersAsSQL(sql, tce, rows, i, false); err != nil { - log.Warningf("writeIdentifiesAsSQL(%v) failed: %v", i, err) + log.Warn(fmt.Sprintf("writeIdentifiesAsSQL(%v) failed: %v", i, err)) continue } @@ -668,7 +668,7 @@ func (bls *Streamer) appendUpdates(statements []FullBinlogStatement, tce *tableC var err error ksid, err = tce.resolver.keyspaceID(keyspaceIDCell) if err != nil { - log.Warningf("resolver(%v) failed: %v", err) + log.Warn(fmt.Sprintf("resolver(%v) failed: %v", keyspaceIDCell, err)) } } @@ -694,7 +694,7 @@ func (bls *Streamer) appendDeletes(statements []FullBinlogStatement, tce *tableC keyspaceIDCell, pkValues, err := writeIdentifiersAsSQL(sql, tce, rows, i, tce.pkNames != nil) if err != nil { - log.Warningf("writeIdentifiesAsSQL(%v) failed: %v", i, err) + log.Warn(fmt.Sprintf("writeIdentifiesAsSQL(%v) failed: %v", i, err)) continue } @@ -704,7 +704,7 @@ func (bls *Streamer) appendDeletes(statements []FullBinlogStatement, tce *tableC var err error ksid, err = tce.resolver.keyspaceID(keyspaceIDCell) if err != nil { - log.Warningf("resolver(%v) failed: %v", err) + log.Warn(fmt.Sprintf("resolver(%v) failed: %v", keyspaceIDCell, err)) } } diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index d4d029be912..cca75a0d850 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -261,12 +261,12 @@ func NewBinlogPlayerTables(dbClient DBClient, tablet *topodatapb.Tablet, tables // If a stop position was specified, and reached, the state is updated to "Stopped". 
func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Running, ""); err != nil { - log.Errorf("Error writing Running state: %v", err) + log.Error(fmt.Sprintf("Error writing Running state: %v", err)) } if err := blp.applyEvents(ctx); err != nil { if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Error, err.Error()); err != nil { - log.Errorf("Error writing stop state: %v", err) + log.Error(fmt.Sprintf("Error writing stop state: %v", err)) } return err } @@ -278,7 +278,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { // Read starting values for vreplication. settings, err := ReadVRSettings(blp.dbClient, blp.uid) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } @@ -293,27 +293,23 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { ) if err != nil { err := fmt.Errorf("failed to instantiate throttler: %v", err) - log.Error(err) + log.Error(fmt.Sprint(err)) return err } defer t.Close() // Log the mode of operation and when the player stops. if len(blp.tables) > 0 { - log.Infof("BinlogPlayer client %v for tables %v starting @ '%v', server: %v", - blp.uid, + log.Info(fmt.Sprintf("BinlogPlayer client %v for tables %v starting @ '%v', server: %v", blp.uid, blp.tables, blp.position, - blp.tablet, - ) + blp.tablet)) } else { - log.Infof("BinlogPlayer client %v for keyrange '%v-%v' starting @ '%v', server: %v", - blp.uid, + log.Info(fmt.Sprintf("BinlogPlayer client %v for keyrange '%v-%v' starting @ '%v', server: %v", blp.uid, hex.EncodeToString(blp.keyRange.GetStart()), hex.EncodeToString(blp.keyRange.GetEnd()), blp.position, - blp.tablet, - ) + blp.tablet)) } if !blp.stopPosition.IsZero() { switch { @@ -321,19 +317,19 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { msg := fmt.Sprintf("not starting BinlogPlayer, we're already at the desired position %v", blp.stopPosition) log.Info(msg) if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { - log.Errorf("Error writing stop state: %v", err) + log.Error(fmt.Sprintf("Error writing stop state: %v", err)) } return nil case blp.position.AtLeast(blp.stopPosition): msg := fmt.Sprintf("starting point %v greater than stopping point %v", blp.position, blp.stopPosition) log.Error(msg) if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { - log.Errorf("Error writing stop state: %v", err) + log.Error(fmt.Sprintf("Error writing stop state: %v", err)) } // Don't return an error. Otherwise, it will keep retrying. return nil default: - log.Infof("Will stop player when reaching %v", blp.stopPosition) + log.Info(fmt.Sprintf("Will stop player when reaching %v", blp.stopPosition)) } } @@ -345,7 +341,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { err = blplClient.Dial(ctx, blp.tablet) if err != nil { err := fmt.Errorf("error dialing binlog server: %v", err) - log.Error(err) + log.Error(fmt.Sprint(err)) return err } defer blplClient.Close() @@ -358,7 +354,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { if err != nil { return fmt.Errorf("can't get charset to request binlog stream: %v", err) } - log.Infof("original charset: %v", blp.defaultCharset) + log.Info(fmt.Sprintf("original charset: %v", blp.defaultCharset)) blp.currentCharset = blp.defaultCharset // Restore original charset when we're done. 
defer func() { @@ -367,9 +363,9 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { if dbClient.dbConn == nil { return } - log.Infof("restoring original charset %v", blp.defaultCharset) + log.Info(fmt.Sprintf("restoring original charset %v", blp.defaultCharset)) if csErr := mysql.SetCharset(dbClient.dbConn, blp.defaultCharset); csErr != nil { - log.Errorf("can't restore original charset %v: %v", blp.defaultCharset, csErr) + log.Error(fmt.Sprintf("can't restore original charset %v: %v", blp.defaultCharset, csErr)) } }() } @@ -382,7 +378,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { } if err != nil { err := fmt.Errorf("error sending streaming query to binlog server: %v", err) - log.Error(err) + log.Error(fmt.Sprint(err)) return err } @@ -417,9 +413,9 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { for { ok, err = blp.processTransaction(response) if err != nil { - log.Infof("transaction failed: %v", err) + log.Info(fmt.Sprintf("transaction failed: %v", err)) for _, stmt := range response.Statements { - log.Infof("statement: %q", stmt.Sql) + log.Info(fmt.Sprintf("statement: %q", stmt.Sql)) } return fmt.Errorf("error in processing binlog event %v", err) } @@ -429,14 +425,14 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { msg := "Reached stopping position, done playing logs" log.Info(msg) if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { - log.Errorf("Error writing stop state: %v", err) + log.Error(fmt.Sprintf("Error writing stop state: %v", err)) } return nil } } break } - log.Infof("Retrying txn in %v.", blp.deadlockRetry) + log.Info(fmt.Sprintf("Retrying txn in %v.", blp.deadlockRetry)) time.Sleep(blp.deadlockRetry) } } @@ -463,7 +459,7 @@ func (blp *BinlogPlayer) processTransaction(tx *binlogdatapb.BinlogTransaction) // needed during event playback. Here we also adjust so that playback // proceeds, but in Vitess-land this usually means a misconfigured // server or a misbehaving client, so we spam the logs with warnings. 
- log.Warningf("BinlogPlayer changing charset from %v to %v for statement %d in transaction %v", blp.currentCharset, stmtCharset, i, tx) + log.Warn(fmt.Sprintf("BinlogPlayer changing charset from %v to %v for statement %d in transaction %v", blp.currentCharset, stmtCharset, i, tx)) err = mysql.SetCharset(dbClient.dbConn, stmtCharset) if err != nil { return false, fmt.Errorf("can't set charset for statement %d in transaction %v: %v", i, tx, err) @@ -476,7 +472,7 @@ func (blp *BinlogPlayer) processTransaction(tx *binlogdatapb.BinlogTransaction) } if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock { // Deadlock: ask for retry - log.Infof("Deadlock: %v", err) + log.Info(fmt.Sprintf("Deadlock: %v", err)) if err = blp.dbClient.Rollback(); err != nil { return false, err } @@ -503,7 +499,7 @@ func (blp *BinlogPlayer) exec(sql string) (*sqltypes.Result, error) { qr, err := blp.dbClient.ExecuteFetch(sql, 0) blp.blplStats.Timings.Record(BlplQuery, queryStartTime) if d := time.Since(queryStartTime); d > SlowQueryThreshold { - log.Infof("SLOW QUERY (took %.2fs) '%s'", d.Seconds(), sql) + log.Info(fmt.Sprintf("SLOW QUERY (took %.2fs) '%s'", d.Seconds(), sql)) } return qr, err } @@ -819,13 +815,13 @@ func SetProtocol(name string, protocol string) (reset func()) { case nil: reset = func() { SetProtocol(name, oldVal) } default: - log.Errorf("failed to get string value for flag %q: %v", binlogPlayerProtocolFlagName, err) + log.Error(fmt.Sprintf("failed to get string value for flag %q: %v", binlogPlayerProtocolFlagName, err)) reset = func() {} } if err := pflag.Set(binlogPlayerProtocolFlagName, protocol); err != nil { msg := "failed to set flag %q to %q: %v" - log.Errorf(msg, binlogPlayerProtocolFlagName, protocol, err) + log.Error(fmt.Sprintf(msg, binlogPlayerProtocolFlagName, protocol, err)) reset = func() {} } diff --git a/go/vt/binlog/binlogplayer/client.go b/go/vt/binlog/binlogplayer/client.go index c046999af5d..aa1a3818893 100644 --- a/go/vt/binlog/binlogplayer/client.go +++ b/go/vt/binlog/binlogplayer/client.go @@ -18,6 +18,8 @@ package binlogplayer import ( "context" + "fmt" + "os" "github.com/spf13/pflag" @@ -76,7 +78,8 @@ var clientFactories = make(map[string]ClientFactory) // RegisterClientFactory adds a new factory. Call during init(). 
func RegisterClientFactory(name string, factory ClientFactory) { if _, ok := clientFactories[name]; ok { - log.Fatalf("ClientFactory %s already exists", name) + log.Error(fmt.Sprintf("ClientFactory %s already exists", name)) + os.Exit(1) } clientFactories[name] = factory } diff --git a/go/vt/binlog/binlogplayer/dbclient.go b/go/vt/binlog/binlogplayer/dbclient.go index 4cbfd962528..609de3fe86f 100644 --- a/go/vt/binlog/binlogplayer/dbclient.go +++ b/go/vt/binlog/binlogplayer/dbclient.go @@ -136,7 +136,7 @@ func (dc *dbClientImpl) SupportsCapability(capability capabilities.FlavorCapabil // LogError logs a message after truncating it to avoid spamming logs func LogError(msg string, err error) { - log.Errorf("%s: %s", msg, MessageTruncate(err.Error())) + log.Error(fmt.Sprintf("%s: %s", msg, MessageTruncate(err.Error()))) } // LimitString truncates string to specified size diff --git a/go/vt/binlog/keyrange_filter.go b/go/vt/binlog/keyrange_filter.go index 1bca87106ba..91cb52a61f6 100644 --- a/go/vt/binlog/keyrange_filter.go +++ b/go/vt/binlog/keyrange_filter.go @@ -40,7 +40,7 @@ func keyRangeFilterFunc(keyrange *topodatapb.KeyRange, callback func(*binlogdata case binlogdatapb.BinlogTransaction_Statement_BL_SET: filtered = append(filtered, statement.Statement) case binlogdatapb.BinlogTransaction_Statement_BL_DDL: - log.Warningf("Not forwarding DDL: %s", statement.Statement.Sql) + log.Warn(fmt.Sprintf("Not forwarding DDL: %s", statement.Statement.Sql)) continue case binlogdatapb.BinlogTransaction_Statement_BL_INSERT, binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, @@ -56,7 +56,7 @@ func keyRangeFilterFunc(keyrange *topodatapb.KeyRange, callback func(*binlogdata matched = true case binlogdatapb.BinlogTransaction_Statement_BL_UNRECOGNIZED: updateStreamErrors.Add("KeyRangeStream", 1) - log.Errorf("Error parsing keyspace id: %s", statement.Statement.Sql) + log.Error(fmt.Sprintf("Error parsing keyspace id: %s", statement.Statement.Sql)) continue } } diff --git a/go/vt/binlog/tables_filter.go b/go/vt/binlog/tables_filter.go index c5a160a4974..3702249c0d1 100644 --- a/go/vt/binlog/tables_filter.go +++ b/go/vt/binlog/tables_filter.go @@ -17,6 +17,7 @@ limitations under the License. 
package binlog import ( + "fmt" "slices" "strings" @@ -44,7 +45,7 @@ func tablesFilterFunc(tables []string, callback func(*binlogdatapb.BinlogTransac case binlogdatapb.BinlogTransaction_Statement_BL_SET: filtered = append(filtered, statement.Statement) case binlogdatapb.BinlogTransaction_Statement_BL_DDL: - log.Warningf("Not forwarding DDL: %s", statement.Statement.Sql) + log.Warn(fmt.Sprintf("Not forwarding DDL: %s", statement.Statement.Sql)) continue case binlogdatapb.BinlogTransaction_Statement_BL_INSERT, binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, @@ -58,14 +59,14 @@ func tablesFilterFunc(tables []string, callback func(*binlogdatapb.BinlogTransac tableIndex := strings.LastIndex(sql, streamComment) if tableIndex == -1 { updateStreamErrors.Add("TablesStream", 1) - log.Errorf("Error parsing table name: %s", sql) + log.Error("Error parsing table name: " + sql) continue } tableStart := tableIndex + len(streamComment) tableEnd := strings.Index(sql[tableStart:], space) if tableEnd == -1 { updateStreamErrors.Add("TablesStream", 1) - log.Errorf("Error parsing table name: %s", sql) + log.Error("Error parsing table name: " + sql) continue } tableName = sql[tableStart : tableStart+tableEnd] @@ -76,7 +77,7 @@ func tablesFilterFunc(tables []string, callback func(*binlogdatapb.BinlogTransac } case binlogdatapb.BinlogTransaction_Statement_BL_UNRECOGNIZED: updateStreamErrors.Add("TablesStream", 1) - log.Errorf("Error parsing table name: %s", string(statement.Statement.Sql)) + log.Error("Error parsing table name: " + string(statement.Statement.Sql)) continue } } diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index c2c453712cd..d2154215390 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -172,7 +172,7 @@ func (updateStream *UpdateStreamImpl) RegisterService() { func logError() { if x := recover(); x != nil { - log.Errorf("%s at\n%s", x.(error).Error(), tb.Stack(4)) + log.Error(fmt.Sprintf("%s at\n%s", x.(error).Error(), tb.Stack(4))) } } @@ -187,7 +187,7 @@ func (updateStream *UpdateStreamImpl) Enable() { updateStream.state.Store(usEnabled) updateStream.streams.Init() - log.Infof("Enabling update stream, dbname: %s", updateStream.cp.DBName()) + log.Info("Enabling update stream, dbname: " + updateStream.cp.DBName()) } // Disable will disallow any connection to the service @@ -202,7 +202,7 @@ func (updateStream *UpdateStreamImpl) Disable() { updateStream.state.Store(usDisabled) updateStream.streams.Stop() updateStream.stateWaitGroup.Wait() - log.Infof("Update Stream Disabled") + log.Info("Update Stream Disabled") } // IsEnabled returns true if UpdateStreamImpl is enabled @@ -220,7 +220,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi updateStream.actionLock.Lock() if !updateStream.IsEnabled() { updateStream.actionLock.Unlock() - log.Errorf("Unable to serve client request: Update stream service is not enabled") + log.Error("Unable to serve client request: Update stream service is not enabled") return errors.New("update stream service is not enabled") } updateStream.stateWaitGroup.Add(1) @@ -229,7 +229,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi streamCount.Add("KeyRange", 1) defer streamCount.Add("KeyRange", -1) - log.Infof("ServeUpdateStream starting @ %#v", pos) + log.Info(fmt.Sprintf("ServeUpdateStream starting @ %#v", pos)) // Calls cascade like this: binlog.Streamer->keyRangeFilterFunc->func(*binlogdatapb.BinlogTransaction)->callback f := 
keyRangeFilterFunc(keyRange, func(trans *binlogdatapb.BinlogTransaction) error { @@ -260,7 +260,7 @@ func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position updateStream.actionLock.Lock() if !updateStream.IsEnabled() { updateStream.actionLock.Unlock() - log.Errorf("Unable to serve client request: Update stream service is not enabled") + log.Error("Unable to serve client request: Update stream service is not enabled") return errors.New("update stream service is not enabled") } updateStream.stateWaitGroup.Add(1) @@ -269,7 +269,7 @@ func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position streamCount.Add("Tables", 1) defer streamCount.Add("Tables", -1) - log.Infof("ServeUpdateStream starting @ %#v", pos) + log.Info(fmt.Sprintf("ServeUpdateStream starting @ %#v", pos)) // Calls cascade like this: binlog.Streamer->tablesFilterFunc->func(*binlogdatapb.BinlogTransaction)->callback f := tablesFilterFunc(tables, func(trans *binlogdatapb.BinlogTransaction) error { @@ -289,7 +289,7 @@ func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position // HandlePanic is part of the UpdateStream interface func (updateStream *UpdateStreamImpl) HandlePanic(err *error) { if x := recover(); x != nil { - log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) + log.Error(fmt.Sprintf("Uncaught panic:\n%v\n%s", x, tb.Stack(4))) *err = fmt.Errorf("uncaught panic: %v", x) } } diff --git a/go/vt/concurrency/error_recorder.go b/go/vt/concurrency/error_recorder.go index 7daf1a2debd..d6390d1d3c2 100644 --- a/go/vt/concurrency/error_recorder.go +++ b/go/vt/concurrency/error_recorder.go @@ -54,7 +54,7 @@ func (fer *FirstErrorRecorder) RecordError(err error) { if fer.errorCount == 1 { fer.firstError = err } else { - log.Errorf("FirstErrorRecorder: error[%v]: %v", fer.errorCount, err) + log.Error(fmt.Sprintf("FirstErrorRecorder: error[%v]: %v", fer.errorCount, err)) } fer.mu.Unlock() } diff --git a/go/vt/dbconfigs/credentials.go b/go/vt/dbconfigs/credentials.go index ecc094b9cc0..3735630f90f 100644 --- a/go/vt/dbconfigs/credentials.go +++ b/go/vt/dbconfigs/credentials.go @@ -24,6 +24,7 @@ package dbconfigs import ( "encoding/json" "errors" + "fmt" "os" "os/signal" "strings" @@ -129,7 +130,8 @@ func init() { func GetCredentialsServer() CredentialsServer { cs, ok := AllCredentialsServers[dbCredentialsServer] if !ok { - log.Exitf("Invalid credential server: %v", dbCredentialsServer) + log.Error(fmt.Sprintf("Invalid credential server: %v", dbCredentialsServer)) + os.Exit(1) } return cs } @@ -174,12 +176,12 @@ func (fcs *FileCredentialsServer) GetUserAndPassword(user string) (string, strin data, err := os.ReadFile(dbCredentialsFile) if err != nil { - log.Warningf("Failed to read dbCredentials file: %v", dbCredentialsFile) + log.Warn(fmt.Sprintf("Failed to read dbCredentials file: %v", dbCredentialsFile)) return "", "", err } if err = json.Unmarshal(data, &fcs.dbCredentials); err != nil { - log.Warningf("Failed to parse dbCredentials file: %v", dbCredentialsFile) + log.Warn(fmt.Sprintf("Failed to parse dbCredentials file: %v", dbCredentialsFile)) return "", "", err } } @@ -209,7 +211,7 @@ func (vcs *VaultCredentialsServer) GetUserAndPassword(user string) (string, stri if vcs.cacheValid && vcs.dbCredsCache != nil { if vcs.dbCredsCache[user] == nil { - log.Errorf("Vault cache is valid, but user %s unknown in cache, will retry", user) + log.Error(fmt.Sprintf("Vault cache is valid, but user %s unknown in cache, will retry", user)) return "", "", ErrUnknownUser } return 
user, vcs.dbCredsCache[user][0], nil @@ -265,7 +267,7 @@ func (vcs *VaultCredentialsServer) GetUserAndPassword(user string) (string, stri var err error vcs.vaultClient, err = vaultapi.NewClient(config) if err != nil || vcs.vaultClient == nil { - log.Errorf("Error in vault client initialization, will retry: %v", err) + log.Error(fmt.Sprintf("Error in vault client initialization, will retry: %v", err)) vcs.vaultClient = nil return "", "", ErrUnknownUser } @@ -273,25 +275,25 @@ func (vcs *VaultCredentialsServer) GetUserAndPassword(user string) (string, stri secret, err := vcs.vaultClient.GetSecret(vaultPath) if err != nil { - log.Errorf("Error in Vault server params: %v", err) + log.Error(fmt.Sprintf("Error in Vault server params: %v", err)) return "", "", ErrUnknownUser } if secret.JSONSecret == nil { - log.Errorf("Empty DB credentials retrieved from Vault server") + log.Error("Empty DB credentials retrieved from Vault server") return "", "", ErrUnknownUser } dbCreds := make(map[string][]string) if err = json.Unmarshal(secret.JSONSecret, &dbCreds); err != nil { - log.Errorf("Error unmarshaling DB credentials from Vault server") + log.Error("Error unmarshaling DB credentials from Vault server") return "", "", ErrUnknownUser } if dbCreds[user] == nil { - log.Warningf("Vault lookup for user not found: %v\n", user) + log.Warn(fmt.Sprintf("Vault lookup for user not found: %v\n", user)) return "", "", ErrUnknownUser } - log.Infof("Vault client status: %s", vcs.vaultClient.GetStatus()) + log.Info("Vault client status: " + vcs.vaultClient.GetStatus()) vcs.dbCredsCache = dbCreds vcs.cacheValid = true diff --git a/go/vt/dbconfigs/dbconfigs.go b/go/vt/dbconfigs/dbconfigs.go index 240556e3903..031b49e33d2 100644 --- a/go/vt/dbconfigs/dbconfigs.go +++ b/go/vt/dbconfigs/dbconfigs.go @@ -23,6 +23,8 @@ package dbconfigs import ( "context" "encoding/json" + "fmt" + "os" "github.com/spf13/pflag" @@ -354,7 +356,7 @@ func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string, collationEnv * if dbcfgs.Charset != "" && cp.Charset == collations.Unknown { ch, err := collationEnv.ParseConnectionCharset(dbcfgs.Charset) if err != nil { - log.Warningf("Error parsing charset %s: %v", dbcfgs.Charset, err) + log.Warn(fmt.Sprintf("Error parsing charset %s: %v", dbcfgs.Charset, err)) ch = collationEnv.DefaultConnectionCharset() } cp.Charset = ch @@ -382,7 +384,7 @@ func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string, collationEnv * } } - log.Infof("DBConfigs: %v\n", dbcfgs.String()) + log.Info(fmt.Sprintf("DBConfigs: %v\n", dbcfgs.String())) } func (dbcfgs *DBConfigs) getParams(userKey string) (*UserConfig, *mysql.ConnParams) { @@ -414,7 +416,8 @@ func (dbcfgs *DBConfigs) getParams(userKey string) (*UserConfig, *mysql.ConnPara uc = &dbcfgs.CloneUser cp = &dbcfgs.cloneParams default: - log.Exitf("Invalid db user key requested: %s", userKey) + log.Error("Invalid db user key requested: " + userKey) + os.Exit(1) } return uc, cp } diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index ee5e2fe1cad..4e012ce33ea 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -160,7 +160,8 @@ func ParseTabletURLTemplateFromFlag() { tabletURLTemplate = template.New("") _, err := tabletURLTemplate.ParseFromTrustedTemplate(uncheckedconversions.TrustedTemplateFromStringKnownToSatisfyTypeContract(TabletURLTemplateString)) if err != nil { - log.Exitf("error parsing template: %v", err) + log.Error(fmt.Sprintf("error parsing template: %v", err)) + os.Exit(1) } } diff --git 
a/go/vt/discovery/keyspace_events.go b/go/vt/discovery/keyspace_events.go index c66a1c0030f..fafad17e4cc 100644 --- a/go/vt/discovery/keyspace_events.go +++ b/go/vt/discovery/keyspace_events.go @@ -98,7 +98,7 @@ func NewKeyspaceEventWatcher(ctx context.Context, topoServer srvtopo.Server, hc subs: make(map[chan *KeyspaceEvent]struct{}), } kew.run(ctx) - log.Infof("started watching keyspace events in %q", localCell) + log.Info(fmt.Sprintf("started watching keyspace events in %q", localCell)) return kew } @@ -245,7 +245,7 @@ func (kew *KeyspaceEventWatcher) run(ctx context.Context) { // Seed the keyspace statuses once at startup keyspaces, err := kew.ts.GetSrvKeyspaceNames(ctx, kew.localCell, true) if err != nil { - log.Errorf("CEM: initialize failed for cell %q: %v", kew.localCell, err) + log.Error(fmt.Sprintf("CEM: initialize failed for cell %q: %v", kew.localCell, err)) return } for _, ks := range keyspaces { @@ -319,7 +319,7 @@ func (kss *keyspaceState) ensureConsistentLocked() { // watcher. this means the ongoing availability event has been resolved, so we can broadcast // a resolution event to all listeners kss.consistent = true - log.Infof("keyspace %s is now consistent", kss.keyspace) + log.Info(fmt.Sprintf("keyspace %s is now consistent", kss.keyspace)) kss.moveTablesState = nil @@ -330,10 +330,9 @@ func (kss *keyspaceState) ensureConsistentLocked() { Serving: sstate.serving, }) - log.V(2).Infof("keyspace event resolved: %s is now consistent (serving: %t)", + log.Debug(fmt.Sprintf("keyspace event resolved: %s is now consistent (serving: %t)", topoproto.KeyspaceShardString(sstate.target.Keyspace, sstate.target.Shard), - sstate.serving, - ) + sstate.serving)) if !sstate.serving { delete(kss.shards, shard) @@ -529,7 +528,7 @@ func (kss *keyspaceState) getMoveTablesStatus(vs *vschemapb.SrvVSchema) (*MoveTa break } } - log.Infof("getMoveTablesStatus: keyspace %s declaring partial move tables %s", kss.keyspace, mtState.String()) + log.Info(fmt.Sprintf("getMoveTablesStatus: keyspace %s declaring partial move tables %s", kss.keyspace, mtState.String())) return mtState, nil } @@ -543,10 +542,10 @@ func (kss *keyspaceState) getMoveTablesStatus(vs *vschemapb.SrvVSchema) (*MoveTa // If a rule exists for the table and points to the target keyspace, writes have been switched. if ok && len(r) > 0 && r[0] != fmt.Sprintf("%s.%s", kss.keyspace, oneDeniedTable) { mtState.State = MoveTablesSwitched - log.Infof("onSrvKeyspace:: keyspace %s writes have been switched for table %s, rule %v", kss.keyspace, oneDeniedTable, r[0]) + log.Info(fmt.Sprintf("onSrvKeyspace:: keyspace %s writes have been switched for table %s, rule %v", kss.keyspace, oneDeniedTable, r[0])) } } - log.Infof("getMoveTablesStatus: keyspace %s declaring regular move tables %s", kss.keyspace, mtState.String()) + log.Info(fmt.Sprintf("getMoveTablesStatus: keyspace %s declaring regular move tables %s", kss.keyspace, mtState.String())) return mtState, nil } @@ -564,7 +563,7 @@ func (kss *keyspaceState) onSrvKeyspace(newKeyspace *topodatapb.SrvKeyspace, new // to keep watching for events in this keyspace. if topo.IsErrType(newError, topo.NoNode) { kss.deleted = true - log.Infof("keyspace %q deleted", kss.keyspace) + log.Info(fmt.Sprintf("keyspace %q deleted", kss.keyspace)) return false } @@ -573,7 +572,7 @@ func (kss *keyspaceState) onSrvKeyspace(newKeyspace *topodatapb.SrvKeyspace, new // topology events. 
if newError != nil { kss.lastError = newError - log.Errorf("error while watching keyspace %q: %v", kss.keyspace, newError) + log.Error(fmt.Sprintf("error while watching keyspace %q: %v", kss.keyspace, newError)) return true } @@ -629,7 +628,7 @@ func (kss *keyspaceState) onSrvVSchema(vs *vschemapb.SrvVSchema, err error) bool defer kss.mu.Unlock() var kerr error if kss.moveTablesState, kerr = kss.getMoveTablesStatus(vs); err != nil { - log.Errorf("onSrvVSchema: keyspace %s failed to get move tables status: %v", kss.keyspace, kerr) + log.Error(fmt.Sprintf("onSrvVSchema: keyspace %s failed to get move tables status: %v", kss.keyspace, kerr)) } if kss.moveTablesState != nil && kss.moveTablesState.Typ != MoveTablesNone { // Mark the keyspace as inconsistent. ensureConsistentLocked() checks if the workflow is @@ -645,7 +644,7 @@ func (kss *keyspaceState) onSrvVSchema(vs *vschemapb.SrvVSchema, err error) bool // in this keyspace, and starts up a SrvKeyspace watcher on our topology server which will update // our keyspaceState with any topology changes in real time. func newKeyspaceState(ctx context.Context, kew *KeyspaceEventWatcher, cell, keyspace string) *keyspaceState { - log.Infof("created dedicated watcher for keyspace %s/%s", cell, keyspace) + log.Info(fmt.Sprintf("created dedicated watcher for keyspace %s/%s", cell, keyspace)) kss := &keyspaceState{ kew: kew, keyspace: keyspace, @@ -808,7 +807,7 @@ func (kew *KeyspaceEventWatcher) WaitForConsistentKeyspaces(ctx context.Context, case <-ctx.Done(): for _, ks := range keyspaces { if ks != "" { - log.Infof("keyspace %v didn't become consistent", ks) + log.Info(fmt.Sprintf("keyspace %v didn't become consistent", ks)) } } return ctx.Err() diff --git a/go/vt/discovery/tablet_picker.go b/go/vt/discovery/tablet_picker.go index 37ab0f097bf..148c158d350 100644 --- a/go/vt/discovery/tablet_picker.go +++ b/go/vt/discovery/tablet_picker.go @@ -18,6 +18,7 @@ package discovery import ( "context" + "fmt" "io" "math/rand/v2" "sort" @@ -356,8 +357,7 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table if len(candidates) == 0 { // If no viable candidates were found, sleep and try again. 
tp.incNoTabletFoundStat() - log.Infof("No healthy serving tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, maxReplicationLag: %v, sleeping for %.3f seconds.", - tp.keyspace, tp.shard, tp.cells, tp.tabletTypes, tp.options.ExcludeTabletsWithMaxReplicationLag, float64(GetTabletPickerRetryDelay().Milliseconds())/1000.0) + log.Info(fmt.Sprintf("No healthy serving tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, maxReplicationLag: %v, sleeping for %.3f seconds.", tp.keyspace, tp.shard, tp.cells, tp.tabletTypes, tp.options.ExcludeTabletsWithMaxReplicationLag, float64(GetTabletPickerRetryDelay().Milliseconds())/1000.0)) timer := time.NewTimer(GetTabletPickerRetryDelay()) select { case <-ctx.Done(): @@ -367,7 +367,7 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table } continue } - log.Infof("Tablet picker found a healthy tablet for streaming: %s", candidates[0].Tablet.String()) + log.Info("Tablet picker found a healthy tablet for streaming: " + candidates[0].Tablet.String()) return candidates[0].Tablet, nil } } @@ -384,7 +384,7 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn defer cancel() si, err := tp.ts.GetShard(shortCtx, tp.keyspace, tp.shard) if err != nil { - log.Errorf("Error getting shard %s/%s: %v", tp.keyspace, tp.shard, err) + log.Error(fmt.Sprintf("Error getting shard %s/%s: %v", tp.keyspace, tp.shard, err)) return nil } @@ -413,7 +413,7 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn if err == nil { actualCells = append(actualCells, alias.Cells...) } else { - log.Infof("Unable to resolve cell %s, ignoring", cell) + log.Info(fmt.Sprintf("Unable to resolve cell %s, ignoring", cell)) } } else { // Valid cell, add it to our list. @@ -448,7 +448,7 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn defer cancel() tabletMap, err := tp.ts.GetTabletMap(shortCtx, aliases, nil) if err != nil { - log.Warningf("Error fetching tablets from topo: %v", err) + log.Warn(fmt.Sprintf("Error fetching tablets from topo: %v", err)) // If we get a partial result we can still use it, otherwise return. if len(tabletMap) == 0 { return nil @@ -461,7 +461,7 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn if !ok { // Either tablet disappeared on us, or we got a partial result // (GetTabletMap ignores topo.ErrNoNode); just log a warning. - log.Warningf("Tablet picker failed to load tablet %v", tabletAlias) + log.Warn(fmt.Sprintf("Tablet picker failed to load tablet %v", tabletAlias)) } else if topoproto.IsTypeInList(tabletInfo.Type, tp.tabletTypes) { // Try to connect to the tablet and confirm that it's usable. if conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, grpcclient.FailFast(true)); err == nil { diff --git a/go/vt/external/golib/sqlutils/sqlutils.go b/go/vt/external/golib/sqlutils/sqlutils.go index bdd34d7a116..d1f1ebb8881 100644 --- a/go/vt/external/golib/sqlutils/sqlutils.go +++ b/go/vt/external/golib/sqlutils/sqlutils.go @@ -235,7 +235,7 @@ func QueryRowsMap(db *sql.DB, query string, on_row func(RowMap) error, args ...a defer rows.Close() } if err != nil && err != sql.ErrNoRows { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } err = ScanRowsToMaps(rows, on_row) @@ -252,7 +252,7 @@ func ExecNoPrepare(db *sql.DB, query string, args ...any) (res sql.Result, err e res, err = db.Exec(query, args...) 
if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return res, err } diff --git a/go/vt/grpcclient/client.go b/go/vt/grpcclient/client.go index 0a39656dbe3..95df7246930 100644 --- a/go/vt/grpcclient/client.go +++ b/go/vt/grpcclient/client.go @@ -21,7 +21,9 @@ package grpcclient import ( "context" "crypto/tls" + "fmt" "net" + "os" "sync" "time" @@ -152,7 +154,8 @@ func DialContext(ctx context.Context, target string, failFast FailFast, opts ... for _, grpcDialOptionInitializer := range grpcDialOptions { newopts, err = grpcDialOptionInitializer(newopts) if err != nil { - log.Fatalf("There was an error initializing client grpc.DialOption: %v", err) + log.Error(fmt.Sprintf("There was an error initializing client grpc.DialOption: %v", err)) + os.Exit(1) } } grpcDialOptionsMu.Unlock() diff --git a/go/vt/grpcclient/glogger.go b/go/vt/grpcclient/glogger.go index 404a491de2e..4110505d2a1 100644 --- a/go/vt/grpcclient/glogger.go +++ b/go/vt/grpcclient/glogger.go @@ -18,6 +18,8 @@ package grpcclient import ( "fmt" + "log/slog" + "os" "google.golang.org/grpc/grpclog" @@ -44,19 +46,19 @@ func (g *glogger) Infof(format string, args ...any) { } func (g *glogger) Warning(args ...any) { - log.WarningDepth(2, args...) + log.WarnDepth(2, fmt.Sprint(args...)) } func (g *glogger) Warningln(args ...any) { - log.WarningDepth(2, fmt.Sprintln(args...)) + log.WarnDepth(2, fmt.Sprintln(args...)) } func (g *glogger) Warningf(format string, args ...any) { - log.WarningDepth(2, fmt.Sprintf(format, args...)) + log.WarnDepth(2, fmt.Sprintf(format, args...)) } func (g *glogger) Error(args ...any) { - log.ErrorDepth(2, args...) + log.ErrorDepth(2, fmt.Sprint(args...)) } func (g *glogger) Errorln(args ...any) { @@ -68,17 +70,20 @@ func (g *glogger) Errorf(format string, args ...any) { } func (g *glogger) Fatal(args ...any) { - log.FatalDepth(2, args...) + log.ErrorDepth(2, fmt.Sprint(args...)) + os.Exit(1) } func (g *glogger) Fatalln(args ...any) { - log.FatalDepth(2, fmt.Sprintln(args...)) + log.ErrorDepth(2, fmt.Sprintln(args...)) + os.Exit(1) } func (g *glogger) Fatalf(format string, args ...any) { - log.FatalDepth(2, fmt.Sprintf(format, args...)) + log.ErrorDepth(2, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *glogger) V(l int) bool { - return bool(log.V(log.Level(l))) + return log.Enabled(slog.Level(l)) } diff --git a/go/vt/hook/hook.go b/go/vt/hook/hook.go index 38c35c24bc7..dd96bc7e5c5 100644 --- a/go/vt/hook/hook.go +++ b/go/vt/hook/hook.go @@ -121,7 +121,7 @@ func (hook *Hook) findHook(ctx context.Context) (*exec.Cmd, int, error) { } // Configure the command. - log.Infof("hook: executing hook: %v %v", vthook, strings.Join(hook.Parameters, " ")) + log.Info(fmt.Sprintf("hook: executing hook: %v %v", vthook, strings.Join(hook.Parameters, " "))) cmd := exec.CommandContext(ctx, vthook, hook.Parameters...) 
if len(hook.ExtraEnv) > 0 { cmd.Env = os.Environ() @@ -158,7 +158,7 @@ func (hook *Hook) ExecuteContext(ctx context.Context) (result *HookResult) { result.Stderr = stderr.String() defer func() { - log.Infof("hook: result is %v", result.String()) + log.Info(fmt.Sprintf("hook: result is %v", result.String())) }() if err == nil { @@ -203,9 +203,9 @@ func (hook *Hook) ExecuteOptional() error { hr := hook.Execute() switch hr.ExitStatus { case HOOK_DOES_NOT_EXIST: - log.Infof("%v hook doesn't exist", hook.Name) + log.Info(fmt.Sprintf("%v hook doesn't exist", hook.Name)) case HOOK_VTROOT_ERROR: - log.Infof("VTROOT not set, so %v hook doesn't exist", hook.Name) + log.Info(fmt.Sprintf("VTROOT not set, so %v hook doesn't exist", hook.Name)) case HOOK_SUCCESS: // nothing to do here default: diff --git a/go/vt/log/capture.go b/go/vt/log/capture.go new file mode 100644 index 00000000000..f360a482e27 --- /dev/null +++ b/go/vt/log/capture.go @@ -0,0 +1,90 @@ +/* +Copyright 2026 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "context" + "log/slog" + "sync" +) + +// CaptureHandler records slog records in memory. Used for testing. +type CaptureHandler struct { + mu sync.Mutex + + records []slog.Record +} + +// NewCaptureHandler returns a CaptureHandler that records all log output. +func NewCaptureHandler() *CaptureHandler { + return &CaptureHandler{} +} + +// Enabled implements [slog.Handler]. +func (h *CaptureHandler) Enabled(ctx context.Context, level slog.Level) bool { + return true +} + +// Handle implements [slog.Handler]. +func (h *CaptureHandler) Handle(ctx context.Context, record slog.Record) error { + h.mu.Lock() + defer h.mu.Unlock() + + h.records = append(h.records, record.Clone()) + return nil +} + +// WithAttrs implements [slog.Handler]. +func (h *CaptureHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return h +} + +// WithGroup implements [slog.Handler]. +func (h *CaptureHandler) WithGroup(name string) slog.Handler { + return h +} + +// Records returns a copy of the captured records. +func (h *CaptureHandler) Records() []slog.Record { + h.mu.Lock() + defer h.mu.Unlock() + + records := make([]slog.Record, len(h.records)) + copy(records, h.records) + + return records +} + +// Last returns the most recent captured record. +func (h *CaptureHandler) Last() (slog.Record, bool) { + h.mu.Lock() + defer h.mu.Unlock() + + if len(h.records) == 0 { + return slog.Record{}, false + } + + return h.records[len(h.records)-1], true +} + +// Reset clears all captured records. +func (h *CaptureHandler) Reset() { + h.mu.Lock() + defer h.mu.Unlock() + + h.records = nil +} diff --git a/go/vt/log/log.go b/go/vt/log/log.go index cd8f1a35aa1..5a1d8572787 100644 --- a/go/vt/log/log.go +++ b/go/vt/log/log.go @@ -14,16 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -// You can modify this file to hook up a different logging library instead of glog. 
-// If you adapt to a different logging framework, you may need to use that -// framework's equivalent of *Depth() functions so the file and line number printed -// point to the real caller instead of your adapter function. - +// Package log provides a thin adapter around slog, with a glog fallback when +// structured logging is disabled. +// +// Structured logging is disabled by default; the --log-structured flag enables +// it, emitting JSON to stderr at the minimum level set by --log-level. package log import ( + "context" + "fmt" + "log/slog" + "os" + "runtime" "strconv" + "strings" "sync/atomic" + "time" "github.com/golang/glog" "github.com/spf13/pflag" @@ -31,50 +38,19 @@ import ( "vitess.io/vitess/go/vt/utils" ) -// Level is used with V() to test log verbosity. -type Level = glog.Level - var ( - // V quickly checks if the logging verbosity meets a threshold. - V = glog.V - // Flush ensures any pending I/O is written. Flush = glog.Flush - // Info formats arguments like fmt.Print. - Info = glog.Info - // Infof formats arguments like fmt.Printf. - Infof = glog.Infof - // InfoDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. - InfoDepth = glog.InfoDepth - - // Warning formats arguments like fmt.Print. - Warning = glog.Warning - // Warningf formats arguments like fmt.Printf. - Warningf = glog.Warningf - // WarningDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. - WarningDepth = glog.WarningDepth - - // Error formats arguments like fmt.Print. - Error = glog.Error - // Errorf formats arguments like fmt.Printf. - Errorf = glog.Errorf - // ErrorDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. - ErrorDepth = glog.ErrorDepth - - // Exit formats arguments like fmt.Print. - Exit = glog.Exit - // Exitf formats arguments like fmt.Printf. - Exitf = glog.Exitf - // ExitDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. - ExitDepth = glog.ExitDepth - - // Fatal formats arguments like fmt.Print. - Fatal = glog.Fatal - // Fatalf formats arguments like fmt.Printf - Fatalf = glog.Fatalf - // FatalDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. - FatalDepth = glog.FatalDepth + // logStructured indicates whether structured logging is enabled. + logStructured bool + + // logLevel is the configured log level. + logLevel string + + // structuredLoggingEnabled controls whether structured logging is enabled. If it's disabled, + // logging is performed through glog. If enabled, logging is instead through slog. + structuredLoggingEnabled atomic.Bool ) // RegisterFlags installs log flags on the given FlagSet. @@ -87,119 +63,192 @@ func RegisterFlags(fs *pflag.FlagSet) { val: strconv.FormatUint(atomic.LoadUint64(&glog.MaxSize), 10), } utils.SetFlagVar(fs, &flagVal, "log-rotate-max-size", "size in bytes at which logs are rotated (glog.MaxSize)") -} -// logRotateMaxSize implements pflag.Value and is used to -// try and provide thread-safe access to glog.MaxSize. -type logRotateMaxSize struct { - val string + // Structured logging flags. + utils.SetFlagBoolVar(fs, &logStructured, "log-structured", false, "enable structured logging") + utils.SetFlagStringVar(fs, &logLevel, "log-level", "info", "minimum structured logging level: info, warn, debug, or error") } -func (lrms *logRotateMaxSize) Set(s string) error { - maxSize, err := strconv.ParseUint(s, 10, 64) +// Init configures logging based on the parsed flags.
+func Init() error { + if !logStructured { + return nil + } + + level, err := slogLevel(logLevel) if err != nil { return err } - atomic.StoreUint64(&glog.MaxSize, maxSize) - lrms.val = s + + opts := &slog.HandlerOptions{AddSource: true, Level: level} + handler := slog.NewJSONHandler(os.Stderr, opts) + + logger := slog.New(handler) + structuredLoggingEnabled.Store(true) + slog.SetDefault(logger) + return nil } -func (lrms *logRotateMaxSize) String() string { - return lrms.val +// slogLevel maps the log-level flag value to a slog.Level. +func slogLevel(level string) (slog.Level, error) { + normalized := strings.ToLower(strings.TrimSpace(level)) + + switch normalized { + case "debug": + return slog.LevelDebug, nil + case "info": + return slog.LevelInfo, nil + case "warn": + return slog.LevelWarn, nil + case "error": + return slog.LevelError, nil + default: + return 0, fmt.Errorf("invalid --log-level %q: expected debug, info, warn, or error", level) + } } -func (lrms *logRotateMaxSize) Type() string { - return "uint64" -} +// log emits a structured log record when structured logging is enabled. +// When structured logging is disabled, log forwards the call to glog +// using the severity implied by level. +func log(level slog.Level, depth int, msg string, args ...any) { + depth += 3 -type PrefixedLogger struct { - prefix string -} + if !structuredLoggingEnabled.Load() { + logGlog(level, depth, msg, args...) + return + } -func NewPrefixedLogger(prefix string) *PrefixedLogger { - return &PrefixedLogger{prefix: prefix + ": "} -} + logger := slog.Default() -func (pl *PrefixedLogger) V(level glog.Level) glog.Verbose { - return V(level) -} + ctx := context.Background() + if !logger.Enabled(ctx, level) { + return + } + + // Adjust the caller depth (+3) to bypass the helper functions. + var pcs [1]uintptr + runtime.Callers(depth, pcs[:]) -func (pl *PrefixedLogger) Flush() { - Flush() + // Rebuild the record with the proper source. + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + record.Add(args...) + + _ = logger.Handler().Handle(ctx, record) } -func (pl *PrefixedLogger) Info(args ...any) { - args = append([]any{pl.prefix}, args...) - Info(args...) +// Enabled reports whether a log call at the provided level would be emitted. +// When structured logging is enabled, Enabled consults the configured slog +// logger. When structured logging is disabled, Enabled returns true for info +// and above, and uses glog verbosity to gate debug logging. +func Enabled(level slog.Level) bool { + if structuredLoggingEnabled.Load() { + return slog.Default().Enabled(context.Background(), level) + } + + if level < slog.LevelInfo { + return bool(glog.V(glog.Level(1))) + } + + return true } -func (pl *PrefixedLogger) Infof(format string, args ...any) { - args = append([]any{pl.prefix}, args...) - Infof("%s"+format, args...) +// logGlog formats a structured log call as a glog message. +func logGlog(level slog.Level, depth int, msg string, args ...any) { + // Preserve the slog message as the first printed element. + args = append([]any{msg}, args...) + + switch level { + case slog.LevelDebug, slog.LevelInfo: + glog.InfoDepth(depth, args...) + case slog.LevelWarn: + glog.WarningDepth(depth, args...) + case slog.LevelError: + glog.ErrorDepth(depth, args...) + default: + glog.InfoDepth(depth, args...) + } } -func (pl *PrefixedLogger) InfoDepth(depth int, args ...any) { - args = append([]any{pl.prefix}, args...) - InfoDepth(depth, args...) +// Info logs at the Info level. 
+func Info(msg string, args ...any) { + log(slog.LevelInfo, 0, msg, args...) } -func (pl *PrefixedLogger) Warning(args ...any) { - args = append([]any{pl.prefix}, args...) - Warning(args...) +// InfoDepth logs at the Info level with an adjusted caller depth. +func InfoDepth(depth int, msg string, args ...any) { + log(slog.LevelInfo, depth, msg, args...) } -func (pl *PrefixedLogger) Warningf(format string, args ...any) { - args = append([]any{pl.prefix}, args...) - Warningf("%s"+format, args...) +// Warn logs at the Warn level. +func Warn(msg string, args ...any) { + log(slog.LevelWarn, 0, msg, args...) } -func (pl *PrefixedLogger) WarningDepth(depth int, args ...any) { - args = append([]any{pl.prefix}, args...) - WarningDepth(depth, args...) +// WarnDepth logs at the Warn level with an adjusted caller depth. +func WarnDepth(depth int, msg string, args ...any) { + log(slog.LevelWarn, depth, msg, args...) } -func (pl *PrefixedLogger) Error(args ...any) { - args = append([]any{pl.prefix}, args...) - Error(args...) +// Debug logs at the Debug level. +func Debug(msg string, args ...any) { + log(slog.LevelDebug, 0, msg, args...) } -func (pl *PrefixedLogger) Errorf(format string, args ...any) { - args = append([]any{pl.prefix}, args...) - Errorf("%s"+format, args...) +// DebugDepth logs at the Debug level with an adjusted caller depth. +func DebugDepth(depth int, msg string, args ...any) { + log(slog.LevelDebug, depth, msg, args...) } -func (pl *PrefixedLogger) ErrorDepth(depth int, args ...any) { - args = append([]any{pl.prefix}, args...) - ErrorDepth(depth, args...) +// Error logs at the Error level. +func Error(msg string, args ...any) { + log(slog.LevelError, 0, msg, args...) } -func (pl *PrefixedLogger) Exit(args ...any) { - args = append([]any{pl.prefix}, args...) - Exit(args...) +// ErrorDepth logs at the Error level with an adjusted caller depth. +func ErrorDepth(depth int, msg string, args ...any) { + log(slog.LevelError, depth, msg, args...) } -func (pl *PrefixedLogger) Exitf(format string, args ...any) { - args = append([]any{pl.prefix}, args...) - Exitf("%s"+format, args...) +// logRotateMaxSize implements pflag.Value and is used to +// try and provide thread-safe access to glog.MaxSize. +type logRotateMaxSize struct { + val string } -func (pl *PrefixedLogger) ExitDepth(depth int, args ...any) { - args = append([]any{pl.prefix}, args...) - ExitDepth(depth, args...) +func (lrms *logRotateMaxSize) Set(s string) error { + maxSize, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + atomic.StoreUint64(&glog.MaxSize, maxSize) + lrms.val = s + return nil } -func (pl *PrefixedLogger) Fatal(args ...any) { - args = append([]any{pl.prefix}, args...) - Fatal(args...) +func (lrms *logRotateMaxSize) String() string { + return lrms.val } -func (pl *PrefixedLogger) Fatalf(format string, args ...any) { - args = append([]any{pl.prefix}, args...) - Fatalf("%s"+format, args...) +func (lrms *logRotateMaxSize) Type() string { + return "uint64" } -func (pl *PrefixedLogger) FatalDepth(depth int, args ...any) { - args = append([]any{pl.prefix}, args...) - FatalDepth(depth, args...) +// SetLogger replaces the structured logger used by the log package. The returned function restores +// the previous logger. Used for testing. 
+func SetLogger(logger *slog.Logger) func() { + if logger == nil { + return func() {} + } + + previousEnabled := structuredLoggingEnabled.Load() + previousDefault := slog.Default() + + slog.SetDefault(logger) + structuredLoggingEnabled.Store(true) + + return func() { + slog.SetDefault(previousDefault) + structuredLoggingEnabled.Store(previousEnabled) + } } diff --git a/go/vt/log/prefix.go b/go/vt/log/prefix.go new file mode 100644 index 00000000000..e7a8611de34 --- /dev/null +++ b/go/vt/log/prefix.go @@ -0,0 +1,76 @@ +/* +Copyright 2026 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import "fmt" + +type PrefixedLogger struct { + prefix string +} + +func NewPrefixedLogger(prefix string) *PrefixedLogger { + return &PrefixedLogger{prefix: prefix + ": "} +} + +func (pl *PrefixedLogger) Flush() { + Flush() +} + +func (pl *PrefixedLogger) Info(args ...any) { + args = append([]any{pl.prefix}, args...) + Info(fmt.Sprint(args...)) +} + +func (pl *PrefixedLogger) Infof(format string, args ...any) { + args = append([]any{pl.prefix}, args...) + Info(fmt.Sprintf("%s"+format, args...)) +} + +func (pl *PrefixedLogger) InfoDepth(depth int, args ...any) { + args = append([]any{pl.prefix}, args...) + InfoDepth(depth, fmt.Sprint(args...)) +} + +func (pl *PrefixedLogger) Warning(args ...any) { + args = append([]any{pl.prefix}, args...) + Warn(fmt.Sprint(args...)) +} + +func (pl *PrefixedLogger) Warningf(format string, args ...any) { + args = append([]any{pl.prefix}, args...) + Warn(fmt.Sprintf("%s"+format, args...)) +} + +func (pl *PrefixedLogger) WarningDepth(depth int, args ...any) { + args = append([]any{pl.prefix}, args...) + WarnDepth(depth, fmt.Sprint(args...)) +} + +func (pl *PrefixedLogger) Error(args ...any) { + args = append([]any{pl.prefix}, args...) + Error(fmt.Sprint(args...)) +} + +func (pl *PrefixedLogger) Errorf(format string, args ...any) { + args = append([]any{pl.prefix}, args...) + Error(fmt.Sprintf("%s"+format, args...)) +} + +func (pl *PrefixedLogger) ErrorDepth(depth int, args ...any) { + args = append([]any{pl.prefix}, args...) + ErrorDepth(depth, fmt.Sprint(args...)) +} diff --git a/go/vt/logutil/console_logger.go b/go/vt/logutil/console_logger.go index 0325aa94f03..9dc2bc0d65c 100644 --- a/go/vt/logutil/console_logger.go +++ b/go/vt/logutil/console_logger.go @@ -22,7 +22,8 @@ import ( "vitess.io/vitess/go/vt/log" ) -// ConsoleLogger is a Logger that uses glog directly to log, at the right level. +// ConsoleLogger is a Logger that forwards to the vt/log package at the +// appropriate level. // // Note that methods on ConsoleLogger must use pointer receivers, // because otherwise an autogenerated conversion method will be inserted in the @@ -72,7 +73,7 @@ func (cl *ConsoleLogger) InfoDepth(depth int, s string) { // WarningDepth is part of the Logger interface. func (cl *ConsoleLogger) WarningDepth(depth int, s string) { - log.WarningDepth(1+depth, s) + log.WarnDepth(1+depth, s) } // ErrorDepth is part of the Logger interface. 
diff --git a/go/vt/logutil/logutil.go b/go/vt/logutil/logutil.go index eea87484921..c9bda768a99 100644 --- a/go/vt/logutil/logutil.go +++ b/go/vt/logutil/logutil.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package logutil provides some utilities for logging using glog and -// redirects the stdlib logging to glog. +// Package logutil provides logging helpers for Vitess and redirects the +// standard library logger into the vt/log package. package logutil diff --git a/go/vt/logutil/throttled.go b/go/vt/logutil/throttled.go index fa63be328bc..0d49882165d 100644 --- a/go/vt/logutil/throttled.go +++ b/go/vt/logutil/throttled.go @@ -46,12 +46,20 @@ func NewThrottledLogger(name string, maxInterval time.Duration) *ThrottledLogger } } -type logFunc func(int, ...any) +type logFunc func(int, string) var ( - infoDepth = log.InfoDepth - warningDepth = log.WarningDepth - errorDepth = log.ErrorDepth + infoDepth = func(depth int, msg string) { + log.InfoDepth(depth, msg) + } + + warningDepth = func(depth int, msg string) { + log.WarnDepth(depth, msg) + } + + errorDepth = func(depth int, msg string) { + log.ErrorDepth(depth, msg) + } ) // GetLastLogTime gets the last log time for the throttled logger. diff --git a/go/vt/logutil/throttled_test.go b/go/vt/logutil/throttled_test.go index 5241edf0f87..1873f277bbf 100644 --- a/go/vt/logutil/throttled_test.go +++ b/go/vt/logutil/throttled_test.go @@ -17,7 +17,6 @@ limitations under the License. package logutil import ( - "fmt" "testing" "time" ) @@ -31,8 +30,8 @@ func skippedCount(tl *ThrottledLogger) int { func TestThrottledLogger(t *testing.T) { // Install a fake log func for testing. log := make(chan string) - infoDepth = func(depth int, args ...any) { - log <- fmt.Sprint(args...) 
+ infoDepth = func(depth int, msg string) { + log <- msg } interval := 100 * time.Millisecond tl := NewThrottledLogger("name", interval) diff --git a/go/vt/mysqlctl/azblobbackupstorage/azblob.go b/go/vt/mysqlctl/azblobbackupstorage/azblob.go index d673a692812..6eb58a79b9a 100644 --- a/go/vt/mysqlctl/azblobbackupstorage/azblob.go +++ b/go/vt/mysqlctl/azblobbackupstorage/azblob.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/url" "os" "strings" @@ -135,7 +136,7 @@ func azInternalCredentials() (string, string, error) { var actKey string if keyFile := accountKeyFile.Get(); keyFile != "" { - log.Infof("Getting Azure Storage Account key from file: %s", keyFile) + log.Info("Getting Azure Storage Account key from file: " + keyFile) dat, err := os.ReadFile(keyFile) if err != nil { return "", "", err @@ -173,25 +174,26 @@ func azServiceURL(credentials *azblob.SharedKeyCredential) azblob.ServiceURL { Log: func(level pipeline.LogLevel, message string) { switch level { case pipeline.LogFatal, pipeline.LogPanic: - log.Fatal(message) + log.Error(message) + os.Exit(1) case pipeline.LogError: log.Error(message) case pipeline.LogWarning: - log.Warning(message) + log.Warn(message) case pipeline.LogInfo, pipeline.LogDebug: log.Info(message) } }, ShouldLog: func(level pipeline.LogLevel) bool { switch level { - case pipeline.LogFatal, pipeline.LogPanic: - return bool(log.V(3)) - case pipeline.LogError: - return bool(log.V(3)) + case pipeline.LogFatal, pipeline.LogPanic, pipeline.LogError: + return log.Enabled(slog.LevelError) case pipeline.LogWarning: - return bool(log.V(2)) - case pipeline.LogInfo, pipeline.LogDebug: - return bool(log.V(1)) + return log.Enabled(slog.LevelWarn) + case pipeline.LogInfo: + return log.Enabled(slog.LevelInfo) + case pipeline.LogDebug: + return log.Enabled(slog.LevelDebug) } return false }, @@ -303,7 +305,7 @@ func (bh *AZBlobBackupHandle) ReadFile(ctx context.Context, filename string) (io return resp.Body(azblob.RetryReaderOptions{ MaxRetryRequests: defaultRetryCount, NotifyFailedRead: func(failureCount int, lastError error, offset int64, count int64, willRetry bool) { - log.Warningf("ReadFile: [azblob] container: %s, directory: %s, filename: %s, error: %v", containerName, objName(bh.dir, ""), filename, lastError) + log.Warn(fmt.Sprintf("ReadFile: [azblob] container: %s, directory: %s, filename: %s, error: %v", containerName, objName(bh.dir, ""), filename, lastError)) }, TreatEarlyCloseAsError: true, }), nil @@ -330,7 +332,7 @@ func (bs *AZBlobBackupStorage) ListBackups(ctx context.Context, dir string) ([]b searchPrefix = objName(dir, "") } - log.Infof("ListBackups: [azblob] container: %s, directory: %v", containerName, searchPrefix) + log.Info(fmt.Sprintf("ListBackups: [azblob] container: %s, directory: %v", containerName, searchPrefix)) containerURL, err := bs.containerURL() if err != nil { @@ -389,7 +391,7 @@ func (bs *AZBlobBackupStorage) StartBackup(ctx context.Context, dir, name string // RemoveBackup implements BackupStorage. 
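The azblob pipeline hookup above maps the SDK's ShouldLog/Log callbacks onto log.Enabled plus the level functions. A condensed, generic sketch of that shape, with a hypothetical sdkSeverity type standing in for the SDK's own levels:

package example

import (
	"log/slog"

	"vitess.io/vitess/go/vt/log"
)

// sdkSeverity stands in for a third-party SDK's severity type (hypothetical).
type sdkSeverity int

const (
	sdkDebug sdkSeverity = iota
	sdkInfo
	sdkWarning
	sdkError
)

// forward routes an SDK message through vt/log. The Enabled check mirrors the
// pipeline's separate ShouldLog callback, letting the SDK skip building
// messages that would be dropped anyway.
func forward(sev sdkSeverity, msg string) {
	switch sev {
	case sdkError:
		if log.Enabled(slog.LevelError) {
			log.Error(msg)
		}
	case sdkWarning:
		if log.Enabled(slog.LevelWarn) {
			log.Warn(msg)
		}
	case sdkDebug:
		if log.Enabled(slog.LevelDebug) {
			log.Debug(msg)
		}
	default:
		if log.Enabled(slog.LevelInfo) {
			log.Info(msg)
		}
	}
}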
func (bs *AZBlobBackupStorage) RemoveBackup(ctx context.Context, dir, name string) error { - log.Infof("ListBackups: [azblob] container: %s, directory: %s", containerName, objName(dir, "")) + log.Info(fmt.Sprintf("ListBackups: [azblob] container: %s, directory: %s", containerName, objName(dir, ""))) containerURL, err := bs.containerURL() if err != nil { @@ -430,7 +432,7 @@ func (bs *AZBlobBackupStorage) RemoveBackup(ctx context.Context, dir, name strin return err } - log.Infof("Removing backup directory: %v", strings.TrimSuffix(searchPrefix, "/")) + log.Info(fmt.Sprintf("Removing backup directory: %v", strings.TrimSuffix(searchPrefix, "/"))) _, err = containerURL.NewBlobURL(strings.TrimSuffix(searchPrefix, "/")).Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) if err == nil { break diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 97af3224894..6470170fd97 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -227,14 +227,14 @@ func ParseBackupName(dir string, name string) (backupTime *time.Time, alias *top btime, err := time.Parse(BackupTimestampFormat, timestamp) if err != nil { - log.Errorf("error parsing backup time for %s/%s: %s", dir, name, err) + log.Error(fmt.Sprintf("error parsing backup time for %s/%s: %s", dir, name, err)) } else { backupTime = &btime } alias, err = topoproto.ParseTabletAlias(aliasStr) if err != nil { - log.Errorf("error parsing tablet alias for %s/%s: %s", dir, name, err) + log.Error(fmt.Sprintf("error parsing tablet alias for %s/%s: %s", dir, name, err)) alias = nil } @@ -257,7 +257,7 @@ func checkNoDB(ctx context.Context, mysqld MysqlDaemon, dbName string) (bool, er for _, row := range qr.Rows { if row[0].ToString() == dbName { // found active db - log.Warningf("checkNoDB failed, found active db %v", dbName) + log.Warn(fmt.Sprintf("checkNoDB failed, found active db %v", dbName)) return false, nil } } @@ -287,7 +287,7 @@ func removeExistingFiles(cnf *Mycnf) error { // These paths are actually filename prefixes, not directories. // An extension of the form ".###" is appended by mysqld. path += ".*" - log.Infof("Restore: removing files in %v (%v)", name, path) + log.Info(fmt.Sprintf("Restore: removing files in %v (%v)", name, path)) matches, err := filepath.Glob(path) if err != nil { return vterrors.Wrapf(err, "can't expand path glob %q", path) @@ -302,10 +302,10 @@ func removeExistingFiles(cnf *Mycnf) error { // Regular directory: delete recursively. if _, err := os.Stat(path); os.IsNotExist(err) { - log.Infof("Restore: skipping removal of nonexistent %v (%v)", name, path) + log.Info(fmt.Sprintf("Restore: skipping removal of nonexistent %v (%v)", name, path)) continue } - log.Infof("Restore: removing files in %v (%v)", name, path) + log.Info(fmt.Sprintf("Restore: removing files in %v (%v)", name, path)) if err := os.RemoveAll(path); err != nil { return vterrors.Wrapf(err, "can't remove existing files in %v (%v)", name, path) } diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index 976beaa11ab..ad19bc1fe58 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -382,7 +382,7 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par if resp.FirstTimestampBinlog == "" || resp.LastTimestampBinlog == "" { return BackupUnusable, vterrors.Errorf(vtrpcpb.Code_ABORTED, "empty binlog name in response. 
Request=%v, Response=%v", req, resp) } - log.Infof("ReadBinlogFilesTimestampsResponse: %+v", resp) + log.Info(fmt.Sprintf("ReadBinlogFilesTimestampsResponse: %+v", resp)) incrDetails := &IncrementalBackupDetails{ FirstTimestamp: FormatRFC3339(protoutil.TimeFromProto(resp.FirstTimestamp).UTC()), FirstTimestampBinlog: filepath.Base(resp.FirstTimestampBinlog), @@ -440,7 +440,7 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac if err != nil { return BackupUnusable, vterrors.Wrap(err, "can't get super_read_only status") } - log.Infof("Flag values during full backup, read_only: %v, super_read_only:%t", readOnly, superReadOnly) + log.Info(fmt.Sprintf("Flag values during full backup, read_only: %v, super_read_only:%t", readOnly, superReadOnly)) // get the replication position if sourceIsPrimary { @@ -700,7 +700,7 @@ func (be *BuiltinBackupEngine) backupFileEntries(ctx context.Context, fes []File // unpredictability in my test cases, so in order to avoid that, I am adding this cancellation check. select { case <-ctxCancel.Done(): - log.Errorf("Context canceled or timed out during %q backup", fe.Name) + log.Error(fmt.Sprintf("Context canceled or timed out during %q backup", fe.Name)) bh.RecordError(name, vterrors.Errorf(vtrpcpb.Code_CANCELED, "context canceled")) return nil default: @@ -1229,7 +1229,7 @@ func (be *BuiltinBackupEngine) restoreFileEntries(ctx context.Context, fes []Fil // unpredictability in my test cases, so in order to avoid that, I am adding this cancellation check. select { case <-ctx.Done(): - log.Errorf("Context canceled or timed out during %q restore", fe.Name) + log.Error(fmt.Sprintf("Context canceled or timed out during %q restore", fe.Name)) bh.RecordError(name, vterrors.Errorf(vtrpcpb.Code_CANCELED, "context canceled")) return nil default: diff --git a/go/vt/mysqlctl/cephbackupstorage/ceph.go b/go/vt/mysqlctl/cephbackupstorage/ceph.go index 4047693242a..68dee79e96f 100644 --- a/go/vt/mysqlctl/cephbackupstorage/ceph.go +++ b/go/vt/mysqlctl/cephbackupstorage/ceph.go @@ -201,14 +201,14 @@ func (bs *CephBackupStorage) StartBackup(ctx context.Context, dir, name string) found, err := c.BucketExists(bucket) if err != nil { - log.Info("Error from BucketExists: %v, quitting", bucket) + log.Info(fmt.Sprintf("Error from BucketExists: %v, quitting", err)) return nil, errors.New("Error checking whether bucket exists: " + bucket) } if !found { - log.Info("Bucket: %v doesn't exist, creating new bucket with the required name", bucket) + log.Info(fmt.Sprintf("Bucket: %v doesn't exist, creating new bucket with the required name", bucket)) err = c.MakeBucket(bucket, "") if err != nil { - log.Info("Error creating Bucket: %v, quitting", bucket) + log.Info(fmt.Sprintf("Error creating Bucket: %v, quitting", err)) return nil, errors.New("Error creating new bucket: " + bucket) } } diff --git a/go/vt/mysqlctl/clone.go b/go/vt/mysqlctl/clone.go index 928853742e7..22f8ed3563b 100644 --- a/go/vt/mysqlctl/clone.go +++ b/go/vt/mysqlctl/clone.go @@ -77,7 +77,7 @@ func CloneFromDonor(ctx context.Context, topoServer *topo.Server, mysqld MysqlDa return replication.Position{}, errors.New("--clone-from-primary and --clone-from-tablet are mutually exclusive") case cloneFromPrimary: // Look up the primary tablet from topology. 
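The call sites converted in this patch keep fmt.Sprintf so the rendered messages stay identical to before. When structured logging is enabled, the same information could also be carried as attributes, which JSON consumers can filter on. A sketch, not something this patch does; the donor alias is a plain string here for brevity:

package example

import (
	"fmt"

	"vitess.io/vitess/go/vt/log"
)

func logDonor(donorAlias string) {
	// Message-compatible form, as used throughout this patch:
	log.Info(fmt.Sprintf("Found primary tablet %s for use as CLONE REMOTE donor", donorAlias))

	// Attribute form that a JSON log consumer can filter on:
	log.Info("found primary tablet for use as CLONE REMOTE donor", "tablet_alias", donorAlias)
}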
- log.Infof("Looking up primary tablet for shard %s/%s for use as CLONE REMOTE donor", keyspace, shard) + log.Info(fmt.Sprintf("Looking up primary tablet for shard %s/%s for use as CLONE REMOTE donor", keyspace, shard)) si, err := topoServer.GetShard(ctx, keyspace, shard) if err != nil { return replication.Position{}, fmt.Errorf("failed to get shard %s/%s: %v", keyspace, shard, err) @@ -86,10 +86,10 @@ func CloneFromDonor(ctx context.Context, topoServer *topo.Server, mysqld MysqlDa return replication.Position{}, fmt.Errorf("shard %s/%s has no primary", keyspace, shard) } donorAlias = si.PrimaryAlias - log.Infof("Found primary tablet %s for use as CLONE REMOTE donor", topoproto.TabletAliasString(donorAlias)) + log.Info(fmt.Sprintf("Found primary tablet %s for use as CLONE REMOTE donor", topoproto.TabletAliasString(donorAlias))) case cloneFromTablet != "": // Parse the explicit donor tablet alias. - log.Infof("Using tablet %s for use as CLONE REMOTE donor", cloneFromTablet) + log.Info(fmt.Sprintf("Using tablet %s for use as CLONE REMOTE donor", cloneFromTablet)) donorAlias, err = topoproto.ParseTabletAlias(cloneFromTablet) if err != nil { return replication.Position{}, fmt.Errorf("invalid tablet alias %q: %v", cloneFromTablet, err) @@ -119,7 +119,7 @@ func CloneFromDonor(ctx context.Context, topoServer *topo.Server, mysqld MysqlDa UseSSL: cloneConfig.UseSSL, } - log.Infof("Clone executor configured for donor %s:%d", executor.DonorHost, executor.DonorPort) + log.Info(fmt.Sprintf("Clone executor configured for donor %s:%d", executor.DonorHost, executor.DonorPort)) // Execute the clone operation. // Note: ExecuteClone will wait for mysqld to restart and for the CLONE plugin to report successful completion @@ -134,7 +134,7 @@ func CloneFromDonor(ctx context.Context, topoServer *topo.Server, mysqld MysqlDa return replication.Position{}, fmt.Errorf("failed to get position after clone: %v", err) } - log.Infof("Clone completed successfully at position %v", pos) + log.Info(fmt.Sprintf("Clone completed successfully at position %v", pos)) return pos, nil } @@ -215,7 +215,7 @@ func (c *CloneExecutor) validateDonorRemote(ctx context.Context) error { return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "clone plugin is not active on donor (status: %s)", status) } - log.Infof("Donor %s:%d validated successfully (MySQL %s)", c.DonorHost, c.DonorPort, versionStr) + log.Info(fmt.Sprintf("Donor %s:%d validated successfully (MySQL %s)", c.DonorHost, c.DonorPort, versionStr)) return nil } @@ -290,7 +290,7 @@ func (c *CloneExecutor) ExecuteClone(ctx context.Context, mysqld MysqlDaemon, wa return vterrors.Wrapf(err, "donor validation failed") } - log.Infof("Starting CLONE REMOTE from %s:%d", c.DonorHost, c.DonorPort) + log.Info(fmt.Sprintf("Starting CLONE REMOTE from %s:%d", c.DonorHost, c.DonorPort)) // Set the valid donor list donorAddr := fmt.Sprintf("%s:%d", c.DonorHost, c.DonorPort) @@ -303,7 +303,7 @@ func (c *CloneExecutor) ExecuteClone(ctx context.Context, mysqld MysqlDaemon, wa // Build the CLONE INSTANCE command cloneCmd := c.buildCloneCommand() - log.Infof("Executing CLONE INSTANCE FROM %s:%d (this may take a while)", c.DonorHost, c.DonorPort) + log.Info(fmt.Sprintf("Executing CLONE INSTANCE FROM %s:%d (this may take a while)", c.DonorHost, c.DonorPort)) // Execute the clone command. When clone completes, MySQL restarts automatically // which will cause the connection to drop. 
We ignore this error and verify @@ -312,7 +312,7 @@ func (c *CloneExecutor) ExecuteClone(ctx context.Context, mysqld MysqlDaemon, wa if !isCloneConnError(err) { return vterrors.Wrapf(err, "clone command failed") } - log.Infof("CLONE command returned (connection likely lost due to MySQL restart): %v", err) + log.Info(fmt.Sprintf("CLONE command returned (connection likely lost due to MySQL restart): %v", err)) } // Wait for MySQL to restart and verify clone completed successfully @@ -320,7 +320,7 @@ func (c *CloneExecutor) ExecuteClone(ctx context.Context, mysqld MysqlDaemon, wa return vterrors.Wrapf(err, "clone success verification failed") } - log.Infof("CLONE REMOTE completed successfully from %s:%d", c.DonorHost, c.DonorPort) + log.Info(fmt.Sprintf("CLONE REMOTE completed successfully from %s:%d", c.DonorHost, c.DonorPort)) return nil } @@ -381,7 +381,7 @@ func (c *CloneExecutor) checkClonePluginInstalled(ctx context.Context, mysqld My func (c *CloneExecutor) waitForCloneComplete(ctx context.Context, mysqld MysqlDaemon, timeout time.Duration) error { const pollInterval = time.Second - log.Infof("Waiting for clone to complete (timeout: %v)", timeout) + log.Info(fmt.Sprintf("Waiting for clone to complete (timeout: %v)", timeout)) timer := time.NewTimer(timeout) defer timer.Stop() @@ -399,19 +399,19 @@ func (c *CloneExecutor) waitForCloneComplete(ctx context.Context, mysqld MysqlDa result, err := mysqld.FetchSuperQuery(ctx, cloneStatusQuery) if err != nil { // Connection failures are expected during MySQL restart - log.Infof("Clone status query failed (MySQL may be restarting): %v", err) + log.Info(fmt.Sprintf("Clone status query failed (MySQL may be restarting): %v", err)) continue } if len(result.Rows) == 0 { // No clone status yet - MySQL may have just started - log.Infof("No clone status found, waiting...") + log.Info("No clone status found, waiting...") continue } if len(result.Rows[0]) < 3 { // Unexpected row format - log.Warningf("Unexpected clone_status row format: got %d columns, expected 3", len(result.Rows[0])) + log.Warn(fmt.Sprintf("Unexpected clone_status row format: got %d columns, expected 3", len(result.Rows[0]))) continue } @@ -419,14 +419,14 @@ func (c *CloneExecutor) waitForCloneComplete(ctx context.Context, mysqld MysqlDa errorNo := result.Rows[0][1].ToString() errorMsg := result.Rows[0][2].ToString() - log.Infof("Clone status: STATE=%s, ERROR_NO=%s", state, errorNo) + log.Info(fmt.Sprintf("Clone status: STATE=%s, ERROR_NO=%s", state, errorNo)) switch { case strings.EqualFold(state, "Completed"): if errorNo != "0" { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "clone completed with error %s: %s", errorNo, errorMsg) } - log.Infof("Clone completed successfully from %s:%d", c.DonorHost, c.DonorPort) + log.Info(fmt.Sprintf("Clone completed successfully from %s:%d", c.DonorHost, c.DonorPort)) return nil case strings.EqualFold(state, "Failed"): return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "clone failed with error %s: %s", errorNo, errorMsg) @@ -435,7 +435,7 @@ func (c *CloneExecutor) waitForCloneComplete(ctx context.Context, mysqld MysqlDa continue default: // Unknown state, keep waiting but log it - log.Warningf("Unknown clone state: %s", state) + log.Warn("Unknown clone state: " + state) continue } } diff --git a/go/vt/mysqlctl/mycnf_flag.go b/go/vt/mysqlctl/mycnf_flag.go index 70d5c601916..77dd0e513d5 100644 --- a/go/vt/mysqlctl/mycnf_flag.go +++ b/go/vt/mysqlctl/mycnf_flag.go @@ -17,6 +17,7 @@ limitations under the License. 
package mysqlctl import ( + "fmt" "time" "github.com/spf13/pflag" @@ -130,9 +131,9 @@ func NewMycnfFromFlags(uid uint32) (mycnf *Mycnf, err error) { if flagMycnfFile == "" { flagMycnfFile = MycnfFile(uid) - log.Infof("No mycnf-server-id, no mycnf-file specified, using default config for server id %v: %v", uid, flagMycnfFile) + log.Info(fmt.Sprintf("No mycnf-server-id, no mycnf-file specified, using default config for server id %v: %v", uid, flagMycnfFile)) } else { - log.Infof("No mycnf-server-id specified, using mycnf-file file %v", flagMycnfFile) + log.Info(fmt.Sprintf("No mycnf-server-id specified, using mycnf-file file %v", flagMycnfFile)) } mycnf = NewMycnf(uid, 0) mycnf.Path = flagMycnfFile diff --git a/go/vt/mysqlctl/mysqlctlclient/interface.go b/go/vt/mysqlctl/mysqlctlclient/interface.go index 16ee8a38953..f415a4cb743 100644 --- a/go/vt/mysqlctl/mysqlctlclient/interface.go +++ b/go/vt/mysqlctl/mysqlctlclient/interface.go @@ -21,6 +21,7 @@ package mysqlctlclient import ( "context" "fmt" + "os" "github.com/spf13/pflag" @@ -78,7 +79,8 @@ var factories = make(map[string]Factory) // RegisterFactory allows a client implementation to register itself func RegisterFactory(name string, factory Factory) { if _, ok := factories[name]; ok { - log.Fatalf("RegisterFactory %s already exists", name) + log.Error(fmt.Sprintf("RegisterFactory %s already exists", name)) + os.Exit(1) } factories[name] = factory } diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index 18493c9da31..cc3a2d522f5 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -213,7 +213,7 @@ func NewMysqld(dbcfgs *dbconfigs.DBConfigs) *Mysqld { failVersionDetection(err) } - log.Infof("Using flavor: %v, version: %v", f, v) + log.Info(fmt.Sprintf("Using flavor: %v, version: %v", f, v)) result.capabilities = newCapabilitySet(f, v) return result } @@ -273,7 +273,7 @@ func ParseVersionString(version string) (flavor MySQLFlavor, ver ServerVersion, func (mysqld *Mysqld) RunMysqlUpgrade(ctx context.Context) error { // Execute as remote action on mysqlctld if requested. if socketFile != "" { - log.Infof("executing Mysqld.RunMysqlUpgrade() remotely via mysqlctld server: %v", socketFile) + log.Info(fmt.Sprintf("executing Mysqld.RunMysqlUpgrade() remotely via mysqlctld server: %v", socketFile)) client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) @@ -283,7 +283,7 @@ func (mysqld *Mysqld) RunMysqlUpgrade(ctx context.Context) error { } if mysqld.capabilities.hasMySQLUpgradeInServer() { - log.Warningf("MySQL version has built-in upgrade, skipping RunMySQLUpgrade") + log.Warn("MySQL version has built-in upgrade, skipping RunMySQLUpgrade") return nil } @@ -315,18 +315,18 @@ func (mysqld *Mysqld) RunMysqlUpgrade(ctx context.Context) error { // Find mysql_upgrade. If not there, we do nothing. 
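Where glog.Fatalf is removed, this patch substitutes log.Error followed by os.Exit(1), as in RegisterFactory above. If that pair keeps recurring, a small helper could centralize it; the following is only a hypothetical sketch, not part of the change:

package example

import (
	"fmt"
	"os"

	"vitess.io/vitess/go/vt/log"
)

// exitf mirrors the Error + os.Exit(1) pattern used wherever glog.Fatalf was
// removed in this patch.
func exitf(format string, args ...any) {
	log.ErrorDepth(1, fmt.Sprintf(format, args...)) // attribute the record to exitf's caller
	log.Flush()                                     // Fatalf flushed before exiting; keep that behavior
	os.Exit(1)
}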
vtMysqlRoot, err := vtenv.VtMysqlRoot() if err != nil { - log.Warningf("VT_MYSQL_ROOT not set, skipping mysql_upgrade step: %v", err) + log.Warn(fmt.Sprintf("VT_MYSQL_ROOT not set, skipping mysql_upgrade step: %v", err)) return nil } name, err := binaryPath(vtMysqlRoot, "mysql_upgrade") if err != nil { - log.Warningf("mysql_upgrade binary not present, skipping it: %v", err) + log.Warn(fmt.Sprintf("mysql_upgrade binary not present, skipping it: %v", err)) return nil } env, err := buildLdPaths() if err != nil { - log.Warningf("skipping mysql_upgrade step: %v", err) + log.Warn(fmt.Sprintf("skipping mysql_upgrade step: %v", err)) return nil } @@ -342,7 +342,7 @@ func (mysqld *Mysqld) RunMysqlUpgrade(ctx context.Context) error { func (mysqld *Mysqld) Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...string) error { // Execute as remote action on mysqlctld if requested. if socketFile != "" { - log.Infof("executing Mysqld.Start() remotely via mysqlctld server: %v", socketFile) + log.Info(fmt.Sprintf("executing Mysqld.Start() remotely via mysqlctld server: %v", socketFile)) client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) @@ -370,7 +370,7 @@ func (mysqld *Mysqld) startNoWait(cnf *Mycnf, mysqldArgs ...string) error { name = "mysqld_start hook" //nolint:ineffassign case hook.HOOK_DOES_NOT_EXIST: // hook doesn't exist, run mysqld_safe ourselves - log.Infof("%v: No mysqld_start hook, running mysqld_safe directly", ts) + log.Info(fmt.Sprintf("%v: No mysqld_start hook, running mysqld_safe directly", ts)) vtMysqlRoot, err := vtenv.VtMysqlRoot() if err != nil { return err @@ -379,7 +379,7 @@ func (mysqld *Mysqld) startNoWait(cnf *Mycnf, mysqldArgs ...string) error { if err != nil { // The movement to use systemd means that mysqld_safe is not always provided. // This should not be considered an issue do not generate a warning. - log.Infof("%v: trying to launch mysqld instead", err) + log.Info(fmt.Sprintf("%v: trying to launch mysqld instead", err)) name, err = binaryPath(vtMysqlRoot, "mysqld") // If this also fails, return an error. if err != nil { @@ -410,7 +410,7 @@ func (mysqld *Mysqld) startNoWait(cnf *Mycnf, mysqldArgs ...string) error { cmd := exec.Command(name, args...) cmd.Dir = vtMysqlRoot cmd.Env = env - log.Infof("%v %#v", ts, cmd) + log.Info(fmt.Sprintf("%v %#v", ts, cmd)) stderr, err := cmd.StderrPipe() if err != nil { return err @@ -422,13 +422,13 @@ func (mysqld *Mysqld) startNoWait(cnf *Mycnf, mysqldArgs ...string) error { go func() { scanner := bufio.NewScanner(stderr) for scanner.Scan() { - log.Infof("%v stderr: %v", ts, scanner.Text()) + log.Info(fmt.Sprintf("%v stderr: %v", ts, scanner.Text())) } }() go func() { scanner := bufio.NewScanner(stdout) for scanner.Scan() { - log.Infof("%v stdout: %v", ts, scanner.Text()) + log.Info(fmt.Sprintf("%v stdout: %v", ts, scanner.Text())) } }() err = cmd.Start() @@ -441,7 +441,7 @@ func (mysqld *Mysqld) startNoWait(cnf *Mycnf, mysqldArgs ...string) error { go func(cancel <-chan struct{}) { // Wait regardless of cancel, so we don't generate defunct processes. err := cmd.Wait() - log.Infof("%v exit: %v", ts, err) + log.Info(fmt.Sprintf("%v exit: %v", ts, err)) // The process exited. Trigger OnTerm callbacks, unless we were canceled. 
select { @@ -476,27 +476,27 @@ func cleanupLockfile(socket string, ts string) error { lockPath := socket + ".lock" pid, err := os.ReadFile(lockPath) if errors.Is(err, os.ErrNotExist) { - log.Infof("%v: no stale lock file at %s", ts, lockPath) + log.Info(fmt.Sprintf("%v: no stale lock file at %s", ts, lockPath)) // If there's no lock file, we can early return here, nothing // to clean up then. return nil } else if err != nil { - log.Errorf("%v: error checking if lock file exists: %v", ts, err) + log.Error(fmt.Sprintf("%v: error checking if lock file exists: %v", ts, err)) // Any other errors here are unexpected. return err } p, err := strconv.Atoi(string(bytes.TrimSpace(pid))) if err != nil { - log.Errorf("%v: error parsing pid from lock file: %v", ts, err) + log.Error(fmt.Sprintf("%v: error parsing pid from lock file: %v", ts, err)) return err } if os.Getpid() == p { - log.Infof("%v: lock file at %s is ours, removing it", ts, lockPath) + log.Info(fmt.Sprintf("%v: lock file at %s is ours, removing it", ts, lockPath)) return os.Remove(lockPath) } proc, err := os.FindProcess(p) if err != nil { - log.Errorf("%v: error finding process: %v", ts, err) + log.Error(fmt.Sprintf("%v: error finding process: %v", ts, err)) return err } err = proc.Signal(syscall.Signal(0)) @@ -506,21 +506,21 @@ func cleanupLockfile(socket string, ts string) error { cmdline, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", p)) if err == nil { name := string(bytes.ReplaceAll(cmdline, []byte{0}, []byte(" "))) - log.Errorf("%v: not removing socket lock file: %v with pid %v for %q", ts, lockPath, p, name) + log.Error(fmt.Sprintf("%v: not removing socket lock file: %v with pid %v for %q", ts, lockPath, p, name)) } else { - log.Errorf("%v: not removing socket lock file: %v with pid %v (failed to read process name: %v)", ts, lockPath, p, err) + log.Error(fmt.Sprintf("%v: not removing socket lock file: %v with pid %v (failed to read process name: %v)", ts, lockPath, p, err)) } return fmt.Errorf("process %v is still running", p) } if !errors.Is(err, os.ErrProcessDone) { // Any errors except for the process being done // is unexpected here. - log.Errorf("%v: error checking process %v: %v", ts, p, err) + log.Error(fmt.Sprintf("%v: error checking process %v: %v", ts, p, err)) return err } // All good, process is gone and we can safely clean up the lock file. - log.Infof("%v: removing stale socket lock file: %v", ts, lockPath) + log.Info(fmt.Sprintf("%v: removing stale socket lock file: %v", ts, lockPath)) return os.Remove(lockPath) } @@ -554,7 +554,7 @@ func (mysqld *Mysqld) WaitForDBAGrants(ctx context.Context, waitTime time.Durati res, fetchErr := conn.ExecuteFetch("SHOW GRANTS", 1000, false) conn.Close() if fetchErr != nil { - log.Errorf("Error running SHOW GRANTS - %v", fetchErr) + log.Error(fmt.Sprintf("Error running SHOW GRANTS - %v", fetchErr)) } if fetchErr == nil && res != nil && len(res.Rows) > 0 && len(res.Rows[0]) > 0 { privileges := res.Rows[0][0].ToString() @@ -576,7 +576,7 @@ func (mysqld *Mysqld) WaitForDBAGrants(ctx context.Context, waitTime time.Durati // wait is the internal version of Wait, that takes credentials. 
func (mysqld *Mysqld) wait(ctx context.Context, cnf *Mycnf, params *mysql.ConnParams) error { - log.Infof("Waiting for mysqld socket file (%v) to be ready...", cnf.SocketFile) + log.Info(fmt.Sprintf("Waiting for mysqld socket file (%v) to be ready...", cnf.SocketFile)) for { select { @@ -593,7 +593,7 @@ func (mysqld *Mysqld) wait(ctx context.Context, cnf *Mycnf, params *mysql.ConnPa conn.Close() return nil } - log.Infof("mysqld socket file exists, but can't connect: %v", connErr) + log.Info(fmt.Sprintf("mysqld socket file exists, but can't connect: %v", connErr)) } else if !os.IsNotExist(statErr) { return fmt.Errorf("can't stat mysqld socket file: %v", statErr) } @@ -609,11 +609,11 @@ func (mysqld *Mysqld) wait(ctx context.Context, cnf *Mycnf, params *mysql.ConnPa // // If a mysqlctld address is provided in a flag, Shutdown will run remotely. func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool, shutdownTimeout time.Duration) error { - log.Infof("Mysqld.Shutdown") + log.Info("Mysqld.Shutdown") // Execute as remote action on mysqlctld if requested. if socketFile != "" { - log.Infof("executing Mysqld.Shutdown() remotely via mysqlctld server: %v", socketFile) + log.Info(fmt.Sprintf("executing Mysqld.Shutdown() remotely via mysqlctld server: %v", socketFile)) client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) @@ -635,7 +635,7 @@ func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bo _, socketPathErr := os.Stat(cnf.SocketFile) _, pidPathErr := os.Stat(cnf.PidFile) if os.IsNotExist(socketPathErr) && os.IsNotExist(pidPathErr) { - log.Warningf("assuming mysqld already shut down - no socket, no pid file found") + log.Warn("assuming mysqld already shut down - no socket, no pid file found") return nil } @@ -658,7 +658,7 @@ func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bo // hook exists and worked, we can keep going case hook.HOOK_DOES_NOT_EXIST: // hook doesn't exist, try mysqladmin - log.Infof("No mysqld_shutdown hook, running mysqladmin directly") + log.Info("No mysqld_shutdown hook, running mysqladmin directly") dir, err := vtenv.VtMysqlRoot() if err != nil { return err @@ -699,8 +699,7 @@ func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bo // proxy for that since we can't call wait() in a process we // didn't start. if waitForMysqld { - log.Infof("Mysqld.Shutdown: waiting for socket file (%v) and pid file (%v) to disappear", - cnf.SocketFile, cnf.PidFile) + log.Info(fmt.Sprintf("Mysqld.Shutdown: waiting for socket file (%v) and pid file (%v) to disappear", cnf.SocketFile, cnf.PidFile)) for { select { @@ -734,7 +733,7 @@ func execCmd(name string, args, env []string, dir string, input io.Reader) (cmd out, err := cmd.CombinedOutput() output = string(out) if err != nil { - log.Errorf("execCmd: %v failed: %v", name, err) + log.Error(fmt.Sprintf("execCmd: %v failed: %v", name, err)) err = fmt.Errorf("%v: %w, output: %v", name, err, output) } return cmd, output, err @@ -758,15 +757,15 @@ func binaryPath(root, binary string) (string, error) { // InitConfig will create the default directory structure for the mysqld process, // generate / configure a my.cnf file. 
func (mysqld *Mysqld) InitConfig(cnf *Mycnf) error { - log.Infof("mysqlctl.InitConfig") + log.Info("mysqlctl.InitConfig") err := mysqld.createDirs(cnf) if err != nil { - log.Errorf("%s", err.Error()) + log.Error(err.Error()) return err } // Set up config files. if err = mysqld.initConfig(cnf, cnf.Path); err != nil { - log.Errorf("failed creating %v: %v", cnf.Path, err) + log.Error(fmt.Sprintf("failed creating %v: %v", cnf.Path, err)) return err } return nil @@ -776,10 +775,10 @@ func (mysqld *Mysqld) InitConfig(cnf *Mycnf) error { // generate / configure a my.cnf file install a skeleton database, // and apply the provided initial SQL file. func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string) error { - log.Infof("mysqlctl.Init running with contents previously embedded from %s", initDBSQLFile) + log.Info("mysqlctl.Init running with contents previously embedded from " + initDBSQLFile) err := mysqld.InitConfig(cnf) if err != nil { - log.Errorf("%s", err.Error()) + log.Error(err.Error()) return err } // Install data dir. @@ -790,7 +789,7 @@ func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string // Start mysqld. We do not use Start, as we have to wait using // the root user. if err = mysqld.startNoWait(cnf); err != nil { - log.Errorf("failed starting mysqld: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath)) + log.Error(fmt.Sprintf("failed starting mysqld: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath))) return err } @@ -801,7 +800,7 @@ func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string UnixSocket: cnf.SocketFile, } if err = mysqld.wait(ctx, cnf, params); err != nil { - log.Errorf("failed starting mysqld in time: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath)) + log.Error(fmt.Sprintf("failed starting mysqld in time: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath))) return err } if initDBSQLFile == "" { // default to built-in @@ -878,20 +877,20 @@ func (mysqld *Mysqld) installDataDir(cnf *Mycnf) error { return err } if mysqld.capabilities.hasInitializeInServer() { - log.Infof("Installing data dir with mysqld --initialize-insecure") + log.Info("Installing data dir with mysqld --initialize-insecure") args := []string{ "--defaults-file=" + cnf.Path, "--basedir=" + mysqlBaseDir, "--initialize-insecure", // Use empty 'root'@'localhost' password. 
} if _, _, err = execCmd(mysqldPath, args, nil, mysqlRoot, nil); err != nil { - log.Errorf("mysqld --initialize-insecure failed: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath)) + log.Error(fmt.Sprintf("mysqld --initialize-insecure failed: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath))) return err } return nil } - log.Infof("Installing data dir with mysql_install_db") + log.Info("Installing data dir with mysql_install_db") args := []string{ "--defaults-file=" + cnf.Path, "--basedir=" + mysqlBaseDir, @@ -904,7 +903,7 @@ func (mysqld *Mysqld) installDataDir(cnf *Mycnf) error { return err } if _, _, err = execCmd(cmdPath, args, nil, mysqlRoot, nil); err != nil { - log.Errorf("mysql_install_db failed: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath)) + log.Error(fmt.Sprintf("mysql_install_db failed: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath))) return err } return nil @@ -922,7 +921,7 @@ func (mysqld *Mysqld) initConfig(cnf *Mycnf, outFile string) error { switch hr := hook.NewHookWithEnv("make_mycnf", nil, env).Execute(); hr.ExitStatus { case hook.HOOK_DOES_NOT_EXIST: - log.Infof("make_mycnf hook doesn't exist, reading template files") + log.Info("make_mycnf hook doesn't exist, reading template files") configData, err = cnf.makeMycnf(mysqld.getMycnfTemplate()) case hook.HOOK_SUCCESS: configData, err = cnf.fillMycnfTemplate(hr.Stdout) @@ -940,7 +939,8 @@ func (mysqld *Mysqld) getMycnfTemplate() string { if mycnfTemplateFile != "" { data, err := os.ReadFile(mycnfTemplateFile) if err != nil { - log.Fatalf("template file specified by -mysqlctl-mycnf-template could not be read: %v", mycnfTemplateFile) + log.Error(fmt.Sprintf("template file specified by -mysqlctl-mycnf-template could not be read: %v", mycnfTemplateFile)) + os.Exit(1) } return string(data) // use only specified template } @@ -962,7 +962,7 @@ func (mysqld *Mysqld) getMycnfTemplate() string { if mysqld.capabilities.version.Minor == 7 { versionConfig = config.MycnfMySQL57 } else { - log.Infof("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version) + log.Info(fmt.Sprintf("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version)) } case 8: if mysqld.capabilities.version.Minor >= 4 { @@ -975,14 +975,14 @@ func (mysqld *Mysqld) getMycnfTemplate() string { case 9: versionConfig = config.MycnfMySQL90 default: - log.Infof("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version) + log.Info(fmt.Sprintf("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version)) } case FlavorMariaDB: switch mysqld.capabilities.version.Major { case 10: versionConfig = config.MycnfMariaDB10 default: - log.Infof("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version) + log.Info(fmt.Sprintf("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version)) } } @@ -992,8 +992,7 @@ func (mysqld *Mysqld) getMycnfTemplate() string { if mysqlCloneEnabled && f == FlavorMySQL { v := mysqld.capabilities.version if v.Major < 8 || (v.Major == 8 && v.Minor == 0 && v.Patch < 17) { - log.Warningf("--mysql-clone-enabled is set but MySQL version %d.%d.%d does not support CLONE (requires 8.0.17+); flag will be ignored", - v.Major, v.Minor, 
v.Patch) + log.Warn(fmt.Sprintf("--mysql-clone-enabled is set but MySQL version %d.%d.%d does not support CLONE (requires 8.0.17+); flag will be ignored", v.Major, v.Minor, v.Patch)) } else { myTemplateSource.WriteString("\n## Clone plugin (--mysql-clone-enabled)\n") myTemplateSource.WriteString(config.MycnfClone) @@ -1005,10 +1004,10 @@ func (mysqld *Mysqld) getMycnfTemplate() string { for path := range parts { data, dataErr := os.ReadFile(path) if dataErr != nil { - log.Infof("could not open config file for mycnf: %v", path) + log.Info(fmt.Sprintf("could not open config file for mycnf: %v", path)) continue } - log.Infof("loaded extra MySQL config from: %s", path) + log.Info("loaded extra MySQL config from: " + path) myTemplateSource.WriteString("## " + path + "\n") myTemplateSource.Write(data) } @@ -1022,7 +1021,7 @@ func (mysqld *Mysqld) getMycnfTemplate() string { func (mysqld *Mysqld) RefreshConfig(ctx context.Context, cnf *Mycnf) error { // Execute as remote action on mysqlctld if requested. if socketFile != "" { - log.Infof("executing Mysqld.RefreshConfig() remotely via mysqlctld server: %v", socketFile) + log.Info(fmt.Sprintf("executing Mysqld.RefreshConfig() remotely via mysqlctld server: %v", socketFile)) client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) @@ -1053,7 +1052,7 @@ func (mysqld *Mysqld) RefreshConfig(ctx context.Context, cnf *Mycnf) error { } if bytes.Equal(existing, updated) { - log.Infof("No changes to my.cnf. Continuing.") + log.Info("No changes to my.cnf. Continuing.") return nil } @@ -1066,7 +1065,7 @@ func (mysqld *Mysqld) RefreshConfig(ctx context.Context, cnf *Mycnf) error { if err != nil { return fmt.Errorf("could not move %v to %v: %v", f.Name(), cnf.Path, err) } - log.Infof("Updated my.cnf. Backup of previous version available in %v", backupPath) + log.Info(fmt.Sprintf("Updated my.cnf. Backup of previous version available in %v", backupPath)) return nil } @@ -1076,11 +1075,11 @@ func (mysqld *Mysqld) RefreshConfig(ctx context.Context, cnf *Mycnf) error { // from a backup and then give it the same ServerID as before, MySQL can then // skip transactions in the replication stream with the same server_id. func (mysqld *Mysqld) ReinitConfig(ctx context.Context, cnf *Mycnf) error { - log.Infof("Mysqld.ReinitConfig") + log.Info("Mysqld.ReinitConfig") // Execute as remote action on mysqlctld if requested. 
if socketFile != "" { - log.Infof("executing Mysqld.ReinitConfig() remotely via mysqlctld server: %v", socketFile) + log.Info(fmt.Sprintf("executing Mysqld.ReinitConfig() remotely via mysqlctld server: %v", socketFile)) client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) @@ -1097,7 +1096,7 @@ func (mysqld *Mysqld) ReinitConfig(ctx context.Context, cnf *Mycnf) error { func (mysqld *Mysqld) createDirs(cnf *Mycnf) error { tabletDir := cnf.TabletDir() - log.Infof("creating directory %s", tabletDir) + log.Info("creating directory " + tabletDir) if err := os2.MkdirAll(tabletDir); err != nil { return err } @@ -1107,7 +1106,7 @@ func (mysqld *Mysqld) createDirs(cnf *Mycnf) error { } } for _, dir := range cnf.directoryList() { - log.Infof("creating directory %s", dir) + log.Info("creating directory " + dir) if err := os2.MkdirAll(dir); err != nil { return err } @@ -1131,27 +1130,27 @@ func (mysqld *Mysqld) createTopDir(cnf *Mycnf, dir string) error { if err != nil { if os.IsNotExist(err) { topdir := path.Join(tabletDir, dir) - log.Infof("creating directory %s", topdir) + log.Info("creating directory " + topdir) return os2.MkdirAll(topdir) } return err } linkto := path.Join(target, vtname) source := path.Join(tabletDir, dir) - log.Infof("creating directory %s", linkto) + log.Info("creating directory " + linkto) err = os2.MkdirAll(linkto) if err != nil { return err } - log.Infof("creating symlink %s -> %s", source, linkto) + log.Info(fmt.Sprintf("creating symlink %s -> %s", source, linkto)) return os.Symlink(linkto, source) } // Teardown will shutdown the running daemon, and delete the root directory. func (mysqld *Mysqld) Teardown(ctx context.Context, cnf *Mycnf, force bool, shutdownTimeout time.Duration) error { - log.Infof("mysqlctl.Teardown") + log.Info("mysqlctl.Teardown") if err := mysqld.Shutdown(ctx, cnf, true, shutdownTimeout); err != nil { - log.Warningf("failed mysqld shutdown: %v", err.Error()) + log.Warn(fmt.Sprintf("failed mysqld shutdown: %v", err.Error())) if !force { return err } @@ -1169,23 +1168,23 @@ func (mysqld *Mysqld) Teardown(ctx context.Context, cnf *Mycnf, force bool, shut func deleteTopDir(dir string) (removalErr error) { fi, err := os.Lstat(dir) if err != nil { - log.Errorf("error deleting dir %v: %v", dir, err.Error()) + log.Error(fmt.Sprintf("error deleting dir %v: %v", dir, err.Error())) removalErr = err } else if fi.Mode()&os.ModeSymlink != 0 { target, err := filepath.EvalSymlinks(dir) if err != nil { - log.Errorf("could not resolve symlink %v: %v", dir, err.Error()) + log.Error(fmt.Sprintf("could not resolve symlink %v: %v", dir, err.Error())) removalErr = err } - log.Infof("remove data dir (symlinked) %v", target) + log.Info(fmt.Sprintf("remove data dir (symlinked) %v", target)) if err = os.RemoveAll(target); err != nil { - log.Errorf("failed removing %v: %v", target, err.Error()) + log.Error(fmt.Sprintf("failed removing %v: %v", target, err.Error())) removalErr = err } } - log.Infof("remove data dir %v", dir) + log.Info(fmt.Sprintf("remove data dir %v", dir)) if err = os.RemoveAll(dir); err != nil { - log.Errorf("failed removing %v: %v", dir, err.Error()) + log.Error(fmt.Sprintf("failed removing %v: %v", dir, err.Error())) removalErr = err } return @@ -1407,7 +1406,7 @@ func (mysqld *Mysqld) HostMetrics(ctx context.Context, cnf *Mycnf) (*mysqlctlpb. 
// $ mysqlbinlog --include-gtids binlog.file | mysql func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error { if socketFile != "" { - log.Infof("executing Mysqld.ApplyBinlogFile() remotely via mysqlctld server: %v", socketFile) + log.Info(fmt.Sprintf("executing Mysqld.ApplyBinlogFile() remotely via mysqlctld server: %v", socketFile)) client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) @@ -1459,7 +1458,7 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.Apply mysqlbinlogCmd.Dir = dir mysqlbinlogCmd.Env = env mysqlbinlogCmd.Stderr = mysqlbinlogErrFile - log.Infof("ApplyBinlogFile: running mysqlbinlog command: %#v with errfile=%v", mysqlbinlogCmd, mysqlbinlogErrFile.Name()) + log.Info(fmt.Sprintf("ApplyBinlogFile: running mysqlbinlog command: %#v with errfile=%v", mysqlbinlogCmd, mysqlbinlogErrFile.Name())) pipe, err = mysqlbinlogCmd.StdoutPipe() // to be piped into mysql if err != nil { return err @@ -1493,13 +1492,13 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.Apply // We disable super_read_only, in case it is in the default MySQL startup // parameters. We do it blindly, since this will fail on MariaDB, which doesn't // have super_read_only This is safe, since we're restarting MySQL after the restore anyway - log.Infof("ApplyBinlogFile: disabling super_read_only") + log.Info("ApplyBinlogFile: disabling super_read_only") resetFunc, err := mysqld.SetSuperReadOnly(ctx, false) if err != nil { if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { - log.Warningf("ApplyBinlogFile: server does not know about super_read_only, continuing anyway...") + log.Warn("ApplyBinlogFile: server does not know about super_read_only, continuing anyway...") } else { - log.Errorf("ApplyBinlogFile: unexpected error while trying to set super_read_only: %v", err) + log.Error(fmt.Sprintf("ApplyBinlogFile: unexpected error while trying to set super_read_only: %v", err)) return err } } @@ -1518,7 +1517,7 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.Apply mysqlCmd.Stdin = pipe // piped from mysqlbinlog mysqlCmd.Stderr = mysqlErrFile - log.Infof("ApplyBinlogFile: running mysql command: %#v with errfile=%v", mysqlCmd, mysqlErrFile.Name()) + log.Info(fmt.Sprintf("ApplyBinlogFile: running mysql command: %#v with errfile=%v", mysqlCmd, mysqlErrFile.Name())) } // Run both processes, piped: if err := mysqlbinlogCmd.Start(); err != nil { @@ -1594,7 +1593,7 @@ func (mysqld *Mysqld) scanBinlogTimestamp( mysqlbinlogCmd := exec.Command(mysqlbinlogName, args...) 
mysqlbinlogCmd.Dir = mysqlbinlogDir mysqlbinlogCmd.Env = mysqlbinlogEnv - log.Infof("ApplyBinlogFile: running mysqlbinlog command: %#v", mysqlbinlogCmd) + log.Info(fmt.Sprintf("ApplyBinlogFile: running mysqlbinlog command: %#v", mysqlbinlogCmd)) pipe, err := mysqlbinlogCmd.StdoutPipe() // to be piped into mysql if err != nil { return firstMatchedTime, lastMatchedTime, err @@ -1638,7 +1637,7 @@ func (mysqld *Mysqld) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlc return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "empty binlog list in ReadBinlogFilesTimestampsRequest") } if socketFile != "" { - log.Infof("executing Mysqld.ReadBinlogFilesTimestamps() remotely via mysqlctld server: %v", socketFile) + log.Info(fmt.Sprintf("executing Mysqld.ReadBinlogFilesTimestamps() remotely via mysqlctld server: %v", socketFile)) client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return nil, fmt.Errorf("can't dial mysqlctld: %v", err) @@ -1725,7 +1724,7 @@ func noSocketFile() { // We log an error for now until we fix the issue with ApplySchema surfacing in MoveTables. // See https://github.com/vitessio/vitess/issues/13203 and https://github.com/vitessio/vitess/pull/13178 // panic("Running remotely through mysqlctl, socketFile must not be set") - log.Warning("Running remotely through mysqlctl and thus socketFile should not be set") + log.Warn("Running remotely through mysqlctl and thus socketFile should not be set") } } diff --git a/go/vt/mysqlctl/mysqlshellbackupengine.go b/go/vt/mysqlctl/mysqlshellbackupengine.go index d1b4d2acae0..24156d1dc37 100644 --- a/go/vt/mysqlctl/mysqlshellbackupengine.go +++ b/go/vt/mysqlctl/mysqlshellbackupengine.go @@ -296,7 +296,7 @@ func (be *MySQLShellBackupEngine) ExecuteRestore(ctx context.Context, params Res err = cleanupMySQL(ctx, params, shouldDeleteUsers) if err != nil { - log.Errorf(err.Error()) + log.Error(err.Error()) // time.Sleep(time.Minute * 2) return nil, vterrors.Wrap(err, "error cleaning MySQL") } diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go index 765bdd2b9ee..b67c1dddb39 100644 --- a/go/vt/mysqlctl/query.go +++ b/go/vt/mysqlctl/query.go @@ -78,9 +78,9 @@ func limitString(s string, limit int) string { func (mysqld *Mysqld) executeSuperQueryListConn(ctx context.Context, conn *dbconnpool.PooledDBConnection, queryList []string) error { const LogQueryLengthLimit = 200 for _, query := range queryList { - log.Infof("exec %s", limitString(redactPassword(query), LogQueryLengthLimit)) + log.Info("exec " + limitString(redactPassword(query), LogQueryLengthLimit)) if _, err := mysqld.executeFetchContext(ctx, conn, query, 10000, false); err != nil { - log.Errorf("ExecuteFetch(%v) failed: %v", redactPassword(query), redactPassword(err.Error())) + log.Error(fmt.Sprintf("ExecuteFetch(%v) failed: %v", redactPassword(query), redactPassword(err.Error()))) return fmt.Errorf("ExecuteFetch(%v) failed: %v", redactPassword(query), redactPassword(err.Error())) } } @@ -138,10 +138,10 @@ func (mysqld *Mysqld) executeFetchContext(ctx context.Context, conn *dbconnpool. // The context expired or was canceled. // Try to kill the connection to effectively cancel the ExecuteFetch(). 
connID := conn.Conn.ID() - log.Infof("Mysqld.executeFetchContext(): killing connID %v due to timeout of query: %v", connID, query) + log.Info(fmt.Sprintf("Mysqld.executeFetchContext(): killing connID %v due to timeout of query: %v", connID, query)) if killErr := mysqld.killConnection(connID); killErr != nil { // Log it, but go ahead and wait for the query anyway. - log.Warningf("Mysqld.executeFetchContext(): failed to kill connID %v: %v", connID, killErr) + log.Warn(fmt.Sprintf("Mysqld.executeFetchContext(): failed to kill connID %v: %v", connID, killErr)) } // Wait for the conn.ExecuteFetch() call to return. <-done diff --git a/go/vt/mysqlctl/reparent.go b/go/vt/mysqlctl/reparent.go index 9c7e0f91361..e2243694b2a 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -72,7 +72,7 @@ func (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS for { qr, err := mysqld.FetchSuperQuery(ctx, queryReparentJournal(timeCreatedNS)) if err != nil { - log.Infof("Error querying reparent journal: %v", err) + log.Info(fmt.Sprintf("Error querying reparent journal: %v", err)) } if err == nil && len(qr.Rows) == 1 { // we have the row, we're done @@ -83,7 +83,7 @@ func (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS t := time.After(100 * time.Millisecond) select { case <-ctx.Done(): - log.Warning("WaitForReparentJournal failed to see row before timeout.") + log.Warn("WaitForReparentJournal failed to see row before timeout.") return ctx.Err() case <-t: } diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 1f7f5c00a22..bf06f35ac1f 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -487,7 +487,7 @@ func (mysqld *Mysqld) SetReplicationPosition(ctx context.Context, pos replicatio defer conn.Recycle() cmds := conn.Conn.SetReplicationPositionCommands(pos) - log.Infof("Executing commands to set replication position: %v", cmds) + log.Info(fmt.Sprintf("Executing commands to set replication position: %v", cmds)) return mysqld.executeSuperQueryListConn(ctx, conn, cmds) } @@ -697,7 +697,7 @@ func (mysqld *Mysqld) semiSyncReplicationStatusQuery(ctx context.Context) (strin // SetSemiSyncEnabled enables or disables semi-sync replication for // primary and/or replica mode. func (mysqld *Mysqld) SetSemiSyncEnabled(ctx context.Context, primary, replica bool) error { - log.Infof("Setting semi-sync mode: primary=%v, replica=%v", primary, replica) + log.Info(fmt.Sprintf("Setting semi-sync mode: primary=%v, replica=%v", primary, replica)) // Convert bool to int. var p, s int diff --git a/go/vt/mysqlctl/s3backupstorage/s3.go b/go/vt/mysqlctl/s3backupstorage/s3.go index f7629925054..56ee868c729 100644 --- a/go/vt/mysqlctl/s3backupstorage/s3.go +++ b/go/vt/mysqlctl/s3backupstorage/s3.go @@ -314,7 +314,7 @@ func (s3ServerSideEncryption *S3ServerSideEncryption) init() error { sseCustomerKeyFile := after base64CodedKey, err := os.ReadFile(sseCustomerKeyFile) if err != nil { - log.Errorf(err.Error()) + log.Error(err.Error()) return err } @@ -361,7 +361,7 @@ func newS3BackupStorage() *S3BackupStorage { // ListBackups is part of the backupstorage.BackupStorage interface. 
func (bs *S3BackupStorage) ListBackups(ctx context.Context, dir string) ([]backupstorage.BackupHandle, error) { - log.Infof("ListBackups: [s3] dir: %v, bucket: %v", dir, bucket) + log.Info(fmt.Sprintf("ListBackups: [s3] dir: %v, bucket: %v", dir, bucket)) c, err := bs.client() if err != nil { return nil, err @@ -373,7 +373,7 @@ func (bs *S3BackupStorage) ListBackups(ctx context.Context, dir string) ([]backu } else { searchPrefix = objName(dir, "") } - log.Infof("objName: %s", searchPrefix) + log.Info("objName: " + searchPrefix) query := &s3.ListObjectsV2Input{ Bucket: &bucket, @@ -417,7 +417,7 @@ func (bs *S3BackupStorage) ListBackups(ctx context.Context, dir string) ([]backu // StartBackup is part of the backupstorage.BackupStorage interface. func (bs *S3BackupStorage) StartBackup(ctx context.Context, dir, name string) (backupstorage.BackupHandle, error) { - log.Infof("StartBackup: [s3] dir: %v, name: %v, bucket: %v", dir, name, bucket) + log.Info(fmt.Sprintf("StartBackup: [s3] dir: %v, name: %v, bucket: %v", dir, name, bucket)) c, err := bs.client() if err != nil { return nil, err @@ -434,7 +434,7 @@ func (bs *S3BackupStorage) StartBackup(ctx context.Context, dir, name string) (b // RemoveBackup is part of the backupstorage.BackupStorage interface. func (bs *S3BackupStorage) RemoveBackup(ctx context.Context, dir, name string) error { - log.Infof("RemoveBackup: [s3] dir: %v, name: %v, bucket: %v", dir, name, bucket) + log.Info(fmt.Sprintf("RemoveBackup: [s3] dir: %v, name: %v, bucket: %v", dir, name, bucket)) c, err := bs.client() if err != nil { diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index 81b19b0f8b7..2da0f6afe9c 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -504,7 +504,7 @@ func (mysqld *Mysqld) ApplySchemaChange(ctx context.Context, dbName string, chan schemaDiffs := tmutils.DiffSchemaToArray("actual", beforeSchema, "expected", change.BeforeSchema) if len(schemaDiffs) > 0 { for _, msg := range schemaDiffs { - log.Warningf("BeforeSchema differs: %v", msg) + log.Warn(fmt.Sprintf("BeforeSchema differs: %v", msg)) } // let's see if the schema was already applied @@ -522,7 +522,7 @@ func (mysqld *Mysqld) ApplySchemaChange(ctx context.Context, dbName string, chan } if change.Force { - log.Warningf("BeforeSchema differs, applying anyway") + log.Warn("BeforeSchema differs, applying anyway") } else { return nil, errors.New("BeforeSchema differs") } @@ -565,10 +565,10 @@ func (mysqld *Mysqld) ApplySchemaChange(ctx context.Context, dbName string, chan schemaDiffs := tmutils.DiffSchemaToArray("actual", afterSchema, "expected", change.AfterSchema) if len(schemaDiffs) > 0 { for _, msg := range schemaDiffs { - log.Warningf("AfterSchema differs: %v", msg) + log.Warn(fmt.Sprintf("AfterSchema differs: %v", msg)) } if change.Force { - log.Warningf("AfterSchema differs, not reporting error") + log.Warn("AfterSchema differs, not reporting error") } else { return nil, errors.New("AfterSchema differs") } diff --git a/go/vt/schemamanager/local_controller.go b/go/vt/schemamanager/local_controller.go index 44e1bbcf565..c59cf5a59fe 100644 --- a/go/vt/schemamanager/local_controller.go +++ b/go/vt/schemamanager/local_controller.go @@ -88,7 +88,7 @@ func (controller *LocalController) Open(ctx context.Context) error { dirpath := path.Join(controller.schemaChangeDir, fileinfo.Name()) schemaChanges, err := os.ReadDir(path.Join(dirpath, "input")) if err != nil { - log.Warningf("there is no input dir in %s", dirpath) + log.Warn("there is no input dir in " + dirpath) 
continue } // found a schema change @@ -148,7 +148,7 @@ func (controller *LocalController) OnReadSuccess(ctx context.Context) error { // OnReadFail is no-op func (controller *LocalController) OnReadFail(ctx context.Context, err error) error { - log.Errorf("failed to read file: %s, error: %v", controller.sqlPath, err) + log.Error(fmt.Sprintf("failed to read file: %s, error: %v", controller.sqlPath, err)) return nil } diff --git a/go/vt/schemamanager/plain_controller.go b/go/vt/schemamanager/plain_controller.go index 60ba0f85607..057843054b1 100644 --- a/go/vt/schemamanager/plain_controller.go +++ b/go/vt/schemamanager/plain_controller.go @@ -19,6 +19,7 @@ package schemamanager import ( "context" "encoding/json" + "fmt" "strings" "vitess.io/vitess/go/vt/log" @@ -74,7 +75,7 @@ func (controller *PlainController) OnReadSuccess(ctx context.Context) error { // OnReadFail is called when schemamanager fails to read all sql statements. func (controller *PlainController) OnReadFail(ctx context.Context, err error) error { - log.Errorf("Failed to read schema changes, error: %v\n", err) + log.Error(fmt.Sprintf("Failed to read schema changes, error: %v\n", err)) return err } @@ -86,14 +87,14 @@ func (controller *PlainController) OnValidationSuccess(ctx context.Context) erro // OnValidationFail is called when schemamanager fails to validate sql statements. func (controller *PlainController) OnValidationFail(ctx context.Context, err error) error { - log.Errorf("Failed to validate SQL statements, error: %v\n", err) + log.Error(fmt.Sprintf("Failed to validate SQL statements, error: %v\n", err)) return err } // OnExecutorComplete is called when schemamanager finishes applying schema changes. func (controller *PlainController) OnExecutorComplete(ctx context.Context, result *ExecuteResult) error { out, _ := json.MarshalIndent(result, "", " ") - log.Infof("Executor finished, result: %s\n", string(out)) + log.Info(fmt.Sprintf("Executor finished, result: %s\n", string(out))) return nil } diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index 6778f8fdfe4..dc7d97369bc 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -93,13 +93,13 @@ type ShardResult struct { // Run applies schema changes on Vitess through VtGate. 
func Run(ctx context.Context, controller Controller, executor Executor) (execResult *ExecuteResult, err error) { if err := controller.Open(ctx); err != nil { - log.Errorf("failed to open data sourcer: %v", err) + log.Error(fmt.Sprintf("failed to open data sourcer: %v", err)) return execResult, err } defer controller.Close() sqls, err := controller.Read(ctx) if err != nil { - log.Errorf("failed to read data from data sourcer: %v", err) + log.Error(fmt.Sprintf("failed to read data from data sourcer: %v", err)) controller.OnReadFail(ctx, err) return execResult, err } @@ -109,12 +109,12 @@ func Run(ctx context.Context, controller Controller, executor Executor) (execRes } keyspace := controller.Keyspace() if err := executor.Open(ctx, keyspace); err != nil { - log.Errorf("failed to open executor: %v", err) + log.Error(fmt.Sprintf("failed to open executor: %v", err)) return execResult, err } defer executor.Close() if err := executor.Validate(ctx, sqls); err != nil { - log.Errorf("validation fail: %v", err) + log.Error(fmt.Sprintf("validation fail: %v", err)) controller.OnValidationFail(ctx, err) return execResult, err } diff --git a/go/vt/schemamanager/ui_controller.go b/go/vt/schemamanager/ui_controller.go index 1625a5e8fd7..34756b4f1ba 100644 --- a/go/vt/schemamanager/ui_controller.go +++ b/go/vt/schemamanager/ui_controller.go @@ -103,7 +103,7 @@ func (controller *UIController) OnValidationFail(ctx context.Context, err error) func (controller *UIController) OnExecutorComplete(ctx context.Context, result *ExecuteResult) error { data, err := json.Marshal(result) if err != nil { - log.Errorf("Failed to serialize ExecuteResult: %v", err) + log.Error(fmt.Sprintf("Failed to serialize ExecuteResult: %v", err)) return err } fmt.Fprintf(controller.writer, "Executor succeeds: %s", string(data)) diff --git a/go/vt/servenv/grpc_auth.go b/go/vt/servenv/grpc_auth.go index a9b3c86c273..a7848a304d0 100644 --- a/go/vt/servenv/grpc_auth.go +++ b/go/vt/servenv/grpc_auth.go @@ -18,6 +18,8 @@ package servenv import ( "context" + "fmt" + "os" "github.com/spf13/pflag" "google.golang.org/grpc" @@ -63,7 +65,8 @@ var authPlugins = make(map[string]func() (Authenticator, error)) // RegisterAuthPlugin registers an implementation of AuthServer. 
func RegisterAuthPlugin(name string, authPlugin func() (Authenticator, error)) { if _, ok := authPlugins[name]; ok { - log.Fatalf("AuthPlugin named %v already exists", name) + log.Error(fmt.Sprintf("AuthPlugin named %v already exists", name)) + os.Exit(1) } authPlugins[name] = authPlugin } @@ -72,7 +75,8 @@ func GetAuthenticator(name string) func() (Authenticator, error) { authPlugin, ok := authPlugins[name] if !ok { - log.Fatalf("no AuthPlugin name %v registered", name) + log.Error(fmt.Sprintf("no AuthPlugin named %v registered", name)) + os.Exit(1) } return authPlugin } diff --git a/go/vt/servenv/grpc_server.go b/go/vt/servenv/grpc_server.go index 8b413f1b6ba..c98a0803226 100644 --- a/go/vt/servenv/grpc_server.go +++ b/go/vt/servenv/grpc_server.go @@ -19,8 +19,10 @@ package servenv import ( "context" "crypto/tls" + "fmt" "math" "net" + "os" "strconv" "time" @@ -207,7 +209,7 @@ func isGRPCEnabled() bool { func createGRPCServer() { // skip if not registered if !isGRPCEnabled() { - log.Infof("Skipping gRPC server creation") + log.Info("Skipping gRPC server creation") return } @@ -215,13 +217,14 @@ func createGRPCServer() { if gRPCCert != "" && gRPCKey != "" { config, err := vttls.ServerConfig(gRPCCert, gRPCKey, gRPCCA, gRPCCRL, gRPCServerCA, tls.VersionTLS12) if err != nil { - log.Exitf("Failed to log gRPC cert/key/ca: %v", err) + log.Error(fmt.Sprintf("Failed to load gRPC cert/key/ca: %v", err)) + os.Exit(1) } // create the creds server options creds := credentials.NewTLS(config) if gRPCEnableOptionalTLS { - log.Warning("Optional TLS is active. Plain-text connections will be accepted") + log.Warn("Optional TLS is active. Plain-text connections will be accepted") creds = grpcoptionaltls.New(creds) } opts = []grpc.ServerOption{grpc.Creds(creds)} @@ -234,7 +237,7 @@ func createGRPCServer() { // Note: For gRPC 1.0.0 it's sufficient to set the limit on the server only // because it's not enforced on the client side.
msgSize := grpccommon.MaxMessageSize() - log.Infof("Setting grpc max message size to %d", msgSize) + log.Info(fmt.Sprintf("Setting grpc max message size to %d", msgSize)) opts = append(opts, grpc.MaxRecvMsgSize(msgSize)) opts = append(opts, grpc.MaxSendMsgSize(msgSize)) @@ -244,12 +247,12 @@ func createGRPCServer() { } if gRPCInitialConnWindowSize != 0 { - log.Infof("Setting grpc server initial conn window size to %d", int32(gRPCInitialConnWindowSize)) + log.Info(fmt.Sprintf("Setting grpc server initial conn window size to %d", int32(gRPCInitialConnWindowSize))) opts = append(opts, grpc.InitialConnWindowSize(int32(gRPCInitialConnWindowSize))) } if gRPCInitialWindowSize != 0 { - log.Infof("Setting grpc server initial window size to %d", int32(gRPCInitialWindowSize)) + log.Info(fmt.Sprintf("Setting grpc server initial window size to %d", int32(gRPCInitialWindowSize))) opts = append(opts, grpc.InitialWindowSize(int32(gRPCInitialWindowSize))) } @@ -277,11 +280,12 @@ func interceptors() []grpc.ServerOption { interceptors := &serverInterceptorBuilder{} if gRPCAuth != "" { - log.Infof("enabling auth plugin %v", gRPCAuth) + log.Info(fmt.Sprintf("enabling auth plugin %v", gRPCAuth)) pluginInitializer := GetAuthenticator(gRPCAuth) authPluginImpl, err := pluginInitializer() if err != nil { - log.Fatalf("Failed to load auth plugin: %v", err) + log.Error(fmt.Sprintf("Failed to load auth plugin: %v", err)) + os.Exit(1) } authPlugin = authPluginImpl interceptors.Add(authenticatingStreamInterceptor, authenticatingUnaryInterceptor) @@ -322,10 +326,11 @@ func serveGRPC() { } // listen on the port - log.Infof("Listening for gRPC calls on port %v", gRPCPort) + log.Info(fmt.Sprintf("Listening for gRPC calls on port %v", gRPCPort)) listener, err := net.Listen("tcp", net.JoinHostPort(gRPCBindAddress, strconv.Itoa(gRPCPort))) if err != nil { - log.Exitf("Cannot listen on port %v for gRPC: %v", gRPCPort, err) + log.Error(fmt.Sprintf("Cannot listen on port %v for gRPC: %v", gRPCPort, err)) + os.Exit(1) } // and serve on it @@ -337,7 +342,8 @@ func serveGRPC() { go func() { err := GRPCServer.Serve(listener) if err != nil { - log.Exitf("Failed to start grpc server: %v", err) + log.Error(fmt.Sprintf("Failed to start grpc server: %v", err)) + os.Exit(1) } }() @@ -354,7 +360,8 @@ func registerOrca() { MinReportingInterval: 30 * time.Second, ServerMetricsProvider: GRPCServerMetricsRecorder, }); err != nil { - log.Exitf("Failed to register ORCA service: %v", err) + log.Error(fmt.Sprintf("Failed to register ORCA service: %v", err)) + os.Exit(1) } // Initialize the server metrics values. 
@@ -442,7 +449,7 @@ func (collector *serverInterceptorBuilder) AddUnary(u grpc.UnaryServerIntercepto // Build returns DialOptions to add to the grpc.Dial call func (collector *serverInterceptorBuilder) Build() []grpc.ServerOption { - log.Infof("Building interceptors with %d unary interceptors and %d stream interceptors", len(collector.unaryInterceptors), len(collector.streamInterceptors)) + log.Info(fmt.Sprintf("Building interceptors with %d unary interceptors and %d stream interceptors", len(collector.unaryInterceptors), len(collector.streamInterceptors))) switch len(collector.unaryInterceptors) + len(collector.streamInterceptors) { case 0: return []grpc.ServerOption{} diff --git a/go/vt/servenv/grpc_server_auth_mtls.go b/go/vt/servenv/grpc_server_auth_mtls.go index 42259ba505a..dd67d9cbe55 100644 --- a/go/vt/servenv/grpc_server_auth_mtls.go +++ b/go/vt/servenv/grpc_server_auth_mtls.go @@ -18,6 +18,7 @@ package servenv import ( "context" + "fmt" "strings" "github.com/spf13/pflag" @@ -72,7 +73,7 @@ func mtlsAuthPluginInitializer() (Authenticator, error) { mtlsAuthPlugin := &MtlsAuthPlugin{ clientCertSubstrings: strings.Split(clientCertSubstrings, ":"), } - log.Infof("mtls auth plugin have initialized successfully with allowed client cert name substrings of %v", clientCertSubstrings) + log.Info(fmt.Sprintf("mtls auth plugin has initialized successfully with allowed client cert name substrings of %v", clientCertSubstrings)) return mtlsAuthPlugin, nil } diff --git a/go/vt/servenv/metrics_cgroup.go b/go/vt/servenv/metrics_cgroup.go index fc30c2baa0e..beda04ec3c9 100644 --- a/go/vt/servenv/metrics_cgroup.go +++ b/go/vt/servenv/metrics_cgroup.go @@ -43,17 +43,17 @@ var ( func setup() { if cgroups.Mode() != cgroups.Unified { - log.Warning("cgroup metrics are only supported with cgroup v2, will use host metrics") + log.Warn("cgroup metrics are only supported with cgroup v2, will use host metrics") return } manager, err := getCgroupManager() if err != nil { - log.Warningf("Failed to init cgroup manager for metrics, will use host metrics: %v", err) + log.Warn(fmt.Sprintf("Failed to init cgroup manager for metrics, will use host metrics: %v", err)) } cgroupManager = manager lastCpu, err = getCurrentCgroupCpuUsage() if err != nil { - log.Warningf("Failed to get initial cgroup CPU usage: %v", err) + log.Warn(fmt.Sprintf("Failed to get initial cgroup CPU usage: %v", err)) } lastTime = time.Now() } diff --git a/go/vt/servenv/pid_file.go b/go/vt/servenv/pid_file.go index aaaff36c22d..7874f436c94 100644 --- a/go/vt/servenv/pid_file.go +++ b/go/vt/servenv/pid_file.go @@ -36,7 +36,7 @@ func init() { file, err := os.OpenFile(pidFile, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o666) if err != nil { - log.Errorf("Unable to create pid file '%s': %v", pidFile, err) + log.Error(fmt.Sprintf("Unable to create pid file '%s': %v", pidFile, err)) return } pidFileCreated = true @@ -54,7 +54,7 @@ func init() { } if err := os.Remove(pidFile); err != nil { - log.Errorf("Unable to remove pid file '%s': %v", pidFile, err) + log.Error(fmt.Sprintf("Unable to remove pid file '%s': %v", pidFile, err)) } }) } diff --git a/go/vt/servenv/pprof.go b/go/vt/servenv/pprof.go index a2116d609ad..931e075b940 100644 --- a/go/vt/servenv/pprof.go +++ b/go/vt/servenv/pprof.go @@ -150,7 +150,8 @@ func startCallback(start func()) func() { if atomic.CompareAndSwapUint32(&profileStarted, 0, 1) { start() } else { - log.Fatal("profile: Start() already called") + log.Error("profile: Start() already called") + os.Exit(1) } } } @@ -177,17 +178,21 @@ func
(prof *profile) mkprofile() io.WriteCloser { path, err = os.MkdirTemp("", "profile") } if err != nil { - log.Fatalf("pprof: could not create initial output directory: %v", err) + log.Error(fmt.Sprintf("pprof: could not create initial output directory: %v", err)) + os.Exit(1) } if !prof.quiet { - logf = log.Infof + logf = func(format string, args ...any) { + log.Info(fmt.Sprintf(format, args...)) + } } fn := filepath.Join(path, prof.mode.filename()) f, err := os.Create(fn) if err != nil { - log.Fatalf("pprof: could not create profile %q: %v", fn, err) + log.Error(fmt.Sprintf("pprof: could not create profile %q: %v", fn, err)) + os.Exit(1) } logf("pprof: %s profiling enabled, %s", string(prof.mode), fn) @@ -272,7 +277,8 @@ func (prof *profile) init() (start func(), stop func()) { start = startCallback(func() { pf = prof.mkprofile() if err := trace.Start(pf); err != nil { - log.Fatalf("pprof: could not start trace: %v", err) + log.Error(fmt.Sprintf("pprof: could not start trace: %v", err)) + os.Exit(1) } }) stop = stopCallback(func() { diff --git a/go/vt/servenv/pprof_unix.go b/go/vt/servenv/pprof_unix.go index 097abc08720..6c20253e7d9 100644 --- a/go/vt/servenv/pprof_unix.go +++ b/go/vt/servenv/pprof_unix.go @@ -19,6 +19,7 @@ limitations under the License. package servenv import ( + "fmt" "os" "os/signal" "syscall" @@ -29,7 +30,8 @@ import ( func pprofInit() { prof, err := parseProfileFlag(pprofFlag) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } if prof != nil { start, stop := prof.init() diff --git a/go/vt/servenv/pprof_windows.go b/go/vt/servenv/pprof_windows.go index 7ec4be816df..1f71569cea0 100644 --- a/go/vt/servenv/pprof_windows.go +++ b/go/vt/servenv/pprof_windows.go @@ -23,5 +23,5 @@ import ( ) func pprofInit() { - log.Warningf("pprof is not supported on Windows") + log.Warn("pprof is not supported on Windows") } diff --git a/go/vt/servenv/run.go b/go/vt/servenv/run.go index cef81e87a99..a19ffb5039c 100644 --- a/go/vt/servenv/run.go +++ b/go/vt/servenv/run.go @@ -17,6 +17,7 @@ limitations under the License. 
package servenv import ( + "fmt" "net" "net/url" "os" @@ -46,12 +47,13 @@ func Run(bindAddress string, port int) { l, err := net.Listen("tcp", net.JoinHostPort(bindAddress, strconv.Itoa(port))) if err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } go func() { err := HTTPServe(l) if err != nil { - log.Errorf("http serve returned unexpected error: %v", err) + log.Error(fmt.Sprintf("http serve returned unexpected error: %v", err)) } }() @@ -61,13 +63,13 @@ func Run(bindAddress string, port int) { <-ExitChan startTime := time.Now() - log.Infof("Entering lameduck mode for at least %v", timeouts.LameduckPeriod) - log.Infof("Firing asynchronous OnTerm hooks") + log.Info(fmt.Sprintf("Entering lameduck mode for at least %v", timeouts.LameduckPeriod)) + log.Info("Firing asynchronous OnTerm hooks") go onTermHooks.Fire() fireOnTermSyncHooks(timeouts.OnTermTimeout) if remain := timeouts.LameduckPeriod - time.Since(startTime); remain > 0 { - log.Infof("Sleeping an extra %v after OnTermSync to finish lameduck period", remain) + log.Info(fmt.Sprintf("Sleeping an extra %v after OnTermSync to finish lameduck period", remain)) time.Sleep(remain) } l.Close() diff --git a/go/vt/servenv/servenv.go b/go/vt/servenv/servenv.go index d7ddccf1890..68ad9533427 100644 --- a/go/vt/servenv/servenv.go +++ b/go/vt/servenv/servenv.go @@ -139,7 +139,8 @@ func populateListeningURL(port int32) { if err != nil { host, err = os.Hostname() if err != nil { - log.Exitf("os.Hostname() failed: %v", err) + log.Error(fmt.Sprintf("os.Hostname() failed: %v", err)) + os.Exit(1) } } ListeningURL = url.URL{ @@ -193,7 +194,7 @@ func fireOnCloseHooks(timeout time.Duration) bool { // fireHooksWithTimeout returns true iff all the hooks finish before the timeout. func fireHooksWithTimeout(timeout time.Duration, name string, hookFn func()) bool { defer log.Flush() - log.Infof("Firing %s hooks and waiting up to %v for them", name, timeout) + log.Info(fmt.Sprintf("Firing %s hooks and waiting up to %v for them", name, timeout)) timer := time.NewTimer(timeout) defer timer.Stop() @@ -206,10 +207,10 @@ func fireHooksWithTimeout(timeout time.Duration, name string, hookFn func()) boo select { case <-done: - log.Infof("%s hooks finished", name) + log.Info(name + " hooks finished") return true case <-timer.C: - log.Infof("%s hooks timed out", name) + log.Info(name + " hooks timed out") return false } } @@ -306,6 +307,11 @@ func ParseFlags(cmd string) { _flag.Parse(fs) + if err := log.Init(); err != nil { + log.Error(fmt.Sprintf("log.Init failed: %v", err)) + os.Exit(1) + } + if version { AppVersion.Print() os.Exit(0) @@ -314,7 +320,8 @@ func ParseFlags(cmd string) { args := fs.Args() if len(args) > 0 { _flag.Usage() - log.Exitf("%s doesn't take any positional arguments, got '%s'", cmd, strings.Join(args, " ")) + log.Error(fmt.Sprintf("%s doesn't take any positional arguments, got '%s'", cmd, strings.Join(args, " "))) + os.Exit(1) } loadViper(cmd) @@ -330,6 +337,11 @@ func ParseFlagsForTests(cmd string) { pflag.CommandLine = fs pflag.Parse() viperutil.BindFlags(fs) + + if err := log.Init(); err != nil { + log.Error(fmt.Sprintf("log.Init failed: %v", err)) + os.Exit(1) + } loadViper(cmd) } @@ -371,12 +383,15 @@ func moveFlags(name string, fs *pflag.FlagSet) { // functions. func CobraPreRunE(cmd *cobra.Command, args []string) error { _flag.TrickGlog() + if err := log.Init(); err != nil { + return err + } // Register logging on config file change. 
ch := make(chan struct{}) viperutil.NotifyConfigReload(ch) go func() { for range ch { - log.Infof("Change in configuration - %v", viperdebug.AllSettings()) + log.Info(fmt.Sprintf("Change in configuration - %v", viperdebug.AllSettings())) } }() @@ -416,6 +431,11 @@ func ParseFlagsWithArgs(cmd string) []string { _flag.Parse(fs) + if err := log.Init(); err != nil { + log.Error(fmt.Sprintf("log.Init failed: %v", err)) + os.Exit(1) + } + if version { AppVersion.Print() os.Exit(0) @@ -423,7 +443,8 @@ func ParseFlagsWithArgs(cmd string) []string { args := fs.Args() if len(args) == 0 { - log.Exitf("%s expected at least one positional argument", cmd) + log.Error(cmd + " expected at least one positional argument") + os.Exit(1) } loadViper(cmd) @@ -436,7 +457,8 @@ func ParseFlagsWithArgs(cmd string) []string { func loadViper(cmd string) { watchCancel, err := viperutil.LoadConfig() if err != nil { - log.Exitf("%s: failed to read in config: %s", cmd, err.Error()) + log.Error(fmt.Sprintf("%s: failed to read in config: %s", cmd, err.Error())) + os.Exit(1) } OnTerm(watchCancel) debugConfigRegisterOnce.Do(func() { diff --git a/go/vt/servenv/servenv_unix.go b/go/vt/servenv/servenv_unix.go index 17fa85c4167..2c66b494d0e 100644 --- a/go/vt/servenv/servenv_unix.go +++ b/go/vt/servenv/servenv_unix.go @@ -19,6 +19,7 @@ limitations under the License. package servenv import ( + "fmt" "os" "os/signal" "runtime/debug" @@ -48,22 +49,24 @@ func Init() { signal.Notify(sigChan, syscall.SIGPIPE) go func() { <-sigChan - log.Warning("Caught SIGPIPE (ignoring all future SIGPIPEs)") + log.Warn("Caught SIGPIPE (ignoring all future SIGPIPEs)") signal.Ignore(syscall.SIGPIPE) }() } // Add version tag to every info log - log.Infof(AppVersion.String()) + log.Info(AppVersion.String()) if inited { - log.Fatal("servenv.Init called second time") + log.Error("servenv.Init called second time") + os.Exit(1) } inited = true // Once you run as root, you pretty much destroy the chances of a // non-privileged user starting the program correctly. if uid := os.Getuid(); uid == 0 { - log.Exitf("servenv.Init: running this as root makes no sense") + log.Error("servenv.Init: running this as root makes no sense") + os.Exit(1) } // We used to set this limit directly, but you pretty much have to @@ -73,7 +76,7 @@ func Init() { // the server. fdLimit := &syscall.Rlimit{} if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, fdLimit); err != nil { - log.Errorf("max-open-fds failed: %v", err) + log.Error(fmt.Sprintf("max-open-fds failed: %v", err)) } fdl := stats.NewGauge("MaxFds", "File descriptor limit") fdl.Set(int64(fdLimit.Cur)) diff --git a/go/vt/servenv/service_map.go b/go/vt/servenv/service_map.go index cd1bf758272..0b8939c26c3 100644 --- a/go/vt/servenv/service_map.go +++ b/go/vt/servenv/service_map.go @@ -17,6 +17,8 @@ limitations under the License. 
package servenv import ( + "fmt" + "github.com/spf13/pflag" "vitess.io/vitess/go/vt/log" @@ -64,9 +66,9 @@ func updateServiceMap() { // (and also logs how to enable / disable it) func checkServiceMap(protocol, name string) bool { if serviceMap[protocol+"-"+name] { - log.Infof("Registering %v for %v, disable it with -%v-%v service-map parameter", name, protocol, protocol, name) + log.Info(fmt.Sprintf("Registering %v for %v, disable it with -%v-%v service-map parameter", name, protocol, protocol, name)) return true } - log.Infof("Not registering %v for %v, enable it with %v-%v service-map parameter", name, protocol, protocol, name) + log.Info(fmt.Sprintf("Not registering %v for %v, enable it with %v-%v service-map parameter", name, protocol, protocol, name)) return false } diff --git a/go/vt/servenv/status.go b/go/vt/servenv/status.go index 422e6907a76..d959f16cc80 100644 --- a/go/vt/servenv/status.go +++ b/go/vt/servenv/status.go @@ -251,7 +251,7 @@ func (sp *statusPage) statusHandler(w http.ResponseWriter, r *http.Request) { if err := sp.tmpl.ExecuteTemplate(w, "status", data); err != nil { if _, ok := err.(net.Error); !ok { - log.Errorf("servenv: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("servenv: couldn't execute template: %v", err)) } } } @@ -300,7 +300,7 @@ func registerDebugBlockProfileRate() { } blockProfileRate = rate runtime.SetBlockProfileRate(rate) - log.Infof("Set block profile rate to: %d", rate) + log.Info(fmt.Sprintf("Set block profile rate to: %d", rate)) w.Header().Set("Content-Type", "text/plain") io.WriteString(w, message) }) @@ -328,7 +328,7 @@ func registerDebugMutexProfileFraction() { message = fmt.Sprintf("Mutex profiling set to fraction %d", fraction) } runtime.SetMutexProfileFraction(fraction) - log.Infof("Set mutex profiling fraction to: %d", fraction) + log.Info(fmt.Sprintf("Set mutex profiling fraction to: %d", fraction)) w.Header().Set("Content-Type", "text/plain") io.WriteString(w, message) }) @@ -338,6 +338,7 @@ func init() { var err error hostname, err = os.Hostname() if err != nil { - log.Exitf("os.Hostname: %v", err) + log.Error(fmt.Sprintf("os.Hostname: %v", err)) + os.Exit(1) } } diff --git a/go/vt/servenv/unix_socket.go b/go/vt/servenv/unix_socket.go index 9e6b25a2f8b..59134fcb308 100644 --- a/go/vt/servenv/unix_socket.go +++ b/go/vt/servenv/unix_socket.go @@ -17,6 +17,7 @@ limitations under the License. package servenv import ( + "fmt" "net" "os" @@ -33,7 +34,7 @@ var socketFile string // serveSocketFile listen to the named socket and serves RPCs on it. 
func serveSocketFile() { if socketFile == "" { - log.Infof("Not listening on socket file") + log.Info("Not listening on socket file") return } name := socketFile @@ -42,15 +43,17 @@ func serveSocketFile() { if _, err := os.Stat(name); err == nil { err = os.Remove(name) if err != nil { - log.Exitf("Cannot remove socket file %v: %v", name, err) + log.Error(fmt.Sprintf("Cannot remove socket file %v: %v", name, err)) + os.Exit(1) } } l, err := net.Listen("unix", name) if err != nil { - log.Exitf("Error listening on socket file %v: %v", name, err) + log.Error(fmt.Sprintf("Error listening on socket file %v: %v", name, err)) + os.Exit(1) } - log.Infof("Listening on socket file %v for gRPC", name) + log.Info(fmt.Sprintf("Listening on socket file %v for gRPC", name)) go GRPCServer.Serve(l) } diff --git a/go/vt/sidecardb/sidecardb.go b/go/vt/sidecardb/sidecardb.go index b39a0dbc404..4784189076c 100644 --- a/go/vt/sidecardb/sidecardb.go +++ b/go/vt/sidecardb/sidecardb.go @@ -155,7 +155,7 @@ func loadSchemaDefinitions(parser *sqlparser.Parser) { var module string dir, fname := filepath.Split(path) if !strings.HasSuffix(strings.ToLower(fname), sqlFileExtension) { - log.Infof("Ignoring non-SQL file: %s, found during sidecar database initialization", path) + log.Info(fmt.Sprintf("Ignoring non-SQL file: %s, found during sidecar database initialization", path)) return nil } dirparts := strings.Split(strings.Trim(dir, "/"), "/") @@ -182,7 +182,7 @@ func loadSchemaDefinitions(parser *sqlparser.Parser) { return nil }) if err != nil { - log.Errorf("error loading schema files: %+v", err) + log.Error(fmt.Sprintf("error loading schema files: %+v", err)) } } @@ -191,7 +191,7 @@ func printCallerDetails() { pc, _, line, ok := runtime.Caller(2) details := runtime.FuncForPC(pc) if ok && details != nil { - log.Infof("%s schema init called from %s:%d\n", sidecar.GetName(), details.Name(), line) + log.Info(fmt.Sprintf("%s schema init called from %s:%d\n", sidecar.GetName(), details.Name(), line)) } } @@ -234,7 +234,7 @@ func getDDLErrorHistory() []*ddlError { // the declarative schema defined for all tables. func Init(ctx context.Context, env *vtenv.Environment, exec Exec) error { printCallerDetails() // for debug purposes only, remove in v17 - log.Infof("Starting sidecardb.Init()") + log.Info("Starting sidecardb.Init()") once.Do(func() { loadSchemaDefinitions(env.Parser()) @@ -316,16 +316,16 @@ func (si *schemaInit) doesSidecarDBExist() (bool, error) { } rs, err := si.exec(si.ctx, query, 2, false) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return false, err } switch len(rs.Rows) { case 0: - log.Infof("doesSidecarDBExist: %s not found", sidecar.GetName()) + log.Info(fmt.Sprintf("doesSidecarDBExist: %s not found", sidecar.GetName())) return false, nil case 1: - log.Infof("doesSidecarDBExist: found %s", sidecar.GetName()) + log.Info("doesSidecarDBExist: found " + sidecar.GetName()) return true, nil default: // This should never happen. 
@@ -336,10 +336,10 @@ func (si *schemaInit) doesSidecarDBExist() (bool, error) { func (si *schemaInit) createSidecarDB() error { _, err := si.exec(si.ctx, sidecar.GetCreateQuery(), 1, false) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } - log.Infof("createSidecarDB: %s", sidecar.GetName()) + log.Info("createSidecarDB: " + sidecar.GetName()) return nil } @@ -352,7 +352,7 @@ func (si *schemaInit) setCurrentDatabase(dbName string) error { func (si *schemaInit) collation() (collations.ID, error) { rs, err := si.exec(si.ctx, sidecarCollationQuery, 2, false) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return collations.Unknown, err } @@ -379,7 +379,7 @@ func (si *schemaInit) getCurrentSchema(tableName string) (string, error) { // table does not exist in the sidecar database return "", nil } - log.Errorf("Error getting table schema for %s: %+v", tableName, err) + log.Error(fmt.Sprintf("Error getting table schema for %s: %+v", tableName, err)) return "", err } if len(rs.Rows) > 0 { @@ -409,9 +409,9 @@ func (si *schemaInit) findTableSchemaDiff(tableName, current, desired string) (s ddl = diff.CanonicalStatementString() if ddl == "" { - log.Infof("No changes needed for table %s", tableName) + log.Info("No changes needed for table " + tableName) } else { - log.Infof("Applying DDL for table %s:\n%s", tableName, ddl) + log.Info(fmt.Sprintf("Applying DDL for table %s:\n%s", tableName, ddl)) } } @@ -458,16 +458,16 @@ func (si *schemaInit) ensureSchema(table *sidecarTable) error { } return nil } - log.Infof("Applied DDL %s for table %s during sidecar database initialization", ddl, table) + log.Info(fmt.Sprintf("Applied DDL %s for table %s during sidecar database initialization", ddl, table)) ddlCount.Add(1) return nil } - log.Infof("Table schema was already up to date for the %s table in the %s sidecar database", table.name, sidecar.GetName()) + log.Info(fmt.Sprintf("Table schema was already up to date for the %s table in the %s sidecar database", table.name, sidecar.GetName())) return nil } func recordDDLError(tableName string, err error) { - log.Error(err) + log.Error(fmt.Sprint(err)) ddlErrorCount.Add(1) ddlErrorHistory.Add(&ddlError{ tableName: tableName, diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go index e6cfaf831c2..eca3fecd802 100644 --- a/go/vt/sqlparser/ast_funcs.go +++ b/go/vt/sqlparser/ast_funcs.go @@ -467,7 +467,7 @@ func ReplaceExpr(root, from, to Expr) Expr { expr, success := tmp.(Expr) if !success { - log.Errorf("Failed to rewrite expression. Rewriter returned a non-expression: %s", String(tmp)) + log.Error("Failed to rewrite expression. 
Rewriter returned a non-expression: " + String(tmp)) return from } diff --git a/go/vt/sqlparser/parser.go b/go/vt/sqlparser/parser.go index 0bc368e847c..8c1f6526698 100644 --- a/go/vt/sqlparser/parser.go +++ b/go/vt/sqlparser/parser.go @@ -81,7 +81,7 @@ func (p *Parser) Parse2(sql string) (Statement, BindVars, error) { if typ, val := tokenizer.Scan(); typ != 0 { return nil, nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", val) } - log.Warningf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError) + log.Warn(fmt.Sprintf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError)) switch x := tokenizer.partialDDL.(type) { case DBDDLStatement: x.SetFullyParsed(false) diff --git a/go/vt/srvtopo/discover.go b/go/vt/srvtopo/discover.go index 2b020e89887..288a193d881 100644 --- a/go/vt/srvtopo/discover.go +++ b/go/vt/srvtopo/discover.go @@ -18,6 +18,7 @@ package srvtopo import ( "context" + "fmt" "sync" "vitess.io/vitess/go/vt/concurrency" @@ -56,7 +57,7 @@ func FindAllTargetsAndKeyspaces(ctx context.Context, ts Server, cell string, key if topo.IsErrType(err, topo.NoNode) { // Possibly a race condition, or leftover // crud in the topology service. Just log it. - log.Warningf("GetSrvKeyspace(%v, %v) returned ErrNoNode, skipping that SrvKeyspace", cell, keyspace) + log.Warn(fmt.Sprintf("GetSrvKeyspace(%v, %v) returned ErrNoNode, skipping that SrvKeyspace", cell, keyspace)) } else { // More serious error, abort. errRecorder.RecordError(err) diff --git a/go/vt/srvtopo/query.go b/go/vt/srvtopo/query.go index a2cd7c46a3a..750e4251294 100644 --- a/go/vt/srvtopo/query.go +++ b/go/vt/srvtopo/query.go @@ -107,7 +107,7 @@ func (q *resilientQuery) getCurrentValue(ctx context.Context, wkey fmt.Stringer, go func() { defer func() { if err := recover(); err != nil { - log.Errorf("ResilientQuery uncaught panic, cell :%v, err :%v)", key, err) + log.Error(fmt.Sprintf("ResilientQuery uncaught panic, cell :%v, err :%v)", key, err)) } }() @@ -132,14 +132,14 @@ func (q *resilientQuery) getCurrentValue(ctx context.Context, wkey fmt.Stringer, } else { q.counts.Add(errorCategory, 1) if entry.insertionTime.IsZero() { - log.Errorf("ResilientQuery(%v, %v) failed: %v (no cached value, caching and returning error)", ctx, wkey, err) + log.Error(fmt.Sprintf("ResilientQuery(%v, %v) failed: %v (no cached value, caching and returning error)", ctx, wkey, err)) } else if newCtx.Err() == context.DeadlineExceeded { - log.Errorf("ResilientQuery(%v, %v) failed: %v (request timeout), (keeping cached value: %v)", ctx, wkey, err, entry.value) + log.Error(fmt.Sprintf("ResilientQuery(%v, %v) failed: %v (request timeout), (keeping cached value: %v)", ctx, wkey, err, entry.value)) } else if entry.value != nil && time.Since(entry.insertionTime) < q.cacheTTL { q.counts.Add(cachedCategory, 1) - log.Warningf("ResilientQuery(%v, %v) failed: %v (cached value still considered valid: %v)", ctx, wkey, err, entry.value) + log.Warn(fmt.Sprintf("ResilientQuery(%v, %v) failed: %v (cached value still considered valid: %v)", ctx, wkey, err, entry.value)) } else { - log.Errorf("ResilientQuery(%v, %v) failed: %v (cached value expired, keeping cached value)", ctx, wkey, err) + log.Error(fmt.Sprintf("ResilientQuery(%v, %v) failed: %v (cached value expired, keeping cached value)", ctx, wkey, err)) } } diff --git a/go/vt/srvtopo/resilient_server.go b/go/vt/srvtopo/resilient_server.go index 1b8b390f087..a86923facd0 100644 --- a/go/vt/srvtopo/resilient_server.go +++ b/go/vt/srvtopo/resilient_server.go @@ -18,6 +18,7 @@ package 
srvtopo import ( "context" + "os" "time" "github.com/spf13/pflag" @@ -81,7 +82,8 @@ type ResilientServer struct { // based on the provided topo.Server. func NewResilientServer(ctx context.Context, base *topo.Server, counts *stats.CountersWithSingleLabel) *ResilientServer { if srvTopoCacheRefresh > srvTopoCacheTTL { - log.Fatalf("srv-topo-cache-refresh must be less than or equal to srv-topo-cache-ttl") + log.Error("srv-topo-cache-refresh must be less than or equal to srv-topo-cache-ttl") + os.Exit(1) } return &ResilientServer{ diff --git a/go/vt/srvtopo/watch.go b/go/vt/srvtopo/watch.go index e81fa2f5b76..e2af2fbf4da 100644 --- a/go/vt/srvtopo/watch.go +++ b/go/vt/srvtopo/watch.go @@ -207,14 +207,14 @@ func (entry *watchEntry) onErrorLocked(ctx context.Context, err error, init bool // TTL cache is only checked if the error is a known error i.e topo.Error. _, isTopoErr := err.(topo.Error) if entry.value != nil && isTopoErr && time.Since(entry.lastValueTime) > entry.rw.cacheTTL { - log.Errorf("WatchSrvKeyspace clearing cached entry for %v", entry.key) + log.Error(fmt.Sprintf("WatchSrvKeyspace clearing cached entry for %v", entry.key)) entry.value = nil } } else { if !topo.IsErrType(err, topo.Interrupted) { // No need to log if we're explicitly interrupted. entry.lastError = fmt.Errorf("ResilientWatch stream failed for %v: %w", entry.key, err) - log.Errorf("%v", entry.lastError) + log.Error(fmt.Sprintf("%v", entry.lastError)) } // Even though we didn't get a new value, update the lastValueTime diff --git a/go/vt/tableacl/tableacl.go b/go/vt/tableacl/tableacl.go index 26efdc74cc3..48a569e550e 100644 --- a/go/vt/tableacl/tableacl.go +++ b/go/vt/tableacl/tableacl.go @@ -107,7 +107,7 @@ func (tacl *tableACL) init(configFile string, aclCB func()) error { } data, err := os.ReadFile(configFile) if err != nil { - log.Errorf("unable to read tableACL config file: %v Error: %v", configFile, err) + log.Error(fmt.Sprintf("unable to read tableACL config file: %v Error: %v", configFile, err)) return err } if len(data) == 0 { @@ -118,7 +118,7 @@ func (tacl *tableACL) init(configFile string, aclCB func()) error { if err := config.UnmarshalVT(data); err != nil { // try to parse tableacl as json file if jsonErr := json2.UnmarshalPB(data, config); jsonErr != nil { - log.Errorf("unable to parse tableACL config file as a protobuf or json file. protobuf err: %v json err: %v", err, jsonErr) + log.Error(fmt.Sprintf("unable to parse tableACL config file as a protobuf or json file. protobuf err: %v json err: %v", err, jsonErr)) return fmt.Errorf("unable to unmarshal Table ACL data: %s", data) } } diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go index 6988adb3ba7..f82c60bb560 100644 --- a/go/vt/throttler/demo/throttler_demo.go +++ b/go/vt/throttler/demo/throttler_demo.go @@ -18,8 +18,10 @@ package main import ( "context" + "fmt" "math/rand/v2" "net/http" + "os" "sync" "testing" "time" @@ -134,7 +136,8 @@ func newReplica(env *vtenv.Environment, lagUpdateInterval, degrationInterval, de throttler, err := throttler.NewThrottler("replica", "TPS", 1, rate, throttler.ReplicationLagModuleDisabled) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } var nextDegration time.Time @@ -184,19 +187,19 @@ func (r *replica) processReplicationStream() { lagTruncated := uint32(now.Unix() - msg.Unix()) // Display lag with a higher precision as well. 
lag := now.Sub(msg).Seconds() - log.Infof("current lag: %1ds (%1.1fs) replica rate: % 7.1f chan len: % 6d", lagTruncated, lag, float64(actualRate)/r.lagUpdateInterval.Seconds(), len(r.replicationStream)) + log.Info(fmt.Sprintf("current lag: %1ds (%1.1fs) replica rate: % 7.1f chan len: % 6d", lagTruncated, lag, float64(actualRate)/r.lagUpdateInterval.Seconds(), len(r.replicationStream))) r.qs.AddHealthResponseWithReplicationLag(lagTruncated) r.lastHealthUpdate = now actualRate = 0 } if !r.nextDegration.IsZero() && time.Now().After(r.nextDegration) && r.currentDegrationEnd.IsZero() { degradedRate := rand.Int64N(rate) - log.Infof("degrading the replica for %.f seconds from %v TPS to %v", r.degrationDuration.Seconds(), rate, degradedRate) + log.Info(fmt.Sprintf("degrading the replica for %.f seconds from %v TPS to %v", r.degrationDuration.Seconds(), rate, degradedRate)) r.throttler.SetMaxRate(degradedRate) r.currentDegrationEnd = time.Now().Add(r.degrationDuration) } if !r.currentDegrationEnd.IsZero() && time.Now().After(r.currentDegrationEnd) { - log.Infof("degrading the replica stopped. Restoring TPS to: %v", rate) + log.Info(fmt.Sprintf("degrading the replica stopped. Restoring TPS to: %v", rate)) r.throttler.SetMaxRate(rate) r.currentDegrationEnd = time.Time{} r.nextDegration = time.Now().Add(r.degrationInterval) @@ -237,7 +240,8 @@ type client struct { func newClient(ctx context.Context, primary *primary, replica *replica, ts *topo.Server) *client { t, err := throttler.NewThrottler("client", "TPS", 1, throttler.MaxRateModuleDisabled, 5 /* seconds */) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } healthCheck := discovery.NewHealthCheck(ctx, 5*time.Second, 1*time.Minute, ts, "cell1", "", nil) @@ -309,7 +313,7 @@ func main() { http.Redirect(w, r, "/throttlerz", http.StatusTemporaryRedirect) }) - log.Infof("start rate set to: %v", rate) + log.Info(fmt.Sprintf("start rate set to: %v", rate)) ts := memorytopo.NewServer(context.Background(), "cell1") env, err := vtenv.New(vtenv.Options{ MySQLServerVersion: servenv.MySQLServerVersion(), @@ -317,7 +321,8 @@ func main() { TruncateErrLen: servenv.TruncateErrLen, }) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } replica := newReplica(env, lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts) primary := &primary{replica: replica} diff --git a/go/vt/throttler/manager.go b/go/vt/throttler/manager.go index ee142190f75..66de7b3b64c 100644 --- a/go/vt/throttler/manager.go +++ b/go/vt/throttler/manager.go @@ -89,7 +89,7 @@ func (m *managerImpl) unregisterThrottler(name string) { defer m.mu.Unlock() if _, ok := m.throttlers[name]; !ok { - log.Errorf("unregisterThrottler(): throttler with name '%v' is not registered", name) + log.Error(fmt.Sprintf("unregisterThrottler(): throttler with name '%v' is not registered", name)) return } delete(m.throttlers, name) diff --git a/go/vt/throttler/max_replication_lag_module.go b/go/vt/throttler/max_replication_lag_module.go index 8d682819037..eba33708f50 100644 --- a/go/vt/throttler/max_replication_lag_module.go +++ b/go/vt/throttler/max_replication_lag_module.go @@ -618,7 +618,7 @@ func (m *MaxReplicationLagModule) decreaseAndGuessRate(r *Result, now time.Time, d := lagRecordNow.time.Sub(lagRecordBefore.time) lagDifference := time.Duration(lagRecordNow.lag()-lagRecordBefore.lag()) * time.Second if lagDifference > d { - log.Errorf("Replication lag increase is higher than the elapsed time: %v > %v. This should not happen. 
Replication Lag Data points: Before: %+v Now: %+v", lagDifference, d, lagRecordBefore, lagRecordNow) + log.Error(fmt.Sprintf("Replication lag increase is higher than the elapsed time: %v > %v. This should not happen. Replication Lag Data points: Before: %+v Now: %+v", lagDifference, d, lagRecordBefore, lagRecordNow)) d = lagDifference } @@ -637,7 +637,7 @@ func (m *MaxReplicationLagModule) guessReplicationRate(r *Result, avgPrimaryRate // from the relative change in the replication lag. avgReplicationRate := avgPrimaryRate * (d - lagDifference).Seconds() / d.Seconds() if avgReplicationRate <= 0 { - log.Warningf("guessed Replication rate was <= 0 (%v). Primary rate: %v d: %.1f lag difference: %.1f", avgReplicationRate, avgPrimaryRate, d.Seconds(), lagDifference.Seconds()) + log.Warn(fmt.Sprintf("guessed Replication rate was <= 0 (%v). Primary rate: %v d: %.1f lag difference: %.1f", avgReplicationRate, avgPrimaryRate, d.Seconds(), lagDifference.Seconds())) avgReplicationRate = 1 } r.PrimaryRate = int64(avgPrimaryRate) diff --git a/go/vt/throttler/max_replication_lag_module_test.go b/go/vt/throttler/max_replication_lag_module_test.go index 77be6501e4c..69ac96e7e0d 100644 --- a/go/vt/throttler/max_replication_lag_module_test.go +++ b/go/vt/throttler/max_replication_lag_module_test.go @@ -414,7 +414,7 @@ func TestMaxReplicationLagModule_Increase_BadRateUpperBound(t *testing.T) { // Assume that a bad value of 150 was set @ 30s and log error if err := tf.m.memory.markBad(150, sinceZero(30*time.Second)); err != nil { - log.Errorf("tf.m.memory.markBad(150, sinceZero(30*time.Second)) falied : %v", err) + log.Error(fmt.Sprintf("tf.m.memory.markBad(150, sinceZero(30*time.Second)) failed: %v", err)) } // r2 @ 70s, 0s lag diff --git a/go/vt/throttler/throttler.go b/go/vt/throttler/throttler.go index 823e37ab75c..455fd73d925 100644 --- a/go/vt/throttler/throttler.go +++ b/go/vt/throttler/throttler.go @@ -301,7 +301,7 @@ func (t *ThrottlerImpl) updateMaxRate() { } if maxRate != ZeroRateNoProgess && maxRate < int64(threadsRunning) { - log.Warningf("Set maxRate is less than the number of threads (%v). To prevent threads from starving, maxRate was increased from: %v to: %v.", threadsRunning, maxRate, threadsRunning) + log.Warn(fmt.Sprintf("Set maxRate is less than the number of threads (%v). To prevent threads from starving, maxRate was increased from: %v to: %v.", threadsRunning, maxRate, threadsRunning)) maxRate = int64(threadsRunning) } maxRatePerThread := maxRate / int64(threadsRunning) diff --git a/go/vt/throttler/throttlerz.go b/go/vt/throttler/throttlerz.go index 84431aad62f..8d8285932b0 100644 --- a/go/vt/throttler/throttlerz.go +++ b/go/vt/throttler/throttlerz.go @@ -17,6 +17,7 @@ limitations under the License.
package throttler import ( + "fmt" "net/http" "slices" "strings" @@ -86,13 +87,13 @@ func listThrottlers(w http.ResponseWriter, m *managerImpl) { if err := listTemplate.Execute(w, map[string]any{ "Throttlers": throttlers, }); err != nil { - log.Errorf("listThrottlers failed :%v", err) + log.Error(fmt.Sprintf("listThrottlers failed :%v", err)) } } func showThrottlerDetails(w http.ResponseWriter, name string) { // Log error if err := detailsTemplate.Execute(w, name); err != nil { - log.Errorf("showThrottlerDetails failed :%v", err) + log.Error(fmt.Sprintf("showThrottlerDetails failed :%v", err)) } } diff --git a/go/vt/tlstest/tlstest.go b/go/vt/tlstest/tlstest.go index 7ee4c80a402..428373dbb3c 100644 --- a/go/vt/tlstest/tlstest.go +++ b/go/vt/tlstest/tlstest.go @@ -164,28 +164,32 @@ func signCert(parent *x509.Certificate, parentPriv crypto.PrivateKey, certPub cr // in the provided directory. Temporary files are also created in that // directory. func CreateCA(root string) { - log.Infof("Creating test root CA in %v", root) + log.Info(fmt.Sprintf("Creating test root CA in %v", root)) keyPath := path.Join(root, "ca-key.pem") certPath := path.Join(root, "ca-cert.pem") priv, err := generateKey() if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } err = saveKey(priv, keyPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } ca, err := signCert(nil, priv, publicKey(priv), CA, 1, true) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } err = saveCert(ca, certPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } @@ -197,35 +201,42 @@ func CreateIntermediateCA(root, parent, serial, name, commonName string) { caKey, err := loadKey(caKeyPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } caCert, err := loadCert(caCertPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } priv, err := generateKey() if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } err = saveKey(priv, keyPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } serialNr, err := strconv.ParseInt(serial, 10, 64) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } intermediate, err := signCert(caCert, caKey, publicKey(priv), commonName, serialNr, true) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } err = saveCert(intermediate, certPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } @@ -233,7 +244,7 @@ func CreateIntermediateCA(root, parent, serial, name, commonName string) { // with the provided serial number, name and common name. // name is the file name to use. Common Name is the certificate common name. 
func CreateSignedCert(root, parent, serial, name, commonName string) { - log.Infof("Creating signed cert and key %v", commonName) + log.Info(fmt.Sprintf("Creating signed cert and key %v", commonName)) caKeyPath := path.Join(root, parent+"-key.pem") caCertPath := path.Join(root, parent+"-cert.pem") @@ -242,54 +253,63 @@ func CreateSignedCert(root, parent, serial, name, commonName string) { caKey, err := loadKey(caKeyPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } caCert, err := loadCert(caCertPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } priv, err := generateKey() if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } err = saveKey(priv, keyPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } serialNr, err := strconv.ParseInt(serial, 10, 64) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } leaf, err := signCert(caCert, caKey, publicKey(priv), commonName, serialNr, false) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } err = saveCert(leaf, certPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } // CreateCRL creates a new empty certificate revocation list // for the provided parent func CreateCRL(root, parent string) { - log.Infof("Creating CRL for root CA in %v", root) + log.Info(fmt.Sprintf("Creating CRL for root CA in %v", root)) caKeyPath := path.Join(root, parent+"-key.pem") caCertPath := path.Join(root, parent+"-cert.pem") crlPath := path.Join(root, parent+"-crl.pem") caKey, err := loadKey(caKeyPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } caCert, err := loadCert(caCertPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } crlList, err := x509.CreateRevocationList(rand.Reader, &x509.RevocationList{ @@ -297,25 +317,28 @@ func CreateCRL(root, parent string) { Number: big.NewInt(1), }, caCert, caKey.(crypto.Signer)) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } out := &bytes.Buffer{} err = pem.Encode(out, &pem.Block{Type: "X509 CRL", Bytes: crlList}) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } err = os.WriteFile(crlPath, out.Bytes(), permissions) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } // RevokeCertAndRegenerateCRL revokes a provided certificate under the // provided parent CA and regenerates the CRL file for that parent func RevokeCertAndRegenerateCRL(root, parent, name string) { - log.Infof("Revoking certificate %s", name) + log.Info("Revoking certificate " + name) caKeyPath := path.Join(root, parent+"-key.pem") caCertPath := path.Join(root, parent+"-cert.pem") crlPath := path.Join(root, parent+"-crl.pem") @@ -323,7 +346,8 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) { certificate, err := loadCert(certPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } // Check if CRL already exists. 
If it doesn't, @@ -335,17 +359,20 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) { data, err := os.ReadFile(crlPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } block, _ := pem.Decode(data) if block == nil || block.Type != "X509 CRL" { - log.Fatal("failed to parse CRL PEM") + log.Error("failed to parse CRL PEM") + os.Exit(1) } crlList, err := x509.ParseRevocationList(block.Bytes) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } revoked := crlList.RevokedCertificateEntries @@ -356,11 +383,13 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) { caKey, err := loadKey(caKeyPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } caCert, err := loadCert(caCertPath) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } var crlNumber big.Int @@ -369,18 +398,21 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) { Number: crlNumber.Add(crlList.Number, big.NewInt(1)), }, caCert, caKey.(crypto.Signer)) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } out := &bytes.Buffer{} err = pem.Encode(out, &pem.Block{Type: "X509 CRL", Bytes: newCrl}) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } err = os.WriteFile(crlPath, out.Bytes(), permissions) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } } @@ -450,17 +482,20 @@ func CreateClientServerCertPairs(root string) ClientServerKeyPairs { serverCRLBytes, err := os.ReadFile(serverCRLPath) if err != nil { - log.Fatalf("Could not read server CRL file") + log.Error("Could not read server CRL file") + os.Exit(1) } clientCRLBytes, err := os.ReadFile(clientCRLPath) if err != nil { - log.Fatalf("Could not read client CRL file") + log.Error("Could not read client CRL file") + os.Exit(1) } err = os.WriteFile(combinedCRLPath, append(serverCRLBytes, clientCRLBytes...), permissions) if err != nil { - log.Fatalf("Could not write combined CRL file") + log.Error("Could not write combined CRL file") + os.Exit(1) } return ClientServerKeyPairs{ diff --git a/go/vt/topo/consultopo/election.go b/go/vt/topo/consultopo/election.go index 9f3ad61e5ee..eec98f83a82 100644 --- a/go/vt/topo/consultopo/election.go +++ b/go/vt/topo/consultopo/election.go @@ -18,6 +18,7 @@ package consultopo import ( "context" + "fmt" "path" "github.com/hashicorp/consul/api" @@ -96,7 +97,7 @@ func (mp *consulLeaderParticipation) WaitForLeadership() (context.Context, error lockCancel() // We could have lost the lock. Per consul API, explicitly call Unlock to make sure that session will not be renewed. if err := l.Unlock(); err != nil { - log.Errorf("Leader election(%v) Unlock failed: %v", mp.name, err) + log.Error(fmt.Sprintf("Leader election(%v) Unlock failed: %v", mp.name, err)) } case <-mp.stop: // Stop was called. We stop the context first, @@ -104,7 +105,7 @@ func (mp *consulLeaderParticipation) WaitForLeadership() (context.Context, error // is the primary any more, then we unlock. 
lockCancel() if err := l.Unlock(); err != nil { - log.Errorf("Leader election(%v) Unlock failed: %v", mp.name, err) + log.Error(fmt.Sprintf("Leader election(%v) Unlock failed: %v", mp.name, err)) } close(mp.done) } diff --git a/go/vt/topo/consultopo/lock.go b/go/vt/topo/consultopo/lock.go index a9ca98d6f14..89972b0cd8e 100644 --- a/go/vt/topo/consultopo/lock.go +++ b/go/vt/topo/consultopo/lock.go @@ -18,6 +18,7 @@ package consultopo import ( "context" + "fmt" "path" "time" @@ -216,7 +217,7 @@ func (s *Server) unlock(ctx context.Context, lockPath string) error { // If someone else has the lock, we can't remove it, // but we don't need to. if err != api.ErrLockInUse { - log.Warningf("failed to clean up lock file %v: %v", lockPath, err) + log.Warn(fmt.Sprintf("failed to clean up lock file %v: %v", lockPath, err)) } } diff --git a/go/vt/topo/consultopo/server.go b/go/vt/topo/consultopo/server.go index 70448349927..70011b76497 100644 --- a/go/vt/topo/consultopo/server.go +++ b/go/vt/topo/consultopo/server.go @@ -21,6 +21,7 @@ package consultopo import ( "encoding/json" + "fmt" "os" "strings" "sync" @@ -80,7 +81,7 @@ func getClientCreds() (creds map[string]*ClientAuthCred, err error) { if consulAuthClientStaticFile == "" { // Not configured, nothing to do. - log.Infof("Consul client auth is not set up. consul-auth-static-file was not provided") + log.Info("Consul client auth is not set up. consul-auth-static-file was not provided") return nil, nil } @@ -142,7 +143,7 @@ func NewServer(cell, serverAddr, root string) (*Server, error) { if creds[cell] != nil { cfg.Token = creds[cell].ACLToken } else { - log.Warningf("Client auth not configured for cell: %v", cell) + log.Warn(fmt.Sprintf("Client auth not configured for cell: %v", cell)) } } diff --git a/go/vt/topo/consultopo/server_flaky_test.go b/go/vt/topo/consultopo/server_flaky_test.go index a4d54a19be9..d3ca05218a5 100644 --- a/go/vt/topo/consultopo/server_flaky_test.go +++ b/go/vt/topo/consultopo/server_flaky_test.go @@ -141,11 +141,11 @@ func TestConsulTopo(t *testing.T) { defer func() { // Alerts command did not run successful if err := cmd.Process.Kill(); err != nil { - log.Errorf("cmd process kill has an error: %v", err) + log.Error(fmt.Sprintf("cmd process kill has an error: %v", err)) } // Alerts command did not run successful if err := cmd.Wait(); err != nil { - log.Errorf("cmd wait has an error: %v", err) + log.Error(fmt.Sprintf("cmd wait has an error: %v", err)) } os.Remove(configFilename) @@ -199,11 +199,11 @@ func TestConsulTopoWithChecks(t *testing.T) { defer func() { // Alerts command did not run successful if err := cmd.Process.Kill(); err != nil { - log.Errorf("cmd process kill has an error: %v", err) + log.Error(fmt.Sprintf("cmd process kill has an error: %v", err)) } // Alerts command did not run successful if err := cmd.Wait(); err != nil { - log.Errorf("cmd wait has an error: %v", err) + log.Error(fmt.Sprintf("cmd wait has an error: %v", err)) } os.Remove(configFilename) @@ -244,11 +244,11 @@ func TestConsulTopoWithAuth(t *testing.T) { defer func() { // Alerts command did not run successful if err := cmd.Process.Kill(); err != nil { - log.Errorf("cmd process kill has an error: %v", err) + log.Error(fmt.Sprintf("cmd process kill has an error: %v", err)) } // Alerts command did not run successful if err := cmd.Wait(); err != nil { - log.Errorf("cmd process wait has an error: %v", err) + log.Error(fmt.Sprintf("cmd process wait has an error: %v", err)) } os.Remove(configFilename) }() @@ -381,10 +381,10 @@ func 
TestConsulWatcherStormPrevention(t *testing.T) { cmd, configFilename, serverAddr := startConsul(t, "") defer func() { if err := cmd.Process.Kill(); err != nil { - log.Errorf("cmd process kill has an error: %v", err) + log.Error(fmt.Sprintf("cmd process kill has an error: %v", err)) } if err := cmd.Wait(); err != nil { - log.Errorf("cmd wait has an error: %v", err) + log.Error(fmt.Sprintf("cmd wait has an error: %v", err)) } os.Remove(configFilename) }() diff --git a/go/vt/topo/etcd2topo/election.go b/go/vt/topo/etcd2topo/election.go index 94768b50470..2bbf484ad9e 100644 --- a/go/vt/topo/etcd2topo/election.go +++ b/go/vt/topo/etcd2topo/election.go @@ -18,6 +18,7 @@ package etcd2topo import ( "context" + "fmt" "path" clientv3 "go.etcd.io/etcd/client/v3" @@ -82,7 +83,7 @@ func (mp *etcdLeaderParticipation) WaitForLeadership() (context.Context, error) } if ld != nil { if err := ld.Unlock(context.Background()); err != nil { - log.Errorf("failed to unlock electionPath %v: %v", electionPath, err) + log.Error(fmt.Sprintf("failed to unlock electionPath %v: %v", electionPath, err)) } } lockCancel() diff --git a/go/vt/topo/etcd2topo/lock.go b/go/vt/topo/etcd2topo/lock.go index 33332200257..03cb47e7a10 100644 --- a/go/vt/topo/etcd2topo/lock.go +++ b/go/vt/topo/etcd2topo/lock.go @@ -70,7 +70,7 @@ func (s *Server) newUniqueEphemeralKV(ctx context.Context, cli *clientv3.Client, // node behind for *leaseTTL time. if _, err := cli.Delete(context.Background(), newKey); err != nil { - log.Errorf("cli.Delete(context.Background(), newKey) failed :%v", err) + log.Error(fmt.Sprintf("cli.Delete(context.Background(), newKey) failed :%v", err)) } } return "", 0, convertError(err, newKey) @@ -224,7 +224,7 @@ func (s *Server) lock(ctx context.Context, nodePath, contents string, ttl int) ( // We had an error waiting on the last node. // Revoke our lease, this will delete the file. 
if _, rerr := s.cli.Revoke(context.Background(), lease.ID); rerr != nil { - log.Warningf("Revoke(%d) failed, may have left %v behind: %v", lease.ID, key, rerr) + log.Warn(fmt.Sprintf("Revoke(%d) failed, may have left %v behind: %v", lease.ID, key, rerr)) } return nil, err } diff --git a/go/vt/topo/etcd2topo/server_test.go b/go/vt/topo/etcd2topo/server_test.go index cb84bb5248d..7540b3dce37 100644 --- a/go/vt/topo/etcd2topo/server_test.go +++ b/go/vt/topo/etcd2topo/server_test.go @@ -91,11 +91,11 @@ func startEtcd(t *testing.T, port int) (string, *exec.Cmd) { t.Cleanup(func() { // log error if err := cmd.Process.Kill(); err != nil { - log.Errorf("cmd.Process.Kill() failed : %v", err) + log.Error(fmt.Sprintf("cmd.Process.Kill() failed : %v", err)) } // log error if err := cmd.Wait(); err != nil { - log.Errorf("cmd.wait() failed : %v", err) + log.Error(fmt.Sprintf("cmd.wait() failed : %v", err)) } }) @@ -181,11 +181,11 @@ func startEtcdWithTLS(t *testing.T) (string, *tlstest.ClientServerKeyPairs) { t.Cleanup(func() { // log error if err := cmd.Process.Kill(); err != nil { - log.Errorf("cmd.Process.Kill() failed : %v", err) + log.Error(fmt.Sprintf("cmd.Process.Kill() failed : %v", err)) } // log error if err := cmd.Wait(); err != nil { - log.Errorf("cmd.wait() failed : %v", err) + log.Error(fmt.Sprintf("cmd.wait() failed : %v", err)) } }) diff --git a/go/vt/topo/etcd2topo/watch.go b/go/vt/topo/etcd2topo/watch.go index 0c97294ecae..3eb5764f9e7 100644 --- a/go/vt/topo/etcd2topo/watch.go +++ b/go/vt/topo/etcd2topo/watch.go @@ -18,6 +18,7 @@ package etcd2topo import ( "context" + "fmt" "path" "strings" "time" @@ -115,7 +116,7 @@ func (s *Server) Watch(ctx context.Context, filePath string) (*topo.WatchData, < watchCtx, watchCancel = context.WithCancel(ctx) newWatcher := s.cli.Watch(watchCtx, nodePath, clientv3.WithRev(rev)) if newWatcher == nil { - log.Warningf("watch %v failed and get a nil channel returned, rev: %v", nodePath, rev) + log.Warn(fmt.Sprintf("watch %v failed and get a nil channel returned, rev: %v", nodePath, rev)) } else { watcher = newWatcher } @@ -239,7 +240,7 @@ func (s *Server) WatchRecursive(ctx context.Context, dirpath string) ([]*topo.Wa newWatcher := s.cli.Watch(watchCtx, nodePath, clientv3.WithRev(rev), clientv3.WithPrefix()) if newWatcher == nil { - log.Warningf("watch %v failed and get a nil channel returned, rev: %v", nodePath, rev) + log.Warn(fmt.Sprintf("watch %v failed and get a nil channel returned, rev: %v", nodePath, rev)) } else { watcher = newWatcher } diff --git a/go/vt/topo/faketopo/faketopo.go b/go/vt/topo/faketopo/faketopo.go index c592fc71079..1ac736c5e85 100644 --- a/go/vt/topo/faketopo/faketopo.go +++ b/go/vt/topo/faketopo/faketopo.go @@ -18,6 +18,8 @@ package faketopo import ( "context" + "fmt" + "os" "strings" "sync" "time" @@ -403,11 +405,13 @@ func (f *FakeConn) Close() { func NewFakeTopoServer(ctx context.Context, factory *FakeFactory) *topo.Server { ts, err := topo.NewWithFactory(factory, "" /*serverAddress*/, "" /*root*/) if err != nil { - log.Exitf("topo.NewWithFactory() failed: %v", err) + log.Error(fmt.Sprintf("topo.NewWithFactory() failed: %v", err)) + os.Exit(1) } for cell := range factory.cells { if err := ts.CreateCellInfo(ctx, cell, &topodatapb.CellInfo{}); err != nil { - log.Exitf("ts.CreateCellInfo(%v) failed: %v", cell, err) + log.Error(fmt.Sprintf("ts.CreateCellInfo(%v) failed: %v", cell, err)) + os.Exit(1) } } return ts diff --git a/go/vt/topo/helpers/copy.go b/go/vt/topo/helpers/copy.go index 8fb9e80bfac..9ae688174fd 100644 --- 
a/go/vt/topo/helpers/copy.go +++ b/go/vt/topo/helpers/copy.go @@ -48,9 +48,9 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server, parser *sqlpa if err := toTS.CreateKeyspace(ctx, keyspace, ki.Keyspace); err != nil { if topo.IsErrType(err, topo.NodeExists) { - log.Warningf("keyspace %v already exists", keyspace) + log.Warn(fmt.Sprintf("keyspace %v already exists", keyspace)) } else { - log.Errorf("CreateKeyspace(%v): %v", keyspace, err) + log.Error(fmt.Sprintf("CreateKeyspace(%v): %v", keyspace, err)) } } @@ -59,16 +59,16 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server, parser *sqlpa case err == nil: _, err = vindexes.BuildKeyspace(ksvs.Keyspace, parser) if err != nil { - log.Errorf("BuildKeyspace(%v): %v", keyspace, err) + log.Error(fmt.Sprintf("BuildKeyspace(%v): %v", keyspace, err)) break } if err := toTS.SaveVSchema(ctx, ksvs); err != nil { - log.Errorf("SaveVSchema(%v): %v", keyspace, err) + log.Error(fmt.Sprintf("SaveVSchema(%v): %v", keyspace, err)) } case topo.IsErrType(err, topo.NoNode): // Nothing to do. default: - log.Errorf("GetVSchema(%v): %v", keyspace, err) + log.Error(fmt.Sprintf("GetVSchema(%v): %v", keyspace, err)) } } @@ -96,7 +96,7 @@ func CopyShards(ctx context.Context, fromTS, toTS *topo.Server) error { if err := toTS.CreateShard(ctx, keyspace, shard); err != nil { if topo.IsErrType(err, topo.NodeExists) { - log.Warningf("shard %v/%v already exists", keyspace, shard) + log.Warn(fmt.Sprintf("shard %v/%v already exists", keyspace, shard)) } else { return fmt.Errorf("CreateShard(%v, %v): %w", keyspace, shard, err) } @@ -136,7 +136,7 @@ func CopyTablets(ctx context.Context, fromTS, toTS *topo.Server) error { err = toTS.CreateTablet(ctx, ti.Tablet) if topo.IsErrType(err, topo.NodeExists) { // update the destination tablet - log.Warningf("tablet %v already exists, updating it", tabletAlias) + log.Warn(fmt.Sprintf("tablet %v already exists, updating it", tabletAlias)) _, err = toTS.UpdateTabletFields(ctx, tabletAlias, func(t *topodatapb.Tablet) error { proto.Merge(t, ti.Tablet) return nil @@ -201,7 +201,7 @@ func CopyShardReplications(ctx context.Context, fromTS, toTS *topo.Server) error oldSR.Nodes = nodes return nil }); err != nil { - log.Warningf("UpdateShardReplicationFields(%v, %v, %v): %v", cell, keyspace, shard, err) + log.Warn(fmt.Sprintf("UpdateShardReplicationFields(%v, %v, %v): %v", cell, keyspace, shard, err)) } } } @@ -217,7 +217,7 @@ func CopyRoutingRules(ctx context.Context, fromTS, toTS *topo.Server) error { return fmt.Errorf("GetRoutingRules: %w", err) } if err := toTS.SaveRoutingRules(ctx, rr); err != nil { - log.Errorf("SaveRoutingRules(%v): %v", rr, err) + log.Error(fmt.Sprintf("SaveRoutingRules(%v): %v", rr, err)) } return nil diff --git a/go/vt/topo/keyspace.go b/go/vt/topo/keyspace.go index 3f23cd185fe..fcdf8a93879 100755 --- a/go/vt/topo/keyspace.go +++ b/go/vt/topo/keyspace.go @@ -18,6 +18,7 @@ package topo import ( "context" + "fmt" "path" "sort" "sync" @@ -305,7 +306,7 @@ func (ts *Server) FindAllShardsInKeyspace(ctx context.Context, keyspace string, si, err := ts.GetShard(ctx, keyspace, shard) switch { case IsErrType(err, NoNode): - log.Warningf("GetShard(%s, %s) returned ErrNoNode, consider checking the topology.", keyspace, shard) + log.Warn(fmt.Sprintf("GetShard(%s, %s) returned ErrNoNode, consider checking the topology.", keyspace, shard)) return nil case err == nil: mu.Lock() diff --git a/go/vt/topo/locks.go b/go/vt/topo/locks.go index b2a3957d116..bcf2acf6bfc 100644 --- a/go/vt/topo/locks.go +++ 
b/go/vt/topo/locks.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "os" "os/user" "sync" @@ -163,7 +164,7 @@ func (l *Lock) lock(ctx context.Context, ts *Server, lt iTopoLock, opts ...LockO for _, o := range opts { o.apply(&l.Options) } - log.Infof("Locking %s %s for action %s with options: %+v", lt.Type(), lt.ResourceName(), l.Action, l.Options) + log.Info(fmt.Sprintf("Locking %s %s for action %s with options: %+v", lt.Type(), lt.ResourceName(), l.Action, l.Options)) ctx, cancel := context.WithTimeout(ctx, LockTimeout) defer cancel() @@ -212,10 +213,10 @@ func (l *Lock) unlock(ctx context.Context, lt iTopoLock, lockDescriptor LockDesc // first update the actionNode if actionError != nil { - log.Infof("Unlocking %v %v for action %v with error %v", lt.Type(), lt.ResourceName(), l.Action, actionError) + log.Info(fmt.Sprintf("Unlocking %v %v for action %v with error %v", lt.Type(), lt.ResourceName(), l.Action, actionError)) l.Status = "Error: " + actionError.Error() } else { - log.Infof("Unlocking %v %v for successful action %v", lt.Type(), lt.ResourceName(), l.Action) + log.Info(fmt.Sprintf("Unlocking %v %v for successful action %v", lt.Type(), lt.ResourceName(), l.Action)) l.Status = "Done" } return lockDescriptor.Unlock(ctx) @@ -253,7 +254,7 @@ func (ts *Server) internalLock(ctx context.Context, lt iTopoLock, action string, if _, ok := i.info[lt.ResourceName()]; !ok { if *finalErr != nil { - log.Errorf("trying to unlock %v %v multiple times", lt.Type(), lt.ResourceName()) + log.Error(fmt.Sprintf("trying to unlock %v %v multiple times", lt.Type(), lt.ResourceName())) } else { *finalErr = vterrors.Errorf(vtrpc.Code_INTERNAL, "trying to unlock %v %v multiple times", lt.Type(), lt.ResourceName()) } @@ -265,7 +266,7 @@ func (ts *Server) internalLock(ctx context.Context, lt iTopoLock, action string, if *finalErr != nil { if err != nil { // both error are set, just log the unlock error - log.Warningf("unlock %v %v failed: %v", lt.Type(), lt.ResourceName(), err) + log.Warn(fmt.Sprintf("unlock %v %v failed: %v", lt.Type(), lt.ResourceName(), err)) } } else { *finalErr = err diff --git a/go/vt/topo/memorytopo/election.go b/go/vt/topo/memorytopo/election.go index 1b6d2292f5c..e1d295220e1 100644 --- a/go/vt/topo/memorytopo/election.go +++ b/go/vt/topo/memorytopo/election.go @@ -18,6 +18,7 @@ package memorytopo import ( "context" + "fmt" "path" "vitess.io/vitess/go/vt/log" @@ -107,7 +108,7 @@ func (mp *cLeaderParticipation) WaitForLeadership() (context.Context, error) { go func() { <-mp.stop if err := ld.Unlock(context.Background()); err != nil { - log.Errorf("failed to unlock LockDescriptor %v: %v", electionPath, err) + log.Error(fmt.Sprintf("failed to unlock LockDescriptor %v: %v", electionPath, err)) } lockCancel() close(mp.done) diff --git a/go/vt/topo/memorytopo/memorytopo.go b/go/vt/topo/memorytopo/memorytopo.go index e11ea783a71..f96c7220690 100644 --- a/go/vt/topo/memorytopo/memorytopo.go +++ b/go/vt/topo/memorytopo/memorytopo.go @@ -22,7 +22,9 @@ package memorytopo import ( "context" "errors" + "fmt" "math/rand/v2" + "os" "regexp" "strings" "sync" @@ -283,12 +285,14 @@ func NewServerAndFactory(ctx context.Context, cells ...string) (*topo.Server, *F ts, err := topo.NewWithFactory(f, "" /*serverAddress*/, "" /*root*/) if err != nil { - log.Exitf("topo.NewWithFactory() failed: %v", err) + log.Error(fmt.Sprintf("topo.NewWithFactory() failed: %v", err)) + os.Exit(1) } for _, cell := range cells { f.cells[cell] = f.newDirectory(cell, nil) if err := ts.CreateCellInfo(ctx, 
cell, &topodatapb.CellInfo{}); err != nil { - log.Exitf("ts.CreateCellInfo(%v) failed: %v", cell, err) + log.Error(fmt.Sprintf("ts.CreateCellInfo(%v) failed: %v", cell, err)) + os.Exit(1) } } return ts, f diff --git a/go/vt/topo/replication.go b/go/vt/topo/replication.go index ace6b4445da..488f3571d61 100644 --- a/go/vt/topo/replication.go +++ b/go/vt/topo/replication.go @@ -18,6 +18,7 @@ package topo import ( "context" + "fmt" "path" "google.golang.org/protobuf/proto" @@ -93,7 +94,7 @@ func UpdateShardReplicationRecord(ctx context.Context, ts *Server, keyspace, sha for _, node := range (*sr).Nodes { if proto.Equal(node.TabletAlias, tabletAlias) { if found { - log.Warningf("Found a second ShardReplication_Node for tablet %v, deleting it", tabletAlias) + log.Warn(fmt.Sprintf("Found a second ShardReplication_Node for tablet %v, deleting it", tabletAlias)) modified = true continue } diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go index 21814255206..e4b1458a432 100644 --- a/go/vt/topo/server.go +++ b/go/vt/topo/server.go @@ -45,6 +45,7 @@ package topo import ( "context" "fmt" + "os" "path" "slices" "sync" @@ -209,7 +210,8 @@ func registerTopoFlags(fs *pflag.FlagSet) { // Call this in the 'init' function in your topology implementation module. func RegisterFactory(name string, factory Factory) { if factories[name] != nil { - log.Fatalf("Duplicate topo.Factory registration for %v", name) + log.Error(fmt.Sprintf("Duplicate topo.Factory registration for %v", name)) + os.Exit(1) } factories[name] = factory } @@ -257,14 +259,17 @@ func OpenServer(implementation, serverAddress, root string) (*Server, error) { // for implementation, address and root. It log.Exits out if an error occurs. func Open() *Server { if topoGlobalServerAddress == "" { - log.Exitf("topo-global-server-address must be configured") + log.Error("topo-global-server-address must be configured") + os.Exit(1) } if topoGlobalRoot == "" { - log.Exit("topo-global-root must be non-empty") + log.Error("topo-global-root must be non-empty") + os.Exit(1) } ts, err := OpenServer(topoImplementation, topoGlobalServerAddress, topoGlobalRoot) if err != nil { - log.Exitf("Failed to open topo server (%v,%v,%v): %v", topoImplementation, topoGlobalServerAddress, topoGlobalRoot, err) + log.Error(fmt.Sprintf("Failed to open topo server (%v,%v,%v): %v", topoImplementation, topoGlobalServerAddress, topoGlobalRoot, err)) + os.Exit(1) } return ts } diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go index 7606afd5473..3acd1659206 100644 --- a/go/vt/topo/shard.go +++ b/go/vt/topo/shard.go @@ -20,6 +20,7 @@ import ( "context" "encoding/hex" "errors" + "fmt" "path" "slices" "sort" @@ -414,7 +415,7 @@ func (si *ShardInfo) UpdateDeniedTables(ctx context.Context, tabletType topodata if remove { // We tried to remove something that doesn't exist, log a warning. // But we know that our work is done. - log.Warningf("Trying to remove TabletControl.DeniedTables for missing type %v in shard %v/%v", tabletType, si.keyspace, si.shardName) + log.Warn(fmt.Sprintf("Trying to remove TabletControl.DeniedTables for missing type %v in shard %v/%v", tabletType, si.keyspace, si.shardName)) return nil } @@ -458,7 +459,7 @@ func (si *ShardInfo) updatePrimaryTabletControl(tc *topodatapb.Shard_TabletContr if remove { if len(newTables) != 0 { // These tables did not exist in the denied list so we don't need to remove them. 
-			log.Warningf("%s:%s", dlTablesNotPresent, strings.Join(newTables, ","))
+			log.Warn(fmt.Sprintf("%s:%s", dlTablesNotPresent, strings.Join(newTables, ",")))
 		}
 		var newDenyList []string
 		if len(tables) != 0 { // legacy uses
@@ -478,7 +479,7 @@ func (si *ShardInfo) updatePrimaryTabletControl(tc *topodatapb.Shard_TabletContr
 	if len(newTables) != len(tables) {
 		// Some of the tables already existed in the DeniedTables list so we don't
 		// need to add them.
-		log.Warningf("%s:%s", dlTablesAlreadyPresent, strings.Join(tables, ","))
+		log.Warn(fmt.Sprintf("%s:%s", dlTablesAlreadyPresent, strings.Join(tables, ",")))
 		// We do need to merge the lists, however.
 		tables = append(tables, newTables...)
 		tc.DeniedTables = append(tc.DeniedTables, tables...)
@@ -599,7 +600,7 @@ func (ts *Server) FindAllTabletAliasesInShardByCell(ctx context.Context, keyspac
 	wg.Wait()
 	err = nil
 	if rec.HasErrors() {
-		log.Warningf("FindAllTabletAliasesInShard(%v,%v): got partial result: %v", keyspace, shard, rec.Error())
+		log.Warn(fmt.Sprintf("FindAllTabletAliasesInShard(%v,%v): got partial result: %v", keyspace, shard, rec.Error()))
 		err = NewError(PartialResult, shard)
 	}
diff --git a/go/vt/topo/srv_vschema.go b/go/vt/topo/srv_vschema.go
index e698c4f8851..21dad99d796 100644
--- a/go/vt/topo/srv_vschema.go
+++ b/go/vt/topo/srv_vschema.go
@@ -183,7 +183,7 @@ func (ts *Server) RebuildSrvVSchema(ctx context.Context, cells []string) error {
 			mu.Lock()
 			defer mu.Unlock()
 			if err != nil {
-				log.Errorf("%v: GetVSchema(%v) failed", err, keyspace)
+				log.Error(fmt.Sprintf("%v: GetVSchema(%v) failed", err, keyspace))
 				finalErr = err
 				return
 			}
@@ -225,7 +225,7 @@ func (ts *Server) RebuildSrvVSchema(ctx context.Context, cells []string) error {
 		go func(cell string) {
 			defer wg.Done()
 			if err := ts.UpdateSrvVSchema(ctx, cell, srvVSchema); err != nil {
-				log.Errorf("%v: UpdateSrvVSchema(%v) failed", err, cell)
+				log.Error(fmt.Sprintf("%v: UpdateSrvVSchema(%v) failed", err, cell))
 				mu.Lock()
 				finalErr = err
 				mu.Unlock()
diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go
index 8cc134bb69c..038ddb8ed87 100644
--- a/go/vt/topo/tablet.go
+++ b/go/vt/topo/tablet.go
@@ -173,7 +173,7 @@ func NewTabletInfo(tablet *topodatapb.Tablet, version Version) *TabletInfo {
 func (ts *Server) GetTablet(ctx context.Context, alias *topodatapb.TabletAlias) (*TabletInfo, error) {
 	conn, err := ts.ConnForCell(ctx, alias.Cell)
 	if err != nil {
-		log.Errorf("unable to get connection for cell %q: %v", alias.Cell, err)
+		log.Error(fmt.Sprintf("unable to get connection for cell %q: %v", alias.Cell, err))
 		return nil, err
 	}
@@ -184,7 +184,7 @@ func (ts *Server) GetTablet(ctx context.Context, alias *topodatapb.TabletAlias)
 	tabletPath := path.Join(TabletsPath, topoproto.TabletAliasString(alias), TabletFile)
 	data, version, err := conn.Get(ctx, tabletPath)
 	if err != nil {
-		log.Errorf("unable to connect to tablet %q: %v", alias, err)
+		log.Error(fmt.Sprintf("unable to connect to tablet %q: %v", alias, err))
 		return nil, err
 	}
 	tablet := &topodatapb.Tablet{}
@@ -320,7 +320,7 @@ func (ts *Server) GetTabletsIndividuallyByCell(ctx context.Context, cell string,
 		if !ok {
 			// tablet disappeared on us (GetTabletMap ignores
 			// topo.ErrNoNode), just echo a warning
-			log.Warningf("failed to load tablet %v", tabletAlias)
+			log.Warn(fmt.Sprintf("failed to load tablet %v", tabletAlias))
 		} else {
 			tablets = append(tablets, tabletInfo)
 		}
@@ -510,7 +510,7 @@ func (ts *Server) GetTabletMap(ctx context.Context, tabletAliases []*topodatapb.
mu.Lock() defer mu.Unlock() if err != nil { - log.Warningf("%v: %v", tabletAlias, err) + log.Warn(fmt.Sprintf("%v: %v", tabletAlias, err)) // There can be data races removing nodes - ignore them for now. // We only need to set this on first error. if returnErr == nil && !IsErrType(err, NoNode) { @@ -560,7 +560,7 @@ func (ts *Server) GetTabletList(ctx context.Context, tabletAliases []*topodatapb mu.Lock() defer mu.Unlock() if err != nil { - log.Warningf("%v: %v", tabletAlias, err) + log.Warn(fmt.Sprintf("%v: %v", tabletAlias, err)) // There can be data races removing nodes - ignore them for now. // We only need to set this on first error. if returnErr == nil && !IsErrType(err, NoNode) { diff --git a/go/vt/topo/vschema.go b/go/vt/topo/vschema.go index 9fbf646a74e..d093bf5b529 100644 --- a/go/vt/topo/vschema.go +++ b/go/vt/topo/vschema.go @@ -18,6 +18,7 @@ package topo import ( "context" + "fmt" "path" "vitess.io/vitess/go/vt/log" @@ -65,18 +66,18 @@ func (ts *Server) SaveVSchema(ctx context.Context, ksvs *KeyspaceVSchemaInfo) er version, err := ts.globalCell.Update(ctx, nodePath, data, ksvs.version) if err != nil { - log.Errorf("failed to update vschema for keyspace %s: %v", ksvs.Name, err) + log.Error(fmt.Sprintf("failed to update vschema for keyspace %s: %v", ksvs.Name, err)) return err } ksvs.version = version - log.Infof("successfully updated vschema for keyspace %s: %+v", ksvs.Name, ksvs.Keyspace) + log.Info(fmt.Sprintf("successfully updated vschema for keyspace %s: %+v", ksvs.Name, ksvs.Keyspace)) return nil } // DeleteVSchema delete the keyspace if it exists func (ts *Server) DeleteVSchema(ctx context.Context, keyspace string) error { - log.Infof("deleting vschema for keyspace %s", keyspace) + log.Info("deleting vschema for keyspace " + keyspace) if err := ctx.Err(); err != nil { return err } @@ -112,7 +113,7 @@ func (ts *Server) GetVSchema(ctx context.Context, keyspace string) (*KeyspaceVSc func (ts *Server) EnsureVSchema(ctx context.Context, keyspace string) error { ksvs, err := ts.GetVSchema(ctx, keyspace) if err != nil && !IsErrType(err, NoNode) { - log.Infof("error in getting vschema for keyspace %s: %v", keyspace, err) + log.Info(fmt.Sprintf("error in getting vschema for keyspace %s: %v", keyspace, err)) } if ksvs == nil || ksvs.Keyspace == nil || IsErrType(err, NoNode) { err = ts.SaveVSchema(ctx, &KeyspaceVSchemaInfo{ @@ -124,7 +125,7 @@ func (ts *Server) EnsureVSchema(ctx context.Context, keyspace string) error { }, }) if err != nil { - log.Errorf("could not create blank vschema: %v", err) + log.Error(fmt.Sprintf("could not create blank vschema: %v", err)) return err } } diff --git a/go/vt/topo/wildcards.go b/go/vt/topo/wildcards.go index 45192387aa8..f6c9d21f517 100644 --- a/go/vt/topo/wildcards.go +++ b/go/vt/topo/wildcards.go @@ -18,6 +18,7 @@ package topo import ( "context" + "fmt" "path" "strings" "sync" @@ -162,7 +163,7 @@ func (ts *Server) ResolveWildcards(ctx context.Context, cell string, paths []str if err != nil { mu.Lock() if firstError != nil { - log.Infof("Multiple error: %v", err) + log.Info(fmt.Sprintf("Multiple error: %v", err)) } else { firstError = err } @@ -235,7 +236,7 @@ func (ts *Server) resolveRecursive(ctx context.Context, cell string, parts []str if err != nil { mu.Lock() if firstError != nil { - log.Infof("Multiple error: %v", err) + log.Info(fmt.Sprintf("Multiple error: %v", err)) } else { firstError = err } diff --git a/go/vt/topo/zk2topo/election.go b/go/vt/topo/zk2topo/election.go index e82a31d0756..4eb51dfc4a3 100644 --- 
a/go/vt/topo/zk2topo/election.go +++ b/go/vt/topo/zk2topo/election.go @@ -18,6 +18,7 @@ package zk2topo import ( "context" + "fmt" "path" "sort" @@ -141,23 +142,23 @@ func (mp *zkLeaderParticipation) watchLeadership(ctx context.Context, conn *ZkCo // get to work watching our own proposal _, stats, events, err := conn.GetW(ctx, proposal) if err != nil { - log.Warningf("Cannot watch proposal while being Leader, stopping: %v", err) + log.Warn(fmt.Sprintf("Cannot watch proposal while being Leader, stopping: %v", err)) return } select { case <-mp.stopCtx.Done(): // we were asked to stop, we're done. Remove our node. - log.Infof("Canceling leadership '%v' upon Stop.", mp.name) + log.Info(fmt.Sprintf("Canceling leadership '%v' upon Stop.", mp.name)) if err := conn.Delete(ctx, proposal, stats.Version); err != nil { - log.Warningf("Error deleting our proposal %v: %v", proposal, err) + log.Warn(fmt.Sprintf("Error deleting our proposal %v: %v", proposal, err)) } close(mp.done) case e := <-events: // something happened to our proposal, that can only be bad. - log.Warningf("Watch on proposal triggered, canceling leadership '%v': %v", mp.name, e) + log.Warn(fmt.Sprintf("Watch on proposal triggered, canceling leadership '%v': %v", mp.name, e)) } } diff --git a/go/vt/topo/zk2topo/lock.go b/go/vt/topo/zk2topo/lock.go index 00486f66169..c8d9bb74ff9 100644 --- a/go/vt/topo/zk2topo/lock.go +++ b/go/vt/topo/zk2topo/lock.go @@ -18,6 +18,7 @@ package zk2topo import ( "context" + "fmt" "path" "time" @@ -106,36 +107,36 @@ func (zs *Server) lock(ctx context.Context, dirPath, contents string) (topo.Lock } // Regardless of the reason, try to cleanup. - log.Warningf("Failed to obtain lock: %v", err) + log.Warn(fmt.Sprintf("Failed to obtain lock: %v", err)) cleanupCtx, cancel := context.WithTimeout(context.Background(), baseTimeout) defer cancel() if err := zs.conn.Delete(cleanupCtx, nodePath, -1); err != nil { - log.Warningf("Failed to cleanup unsuccessful lock path %s: %v", nodePath, err) + log.Warn(fmt.Sprintf("Failed to cleanup unsuccessful lock path %s: %v", nodePath, err)) } // Show the other locks in the directory dir := path.Dir(nodePath) children, _, err := zs.conn.Children(cleanupCtx, dir) if err != nil { - log.Warningf("Failed to get children of %v: %v", dir, err) + log.Warn(fmt.Sprintf("Failed to get children of %v: %v", dir, err)) return nil, errToReturn } if len(children) == 0 { - log.Warningf("No other locks present, you may just try again now.") + log.Warn("No other locks present, you may just try again now.") return nil, errToReturn } childPath := path.Join(dir, children[0]) data, _, err := zs.conn.Get(cleanupCtx, childPath) if err != nil { - log.Warningf("Failed to get first locks node %v (may have just ended): %v", childPath, err) + log.Warn(fmt.Sprintf("Failed to get first locks node %v (may have just ended): %v", childPath, err)) return nil, errToReturn } - log.Warningf("------ Most likely blocking lock: %v\n%v", childPath, string(data)) + log.Warn(fmt.Sprintf("------ Most likely blocking lock: %v\n%v", childPath, string(data))) return nil, errToReturn } diff --git a/go/vt/topo/zk2topo/utils.go b/go/vt/topo/zk2topo/utils.go index 357f814b22c..eff409b63d9 100644 --- a/go/vt/topo/zk2topo/utils.go +++ b/go/vt/topo/zk2topo/utils.go @@ -129,7 +129,7 @@ func ResolveWildcards(ctx context.Context, zconn *ZkConn, zkPaths []string) ([]s if err != nil { mu.Lock() if firstError != nil { - log.Infof("Multiple error: %v", err) + log.Info(fmt.Sprintf("Multiple error: %v", err)) } else { firstError = err } @@ 
-201,7 +201,7 @@ func resolveRecursive(ctx context.Context, zconn *ZkConn, parts []string, toplev if err != nil { mu.Lock() if firstError != nil { - log.Infof("Multiple error: %v", err) + log.Info(fmt.Sprintf("Multiple error: %v", err)) } else { firstError = err } diff --git a/go/vt/topo/zk2topo/zk_conn.go b/go/vt/topo/zk2topo/zk_conn.go index 79392aa168f..0040219bf52 100644 --- a/go/vt/topo/zk2topo/zk_conn.go +++ b/go/vt/topo/zk2topo/zk_conn.go @@ -21,6 +21,7 @@ import ( "crypto/tls" "crypto/x509" "errors" + "fmt" "math/rand/v2" "net" "os" @@ -271,7 +272,7 @@ func (c *ZkConn) withRetry(ctx context.Context, action func(conn *zk.Conn) error c.conn = nil } c.mu.Unlock() - log.Infof("zk conn: got ErrConnectionClosed for addr %v: closing", c.addr) + log.Info(fmt.Sprintf("zk conn: got ErrConnectionClosed for addr %v: closing", c.addr)) conn.Close() } return @@ -302,18 +303,18 @@ func (c *ZkConn) maybeAddAuth(ctx context.Context) { } authInfoBytes, err := os.ReadFile(authFile) if err != nil { - log.Errorf("failed to read topo-zk-auth-file: %v", err) + log.Error(fmt.Sprintf("failed to read topo-zk-auth-file: %v", err)) return } authInfo := strings.TrimRight(string(authInfoBytes), "\n") authInfoParts := strings.SplitN(authInfo, ":", 2) if len(authInfoParts) != 2 { - log.Errorf("failed to parse topo-zk-auth-file contents, expected format : but saw: %s", authInfo) + log.Error("failed to parse topo-zk-auth-file contents, expected format : but saw: " + authInfo) return } err = c.conn.AddAuth(authInfoParts[0], []byte(authInfoParts[1])) if err != nil { - log.Errorf("failed to add auth from topo-zk-auth-file: %v", err) + log.Error(fmt.Sprintf("failed to add auth from topo-zk-auth-file: %v", err)) return } } @@ -333,10 +334,10 @@ func (c *ZkConn) handleSessionEvents(conn *zk.Conn, session <-chan zk.Event) { } c.mu.Unlock() conn.Close() - log.Infof("zk conn: session for addr %v ended: %v", c.addr, event) + log.Info(fmt.Sprintf("zk conn: session for addr %v ended: %v", c.addr, event)) return } - log.Infof("zk conn: session for addr %v event: %v", c.addr, event) + log.Info(fmt.Sprintf("zk conn: session for addr %v event: %v", c.addr, event)) } } @@ -349,20 +350,23 @@ func dialZk(ctx context.Context, addr string) (*zk.Conn, <-chan zk.Event, error) // If TLS is enabled use a TLS enabled dialer option if certPath != "" && keyPath != "" { if strings.Contains(addr, ",") { - log.Fatalf("This TLS zk code requires that the all the zk servers validate to a single server name.") + log.Error("This TLS zk code requires that the all the zk servers validate to a single server name.") + os.Exit(1) } serverName := strings.Split(addr, ":")[0] - log.Infof("Using TLS ZK, connecting to %v server name %v", addr, serverName) + log.Info(fmt.Sprintf("Using TLS ZK, connecting to %v server name %v", addr, serverName)) cert, err := tls.LoadX509KeyPair(certPath, keyPath) if err != nil { - log.Fatalf("Unable to load cert %v and key %v, err %v", certPath, keyPath, err) + log.Error(fmt.Sprintf("Unable to load cert %v and key %v, err %v", certPath, keyPath, err)) + os.Exit(1) } clientCACert, err := os.ReadFile(caPath) if err != nil { - log.Fatalf("Unable to open ca cert %v, err %v", caPath, err) + log.Error(fmt.Sprintf("Unable to open ca cert %v, err %v", caPath, err)) + os.Exit(1) } clientCertPool := x509.NewCertPool() diff --git a/go/vt/topotools/mirror_rules.go b/go/vt/topotools/mirror_rules.go index 067b0544dab..0a07db6a5c9 100644 --- a/go/vt/topotools/mirror_rules.go +++ b/go/vt/topotools/mirror_rules.go @@ -18,6 +18,7 @@ package 
topotools import ( "context" + "fmt" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" @@ -55,7 +56,7 @@ func GetMirrorRules(ctx context.Context, ts *topo.Server) (map[string]map[string // SaveMirrorRules converts a mapping of fromTable=>[]toTables into a // vschemapb.MirrorRules protobuf message and saves it in the topology. func SaveMirrorRules(ctx context.Context, ts *topo.Server, rules map[string]map[string]float32) error { - log.V(2).Infof("Saving mirror rules %v\n", rules) + log.Debug(fmt.Sprintf("Saving mirror rules %v\n", rules)) rrs := &vschemapb.MirrorRules{Rules: make([]*vschemapb.MirrorRule, 0)} for fromTable, mrs := range rules { diff --git a/go/vt/topotools/routing_rules.go b/go/vt/topotools/routing_rules.go index 7eff52f1d66..77f11564128 100644 --- a/go/vt/topotools/routing_rules.go +++ b/go/vt/topotools/routing_rules.go @@ -56,7 +56,7 @@ func GetRoutingRules(ctx context.Context, ts *topo.Server) (map[string][]string, // SaveRoutingRules converts a mapping of fromTable=>[]toTables into a // vschemapb.RoutingRules protobuf message and saves it in the topology. func SaveRoutingRules(ctx context.Context, ts *topo.Server, rules map[string][]string) error { - log.Infof("Saving routing rules %v\n", rules) + log.Info(fmt.Sprintf("Saving routing rules %v\n", rules)) rrs := &vschemapb.RoutingRules{Rules: make([]*vschemapb.RoutingRule, 0, len(rules))} for from, to := range rules { @@ -109,7 +109,7 @@ func GetShardRoutingRules(ctx context.Context, ts *topo.Server) (map[string]stri // SaveShardRoutingRules converts a mapping of fromKeyspace.Shard=>toKeyspace into a // vschemapb.ShardRoutingRules protobuf message and saves it in the topology. func SaveShardRoutingRules(ctx context.Context, ts *topo.Server, srr map[string]string) error { - log.Infof("Saving shard routing rules %v\n", srr) + log.Info(fmt.Sprintf("Saving shard routing rules %v\n", srr)) srs := &vschemapb.ShardRoutingRules{Rules: make([]*vschemapb.ShardRoutingRule, 0, len(srr))} for from, to := range srr { diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go index 67687e83511..500ed75fc40 100644 --- a/go/vt/topotools/tablet.go +++ b/go/vt/topotools/tablet.go @@ -263,7 +263,7 @@ func DeleteTablet(ctx context.Context, ts *topo.Server, tablet *topodatapb.Table err = nil } if err != nil { - log.Warningf("remove replication data for %v failed: %v", topoproto.TabletAliasString(tablet.Alias), err) + log.Warn(fmt.Sprintf("remove replication data for %v failed: %v", topoproto.TabletAliasString(tablet.Alias), err)) } } diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index c5bd172c226..89b1271c7b9 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -284,12 +284,12 @@ func (api *API) ServeHTTP(w http.ResponseWriter, r *http.Request) { c, id, err := dynamic.ClusterFromString(r.Context(), urlDecoded) if id != "" { if err != nil { - log.Warningf("failed to extract valid cluster from cookie; attempting to use existing cluster with id=%s; error: %s", id, err) + log.Warn(fmt.Sprintf("failed to extract valid cluster from cookie; attempting to use existing cluster with id=%s; error: %s", id, err)) } dynamicAPI = api.WithCluster(c, id) } else { - log.Warningf("failed to unmarshal dynamic cluster spec from cookie; falling back to static API; error: %s", err) + log.Warn(fmt.Sprintf("failed to unmarshal dynamic cluster spec from cookie; falling back to static API; error: %s", err)) } } } @@ -325,14 +325,14 @@ func (api *API) WithCluster(c *cluster.Cluster, id string) dynamic.API { if exists { isEqual, err 
:= existingCluster.Equal(c) if err != nil { - log.Errorf("Error checking for existing cluster %s equality with new cluster %s: %v", existingCluster.ID, id, err) + log.Error(fmt.Sprintf("Error checking for existing cluster %s equality with new cluster %s: %v", existingCluster.ID, id, err)) } shouldAddCluster = shouldAddCluster || !isEqual } if shouldAddCluster { if existingCluster != nil { if err := existingCluster.Close(); err != nil { - log.Errorf("%s; some connections and goroutines may linger", err.Error()) + log.Error(err.Error() + "; some connections and goroutines may linger") } idx := stdsort.Search(len(api.clusters), func(i int) bool { @@ -351,7 +351,7 @@ func (api *API) WithCluster(c *cluster.Cluster, id string) dynamic.API { api.clusterCache.Set(id, c, cache.DefaultExpiration) } else { - log.Infof("API already has cluster with id %s, using that instead", id) + log.Info(fmt.Sprintf("API already has cluster with id %s, using that instead", id)) } } @@ -462,14 +462,14 @@ func (api *API) EjectDynamicCluster(key string, value any) { if ok { delete(api.clusterMap, key) if err := c.Close(); err != nil { - log.Errorf("%s; some connections and goroutines may linger", err.Error()) + log.Error(err.Error() + "; some connections and goroutines may linger") } } // Maintain order of clusters when removing dynamic cluster clusterIndex := stdsort.Search(len(api.clusters), func(i int) bool { return api.clusters[i].ID == key }) if clusterIndex >= len(api.clusters) || clusterIndex < 0 { - log.Errorf("Cannot remove cluster %s from api.clusters. Cluster index %d is out of range for clusters slice of %d length.", key, clusterIndex, len(api.clusters)) + log.Error(fmt.Sprintf("Cannot remove cluster %s from api.clusters. Cluster index %d is out of range for clusters slice of %d length.", key, clusterIndex, len(api.clusters))) } api.clusters = append(api.clusters[:clusterIndex], api.clusters[clusterIndex+1:]...) @@ -766,7 +766,7 @@ func (api *API) FindSchema(ctx context.Context, req *vtadminpb.FindSchemaRequest } } - log.Infof("cluster %s has no tables named %s", c.ID, req.Table) + log.Info(fmt.Sprintf("cluster %s has no tables named %s", c.ID, req.Table)) }(c) } @@ -2660,7 +2660,7 @@ func (api *API) VExplain(ctx context.Context, req *vtadminpb.VExplainRequest) (* // VTExplain is part of the vtadminpb.VTAdminServer interface. func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) (*vtadminpb.VTExplainResponse, error) { // TODO (andrew): https://github.com/vitessio/vitess/issues/12161. - log.Warningf("VTAdminServer.VTExplain is deprecated; please use a vexplain query instead. For more details, see https://vitess.io/docs/user-guides/sql/vexplain/.") + log.Warn("VTAdminServer.VTExplain is deprecated; please use a vexplain query instead. 
For more details, see https://vitess.io/docs/user-guides/sql/vexplain/.") span, ctx := trace.NewSpan(ctx, "API.VTExplain") defer span.Finish() @@ -2695,7 +2695,7 @@ func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) defer api.vtexplainLock.Unlock() lockWaitTime := time.Since(lockWaitStart) - log.Infof("vtexplain lock wait time: %s", lockWaitTime) + log.Info(fmt.Sprintf("vtexplain lock wait time: %s", lockWaitTime)) span.Annotate("vtexplain_lock_wait_time", lockWaitTime.String()) diff --git a/go/vt/vtadmin/cache/cache.go b/go/vt/vtadmin/cache/cache.go index 7278990f9f1..76897bde754 100644 --- a/go/vt/vtadmin/cache/cache.go +++ b/go/vt/vtadmin/cache/cache.go @@ -20,6 +20,7 @@ package cache import ( "context" + "fmt" "sync" "time" @@ -119,17 +120,17 @@ type Cache[Key Keyer, Value any] struct { // enqueued (via EnqueueBackfill), fillFunc will be called with that request. func New[Key Keyer, Value any](fillFunc func(ctx context.Context, req Key) (Value, error), cfg Config) *Cache[Key, Value] { if cfg.BackfillEnqueueWaitTime <= 0 { - log.Warningf("BackfillEnqueueWaitTime (%v) must be positive, defaulting to %v", cfg.BackfillEnqueueWaitTime, DefaultBackfillEnqueueWaitTime) + log.Warn(fmt.Sprintf("BackfillEnqueueWaitTime (%v) must be positive, defaulting to %v", cfg.BackfillEnqueueWaitTime, DefaultBackfillEnqueueWaitTime)) cfg.BackfillEnqueueWaitTime = DefaultBackfillEnqueueWaitTime } if cfg.BackfillRequestTTL <= 0 { - log.Warningf("BackfillRequestTTL (%v) must be positive, defaulting to %v", cfg.BackfillRequestTTL, DefaultBackfillRequestTTL) + log.Warn(fmt.Sprintf("BackfillRequestTTL (%v) must be positive, defaulting to %v", cfg.BackfillRequestTTL, DefaultBackfillRequestTTL)) cfg.BackfillRequestTTL = DefaultBackfillRequestTTL } if cfg.BackfillQueueSize < 0 { - log.Warningf("BackfillQueueSize (%v) must be positive, defaulting to %v", cfg.BackfillQueueSize, DefaultBackfillQueueSize) + log.Warn(fmt.Sprintf("BackfillQueueSize (%v) must be positive, defaulting to %v", cfg.BackfillQueueSize, DefaultBackfillQueueSize)) cfg.BackfillQueueSize = DefaultBackfillQueueSize } @@ -224,7 +225,7 @@ func (c *Cache[Key, Value]) backfill() { if req.requestedAt.Add(c.cfg.BackfillRequestTTL).Before(time.Now()) { // We took too long to get to this request, per config options. - log.Warningf("backfill for %s requested at %s; discarding due to exceeding TTL (%s)", req.k.Key(), req.requestedAt, c.cfg.BackfillRequestTTL) + log.Warn(fmt.Sprintf("backfill for %s requested at %s; discarding due to exceeding TTL (%s)", req.k.Key(), req.requestedAt, c.cfg.BackfillRequestTTL)) continue } @@ -235,7 +236,7 @@ func (c *Cache[Key, Value]) backfill() { if !t.IsZero() && t.Add(c.cfg.BackfillRequestDuplicateInterval).After(time.Now()) { // We recently added a value for this key to the cache, either via // another backfill request, or directly via a call to Add. - log.Infof("filled cache for %s less than %s ago (at %s)", key, c.cfg.BackfillRequestDuplicateInterval, t.UTC()) + log.Info(fmt.Sprintf("filled cache for %s less than %s ago (at %s)", key, c.cfg.BackfillRequestDuplicateInterval, t.UTC())) c.m.Unlock() continue } @@ -252,14 +253,14 @@ func (c *Cache[Key, Value]) backfill() { val, err := c.fillFunc(c.ctx, req.k) if err != nil { - log.Errorf("backfill failed for key %s: %s", key, err) + log.Error(fmt.Sprintf("backfill failed for key %s: %s", key, err)) // TODO: consider re-requesting with a retry-counter paired with a config to give up after N attempts continue } // Finally, store the value. 
if err := c.add(key, val, cache.DefaultExpiration); err != nil { - log.Warningf("failed to add (%s, %+v) to cache: %s", key, val, err) + log.Warn(fmt.Sprintf("failed to add (%s, %+v) to cache: %s", key, val, err)) } } } diff --git a/go/vt/vtadmin/cache/refresh.go b/go/vt/vtadmin/cache/refresh.go index e4f4da8fe06..75d136ccd9c 100644 --- a/go/vt/vtadmin/cache/refresh.go +++ b/go/vt/vtadmin/cache/refresh.go @@ -18,6 +18,7 @@ package cache import ( "context" + "fmt" "net/http" "strconv" "strings" @@ -70,7 +71,7 @@ func ShouldRefreshFromIncomingContext(ctx context.Context) bool { shouldRefresh, err := strconv.ParseBool(vals[0]) if err != nil { - log.Warningf("failed to parse %s metadata key as bool: %s", cacheRefreshGRPCMetadataKey, err) + log.Warn(fmt.Sprintf("failed to parse %s metadata key as bool: %s", cacheRefreshGRPCMetadataKey, err)) return false } @@ -91,7 +92,7 @@ func ShouldRefreshFromRequest(r *http.Request) bool { shouldRefresh, err := strconv.ParseBool(h) if err != nil { - log.Warningf("failed to parse %s header as bool: %s", cacheRefreshHeader, err) + log.Warn(fmt.Sprintf("failed to parse %s header as bool: %s", cacheRefreshHeader, err)) return false } diff --git a/go/vt/vtadmin/cluster/cluster.go b/go/vt/vtadmin/cluster/cluster.go index 7ab64a20695..4a613e3ad5a 100644 --- a/go/vt/vtadmin/cluster/cluster.go +++ b/go/vt/vtadmin/cluster/cluster.go @@ -331,7 +331,7 @@ func (c *Cluster) parseTablet(rows *sql.Rows) (*vtadminpb.Tablet, error) { if topotablet.Alias.Cell != cell { // (TODO:@amason) ??? - log.Warningf("tablet cell %s does not match alias %s. ignoring for now", cell, topoproto.TabletAliasString(topotablet.Alias)) + log.Warn(fmt.Sprintf("tablet cell %s does not match alias %s. ignoring for now", cell, topoproto.TabletAliasString(topotablet.Alias))) } if mtstStr != "" { @@ -749,7 +749,7 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi span.Finish() } else if opts.IgnoreKeyspaces.Len() > 0 { - log.Warningf("Cluster.findWorkflows: IgnoreKeyspaces was set, but Keyspaces was not empty; ignoring IgnoreKeyspaces in favor of explicitly checking everything in Keyspaces: (%s)", strings.Join(keyspaces, ", ")) + log.Warn(fmt.Sprintf("Cluster.findWorkflows: IgnoreKeyspaces was set, but Keyspaces was not empty; ignoring IgnoreKeyspaces in favor of explicitly checking everything in Keyspaces: (%s)", strings.Join(keyspaces, ", "))) opts.IgnoreKeyspaces = sets.New[string]() } @@ -772,7 +772,7 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi for _, ks := range keyspaces { if opts.IgnoreKeyspaces.Has(ks) { - log.Infof("Cluster.findWorkflows: ignoring keyspace %s", ks) + log.Info("Cluster.findWorkflows: ignoring keyspace " + ks) continue } @@ -988,7 +988,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace // vtctld side, and we can do better checking here. 
// Since this is on the client-side of an RPC we can't // even use topo.IsErrType(topo.NoNode) :( - log.Warningf("getShardSets(): keyspace %s does not exist in cluster %s", ksName, c.ID) + log.Warn(fmt.Sprintf("getShardSets(): keyspace %s does not exist in cluster %s", ksName, c.ID)) m.Lock() defer m.Unlock() @@ -1015,7 +1015,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace overlap := shardSet.Intersection(fullShardSet) if overlap.Len() != shardSet.Len() { - log.Warningf("getShardSets(): keyspace %s is missing specified shards in cluster %s: %v", ksName, c.ID, sets.List(shardSet.Difference(overlap))) + log.Warn(fmt.Sprintf("getShardSets(): keyspace %s is missing specified shards in cluster %s: %v", ksName, c.ID, sets.List(shardSet.Difference(overlap)))) } m.Lock() @@ -1065,7 +1065,7 @@ func (c *Cluster) GetCellInfos(ctx context.Context, req *vtadminpb.GetCellInfosR namesOnly := req.NamesOnly if namesOnly && len(req.Cells) > 0 { - log.Warning("Cluster.GetCellInfos: req.Cells and req.NamesOnly set, ignoring NamesOnly") + log.Warn("Cluster.GetCellInfos: req.Cells and req.NamesOnly set, ignoring NamesOnly") namesOnly = false } @@ -1399,7 +1399,7 @@ func (c *Cluster) GetSchema(ctx context.Context, keyspace string, opts GetSchema } if opts.TableSizeOptions.AggregateSizes && opts.BaseRequest.TableNamesOnly { - log.Warningf("GetSchema(cluster = %s) size aggregation is incompatible with TableNamesOnly, ignoring the latter in favor of aggregating sizes", c.ID) + log.Warn(fmt.Sprintf("GetSchema(cluster = %s) size aggregation is incompatible with TableNamesOnly, ignoring the latter in favor of aggregating sizes", c.ID)) opts.BaseRequest.TableNamesOnly = false } @@ -1469,7 +1469,7 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta } if opts.TableSizeOptions.AggregateSizes && opts.BaseRequest.TableNamesOnly { - log.Warningf("GetSchemas(cluster = %s) size aggregation is incompatible with TableNamesOnly, ignoring the latter in favor of aggregating sizes", c.ID) + log.Warn(fmt.Sprintf("GetSchemas(cluster = %s) size aggregation is incompatible with TableNamesOnly, ignoring the latter in favor of aggregating sizes", c.ID)) opts.BaseRequest.TableNamesOnly = false } @@ -1491,10 +1491,10 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta span.Annotate("cache_hit", ok) if ok { - log.Infof("GetSchemas(cluster = %s) fetching schemas from schema cache", c.ID) + log.Info(fmt.Sprintf("GetSchemas(cluster = %s) fetching schemas from schema cache", c.ID)) return schemas, err } else { - log.Infof("GetSchemas(cluster = %s) bypassing schema cache", c.ID) + log.Info(fmt.Sprintf("GetSchemas(cluster = %s) bypassing schema cache", c.ID)) } } @@ -1563,7 +1563,7 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta if err != nil { // Ignore keyspaces without any serving tablets. 
 			if stderrors.Is(err, errors.ErrNoServingTablet) {
-				log.Infof(err.Error())
+				log.Info(err.Error())
 				return
 			}
@@ -1579,12 +1579,12 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta
 			// Ignore keyspaces without schemas
 			if schema == nil {
-				log.Infof("No schemas for %s", ks.Keyspace.Name)
+				log.Info("No schemas for " + ks.Keyspace.Name)
 				return
 			}
 			if len(schema.TableDefinitions) == 0 {
-				log.Infof("No tables in schema for %s", ks.Keyspace.Name)
+				log.Info("No tables in schema for " + ks.Keyspace.Name)
 				return
 			}
@@ -1732,7 +1732,7 @@ func (c *Cluster) getSchemaFromTablets(ctx context.Context, keyspace string, tab
 			if _, ok = tableSize.ByShard[tablet.Tablet.Shard]; ok {
 				err := fmt.Errorf("duplicate shard queries for table %s on shard %s/%s", td.Name, keyspace, tablet.Tablet.Shard)
-				log.Warningf("Impossible: %s", err)
+				log.Warn(fmt.Sprintf("Impossible: %s", err))
 				rec.RecordError(err)
 				return
@@ -1780,7 +1780,7 @@ func (c *Cluster) getTabletsToQueryForSchemas(ctx context.Context, keyspace stri
 		// primary (via PrimaryAlias) in addition to the IsPrimaryServing bit.
 		if !shard.Shard.IsPrimaryServing || shard.Shard.PrimaryAlias == nil {
 			if !opts.TableSizeOptions.IncludeNonServingShards {
-				log.Infof("%s/%s is not serving; ignoring because IncludeNonServingShards = false", keyspace, shard.Name)
+				log.Info(fmt.Sprintf("%s/%s is not serving; ignoring because IncludeNonServingShards = false", keyspace, shard.Name))
 				continue
 			}
 		}
@@ -1806,7 +1806,7 @@ func (c *Cluster) getTabletsToQueryForSchemas(ctx context.Context, keyspace stri
 	if len(keyspaceTablets) == 0 {
 		err := fmt.Errorf("%w for keyspace %s", errors.ErrNoServingTablet, keyspace)
-		log.Warningf("%s. Searched tablets: %v", err, vtadminproto.Tablets(tablets).AliasStringList())
+		log.Warn(fmt.Sprintf("%s. Searched tablets: %v", err, vtadminproto.Tablets(tablets).AliasStringList()))
 		return nil, err
 	}
diff --git a/go/vt/vtadmin/cluster/config.go b/go/vt/vtadmin/cluster/config.go
index 3cab3fabb36..861982d23c8 100644
--- a/go/vt/vtadmin/cluster/config.go
+++ b/go/vt/vtadmin/cluster/config.go
@@ -135,7 +135,7 @@ var ErrNoConfigID = stderrors.New("loaded config has no id")
 func LoadConfig(r io.Reader, configType string) (cfg *Config, id string, err error) {
 	v := viper.New()
 	if configType == "" {
-		log.Warning("no configType specified, defaulting to 'json'")
+		log.Warn("no configType specified, defaulting to 'json'")
 		configType = "json"
 	}
diff --git a/go/vt/vtadmin/cluster/dynamic/interceptors.go b/go/vt/vtadmin/cluster/dynamic/interceptors.go
index 2ddaa38dd34..d36d89204dd 100644
--- a/go/vt/vtadmin/cluster/dynamic/interceptors.go
+++ b/go/vt/vtadmin/cluster/dynamic/interceptors.go
@@ -2,6 +2,7 @@ package dynamic
 import (
 	"context"
+	"fmt"
 	"strings"
 	"google.golang.org/grpc"
@@ -28,12 +29,12 @@ func StreamServerInterceptor(api API) grpc.StreamServerInterceptor {
 		case id == "":
 			// There was a cluster spec in the metadata, but we couldn't even
 			// get an id out of it. Warn and fallback to static API.
- log.Warningf("failed to unmarshal dynamic cluster spec from incoming context metadata; falling back to static API; error: %v", err) + log.Warn(fmt.Sprintf("failed to unmarshal dynamic cluster spec from incoming context metadata; falling back to static API; error: %v", err)) return handler(srv, ss) } if err != nil { - log.Warningf("failed to extract valid cluster from incoming metadata; attempting to use existing cluster with id=%s; error: %v", id, err) + log.Warn(fmt.Sprintf("failed to extract valid cluster from incoming metadata; attempting to use existing cluster with id=%s; error: %v", id, err)) } dynamicAPI := api.WithCluster(c, id) @@ -56,12 +57,12 @@ func UnaryServerInterceptor(api API) grpc.UnaryServerInterceptor { case id == "": // There was a cluster spec in the metadata, but we couldn't even // get an id out of it. Warn and fallback to static API. - log.Warningf("failed to unmarshal dynamic cluster spec from incoming context metadata; falling back to static API; error: %v", err) + log.Warn(fmt.Sprintf("failed to unmarshal dynamic cluster spec from incoming context metadata; falling back to static API; error: %v", err)) return handler(ctx, req) } if err != nil { - log.Warningf("failed to extract valid cluster from incoming metadata; attempting to use existing cluster with id=%s; error: %v", id, err) + log.Warn(fmt.Sprintf("failed to extract valid cluster from incoming metadata; attempting to use existing cluster with id=%s; error: %v", id, err)) } dynamicAPI := api.WithCluster(c, id) diff --git a/go/vt/vtadmin/cluster/flags.go b/go/vt/vtadmin/cluster/flags.go index c6bea8ac108..394eb86ae45 100644 --- a/go/vt/vtadmin/cluster/flags.go +++ b/go/vt/vtadmin/cluster/flags.go @@ -255,7 +255,7 @@ func parseOne(cfg *Config, name string, val string) error { match := discoveryFlagRegexp.FindStringSubmatch(name) if match == nil { // not a discovery flag - log.Warningf("Attempted to parse %q as a discovery flag, ignoring ...", name) + log.Warn(fmt.Sprintf("Attempted to parse %q as a discovery flag, ignoring ...", name)) return nil } diff --git a/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go b/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go index ebc3899e82f..232718802c0 100644 --- a/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go +++ b/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go @@ -60,11 +60,11 @@ type schemaCache = cache.Cache[Key, []*vtadminpb.Schema] // define a type alias func AddOrBackfill(c *schemaCache, schemas []*vtadminpb.Schema, key Key, d time.Duration, opts LoadOptions) { if opts.isFullPayload() { if err := c.Add(key, schemas, d); err != nil { - log.Warningf("failed to add schema to cache for %+v: %s", key, err) + log.Warn(fmt.Sprintf("failed to add schema to cache for %+v: %s", key, err)) } } else { if !c.EnqueueBackfill(key) { - log.Warningf("failed to enqueue backfill for schema cache %+v", key) + log.Warn(fmt.Sprintf("failed to enqueue backfill for schema cache %+v", key)) } } } @@ -168,7 +168,7 @@ func LoadOne(c *schemaCache, key Key, opts LoadOptions) (schema *vtadminpb.Schem if len(schemas) != 1 { err = vterrors.Errorf(vtrpc.Code_INTERNAL, "impossible: cache should have exactly 1 schema for request %+v (have %d)", key, len(schemas)) - log.Errorf(err.Error()) + log.Error(err.Error()) return nil, found, err } diff --git a/go/vt/vtadmin/cluster/resolver/resolver.go b/go/vt/vtadmin/cluster/resolver/resolver.go index 00297ff06d4..f3ec5d2689f 100644 --- a/go/vt/vtadmin/cluster/resolver/resolver.go +++ 
b/go/vt/vtadmin/cluster/resolver/resolver.go @@ -370,17 +370,17 @@ func (r *resolver) watch() { case nil: switch len(state.Addresses) { case 0: - log.Warningf("%s: found no %ss (cluster %s); updating grpc clientconn state anyway", logPrefix, r.component, r.cluster) + log.Warn(fmt.Sprintf("%s: found no %ss (cluster %s); updating grpc clientconn state anyway", logPrefix, r.component, r.cluster)) default: - log.Infof("%s: found %d %ss (cluster %s)", logPrefix, len(state.Addresses), r.component, r.cluster) + log.Info(fmt.Sprintf("%s: found %d %ss (cluster %s)", logPrefix, len(state.Addresses), r.component, r.cluster)) } if updateErr := r.cc.UpdateState(*state); updateErr != nil { - log.Errorf("%s: failed to update %ss addresses for %s (cluster %s): %s", logPrefix, r.component, r.cluster, updateErr) + log.Error(fmt.Sprintf("%s: failed to update %ss addresses for %s (cluster %s): %s", logPrefix, r.component, r.component, r.cluster, updateErr)) err = updateErr } default: - log.Errorf("%s: failed to resolve new addresses for %s (cluster %s): %s", logPrefix, r.component, r.cluster, err) + log.Error(fmt.Sprintf("%s: failed to resolve new addresses for %s (cluster %s): %s", logPrefix, r.component, r.cluster, err)) r.cc.ReportError(err) } @@ -425,7 +425,7 @@ func (r *resolver) resolve() (*grpcresolver.State, error) { span.Annotate("cluster_id", r.cluster) span.Annotate("component", r.component) - log.Infof("%s: resolving %ss (cluster %s)", logPrefix, r.component, r.cluster) + log.Info(fmt.Sprintf("%s: resolving %ss (cluster %s)", logPrefix, r.component, r.cluster)) ctx, cancel := context.WithTimeout(ctx, r.opts.DiscoveryTimeout) defer cancel() diff --git a/go/vt/vtadmin/grpcserver/server.go b/go/vt/vtadmin/grpcserver/server.go index 84c464210a1..6eec5d31593 100644 --- a/go/vt/vtadmin/grpcserver/server.go +++ b/go/vt/vtadmin/grpcserver/server.go @@ -122,7 +122,7 @@ func New(name string, opts Options) *Server { streamInterceptors = append(streamInterceptors, func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { err := handler(srv, ss) if err != nil { - log.Errorf("%s error: %s", info.FullMethod, err) + log.Error(fmt.Sprintf("%s error: %s", info.FullMethod, err)) } return err @@ -130,7 +130,7 @@ func New(name string, opts Options) *Server { unaryInterceptors = append(unaryInterceptors, func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { resp, err = handler(ctx, req) if err != nil { - log.Errorf("%s error: %s", info.FullMethod, err) + log.Error(fmt.Sprintf("%s error: %s", info.FullMethod, err)) } return resp, err @@ -220,7 +220,7 @@ func (s *Server) ListenAndServe() error { go func() { sig := <-signals err := fmt.Errorf("received signal: %v", sig) - log.Warning(err) + log.Warn(fmt.Sprint(err)) shutdown <- err }() @@ -237,14 +237,14 @@ func (s *Server) ListenAndServe() error { go func() { err := s.gRPCServer.Serve(grpcLis) err = fmt.Errorf("grpc server stopped: %w", err) - log.Warning(err) + log.Warn(fmt.Sprint(err)) shutdown <- err }() go func() { err := http.Serve(anyLis, s.router) err = fmt.Errorf("http server stopped: %w", err) - log.Warning(err) + log.Warn(fmt.Sprint(err)) shutdown <- err }() @@ -252,7 +252,7 @@ func (s *Server) ListenAndServe() error { go func() { err := lmux.Serve() err = fmt.Errorf("listener closed: %w", err) - log.Warning(err) + log.Warn(fmt.Sprint(err)) shutdown <- err }() @@ -261,17 +261,17 @@ func (s *Server) ListenAndServe() error { } s.setServing(true) - 
log.Infof("server %s listening on %s", s.name, s.opts.Addr) + log.Info(fmt.Sprintf("server %s listening on %s", s.name, s.opts.Addr)) reason := <-shutdown - log.Warningf("graceful shutdown triggered by: %v", reason) + log.Warn(fmt.Sprintf("graceful shutdown triggered by: %v", reason)) if s.opts.LameDuckDuration > 0 { - log.Infof("entering lame duck period for %v", s.opts.LameDuckDuration) + log.Info(fmt.Sprintf("entering lame duck period for %v", s.opts.LameDuckDuration)) s.healthServer.Shutdown() time.Sleep(s.opts.LameDuckDuration) } else { - log.Infof("lame duck disabled") + log.Info("lame duck disabled") } log.Info("beginning graceful shutdown") diff --git a/go/vt/vtadmin/http/api.go b/go/vt/vtadmin/http/api.go index 3a74b1e7aaf..92cc112a3a4 100644 --- a/go/vt/vtadmin/http/api.go +++ b/go/vt/vtadmin/http/api.go @@ -18,6 +18,7 @@ package http import ( "context" + "fmt" "net/http" "vitess.io/vitess/go/sets" @@ -107,7 +108,7 @@ func deprecateQueryParam(r *http.Request, newName string, oldName string) { q := r.URL.Query() if q.Has(oldName) { - log.Warningf("query param %s is deprecated in favor of %s. support for %s will be dropped in the next version", oldName, newName, oldName) + log.Warn(fmt.Sprintf("query param %s is deprecated in favor of %s. support for %s will be dropped in the next version", oldName, newName, oldName)) newVals := sets.New(q[newName]...) for _, oldVal := range q[oldName] { diff --git a/go/vt/vtadmin/http/handlers/panic_recovery.go b/go/vt/vtadmin/http/handlers/panic_recovery.go index 2f9af188a86..000cca6a62b 100644 --- a/go/vt/vtadmin/http/handlers/panic_recovery.go +++ b/go/vt/vtadmin/http/handlers/panic_recovery.go @@ -37,7 +37,7 @@ func PanicRecoveryHandler(next http.Handler) http.Handler { name := mux.CurrentRoute(r).GetName() defer func() { if err := recover(); err != nil { - log.Errorf("uncaught panic in %s: %s", name, err) + log.Error(fmt.Sprintf("uncaught panic in %s: %s", name, err)) http.Error(w, fmt.Sprintf("%v", err), http.StatusInternalServerError) } }() diff --git a/go/vt/vtadmin/http/response.go b/go/vt/vtadmin/http/response.go index 899a610e57e..638ed1c42b5 100644 --- a/go/vt/vtadmin/http/response.go +++ b/go/vt/vtadmin/http/response.go @@ -45,7 +45,7 @@ type errorBody struct { // 500 unknown. 
func NewJSONResponse(value any, err error) *JSONResponse { if err != nil { - log.Errorf(err.Error()) + log.Error(err.Error()) switch e := err.(type) { case errors.TypedError: diff --git a/go/vt/vtadmin/internal/backoff/backoff.go b/go/vt/vtadmin/internal/backoff/backoff.go index 16f3ec0bfec..b7d250ffee6 100644 --- a/go/vt/vtadmin/internal/backoff/backoff.go +++ b/go/vt/vtadmin/internal/backoff/backoff.go @@ -112,7 +112,7 @@ func (None) Backoff(int) time.Duration { return 0 } func Get(strategy string, cfg grpcbackoff.Config) Strategy { switch strings.ToLower(strategy) { case "": - log.Warningf("no backoff strategy specified; defaulting to exponential") + log.Warn("no backoff strategy specified; defaulting to exponential") fallthrough case "exponential": return Exponential{cfg} diff --git a/go/vt/vtadmin/rbac/config.go b/go/vt/vtadmin/rbac/config.go index dd90257374e..9fa01c411b2 100644 --- a/go/vt/vtadmin/rbac/config.go +++ b/go/vt/vtadmin/rbac/config.go @@ -119,7 +119,7 @@ func (c *Config) Reify() error { return rec.Error() } - log.Infof("[rbac]: loaded authorizer with %d rules", len(c.Rules)) + log.Info(fmt.Sprintf("[rbac]: loaded authorizer with %d rules", len(c.Rules))) c.cfg = byResource c.authorizer = &Authorizer{ diff --git a/go/vt/vtadmin/vtctldclient/proxy.go b/go/vt/vtadmin/vtctldclient/proxy.go index 10900d0b507..645aeccc20e 100644 --- a/go/vt/vtadmin/vtctldclient/proxy.go +++ b/go/vt/vtadmin/vtctldclient/proxy.go @@ -129,7 +129,7 @@ func (vtctld *ClientProxy) dial(ctx context.Context) error { return err } - log.Infof("Established gRPC connection to vtctld\n") + log.Info("Established gRPC connection to vtctld\n") vtctld.m.Lock() defer vtctld.m.Unlock() diff --git a/go/vt/vtadmin/vtsql/vtsql.go b/go/vt/vtadmin/vtsql/vtsql.go index 9237a4501f7..b2550a8046f 100644 --- a/go/vt/vtadmin/vtsql/vtsql.go +++ b/go/vt/vtadmin/vtsql/vtsql.go @@ -160,7 +160,7 @@ func (vtgate *VTGateProxy) dial(ctx context.Context, target string, opts ...grpc return fmt.Errorf("error dialing vtgate: %w", err) } - log.Infof("Established gRPC connection to vtgate\n") + log.Info("Established gRPC connection to vtgate\n") vtgate.m.Lock() defer vtgate.m.Unlock() diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 0185ab00d42..1f4b81f1081 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -92,7 +92,7 @@ func CreateTablet( Cell: cell, Uid: uid, } - log.Infof("Creating %v tablet %v for %v/%v", tabletType, topoproto.TabletAliasString(alias), keyspace, shard) + log.Info(fmt.Sprintf("Creating %v tablet %v for %v/%v", tabletType, topoproto.TabletAliasString(alias), keyspace, shard)) controller := tabletserver.NewServer(ctx, env, topoproto.TabletAliasString(alias), ts, alias, srvTopoCounts) initTabletType := tabletType @@ -418,7 +418,7 @@ func CreateKs( return 0, fmt.Errorf("SaveVSchema(%v) failed: %v", keyspace, err) } } else { - log.Infof("File %v doesn't exist, skipping vschema for keyspace %v", f, keyspace) + log.Info(fmt.Sprintf("File %v doesn't exist, skipping vschema for keyspace %v", f, keyspace)) } } diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index 6265ec17033..62a4931c0ba 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -191,7 +191,7 @@ func (s *VtctldServer) ApplyRoutingRules(ctx context.Context, req *vtctldatapb.A resp = &vtctldatapb.ApplyRoutingRulesResponse{} if req.SkipRebuild { - log.Warningf("Skipping rebuild of SrvVSchema, will need to run 
RebuildVSchemaGraph for changes to take effect") + log.Warn("Skipping rebuild of SrvVSchema, will need to run RebuildVSchemaGraph for changes to take effect") return resp, nil } @@ -218,7 +218,7 @@ func (s *VtctldServer) ApplyShardRoutingRules(ctx context.Context, req *vtctldat resp := &vtctldatapb.ApplyShardRoutingRulesResponse{} if req.SkipRebuild { - log.Warningf("Skipping rebuild of SrvVSchema as requested, you will need to run RebuildVSchemaGraph for changes to take effect") + log.Warn("Skipping rebuild of SrvVSchema as requested, you will need to run RebuildVSchemaGraph for changes to take effect") return resp, nil } @@ -231,7 +231,7 @@ func (s *VtctldServer) ApplyShardRoutingRules(ctx context.Context, req *vtctldat // ApplySchema is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySchemaRequest) (resp *vtctldatapb.ApplySchemaResponse, err error) { - log.Infof("VtctldServer.ApplySchema: keyspace=%s, migrationContext=%v, ddlStrategy=%v, batchSize=%v", req.Keyspace, req.MigrationContext, req.DdlStrategy, req.BatchSize) + log.Info(fmt.Sprintf("VtctldServer.ApplySchema: keyspace=%s, migrationContext=%v, ddlStrategy=%v, batchSize=%v", req.Keyspace, req.MigrationContext, req.DdlStrategy, req.BatchSize)) span, ctx := trace.NewSpan(ctx, "VtctldServer.ApplySchema") defer span.Finish() @@ -686,7 +686,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch if err != nil { return nil, err } - log.Infof("Getting a new durability policy for %v", durabilityName) + log.Info(fmt.Sprintf("Getting a new durability policy for %v", durabilityName)) durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err @@ -726,7 +726,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch changedTabletInfo, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - log.Warningf("error while reading the tablet we just changed back out of the topo: %v", err) + log.Warn(fmt.Sprintf("error while reading the tablet we just changed back out of the topo: %v", err)) } else { changedTablet = changedTabletInfo.Tablet } @@ -842,7 +842,7 @@ func (s *VtctldServer) ForceCutOverSchemaMigration(ctx context.Context, req *vtc return nil, err } - log.Infof("Calling ApplySchema to force cut-over migration %s", req.Uuid) + log.Info("Calling ApplySchema to force cut-over migration " + req.Uuid) qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ Keyspace: req.Keyspace, Sql: []string{query}, @@ -955,7 +955,7 @@ func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.Crea err = s.ts.CreateKeyspace(ctx, req.Name, ki) if req.Force && topo.IsErrType(err, topo.NodeExists) { - log.Infof("keyspace %v already exists (ignoring error with Force=true)", req.Name) + log.Info(fmt.Sprintf("keyspace %v already exists (ignoring error with Force=true)", req.Name)) err = nil // Get the actual keyspace out of the topo; it may differ in structure, @@ -979,9 +979,9 @@ func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.Crea if req.Type == topodatapb.KeyspaceType_SNAPSHOT { bksvs, err := s.ts.GetVSchema(ctx, req.BaseKeyspace) if err != nil { - log.Infof("error from GetVSchema(%v) = %v", req.BaseKeyspace, err) + log.Info(fmt.Sprintf("error from GetVSchema(%v) = %v", req.BaseKeyspace, err)) if topo.IsErrType(err, topo.NoNode) { - log.Infof("base keyspace %v does not exist; continuing with bare, unsharded vschema", req.BaseKeyspace) + 
log.Info(fmt.Sprintf("base keyspace %v does not exist; continuing with bare, unsharded vschema", req.BaseKeyspace)) bksvs = &topo.KeyspaceVSchemaInfo{ Name: req.Name, Keyspace: &vschemapb.Keyspace{ @@ -1038,10 +1038,10 @@ func (s *VtctldServer) CreateShard(ctx context.Context, req *vtctldatapb.CreateS span.Annotate("include_parent", req.IncludeParent) if req.IncludeParent { - log.Infof("Creating empty keyspace for %s", req.Keyspace) + log.Info("Creating empty keyspace for " + req.Keyspace) if err2 := s.ts.CreateKeyspace(ctx, req.Keyspace, &topodatapb.Keyspace{}); err2 != nil { if req.Force && topo.IsErrType(err2, topo.NodeExists) { - log.Infof("keyspace %v already exists; ignoring error because Force = true", req.Keyspace) + log.Info(fmt.Sprintf("keyspace %v already exists; ignoring error because Force = true", req.Keyspace)) } else { err = err2 return nil, err @@ -1053,7 +1053,7 @@ func (s *VtctldServer) CreateShard(ctx context.Context, req *vtctldatapb.CreateS if err = s.ts.CreateShard(ctx, req.Keyspace, req.ShardName); err != nil { if req.Force && topo.IsErrType(err, topo.NodeExists) { - log.Infof("shard %v/%v already exists; ignoring error because Force = true", req.Keyspace, req.ShardName) + log.Info(fmt.Sprintf("shard %v/%v already exists; ignoring error because Force = true", req.Keyspace, req.ShardName)) shardExists = true err = nil } else { @@ -1146,7 +1146,7 @@ func (s *VtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldatapb.Dele err = fmt.Errorf("failed to lock %s; if you really want to delete this keyspace, re-run with Force=true: %w", req.Keyspace, lerr) return nil, err default: - log.Warningf("%s: failed to lock keyspace %s for deletion, but force=true, proceeding anyway ...", lerr, req.Keyspace) + log.Warn(fmt.Sprintf("%s: failed to lock keyspace %s for deletion, but force=true, proceeding anyway ...", lerr, req.Keyspace)) } if unlock != nil { @@ -1176,13 +1176,13 @@ func (s *VtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldatapb.Dele return nil, err } - log.Infof("Deleting all %d shards (and their tablets) in keyspace %v", len(shards), req.Keyspace) + log.Info(fmt.Sprintf("Deleting all %d shards (and their tablets) in keyspace %v", len(shards), req.Keyspace)) recursive := true evenIfServing := true force := req.Force for _, shard := range shards { - log.Infof("Recursively deleting shard %v/%v", req.Keyspace, shard) + log.Info(fmt.Sprintf("Recursively deleting shard %v/%v", req.Keyspace, shard)) err = deleteShard(ctx, s.ts, req.Keyspace, shard, recursive, evenIfServing, force) if err != nil { err = fmt.Errorf("cannot delete shard %v/%v: %w", req.Keyspace, shard, err) @@ -1198,11 +1198,11 @@ func (s *VtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldatapb.Dele for _, cell := range cells { if err := s.ts.DeleteKeyspaceReplication(ctx, cell, req.Keyspace); err != nil && !topo.IsErrType(err, topo.NoNode) { - log.Warningf("Cannot delete KeyspaceReplication in cell %v for %v: %v", cell, req.Keyspace, err) + log.Warn(fmt.Sprintf("Cannot delete KeyspaceReplication in cell %v for %v: %v", cell, req.Keyspace, err)) } if err := s.ts.DeleteSrvKeyspace(ctx, cell, req.Keyspace); err != nil && !topo.IsErrType(err, topo.NoNode) { - log.Warningf("Cannot delete SrvKeyspace in cell %v for %v: %v", cell, req.Keyspace, err) + log.Warn(fmt.Sprintf("Cannot delete SrvKeyspace in cell %v for %v: %v", cell, req.Keyspace, err)) } } @@ -2061,7 +2061,7 @@ func (s *VtctldServer) GetSrvKeyspaces(ctx context.Context, req *vtctldatapb.Get return nil, err } - 
log.Warningf("no srvkeyspace for keyspace %s in cell %s", req.Keyspace, cell) + log.Warn(fmt.Sprintf("no srvkeyspace for keyspace %s in cell %s", req.Keyspace, cell)) srvKeyspace = nil } @@ -2235,7 +2235,7 @@ func (s *VtctldServer) GetSrvVSchemas(ctx context.Context, req *vtctldatapb.GetS return nil, err } - log.Warningf("no SrvVSchema for cell %s", cell) + log.Warn("no SrvVSchema for cell " + cell) sv = nil } @@ -2327,7 +2327,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable if req.Strict { return nil, err } - log.Warningf("GetTablets encountered non-fatal error %s; continuing because Strict=false", err) + log.Warn(fmt.Sprintf("GetTablets encountered non-fatal error %s; continuing because Strict=false", err)) default: return nil, err } @@ -2387,7 +2387,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable tablets, err := s.ts.GetTabletsByCell(ctx, cell, nil) if err != nil { if req.Strict { - log.Infof("GetTablets got an error from cell %s: %s. Running in strict mode, so canceling other cell RPCs", cell, err) + log.Info(fmt.Sprintf("GetTablets got an error from cell %s: %s. Running in strict mode, so canceling other cell RPCs", cell, err)) cancel() } rec.RecordError(fmt.Errorf("GetTabletsByCell(%s) failed: %w", cell, err)) @@ -2407,6 +2407,8 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable err = rec.Error() return nil, err } + + log.Warn(fmt.Sprintf("GetTablets encountered non-fatal error %s; continuing because Strict=false", rec.Error())) } // Collect true primary term start times, and optionally filter out any @@ -2703,7 +2705,7 @@ func (s *VtctldServer) GetVersion(ctx context.Context, req *vtctldatapb.GetVersi if err != nil { return nil, err } - log.Infof("Tablet %v is running version '%v'", topoproto.TabletAliasString(tabletAlias), version) + log.Info(fmt.Sprintf("Tablet %v is running version '%v'", topoproto.TabletAliasString(tabletAlias), version)) return &vtctldatapb.GetVersionResponse{Version: version}, err } @@ -2835,7 +2837,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( if err != nil { return err } - log.Infof("Getting a new durability policy for %v", durabilityName) + log.Info(fmt.Sprintf("Getting a new durability policy for %v", durabilityName)) durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return err @@ -3013,7 +3015,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( } // Refresh the state to force the tabletserver to reconnect after db has been created. if err := tmc.RefreshState(ctx, primaryElectTabletInfo.Tablet); err != nil { - log.Warningf("RefreshState failed: %v", err) + log.Warn(fmt.Sprintf("RefreshState failed: %v", err)) } return nil @@ -3442,11 +3444,11 @@ func (s *VtctldServer) RefreshStateByShard(ctx context.Context, req *vtctldatapb isPartial, partialDetails, err := topotools.RefreshTabletsByShard(ctx, s.ts, s.tmc, si, req.Cells, logutil.NewCallbackLogger(func(e *logutilpb.Event) { switch e.Level { case logutilpb.Level_WARNING: - log.Warningf(e.Value) + log.Warn(e.Value) case logutilpb.Level_ERROR: - log.Errorf(e.Value) + log.Error(e.Value) default: - log.Infof(e.Value) + log.Info(e.Value) } })) if err != nil { @@ -3612,7 +3614,7 @@ func (s *VtctldServer) RemoveKeyspaceCell(ctx context.Context, req *vtctldatapb. // Remove all the shards, serially. Stop immediately if any fail. 
for _, shard := range shards { - log.Infof("Removing cell %v from shard %v/%v", req.Cell, req.Keyspace, shard) + log.Info(fmt.Sprintf("Removing cell %v from shard %v/%v", req.Cell, req.Keyspace, shard)) if err2 := removeShardCell(ctx, s.ts, req.Cell, req.Keyspace, shard, req.Recursive, req.Force); err2 != nil { err = fmt.Errorf("cannot remove cell %v from shard %v/%v: %w", req.Cell, req.Keyspace, shard, err2) return nil, err @@ -3620,7 +3622,7 @@ func (s *VtctldServer) RemoveKeyspaceCell(ctx context.Context, req *vtctldatapb. } // Last, remove the SrvKeyspace object. - log.Infof("Removing cell %v keyspace %v SrvKeyspace object", req.Cell, req.Keyspace) + log.Info(fmt.Sprintf("Removing cell %v keyspace %v SrvKeyspace object", req.Cell, req.Keyspace)) if err = s.ts.DeleteSrvKeyspace(ctx, req.Cell, req.Keyspace); err != nil { err = fmt.Errorf("cannot delete SrvKeyspace from cell %v for keyspace %v: %w", req.Cell, req.Keyspace, err) return nil, err @@ -3703,7 +3705,7 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa if err != nil { return nil, err } - log.Infof("Getting a new durability policy for %v", durabilityName) + log.Info(fmt.Sprintf("Getting a new durability policy for %v", durabilityName)) durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err @@ -4070,7 +4072,7 @@ func (s *VtctldServer) SetWritable(ctx context.Context, req *vtctldatapb.SetWrit tablet, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - log.Errorf("SetWritable: failed to read tablet record for %v: %v", alias, err) + log.Error(fmt.Sprintf("SetWritable: failed to read tablet record for %v: %v", alias, err)) return nil, err } @@ -4083,7 +4085,7 @@ func (s *VtctldServer) SetWritable(ctx context.Context, req *vtctldatapb.SetWrit } if err = f(ctx, tablet.Tablet); err != nil { - log.Errorf("SetWritable: failed to set writable=%v on %v: %v", req.Writable, alias, err) + log.Error(fmt.Sprintf("SetWritable: failed to set writable=%v on %v: %v", req.Writable, alias, err)) return nil, err } @@ -4150,7 +4152,7 @@ func (s *VtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctl return nil, err } - log.Infof("Gathering tablet replication status for: %v", tabletInfoMap) + log.Info(fmt.Sprintf("Gathering tablet replication status for: %v", tabletInfoMap)) var ( m sync.Mutex @@ -4190,9 +4192,9 @@ func (s *VtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctl if err != nil { switch ctx.Err() { case context.Canceled: - log.Warningf("context canceled before obtaining primary position from %s: %s", alias, err) + log.Warn(fmt.Sprintf("context canceled before obtaining primary position from %s: %s", alias, err)) case context.DeadlineExceeded: - log.Warningf("context deadline exceeded before obtaining primary position from %s: %s", alias, err) + log.Warn(fmt.Sprintf("context deadline exceeded before obtaining primary position from %s: %s", alias, err)) default: // The RPC was not timed out or canceled. We treat this // as a fatal error for the overall request. 
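
The conversion in these hunks is mechanical; below is a condensed, self-contained sketch of the rewrite rules, assuming only the string-taking Info/Warn/Error helpers on vitess.io/vitess/go/vt/log that the + lines above already call. The function, its parameters, and the message text are illustrative stand-ins, not code from this patch.

package loggingsketch

import (
	"fmt"

	"vitess.io/vitess/go/vt/log"
)

// convertedLogging illustrates the formatted-to-plain logging rewrite
// applied in the hunks above and below.
func convertedLogging(keyspace string, err error) {
	// log.Infof(format, args...) -> log.Info(fmt.Sprintf(format, args...))
	log.Info(fmt.Sprintf("Recursively deleting shard %v/%v", keyspace, "-80"))

	// A lone trailing %s/%v with a string argument may become plain concatenation.
	log.Info("Creating empty keyspace for " + keyspace)

	// log.Warningf -> log.Warn and log.Errorf -> log.Error, wrapped the same way.
	log.Warn(fmt.Sprintf("RefreshState failed: %v", err))
	log.Error(fmt.Sprintf("failed to read tablet record for %v: %v", keyspace, err))
}
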
@@ -4230,9 +4232,9 @@ func (s *VtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctl if err != nil { switch ctx.Err() { case context.Canceled: - log.Warningf("context canceled before obtaining replication position from %s: %s", alias, err) + log.Warn(fmt.Sprintf("context canceled before obtaining replication position from %s: %s", alias, err)) case context.DeadlineExceeded: - log.Warningf("context deadline exceeded before obtaining replication position from %s: %s", alias, err) + log.Warn(fmt.Sprintf("context deadline exceeded before obtaining replication position from %s: %s", alias, err)) default: // The RPC was not timed out or canceled. We treat this // as a fatal error for the overall request. @@ -4438,7 +4440,7 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St tablet, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - log.Errorf("StartReplication: failed to read tablet record for %v: %v", alias, err) + log.Error(fmt.Sprintf("StartReplication: failed to read tablet record for %v: %v", alias, err)) return nil, err } @@ -4472,14 +4474,14 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St if err != nil { return nil, err } - log.Infof("Getting a new durability policy for %v", durabilityName) + log.Info(fmt.Sprintf("Getting a new durability policy for %v", durabilityName)) durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } if err = s.tmc.StartReplication(ctx, tablet.Tablet, policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet)); err != nil { - log.Errorf("StartReplication: failed to start replication on %v: %v", alias, err) + log.Error(fmt.Sprintf("StartReplication: failed to start replication on %v: %v", alias, err)) return nil, err } @@ -4503,12 +4505,12 @@ func (s *VtctldServer) StopReplication(ctx context.Context, req *vtctldatapb.Sto tablet, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - log.Errorf("StopReplication: failed to read tablet record for %v: %v", alias, err) + log.Error(fmt.Sprintf("StopReplication: failed to read tablet record for %v: %v", alias, err)) return nil, err } if err := s.tmc.StopReplication(ctx, tablet.Tablet); err != nil { - log.Errorf("StopReplication: failed to stop replication on %v: %v", alias, err) + log.Error(fmt.Sprintf("StopReplication: failed to stop replication on %v: %v", alias, err)) return nil, err } @@ -4531,13 +4533,13 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct tablet, err := s.ts.GetTablet(ctx, req.Tablet) if err != nil { - log.Warningf("TabletExternallyReparented: failed to read tablet record for %v: %v", topoproto.TabletAliasString(req.Tablet), err) + log.Warn(fmt.Sprintf("TabletExternallyReparented: failed to read tablet record for %v: %v", topoproto.TabletAliasString(req.Tablet), err)) return nil, err } shard, err := s.ts.GetShard(ctx, tablet.Keyspace, tablet.Shard) if err != nil { - log.Warningf("TabletExternallyReparented: failed to read global shard record for %v/%v: %v", tablet.Keyspace, tablet.Shard, err) + log.Warn(fmt.Sprintf("TabletExternallyReparented: failed to read global shard record for %v/%v: %v", tablet.Keyspace, tablet.Shard, err)) return nil, err } @@ -4554,7 +4556,7 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct return resp, nil } - log.Infof("TabletExternallyReparented: executing tablet type change %v -> PRIMARY on %v", tablet.Type, topoproto.TabletAliasString(req.Tablet)) + 
log.Info(fmt.Sprintf("TabletExternallyReparented: executing tablet type change %v -> PRIMARY on %v", tablet.Type, topoproto.TabletAliasString(req.Tablet))) ev := &events.Reparent{ ShardInfo: *shard, NewPrimary: tablet.CloneVT(), @@ -4577,14 +4579,14 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct if err != nil { return nil, err } - log.Infof("Getting a new durability policy for %v", durabilityName) + log.Info(fmt.Sprintf("Getting a new durability policy for %v", durabilityName)) durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } if err = s.tmc.ChangeType(ctx, tablet.Tablet, topodatapb.TabletType_PRIMARY, policy.SemiSyncAckers(durability, tablet.Tablet) > 0); err != nil { - log.Warningf("ChangeType(%v, PRIMARY): %v", topoproto.TabletAliasString(req.Tablet), err) + log.Warn(fmt.Sprintf("ChangeType(%v, PRIMARY): %v", topoproto.TabletAliasString(req.Tablet), err)) return nil, err } @@ -4763,7 +4765,7 @@ func (s *VtctldServer) Validate(ctx context.Context, req *vtctldatapb.ValidateRe return } - log.Infof("tablet %v is valid", key) + log.Info(fmt.Sprintf("tablet %v is valid", key)) }(alias) } } @@ -4898,7 +4900,7 @@ func (s *VtctldServer) ValidatePermissionsKeyspace(ctx context.Context, req *vtc return nil, fmt.Errorf("no primary tablet in shard %s/%s", req.Keyspace, shards[0]) } referenceAlias := si.PrimaryAlias - log.Infof("Gathering permissions for reference primary %s", topoproto.TabletAliasString(referenceAlias)) + log.Info("Gathering permissions for reference primary " + topoproto.TabletAliasString(referenceAlias)) pres, err := s.GetPermissions(ctx, &vtctldatapb.GetPermissionsRequest{ TabletAlias: si.PrimaryAlias, }) @@ -4919,7 +4921,7 @@ func (s *VtctldServer) ValidatePermissionsKeyspace(ctx context.Context, req *vtc if topoproto.TabletAliasEqual(alias, si.PrimaryAlias) { continue } - log.Infof("Gathering permissions for %s", topoproto.TabletAliasString(alias)) + log.Info("Gathering permissions for " + topoproto.TabletAliasString(alias)) presp, err := s.GetPermissions(ctx, &vtctldatapb.GetPermissionsRequest{ TabletAlias: alias, }) @@ -4927,8 +4929,8 @@ func (s *VtctldServer) ValidatePermissionsKeyspace(ctx context.Context, req *vtc return err } - log.Infof("Diffing permissions between %s and %s", topoproto.TabletAliasString(referenceAlias), - topoproto.TabletAliasString(alias)) + log.Info(fmt.Sprintf("Diffing permissions between %s and %s", topoproto.TabletAliasString(referenceAlias), + topoproto.TabletAliasString(alias))) er := &concurrency.AllErrorRecorder{} tmutils.DiffPermissions(topoproto.TabletAliasString(referenceAlias), referencePermissions, topoproto.TabletAliasString(alias), presp.Permissions, er) @@ -5183,7 +5185,7 @@ func (s *VtctldServer) ValidateShard(ctx context.Context, req *vtctldatapb.Valid return } - log.Infof("tablet %v is valid", topoproto.TabletAliasString(alias)) + log.Info(fmt.Sprintf("tablet %v is valid", topoproto.TabletAliasString(alias))) }(alias) } @@ -5401,7 +5403,7 @@ func (s *VtctldServer) ValidateVersionShard(ctx context.Context, req *vtctldatap return nil, err } - log.Infof("Gathering version for primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) + log.Info(fmt.Sprintf("Gathering version for primary %v", topoproto.TabletAliasString(shard.PrimaryAlias))) primaryVersion, err := s.GetVersion(ctx, &vtctldatapb.GetVersionRequest{ TabletAlias: shard.PrimaryAlias, }) @@ -5817,7 +5819,7 @@ func GetVersionFunc() func(string) (string, error) { // helper method to 
asynchronously get and diff a version func (s *VtctldServer) diffVersion(ctx context.Context, primaryVersion string, primaryAlias *topodatapb.TabletAlias, alias *topodatapb.TabletAlias, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { defer wg.Done() - log.Infof("Gathering version for %v", topoproto.TabletAliasString(alias)) + log.Info(fmt.Sprintf("Gathering version for %v", topoproto.TabletAliasString(alias))) replicaVersion, err := s.GetVersion(ctx, &vtctldatapb.GetVersionRequest{ TabletAlias: alias, }) diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go index 055cc466382..5da2255a8aa 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -141,13 +141,13 @@ func setTMClientProtocol(protocol string) (reset func()) { case nil: reset = func() { setTMClientProtocol(oldVal) } default: - log.Errorf("failed to get string value for flag %q: %v", tmclientProtocolFlagName, err) + log.Error(fmt.Sprintf("failed to get string value for flag %q: %v", tmclientProtocolFlagName, err)) reset = func() {} } if err := fs.Set(tmclientProtocolFlagName, protocol); err != nil { msg := "failed to set flag %q to %q: %v" - log.Errorf(msg, tmclientProtocolFlagName, protocol, err) + log.Error(fmt.Sprintf(msg, tmclientProtocolFlagName, protocol, err)) reset = func() {} } @@ -443,12 +443,12 @@ func (fake *TabletManagerClient) Backup(ctx context.Context, tablet *topodatapb. go func() { if testdata.EventInterval == 0 { testdata.EventInterval = 10 * time.Millisecond - log.Warningf("testutil.TabletManagerClient.Backup faked with no event interval for %s, defaulting to %s", key, testdata.EventInterval) + log.Warn(fmt.Sprintf("testutil.TabletManagerClient.Backup faked with no event interval for %s, defaulting to %s", key, testdata.EventInterval)) } if testdata.EventJitter == 0 { testdata.EventJitter = time.Millisecond - log.Warningf("testutil.TabletManagerClient.Backup faked with no event jitter for %s, defaulting to %s", key, testdata.EventJitter) + log.Warn(fmt.Sprintf("testutil.TabletManagerClient.Backup faked with no event jitter for %s, defaulting to %s", key, testdata.EventJitter)) } errCtx, errCancel := context.WithCancel(context.Background()) @@ -1166,12 +1166,12 @@ func (fake *TabletManagerClient) RestoreFromBackup(ctx context.Context, tablet * go func() { if testdata.EventInterval == 0 { testdata.EventInterval = 10 * time.Millisecond - log.Warningf("testutil.TabletManagerClient.RestoreFromBackup faked with no event interval for %s, defaulting to %s", key, testdata.EventInterval) + log.Warn(fmt.Sprintf("testutil.TabletManagerClient.RestoreFromBackup faked with no event interval for %s, defaulting to %s", key, testdata.EventInterval)) } if testdata.EventJitter == 0 { testdata.EventJitter = time.Millisecond - log.Warningf("testutil.TabletManagerClient.RestoreFromBackup faked with no event jitter for %s, defaulting to %s", key, testdata.EventJitter) + log.Warn(fmt.Sprintf("testutil.TabletManagerClient.RestoreFromBackup faked with no event jitter for %s, defaulting to %s", key, testdata.EventJitter)) } errCtx, errCancel := context.WithCancel(context.Background()) diff --git a/go/vt/vtctl/grpcvtctldserver/topo.go b/go/vt/vtctl/grpcvtctldserver/topo.go index a5010c66f0f..20efe48bf79 100644 --- a/go/vt/vtctl/grpcvtctldserver/topo.go +++ b/go/vt/vtctl/grpcvtctldserver/topo.go @@ -51,7 +51,7 @@ func deleteShard(ctx context.Context, ts *topo.Server, keyspace string, shard st 
return fmt.Errorf("failed to lock %s/%s; if you really want to delete this shard, re-run with Force=true: %w", keyspace, shard, lerr) default: // Failed to lock, but force=true. Warn and continue - log.Warningf("%s: failed to lock shard %s/%s for deletion, but force=true, proceeding anyway ...", lerr, keyspace, shard) + log.Warn(fmt.Sprintf("%s: failed to lock shard %s/%s for deletion, but force=true, proceeding anyway ...", lerr, keyspace, shard)) } if unlock != nil { @@ -75,7 +75,7 @@ func deleteShard(ctx context.Context, ts *topo.Server, keyspace string, shard st shardInfo, err := ts.GetShard(ctx, keyspace, shard) if err != nil { if topo.IsErrType(err, topo.NoNode) { - log.Infof("Shard %v/%v doesn't seem to exist; cleaning up any potential leftover topo data", keyspace, shard) + log.Info(fmt.Sprintf("Shard %v/%v doesn't seem to exist; cleaning up any potential leftover topo data", keyspace, shard)) _ = ts.DeleteShard(ctx, keyspace, shard) return nil @@ -111,7 +111,7 @@ func deleteShard(ctx context.Context, ts *topo.Server, keyspace string, shard st // regardless of whether they exist. for _, cell := range cells { if err := ts.DeleteShardReplication(ctx, cell, keyspace, shard); err != nil && !topo.IsErrType(err, topo.NoNode) { - log.Warningf("Cannot delete ShardReplication in cell %v for %v/%v: %w", cell, keyspace, shard, err) + log.Warn(fmt.Sprintf("Cannot delete ShardReplication in cell %v for %v/%v: %v", cell, keyspace, shard, err)) } } @@ -181,11 +181,11 @@ func deleteShardCell(ctx context.Context, ts *topo.Server, keyspace string, shar return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "Shard %v/%v still hase %v tablets in cell %v; use Recursive = true or remove them manually", keyspace, shard, len(tabletMap), cell) } - log.Infof("Deleting all %d tablets in shard %v/%v cell %v", len(tabletMap), keyspace, shard, cell) + log.Info(fmt.Sprintf("Deleting all %d tablets in shard %v/%v cell %v", len(tabletMap), keyspace, shard, cell)) for alias, tablet := range tabletMap { // We don't care about updating the ShardReplication object, because // later we're going to delete the entire object. - log.Infof("Deleting tablet %v", alias) + log.Info(fmt.Sprintf("Deleting tablet %v", alias)) if err := ts.DeleteTablet(ctx, tablet.Alias); err != nil && !topo.IsErrType(err, topo.NoNode) { // We don't want to continue if a DeleteTablet fails for any // reason other than a missing tablet (in which case it's just @@ -241,10 +241,7 @@ func deleteTablet(ctx context.Context, ts *topo.Server, alias *topodatapb.Tablet if _, err := ts.UpdateShardFields(lockCtx, tablet.Keyspace, tablet.Shard, func(si *topo.ShardInfo) error { if !topoproto.TabletAliasEqual(si.PrimaryAlias, alias) { - log.Warningf( - "Deleting primary %v from shard %v/%v but primary in Shard object was %v", - topoproto.TabletAliasString(alias), tablet.Keyspace, tablet.Shard, topoproto.TabletAliasString(si.PrimaryAlias), - ) + log.Warn(fmt.Sprintf("Deleting primary %v from shard %v/%v but primary in Shard object was %v", topoproto.TabletAliasString(alias), tablet.Keyspace, tablet.Shard, topoproto.TabletAliasString(si.PrimaryAlias))) return topo.NewError(topo.NoUpdateNeeded, si.Keyspace()+"/"+si.ShardName()) } @@ -298,12 +295,12 @@ func removeShardCell(ctx context.Context, ts *topo.Server, cell string, keyspace case err == nil: // We have tablets in the shard in this cell. 
if recursive { - log.Infof("Deleting all tablets in cell %v in shard %v/%v", cell, keyspace, shardName) + log.Info(fmt.Sprintf("Deleting all tablets in cell %v in shard %v/%v", cell, keyspace, shardName)) for _, node := range replication.Nodes { // We don't care about scrapping or updating the replication // graph, because we're about to delete the entire replication // graph. - log.Infof("Deleting tablet %v", topoproto.TabletAliasString(node.TabletAlias)) + log.Info(fmt.Sprintf("Deleting tablet %v", topoproto.TabletAliasString(node.TabletAlias))) if err := ts.DeleteTablet(ctx, node.TabletAlias); err != nil && !topo.IsErrType(err, topo.NoNode) { return fmt.Errorf("cannot delete tablet %v: %w", topoproto.TabletAliasString(node.TabletAlias), err) } @@ -328,12 +325,12 @@ func removeShardCell(ctx context.Context, ts *topo.Server, cell string, keyspace return err } - log.Warningf("Cannot get ShardReplication from cell %v; assuming cell topo server is down and forcing removal", cell) + log.Warn(fmt.Sprintf("Cannot get ShardReplication from cell %v; assuming cell topo server is down and forcing removal", cell)) } // Finally, update the shard. - log.Infof("Removing cell %v from SrvKeyspace %v/%v", cell, keyspace, shardName) + log.Info(fmt.Sprintf("Removing cell %v from SrvKeyspace %v/%v", cell, keyspace, shardName)) ctx, unlock, lockErr := ts.LockKeyspace(ctx, keyspace, "Locking keyspace to remove shard from SrvKeyspace") if lockErr != nil { diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index 12bb8bd7e2b..3c5b0658af3 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -858,7 +858,7 @@ func (erp *EmergencyReparenter) findErrantGTIDs( return nil, err } if errantGTIDs != nil { - log.Errorf("skipping %v with GTIDSet:%v because we detected errant GTIDs - %v", candidate, afterStatus.RelayLogPosition.GTIDSet, errantGTIDs) + log.Error(fmt.Sprintf("skipping %v with GTIDSet:%v because we detected errant GTIDs - %v", candidate, afterStatus.RelayLogPosition.GTIDSet, errantGTIDs)) continue } maxLenPositions = append(maxLenPositions, candidatePositions.Combined) @@ -892,7 +892,7 @@ func (erp *EmergencyReparenter) findErrantGTIDs( return nil, err } if errantGTIDs != nil { - log.Errorf("skipping %v with GTIDSet:%v because we detected errant GTIDs - %v", alias, validCandidates[alias], errantGTIDs) + log.Error(fmt.Sprintf("skipping %v with GTIDSet:%v because we detected errant GTIDs - %v", alias, validCandidates[alias], errantGTIDs)) continue } updatedValidCandidates[alias] = validCandidates[alias] diff --git a/go/vt/vtctl/reparentutil/policy/durability.go b/go/vt/vtctl/reparentutil/policy/durability.go index 8980036e3f5..06acbd677ef 100644 --- a/go/vt/vtctl/reparentutil/policy/durability.go +++ b/go/vt/vtctl/reparentutil/policy/durability.go @@ -18,6 +18,7 @@ package policy import ( "fmt" + "os" "vitess.io/vitess/go/vt/log" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -95,7 +96,8 @@ type Durabler interface { func RegisterDurability(name string, newDurablerFunc NewDurabler) { if durabilityPolicies[name] != nil { - log.Fatalf("durability policy %v already registered", name) + log.Error(fmt.Sprintf("durability policy %v already registered", name)) + os.Exit(1) } durabilityPolicies[name] = newDurablerFunc } diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index e13a3e0dd44..4777499589d 100644 --- 
a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -18,6 +18,7 @@ package reparentutil import ( "context" + "fmt" "sync" "time" @@ -186,7 +187,7 @@ func SetReplicationSource(ctx context.Context, ts *topo.Server, tmc tmclient.Tab if err != nil { return err } - log.Infof("Getting a new durability policy for %v", durabilityName) + log.Info(fmt.Sprintf("Getting a new durability policy for %v", durabilityName)) durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return err diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index dde181c7112..c323ff86866 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -262,7 +262,7 @@ func ShardReplicationStatuses(ctx context.Context, ts *topo.Server, tmc tmclient } tablets := maps.Values(tabletMap) - log.Infof("Gathering tablet replication status for: %v", tablets) + log.Info(fmt.Sprintf("Gathering tablet replication status for: %v", tablets)) wg := sync.WaitGroup{} rec := concurrency.AllErrorRecorder{} result := make([]*replicationdatapb.Status, len(tablets)) diff --git a/go/vt/vtctl/vdiff2.go b/go/vt/vtctl/vdiff2.go index f5be01cd07f..548b691d315 100644 --- a/go/vt/vtctl/vdiff2.go +++ b/go/vt/vtctl/vdiff2.go @@ -169,7 +169,7 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F output, err := wr.VDiff2(ctx, keyspace, workflowName, action, actionArg, vdiffUUID.String(), options) if err != nil { - log.Errorf("vdiff2 returning with error: %v", err) + log.Error(fmt.Sprintf("vdiff2 returning with error: %v", err)) return err } diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 2caaaf8b8e0..76cde0bf5ba 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -2205,7 +2205,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub wrapError := func(wf *wrangler.VReplicationWorkflow, err error) error { wr.Logger().Errorf("\n%s\n", err.Error()) - log.Infof("In wrapError wf is %+v", wf) + log.Info(fmt.Sprintf("In wrapError wf is %+v", wf)) wr.Logger().Infof("Workflow Status: %s\n", wf.CurrentState()) if wf.Exists() { printDetails() @@ -2331,7 +2331,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub vrwp.ShardSubset = *shards wf, err := wr.NewVReplicationWorkflow(ctx, workflowType, vrwp) if err != nil { - log.Warningf("NewVReplicationWorkflow returned error %+v", wf) + log.Warn(fmt.Sprintf("NewVReplicationWorkflow returned error %+v", wf)) return err } if !wf.Exists() && action != vReplicationWorkflowActionCreate { @@ -2340,8 +2340,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub if len(vrwp.ShardSubset) > 0 { if workflowType == wrangler.MoveTablesWorkflow && action != vReplicationWorkflowActionCreate && wf.IsPartialMigration() { - log.Infof("Subset of shards: %s have been specified for keyspace %s, workflow %s, for action %s", - vrwp.ShardSubset, target, workflowName, action) + log.Info(fmt.Sprintf("Subset of shards: %s have been specified for keyspace %s, workflow %s, for action %s", vrwp.ShardSubset, target, workflowName, action)) } else { return errors.New("The --shards option can only be specified for existing Partial MoveTables workflows") } @@ -2493,7 +2492,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub return fmt.Errorf("found unsupported action %s", originalAction) } if err != nil { - log.Warningf(" %s error: %v", originalAction, wf) + 
log.Warn(fmt.Sprintf(" %s error: %v", originalAction, wf)) return wrapError(wf, err) } if *dryRun { @@ -2613,7 +2612,7 @@ func commandVDiff(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.Fl _, err = wr.VDiff(ctx, keyspace, workflow, *sourceCell, *targetCell, *tabletTypesStr, *filteredReplicationWaitTime, *format, *maxRows, *tables, *debugQuery, *onlyPks, *maxExtraRowsToCompare) if err != nil { - log.Errorf("vdiff returning with error: %v", err) + log.Error(fmt.Sprintf("vdiff returning with error: %v", err)) if strings.Contains(err.Error(), "context deadline exceeded") { return errors.New("vdiff timed out: you may want to increase it with the flag --filtered_replication_wait_time=") } @@ -3736,7 +3735,7 @@ func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag return errors.New(usage) } if len(*shards) > 0 { - log.Infof("Subset of shards specified: %d, %v", len(*shards), strings.Join(*shards, ",")) + log.Info(fmt.Sprintf("Subset of shards specified: %d, %v", len(*shards), strings.Join(*shards, ","))) } keyspace := subFlags.Arg(0) action := strings.ToLower(subFlags.Arg(1)) diff --git a/go/vt/vtctl/vtctlclient/interface.go b/go/vt/vtctl/vtctlclient/interface.go index c314db4c07c..a8e162b4f52 100644 --- a/go/vt/vtctl/vtctlclient/interface.go +++ b/go/vt/vtctl/vtctlclient/interface.go @@ -20,6 +20,7 @@ package vtctlclient import ( "context" "fmt" + "os" "time" "github.com/spf13/pflag" @@ -64,7 +65,8 @@ var factories = make(map[string]Factory) // RegisterFactory allows a client implementation to register itself. func RegisterFactory(name string, factory Factory) { if _, ok := factories[name]; ok { - log.Fatalf("RegisterFactory: %s already exists", name) + log.Error(fmt.Sprintf("RegisterFactory: %s already exists", name)) + os.Exit(1) } factories[name] = factory } diff --git a/go/vt/vtctl/workflow/common/utils.go b/go/vt/vtctl/workflow/common/utils.go index 86fa33bc191..95a7091168a 100644 --- a/go/vt/vtctl/workflow/common/utils.go +++ b/go/vt/vtctl/workflow/common/utils.go @@ -49,7 +49,6 @@ func GetShards(ctx context.Context, ts *topo.Server, keyspace string, shardSubse return nil, fmt.Errorf("shard %s not found in keyspace %s", shard, keyspace) } } - log.Infof("Selecting subset of shards in keyspace %s: %d from %d :: %+v", - keyspace, len(shardSubset), len(allShards), shardSubset) + log.Info(fmt.Sprintf("Selecting subset of shards in keyspace %s: %d from %d :: %+v", keyspace, len(shardSubset), len(allShards), shardSubset)) return shardSubset, nil } diff --git a/go/vt/vtctl/workflow/materializer.go b/go/vt/vtctl/workflow/materializer.go index 3b277997086..28328bfd1c7 100644 --- a/go/vt/vtctl/workflow/materializer.go +++ b/go/vt/vtctl/workflow/materializer.go @@ -346,7 +346,7 @@ func (mz *materializer) deploySchema() error { } mu.Unlock() if err != nil { - log.Errorf("Error getting DDLs of source tables: %s", err.Error()) + log.Error("Error getting DDLs of source tables: " + err.Error()) return err } @@ -447,10 +447,10 @@ func (mz *materializer) deploySchema() error { env := schemadiff.NewEnv(mz.env, mz.env.CollationEnv().DefaultConnectionCharset()) schema, err := schemadiff.NewSchemaFromQueries(env, applyDDLs) if err != nil { - log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) + log.Error(fmt.Sprint(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff"))) } else { applyDDLs = schema.ToQueries() - log.Infof("AtomicCopy used, and schema was normalized via schemadiff. 
%v queries normalized", len(applyDDLs)) + log.Info(fmt.Sprintf("AtomicCopy used, and schema was normalized via schemadiff. %v queries normalized", len(applyDDLs))) } } sql := strings.Join(applyDDLs, ";\n") diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go index 12792dc20db..6c9906d2bef 100644 --- a/go/vt/vtctl/workflow/server.go +++ b/go/vt/vtctl/workflow/server.go @@ -3387,7 +3387,7 @@ func (s *Server) canSwitch(ctx context.Context, ts *trafficSwitcher, maxAllowedR msg := fmt.Sprintf("failed to successfully refresh all tablets in the %s/%s %s shard (%v):\n %v\n", si.Keyspace(), si.ShardName(), stype, err, partialDetails) if partial && ts.force { - log.Warning(msg) + log.Warn(msg) } else { m.Lock() refreshErrors.WriteString(msg) diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go index 98b6e594a4f..ff36627b450 100644 --- a/go/vt/vtctl/workflow/traffic_switcher.go +++ b/go/vt/vtctl/workflow/traffic_switcher.go @@ -480,7 +480,7 @@ func (ts *trafficSwitcher) dropSourceDeniedTables(ctx context.Context) error { msg := fmt.Sprintf("failed to successfully refresh all tablets in the %s/%s source shard (%v):\n %v", source.GetShard().Keyspace(), source.GetShard().ShardName(), err, partialDetails) if ts.force { - log.Warning(msg) + log.Warn(msg) return nil } else { return errors.New(msg) @@ -504,7 +504,7 @@ func (ts *trafficSwitcher) dropTargetDeniedTables(ctx context.Context) error { msg := fmt.Sprintf("failed to successfully refresh all tablets in the %s/%s target shard (%v):\n %v", target.GetShard().Keyspace(), target.GetShard().ShardName(), err, partialDetails) if ts.force { - log.Warning(msg) + log.Warn(msg) return nil } else { return errors.New(msg) @@ -1099,7 +1099,7 @@ func (ts *trafficSwitcher) switchDeniedTables(ctx context.Context, backward bool msg := fmt.Sprintf("failed to successfully refresh all tablets in the %s/%s source shard (%v):\n %v", source.GetShard().Keyspace(), source.GetShard().ShardName(), err, partialDetails) if ts.force { - log.Warning(msg) + log.Warn(msg) return nil } else { return errors.New(msg) @@ -1122,7 +1122,7 @@ func (ts *trafficSwitcher) switchDeniedTables(ctx context.Context, backward bool msg := fmt.Sprintf("failed to successfully refresh all tablets in the %s/%s target shard (%v):\n %v", target.GetShard().Keyspace(), target.GetShard().ShardName(), err, partialDetails) if ts.force { - log.Warning(msg) + log.Warn(msg) return nil } else { return errors.New(msg) diff --git a/go/vt/vtctl/workflow/utils.go b/go/vt/vtctl/workflow/utils.go index 70ab3c04dae..22dd09355c6 100644 --- a/go/vt/vtctl/workflow/utils.go +++ b/go/vt/vtctl/workflow/utils.go @@ -82,7 +82,7 @@ func getTablesInKeyspace(ctx context.Context, ts *topo.Server, tmc tmclient.Tabl if err != nil { return nil, err } - log.Infof("got table schemas: %+v from source primary %v.", schema, primary) + log.Info(fmt.Sprintf("got table schemas: %+v from source primary %v.", schema, primary)) var sourceTables []string for _, td := range schema.TableDefinitions { @@ -155,7 +155,7 @@ func createDefaultShardRoutingRules(ctx context.Context, ms *vtctldatapb.Materia if srr[fromSource] == "" && srr[fromTarget] == "" { srr[fromTarget] = ms.SourceKeyspace changed = true - log.Infof("Added default shard routing rule from %q to %q", fromTarget, fromSource) + log.Info(fmt.Sprintf("Added default shard routing rule from %q to %q", fromTarget, fromSource)) } } if changed { diff --git a/go/vt/vtctl/workflow/utils_test.go b/go/vt/vtctl/workflow/utils_test.go 
index 5416c471f6b..bbcc44be2a8 100644 --- a/go/vt/vtctl/workflow/utils_test.go +++ b/go/vt/vtctl/workflow/utils_test.go @@ -134,7 +134,7 @@ func TestConcurrentKeyspaceRoutingRulesUpdates(t *testing.T) { }) etcdServerAddress := startEtcd(t) - log.Infof("Successfully started etcd server at %s", etcdServerAddress) + log.Info("Successfully started etcd server at " + etcdServerAddress) topoName := "etcd2_test" // "etcd2" is already registered on init(), so using a different name topo.RegisterFactory(topoName, etcd2topo.Factory{}) ts, err := topo.OpenServer(topoName, etcdServerAddress, "/vitess") @@ -154,7 +154,7 @@ func testConcurrentKeyspaceRoutingRulesUpdates(t *testing.T, ctx context.Context shortCtx, cancel := context.WithTimeout(ctx, duration) defer cancel() - log.Infof("Starting %d concurrent updates", concurrency) + log.Info(fmt.Sprintf("Starting %d concurrent updates", concurrency)) for i := range concurrency { go func(id int) { defer wg.Done() @@ -169,7 +169,7 @@ func testConcurrentKeyspaceRoutingRulesUpdates(t *testing.T, ctx context.Context }(i) } wg.Wait() - log.Infof("All updates completed") + log.Info("All updates completed") rules, err := ts.GetKeyspaceRoutingRules(ctx) require.NoError(t, err) require.LessOrEqual(t, concurrency, len(rules.Rules)) @@ -239,7 +239,7 @@ func startEtcd(t *testing.T) string { } t.Cleanup(func() { if cmd.Process.Kill() != nil { - log.Infof("cmd.Process.Kill() failed : %v", err) + log.Info(fmt.Sprintf("cmd.Process.Kill() failed : %v", err)) } }) diff --git a/go/vt/vtctl/workflow/vexec/query_plan.go b/go/vt/vtctl/workflow/vexec/query_plan.go index 52e7ee00b61..85c050605de 100644 --- a/go/vt/vtctl/workflow/vexec/query_plan.go +++ b/go/vt/vtctl/workflow/vexec/query_plan.go @@ -61,7 +61,7 @@ func (qp *FixedQueryPlan) Execute(ctx context.Context, target *topo.TabletInfo) defer func() { if err != nil { - log.Warningf("Result on %v: %v", targetAliasStr, err) + log.Warn(fmt.Sprintf("Result on %v: %v", targetAliasStr, err)) return } }() @@ -147,7 +147,7 @@ func (qp *PerTargetQueryPlan) Execute(ctx context.Context, target *topo.TabletIn defer func() { if err != nil { - log.Warningf("Result on %v: %v", targetAliasStr, err) + log.Warn(fmt.Sprintf("Result on %v: %v", targetAliasStr, err)) return } }() diff --git a/go/vt/vtctl/workflow/vexec/vexec.go b/go/vt/vtctl/workflow/vexec/vexec.go index 9e6fbe96940..fd4ec605f9a 100644 --- a/go/vt/vtctl/workflow/vexec/vexec.go +++ b/go/vt/vtctl/workflow/vexec/vexec.go @@ -199,7 +199,7 @@ func (vx *VExec) execCallback(ctx context.Context, callback func(context.Context allErrors.RecordError(err) } else { if qr == nil { - log.Infof("Callback returned nil result for tablet %s-%s", primary.Alias.Cell, primary.Alias.Uid) + log.Info(fmt.Sprintf("Callback returned nil result for tablet %s-%d", primary.Alias.Cell, primary.Alias.Uid)) return // no result } mu.Lock() diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go index 06870c37e4a..d9b01c9f652 100644 --- a/go/vt/vtctld/api.go +++ b/go/vt/vtctld/api.go @@ -115,7 +115,7 @@ func newTabletWithURL(t *topodatapb.Tablet) *TabletWithURL { func httpErrorf(w http.ResponseWriter, r *http.Request, format string, args ...any) { errMsg := fmt.Sprintf(format, args...) 
- log.Errorf("HTTP error on %v: %v, request: %#v", r.URL.Path, errMsg, r) + log.Error(fmt.Sprintf("HTTP error on %v: %v, request: %#v", r.URL.Path, errMsg, r)) http.Error(w, errMsg, http.StatusInternalServerError) } diff --git a/go/vt/vtctld/tablet_data.go b/go/vt/vtctld/tablet_data.go index 90c5a24d794..73b32aacd26 100644 --- a/go/vt/vtctld/tablet_data.go +++ b/go/vt/vtctld/tablet_data.go @@ -18,6 +18,7 @@ package vtctld import ( "context" + "fmt" "io" "sync" "time" @@ -164,9 +165,9 @@ func (thc *tabletHealthCache) Get(ctx context.Context, tabletAlias *topodatapb.T thc.tabletMap[tabletAliasStr] = th go func() { - log.Infof("starting health stream for tablet %v", tabletAlias) + log.Info(fmt.Sprintf("starting health stream for tablet %v", tabletAlias)) err := th.stream(context.Background(), thc.ts, tabletAlias) - log.Infof("tablet %v health stream ended, error: %v", tabletAlias, err) + log.Info(fmt.Sprintf("tablet %v health stream ended, error: %v", tabletAlias, err)) thc.delete(tabletAliasStr) }() } diff --git a/go/vt/vterrors/last_error.go b/go/vt/vterrors/last_error.go index 1f051825041..5c97cc33170 100644 --- a/go/vt/vterrors/last_error.go +++ b/go/vt/vterrors/last_error.go @@ -17,6 +17,7 @@ limitations under the License. package vterrors import ( + "fmt" "sync" "time" @@ -61,7 +62,7 @@ func (le *LastError) Record(err error) { // same error seen if time.Since(le.lastSeen) > le.maxTimeInError { // reset firstSeen, since it has been long enough since the last time we saw this error - log.Infof("Resetting firstSeen for %s, since it is too long since the last one", le.name) + log.Info(fmt.Sprintf("Resetting firstSeen for %s, since it is too long since the last one", le.name)) le.firstSeen = time.Now() } le.lastSeen = time.Now() @@ -82,7 +83,6 @@ func (le *LastError) ShouldRetry() bool { // within the max time range return true } - log.Errorf("%s: the same error was encountered continuously since %s, it is now assumed to be unrecoverable; any affected operations will need to be manually restarted once error '%s' has been addressed", - le.name, le.firstSeen.UTC(), le.err) + log.Error(fmt.Sprintf("%s: the same error was encountered continuously since %s, it is now assumed to be unrecoverable; any affected operations will need to be manually restarted once error '%s' has been addressed", le.name, le.firstSeen.UTC(), le.err)) return false } diff --git a/go/vt/vtexplain/vtexplain.go b/go/vt/vtexplain/vtexplain.go index 64f1ca3cea1..c7d1c340f64 100644 --- a/go/vt/vtexplain/vtexplain.go +++ b/go/vt/vtexplain/vtexplain.go @@ -259,21 +259,21 @@ func parseSchema(sqlSchema string, opts *Options, parser *sqlparser.Parser) ([]s } else { stmt, err = parser.Parse(sql) if err != nil { - log.Errorf("ERROR: failed to parse sql: %s, got error: %v", sql, err) + log.Error(fmt.Sprintf("ERROR: failed to parse sql: %s, got error: %v", sql, err)) continue } } ddl, ok := stmt.(sqlparser.DDLStatement) if !ok { - log.Infof("ignoring non-DDL statement: %s", sql) + log.Info("ignoring non-DDL statement: " + sql) continue } if ddl.GetAction() != sqlparser.CreateDDLAction { - log.Infof("ignoring %s table statement", ddl.GetAction().ToString()) + log.Info(fmt.Sprintf("ignoring %s table statement", ddl.GetAction().ToString())) continue } if ddl.GetTableSpec() == nil && ddl.GetOptLike() == nil { - log.Errorf("invalid create table statement: %s", sql) + log.Error("invalid create table statement: " + sql) continue } parsedDDLs = append(parsedDDLs, ddl) diff --git a/go/vt/vtexplain/vtexplain_vtgate.go 
b/go/vt/vtexplain/vtexplain_vtgate.go index 4be0731953c..b3ad91ba33a 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -176,7 +176,7 @@ func (vte *VTExplain) buildTopology(ctx context.Context, ts *topo.Server, opts * continue } hostname := fmt.Sprintf("%s/%s", ks, shard.Name) - log.Infof("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name) + log.Info(fmt.Sprintf("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name)) tablet := vte.healthCheck.AddFakeTablet(Cell, hostname, 1, ks, shard.Name, topodatapb.TabletType_PRIMARY, true, 1, nil, func(t *topodatapb.Tablet) queryservice.QueryService { return vte.newTablet(ctx, vte.env, opts, t, ts, srvTopoCounts) diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go index 1e54cf2760b..931e2a1e1f2 100644 --- a/go/vt/vtexplain/vtexplain_vttablet.go +++ b/go/vt/vtexplain/vtexplain_vttablet.go @@ -837,7 +837,7 @@ func inferColTypeFromExpr(node sqlparser.Expr, tableColumnMap map[sqlparser.Iden for _, colTypeMap := range tableColumnMap { if colTypeMap[col] != querypb.Type_NULL_TYPE { if colType != querypb.Type_NULL_TYPE { - log.Errorf("vtexplain: ambiguous column %s", col) + log.Error("vtexplain: ambiguous column " + col) return colNames, colTypes } @@ -846,7 +846,7 @@ func inferColTypeFromExpr(node sqlparser.Expr, tableColumnMap map[sqlparser.Iden } if colType == querypb.Type_NULL_TYPE { - log.Errorf("vtexplain: invalid column %s.%s, tableColumnMap +%v", node.Qualifier.Name, col, tableColumnMap) + log.Error(fmt.Sprintf("vtexplain: invalid column %s.%s, tableColumnMap +%v", node.Qualifier.Name, col, tableColumnMap)) } colNames = append(colNames, col) @@ -858,7 +858,7 @@ func inferColTypeFromExpr(node sqlparser.Expr, tableColumnMap map[sqlparser.Iden colType := colTypeMap[col] if colType == querypb.Type_NULL_TYPE { - log.Errorf("vtexplain: invalid column %s.%s, tableColumnMap +%v", node.Qualifier.Name, col, tableColumnMap) + log.Error(fmt.Sprintf("vtexplain: invalid column %s.%s, tableColumnMap +%v", node.Qualifier.Name, col, tableColumnMap)) } colNames = append(colNames, col) @@ -892,7 +892,7 @@ func inferColTypeFromExpr(node sqlparser.Expr, tableColumnMap map[sqlparser.Iden case sqlparser.DecimalVal: colTypes = append(colTypes, querypb.Type_DECIMAL) default: - log.Errorf("vtexplain: unsupported sql value %s", sqlparser.String(node)) + log.Error("vtexplain: unsupported sql value " + sqlparser.String(node)) } case *sqlparser.CaseExpr: colNames, colTypes = inferColTypeFromExpr(node.Whens[0].Val, tableColumnMap, colNames, colTypes) @@ -903,7 +903,7 @@ func inferColTypeFromExpr(node sqlparser.Expr, tableColumnMap map[sqlparser.Iden colNames = append(colNames, sqlparser.String(node)) colTypes = append(colTypes, querypb.Type_INT64) default: - log.Errorf("vtexplain: unsupported select expression type +%v node %s", reflect.TypeOf(node), sqlparser.String(node)) + log.Error(fmt.Sprintf("vtexplain: unsupported select expression type +%v node %s", reflect.TypeOf(node), sqlparser.String(node))) } return colNames, colTypes diff --git a/go/vt/vtgate/api.go b/go/vt/vtgate/api.go index d4d7d143b21..d8e4d22a166 100644 --- a/go/vt/vtgate/api.go +++ b/go/vt/vtgate/api.go @@ -37,7 +37,7 @@ const ( func httpErrorf(w http.ResponseWriter, r *http.Request, format string, args ...any) { errMsg := fmt.Sprintf(format, args...) 
- log.Errorf("HTTP error on %v: %v, request: %#v", r.URL.Path, errMsg, r) + log.Error(fmt.Sprintf("HTTP error on %v: %v, request: %#v", r.URL.Path, errMsg, r)) http.Error(w, errMsg, http.StatusInternalServerError) } diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index 35dd573556b..11c41a7e550 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -28,6 +28,7 @@ package buffer import ( "context" + "fmt" "strings" "sync" @@ -86,10 +87,10 @@ func init() { // To simplify things, we've merged the detection for different MySQL flavors // in one function. Supported flavors: MariaDB, MySQL func CausedByFailover(err error) bool { - log.V(2).Infof("Checking error (type: %T) if it is caused by a failover. err: %v", err, err) + log.Debug(fmt.Sprintf("Checking error (type: %T) if it is caused by a failover. err: %v", err, err)) reason, isFailover := isFailoverError(err) if isFailover { - log.Infof("CausedByFailover signalling failover for reason: %s", reason) + log.Info("CausedByFailover signalling failover for reason: " + reason) } return isFailover } @@ -208,7 +209,7 @@ func (b *Buffer) WaitForFailoverEnd(ctx context.Context, keyspace, shard string, } func (b *Buffer) HandleKeyspaceEvent(ksevent *discovery.KeyspaceEvent) { - log.Infof("Keyspace Event received for keyspace %v", ksevent.Keyspace) + log.Info(fmt.Sprintf("Keyspace Event received for keyspace %v", ksevent.Keyspace)) for _, shard := range ksevent.Shards { sb := b.getOrCreateBuffer(shard.Target.Keyspace, shard.Target.Shard) if sb != nil { diff --git a/go/vt/vtgate/buffer/flags.go b/go/vt/vtgate/buffer/flags.go index eddb764d2ba..570f7568cdb 100644 --- a/go/vt/vtgate/buffer/flags.go +++ b/go/vt/vtgate/buffer/flags.go @@ -19,6 +19,7 @@ package buffer import ( "errors" "fmt" + "os" "strings" "time" @@ -168,17 +169,18 @@ func NewDefaultConfig() *Config { func NewConfigFromFlags() *Config { if err := verifyFlags(); err != nil { - log.Fatalf("Invalid buffer configuration: %v", err) + log.Error(fmt.Sprintf("Invalid buffer configuration: %v", err)) + os.Exit(1) } bufferSizeStat.Set(int64(bufferSize)) keyspaces, shards := keyspaceShardsToSets(bufferKeyspaceShards) if bufferEnabledDryRun { - log.Infof("vtgate buffer in dry-run mode enabled for all requests. Dry-run bufferings will log failovers but not buffer requests.") + log.Info("vtgate buffer in dry-run mode enabled for all requests. Dry-run bufferings will log failovers but not buffer requests.") } if bufferEnabled { - log.Infof("vtgate buffer enabled. PRIMARY requests will be buffered during detected failovers.") + log.Info("vtgate buffer enabled. PRIMARY requests will be buffered during detected failovers.") // Log a second line if it's only enabled for some keyspaces or shards. header := "Buffering limited to configured " @@ -198,12 +200,12 @@ func NewConfigFromFlags() *Config { if bufferEnabledDryRun { dryRunOverride = " Dry-run mode is overridden for these entries and actual buffering will take place." 
} - log.Infof("%v.%v", limited, dryRunOverride) + log.Info(fmt.Sprintf("%v.%v", limited, dryRunOverride)) } } if !bufferEnabledDryRun && !bufferEnabled { - log.Infof("vtgate buffer not enabled.") + log.Info("vtgate buffer not enabled.") } return &Config{ diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index a4fe4c1bafc..a998ce7bbe7 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -286,14 +286,12 @@ func (sb *shardBuffer) startBufferingLocked(ctx context.Context, kev *discovery. msg = "Dry-run: Would have started buffering" } starts.Add(sb.statsKey, 1) - log.Infof("%v for shard: %s (window: %v, size: %v, max failover duration: %v) (A failover was detected by this seen error: %v.)", - msg, + log.Info(fmt.Sprintf("%v for shard: %s (window: %v, size: %v, max failover duration: %v) (A failover was detected by this seen error: %v.)", msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), sb.buf.config.Window, sb.buf.config.Size, sb.buf.config.MaxFailoverDuration, - errorsanitizer.NormalizeError(err.Error()), - ) + errorsanitizer.NormalizeError(err.Error()))) return true } @@ -304,7 +302,7 @@ func (sb *shardBuffer) startBufferingLocked(ctx context.Context, kev *discovery. // that "sb.mu" must be locked before calling the method. func (sb *shardBuffer) logErrorIfStateNotLocked(state bufferState) { if sb.state != state { - log.Errorf("BUG: Buffer state should be '%v' and not '%v'. Full state of buffer object: %#v Stacktrace:\n%s", state, sb.state, sb, debug.Stack()) + log.Error(fmt.Sprintf("BUG: Buffer state should be '%v' and not '%v'. Full state of buffer object: %#v Stacktrace:\n%s", state, sb.state, sb, debug.Stack())) } } @@ -488,8 +486,8 @@ func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillS sb.mu.Lock() defer sb.mu.Unlock() - log.V(2).Infof("disruption in shard %s/%s resolved (serving: %v), movetable state %#v", - sb.keyspace, sb.shard, stillServing, keyspaceEvent.MoveTablesState) + log.Debug(fmt.Sprintf("disruption in shard %s/%s resolved (serving: %v), movetable state %#v", + sb.keyspace, sb.shard, stillServing, keyspaceEvent.MoveTablesState)) if !topoproto.TabletAliasEqual(alias, sb.currentPrimary) { if sb.currentPrimary != nil { @@ -560,8 +558,7 @@ func (sb *shardBuffer) stopBufferingLocked(reason stopReason, details string) { if sb.mode == bufferModeDryRun { msg = "Dry-run: Would have stopped buffering" } - log.Infof("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.", - msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d.Seconds(), details, len(q)) + log.Info(fmt.Sprintf("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.", msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d.Seconds(), details, len(q))) var clientEntryError error if reason == stopShardMissing { @@ -620,7 +617,7 @@ func (sb *shardBuffer) drain(q []*entry, err error) { wg.Wait() d := sb.timeNow().Sub(start) - log.Infof("Draining finished for shard: %s Took: %v for: %d requests.", topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d, len(q)) + log.Info(fmt.Sprintf("Draining finished for shard: %s Took: %v for: %d requests.", topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d, len(q))) requestsDrained.Add(sb.statsKey, int64(len(q))) // Draining is done. Change state from "draining" to "idle". 
diff --git a/go/vt/vtgate/debug_2pc.go b/go/vt/vtgate/debug_2pc.go index dc052df33d6..2ba5be0dd33 100644 --- a/go/vt/vtgate/debug_2pc.go +++ b/go/vt/vtgate/debug_2pc.go @@ -38,29 +38,29 @@ func checkTestFailure(ctx context.Context, expectCaller string, target *querypb. } switch callerID.Principal { case "TRCreated_FailNow": - log.Errorf("Fail After TR created") + log.Error("Fail After TR created") // no commit decision is made. Transaction should be a rolled back. return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Fail After TR created") case "RMPrepare_-40_FailNow": if target.Shard != "-40" { return nil } - log.Errorf("Fail During RM prepare") + log.Error("Fail During RM prepare") // no commit decision is made. Transaction should be a rolled back. return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Fail During RM prepare") case "RMPrepared_FailNow": - log.Errorf("Fail After RM prepared") + log.Error("Fail After RM prepared") // no commit decision is made. Transaction should be a rolled back. return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Fail After RM prepared") case "MMCommitted_FailNow": - log.Errorf("Fail After MM commit") + log.Error("Fail After MM commit") // commit decision is made. Transaction should be committed. return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Fail After MM commit") case "RMCommit_-40_FailNow": if target.Shard != "-40" { return nil } - log.Errorf("Fail During RM commit") + log.Error("Fail During RM commit") // commit decision is made. Transaction should be a committed. return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Fail During RM commit") default: diff --git a/go/vt/vtgate/debugenv.go b/go/vt/vtgate/debugenv.go index 7ab5a56b617..f4cd21d5818 100644 --- a/go/vt/vtgate/debugenv.go +++ b/go/vt/vtgate/debugenv.go @@ -163,7 +163,7 @@ func debugEnvHandler(vtg *VTGate, w http.ResponseWriter, r *http.Request) { w.Write(debugEnvHeader) for _, v := range vars { if err := debugEnvRow.Execute(w, v); err != nil { - log.Errorf("debugenv: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("debugenv: couldn't execute template: %v", err)) } } w.Write(endTable) diff --git a/go/vt/vtgate/endtoend/deletetest/delete_test.go b/go/vt/vtgate/endtoend/deletetest/delete_test.go index a1d6811bcc7..0859342d02f 100644 --- a/go/vt/vtgate/endtoend/deletetest/delete_test.go +++ b/go/vt/vtgate/endtoend/deletetest/delete_test.go @@ -141,7 +141,7 @@ func TestMain(m *testing.M) { fmt.Fprintf(os.Stderr, "%v\n", err) // log error if err := cluster.TearDown(); err != nil { - log.Errorf("cluster.TearDown() did not work: ", err) + log.Error(fmt.Sprintf("cluster.TearDown() did not work: %v", err)) } return 1 } diff --git a/go/vt/vtgate/endtoend/update/lookup_unique_test.go b/go/vt/vtgate/endtoend/update/lookup_unique_test.go index 8268e7036bf..11b71cb2153 100644 --- a/go/vt/vtgate/endtoend/update/lookup_unique_test.go +++ b/go/vt/vtgate/endtoend/update/lookup_unique_test.go @@ -159,7 +159,7 @@ func TestMain(m *testing.M) { fmt.Fprintf(os.Stderr, "%v\n", err) // log error if err := cluster.TearDown(); err != nil { - log.Errorf("cluster.TearDown() did not work: ", err) + log.Error(fmt.Sprintf("cluster.TearDown() did not work: %v", err)) } return 1 } diff --git a/go/vt/vtgate/endtoend/vstream_test.go b/go/vt/vtgate/endtoend/vstream_test.go index 4bcfc3c2b56..4eaffb7fb2b 100644 --- a/go/vt/vtgate/endtoend/vstream_test.go +++ b/go/vt/vtgate/endtoend/vstream_test.go @@ -250,10 +250,10 @@ func TestVStreamCopyBasic(t *testing.T) { t.Fatalf("len(events)=%v are not expected\n", len(evs)) } case io.EOF: - 
log.Infof("stream ended\n") + log.Info("stream ended\n") cancel() default: - log.Errorf("Returned err %v", err) + log.Error(fmt.Sprintf("Returned err %v", err)) t.Fatalf("remote error: %v\n", err) } } @@ -373,10 +373,10 @@ func TestVStreamCopyUnspecifiedShardGtid(t *testing.T) { require.FailNow(t, fmt.Sprintf("len(events)=%d are not expected\n", len(evs))) } case io.EOF: - log.Infof("stream ended\n") + log.Info("stream ended\n") cancel() default: - log.Errorf("Returned err %v", err) + log.Error(fmt.Sprintf("Returned err %v", err)) require.FailNow(t, "remote error: %v\n", err) } } @@ -605,10 +605,10 @@ func TestVStreamCopyResume(t *testing.T) { return } case io.EOF: - log.Infof("stream ended\n") + log.Info("stream ended\n") cancel() default: - log.Errorf("Returned err %v", err) + log.Error(fmt.Sprintf("Returned err %v", err)) t.Fatalf("remote error: %v\n", err) } } @@ -659,10 +659,10 @@ func TestVStreamCurrent(t *testing.T) { return } case io.EOF: - log.Infof("stream ended\n") + log.Info("stream ended\n") cancel() default: - log.Errorf("Returned err %v", err) + log.Error(fmt.Sprintf("Returned err %v", err)) t.Fatalf("remote error: %v\n", err) } } @@ -761,10 +761,10 @@ func TestVStreamSharded(t *testing.T) { return } case io.EOF: - log.Infof("stream ended\n") + log.Info("stream ended\n") cancel() default: - log.Errorf("Returned err %v", err) + log.Error(fmt.Sprintf("Returned err %v", err)) t.Fatalf("remote error: %v\n", err) } } @@ -915,7 +915,7 @@ func printEvents(evs []*binlogdatapb.VEvent) { } s += sSb911.String() s += "===END===" + "\n" - log.Infof("%s", s) + log.Info(s) } // Sort the VEvents by the first row change's after value bytes primarily, with diff --git a/go/vt/vtgate/engine/dbddl.go b/go/vt/vtgate/engine/dbddl.go index cb2b859e08e..f23fb139a23 100644 --- a/go/vt/vtgate/engine/dbddl.go +++ b/go/vt/vtgate/engine/dbddl.go @@ -18,6 +18,7 @@ package engine import ( "context" + "fmt" "strings" "time" @@ -89,7 +90,7 @@ func (c *DBDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st name := vcursor.GetDBDDLPluginName() plugin, ok := databaseCreatorPlugins[name] if !ok { - log.Errorf("'%s' database ddl plugin is not registered. Falling back to default plugin", name) + log.Error(fmt.Sprintf("'%s' database ddl plugin is not registered. 
Falling back to default plugin", name)) plugin = databaseCreatorPlugins[defaultDBDDLPlugin] } diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index bc4142db79b..137d6332fc6 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -527,12 +527,12 @@ func (route *Route) executeWarmingReplicaRead(ctx context.Context, vcursor VCurs _, errs := replicaVCursor.ExecuteMultiShard(ctx, route, rss, queries, false /*rollbackOnError*/, false /*canAutocommit*/, route.FetchLastInsertID) if len(errs) > 0 { - log.Warningf("Failed to execute warming replica read: %v", errs) + log.Warn(fmt.Sprintf("Failed to execute warming replica read: %v", errs)) } else { replicaWarmingReadsMirrored.Add([]string{route.Keyspace.Name}, 1) } }(replicaVCursor) default: - log.Warning("Failed to execute warming replica read as pool is full") + log.Warn("Failed to execute warming replica read as pool is full") } } diff --git a/go/vt/vtgate/engine/routing.go b/go/vt/vtgate/engine/routing.go index 222aeada2bf..f362c5868c6 100644 --- a/go/vt/vtgate/engine/routing.go +++ b/go/vt/vtgate/engine/routing.go @@ -19,6 +19,7 @@ package engine import ( "context" "encoding/json" + "fmt" "maps" "strconv" @@ -261,7 +262,7 @@ func (rp *RoutingParameters) routeInfoSchemaQuery(ctx context.Context, vcursor V // we only have table_schema to work with destinations, _, err := vcursor.ResolveDestinations(ctx, specifiedKS, nil, []key.ShardDestination{key.DestinationAnyShard{}}) if err != nil { - log.Errorf("failed to route information_schema query to keyspace [%s]", specifiedKS) + log.Error(fmt.Sprintf("failed to route information_schema query to keyspace [%s]", specifiedKS)) bindVars[sqltypes.BvSchemaName] = sqltypes.StringBindVariable(specifiedKS) return defaultRoute() } diff --git a/go/vt/vtgate/engine/set.go b/go/vt/vtgate/engine/set.go index d6017cf473d..e548853bed4 100644 --- a/go/vt/vtgate/engine/set.go +++ b/go/vt/vtgate/engine/set.go @@ -230,7 +230,7 @@ func (svci *SysVarCheckAndIgnore) Execute(ctx context.Context, vcursor VCursor, // Rather than returning the error, we will just log the error // as the intention for executing the query it to validate the current setting and eventually ignore it anyways. // There is no benefit of returning the error back to client. - log.Warningf("unable to validate the current settings for '%s': %s", svci.Name, err.Error()) + log.Warn(fmt.Sprintf("unable to validate the current settings for '%s': %s", svci.Name, err.Error())) return nil } return nil diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 9cb78aedf47..59e2613f1bd 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -833,7 +833,7 @@ func (e *Executor) ShowShards(ctx context.Context, filter *sqlparser.ShowFilter, if filter.Filter != nil { // TODO build a query planner I guess? lol that should be fun - log.Infof("SHOW VITESS_SHARDS where clause %+v. Ignoring this (for now).", filter.Filter) + log.Info(fmt.Sprintf("SHOW VITESS_SHARDS where clause %+v. Ignoring this (for now).", filter.Filter)) } return keyspaceFilters, shardFilters @@ -917,7 +917,7 @@ func (e *Executor) ShowTablets(filter *sqlparser.ShowFilter) (*sqltypes.Result, } if filter.Filter != nil { - log.Infof("SHOW VITESS_TABLETS where clause: %+v. Ignoring this (for now).", filter.Filter) + log.Info(fmt.Sprintf("SHOW VITESS_TABLETS where clause: %+v. 
Ignoring this (for now).", filter.Filter)) } return filters @@ -996,7 +996,7 @@ func (e *Executor) ShowVitessReplicationStatus(ctx context.Context, filter *sqlp tabletHostPort := ts.GetTabletHostPort() throttlerStatus, err := getTabletThrottlerStatus(tabletHostPort) if err != nil { - log.Warningf("Could not get throttler status from %s: %v", topoproto.TabletAliasString(ts.Tablet.Alias), err) + log.Warn(fmt.Sprintf("Could not get throttler status from %s: %v", topoproto.TabletAliasString(ts.Tablet.Alias), err)) } replSourceHost := "" @@ -1022,7 +1022,7 @@ func (e *Executor) ShowVitessReplicationStatus(ctx context.Context, filter *sqlp } results, err := e.txConn.tabletGateway.Execute(ctx, nil, ts.Target, sql, nil, 0, 0, nil) if err != nil || results == nil { - log.Warningf("Could not get replication status from %s: %v", tabletHostPort, err) + log.Warn(fmt.Sprintf("Could not get replication status from %s: %v", tabletHostPort, err)) } else if row := results.Named().Row(); row != nil { replSourceHost = row[sourceHostField].ToString() replSourcePort, _ = row[sourcePortField].ToInt64() diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index 88e60b16841..95bb50d2687 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -21,6 +21,7 @@ import ( "context" _ "embed" "fmt" + "os" "strconv" "strings" "testing" @@ -156,7 +157,8 @@ func createExecutorEnvCallback(t testing.TB, eConfig ExecutorConfig, eachShard f return ki.SidecarDbName, nil }) if !created { - log.Fatal("Failed to [re]create a sidecar database identifier cache!") + log.Error("Failed to [re]create a sidecar database identifier cache!") + os.Exit(1) } resolver := newTestResolver(ctx, hc, serv, cell) diff --git a/go/vt/vtgate/grpcvtgateservice/server.go b/go/vt/vtgate/grpcvtgateservice/server.go index b17d92acabe..e5cbffd68f8 100644 --- a/go/vt/vtgate/grpcvtgateservice/server.go +++ b/go/vt/vtgate/grpcvtgateservice/server.go @@ -19,6 +19,7 @@ package grpcvtgateservice import ( "context" + "fmt" "github.com/spf13/pflag" "google.golang.org/grpc" @@ -329,7 +330,7 @@ func (vtg *VTGate) VStream(request *vtgatepb.VStreamRequest, stream vtgateservic }) }) if vtgErr != nil { - log.Infof("VStream grpc error: %v", vtgErr) + log.Info(fmt.Sprintf("VStream grpc error: %v", vtgErr)) } return vterrors.ToGRPC(vtgErr) } diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go index 71b86925d4d..9c46545199a 100644 --- a/go/vt/vtgate/plan_execute.go +++ b/go/vt/vtgate/plan_execute.go @@ -185,7 +185,7 @@ func (e *Executor) newExecute( // Retry if needed. rootCause := vterrors.RootCause(err) if rootCause != nil && strings.Contains(rootCause.Error(), "enforce denied tables") { - log.V(2).Infof("Retry: %d, will retry query %s due to %v", try, sql, err) + log.Debug(fmt.Sprintf("Retry: %d, will retry query %s due to %v", try, sql, err)) if try == 0 { // We are going to retry at least once defer func() { // Prevent any plan cache pollution from queries planned against the wrong keyspace during a MoveTables diff --git a/go/vt/vtgate/planbuilder/locktables.go b/go/vt/vtgate/planbuilder/locktables.go index e8776d13e65..42ef4c7c7d0 100644 --- a/go/vt/vtgate/planbuilder/locktables.go +++ b/go/vt/vtgate/planbuilder/locktables.go @@ -17,6 +17,8 @@ limitations under the License. 
package planbuilder import ( + "fmt" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" @@ -27,7 +29,7 @@ import ( // buildLockPlan plans lock tables statement. func buildLockPlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, _ plancontext.VSchema) (*planResult, error) { - log.Warningf("Lock Tables statement is ignored: %v", stmt) + log.Warn(fmt.Sprintf("Lock Tables statement is ignored: %v", stmt)) return newPlanResult(engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0))), nil } diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go index 634e7e2351b..f386a03faa3 100644 --- a/go/vt/vtgate/planbuilder/simplifier_test.go +++ b/go/vt/vtgate/planbuilder/simplifier_test.go @@ -94,7 +94,7 @@ func TestUnsupportedFile(t *testing.T) { fmt.Println(vschema) for _, tcase := range readJSONTests("unsupported_cases.txt") { t.Run(tcase.Query, func(t *testing.T) { - log.Errorf("unsupported_cases.txt - %s", tcase.Query) + log.Error("unsupported_cases.txt - " + tcase.Query) stmt, reserved, err := sqlparser.NewTestParser().Parse2(tcase.Query) require.NoError(t, err) _, ok := stmt.(sqlparser.TableStatement) @@ -159,13 +159,13 @@ func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema * defer func() { r := recover() if r != nil { - log.Errorf("panicked with %v", r) + log.Error(fmt.Sprintf("panicked with %v", r)) res = true } }() - log.Errorf("trying %s", sqlparser.String(statement)) + log.Error("trying " + sqlparser.String(statement)) _, _ = BuildFromStmt(context.Background(), query, statement, reservedVars, vschema, needs, staticConfig{}) - log.Errorf("did not panic") + log.Error("did not panic") return false } diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 329bcee6d8a..d2e17728061 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -18,6 +18,7 @@ package vtgate import ( "context" + "fmt" "net" "os" "os/signal" @@ -148,7 +149,7 @@ func (vh *vtgateHandler) ComResetConnection(c *mysql.Conn) { } err := vh.vtg.CloseSession(ctx, session) if err != nil { - log.Errorf("Error happened in transaction rollback: %v", err) + log.Error(fmt.Sprintf("Error happened in transaction rollback: %v", err)) } } @@ -200,7 +201,7 @@ func getSpan(ctx context.Context, match []string, newSpan func(context.Context, if err == nil { return span, ctx } - log.Warningf("Unable to parse VT_SPAN_CONTEXT: %s", err.Error()) + log.Warn("Unable to parse VT_SPAN_CONTEXT: " + err.Error()) } span, ctx = newSpan(ctx, label) return span, ctx @@ -550,7 +551,8 @@ type mysqlServer struct { func initTLSConfig(ctx context.Context, srv *mysqlServer, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA string, mysqlServerRequireSecureTransport bool, mysqlMinTLSVersion uint16) error { serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlMinTLSVersion) if err != nil { - log.Exitf("grpcutils.TLSServerConfig failed: %v", err) + log.Error(fmt.Sprintf("grpcutils.TLSServerConfig failed: %v", err)) + os.Exit(1) return err } srv.tcpListener.TLSConfig.Store(serverConfig) @@ -565,7 +567,7 @@ func initTLSConfig(ctx context.Context, srv *mysqlServer, mysqlSslCert, mysqlSsl case <-srv.sigChan: serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlMinTLSVersion) if err != nil { - 
log.Errorf("grpcutils.TLSServerConfig failed: %v", err) + log.Error(fmt.Sprintf("grpcutils.TLSServerConfig failed: %v", err)) } else { log.Info("grpcutils.TLSServerConfig updated") srv.tcpListener.TLSConfig.Store(serverConfig) @@ -598,14 +600,16 @@ func initMySQLProtocol(vtgate *VTGate) *mysqlServer { // Check mysql-default-workload var ok bool if mysqlDefaultWorkload, ok = querypb.ExecuteOptions_Workload_value[strings.ToUpper(mysqlDefaultWorkloadName)]; !ok { - log.Exitf("-mysql-default-workload must be one of [OLTP, OLAP, DBA, UNSPECIFIED]") + log.Error("-mysql-default-workload must be one of [OLTP, OLAP, DBA, UNSPECIFIED]") + os.Exit(1) } switch mysqlTCPVersion { case "tcp", "tcp4", "tcp6": // Valid flag value. default: - log.Exitf("-mysql-tcp-version must be one of [tcp, tcp4, tcp6]") + log.Error("-mysql-tcp-version must be one of [tcp, tcp4, tcp6]") + os.Exit(1) } // Create a Listener. @@ -627,12 +631,14 @@ func initMySQLProtocol(vtgate *VTGate) *mysqlServer { mysqlServerMultiQuery, ) if err != nil { - log.Exitf("mysql.NewListener failed: %v", err) + log.Error(fmt.Sprintf("mysql.NewListener failed: %v", err)) + os.Exit(1) } if mysqlSslCert != "" && mysqlSslKey != "" { tlsVersion, err := vttls.TLSVersionToNumber(mysqlTLSMinVersion) if err != nil { - log.Exitf("mysql.NewListener failed: %v", err) + log.Error(fmt.Sprintf("mysql.NewListener failed: %v", err)) + os.Exit(1) } _ = initTLSConfig(context.Background(), srv, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlServerRequireSecureTransport, tlsVersion) @@ -640,7 +646,7 @@ func initMySQLProtocol(vtgate *VTGate) *mysqlServer { srv.tcpListener.AllowClearTextWithoutTLS.Store(mysqlAllowClearTextWithoutTLS) // Check for the connection threshold if mysqlSlowConnectWarnThreshold != 0 { - log.Infof("setting mysql slow connection threshold to %v", mysqlSlowConnectWarnThreshold) + log.Info(fmt.Sprintf("setting mysql slow connection threshold to %v", mysqlSlowConnectWarnThreshold)) srv.tcpListener.SlowConnectWarnThreshold.Store(mysqlSlowConnectWarnThreshold.Nanoseconds()) } // Start listening for tcp @@ -650,7 +656,8 @@ func initMySQLProtocol(vtgate *VTGate) *mysqlServer { if mysqlServerSocketPath != "" { err = setupUnixSocket(srv, authServer, mysqlServerSocketPath) if err != nil { - log.Exitf("mysql.NewListener failed: %v", err) + log.Error(fmt.Sprintf("mysql.NewListener failed: %v", err)) + os.Exit(1) } } return srv @@ -677,7 +684,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys case nil: return listener, nil case *net.OpError: - log.Warningf("Found existent socket when trying to create new unix mysql listener: %s, attempting to clean up", address) + log.Warn(fmt.Sprintf("Found existent socket when trying to create new unix mysql listener: %s, attempting to clean up", address)) // err.Op should never be different from listen, just being extra careful // in case in the future other errors are returned here if err.Op != "listen" { @@ -685,12 +692,12 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys } _, dialErr := net.Dial("unix", address) if dialErr == nil { - log.Errorf("Existent socket '%s' is still accepting connections, aborting", address) + log.Error(fmt.Sprintf("Existent socket '%s' is still accepting connections, aborting", address)) return nil, err } removeFileErr := os.Remove(address) if removeFileErr != nil { - log.Errorf("Couldn't remove existent socket file: %s", address) + log.Error("Couldn't remove existent socket file: " + address) 
return nil, err } listener, listenerErr := mysql.NewListener( @@ -726,11 +733,11 @@ func (srv *mysqlServer) shutdownMysqlProtocolAndDrain() { stopListener(srv.tcpListener, false) setListenerToNil() // We wait for connected clients to drain by themselves or to run into the onterm timeout - log.Infof("Starting drain loop, waiting for all clients to disconnect") + log.Info("Starting drain loop, waiting for all clients to disconnect") reported := time.Now() for srv.vtgateHandle.numConnections() > 0 { if time.Since(reported) > 2*time.Second { - log.Infof("Still waiting for client connections to drain (%d connected)...", srv.vtgateHandle.numConnections()) + log.Info(fmt.Sprintf("Still waiting for client connections to drain (%d connected)...", srv.vtgateHandle.numConnections())) reported = time.Now() } time.Sleep(1000 * time.Millisecond) @@ -742,12 +749,12 @@ func (srv *mysqlServer) shutdownMysqlProtocolAndDrain() { stopListener(srv.tcpListener, true) setListenerToNil() if busy := srv.vtgateHandle.busyConnections.Load(); busy > 0 { - log.Infof("Waiting for all client connections to be idle (%d active)...", busy) + log.Info(fmt.Sprintf("Waiting for all client connections to be idle (%d active)...", busy)) start := time.Now() reported := start for busy > 0 { if time.Since(reported) > 2*time.Second { - log.Infof("Still waiting for client connections to be idle (%d active)...", busy) + log.Info(fmt.Sprintf("Still waiting for client connections to be idle (%d active)...", busy)) reported = time.Now() } @@ -784,7 +791,7 @@ func (srv *mysqlServer) rollbackAtShutdown() { defer srv.vtgateHandle.mu.Unlock() for id, c := range srv.vtgateHandle.connections { if c != nil { - log.Infof("Rolling back transactions associated with connection ID: %v", id) + log.Info(fmt.Sprintf("Rolling back transactions associated with connection ID: %v", id)) c.Close() } } @@ -795,12 +802,12 @@ func (srv *mysqlServer) rollbackAtShutdown() { // will be non-zero. Give another second for those queries to finish. for range 100 { if srv.vtgateHandle.numConnections() == 0 { - log.Infof("All connections have been rolled back.") + log.Info("All connections have been rolled back.") return } time.Sleep(10 * time.Millisecond) } - log.Errorf("All connections did not go idle. Shutting down anyway.") + log.Error("All connections did not go idle. Shutting down anyway.") } func mysqlSocketPath() string { diff --git a/go/vt/vtgate/querylogz.go b/go/vt/vtgate/querylogz.go index 7df2886be33..808b0fcafd3 100644 --- a/go/vt/vtgate/querylogz.go +++ b/go/vt/vtgate/querylogz.go @@ -17,6 +17,7 @@ limitations under the License. 
package vtgate import ( + "fmt" "net/http" "strconv" "strings" @@ -117,7 +118,7 @@ func querylogzHandler(ch chan *logstats.LogStats, w http.ResponseWriter, r *http Parser *sqlparser.Parser }{stats, level, parser} if err := querylogzTmpl.Execute(w, tmplData); err != nil { - log.Errorf("querylogz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("querylogz: couldn't execute template: %v", err)) } case <-tmr.C: return diff --git a/go/vt/vtgate/queryz.go b/go/vt/vtgate/queryz.go index 540b014e11c..a091e2efdce 100644 --- a/go/vt/vtgate/queryz.go +++ b/go/vt/vtgate/queryz.go @@ -165,7 +165,7 @@ func queryzHandler(e *Executor, w http.ResponseWriter, r *http.Request) { sort.Sort(&sorter) for _, row := range sorter.rows { if err := queryzTmpl.Execute(w, row); err != nil { - log.Errorf("queryz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("queryz: couldn't execute template: %v", err)) } } } diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index e9008ef5be4..7d2626bc717 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -306,7 +306,7 @@ func (stc *ScatterConn) runLockQuery(ctx context.Context, session *econtext.Safe query := &querypb.BoundQuery{Sql: "select 1", BindVariables: nil} _, lockErr := stc.ExecuteLock(ctx, rs, query, session, sqlparser.IsUsedLock) if lockErr != nil { - log.Warningf("Locking heartbeat failed, held locks might be released: %s", lockErr.Error()) + log.Warn("Locking heartbeat failed, held locks might be released: " + lockErr.Error()) } } @@ -736,7 +736,7 @@ func (stc *ScatterConn) multiGoTransaction( } wg.Wait() if pr, ok := panicRecord.Load().(*panicData); ok { - log.Errorf("caught a panic during parallel execution:\n%s", string(pr.trace)) + log.Error("caught a panic during parallel execution:\n" + string(pr.trace)) panic(pr.p) // rethrow the captured panic in the main thread } } diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 8079bcd7836..4a4ae08d2ba 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -17,7 +17,7 @@ limitations under the License. package vtgate import ( - "fmt" + "log/slog" "testing" "github.com/aws/smithy-go/ptr" @@ -274,20 +274,19 @@ func TestExecutePanic(t *testing.T) { Autocommit: false, } - original := log.Errorf - defer func() { - log.Errorf = original - }() - - var logMessage string - log.Errorf = func(format string, args ...any) { - logMessage = fmt.Sprintf(format, args...) - } + handler := log.NewCaptureHandler() + restoreLogger := log.SetLogger(slog.New(handler)) + defer restoreLogger() assert.Panics(t, func() { _, _ = sc.ExecuteMultiShard(ctx, nil, rss, queries, econtext.NewSafeSession(session), true /*autocommit*/, false, nullResultsObserver{}, false) }) - require.Contains(t, logMessage, "(*ScatterConn).multiGoTransaction") + + if record, ok := handler.Last(); ok { + require.Contains(t, record.Message, "(*ScatterConn).multiGoTransaction") + } else { + require.Fail(t, "expected log entry") + } } func TestReservedOnMultiReplica(t *testing.T) { diff --git a/go/vt/vtgate/schema/tracker.go b/go/vt/vtgate/schema/tracker.go index 7661e702d93..fa7ae682912 100644 --- a/go/vt/vtgate/schema/tracker.go +++ b/go/vt/vtgate/schema/tracker.go @@ -18,6 +18,7 @@ package schema import ( "context" + "fmt" "maps" "slices" "strings" @@ -125,7 +126,7 @@ func (t *Tracker) loadTables(conn queryservice.QueryService, target *querypb.Tar if err != nil { return err } - log.Infof("finished loading tables for keyspace %s. 
Found %d tables", target.Keyspace, numTables) + log.Info(fmt.Sprintf("finished loading tables for keyspace %s. Found %d tables", target.Keyspace, numTables)) return nil } @@ -152,7 +153,7 @@ func (t *Tracker) loadViews(conn queryservice.QueryService, target *querypb.Targ if err != nil { return err } - log.Infof("finished loading views for keyspace %s. Found %d views", target.Keyspace, numViews) + log.Info(fmt.Sprintf("finished loading views for keyspace %s. Found %d views", target.Keyspace, numViews)) return nil } @@ -176,11 +177,11 @@ func (t *Tracker) loadUDFs(conn queryservice.QueryService, target *querypb.Targe return nil }) if err != nil { - log.Errorf("error fetching new UDFs for %v: %w", target.Keyspace, err) + log.Error(fmt.Sprintf("error fetching new UDFs for %v: %v", target.Keyspace, err)) return err } t.udfs[target.Keyspace] = udfs - log.Infof("finished loading %d UDFs for keyspace %s", len(udfs), target.Keyspace) + log.Info(fmt.Sprintf("finished loading %d UDFs for keyspace %s", len(udfs), target.Keyspace)) return nil } @@ -238,7 +239,7 @@ func (t *Tracker) setLoaded(ks keyspaceStr, loaded bool) { func (t *Tracker) initKeyspace(th *discovery.TabletHealth) error { err := t.LoadKeyspace(th.Conn, th.Target) if err != nil { - log.Warningf("Unable to add the %s keyspace to the schema tracker: %v", th.Target.Keyspace, err) + log.Warn(fmt.Sprintf("Unable to add the %s keyspace to the schema tracker: %v", th.Target.Keyspace, err)) return err } return nil @@ -356,7 +357,7 @@ func (t *Tracker) updatedTableSchema(th *discovery.TabletHealth) bool { if err != nil { t.setLoaded(th.Target.Keyspace, false) // TODO: optimize for the tables that got errored out. - log.Warningf("error fetching new schema for %v, making them non-authoritative: %v", tablesUpdated, err) + log.Warn(fmt.Sprintf("error fetching new schema for %v, making them non-authoritative: %v", tablesUpdated, err)) return false } return true @@ -366,12 +367,12 @@ func (t *Tracker) updateTables(keyspace string, res map[string]string) { for tableName, tableDef := range res { stmt, err := t.parser.ParseStrictDDL(tableDef) if err != nil { - log.Warningf("error parsing table definition for %s: %v", tableName, err) + log.Warn(fmt.Sprintf("error parsing table definition for %s: %v", tableName, err)) continue } ddl, ok := stmt.(*sqlparser.CreateTable) if !ok { - log.Warningf("parsed table definition for '%s' is not a create table definition", tableName) + log.Warn(fmt.Sprintf("parsed table definition for '%s' is not a create table definition", tableName)) continue } @@ -464,7 +465,7 @@ func (t *Tracker) updatedViewSchema(th *discovery.TabletHealth) bool { if err != nil { t.setLoaded(th.Target.Keyspace, false) // TODO: optimize for the views that got errored out. 
- log.Warningf("error fetching new views definition for %v", viewsUpdated, err) + log.Warn(fmt.Sprintf("error fetching new views definition for %v: %v", viewsUpdated, err)) return false } return true @@ -549,12 +550,12 @@ func (vm *viewMap) set(ks, tbl, sql string) { } stmt, err := vm.parser.ParseStrictDDL(sql) if err != nil { - log.Warningf("ignoring view '%s', parsing error in view definition: '%s'", tbl, sql) + log.Warn(fmt.Sprintf("ignoring view '%s', parsing error in view definition: '%s'", tbl, sql)) return } cv, ok := stmt.(*sqlparser.CreateView) if !ok { - log.Warningf("ignoring view '%s', view definition is not a create view query: %T", tbl, stmt) + log.Warn(fmt.Sprintf("ignoring view '%s', view definition is not a create view query: %T", tbl, stmt)) return } sqlparser.AddKeyspace(cv.Select, ks) diff --git a/go/vt/vtgate/simplifier/expression_simplifier.go b/go/vt/vtgate/simplifier/expression_simplifier.go index 6160c0f128c..f5da172de32 100644 --- a/go/vt/vtgate/simplifier/expression_simplifier.go +++ b/go/vt/vtgate/simplifier/expression_simplifier.go @@ -46,7 +46,7 @@ func SimplifyExpr(in sqlparser.Expr, test CheckF) sqlparser.Expr { if valid { break // we will still continue trying to simplify other expressions at this level } else { - log.Errorf("failed attempt: tried changing {%s} to {%s} in {%s}", sqlparser.String(node), sqlparser.String(expr), sqlparser.String(in)) + log.Error(fmt.Sprintf("failed attempt: tried changing {%s} to {%s} in {%s}", sqlparser.String(node), sqlparser.String(expr), sqlparser.String(in))) // undo the change cursor.Replace(node) } diff --git a/go/vt/vtgate/simplifier/simplifier.go b/go/vt/vtgate/simplifier/simplifier.go index a95523631ce..a38492de889 100644 --- a/go/vt/vtgate/simplifier/simplifier.go +++ b/go/vt/vtgate/simplifier/simplifier.go @@ -17,6 +17,8 @@ limitations under the License. 
package simplifier import ( + "fmt" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/semantics" @@ -79,7 +81,7 @@ func trySimplifyDistinct(in sqlparser.TableStatement, test func(statement sqlpar if sel.Distinct { sel.Distinct = false if test(sel) { - log.Errorf("removed distinct to yield: %s", sqlparser.String(sel)) + log.Error("removed distinct to yield: " + sqlparser.String(sel)) simplified = true } else { sel.Distinct = true @@ -105,7 +107,7 @@ func trySimplifyExpressions(in sqlparser.TableStatement, test func(sqlparser.Tab // first - let's try to remove the expression if cursor.remove() { if test(in) { - log.Errorf("removed expression: %s", sqlparser.String(cursor.expr)) + log.Error("removed expression: " + sqlparser.String(cursor.expr)) simplified = true // initially return false, but that made the rewriter prematurely abort, if it was the last selectExpr return true @@ -117,7 +119,7 @@ func trySimplifyExpressions(in sqlparser.TableStatement, test func(sqlparser.Tab newExpr := SimplifyExpr(cursor.expr, func(expr sqlparser.Expr) bool { cursor.replace(expr) if test(in) { - log.Errorf("simplified expression: %s -> %s", sqlparser.String(cursor.expr), sqlparser.String(expr)) + log.Error(fmt.Sprintf("simplified expression: %s -> %s", sqlparser.String(cursor.expr), sqlparser.String(expr))) cursor.restore() simplified = true return true @@ -166,14 +168,14 @@ func trySimplifyUnions(in sqlparser.TableStatement, test func(subquery sqlparser cursor.Replace(node.Left) clone := sqlparser.Clone(in) if test(clone) { - log.Errorf("replaced UNION with its left child: %s -> %s", sqlparser.String(node), sqlparser.String(node.Left)) + log.Error(fmt.Sprintf("replaced UNION with its left child: %s -> %s", sqlparser.String(node), sqlparser.String(node.Left))) simplified = true return true } cursor.Replace(node.Right) clone = sqlparser.Clone(in) if test(clone) { - log.Errorf("replaced UNION with its right child: %s -> %s", sqlparser.String(node), sqlparser.String(node.Right)) + log.Error(fmt.Sprintf("replaced UNION with its right child: %s -> %s", sqlparser.String(node), sqlparser.String(node.Right))) simplified = true return true } @@ -199,7 +201,7 @@ func tryRemoveTable(tables []semantics.TableInfo, in sqlparser.TableStatement, c simplified := removeTable(clone, searchedTS, currentDB, si) name, _ := tbl.Name() if simplified && test(clone) { - log.Errorf("removed table `%s`: \n%s\n%s", sqlparser.String(name), sqlparser.String(in), sqlparser.String(clone)) + log.Error(fmt.Sprintf("removed table `%s`: \n%s\n%s", sqlparser.String(name), sqlparser.String(in), sqlparser.String(clone))) return clone } } @@ -232,7 +234,7 @@ func simplifyStarExpr(in sqlparser.TableStatement, test func(sqlparser.TableStat Expr: sqlparser.NewIntLiteral("0"), }) if test(in) { - log.Errorf("replaced star with literal") + log.Error("replaced star with literal") simplified = true return true } diff --git a/go/vt/vtgate/simplifier/simplifier_test.go b/go/vt/vtgate/simplifier/simplifier_test.go index d6fbe073756..e3b708b9e4b 100644 --- a/go/vt/vtgate/simplifier/simplifier_test.go +++ b/go/vt/vtgate/simplifier/simplifier_test.go @@ -136,7 +136,7 @@ func TestSimplifyEvalEngineExpr(t *testing.T) { } return toInt64 >= 8 }) - log.Errorf("simplest expr to evaluate to >= 8: [%s], started from: [%s]", sqlparser.String(expr), sqlparser.String(p0)) + log.Error(fmt.Sprintf("simplest expr to evaluate to >= 8: [%s], started from: [%s]", sqlparser.String(expr), sqlparser.String(p0))) } func plus(a, b 
sqlparser.Expr) sqlparser.Expr { diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index d2712884bd7..06153e5ef11 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ -21,6 +21,7 @@ import ( "fmt" "math/rand/v2" "net/http" + "os" "runtime/debug" "slices" "sort" @@ -117,7 +118,8 @@ type TabletGateway struct { func createHealthCheck(ctx context.Context, retryDelay, timeout time.Duration, ts *topo.Server, cell, cellsToWatch string) discovery.HealthCheck { filters, err := discovery.NewVTGateHealthCheckFilters() if err != nil { - log.Exit(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } return discovery.NewHealthCheck(ctx, retryDelay, timeout, ts, cell, cellsToWatch, filters) } @@ -131,7 +133,8 @@ func NewTabletGateway(ctx context.Context, hc discovery.HealthCheck, serv srvtop var err error topoServer, err = serv.GetTopoServer() if err != nil { - log.Exitf("Unable to create new TabletGateway: %v", err) + log.Error(fmt.Sprintf("Unable to create new TabletGateway: %v", err)) + os.Exit(1) } } hc = createHealthCheck(ctx, healthCheckRetryDelay, healthCheckTimeout, topoServer, localCell, CellsToWatch) @@ -181,7 +184,8 @@ func (gw *TabletGateway) setupBuffering(ctx context.Context) { func (gw *TabletGateway) setupBalancer() { // Check for conflicting flags if balancerEnabled && balancerModeFlag != "" { - log.Exitf("Cannot use both --enable-balancer and --vtgate-balancer-mode flags. Please use --vtgate-balancer-mode only.") + log.Error("Cannot use both --enable-balancer and --vtgate-balancer-mode flags. Please use --vtgate-balancer-mode only.") + os.Exit(1) } // Determine the effective mode: new flag takes precedence, then deprecated flag, then default @@ -190,7 +194,7 @@ func (gw *TabletGateway) setupBalancer() { gw.balancerMode = balancer.ParseMode(balancerModeFlag) } else if balancerEnabled { // Deprecated flag for backwards compatibility - log.Warning("Flag --enable-balancer is deprecated. Please use --vtgate-balancer-mode=prefer-cell instead.") + log.Warn("Flag --enable-balancer is deprecated. Please use --vtgate-balancer-mode=prefer-cell instead.") gw.balancerMode = balancer.ModePreferCell } else { // Default: no flags set @@ -205,17 +209,19 @@ func (gw *TabletGateway) setupBalancer() { // Validate mode-specific requirements if gw.balancerMode == balancer.ModePreferCell && len(balancerVtgateCells) == 0 { - log.Exitf("--balancer-vtgate-cells is required when using --vtgate-balancer-mode=prefer-cell") + log.Error("--balancer-vtgate-cells is required when using --vtgate-balancer-mode=prefer-cell") + os.Exit(1) } // Create the balancer for prefer-cell or random modes var err error gw.balancer, err = balancer.NewTabletBalancer(gw.balancerMode, gw.localCell, balancerVtgateCells) if err != nil { - log.Exitf("Failed to create tablet balancer: %v", err) + log.Error(fmt.Sprintf("Failed to create tablet balancer: %v", err)) + os.Exit(1) } - log.Infof("Tablet balancer enabled with mode: %s", gw.balancerMode) + log.Info(fmt.Sprintf("Tablet balancer enabled with mode: %s", gw.balancerMode)) } // QueryServiceByAlias satisfies the Gateway interface @@ -240,7 +246,7 @@ func (gw *TabletGateway) RegisterStats() { // WaitForTablets is part of the Gateway interface. 
func (gw *TabletGateway) WaitForTablets(ctx context.Context, tabletTypesToWait []topodatapb.TabletType) (err error) { - log.Infof("Gateway waiting for serving tablets of types %v ...", tabletTypesToWait) + log.Info(fmt.Sprintf("Gateway waiting for serving tablets of types %v ...", tabletTypesToWait)) ctx, cancel := context.WithTimeout(ctx, initialTabletTimeout) defer cancel() @@ -248,12 +254,12 @@ func (gw *TabletGateway) WaitForTablets(ctx context.Context, tabletTypesToWait [ switch err { case nil: // Log so we know everything is fine. - log.Infof("Waiting for tablets completed") + log.Info("Waiting for tablets completed") case context.DeadlineExceeded: // In this scenario, we were able to reach the // topology service, but some tablets may not be // ready. We just warn and keep going. - log.Warningf("Timeout waiting for all keyspaces / shards to have healthy tablets of types %v, may be in degraded mode", tabletTypesToWait) + log.Warn(fmt.Sprintf("Timeout waiting for all keyspaces / shards to have healthy tablets of types %v, may be in degraded mode", tabletTypesToWait)) err = nil } }() @@ -378,7 +384,7 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, // replica queries, so it doesn't make any sense to check for resharding or reparenting in that case. if kev := gw.kev; kev != nil && target.TabletType == topodatapb.TabletType_PRIMARY { if kev.TargetIsBeingResharded(ctx, target) { - log.V(2).Infof("current keyspace is being resharded, retrying: %s: %s", target.Keyspace, debug.Stack()) + log.Debug(fmt.Sprintf("current keyspace is being resharded, retrying: %s: %s", target.Keyspace, debug.Stack())) err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, buffer.ClusterEventReshardingInProgress) continue } diff --git a/go/vt/vtgate/tx_conn.go b/go/vt/vtgate/tx_conn.go index bb6d7f3eed3..3b6c9ab5b11 100644 --- a/go/vt/vtgate/tx_conn.go +++ b/go/vt/vtgate/tx_conn.go @@ -381,7 +381,7 @@ func (txc *TxConn) errActionAndLogWarn( commitUnresolved.Add(1) } if rollbackErr != nil { - log.Warningf("Rollback failed after %s failure: %v", phaseMessage[txPhase], rollbackErr) + log.Warn(fmt.Sprintf("Rollback failed after %s failure: %v", phaseMessage[txPhase], rollbackErr)) commitUnresolved.Add(1) } @@ -531,11 +531,11 @@ func (txc *TxConn) ResolveTransactions(ctx context.Context, target *querypb.Targ failedResolution := 0 for _, txRecord := range transactions { - log.Infof("Resolving transaction ID: %s", txRecord.Dtid) + log.Info("Resolving transaction ID: " + txRecord.Dtid) err = txc.resolveTx(ctx, target, txRecord) if err != nil { failedResolution++ - log.Errorf("Failed to resolve transaction ID: %s with error: %v", txRecord.Dtid, err) + log.Error(fmt.Sprintf("Failed to resolve transaction ID: %s with error: %v", txRecord.Dtid, err)) } } if failedResolution == 0 { diff --git a/go/vt/vtgate/txresolver/tx_resolver.go b/go/vt/vtgate/txresolver/tx_resolver.go index 3c96c7bc836..83e9320d2cc 100644 --- a/go/vt/vtgate/txresolver/tx_resolver.go +++ b/go/vt/vtgate/txresolver/tx_resolver.go @@ -18,6 +18,7 @@ package txresolver import ( "context" + "fmt" "sync" "vitess.io/vitess/go/vt/discovery" @@ -78,7 +79,7 @@ func (tr *TxResolver) resolveTransactions(ctx context.Context, target *querypb.T if !tr.tryLockTarget(dest) { return } - log.Infof("resolving transactions for shard: %s", dest) + log.Info("resolving transactions for shard: " + dest) defer func() { tr.mu.Lock() @@ -87,10 +88,10 @@ func (tr *TxResolver) resolveTransactions(ctx context.Context, target *querypb.T }() err := 
tr.txConn.ResolveTransactions(ctx, target) if err != nil { - log.Errorf("failed to resolve transactions for shard: %s, %v", dest, err) + log.Error(fmt.Sprintf("failed to resolve transactions for shard: %s, %v", dest, err)) return } - log.Infof("successfully resolved all the transactions for shard: %s", dest) + log.Info("successfully resolved all the transactions for shard: " + dest) } func (tr *TxResolver) tryLockTarget(dest string) bool { diff --git a/go/vt/vtgate/vindexes/region_json.go b/go/vt/vtgate/vindexes/region_json.go index 1fa91f38411..bde66d7c90a 100644 --- a/go/vt/vtgate/vindexes/region_json.go +++ b/go/vt/vtgate/vindexes/region_json.go @@ -74,7 +74,7 @@ func newRegionJSON(name string, m map[string]string) (Vindex, error) { if err != nil { return nil, err } - log.Infof("Loaded Region map from: %s", rmPath) + log.Info("Loaded Region map from: " + rmPath) err = json.Unmarshal(data, &rmap) if err != nil { return nil, err diff --git a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go index 3e0e81aa476..0463fbb734d 100644 --- a/go/vt/vtgate/vschema_manager.go +++ b/go/vt/vtgate/vschema_manager.go @@ -19,6 +19,7 @@ package vtgate import ( "context" "errors" + "fmt" "sync" "vitess.io/vitess/go/vt/graph" @@ -99,7 +100,7 @@ func (vm *VSchemaManager) UpdateVSchema(ctx context.Context, ks *topo.KeyspaceVS cellErr := topoServer.UpdateSrvVSchema(ctx, cell, srv) if cellErr != nil { err = cellErr - log.Errorf("error updating vschema in cell %s: %v", cell, cellErr) + log.Error(fmt.Sprintf("error updating vschema in cell %s: %v", cell, cellErr)) } } if err != nil { @@ -114,7 +115,7 @@ func (vm *VSchemaManager) UpdateVSchema(ctx context.Context, ks *topo.KeyspaceVS // VSchemaUpdate builds the VSchema from SrvVschema and call subscribers. func (vm *VSchemaManager) VSchemaUpdate(v *vschemapb.SrvVSchema, err error) bool { - log.Infof("Received vschema update") + log.Info("Received vschema update") switch { case err == nil: // Good case, we can try to save that value. @@ -123,7 +124,7 @@ func (vm *VSchemaManager) VSchemaUpdate(v *vschemapb.SrvVSchema, err error) bool // Otherwise, keep what we already had before. v = nil default: - log.Errorf("SrvVschema watch error: %v", err) + log.Error(fmt.Sprintf("SrvVschema watch error: %v", err)) // Watch error, increment our counters. if vschemaCounters != nil { vschemaCounters.Add("WatchError", 1) @@ -176,9 +177,9 @@ func (vm *VSchemaManager) Rebuild() { v := vm.currentSrvVschema vm.mu.Unlock() - log.Infof("Received schema update") + log.Info("Received schema update") if v == nil { - log.Infof("No vschema to enhance") + log.Info("No vschema to enhance") return } @@ -189,7 +190,7 @@ func (vm *VSchemaManager) Rebuild() { if vm.subscriber != nil { vm.subscriber(vschema, vSchemaStats(nil, vschema)) - log.Infof("Sent vschema to subscriber") + log.Info("Sent vschema to subscriber") } } @@ -248,7 +249,7 @@ func (vm *VSchemaManager) updateTableInfo(vschema *vindexes.VSchema, ks *vindexe // We should only add foreign key table info to the routed tables only where the DML operations will be routed. rTbl, _ := vschema.FindRoutedTable(ksName, tblName, topodatapb.TabletType_PRIMARY) if rTbl == nil { - log.Warningf("unable to find routed table %s in %s", tblName, ksName) + log.Warn(fmt.Sprintf("unable to find routed table %s in %s", tblName, ksName)) continue } @@ -257,7 +258,7 @@ func (vm *VSchemaManager) updateTableInfo(vschema *vindexes.VSchema, ks *vindexe // otherwise, even in routing rules, table names are expected to be the same. 
// Ideally they should be in different keyspaces. if rTbl.Keyspace.Name != ksName || rTbl.Name.String() != tblName { - log.Warningf("table '%s' in keyspace '%s' routed to table '%s'", tblName, ksName, rTbl.String()) + log.Warn(fmt.Sprintf("table '%s' in keyspace '%s' routed to table '%s'", tblName, ksName, rTbl.String())) continue } @@ -268,7 +269,7 @@ func (vm *VSchemaManager) updateTableInfo(vschema *vindexes.VSchema, ks *vindexe } parentTbl, err := vschema.FindRoutedTable(ksName, fkDef.ReferenceDefinition.ReferencedTable.Name.String(), topodatapb.TabletType_PRIMARY) if err != nil || parentTbl == nil { - log.Errorf("error finding parent table %s: %v", fkDef.ReferenceDefinition.ReferencedTable.Name.String(), err) + log.Error(fmt.Sprintf("error finding parent table %s: %v", fkDef.ReferenceDefinition.ReferencedTable.Name.String(), err)) continue } rTbl.ParentForeignKeys = append(rTbl.ParentForeignKeys, vindexes.NewParentFkInfo(parentTbl, fkDef)) diff --git a/go/vt/vtgate/vstream_manager.go b/go/vt/vtgate/vstream_manager.go index 058139b8858..225efa35606 100644 --- a/go/vt/vtgate/vstream_manager.go +++ b/go/vt/vtgate/vstream_manager.go @@ -208,20 +208,19 @@ func (vsm *vstreamManager) VStream(ctx context.Context, tabletType topodatapb.Ta if err != nil { return vterrors.Wrap(err, "failed to resolve vstream parameters") } - log.Infof("VStream flags: minimize_skew=%v, heartbeat_interval=%v, stop_on_reshard=%v, cells=%v, cell_preference=%v, tablet_order=%v, stream_keyspace_heartbeats=%v, include_reshard_journal_events=%v, tables_to_copy=%v, exclude_keyspace_from_table_name=%v, transaction_chunk_size=%v", - flags.GetMinimizeSkew(), flags.GetHeartbeatInterval(), flags.GetStopOnReshard(), flags.Cells, flags.CellPreference, flags.TabletOrder, - flags.GetStreamKeyspaceHeartbeats(), flags.GetIncludeReshardJournalEvents(), flags.TablesToCopy, flags.GetExcludeKeyspaceFromTableName(), flags.TransactionChunkSize) + log.Info(fmt.Sprintf("VStream flags: minimize_skew=%v, heartbeat_interval=%v, stop_on_reshard=%v, cells=%v, cell_preference=%v, tablet_order=%v, stream_keyspace_heartbeats=%v, include_reshard_journal_events=%v, tables_to_copy=%v, exclude_keyspace_from_table_name=%v, transaction_chunk_size=%v", flags.GetMinimizeSkew(), flags.GetHeartbeatInterval(), flags.GetStopOnReshard(), flags.Cells, flags.CellPreference, flags.TabletOrder, + flags.GetStreamKeyspaceHeartbeats(), flags.GetIncludeReshardJournalEvents(), flags.TablesToCopy, flags.GetExcludeKeyspaceFromTableName(), flags.TransactionChunkSize)) ts, err := vsm.toposerv.GetTopoServer() if err != nil { return vterrors.Wrap(err, "failed to get topology server") } if ts == nil { - log.Errorf("unable to get topo server in VStream()") + log.Error("unable to get topo server in VStream()") return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unable to get topoology server") } transactionChunkSizeBytes := defaultTransactionChunkSizeBytes if flags.TransactionChunkSize > 0 && flags.GetMinimizeSkew() { - log.Warning("Minimize skew cannot be set with transaction chunk size (can cause deadlock), ignoring transaction chunk size.") + log.Warn("Minimize skew cannot be set with transaction chunk size (can cause deadlock), ignoring transaction chunk size.") } else if flags.TransactionChunkSize > 0 { transactionChunkSizeBytes = int(flags.TransactionChunkSize) } @@ -399,7 +398,7 @@ func (vs *vstream) sendEvents(ctx context.Context) { send := func(evs []*binlogdatapb.VEvent) error { if err := vs.send(evs); err != nil { - log.Infof("Error in vstream send (wrapper) to client: 
%v", err) + log.Info(fmt.Sprintf("Error in vstream send (wrapper) to client: %v", err)) vs.once.Do(func() { vs.setError(err, "error sending events") }) @@ -411,14 +410,14 @@ func (vs *vstream) sendEvents(ctx context.Context) { for { select { case <-ctx.Done(): - log.Infof("vstream context canceled") + log.Info("vstream context canceled") vs.once.Do(func() { vs.setError(ctx.Err(), "context ended while sending events") }) return case evs := <-vs.eventCh: if err := send(evs); err != nil { - log.Infof("Error in vstream send events to client: %v", err) + log.Info(fmt.Sprintf("Error in vstream send events to client: %v", err)) vs.once.Do(func() { vs.setError(err, "error sending events") }) @@ -433,7 +432,7 @@ func (vs *vstream) sendEvents(ctx context.Context) { CurrentTime: now, }} if err := send(evs); err != nil { - log.Infof("Error in vstream sending heartbeat to client: %v", err) + log.Info(fmt.Sprintf("Error in vstream sending heartbeat to client: %v", err)) vs.once.Do(func() { vs.setError(err, "error sending heartbeat") }) @@ -441,7 +440,7 @@ func (vs *vstream) sendEvents(ctx context.Context) { } case <-vs.streamLivenessTimer.C: msg := fmt.Sprintf("vstream failed liveness checks as there was no activity, including heartbeats, within the last %v", livenessTimeout) - log.Infof("Error in vstream: %s", msg) + log.Info("Error in vstream: " + msg) vs.once.Do(func() { vs.setError(vterrors.New(vtrpcpb.Code_UNAVAILABLE, msg), "vstream is fully throttled or otherwise hung") }) @@ -462,7 +461,7 @@ func (vs *vstream) startOneStream(ctx context.Context, sgtid *binlogdatapb.Shard err := vs.streamFromTablet(ctx, sgtid) // Set the error on exit. First one wins. if err != nil { - log.Errorf("Error in vstream for %+v: %v", sgtid, err) + log.Error(fmt.Sprintf("Error in vstream for %+v: %v", sgtid, err)) // Get the original/base error. uerr := vterrors.UnwrapAll(err) if !errors.Is(uerr, context.Canceled) && !errors.Is(uerr, context.DeadlineExceeded) { @@ -514,7 +513,7 @@ func (vs *vstream) computeSkew(streamID string, event *binlogdatapb.VEvent) bool } } else { if (maxTs - minTs) > MaxSkew { // check if we are skewed due to this event - log.Infof("Skew found, laggard is %s, %+v", laggardStream, vs.timestamps) + log.Info(fmt.Sprintf("Skew found, laggard is %s, %+v", laggardStream, vs.timestamps)) vs.laggard = laggardStream vs.skewCh = make(chan bool) } @@ -561,7 +560,7 @@ func (vs *vstream) alignStreams(ctx context.Context, event *binlogdatapb.VEvent, return vterrors.Wrapf(ctx.Err(), "context ended while waiting for skew to reduce for stream %s from %s/%s", streamID, keyspace, shard) case <-time.After(time.Duration(vs.skewTimeoutSeconds) * time.Second): - log.Errorf("timed out while waiting for skew to reduce: %s", streamID) + log.Error("timed out while waiting for skew to reduce: " + streamID) return vterrors.Errorf(vtrpcpb.Code_CANCELED, "timed out while waiting for skew to reduce for stream %s from %s/%s", streamID, keyspace, shard) case <-vs.skewCh: @@ -626,7 +625,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha tabletPickerErr := func(err error) error { tperr := vterrors.Wrapf(err, "failed to find a %s tablet for VStream in %s/%s within the %s cell(s)", vs.tabletType.String(), sgtid.GetKeyspace(), sgtid.GetShard(), strings.Join(cells, ",")) - log.Errorf("%v", tperr) + log.Error(fmt.Sprintf("%v", tperr)) return tperr } tp, err := discovery.NewTabletPicker(ctx, vs.ts, cells, vs.vsm.cell, sgtid.GetKeyspace(), sgtid.GetShard(), vs.tabletType.String(), tpo, ignoreTablets...) 
@@ -642,8 +641,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha return tabletPickerErr(err) } tabletAliasString := topoproto.TabletAliasString(tablet.Alias) - log.Infof("Picked %s tablet %s for VStream in %s/%s within the %s cell(s)", - vs.tabletType.String(), tabletAliasString, sgtid.GetKeyspace(), sgtid.GetShard(), strings.Join(cells, ",")) + log.Info(fmt.Sprintf("Picked %s tablet %s for VStream in %s/%s within the %s cell(s)", vs.tabletType.String(), tabletAliasString, sgtid.GetKeyspace(), sgtid.GetShard(), strings.Join(cells, ","))) target := &querypb.Target{ Keyspace: sgtid.Keyspace, @@ -653,7 +651,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha } tabletConn, err := vs.vsm.resolver.GetGateway().QueryServiceByAlias(ctx, tablet.Alias, target) if err != nil { - log.Errorf("%s", err.Error()) + log.Error(err.Error()) return vterrors.Wrapf(err, "failed to get tablet connection to %s", tabletAliasString) } @@ -677,7 +675,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha topoproto.TabletAliasString(tablet.Alias), shr.RealtimeStats.ReplicationLagSeconds, discovery.GetLowReplicationLag()) } if err != nil { - log.Warningf("Tablet state changed: %s, attempting to restart", err) + log.Warn(fmt.Sprintf("Tablet state changed: %s, attempting to restart", err)) err = vterrors.Wrapf(err, "error streaming tablet health from %s", tabletAliasString) errCh <- err return err @@ -710,7 +708,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha TableLastPKs: sgtid.TablePKs, Options: options, } - log.Infof("Starting to vstream from %s, with req %+v", tabletAliasString, req) + log.Info(fmt.Sprintf("Starting to vstream from %s, with req %+v", tabletAliasString, req)) var txLockHeld bool var inTransaction bool var accumulatedSize int @@ -731,7 +729,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha return vterrors.Wrapf(ctx.Err(), "context ended while streaming from tablet %s in %s/%s", tabletAliasString, sgtid.Keyspace, sgtid.Shard) case streamErr := <-errCh: - log.Infof("vstream for %s/%s ended due to health check, should retry: %v", sgtid.Keyspace, sgtid.Shard, streamErr) + log.Info(fmt.Sprintf("vstream for %s/%s ended due to health check, should retry: %v", sgtid.Keyspace, sgtid.Shard, streamErr)) // You must return Code_UNAVAILABLE here to trigger a restart. return vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "error streaming from tablet %s in %s/%s: %s", tabletAliasString, sgtid.Keyspace, sgtid.Shard, streamErr.Error()) @@ -739,7 +737,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha // Unreachable. // This can happen if a server misbehaves and does not end // the stream after we return an error. 
- log.Infof("vstream for %s/%s ended due to journal event, returning io.EOF", sgtid.Keyspace, sgtid.Shard) + log.Info(fmt.Sprintf("vstream for %s/%s ended due to journal event, returning io.EOF", sgtid.Keyspace, sgtid.Shard)) return io.EOF default: } @@ -785,7 +783,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha sendErr = vs.sendAll(ctx, sgtid, eventss) } if sendErr != nil { - log.Infof("vstream for %s/%s, error in sendAll: %v", sgtid.Keyspace, sgtid.Shard, sendErr) + log.Info(fmt.Sprintf("vstream for %s/%s, error in sendAll: %v", sgtid.Keyspace, sgtid.Shard, sendErr)) return vterrors.Wrap(sendErr, sendingEventsErr) } eventss = nil @@ -802,7 +800,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha } if err := vs.sendAll(ctx, sgtid, eventss); err != nil { - log.Infof("vstream for %s/%s, error in sendAll, on copy completed event: %v", sgtid.Keyspace, sgtid.Shard, err) + log.Info(fmt.Sprintf("vstream for %s/%s, error in sendAll, on copy completed event: %v", sgtid.Keyspace, sgtid.Shard, err)) return vterrors.Wrap(err, sendingEventsErr) } eventss = nil @@ -833,7 +831,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha } eventss = append(eventss, sendevents) if err := vs.sendAll(ctx, sgtid, eventss); err != nil { - log.Infof("vstream for %s/%s, error in sendAll, on journal event: %v", sgtid.Keyspace, sgtid.Shard, err) + log.Info(fmt.Sprintf("vstream for %s/%s, error in sendAll, on journal event: %v", sgtid.Keyspace, sgtid.Shard, err)) return vterrors.Wrap(err, sendingEventsErr) } eventss = nil @@ -868,7 +866,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha if endTimer != nil { <-endTimer.C } - log.Infof("vstream for %s/%s ended due to journal event, returning io.EOF", sgtid.Keyspace, sgtid.Shard) + log.Info(fmt.Sprintf("vstream for %s/%s ended due to journal event, returning io.EOF", sgtid.Keyspace, sgtid.Shard)) return io.EOF } } @@ -885,7 +883,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha // If chunking is enabled, and we are holding the lock (only possible when enabled), and we are not in a transaction // release the lock (this should not ever execute, acts as a safety check). if vs.isChunkingEnabled() && txLockHeld && !inTransaction { - log.Warning("Detected held lock but not in a transaction, releasing the lock") + log.Warn("Detected held lock but not in a transaction, releasing the lock") vs.mu.Unlock() txLockHeld = false } @@ -893,7 +891,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha // If chunking is enabled, and we are holding the lock (only possible when chunking is enabled), send the events. if vs.isChunkingEnabled() && txLockHeld && len(eventss) > 0 { if err := vs.sendEventsLocked(ctx, sgtid, eventss); err != nil { - log.Infof("vstream for %s/%s, error in sendAll at end of callback: %v", sgtid.Keyspace, sgtid.Shard, err) + log.Info(fmt.Sprintf("vstream for %s/%s, error in sendAll at end of callback: %v", sgtid.Keyspace, sgtid.Shard, err)) return vterrors.Wrap(err, sendingEventsErr) } eventss = nil @@ -902,14 +900,13 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha // If chunking is enabled and we are in a transaction, and we do not yet hold the lock, and the accumulated size is greater than our chunk size // then acquire the lock, so that we can send the events, and begin chunking the transaction. 
if vs.isChunkingEnabled() && inTransaction && !txLockHeld && accumulatedSize > vs.transactionChunkSizeBytes { - log.Infof("vstream for %s/%s: transaction size %d bytes exceeds chunk size %d bytes, acquiring lock for contiguous, chunked delivery", - sgtid.Keyspace, sgtid.Shard, accumulatedSize, vs.transactionChunkSizeBytes) + log.Info(fmt.Sprintf("vstream for %s/%s: transaction size %d bytes exceeds chunk size %d bytes, acquiring lock for contiguous, chunked delivery", sgtid.Keyspace, sgtid.Shard, accumulatedSize, vs.transactionChunkSizeBytes)) vs.vsm.vstreamsTransactionsChunked.Add(labelValues, 1) vs.mu.Lock() txLockHeld = true if len(eventss) > 0 { if err := vs.sendEventsLocked(ctx, sgtid, eventss); err != nil { - log.Infof("vstream for %s/%s, error sending events after acquiring lock: %v", sgtid.Keyspace, sgtid.Shard, err) + log.Info(fmt.Sprintf("vstream for %s/%s, error sending events after acquiring lock: %v", sgtid.Keyspace, sgtid.Shard, err)) return vterrors.Wrap(err, sendingEventsErr) } eventss = nil @@ -932,7 +929,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha retry, ignoreTablet := vs.shouldRetry(err) if !retry { - log.Infof("vstream for %s/%s error, no retry: %v", sgtid.Keyspace, sgtid.Shard, err) + log.Info(fmt.Sprintf("vstream for %s/%s error, no retry: %v", sgtid.Keyspace, sgtid.Shard, err)) return vterrors.Wrapf(err, "error in vstream for %s/%s on tablet %s", sgtid.Keyspace, sgtid.Shard, tabletAliasString) } @@ -943,11 +940,11 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha errCount++ // Retry, at most, 3 times if the error can be retried. if errCount >= 3 { - log.Errorf("vstream for %s/%s had three consecutive failures: %v", sgtid.Keyspace, sgtid.Shard, err) + log.Error(fmt.Sprintf("vstream for %s/%s had three consecutive failures: %v", sgtid.Keyspace, sgtid.Shard, err)) return vterrors.Wrapf(err, "persistent error in vstream for %s/%s on tablet %s; giving up", sgtid.Keyspace, sgtid.Shard, tabletAliasString) } - log.Infof("vstream for %s/%s error, retrying: %v", sgtid.Keyspace, sgtid.Shard, err) + log.Info(fmt.Sprintf("vstream for %s/%s error, retrying: %v", sgtid.Keyspace, sgtid.Shard, err)) } } @@ -1149,7 +1146,7 @@ func (vs *vstream) getJournalEvent(ctx context.Context, sgtid *binlogdatapb.Shar je, ok := vs.journaler[journal.Id] if !ok { - log.Infof("Journal event received: %v", journal) + log.Info(fmt.Sprintf("Journal event received: %v", journal)) // Identify the list of ShardGtids that match the participants of the journal. je = &journalEvent{ journal: journal, @@ -1211,7 +1208,7 @@ func (vs *vstream) getJournalEvent(ctx context.Context, sgtid *binlogdatapb.Shar if !vs.stopOnReshard { // stop streaming from current shards and start streaming the new shards // All participants are waiting. Replace old shard gtids with new ones. 
newsgtids := make([]*binlogdatapb.ShardGtid, 0, len(vs.vgtid.ShardGtids)-len(je.participants)+len(je.journal.ShardGtids)) - log.Infof("Removing shard gtids: %v", je.participants) + log.Info(fmt.Sprintf("Removing shard gtids: %v", je.participants)) for _, cursgtid := range vs.vgtid.ShardGtids { if je.participants[cursgtid] { continue @@ -1219,7 +1216,7 @@ func (vs *vstream) getJournalEvent(ctx context.Context, sgtid *binlogdatapb.Shar newsgtids = append(newsgtids, cursgtid) } - log.Infof("Adding shard gtids: %v", je.journal.ShardGtids) + log.Info(fmt.Sprintf("Adding shard gtids: %v", je.journal.ShardGtids)) for _, sgtid := range je.journal.ShardGtids { newsgtids = append(newsgtids, sgtid) // It's ok to start the streams even though ShardGtids are not updated yet. diff --git a/go/vt/vtgate/vstream_manager_test.go b/go/vt/vtgate/vstream_manager_test.go index 2ca02113207..1dc18267a0c 100644 --- a/go/vt/vtgate/vstream_manager_test.go +++ b/go/vt/vtgate/vstream_manager_test.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "runtime/pprof" "strings" @@ -35,7 +36,6 @@ import ( "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -2256,8 +2256,9 @@ func TestVStreamManagerHealthCheckResponseHandling(t *testing.T) { // Capture the vstream warning log. Otherwise we need to re-implement the vstream error // handling in SandboxConn's implementation and then we're not actually testing the // production code. - logger := logutil.NewMemoryLogger() - log.Warningf = logger.Warningf + handler := log.NewCaptureHandler() + restoreLogger := log.SetLogger(slog.New(handler)) + defer restoreLogger() cell := "aa" ks := "TestVStream" @@ -2358,14 +2359,22 @@ func TestVStreamManagerHealthCheckResponseHandling(t *testing.T) { if tc.wantErr != "" { require.Error(t, err) - require.Contains(t, logger.String(), tc.wantErr) + records := handler.Records() + found := false + for _, record := range records { + if strings.Contains(record.Message, tc.wantErr) { + found = true + break + } + } + require.True(t, found, "expected warning log containing %q", tc.wantErr) } else { // Otherwise we simply expect the context to timeout require.Error(t, err) require.ErrorIs(t, vterrors.UnwrapAll(err), context.DeadlineExceeded) } - logger.Clear() + handler.Reset() }) } } diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 3a3179588a9..353181e0eb1 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -297,7 +297,8 @@ func Init( ) *VTGate { ts, err := serv.GetTopoServer() if err != nil { - log.Fatalf("Unable to get Topo server: %v", err) + log.Error(fmt.Sprintf("Unable to get Topo server: %v", err)) + os.Exit(1) } // We need to get the keyspaces and rebuild the keyspace graphs @@ -309,12 +310,14 @@ func Init( } else { keyspaces, err = ts.GetSrvKeyspaceNames(ctx, cell) if err != nil { - log.Fatalf("Unable to get all keyspaces: %v", err) + log.Error(fmt.Sprintf("Unable to get all keyspaces: %v", err)) + os.Exit(1) } } // executor sets a watch on SrvVSchema, so let's rebuild these before creating it if err := rebuildTopoGraphs(ctx, ts, cell, keyspaces); err != nil { - log.Fatalf("rebuildTopoGraphs failed: %v", err) + log.Error(fmt.Sprintf("rebuildTopoGraphs failed: %v", err)) + os.Exit(1) } // Build objects from low to high level. // Start with the gateway. 
If we can't reach the topology service, @@ -323,7 +326,8 @@ func Init( gw := NewTabletGateway(ctx, hc, serv, cell) gw.RegisterStats() if err := gw.WaitForTablets(ctx, tabletTypesToWait); err != nil { - log.Fatalf("tabletGateway.WaitForTablets failed: %v", err) + log.Error(fmt.Sprintf("tabletGateway.WaitForTablets failed: %v", err)) + os.Exit(1) } dynamicConfig := NewDynamicViperConfig() @@ -331,16 +335,18 @@ func Init( // If we want to filter keyspaces replace the srvtopo.Server with a // filtering server if discovery.FilteringKeyspaces() { - log.Infof("Keyspace filtering enabled, selecting %v", discovery.KeyspacesToWatch) + log.Info(fmt.Sprintf("Keyspace filtering enabled, selecting %v", discovery.KeyspacesToWatch)) var err error serv, err = srvtopo.NewKeyspaceFilteringServer(serv, discovery.KeyspacesToWatch) if err != nil { - log.Fatalf("Unable to construct SrvTopo server: %v", err.Error()) + log.Error(fmt.Sprintf("Unable to construct SrvTopo server: %v", err.Error())) + os.Exit(1) } } if _, err := schema.ParseDDLStrategy(defaultDDLStrategy); err != nil { - log.Fatalf("Invalid value for -ddl-strategy: %v", err.Error()) + log.Error(fmt.Sprintf("Invalid value for -ddl-strategy: %v", err.Error())) + os.Exit(1) } tc := NewTxConn(gw, dynamicConfig) // ScatterConn depends on TxConn to perform forced rollbacks. @@ -362,7 +368,8 @@ func Init( }) // This should never happen. if !created { - log.Fatal("Failed to create a new sidecar database identifier cache during init as one already existed!") + log.Error("Failed to create a new sidecar database identifier cache during init as one already existed!") + os.Exit(1) } var si SchemaInfo // default nil @@ -386,7 +393,8 @@ func Init( executor := NewExecutor(ctx, env, serv, cell, resolver, eConfig, warnShardedOnly, plans, si, pv, dynamicConfig) if err := executor.defaultQueryLogger(); err != nil { - log.Fatalf("error initializing query logger: %v", err) + log.Error(fmt.Sprintf("error initializing query logger: %v", err)) + os.Exit(1) } // connect the schema tracker with the vschema manager @@ -438,7 +446,7 @@ func rebuildTopoGraphs(ctx context.Context, topoServer *topo.Server, cell string switch { case err == nil: case topo.IsErrType(err, topo.NoNode): - log.Infof("Rebuilding Serving Keyspace %v", ks) + log.Info(fmt.Sprintf("Rebuilding Serving Keyspace %v", ks)) if err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), topoServer, ks, []string{cell}, false); err != nil { return vterrors.Wrap(err, "vtgate Init: failed to RebuildKeyspace") } @@ -452,7 +460,7 @@ func rebuildTopoGraphs(ctx context.Context, topoServer *topo.Server, cell string case err == nil: for _, ks := range keyspaces { if _, exists := srvVSchema.GetKeyspaces()[ks]; !exists { - log.Infof("Rebuilding Serving Vschema") + log.Info("Rebuilding Serving Vschema") if err := topoServer.RebuildSrvVSchema(ctx, []string{cell}); err != nil { return vterrors.Wrap(err, "vtgate Init: failed to RebuildSrvVSchema") } @@ -461,7 +469,7 @@ func rebuildTopoGraphs(ctx context.Context, topoServer *topo.Server, cell string } } case topo.IsErrType(err, topo.NoNode): - log.Infof("Rebuilding Serving Vschema") + log.Info("Rebuilding Serving Vschema") // There is no SrvSchema in this cell at all, so we definitely need to rebuild. 
if err := topoServer.RebuildSrvVSchema(ctx, []string{cell}); err != nil { return vterrors.Wrap(err, "vtgate Init: failed to RebuildSrvVSchema") @@ -475,11 +483,11 @@ func rebuildTopoGraphs(ctx context.Context, topoServer *topo.Server, cell string func addKeyspacesToTracker(ctx context.Context, srvResolver *srvtopo.Resolver, st *vtschema.Tracker, gw *TabletGateway) { keyspaces, err := srvResolver.GetAllKeyspaces(ctx) if err != nil { - log.Warningf("Unable to get all keyspaces: %v", err) + log.Warn(fmt.Sprintf("Unable to get all keyspaces: %v", err)) return } if len(keyspaces) == 0 { - log.Infof("No keyspace to load") + log.Info("No keyspace to load") } for _, keyspace := range keyspaces { resolveAndLoadKeyspace(ctx, srvResolver, st, gw, keyspace) @@ -489,7 +497,7 @@ func addKeyspacesToTracker(ctx context.Context, srvResolver *srvtopo.Resolver, s func resolveAndLoadKeyspace(ctx context.Context, srvResolver *srvtopo.Resolver, st *vtschema.Tracker, gw *TabletGateway, keyspace string) { dest, err := srvResolver.ResolveDestination(ctx, keyspace, topodatapb.TabletType_PRIMARY, key.DestinationAllShards{}) if err != nil { - log.Warningf("Unable to resolve destination: %v", err) + log.Warn(fmt.Sprintf("Unable to resolve destination: %v", err)) return } @@ -497,7 +505,7 @@ func resolveAndLoadKeyspace(ctx context.Context, srvResolver *srvtopo.Resolver, for { select { case <-timeout: - log.Warningf("Unable to get initial schema reload for keyspace: %s", keyspace) + log.Warn("Unable to get initial schema reload for keyspace: " + keyspace) return case <-time.After(500 * time.Millisecond): for _, shard := range dest { @@ -836,7 +844,7 @@ func formatError(err error) error { // HandlePanic recovers from panics, and logs / increment counters func (vtg *VTGate) HandlePanic(err *error) { if x := recover(); x != nil { - log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) + log.Error(fmt.Sprintf("Uncaught panic:\n%v\n%s", x, tb.Stack(4))) *err = fmt.Errorf("uncaught panic: %v, vtgate: %v", x, servenv.ListeningURL.String()) errorCounts.Add([]string{"Panic", "Unknown", "Unknown", vtrpcpb.Code_INTERNAL.String()}, 1) } diff --git a/go/vt/vtgate/vtgateconn/vtgateconn.go b/go/vt/vtgate/vtgateconn/vtgateconn.go index 618fb268f58..4f15ecd22fd 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn.go @@ -229,7 +229,7 @@ func RegisterDialer(name string, dialer DialerFunc) { defer dialersM.Unlock() if _, ok := dialers[name]; ok { - log.Warningf("Dialer %s already exists, overwriting it", name) + log.Warn(fmt.Sprintf("Dialer %s already exists, overwriting it", name)) } dialers[name] = dialer } diff --git a/go/vt/vtorc/db/db.go b/go/vt/vtorc/db/db.go index 9262beee472..c163ac21df4 100644 --- a/go/vt/vtorc/db/db.go +++ b/go/vt/vtorc/db/db.go @@ -18,6 +18,8 @@ package db import ( "database/sql" + "fmt" + "os" "vitess.io/vitess/go/vt/external/golib/sqlutils" "vitess.io/vitess/go/vt/log" @@ -43,9 +45,10 @@ func OpenVTOrc() (db *sql.DB, err error) { var fromCache bool db, fromCache, err = sqlutils.GetSQLiteDB(config.GetSQLiteDataFile()) if err == nil && !fromCache { - log.Infof("Connected to vtorc backend: sqlite on %v", config.GetSQLiteDataFile()) + log.Info(fmt.Sprintf("Connected to vtorc backend: sqlite on %v", config.GetSQLiteDataFile())) if err := initVTOrcDB(db); err != nil { - log.Fatalf("Cannot initiate vtorc: %+v", err) + log.Error(fmt.Sprintf("Cannot initiate vtorc: %+v", err)) + os.Exit(1) } } if db != nil { @@ -65,7 +68,8 @@ func registerVTOrcDeployment(db *sql.DB) error { 
DATETIME('now') )` if _, err := execInternal(db, query, ""); err != nil { - log.Fatalf("Unable to write to vtorc_db_deployments: %+v", err) + log.Error(fmt.Sprintf("Unable to write to vtorc_db_deployments: %+v", err)) + os.Exit(1) } return nil } @@ -91,7 +95,8 @@ func ClearVTOrcDatabase() { db, _, _ := sqlutils.GetSQLiteDB(config.GetSQLiteDataFile()) if db != nil { if err := initVTOrcDB(db); err != nil { - log.Fatalf("Cannot re-initiate vtorc: %+v", err) + log.Error(fmt.Sprintf("Cannot re-initiate vtorc: %+v", err)) + os.Exit(1) } } } @@ -148,7 +153,7 @@ func QueryVTOrc(query string, argsArray []any, onRow func(sqlutils.RowMap) error } if err = sqlutils.QueryRowsMap(db, query, onRow, argsArray...); err != nil { - log.Warning(err.Error()) + log.Warn(err.Error()) } return err diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go index 9b457bee8c2..9506d4b8e87 100644 --- a/go/vt/vtorc/inst/analysis_dao.go +++ b/go/vt/vtorc/inst/analysis_dao.go @@ -292,7 +292,7 @@ func GetDetectionAnalysis(keyspace string, shard string, hints *DetectionAnalysi tablet := &topodatapb.Tablet{} opts := prototext.UnmarshalOptions{DiscardUnknown: true} if err := opts.Unmarshal([]byte(m.GetString("tablet_info")), tablet); err != nil { - log.Errorf("could not read tablet %v: %v", m.GetString("tablet_info"), err) + log.Error(fmt.Sprintf("could not read tablet %v: %v", m.GetString("tablet_info"), err)) return nil } @@ -304,7 +304,7 @@ func GetDetectionAnalysis(keyspace string, shard string, hints *DetectionAnalysi primaryTablet := &topodatapb.Tablet{} if str := m.GetString("primary_tablet_info"); str != "" { if err := opts.Unmarshal([]byte(str), primaryTablet); err != nil { - log.Errorf("could not read tablet %v: %v", str, err) + log.Error(fmt.Sprintf("could not read tablet %v: %v", str, err)) return nil } } @@ -318,7 +318,7 @@ func GetDetectionAnalysis(keyspace string, shard string, hints *DetectionAnalysi a.PrimaryTimeStamp = m.GetTime("primary_timestamp") if keyspaceType := topodatapb.KeyspaceType(m.GetInt32("keyspace_type")); keyspaceType == topodatapb.KeyspaceType_SNAPSHOT { - log.Errorf("keyspace %v is a snapshot keyspace. Skipping.", a.AnalyzedKeyspace) + log.Error(fmt.Sprintf("keyspace %v is a snapshot keyspace. Skipping.", a.AnalyzedKeyspace)) return nil } @@ -377,7 +377,7 @@ func GetDetectionAnalysis(keyspace string, shard string, hints *DetectionAnalysi a.AnalyzedInstanceAlias, a.AnalyzedKeyspace, a.AnalyzedShard, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, ) if util.ClearToLog("analysis_dao", analysisMessage) { - log.Infof(analysisMessage) + log.Info(analysisMessage) } } keyspaceShard := getKeyspaceShardName(a.AnalyzedKeyspace, a.AnalyzedShard) @@ -390,12 +390,12 @@ func GetDetectionAnalysis(keyspace string, shard string, hints *DetectionAnalysi } durabilityPolicy := m.GetString("durability_policy") if durabilityPolicy == "" { - log.Errorf("ignoring keyspace %v because no durability_policy is set. Please set it using SetKeyspaceDurabilityPolicy", a.AnalyzedKeyspace) + log.Error(fmt.Sprintf("ignoring keyspace %v because no durability_policy is set. Please set it using SetKeyspaceDurabilityPolicy", a.AnalyzedKeyspace)) return nil } durability, err := policy.GetDurabilityPolicy(durabilityPolicy) if err != nil { - log.Errorf("can't get the durability policy %v - %v. 
Skipping keyspace - %v.", durabilityPolicy, err, a.AnalyzedKeyspace) + log.Error(fmt.Sprintf("can't get the durability policy %v - %v. Skipping keyspace - %v.", durabilityPolicy, err, a.AnalyzedKeyspace)) return nil } clusters[keyspaceShard].durability = durability @@ -614,7 +614,7 @@ func GetDetectionAnalysis(keyspace string, shard string, hints *DetectionAnalysi result = postProcessAnalyses(result, clusters) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } // TODO: result, err = getConcensusDetectionAnalysis(result) return result, err @@ -702,12 +702,12 @@ func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisC string(analysisCode), tabletAlias, string(analysisCode), ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } rows, err := sqlResult.RowsAffected() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } lastAnalysisChanged = rows > 0 @@ -731,12 +731,12 @@ func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisC tabletAlias, string(analysisCode), ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } rows, err := sqlResult.RowsAffected() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } firstInsertion = rows > 0 @@ -762,7 +762,7 @@ func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisC if err == nil { analysisChangeWriteCounter.Add(1) } else { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } @@ -777,7 +777,7 @@ func ExpireInstanceAnalysisChangelog() error { config.UnseenInstanceForgetHours, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } diff --git a/go/vt/vtorc/inst/audit_dao.go b/go/vt/vtorc/inst/audit_dao.go index 0d589613f8c..96225b3021a 100644 --- a/go/vt/vtorc/inst/audit_dao.go +++ b/go/vt/vtorc/inst/audit_dao.go @@ -43,14 +43,14 @@ func AuditOperation(auditType string, tabletAlias string, message string) error go func() { f, err := os.OpenFile(config.GetAuditFileLocation(), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o640) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return } defer f.Close() text := fmt.Sprintf("%s\t%s\t%s\t[%s:%s]\t%s\t\n", time.Now().Format("2006-01-02 15:04:05"), auditType, tabletAlias, keyspace, shard, message) if _, err = f.WriteString(text); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } }() } @@ -78,7 +78,7 @@ func AuditOperation(auditType string, tabletAlias string, message string) error message, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } } @@ -87,7 +87,7 @@ func AuditOperation(auditType string, tabletAlias string, message string) error auditWrittenToFile = true } if !auditWrittenToFile { - log.Infof(logMessage) + log.Info(logMessage) } auditOperationCounter.Add(1) diff --git a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go index d6fee1f4730..967f2fa75e4 100644 --- a/go/vt/vtorc/inst/instance_dao.go +++ b/go/vt/vtorc/inst/instance_dao.go @@ -303,7 +303,7 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named instance.SecondsBehindPrimary.Int64 = int64(fs.ReplicationStatus.ReplicationLagSeconds) } if instance.SecondsBehindPrimary.Valid && instance.SecondsBehindPrimary.Int64 < 0 { - log.Warningf("Alias: %+v, instance.SecondsBehindPrimary < 0 [%+v], correcting to 0", tabletAlias, instance.SecondsBehindPrimary.Int64) + log.Warn(fmt.Sprintf("Alias: %+v, instance.SecondsBehindPrimary < 0 [%+v], correcting to 0", tabletAlias, 
instance.SecondsBehindPrimary.Int64)) instance.SecondsBehindPrimary.Int64 = 0 } // And until told otherwise: @@ -523,7 +523,7 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { return nil }) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } @@ -660,7 +660,7 @@ func readInstancesByCondition(condition string, args []any, sort string) ([](*In return nil }) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return instances, err } return instances, err @@ -739,7 +739,7 @@ func GetKeyspaceShardName(tabletAlias string) (keyspace string, shard string, er return nil }) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return keyspace, shard, err } @@ -787,7 +787,7 @@ func ReadOutdatedInstanceKeys() ([]string, error) { return nil }) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return res, err } @@ -989,7 +989,7 @@ func mkInsertForInstances(instances []*Instance, instanceWasActuallyFound bool, sql, err := mkInsert("database_instance", columns, values, len(instances), insertIgnore) if err != nil { errMsg := fmt.Sprintf("Failed to build query: %v", err) - log.Errorf(errMsg) + log.Error(errMsg) return sql, args, errors.New(errMsg) } @@ -1021,7 +1021,7 @@ func writeManyInstances(instances []*Instance, instanceWasActuallyFound bool, up // WriteInstance stores an instance in the vtorc backend func WriteInstance(instance *Instance, instanceWasActuallyFound bool, lastError error) error { if lastError != nil { - log.Infof("writeInstance: will not update database_instance due to error: %+v", lastError) + log.Info(fmt.Sprintf("writeInstance: will not update database_instance due to error: %+v", lastError)) return nil } return writeManyInstances([]*Instance{instance}, instanceWasActuallyFound, true) @@ -1044,7 +1044,7 @@ func UpdateInstanceLastChecked(tabletAlias string, partialSuccess bool, stalledD tabletAlias, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } @@ -1070,7 +1070,7 @@ func UpdateInstanceLastAttemptedCheck(tabletAlias string) error { tabletAlias, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } @@ -1087,11 +1087,11 @@ func InstanceIsForgotten(tabletAlias string) bool { func ForgetInstance(tabletAlias string) error { if tabletAlias == "" { errMsg := "ForgetInstance(): empty tabletAlias" - log.Errorf(errMsg) + log.Error(errMsg) return errors.New(errMsg) } forgetAliases.Set(tabletAlias, true, cache.DefaultExpiration) - log.Infof("Forgetting: %v", tabletAlias) + log.Info(fmt.Sprintf("Forgetting: %v", tabletAlias)) // Remove this tablet from errant GTID count metric. currentErrantGTIDCount.Reset(tabletAlias) @@ -1105,7 +1105,7 @@ func ForgetInstance(tabletAlias string) error { tabletAlias, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } @@ -1118,13 +1118,13 @@ func ForgetInstance(tabletAlias string) error { tabletAlias, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } // Get the number of rows affected. If they are zero, then we tried to forget an instance that doesn't exist. 
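The comment above leans on RowsAffected to distinguish a successful forget from forgetting a tablet that was never recorded. A small database/sql fragment showing the same check; the package, function, table, and column names here are placeholders:

package vtorcsketch

import (
	"database/sql"
	"fmt"
)

// ForgetTablet deletes a tablet row and reports a distinct error when the
// DELETE matched nothing, mirroring the RowsAffected check above.
func ForgetTablet(db *sql.DB, alias string) error {
	res, err := db.Exec(`DELETE FROM vitess_tablet_sketch WHERE alias = ?`, alias)
	if err != nil {
		return err
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return fmt.Errorf("tablet %s not found, nothing to forget", alias)
	}
	return nil
}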
rows, err := sqlResult.RowsAffected() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } if rows == 0 { @@ -1146,12 +1146,12 @@ func ForgetLongUnseenInstances() error { config.UnseenInstanceForgetHours, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } rows, err := sqlResult.RowsAffected() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } if rows > 0 { @@ -1185,7 +1185,7 @@ func SnapshotTopologies() error { `, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } @@ -1205,7 +1205,7 @@ func ExpireStaleInstanceBinlogCoordinates() error { expireSeconds, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go index 7eee427a546..7464e6f320f 100644 --- a/go/vt/vtorc/inst/instance_dao_test.go +++ b/go/vt/vtorc/inst/instance_dao_test.go @@ -593,7 +593,7 @@ last_attempted_check <= last_checked as use1, last_checked < DATETIME('now', '-1500 second') as is_outdated1, last_checked < DATETIME('now', '-3000 second') as is_outdated2 from database_instance`, func(rowMap sqlutils.RowMap) error { - log.Errorf("Row in database_instance - %+v", rowMap) + log.Error(fmt.Sprintf("Row in database_instance - %+v", rowMap)) return nil }) require.NoError(t, errInDataCollection) diff --git a/go/vt/vtorc/logic/disable_recovery.go b/go/vt/vtorc/logic/disable_recovery.go index c5446eeb9ff..e76c1f275b8 100644 --- a/go/vt/vtorc/logic/disable_recovery.go +++ b/go/vt/vtorc/logic/disable_recovery.go @@ -54,7 +54,7 @@ func IsRecoveryDisabled() (disabled bool, err error) { }) if err != nil { errMsg := fmt.Sprintf("recovery.IsRecoveryDisabled(): %v", err) - log.Errorf(errMsg) + log.Error(errMsg) err = errors.New(errMsg) } return disabled, err diff --git a/go/vt/vtorc/logic/discovery_queue.go b/go/vt/vtorc/logic/discovery_queue.go index 46f5527b196..2a1c7866c1f 100644 --- a/go/vt/vtorc/logic/discovery_queue.go +++ b/go/vt/vtorc/logic/discovery_queue.go @@ -26,6 +26,7 @@ push() operation never blocks while pop() blocks on an empty queue. 
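The doc comment above promises a queue whose push never blocks while pop blocks on empty, and the Consume hunk below warns when an item waited longer than the poll interval. A buffered-channel sketch of that behavior, with log/slog standing in for the Vitess log package; the type names, channel size, and maxWait are placeholders, not the real DiscoveryQueue:

package main

import (
	"fmt"
	"log/slog"
	"time"
)

type queueItem struct {
	key      string
	pushedAt time.Time
}

type discoveryQueue struct {
	items   chan queueItem
	maxWait time.Duration
}

func newDiscoveryQueue(size int, maxWait time.Duration) *discoveryQueue {
	return &discoveryQueue{items: make(chan queueItem, size), maxWait: maxWait}
}

// Push never blocks: if the buffer is full the key is dropped.
func (q *discoveryQueue) Push(key string) {
	select {
	case q.items <- queueItem{key: key, pushedAt: time.Now()}:
	default:
		slog.Warn(fmt.Sprintf("discovery queue full, dropping key %v", key))
	}
}

// Consume blocks until an item is available and warns if it waited too long.
func (q *discoveryQueue) Consume() string {
	item := <-q.items
	if wait := time.Since(item.pushedAt); wait > q.maxWait {
		slog.Warn(fmt.Sprintf("key %v spent %.4fs waiting on the discovery queue", item.key, wait.Seconds()))
	}
	return item.key
}

func main() {
	q := newDiscoveryQueue(16, time.Second)
	q.Push("zone1-0000000100")
	fmt.Println(q.Consume())
}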
package logic import ( + "fmt" "sync" "time" @@ -94,7 +95,7 @@ func (q *DiscoveryQueue) Consume() string { timeOnQueue := time.Since(item.PushedAt) if timeOnQueue > config.GetInstancePollTime() { - log.Warningf("key %v spent %.4fs waiting on a discovery queue", item.Key, timeOnQueue.Seconds()) + log.Warn(fmt.Sprintf("key %v spent %.4fs waiting on a discovery queue", item.Key, timeOnQueue.Seconds())) } return item.Key diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery.go b/go/vt/vtorc/logic/keyspace_shard_discovery.go index 3a14cc43c10..313d4fad7de 100644 --- a/go/vt/vtorc/logic/keyspace_shard_discovery.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery.go @@ -18,6 +18,7 @@ package logic import ( "context" + "fmt" "sync" "golang.org/x/exp/maps" @@ -123,12 +124,12 @@ func refreshShard(keyspaceName, shardName string) error { func refreshKeyspaceHelper(ctx context.Context, keyspaceName string) error { keyspaceInfo, err := ts.GetKeyspace(ctx, keyspaceName) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } err = inst.SaveKeyspace(keyspaceInfo) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } @@ -143,7 +144,7 @@ func refreshAllShards(ctx context.Context, keyspaceName string) error { Concurrency: 8, }) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } @@ -154,7 +155,7 @@ func refreshAllShards(ctx context.Context, keyspaceName string) error { continue } if err = inst.SaveShard(shardInfo); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } savedShards[shardInfo.ShardName()] = true @@ -163,7 +164,7 @@ func refreshAllShards(ctx context.Context, keyspaceName string) error { // delete shards that were not saved, indicating they are stale. shards, err := inst.ReadShardNames(keyspaceName) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } for _, shard := range shards { @@ -171,9 +172,9 @@ func refreshAllShards(ctx context.Context, keyspaceName string) error { continue } shardName := topoproto.KeyspaceShardString(keyspaceName, shard) - log.Infof("Forgetting shard: %s", shardName) + log.Info("Forgetting shard: " + shardName) if err = inst.DeleteShard(keyspaceName, shard); err != nil { - log.Errorf("Failed to delete shard %s: %+v", shardName, err) + log.Error(fmt.Sprintf("Failed to delete shard %s: %+v", shardName, err)) return err } } @@ -185,12 +186,12 @@ func refreshAllShards(ctx context.Context, keyspaceName string) error { func refreshSingleShardHelper(ctx context.Context, keyspaceName string, shardName string) error { shardInfo, err := ts.GetShard(ctx, keyspaceName, shardName) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } err = inst.SaveShard(shardInfo) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } diff --git a/go/vt/vtorc/logic/tablet_discovery.go b/go/vt/vtorc/logic/tablet_discovery.go index ad0d792f6d7..a391576c383 100644 --- a/go/vt/vtorc/logic/tablet_discovery.go +++ b/go/vt/vtorc/logic/tablet_discovery.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "os" "slices" "strings" "sync" @@ -83,7 +84,7 @@ func init() { func getTabletsWatchedByCellStats() map[string]int64 { tabletCountsByCell, err := inst.ReadTabletCountsByCell() if err != nil { - log.Errorf("Failed to read tablet counts by cell: %+v", err) + log.Error(fmt.Sprintf("Failed to read tablet counts by cell: %+v", err)) } return tabletCountsByCell } @@ -93,7 +94,7 @@ func getTabletsWatchedByShardStats() map[string]int64 { tabletsWatchedByShard := 
make(map[string]int64) statsByKS, err := inst.ReadKeyspaceShardStats() if err != nil { - log.Errorf("Failed to read tablet counts by shard: %+v", err) + log.Error(fmt.Sprintf("Failed to read tablet counts by shard: %+v", err)) } for _, s := range statsByKS { tabletsWatchedByShard[s.Keyspace+"."+s.Shard] = s.TabletCount @@ -106,7 +107,7 @@ func getEmergencyReparentShardDisabledStats() map[string]int64 { disabledShards := make(map[string]int64) statsByKS, err := inst.ReadKeyspaceShardStats() if err != nil { - log.Errorf("Failed to read tablet counts by shard: %+v", err) + log.Error(fmt.Sprintf("Failed to read tablet counts by shard: %+v", err)) } for _, s := range statsByKS { if s.DisableEmergencyReparent { @@ -135,7 +136,7 @@ func initializeShardsToWatch() error { // Validate keyspace/shard parses. k, s, err := topoproto.ParseKeyspaceShard(ks) if err != nil { - log.Errorf("Could not parse keyspace/shard %q: %+v", ks, err) + log.Error(fmt.Sprintf("Could not parse keyspace/shard %q: %+v", ks, err)) continue } if !key.IsValidKeyRange(s) { @@ -191,19 +192,20 @@ func OpenTabletDiscovery() <-chan time.Time { tmc = inst.InitializeTMC() // Clear existing cache and perform a new refresh. if _, err := db.ExecVTOrc("DELETE FROM vitess_tablet"); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } // Parse --clusters_to_watch into a filter. err := initializeShardsToWatch() if err != nil { - log.Fatalf("Error parsing --clusters-to-watch: %v", err) + log.Error(fmt.Sprintf("Error parsing --clusters-to-watch: %v", err)) + os.Exit(1) } // We refresh all information from the topo once before we start the ticks to do // it on a timer. ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer cancel() if err := refreshAllInformation(ctx); err != nil { - log.Errorf("failed to initialize topo information: %+v", err) + log.Error(fmt.Sprintf("failed to initialize topo information: %+v", err)) } return time.Tick(config.GetTopoInformationRefreshDuration()) } @@ -222,7 +224,7 @@ func getAllTablets(ctx context.Context, cells []string) (tabletsByCell map[strin mu.Lock() defer mu.Unlock() if err != nil { - log.Errorf("Failed to load tablets from cell %s: %+v", cell, err) + log.Error(fmt.Sprintf("Failed to load tablets from cell %s: %+v", cell, err)) failedCells = append(failedCells, cell) } else { tabletsByCell[cell] = tablets @@ -260,7 +262,7 @@ func refreshTabletsUsing(ctx context.Context, loader func(tabletAlias string), f return nil } if len(failedCells) > 0 { - log.Errorf("Got partial topo result. Failed cells: %s", strings.Join(failedCells, ", ")) + log.Error("Got partial topo result. Failed cells: " + strings.Join(failedCells, ", ")) } // Update each cell that provided a response. This ensures only cells that provided a @@ -299,7 +301,7 @@ func forceRefreshAllTabletsInShard(ctx context.Context, keyspace, shard string, // refreshTabletInfoOfShard only refreshes the tablet records from the topo-server for all the tablets // of the given keyspace-shard. 
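OpenTabletDiscovery above, like the other call sites in this change, replaces log.Fatalf with log.Error followed by an explicit os.Exit(1), since the new Error call no longer terminates the process. If the pair ever feels repetitive, a thin helper along these lines would keep call sites to one line; fatalf here is a hypothetical helper written against log/slog, not something this change adds:

package main

import (
	"fmt"
	"log/slog"
	"os"
)

// fatalf logs at error level and terminates, mirroring the
// log.Error + os.Exit(1) pairs introduced throughout this change.
func fatalf(format string, args ...any) {
	slog.Error(fmt.Sprintf(format, args...))
	os.Exit(1)
}

func run() error { return nil }

func main() {
	if err := run(); err != nil {
		fatalf("Error parsing --clusters-to-watch: %v", err)
	}
}

As with the inlined pairs in the diff, os.Exit skips deferred cleanup, so such a helper only belongs in process-startup paths.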
func refreshTabletInfoOfShard(ctx context.Context, keyspace, shard string) { - log.Infof("refresh of tablet records of shard - %v/%v", keyspace, shard) + log.Info(fmt.Sprintf("refresh of tablet records of shard - %v/%v", keyspace, shard)) refreshTabletsInKeyspaceShard(ctx, keyspace, shard, func(tabletAlias string) { // No-op // We only want to refresh the tablet information for the given shard @@ -309,7 +311,7 @@ func refreshTabletInfoOfShard(ctx context.Context, keyspace, shard string) { func refreshTabletsInKeyspaceShard(ctx context.Context, keyspace, shard string, loader func(tabletAlias string), forceRefresh bool, tabletsToIgnore []string) { tablets, err := ts.GetTabletsByShard(ctx, keyspace, shard) if err != nil { - log.Errorf("Error fetching tablets for keyspace/shard %v/%v: %v", keyspace, shard, err) + log.Error(fmt.Sprintf("Error fetching tablets for keyspace/shard %v/%v: %v", keyspace, shard, err)) return } query := "select alias from vitess_tablet where keyspace = ? and shard = ?" @@ -327,14 +329,14 @@ func refreshTablets(tablets []*topo.TabletInfo, query string, args []any, loader latestInstances[tabletAliasString] = true old, err := inst.ReadTablet(tabletAliasString) if err != nil && err != inst.ErrTabletAliasNil { - log.Error(err) + log.Error(fmt.Sprint(err)) continue } if !forceRefresh && proto.Equal(tablet, old) { continue } if err := inst.SaveTablet(tablet); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) continue } wg.Go(func() { @@ -343,7 +345,7 @@ func refreshTablets(tablets []*topo.TabletInfo, query string, args []any, loader } loader(tabletAliasString) }) - log.Infof("Discovered: %v", tablet) + log.Info(fmt.Sprintf("Discovered: %v", tablet)) } wg.Wait() @@ -357,11 +359,11 @@ func refreshTablets(tablets []*topo.TabletInfo, query string, args []any, loader return nil }) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } for _, tabletAlias := range toForget { if err := inst.ForgetInstance(tabletAlias); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } } } diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go index 75ba11b5931..495b5185af1 100644 --- a/go/vt/vtorc/logic/topology_recovery.go +++ b/go/vt/vtorc/logic/topology_recovery.go @@ -255,7 +255,7 @@ func LockShard(ctx context.Context, keyspace, shard, lockAction string) (context // AuditTopologyRecovery audits a single step in a topology recovery process. func AuditTopologyRecovery(topologyRecovery *TopologyRecovery, message string) error { - log.Infof("topology_recovery: %s", message) + log.Info("topology_recovery: " + message) if topologyRecovery == nil { return nil } @@ -517,19 +517,19 @@ func restartDirectReplicas(ctx context.Context, analysisEntry *inst.DetectionAna func isERSEnabled(analysisEntry *inst.DetectionAnalysis) bool { // If ERS is disabled globally we have no way of repairing the cluster. if !config.ERSEnabled() { - log.Infof("VTOrc not configured to run ERS, skipping recovering %v", analysisEntry.Analysis) + log.Info(fmt.Sprintf("VTOrc not configured to run ERS, skipping recovering %v", analysisEntry.Analysis)) return false } // Return false if ERS is disabled on the keyspace. 
if analysisEntry.AnalyzedKeyspaceEmergencyReparentDisabled { - log.Infof("ERS is disabled on keyspace %s, skipping recovering %v", analysisEntry.AnalyzedKeyspace, analysisEntry.Analysis) + log.Info(fmt.Sprintf("ERS is disabled on keyspace %s, skipping recovering %v", analysisEntry.AnalyzedKeyspace, analysisEntry.Analysis)) return false } // Return false if ERS is disabled on the shard. if analysisEntry.AnalyzedShardEmergencyReparentDisabled { - log.Infof("ERS is disabled on keyspace/shard %s, skipping recovering %v", topoproto.KeyspaceShardString(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard), analysisEntry.Analysis) + log.Info(fmt.Sprintf("ERS is disabled on keyspace/shard %s, skipping recovering %v", topoproto.KeyspaceShardString(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard), analysisEntry.Analysis)) return false } @@ -545,20 +545,20 @@ func getCheckAndRecoverFunctionCode(analysisEntry *inst.DetectionAnalysis) (reco case inst.DeadPrimary, inst.DeadPrimaryAndSomeReplicas, inst.PrimaryDiskStalled, inst.PrimarySemiSyncBlocked: // If ERS is disabled globally, on the keyspace or the shard, skip recovery. if !isERSEnabled(analysisEntry) { - log.Infof("VTOrc not configured to run EmergencyReparentShard, skipping recovering %v", analysisCode) + log.Info(fmt.Sprintf("VTOrc not configured to run EmergencyReparentShard, skipping recovering %v", analysisCode)) recoverySkipCode = RecoverySkipERSDisabled } recoveryFunc = recoverDeadPrimaryFunc case inst.PrimaryTabletDeleted: // If ERS is disabled globally, on the keyspace or the shard, skip recovery. if !isERSEnabled(analysisEntry) { - log.Infof("VTOrc not configured to run EmergencyReparentShard, skipping recovering %v", analysisCode) + log.Info(fmt.Sprintf("VTOrc not configured to run EmergencyReparentShard, skipping recovering %v", analysisCode)) recoverySkipCode = RecoverySkipERSDisabled } recoveryFunc = recoverPrimaryTabletDeletedFunc case inst.ErrantGTIDDetected: if !config.ConvertTabletWithErrantGTIDs() { - log.Infof("VTOrc not configured to do anything on detecting errant GTIDs, skipping recovering %v", analysisCode) + log.Info(fmt.Sprintf("VTOrc not configured to do anything on detecting errant GTIDs, skipping recovering %v", analysisCode)) recoverySkipCode = RecoverySkipNoRecoveryAction } recoveryFunc = recoverErrantGTIDDetectedFunc @@ -924,7 +924,7 @@ func recheckPrimaryHealth(analysisEntry *inst.DetectionAnalysis, recoveryLabels // checking if the original analysis is valid even after the primary refresh. recoveryRequired, err := checkIfAlreadyFixed(analysisEntry) if err != nil { - log.Infof("recheckPrimaryHealth: Checking if recovery is required returned err: %v", err) + log.Info(fmt.Sprintf("recheckPrimaryHealth: Checking if recovery is required returned err: %v", err)) return err } @@ -932,7 +932,7 @@ func recheckPrimaryHealth(analysisEntry *inst.DetectionAnalysis, recoveryLabels // This could mean that either the original analysis has changed or some other Vtorc instance has already performing the mitigation. // In either case, the original analysis is stale which can be safely aborted. if recoveryRequired { - log.Infof("recheckPrimaryHealth: Primary recovery is required, Tablet alias: %v", primaryTabletAlias) + log.Info(fmt.Sprintf("recheckPrimaryHealth: Primary recovery is required, Tablet alias: %v", primaryTabletAlias)) recoveriesSkippedCounter.Add(append(recoveryLabels, RecoverySkipPrimaryRecovery.String()), 1) // original analysis is stale, abort. 
return fmt.Errorf("aborting %s, primary mitigation is required", originalAnalysisEntry) @@ -965,7 +965,7 @@ func CheckAndRecover() { // Allow the analysis to run even if we don't want to recover detectionAnalysis, err := inst.GetDetectionAnalysis("", "", &inst.DetectionAnalysisHints{AuditAnalysis: true}) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return } @@ -1001,7 +1001,7 @@ func CheckAndRecover() { go func() { if err := executeCheckAndRecoverFunction(analysisEntry); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } }() } diff --git a/go/vt/vtorc/logic/topology_recovery_dao.go b/go/vt/vtorc/logic/topology_recovery_dao.go index 187af9cdddc..ecceec7e565 100644 --- a/go/vt/vtorc/logic/topology_recovery_dao.go +++ b/go/vt/vtorc/logic/topology_recovery_dao.go @@ -50,12 +50,12 @@ func InsertRecoveryDetection(analysisEntry *inst.DetectionAnalysis) error { analysisEntry.AnalyzedShard, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } id, err := sqlResult.LastInsertId() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } analysisEntry.RecoveryId = id @@ -113,12 +113,12 @@ func AttemptRecoveryRegistration(analysisEntry *inst.DetectionAnalysis) (*Topolo // Check if there is an active recovery in progress for the cluster of the given instance. recoveries, err := ReadActiveClusterRecoveries(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return nil, err } if len(recoveries) > 0 { errMsg := fmt.Sprintf("AttemptRecoveryRegistration: Active recovery (id:%v) in the cluster %s:%s for %s", recoveries[0].ID, analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard, recoveries[0].AnalysisEntry.Analysis) - log.Errorf(errMsg) + log.Error(errMsg) return nil, errors.New(errMsg) } @@ -126,7 +126,7 @@ func AttemptRecoveryRegistration(analysisEntry *inst.DetectionAnalysis) (*Topolo topologyRecovery, err = writeTopologyRecovery(topologyRecovery) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return nil, err } return topologyRecovery, nil @@ -150,7 +150,7 @@ func writeResolveRecovery(topologyRecovery *TopologyRecovery) error { topologyRecovery.ID, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } @@ -202,7 +202,7 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog return nil }) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return res, err } @@ -248,12 +248,12 @@ func writeTopologyRecoveryStep(topologyRecoveryStep *TopologyRecoveryStep) error topologyRecoveryStep.Message, ) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return err } topologyRecoveryStep.ID, err = sqlResult.LastInsertId() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } return err } diff --git a/go/vt/vtorc/logic/vtorc.go b/go/vt/vtorc/logic/vtorc.go index 5291d7d193d..8678c3f38bc 100644 --- a/go/vt/vtorc/logic/vtorc.go +++ b/go/vt/vtorc/logic/vtorc.go @@ -18,6 +18,7 @@ package logic import ( "context" + "fmt" "sync" "sync/atomic" "time" @@ -76,14 +77,14 @@ func init() { // closeVTOrc runs all the operations required to cleanly shutdown VTOrc func closeVTOrc() { - log.Infof("Starting VTOrc shutdown") + log.Info("Starting VTOrc shutdown") atomic.StoreInt32(&hasReceivedSIGTERM, 1) // Poke other go routines to stop cleanly here ... 
_ = inst.AuditOperation("shutdown", "", "Triggered via SIGTERM") // wait for the locks to be released waitForLocksRelease() ts.Close() - log.Infof("VTOrc closed") + log.Info("VTOrc closed") } // waitForLocksRelease is used to wait for release of locks @@ -96,7 +97,7 @@ func waitForLocksRelease() { } select { case <-timeout: - log.Infof("wait for lock release timed out. Some locks might not have been released.") + log.Info("wait for lock release timed out. Some locks might not have been released.") default: time.Sleep(50 * time.Millisecond) continue @@ -134,7 +135,7 @@ func handleDiscoveryRequests() { // replicas (if any) are also checked. func DiscoverInstance(tabletAlias string, forceDiscovery bool) { if inst.InstanceIsForgotten(tabletAlias) { - log.Infof("discoverInstance: skipping discovery of %+v because it is set to be forgotten", tabletAlias) + log.Info(fmt.Sprintf("discoverInstance: skipping discovery of %+v because it is set to be forgotten", tabletAlias)) return } @@ -151,7 +152,7 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) { discoveryTime := latency.Elapsed("total") if discoveryTime > config.GetInstancePollTime() { instancePollSecondsExceededCounter.Add(1) - log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", tabletAlias, discoveryTime.Seconds()) + log.Warn(fmt.Sprintf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", tabletAlias, discoveryTime.Seconds())) } }() @@ -191,20 +192,19 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) { discoveryInstanceTimings.Add("Other", otherLatency) if err != nil { - log.Errorf("Failed to discover %s (force: %t), err: %v", tabletAlias, forceDiscovery, err) + log.Error(fmt.Sprintf("Failed to discover %s (force: %t), err: %v", tabletAlias, forceDiscovery, err)) } else { - log.Infof("Discovered %s (force: %t): %+v", tabletAlias, forceDiscovery, instance) + log.Info(fmt.Sprintf("Discovered %s (force: %t): %+v", tabletAlias, forceDiscovery, instance)) } if instance == nil { failedDiscoveriesCounter.Add(1) if util.ClearToLog("discoverInstance", tabletAlias) { - log.Warningf("DiscoverInstance(%+v) instance is nil in %.3fs (Backend: %.3fs, Instance: %.3fs), error=%+v", - tabletAlias, + log.Warn(fmt.Sprintf("DiscoverInstance(%+v) instance is nil in %.3fs (Backend: %.3fs, Instance: %.3fs), error=%+v", tabletAlias, totalLatency.Seconds(), backendLatency.Seconds(), instanceLatency.Seconds(), - err) + err)) } return } @@ -214,7 +214,7 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) { func onHealthTick() { tabletAliases, err := inst.ReadOutdatedInstanceKeys() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } func() { @@ -242,13 +242,13 @@ func onHealthTick() { // periodically investigated and their status captured, and long since unseen instances are // purged and forgotten. 
func ContinuousDiscovery() { - log.Infof("continuous discovery: setting up") + log.Info("continuous discovery: setting up") recentDiscoveryOperationKeys = cache.New(config.GetInstancePollTime(), time.Second) if !config.GetAllowRecovery() { log.Info("--allow-recovery is set to 'false', disabling recovery actions") if err := DisableRecovery(); err != nil { - log.Errorf("failed to disable recoveries: %+v", err) + log.Error(fmt.Sprintf("failed to disable recoveries: %+v", err)) return } } @@ -262,7 +262,7 @@ func ContinuousDiscovery() { var recoveryEntrance int64 var snapshotTopologiesTick <-chan time.Time if config.GetSnapshotTopologyInterval() > 0 { - log.Warning("--snapshot-topology-interval is deprecated and will be removed in v25+") + log.Warn("--snapshot-topology-interval is deprecated and will be removed in v25+") snapshotTopologiesTick = time.Tick(config.GetSnapshotTopologyInterval()) } @@ -272,7 +272,7 @@ func ContinuousDiscovery() { // On termination of the server, we should close VTOrc cleanly servenv.OnTermSync(closeVTOrc) - log.Infof("continuous discovery: starting") + log.Info("continuous discovery: starting") for { select { case <-healthTick: @@ -309,7 +309,7 @@ func ContinuousDiscovery() { case <-tabletTopoTick: ctx, cancel := context.WithTimeout(context.Background(), config.GetTopoInformationRefreshDuration()) if err := refreshAllInformation(ctx); err != nil { - log.Errorf("failed to refresh topo information: %+v", err) + log.Error(fmt.Sprintf("failed to refresh topo information: %+v", err)) } cancel() } diff --git a/go/vt/vtorc/process/health.go b/go/vt/vtorc/process/health.go index d448f03bb83..3959164582d 100644 --- a/go/vt/vtorc/process/health.go +++ b/go/vt/vtorc/process/health.go @@ -17,6 +17,7 @@ package process import ( + "fmt" "sync/atomic" "time" @@ -37,7 +38,7 @@ var ThisNodeHealth = &NodeHealth{} func writeHealthToDatabase() bool { _, err := db.ExecVTOrc("DELETE FROM node_health") if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return false } sqlResult, err := db.ExecVTOrc(`INSERT @@ -47,12 +48,12 @@ func writeHealthToDatabase() bool { DATETIME('now') )`) if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return false } rows, err := sqlResult.RowsAffected() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return false } return rows > 0 diff --git a/go/vt/vtorc/server/discovery.go b/go/vt/vtorc/server/discovery.go index b510a8016c9..9bbc4cacd8b 100644 --- a/go/vt/vtorc/server/discovery.go +++ b/go/vt/vtorc/server/discovery.go @@ -32,7 +32,7 @@ import ( func validateCell(cell string) error { if cell == "" { // TODO: remove warning in v25+, make flag required. - log.Warning("WARNING: --cell will become a required vtorc flag in v25 and up") + log.Warn("WARNING: --cell will become a required vtorc flag in v25 and up") return nil } diff --git a/go/vt/vttablet/customrule/filecustomrule/filecustomrule.go b/go/vt/vttablet/customrule/filecustomrule/filecustomrule.go index 6f2000c2322..4580b4845fe 100644 --- a/go/vt/vttablet/customrule/filecustomrule/filecustomrule.go +++ b/go/vt/vttablet/customrule/filecustomrule/filecustomrule.go @@ -18,6 +18,7 @@ limitations under the License. 
package filecustomrule import ( + "fmt" "os" "path" "time" @@ -75,14 +76,14 @@ func NewFileCustomRule() (fcr *FileCustomRule) { func ParseRules(path string) (*rules.Rules, error) { data, err := os.ReadFile(path) if err != nil { - log.Warningf("Error reading file %v: %v", path, err) + log.Warn(fmt.Sprintf("Error reading file %v: %v", path, err)) // Don't update any internal cache, just return error return nil, err } qrs := rules.New() err = qrs.UnmarshalJSON(data) if err != nil { - log.Warningf("Error unmarshaling query rules %v", err) + log.Warn(fmt.Sprintf("Error unmarshaling query rules %v", err)) return nil, err } return qrs, nil @@ -104,7 +105,7 @@ func (fcr *FileCustomRule) Open(qsc tabletserver.Controller, rulePath string) er fcr.currentRuleSet = qrs.Copy() // Push query rules to vttablet qsc.SetQueryRules(FileCustomRuleSource, qrs.Copy()) - log.Infof("Custom rule loaded from file: %s", fcr.path) + log.Info("Custom rule loaded from file: " + fcr.path) return nil } @@ -128,7 +129,8 @@ func ActivateFileCustomRules(qsc tabletserver.Controller) { watcher, err := fsnotify.NewWatcher() if err != nil { - log.Fatalf("Unable create new fsnotify watcher: %v", err) + log.Error(fmt.Sprintf("Unable create new fsnotify watcher: %v", err)) + os.Exit(1) } servenv.OnTerm(func() { watcher.Close() }) @@ -143,21 +145,22 @@ func ActivateFileCustomRules(qsc tabletserver.Controller) { continue } if err := fileCustomRule.Open(tsc, fileRulePath); err != nil { - log.Infof("Failed to load custom rules from %q: %v", fileRulePath, err) + log.Info(fmt.Sprintf("Failed to load custom rules from %q: %v", fileRulePath, err)) } else { - log.Infof("Loaded custom rules from %q", fileRulePath) + log.Info(fmt.Sprintf("Loaded custom rules from %q", fileRulePath)) } case err, ok := <-watcher.Errors: if !ok { return } - log.Errorf("Error watching %v: %v", fileRulePath, err) + log.Error(fmt.Sprintf("Error watching %v: %v", fileRulePath, err)) } } }(qsc) if err = watcher.Add(baseDir); err != nil { - log.Fatalf("Unable to set up watcher for %v + %v: %v", baseDir, ruleFileName, err) + log.Error(fmt.Sprintf("Unable to set up watcher for %v + %v: %v", baseDir, ruleFileName, err)) + os.Exit(1) } } } diff --git a/go/vt/vttablet/customrule/topocustomrule/topocustomrule.go b/go/vt/vttablet/customrule/topocustomrule/topocustomrule.go index f120c745310..e17c28ea2da 100644 --- a/go/vt/vttablet/customrule/topocustomrule/topocustomrule.go +++ b/go/vt/vttablet/customrule/topocustomrule/topocustomrule.go @@ -24,6 +24,7 @@ import ( "context" "errors" "fmt" + "os" "reflect" "sync" "time" @@ -100,7 +101,7 @@ func (cr *topoCustomRule) start() { go func() { for { if err := cr.oneWatch(); err != nil { - log.Warningf("Background watch of topo custom rule failed: %v", err) + log.Warn(fmt.Sprintf("Background watch of topo custom rule failed: %v", err)) } cr.mu.Lock() @@ -108,11 +109,11 @@ func (cr *topoCustomRule) start() { cr.mu.Unlock() if stopped { - log.Warningf("Topo custom rule was terminated") + log.Warn("Topo custom rule was terminated") return } - log.Warningf("Sleeping for %v before trying again", sleepDuringTopoFailure) + log.Warn(fmt.Sprintf("Sleeping for %v before trying again", sleepDuringTopoFailure)) time.Sleep(sleepDuringTopoFailure) } }() @@ -136,7 +137,7 @@ func (cr *topoCustomRule) apply(wd *topo.WatchData) error { if !reflect.DeepEqual(cr.qrs, qrs) { cr.qrs = qrs.Copy() cr.qsc.SetQueryRules(topoCustomRuleSource, qrs) - log.Infof("Custom rule version %v fetched from topo and applied to vttablet", wd.Version) + 
log.Info(fmt.Sprintf("Custom rule version %v fetched from topo and applied to vttablet", wd.Version)) } return nil @@ -204,7 +205,8 @@ func activateTopoCustomRules(qsc tabletserver.Controller) { cr, err := newTopoCustomRule(qsc, ruleCell, rulePath) if err != nil { - log.Fatalf("cannot start TopoCustomRule: %v", err) + log.Error(fmt.Sprintf("cannot start TopoCustomRule: %v", err)) + os.Exit(1) } cr.start() diff --git a/go/vt/vttablet/endtoend/endtoend.go b/go/vt/vttablet/endtoend/endtoend.go index 4a15f40f602..31f5a697177 100644 --- a/go/vt/vttablet/endtoend/endtoend.go +++ b/go/vt/vttablet/endtoend/endtoend.go @@ -29,7 +29,7 @@ import ( func prettyPrint(qr sqltypes.Result) string { out, err := json.Marshal(qr) if err != nil { - log.Errorf("Could not marshal result to json for %#v", qr) + log.Error(fmt.Sprintf("Could not marshal result to json for %#v", qr)) return fmt.Sprintf("%#v", qr) } return string(out) diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go index 5062f5e6429..4b9616c69a5 100644 --- a/go/vt/vttablet/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -18,6 +18,7 @@ package framework import ( "context" + "fmt" "net" "net/http" "time" @@ -87,7 +88,7 @@ func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql go func() { err := servenv.HTTPServe(ln) if err != nil { - log.Errorf("HTTPServe failed: %v", err) + log.Error(fmt.Sprintf("HTTPServe failed: %v", err)) } }() for { @@ -119,7 +120,7 @@ func StartServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnP config.QueryCacheDoorkeeper = false config.SchemaReloadInterval = 5 * time.Second gotBytes, _ := yaml2.Marshal(config) - log.Infof("Config:\n%s", gotBytes) + log.Info(fmt.Sprintf("Config:\n%s", gotBytes)) return StartCustomServer(ctx, connParams, connAppDebugParams, dbName, config) } diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go index 9600724be3b..1a7f35249de 100644 --- a/go/vt/vttablet/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "io" + "log/slog" "net/http" "reflect" "strings" @@ -525,43 +526,43 @@ func TestDBAStatements(t *testing.T) { } type testLogger struct { - logs []string - savedInfof func(format string, args ...any) - savedErrorf func(format string, args ...any) + handler *log.CaptureHandler + restore func() } func newTestLogger() *testLogger { - tl := &testLogger{ - savedInfof: log.Infof, - savedErrorf: log.Errorf, + handler := log.NewCaptureHandler() + restore := log.SetLogger(slog.New(handler)) + + return &testLogger{ + handler: handler, + restore: restore, } - log.Infof = tl.recordInfof - log.Errorf = tl.recordErrorf - return tl } func (tl *testLogger) Close() { - log.Infof = tl.savedInfof - log.Errorf = tl.savedErrorf + if tl.restore != nil { + tl.restore() + } } -func (tl *testLogger) recordInfof(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - tl.logs = append(tl.logs, msg) - tl.savedInfof(msg) -} +func (tl *testLogger) getLog(i int) string { + logs := tl.getLogs() + if i < len(logs) { + return logs[i] + } -func (tl *testLogger) recordErrorf(format string, args ...any) { - msg := fmt.Sprintf(format, args...) 
- tl.logs = append(tl.logs, msg) - tl.savedErrorf(msg) + return fmt.Sprintf("ERROR: log %d/%d does not exist", i, len(logs)) } -func (tl *testLogger) getLog(i int) string { - if i < len(tl.logs) { - return tl.logs[i] +func (tl *testLogger) getLogs() []string { + records := tl.handler.Records() + logs := make([]string, 0, len(records)) + for _, record := range records { + logs = append(logs, record.Message) } - return fmt.Sprintf("ERROR: log %d/%d does not exist", i, len(tl.logs)) + + return logs } func TestClientFoundRows(t *testing.T) { diff --git a/go/vt/vttablet/endtoend/vstreamer_test.go b/go/vt/vttablet/endtoend/vstreamer_test.go index e9604ca74c9..8f068153d70 100644 --- a/go/vt/vttablet/endtoend/vstreamer_test.go +++ b/go/vt/vttablet/endtoend/vstreamer_test.go @@ -46,7 +46,7 @@ func getSchemaVersionTableCreationEvents() []string { client := framework.NewClient() _, err := client.Execute("describe _vt.schema_version", nil) if err != nil { - log.Errorf("_vt.schema_version not found, will expect its table creation events") + log.Error("_vt.schema_version not found, will expect its table creation events") return tableCreationEvents } return nil @@ -171,7 +171,7 @@ func TestSchemaVersioning(t *testing.T) { if event.Type == binlogdatapb.VEventType_HEARTBEAT { continue } - log.Infof("Received event %v", event) + log.Info(fmt.Sprintf("Received event %v", event)) evs = append(evs, event) } select { @@ -189,7 +189,7 @@ func TestSchemaVersioning(t *testing.T) { t.Error(err) } }) - log.Infof("\n\n\n=============================================== CURRENT EVENTS START HERE ======================\n\n\n") + log.Info("\n\n\n=============================================== CURRENT EVENTS START HERE ======================\n\n\n") runCases(ctx, t, cases, eventCh) tsv.SetTracking(false) @@ -214,7 +214,7 @@ func TestSchemaVersioning(t *testing.T) { cancel() wg.Wait() - log.Infof("\n\n\n=============================================== PAST EVENTS WITH TRACK VERSIONS START HERE ======================\n\n\n") + log.Info("\n\n\n=============================================== PAST EVENTS WITH TRACK VERSIONS START HERE ======================\n\n\n") ctx, cancel = context.WithCancel(context.Background()) defer cancel() eventCh = make(chan []*binlogdatapb.VEvent) @@ -224,7 +224,7 @@ func TestSchemaVersioning(t *testing.T) { if event.Type == binlogdatapb.VEventType_HEARTBEAT { continue } - log.Infof("Received event %v", event) + log.Info(fmt.Sprintf("Received event %v", event)) evs = append(evs, event) } // Ignore unrelated events. @@ -287,7 +287,7 @@ func TestSchemaVersioning(t *testing.T) { cancel() wg.Wait() - log.Infof("\n\n\n=============================================== PAST EVENTS WITHOUT TRACK VERSIONS START HERE ======================\n\n\n") + log.Info("\n\n\n=============================================== PAST EVENTS WITHOUT TRACK VERSIONS START HERE ======================\n\n\n") tsv.EnableHistorian(false) ctx, cancel = context.WithCancel(context.Background()) defer cancel() @@ -298,7 +298,7 @@ func TestSchemaVersioning(t *testing.T) { if event.Type == binlogdatapb.VEventType_HEARTBEAT { continue } - log.Infof("Received event %v", event) + log.Info(fmt.Sprintf("Received event %v", event)) evs = append(evs, event) } // Ignore unrelated events. 
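// A minimal, self-contained sketch of the conversion pattern applied throughout these hunks,
// assuming the new log API (Info/Warn/Error, SetLogger, NewCaptureHandler) behaves as used in
// the misc_test.go change above: printf-style helpers become Info/Warn/Error wrapping
// fmt.Sprintf, the terminating helpers (Fatalf/Exitf) become Error followed by an explicit
// os.Exit(1), and tests read messages back through a CaptureHandler. The package and function
// names below (examplelog, doWork, captureMessages) are hypothetical illustrations only.
package examplelog

import (
	"fmt"
	"log/slog"
	"os"

	"vitess.io/vitess/go/vt/log"
)

// doWork shows the converted call-site pattern.
func doWork(path string) {
	// Before: log.Infof("loading rules from %s", path)
	log.Info(fmt.Sprintf("loading rules from %s", path))

	if _, err := os.Stat(path); err != nil {
		// Before: log.Fatalf("cannot stat %s: %v", path, err)
		log.Error(fmt.Sprintf("cannot stat %s: %v", path, err))
		os.Exit(1)
	}
}

// captureMessages shows the test-side pattern: install a capture handler, emit a log line,
// then read back the recorded messages.
func captureMessages() []string {
	handler := log.NewCaptureHandler()
	restore := log.SetLogger(slog.New(handler))
	defer restore()

	log.Info(fmt.Sprintf("captured %d message(s)", 1))

	var msgs []string
	for _, record := range handler.Records() {
		msgs = append(msgs, record.Message)
	}
	return msgs
}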
@@ -395,7 +395,7 @@ func expectLogs(ctx context.Context, t *testing.T, query string, eventCh chan [] timer := time.NewTimer(5 * time.Second) defer timer.Stop() var evs []*binlogdatapb.VEvent - log.Infof("In expectLogs for query %s, output len %d", query, len(output)) + log.Info(fmt.Sprintf("In expectLogs for query %s, output len %d", query, len(output))) for { select { case allevs, ok := <-eventCh: @@ -431,7 +431,7 @@ func expectLogs(ctx context.Context, t *testing.T, query string, eventCh chan [] evs = append(evs, ev) } - log.Infof("In expectLogs, have got %d events, want %d", len(evs), len(output)) + log.Info(fmt.Sprintf("In expectLogs, have got %d events, want %d", len(evs), len(output))) case <-ctx.Done(): t.Fatalf("expectLog: Done(), stream ended early") case <-timer.C: @@ -500,7 +500,7 @@ func encodeString(in string) string { func validateSchemaInserted(client *framework.QueryClient, ddl string) bool { qr, _ := client.Execute("select * from _vt.schema_version where ddl = "+encodeString(ddl), nil) if len(qr.Rows) == 1 { - log.Infof("Found ddl in schema_version: %s", ddl) + log.Info("Found ddl in schema_version: " + ddl) return true } return false @@ -513,12 +513,12 @@ func waitForVersionInsert(client *framework.QueryClient, ddl string) (bool, erro for { select { case <-timeout: - log.Infof("waitForVersionInsert timed out") + log.Info("waitForVersionInsert timed out") return false, errors.New("waitForVersionInsert timed out") case <-tick: ok := validateSchemaInserted(client, ddl) if ok { - log.Infof("Found version insert for %s", ddl) + log.Info("Found version insert for " + ddl) return true, nil } } diff --git a/go/vt/vttablet/filelogger/filelogger.go b/go/vt/vttablet/filelogger/filelogger.go index 292d6816428..e8642515ee5 100644 --- a/go/vt/vttablet/filelogger/filelogger.go +++ b/go/vt/vttablet/filelogger/filelogger.go @@ -61,7 +61,7 @@ func (l *fileLogger) Stop() { // Init starts logging to the given file path. func Init(path string) (FileLogger, error) { - log.Infof("Logging queries to file %s", path) + log.Info("Logging queries to file " + path) logChan, err := tabletenv.StatsLogger.LogToFile(path, streamlog.GetFormatter(tabletenv.StatsLogger)) if err != nil { return nil, err diff --git a/go/vt/vttablet/grpctmclient/client.go b/go/vt/vttablet/grpctmclient/client.go index 143dc4798ff..844d6eb22e4 100644 --- a/go/vt/vttablet/grpctmclient/client.go +++ b/go/vt/vttablet/grpctmclient/client.go @@ -1190,7 +1190,7 @@ func (client *Client) UpdateSequenceTables(ctx context.Context, tablet *topodata // VDiff is part of the tmclient.TabletManagerClient interface. 
func (client *Client) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { - log.Infof("VDiff for tablet %s, request %+v", tablet.Alias.String(), req) + log.Info(fmt.Sprintf("VDiff for tablet %s, request %+v", tablet.Alias.String(), req)) c, closer, err := client.dialer.dial(ctx, tablet) if err != nil { return nil, err diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index 23ccfe5bbf0..31f360cf5d3 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -280,7 +280,7 @@ func (e *Executor) Open() error { if atomic.LoadInt64(&e.isOpen) > 0 || !e.env.Config().EnableOnlineDDL { return nil } - log.Infof("onlineDDL Executor Open()") + log.Info("onlineDDL Executor Open()") e.reviewedRunningMigrationsFlag = false // will be set as "true" by reviewRunningMigrations() e.ownedRunningMigrations.Range(func(k, _ any) bool { @@ -311,7 +311,7 @@ func (e *Executor) Close() { if atomic.LoadInt64(&e.isOpen) == 0 { return } - log.Infof("onlineDDL Executor Close()") + log.Info("onlineDDL Executor Close()") e.ticks.Stop() e.pool.Close() @@ -609,7 +609,7 @@ func (e *Executor) terminateVReplMigration(ctx context.Context, uuid string, del } // silently skip error; stopping the stream is just a graceful act; later deleting it is more important if _, err := e.vreplicationExec(ctx, tablet.Tablet, query); err != nil { - log.Errorf("FAIL vreplicationExec: uuid=%s, query=%v, error=%v", uuid, query, err) + log.Error(fmt.Sprintf("FAIL vreplicationExec: uuid=%s, query=%v, error=%v", uuid, query, err)) } if deleteEntry { if err := e.deleteVReplicationEntry(ctx, uuid); err != nil { @@ -639,7 +639,7 @@ func (e *Executor) startVReplication(ctx context.Context, tablet *topodatapb.Tab // This is done on a best-effort basis, by issuing `KILL` and `KILL QUERY` commands. As MySQL goes, // it is not guaranteed that the queries/transactions will terminate in a timely manner. 
func (e *Executor) killTableLockHoldersAndAccessors(ctx context.Context, uuid string, tableName string, excludeIds ...int64) error { - log.Infof("killTableLockHoldersAndAccessors %v:, table-%v", uuid, tableName) + log.Info(fmt.Sprintf("killTableLockHoldersAndAccessors %v:, table-%v", uuid, tableName)) conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) if err != nil { return err @@ -662,18 +662,18 @@ func (e *Executor) killTableLockHoldersAndAccessors(ctx context.Context, uuid st return vterrors.Wrapf(err, "finding queries potentially operating on table") } - log.Infof("killTableLockHoldersAndAccessors %v: found %v potential queries", uuid, len(rs.Rows)) + log.Info(fmt.Sprintf("killTableLockHoldersAndAccessors %v: found %v potential queries", uuid, len(rs.Rows))) // Now that we have some list of queries, we actually parse them to find whether the query actually references our table: for _, row := range rs.Named().Rows { threadId := row.AsInt64("id", 0) if skipKill(threadId) { - log.Infof("killTableLockHoldersAndAccessors %v: skipping thread %v as it is excluded", uuid, threadId) + log.Info(fmt.Sprintf("killTableLockHoldersAndAccessors %v: skipping thread %v as it is excluded", uuid, threadId)) continue } infoQuery := row.AsString("info", "") stmt, err := e.env.Environment().Parser().Parse(infoQuery) if err != nil { - log.Error(vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unable to parse processlist Info query: %v", infoQuery)) + log.Error(fmt.Sprint(vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unable to parse processlist Info query: %v", infoQuery))) continue } queryUsesTable := false @@ -696,10 +696,10 @@ func (e *Executor) killTableLockHoldersAndAccessors(ctx context.Context, uuid st }, stmt) if queryUsesTable { - log.Infof("killTableLockHoldersAndAccessors %v: killing query %v: %.100s", uuid, threadId, infoQuery) + log.Info(fmt.Sprintf("killTableLockHoldersAndAccessors %v: killing query %v: %.100s", uuid, threadId, infoQuery)) killQuery := fmt.Sprintf("KILL QUERY %d", threadId) if _, err := conn.Conn.ExecuteFetch(killQuery, 1, false); err != nil { - log.Error(vterrors.Errorf(vtrpcpb.Code_ABORTED, "could not kill query %v. Ignoring", threadId)) + log.Error(fmt.Sprint(vterrors.Errorf(vtrpcpb.Code_ABORTED, "could not kill query %v. 
Ignoring", threadId))) } } } @@ -721,18 +721,18 @@ func (e *Executor) killTableLockHoldersAndAccessors(ctx context.Context, uuid st if err != nil { return vterrors.Wrapf(err, "finding transactions locking table `%s` %s", tableName, description) } - log.Infof("terminateTransactions %v: found %v transactions locking table `%s` %s", uuid, len(rs.Rows), tableName, description) + log.Info(fmt.Sprintf("terminateTransactions %v: found %v transactions locking table `%s` %s", uuid, len(rs.Rows), tableName, description)) for _, row := range rs.Named().Rows { threadId := row.AsInt64(column, 0) if skipKill(threadId) { - log.Infof("terminateTransactions %v: skipping thread %v as it is excluded", uuid, threadId) + log.Info(fmt.Sprintf("terminateTransactions %v: skipping thread %v as it is excluded", uuid, threadId)) continue } - log.Infof("terminateTransactions %v: killing connection %v with transaction locking table `%s` %s", uuid, threadId, tableName, description) + log.Info(fmt.Sprintf("terminateTransactions %v: killing connection %v with transaction locking table `%s` %s", uuid, threadId, tableName, description)) killConnection := fmt.Sprintf("KILL %d", threadId) _, err = conn.Conn.ExecuteFetch(killConnection, 1, false) if err != nil { - log.Errorf("terminateTransactions %v: unable to kill the connection %d locking table `%s` %s: %v", uuid, threadId, tableName, description, err) + log.Error(fmt.Sprintf("terminateTransactions %v: unable to kill the connection %d locking table `%s` %s: %v", uuid, threadId, tableName, description, err)) } } return nil @@ -915,7 +915,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh if !renameWasSuccessful { err := renameConn.Conn.Kill("premature exit while renaming tables", 0) if err != nil { - log.Warningf("Failed to kill connection being used to rename tables in OnlineDDL migration %s: %v", onlineDDL.UUID, err) + log.Warn(fmt.Sprintf("Failed to kill connection being used to rename tables in OnlineDDL migration %s: %v", onlineDDL.UUID, err)) } } }() @@ -933,7 +933,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh // - https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced // - https://github.com/planetscale/mysql-server/commit/c2f1344a6863518d749f2eb01a4c74ca08a5b889 // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps3. - log.Infof("@@rename_table_preserve_foreign_key supported") + log.Info("@@rename_table_preserve_foreign_key supported") } renameQuery := sqlparser.BuildParsedQuery(sqlSwapTables, onlineDDL.Table, sentryTableName, vreplTable, onlineDDL.Table, sentryTableName, vreplTable) @@ -969,7 +969,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh defer bufferingContextCancel() // Preparation is complete. We proceed to cut-over. 
toggleBuffering := func(bufferQueries bool) error { - log.Infof("toggling buffering: %t in migration %v", bufferQueries, onlineDDL.UUID) + log.Info(fmt.Sprintf("toggling buffering: %t in migration %v", bufferQueries, onlineDDL.UUID)) timeout := onlineDDL.CutOverThreshold + qrBufferExtraTimeout e.toggleBufferTableFunc(bufferingCtx, onlineDDL.Table, timeout, bufferQueries) @@ -984,16 +984,16 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh return vterrors.Wrapf(err, "refreshing table state") } } - log.Infof("toggled buffering: %t in migration %v", bufferQueries, onlineDDL.UUID) + log.Info(fmt.Sprintf("toggled buffering: %t in migration %v", bufferQueries, onlineDDL.UUID)) return nil } var reenableOnce sync.Once reenableWritesOnce := func() { reenableOnce.Do(func() { - log.Infof("re-enabling writes in migration %v", onlineDDL.UUID) + log.Info(fmt.Sprintf("re-enabling writes in migration %v", onlineDDL.UUID)) toggleBuffering(false) - go log.Infof("cutOverVReplMigration %v: unbuffered queries", s.workflow) + go log.Info(fmt.Sprintf("cutOverVReplMigration %v: unbuffered queries", s.workflow)) }) } e.updateMigrationStage(ctx, onlineDDL.UUID, "buffering queries") @@ -1061,7 +1061,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh e.updateMigrationStage(ctx, onlineDDL.UUID, "RENAME found") if shouldForceCutOver { - log.Infof("cutOverVReplMigration %v: force cut-over requested, killing table lock holders and accessors while RENAME is in place", s.workflow) + log.Info(fmt.Sprintf("cutOverVReplMigration %v: force cut-over requested, killing table lock holders and accessors while RENAME is in place", s.workflow)) if err := e.killTableLockHoldersAndAccessors(killWhileRenamingContext, onlineDDL.UUID, onlineDDL.Table, lockConn.Conn.ID(), renameConn.Conn.ID()); err != nil { return vterrors.Wrapf(err, "failed killing table lock holders and accessors") } @@ -1091,21 +1091,21 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh e.updateMigrationStage(ctx, onlineDDL.UUID, "timeout while waiting for post-lock pos: %v", err) return vterrors.Wrapf(err, "failed waiting for pos after locking") } - go log.Infof("cutOverVReplMigration %v: done waiting for position %v", s.workflow, replication.EncodePosition(postWritesPos)) + go log.Info(fmt.Sprintf("cutOverVReplMigration %v: done waiting for position %v", s.workflow, replication.EncodePosition(postWritesPos))) // Stop vreplication e.updateMigrationStage(ctx, onlineDDL.UUID, "stopping vreplication") if _, err := e.vreplicationExec(ctx, tablet.Tablet, binlogplayer.StopVReplication(s.id, "stopped for online DDL cutover")); err != nil { return vterrors.Wrapf(err, "failed stopping vreplication") } - go log.Infof("cutOverVReplMigration %v: stopped vreplication", s.workflow) + go log.Info(fmt.Sprintf("cutOverVReplMigration %v: stopped vreplication", s.workflow)) defer func() { if !renameWasSuccessful { // Restarting vreplication if err := e.startVReplication(ctx, tablet.Tablet, s.workflow); err != nil { - log.Errorf("cutOverVReplMigration %v: failed restarting vreplication after cutover failure: %v", s.workflow, err) + log.Error(fmt.Sprintf("cutOverVReplMigration %v: failed restarting vreplication after cutover failure: %v", s.workflow, err)) } - go log.Infof("cutOverVReplMigration %v: started vreplication after cutover failure", s.workflow) + go log.Info(fmt.Sprintf("cutOverVReplMigration %v: started vreplication after cutover failure", s.workflow)) } }() @@ -1172,7 +1172,7 
@@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh // Tables are now swapped! Migration is successful e.updateMigrationStage(ctx, onlineDDL.UUID, "re-enabling writes") reenableWritesOnce() // this function is also deferred, in case of early return; but now would be a good time to resume writes, before we publish the migration as "complete" - go log.Infof("cutOverVReplMigration %v: marking as complete", s.workflow) + go log.Info(fmt.Sprintf("cutOverVReplMigration %v: marking as complete", s.workflow)) _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow, s.rowsCopied, emptyHint) return nil @@ -1550,7 +1550,7 @@ func (e *Executor) readPendingMigrationsUUIDs(ctx context.Context) (uuids []stri // terminateMigration attempts to interrupt and hard-stop a running migration func (e *Executor) terminateMigration(ctx context.Context, onlineDDL *schema.OnlineDDL) (foundRunning bool, err error) { - log.Infof("terminateMigration: request to terminate %s", onlineDDL.UUID) + log.Info("terminateMigration: request to terminate " + onlineDDL.UUID) // It's possible the killing the migration fails for whatever reason, in which case // the logic will retry killing it later on. // Whatever happens in this function, this executor stops owning the given migration. @@ -1573,7 +1573,7 @@ func (e *Executor) CancelMigration(ctx context.Context, uuid string, message str if atomic.LoadInt64(&e.isOpen) == 0 { return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } - log.Infof("CancelMigration: request to cancel %s with message: %v", uuid, message) + log.Info(fmt.Sprintf("CancelMigration: request to cancel %s with message: %v", uuid, message)) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -1587,7 +1587,7 @@ func (e *Executor) CancelMigration(ctx context.Context, uuid string, message str switch onlineDDL.Status { case schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled: - log.Infof("CancelMigration: migration %s is in non-cancellable status: %v", uuid, onlineDDL.Status) + log.Info(fmt.Sprintf("CancelMigration: migration %s is in non-cancellable status: %v", uuid, onlineDDL.Status)) return emptyResult, nil } // From this point on, we're actually cancelling a migration @@ -1606,16 +1606,16 @@ func (e *Executor) CancelMigration(ctx context.Context, uuid string, message str switch onlineDDL.Status { case schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady: - log.Infof("CancelMigration: cancelling %s with status: %v", uuid, onlineDDL.Status) + log.Info(fmt.Sprintf("CancelMigration: cancelling %s with status: %v", uuid, onlineDDL.Status)) return &sqltypes.Result{RowsAffected: 1}, nil } migrationFound, err := e.terminateMigration(ctx, onlineDDL) if migrationFound { - log.Infof("CancelMigration: terminated %s with status: %v", uuid, onlineDDL.Status) + log.Info(fmt.Sprintf("CancelMigration: terminated %s with status: %v", uuid, onlineDDL.Status)) rowsAffected = 1 } else { - log.Infof("CancelMigration: migration %s wasn't found to be running", uuid) + log.Info(fmt.Sprintf("CancelMigration: migration %s wasn't found to be running", uuid)) } if err != nil { return result, err @@ -1630,7 +1630,7 @@ func (e *Executor) CancelMigration(ctx context.Context, uuid string, message str // cancelMigrations attempts to abort a list of migrations func (e *Executor) cancelMigrations(ctx context.Context, cancellable []*cancellableMigration, 
issuedByUser bool) (err error) { for _, migration := range cancellable { - log.Infof("cancelMigrations: cancelling %s; reason: %s", migration.uuid, migration.message) + log.Info(fmt.Sprintf("cancelMigrations: cancelling %s; reason: %s", migration.uuid, migration.message)) if _, err := e.CancelMigration(ctx, migration.uuid, migration.message, issuedByUser); err != nil { return err } @@ -1649,18 +1649,18 @@ func (e *Executor) CancelPendingMigrations(ctx context.Context, message string, if err != nil { return result, err } - log.Infof("CancelPendingMigrations: iterating %v migrations %s", len(uuids)) + log.Info(fmt.Sprintf("CancelPendingMigrations: iterating %v migrations", len(uuids))) result = &sqltypes.Result{} for _, uuid := range uuids { - log.Infof("CancelPendingMigrations: cancelling %s", uuid) + log.Info("CancelPendingMigrations: cancelling " + uuid) res, err := e.CancelMigration(ctx, uuid, message, issuedByUser) if err != nil { return result, err } result.AppendResult(res) } - log.Infof("CancelPendingMigrations: done iterating %v migrations %s", len(uuids)) + log.Info(fmt.Sprintf("CancelPendingMigrations: done iterating %v migrations", len(uuids))) return result, nil } @@ -1771,7 +1771,7 @@ func (e *Executor) scheduleNextMigration(ctx context.Context) error { // We only schedule a single migration in the execution of this function onlyScheduleOneMigration.Do(func() { err = e.updateMigrationStatus(ctx, uuid, schema.OnlineDDLStatusReady) - log.Infof("Executor.scheduleNextMigration: scheduling migration %s; err: %v", uuid, err) + log.Info(fmt.Sprintf("Executor.scheduleNextMigration: scheduling migration %s; err: %v", uuid, err)) e.triggerNextCheckInterval() }) if err != nil { @@ -2557,7 +2557,7 @@ func (e *Executor) executeSpecialAlterDirectDDLActionMigration(ctx context.Conte // Buffer queries while issuing the ALTER TABLE statement (we assume this ALTER is going to be quick, // as in ALGORITHM=INSTANT or a quick partition operation) toggleBuffering := func(bufferQueries bool) { - log.Infof("toggling buffering: %t in migration %v", bufferQueries, onlineDDL.UUID) + log.Info(fmt.Sprintf("toggling buffering: %t in migration %v", bufferQueries, onlineDDL.UUID)) timeout := onlineDDL.CutOverThreshold + qrBufferExtraTimeout e.toggleBufferTableFunc(bufferingCtx, onlineDDL.Table, timeout, bufferQueries) @@ -2565,7 +2565,7 @@ func (e *Executor) executeSpecialAlterDirectDDLActionMigration(ctx context.Conte // unbuffer existing queries: bufferingContextCancel() } - log.Infof("toggled buffering: %t in migration %v", bufferQueries, onlineDDL.UUID) + log.Info(fmt.Sprintf("toggled buffering: %t in migration %v", bufferQueries, onlineDDL.UUID)) } defer toggleBuffering(false) toggleBuffering(true) @@ -2912,7 +2912,7 @@ func (e *Executor) reviewInOrderMigrations(ctx context.Context) error { return err } if wasFailed { - log.Infof("reviewInOrderMigrations: failing in-order migration uuid=%s due to previous failed/cancelled migrations in same context", uuid) + log.Info(fmt.Sprintf("reviewInOrderMigrations: failing in-order migration uuid=%s due to previous failed/cancelled migrations in same context", uuid)) } else { pendingMigrationsCount := getInOrderCompletionPendingCount(onlineDDL, pendingMigrationsUUIDs) if err := e.updateInOrderCompletionPendingCount(ctx, uuid, pendingMigrationsCount); err != nil { @@ -2960,7 +2960,7 @@ func (e *Executor) runNextMigration(ctx context.Context) error { onlineDDL.SQL = sqlparser.String(ddlStmt) } } - log.Infof("Executor.runNextMigration: migration %s is non conflicting 
and will be executed next", onlineDDL.UUID) + log.Info(fmt.Sprintf("Executor.runNextMigration: migration %s is non conflicting and will be executed next", onlineDDL.UUID)) e.executeMigration(ctx, onlineDDL) return nil } @@ -3243,7 +3243,7 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i cancellable = append(cancellable, newCancellableMigration(uuid, s.message)) } if !s.isRunning() { - log.Infof("migration %s in 'running' state but vreplication state is '%s'", uuid, s.state.String()) + log.Info(fmt.Sprintf("migration %s in 'running' state but vreplication state is '%s'", uuid, s.state.String())) return nil } // This VRepl migration may have started from outside this tablet, so @@ -3257,7 +3257,7 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i } if onlineDDL.TabletAlias != e.TabletAliasString() { _ = e.updateMigrationTablet(ctx, uuid) - log.Infof("migration %s adopted by tablet %s", uuid, e.TabletAliasString()) + log.Info(fmt.Sprintf("migration %s adopted by tablet %s", uuid, e.TabletAliasString())) } _ = e.updateRowsCopied(ctx, uuid, s.rowsCopied) _ = e.updateMigrationProgressByRowsCopied(ctx, uuid, s.rowsCopied) @@ -3322,7 +3322,7 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i } if err := e.cutOverVReplMigration(ctx, s, shouldForceCutOver); err != nil { _ = e.updateMigrationMessage(ctx, uuid, err.Error()) - log.Errorf("cutOverVReplMigration failed %s: err=%v", onlineDDL.UUID, err) + log.Error(fmt.Sprintf("cutOverVReplMigration failed %s: err=%v", onlineDDL.UUID, err)) if sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError); isSQLErr && sqlErr != nil { // let's see if this error is actually acceptable @@ -3358,7 +3358,7 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i // If we find such a migration, we do nothing. We're only looking for migrations we really // don't have any information of. if !uuidsFoundRunning[uuid] && !uuidsFoundPending[uuid] { - log.Infof("removing migration %s from ownedRunningMigrations because it's not running and not pending", uuid) + log.Info(fmt.Sprintf("removing migration %s from ownedRunningMigrations because it's not running and not pending", uuid)) e.ownedRunningMigrations.Delete(uuid) } return true @@ -3399,7 +3399,7 @@ func (e *Executor) monitorStaleMigrations(ctx context.Context) error { } livenessTimestamp := row.AsString("liveness_timestamp", "") message := fmt.Sprintf("stale migration %s: found running but indicates no liveness for %v minutes, since %v", onlineDDL.UUID, staleMinutes, livenessTimestamp) - log.Warning("warnStaleMigrations: %s", message) + log.Warn("warnStaleMigrations: " + message) maxStaleMinutes = max(maxStaleMinutes, staleMinutes) } @@ -3431,7 +3431,7 @@ func (e *Executor) reviewStaleMigrations(ctx context.Context) error { if err != nil { return err } - log.Infof("reviewStaleMigrations: stale migration found: %s", onlineDDL.UUID) + log.Info("reviewStaleMigrations: stale migration found: " + onlineDDL.UUID) message := fmt.Sprintf("stale migration %s: found running but indicates no liveness in the past %v minutes", onlineDDL.UUID, staleMigrationFailMinutes) if onlineDDL.TabletAlias != e.TabletAliasString() { // This means another tablet started the migration, and the migration has failed due to the tablet failure (e.g. 
primary failover) @@ -3565,7 +3565,7 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { artifacts := row["artifacts"].ToString() logPath := row["log_path"].ToString() - log.Infof("Executor.gcArtifacts: will GC artifacts for migration %s", uuid) + log.Info("Executor.gcArtifacts: will GC artifacts for migration " + uuid) // Remove tables: artifactTables := textutil.SplitDelimitedList(artifacts) @@ -3576,7 +3576,7 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { // is shared for multiple artifacts in this loop, we differentiate via timestamp. // Also, the timestamp we create is in the past, so that the table GC mechanism can // take it away from there on next iteration. - log.Infof("Executor.gcArtifacts: will GC artifact %s for migration %s", artifactTable, uuid) + log.Info(fmt.Sprintf("Executor.gcArtifacts: will GC artifact %s for migration %s", artifactTable, uuid)) timestampInThePast := timeNow.Add(time.Duration(-i) * time.Second).UTC() toTableName, err := e.gcArtifactTable(ctx, artifactTable, uuid, timestampInThePast) if err == nil { @@ -3586,7 +3586,7 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { } else { return vterrors.Wrapf(err, "in gcArtifacts() for %s", artifactTable) } - log.Infof("Executor.gcArtifacts: renamed away artifact %s to %s", artifactTable, toTableName) + log.Info(fmt.Sprintf("Executor.gcArtifacts: renamed away artifact %s to %s", artifactTable, toTableName)) } // Remove logs: @@ -3608,7 +3608,7 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { if err := e.updateMigrationTimestamp(ctx, "cleanup_timestamp", uuid); err != nil { return err } - log.Infof("Executor.gcArtifacts: done migration %s", uuid) + log.Info("Executor.gcArtifacts: done migration " + uuid) } return nil @@ -3634,39 +3634,39 @@ func (e *Executor) onMigrationCheckTick() { return } if e.keyspace == "" { - log.Errorf("Executor.onMigrationCheckTick(): empty keyspace") + log.Error("Executor.onMigrationCheckTick(): empty keyspace") return } ctx := context.Background() if err := e.retryTabletFailureMigrations(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if err := e.reviewQueuedMigrations(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if err := e.scheduleNextMigration(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if err := e.reviewInOrderMigrations(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if err := e.runNextMigration(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if _, cancellable, err := e.reviewRunningMigrations(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } else if err := e.cancelMigrations(ctx, cancellable, false); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if err := e.monitorStaleMigrations(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if err := e.reviewStaleMigrations(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } if err := e.gcArtifacts(ctx); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } } @@ -3683,7 +3683,7 @@ func (e *Executor) updateMigrationStartedTimestamp(ctx context.Context, uuid str } _, err = e.execQuery(ctx, bound) if err != nil { - log.Errorf("FAIL updateMigrationStartedTimestamp: uuid=%s, error=%v", uuid, err) + log.Error(fmt.Sprintf("FAIL updateMigrationStartedTimestamp: uuid=%s, error=%v", uuid, err)) } return err } @@ -3701,7 +3701,7 @@ func (e *Executor) updateMigrationTimestamp(ctx context.Context, timestampColumn } _, err = 
e.execQuery(ctx, bound) if err != nil { - log.Errorf("FAIL updateMigrationStartedTimestamp: uuid=%s, timestampColumn=%v, error=%v", uuid, timestampColumn, err) + log.Error(fmt.Sprintf("FAIL updateMigrationStartedTimestamp: uuid=%s, timestampColumn=%v, error=%v", uuid, timestampColumn, err)) } return err } @@ -3756,7 +3756,7 @@ func (e *Executor) updateMigrationSpecialPlan(ctx context.Context, uuid string, func (e *Executor) updateMigrationStage(ctx context.Context, uuid string, stage string, args ...any) error { msg := fmt.Sprintf(stage, args...) - log.Infof("updateMigrationStage: uuid=%s, stage=%s", uuid, msg) + log.Info(fmt.Sprintf("updateMigrationStage: uuid=%s, stage=%s", uuid, msg)) query, err := sqlparser.ParseAndBind(sqlUpdateStage, sqltypes.StringBindVariable(msg), sqltypes.StringBindVariable(uuid), @@ -3809,7 +3809,7 @@ func (e *Executor) updateTabletFailure(ctx context.Context, uuid string) error { } func (e *Executor) updateMigrationStatusFailedOrCancelled(ctx context.Context, uuid string) error { - log.Infof("updateMigrationStatus: transitioning migration: %s into status failed or cancelled", uuid) + log.Info(fmt.Sprintf("updateMigrationStatus: transitioning migration: %s into status failed or cancelled", uuid)) query, err := sqlparser.ParseAndBind(sqlUpdateMigrationStatusFailedOrCancelled, sqltypes.StringBindVariable(uuid), ) @@ -3821,7 +3821,7 @@ func (e *Executor) updateMigrationStatusFailedOrCancelled(ctx context.Context, u } func (e *Executor) updateMigrationStatus(ctx context.Context, uuid string, status schema.OnlineDDLStatus) error { - log.Infof("updateMigrationStatus: transitioning migration: %s into status: %s", uuid, string(status)) + log.Info(fmt.Sprintf("updateMigrationStatus: transitioning migration: %s into status: %s", uuid, string(status))) query, err := sqlparser.ParseAndBind(sqlUpdateMigrationStatus, sqltypes.StringBindVariable(string(status)), sqltypes.StringBindVariable(uuid), @@ -3831,7 +3831,7 @@ func (e *Executor) updateMigrationStatus(ctx context.Context, uuid string, statu } _, err = e.execQuery(ctx, query) if err != nil { - log.Errorf("FAIL updateMigrationStatus: uuid=%s, query=%v, error=%v", uuid, query, err) + log.Error(fmt.Sprintf("FAIL updateMigrationStatus: uuid=%s, query=%v, error=%v", uuid, query, err)) } return err } @@ -3849,7 +3849,7 @@ func (e *Executor) updateDDLAction(ctx context.Context, uuid string, actionStr s } func (e *Executor) updateMigrationMessage(ctx context.Context, uuid string, message string) error { - log.Infof("updateMigrationMessage: uuid=%s, message=%s", uuid, message) + log.Info(fmt.Sprintf("updateMigrationMessage: uuid=%s, message=%s", uuid, message)) maxlen := 16383 update := func(message string) error { @@ -4087,7 +4087,7 @@ func (e *Executor) updateMigrationReadyToComplete(ctx context.Context, uuid stri atomic.StoreInt64(&runningMigration.ReadyToComplete, storeValue) } } - log.Infof("updateMigrationReadyToComplete: uuid=%s, isReady=%t", uuid, isReady) + log.Info(fmt.Sprintf("updateMigrationReadyToComplete: uuid=%s, isReady=%t", uuid, isReady)) if isReady { // We set progress to 100%. Remember that progress is based on table rows estimation. 
We can get here @@ -4176,7 +4176,7 @@ func (e *Executor) CleanupMigration(ctx context.Context, uuid string) (result *s if !schema.IsOnlineDDLUUID(uuid) { return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in CLEANUP: %s", uuid) } - log.Infof("CleanupMigration: request to cleanup migration %s", uuid) + log.Info("CleanupMigration: request to cleanup migration " + uuid) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -4190,7 +4190,7 @@ func (e *Executor) CleanupMigration(ctx context.Context, uuid string) (result *s if err != nil { return nil, err } - log.Infof("CleanupMigration: migration %s marked as ready to clean up", uuid) + log.Info(fmt.Sprintf("CleanupMigration: migration %s marked as ready to clean up", uuid)) defer e.triggerNextCheckInterval() return rs, nil } @@ -4202,7 +4202,7 @@ func (e *Executor) CleanupAllMigrations(ctx context.Context) (result *sqltypes.R if atomic.LoadInt64(&e.isOpen) == 0 { return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } - log.Infof("CleanupMigration: request to cleanup all terminal migrations") + log.Info("CleanupMigration: request to cleanup all terminal migrations") e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -4210,7 +4210,7 @@ func (e *Executor) CleanupAllMigrations(ctx context.Context) (result *sqltypes.R if err != nil { return nil, err } - log.Infof("CleanupMigration: %v migrations marked as ready to clean up", rs.RowsAffected) + log.Info(fmt.Sprintf("CleanupMigration: %v migrations marked as ready to clean up", rs.RowsAffected)) defer e.triggerNextCheckInterval() return rs, nil } @@ -4230,7 +4230,7 @@ func (e *Executor) ForceCutOverMigration(ctx context.Context, uuid string) (resu if !schema.IsOnlineDDLUUID(uuid) { return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in FORCE_CUTOVER: %s", uuid) } - log.Infof("ForceCutOverMigration: request to force cut-over migration %s", uuid) + log.Info("ForceCutOverMigration: request to force cut-over migration " + uuid) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -4245,7 +4245,7 @@ func (e *Executor) ForceCutOverMigration(ctx context.Context, uuid string) (resu return nil, err } e.triggerNextCheckInterval() - log.Infof("ForceCutOverMigration: migration %s marked for forced cut-over", uuid) + log.Info(fmt.Sprintf("ForceCutOverMigration: migration %s marked for forced cut-over", uuid)) return rs, nil } @@ -4259,18 +4259,18 @@ func (e *Executor) ForceCutOverPendingMigrations(ctx context.Context) (result *s if err != nil { return result, err } - log.Infof("ForceCutOverPendingMigrations: iterating %v migrations", len(uuids)) + log.Info(fmt.Sprintf("ForceCutOverPendingMigrations: iterating %v migrations", len(uuids))) result = &sqltypes.Result{} for _, uuid := range uuids { - log.Infof("ForceCutOverPendingMigrations: applying to %s", uuid) + log.Info("ForceCutOverPendingMigrations: applying to " + uuid) res, err := e.ForceCutOverMigration(ctx, uuid) if err != nil { return result, err } result.AppendResult(res) } - log.Infof("ForceCutOverPendingMigrations: done iterating %v migrations %s", len(uuids)) + log.Info(fmt.Sprintf("ForceCutOverPendingMigrations: done iterating %v migrations", len(uuids))) return result, nil } @@ -4286,7 +4286,7 @@ func (e *Executor) SetMigrationCutOverThreshold(ctx context.Context, uuid string return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid cut-over threshold value: %s. 
Try '5s' to '30s'", thresholdString) } - log.Infof("SetMigrationCutOverThreshold: request to set cut-over threshold to %v on migration %s", threshold, uuid) + log.Info(fmt.Sprintf("SetMigrationCutOverThreshold: request to set cut-over threshold to %v on migration %s", threshold, uuid)) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -4306,7 +4306,7 @@ func (e *Executor) SetMigrationCutOverThreshold(ctx context.Context, uuid string return nil, err } e.triggerNextCheckInterval() - log.Infof("SetMigrationCutOverThreshold: migration %s cut-over threshold was set to", uuid, threshold) + log.Info(fmt.Sprintf("SetMigrationCutOverThreshold: migration %s cut-over threshold was set to %v", uuid, threshold)) return rs, nil } @@ -4322,7 +4322,7 @@ func (e *Executor) CompleteMigration(ctx context.Context, uuid string, shardsArg // Does not apply to this shard! return &sqltypes.Result{}, nil } - log.Infof("CompleteMigration: request to complete migration %s", uuid) + log.Info("CompleteMigration: request to complete migration " + uuid) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -4338,7 +4338,7 @@ func (e *Executor) CompleteMigration(ctx context.Context, uuid string, shardsArg if err != nil { return nil, err } - log.Infof("CompleteMigration: migration %s marked as unpostponed", uuid) + log.Info(fmt.Sprintf("CompleteMigration: migration %s marked as unpostponed", uuid)) return rs, nil } @@ -4353,18 +4353,18 @@ func (e *Executor) CompletePendingMigrations(ctx context.Context) (result *sqlty if err != nil { return result, err } - log.Infof("CompletePendingMigrations: iterating %v migrations %s", len(uuids)) + log.Info(fmt.Sprintf("CompletePendingMigrations: iterating %v migrations", len(uuids))) result = &sqltypes.Result{} for _, uuid := range uuids { - log.Infof("CompletePendingMigrations: completing %s", uuid) + log.Info("CompletePendingMigrations: completing " + uuid) res, err := e.CompleteMigration(ctx, uuid, "") if err != nil { return result, err } result.AppendResult(res) } - log.Infof("CompletePendingMigrations: done iterating %v migrations %s", len(uuids)) + log.Info(fmt.Sprintf("CompletePendingMigrations: done iterating %v migrations", len(uuids))) return result, nil } @@ -4376,7 +4376,7 @@ func (e *Executor) PostponeCompleteMigration(ctx context.Context, uuid string) ( if !schema.IsOnlineDDLUUID(uuid) { return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in POSTPONE COMPLETE: %s", uuid) } - log.Infof("PostponeCompleteMigration: request to postpone complete migration %s", uuid) + log.Info("PostponeCompleteMigration: request to postpone complete migration " + uuid) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -4392,7 +4392,7 @@ func (e *Executor) PostponeCompleteMigration(ctx context.Context, uuid string) ( if err != nil { return nil, err } - log.Infof("PostponeCompleteMigration: migration %s marked as postponed", uuid) + log.Info(fmt.Sprintf("PostponeCompleteMigration: migration %s marked as postponed", uuid)) return rs, nil } @@ -4407,18 +4407,18 @@ func (e *Executor) PostponeCompletePendingMigrations(ctx context.Context) (resul if err != nil { return result, err } - log.Infof("PostponeCompletePendingMigrations: iterating %v migrations %s", len(uuids)) + log.Info(fmt.Sprintf("PostponeCompletePendingMigrations: iterating %v migrations", len(uuids))) result = &sqltypes.Result{} for _, uuid := range uuids { - log.Infof("PostponeCompletePendingMigrations: postpone completion of %s", uuid) + log.Info("PostponeCompletePendingMigrations: 
postpone completion of " + uuid) res, err := e.PostponeCompleteMigration(ctx, uuid) if err != nil { return result, err } result.AppendResult(res) } - log.Infof("PostponeCompletePendingMigrations: done iterating %v migrations %s", len(uuids)) + log.Info(fmt.Sprintf("PostponeCompletePendingMigrations: done iterating %v migrations", len(uuids))) return result, nil } @@ -4434,7 +4434,7 @@ func (e *Executor) LaunchMigration(ctx context.Context, uuid string, shardsArg s // Does not apply to this shard! return &sqltypes.Result{}, nil } - log.Infof("LaunchMigration: request to launch migration %s", uuid) + log.Info("LaunchMigration: request to launch migration " + uuid) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -4450,7 +4450,7 @@ func (e *Executor) LaunchMigration(ctx context.Context, uuid string, shardsArg s if err != nil { return nil, err } - log.Infof("LaunchMigration: migration %s marked as unpostponed", uuid) + log.Info(fmt.Sprintf("LaunchMigration: migration %s marked as unpostponed", uuid)) return rs, nil } @@ -4469,18 +4469,18 @@ func (e *Executor) LaunchMigrations(ctx context.Context) (result *sqltypes.Resul return result, err } rows := r.Named().Rows - log.Infof("LaunchMigrations: iterating %v migrations %s", len(rows)) + log.Info(fmt.Sprintf("LaunchMigrations: iterating %v migrations", len(rows))) result = &sqltypes.Result{} for _, row := range rows { uuid := row["migration_uuid"].ToString() - log.Infof("LaunchMigrations: unpostponing %s", uuid) + log.Info("LaunchMigrations: unpostponing " + uuid) res, err := e.LaunchMigration(ctx, uuid, "") if err != nil { return result, err } result.AppendResult(res) } - log.Infof("LaunchMigrations: done iterating %v migrations %s", len(uuids)) + log.Info(fmt.Sprintf("LaunchMigrations: done iterating %v migrations", len(uuids))) return result, nil } @@ -4591,7 +4591,7 @@ func (e *Executor) SubmitMigration( return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } - log.Infof("SubmitMigration: request to submit migration with statement: %0.50s...", sqlparser.CanonicalString(stmt)) + log.Info(fmt.Sprintf("SubmitMigration: request to submit migration with statement: %0.50s...", sqlparser.CanonicalString(stmt))) if ddlStmt, ok := stmt.(sqlparser.DDLStatement); ok { // This validation should have taken place on submission. However, the query may have mutated // during transfer, and this validation is here to catch any malformed mutation. @@ -4615,7 +4615,7 @@ func (e *Executor) SubmitMigration( return nil, vterrors.Wrapf(err, "while checking whether migration %s exists", onlineDDL.UUID) } if storedMigration != nil { - log.Infof("SubmitMigration: migration %s already exists with migration_context=%s, table=%s", onlineDDL.UUID, storedMigration.MigrationContext, onlineDDL.Table) + log.Info(fmt.Sprintf("SubmitMigration: migration %s already exists with migration_context=%s, table=%s", onlineDDL.UUID, storedMigration.MigrationContext, onlineDDL.Table)) // A migration already exists with the same UUID. This is fine, we allow re-submitting migrations // with the same UUID, as we provide idempotency. // So we will _mostly_ ignore the request: we will not submit a new migration. 
However, we will do @@ -4640,7 +4640,7 @@ func (e *Executor) SubmitMigration( if err != nil { return nil, err } - log.Infof("SubmitMigration: request to submit migration %s; action=%s, table=%s", onlineDDL.UUID, actionStr, onlineDDL.Table) + log.Info(fmt.Sprintf("SubmitMigration: request to submit migration %s; action=%s, table=%s", onlineDDL.UUID, actionStr, onlineDDL.Table)) revertedUUID, _ := onlineDDL.GetRevertUUID(e.env.Environment().Parser()) // Empty value if the migration is not actually a REVERT. Safe to ignore error. retainArtifactsSeconds := int64((retainOnlineDDLTables).Seconds()) @@ -4688,7 +4688,7 @@ func (e *Executor) SubmitMigration( if err != nil { return nil, vterrors.Wrapf(err, "submitting migration %v", onlineDDL.UUID) } - log.Infof("SubmitMigration: migration %s submitted", onlineDDL.UUID) + log.Info(fmt.Sprintf("SubmitMigration: migration %s submitted", onlineDDL.UUID)) defer e.triggerNextCheckInterval() diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go index f95eb189d1e..6379d072105 100644 --- a/go/vt/vttablet/onlineddl/vrepl.go +++ b/go/vt/vttablet/onlineddl/vrepl.go @@ -200,7 +200,7 @@ func (v *VRepl) executeAnalyzeTable(ctx context.Context, conn *dbconnpool.DBConn if _, err := conn.ExecuteFetch(sqlEnableFastAnalyzeTable, 1, false); err != nil { return err } - log.Infof("@@fast_analyze_table enabled") + log.Info("@@fast_analyze_table enabled") defer conn.ExecuteFetch(sqlDisableFastAnalyzeTable, 1, false) } diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index 7c54fcbfcf9..43713c01df7 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -572,7 +572,7 @@ func (sbc *SandboxConn) AddVStreamEvents(events []*binlogdatapb.VEvent, err erro // VStream is part of the QueryService interface. func (sbc *SandboxConn) VStream(ctx context.Context, request *binlogdatapb.VStreamRequest, send func([]*binlogdatapb.VEvent) error) error { if sbc.StartPos != "" && sbc.StartPos != request.Position { - log.Errorf("startPos(%v): %v, want %v", request.Target, request.Position, sbc.StartPos) + log.Error(fmt.Sprintf("startPos(%v): %v, want %v", request.Target, request.Position, sbc.StartPos)) return fmt.Errorf("startPos(%v): %v, want %v", request.Target, request.Position, sbc.StartPos) } done := false @@ -597,7 +597,7 @@ func (sbc *SandboxConn) VStream(ctx context.Context, request *binlogdatapb.VStre }} if err := send(events); err != nil { - log.Infof("error sending event in test sandbox %s", err.Error()) + log.Info("error sending event in test sandbox " + err.Error()) return err } lastTimestamp++ @@ -607,7 +607,7 @@ func (sbc *SandboxConn) VStream(ctx context.Context, request *binlogdatapb.VStre done = true } if err := send([]*binlogdatapb.VEvent{ev}); err != nil { - log.Infof("error sending event in test sandbox %s", err.Error()) + log.Info("error sending event in test sandbox " + err.Error()) return err } lastTimestamp = ev.Timestamp diff --git a/go/vt/vttablet/sysloglogger/sysloglogger.go b/go/vt/vttablet/sysloglogger/sysloglogger.go index cf7df47fb2b..6396aa1248f 100644 --- a/go/vt/vttablet/sysloglogger/sysloglogger.go +++ b/go/vt/vttablet/sysloglogger/sysloglogger.go @@ -20,6 +20,7 @@ limitations under the License. 
package sysloglogger import ( + "fmt" "log/syslog" "strings" @@ -59,7 +60,7 @@ func init() { var err error writer, err = syslog.New(syslog.LOG_INFO, "vtquerylogger") if err != nil { - log.Errorf("Query logger is unable to connect to syslog: %v", err) + log.Error(fmt.Sprintf("Query logger is unable to connect to syslog: %v", err)) return } go run() @@ -84,11 +85,11 @@ func run() { for stats := range ch { b.Reset() if err := stats.Logf(&b, formatParams); err != nil { - log.Errorf("Error formatting logStats: %v", err) + log.Error(fmt.Sprintf("Error formatting logStats: %v", err)) continue } if err := writer.Info(b.String()); err != nil { - log.Errorf("Error writing to syslog: %v", err) + log.Error(fmt.Sprintf("Error writing to syslog: %v", err)) continue } } diff --git a/go/vt/vttablet/tabletconn/tablet_conn.go b/go/vt/vttablet/tabletconn/tablet_conn.go index 8c2c233dd45..2f077f3d0bc 100644 --- a/go/vt/vttablet/tabletconn/tablet_conn.go +++ b/go/vt/vttablet/tabletconn/tablet_conn.go @@ -18,6 +18,8 @@ package tabletconn import ( "context" + "fmt" + "os" "sync" "github.com/spf13/pflag" @@ -80,7 +82,8 @@ func RegisterDialer(name string, dialer TabletDialer) { mu.Lock() defer mu.Unlock() if _, ok := dialers[name]; ok { - log.Fatalf("Dialer %s already exists", name) + log.Error(fmt.Sprintf("Dialer %s already exists", name)) + os.Exit(1) } dialers[name] = dialer } @@ -91,7 +94,8 @@ func GetDialer() TabletDialer { defer mu.Unlock() td, ok := dialers[tabletProtocol] if !ok { - log.Exitf("No dialer registered for tablet protocol %s", tabletProtocol) + log.Error("No dialer registered for tablet protocol " + tabletProtocol) + os.Exit(1) } return td } diff --git a/go/vt/vttablet/tabletconntest/tabletconntest.go b/go/vt/vttablet/tabletconntest/tabletconntest.go index 1dd200329c9..988064d934a 100644 --- a/go/vt/vttablet/tabletconntest/tabletconntest.go +++ b/go/vt/vttablet/tabletconntest/tabletconntest.go @@ -20,6 +20,7 @@ package tabletconntest import ( "context" + "fmt" "io" "os" "strings" @@ -1088,6 +1089,6 @@ func SetProtocol(name string, protocol string) { if err := pflag.Set(tabletProtocolFlagName, protocol); err != nil { msg := "failed to set flag %q to %q: %v" - log.Errorf(msg, tabletProtocolFlagName, protocol, err) + log.Error(fmt.Sprintf(msg, tabletProtocolFlagName, protocol, err)) } } diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index f0127f171a6..d92f6dfd1ed 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -138,7 +138,7 @@ func (tm *TabletManager) RestoreData( case hook.HOOK_DOES_NOT_EXIST: log.Info("No vttablet_restore_done hook.") default: - log.Warning("vttablet_restore_done hook failed") + log.Warn("vttablet_restore_done hook failed") } }() }() @@ -178,7 +178,7 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Sprintf("snapshot keyspace %v has no base_keyspace set", tablet.Keyspace)) } keyspace = keyspaceInfo.BaseKeyspace - log.Infof("Using base_keyspace %v to restore keyspace %v using a backup time of %v", keyspace, tablet.Keyspace, protoutil.TimeFromProto(request.BackupTime).UTC()) + log.Info(fmt.Sprintf("Using base_keyspace %v to restore keyspace %v using a backup time of %v", keyspace, tablet.Keyspace, protoutil.TimeFromProto(request.BackupTime).UTC())) } startTime := protoutil.TimeFromProto(request.BackupTime).UTC() @@ -254,7 +254,7 @@ func (tm *TabletManager) restoreDataLocked(ctx 
context.Context, logger logutil.L break } - log.Infof("No backup found. Waiting %v (from -wait-for-backup-interval flag) to check again.", waitForBackupInterval) + log.Info(fmt.Sprintf("No backup found. Waiting %v (from -wait-for-backup-interval flag) to check again.", waitForBackupInterval)) select { case <-ctx.Done(): return ctx.Err() @@ -300,7 +300,7 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L bgCtx := context.Background() // If anything failed, we should reset the original tablet type if err := tm.tmState.ChangeTabletType(bgCtx, originalType, DBActionNone); err != nil { - log.Errorf("Could not change back to original tablet type %v: %v", originalType, err) + log.Error(fmt.Sprintf("Could not change back to original tablet type %v: %v", originalType, err)) } return vterrors.Wrap(err, "Can't restore backup") } diff --git a/go/vt/vttablet/tabletmanager/rpc_lock_tables.go b/go/vt/vttablet/tabletmanager/rpc_lock_tables.go index 27f4cdf0b72..7686b59a11b 100644 --- a/go/vt/vttablet/tabletmanager/rpc_lock_tables.go +++ b/go/vt/vttablet/tabletmanager/rpc_lock_tables.go @@ -77,7 +77,7 @@ func (tm *TabletManager) LockTables(ctx context.Context) error { return err } } - log.Infof("[%v] Tables locked", conn.ConnectionID) + log.Info(fmt.Sprintf("[%v] Tables locked", conn.ConnectionID)) tm._lockTablesConnection = conn tm._lockTablesTimer = time.AfterFunc(lockTablesTimeout, func() { @@ -88,10 +88,10 @@ func (tm *TabletManager) LockTables(ctx context.Context) error { // We need the mutex locked before we check this field if tm._lockTablesConnection == conn { - log.Errorf("table lock timed out and released the lock - something went wrong") + log.Error("table lock timed out and released the lock - something went wrong") err = tm.unlockTablesHoldingMutex() if err != nil { - log.Errorf("failed to unlock tables: %v", err) + log.Error(fmt.Sprintf("failed to unlock tables: %v", err)) } } }) @@ -100,7 +100,7 @@ func (tm *TabletManager) LockTables(ctx context.Context) error { } func (tm *TabletManager) lockTablesUsingLockTables(conn *dbconnpool.DBConnection) error { - log.Warningf("failed to lock tables with FTWRL - falling back to LOCK TABLES") + log.Warn("failed to lock tables with FTWRL - falling back to LOCK TABLES") // Ensure schema engine is Open. If vttablet came up in a non_serving role, // the schema engine may not have been initialized. 
Open() is idempotent, so this @@ -151,7 +151,7 @@ func (tm *TabletManager) unlockTablesHoldingMutex() error { if err != nil { return err } - log.Infof("[%v] Tables unlocked", tm._lockTablesConnection.ConnectionID) + log.Info(fmt.Sprintf("[%v] Tables unlocked", tm._lockTablesConnection.ConnectionID)) tm._lockTablesConnection.Close() tm._lockTablesConnection = nil tm._lockTablesTimer = nil diff --git a/go/vt/vttablet/tabletmanager/rpc_query.go b/go/vt/vttablet/tabletmanager/rpc_query.go index bb3075175db..dff7f96494e 100644 --- a/go/vt/vttablet/tabletmanager/rpc_query.go +++ b/go/vt/vttablet/tabletmanager/rpc_query.go @@ -18,6 +18,7 @@ package tabletmanager import ( "context" + "fmt" "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqlescape" @@ -164,7 +165,7 @@ func (tm *TabletManager) executeMultiFetchAsDba( if err == nil && reloadSchema { reloadErr := tm.QueryServiceControl.ReloadSchema(ctx) if reloadErr != nil { - log.Errorf("failed to reload the schema %v", reloadErr) + log.Error(fmt.Sprintf("failed to reload the schema %v", reloadErr)) } } return results, err @@ -242,7 +243,7 @@ func (tm *TabletManager) ExecuteFetchAsAllPrivs(ctx context.Context, req *tablet if err == nil && req.ReloadSchema { reloadErr := tm.QueryServiceControl.ReloadSchema(ctx) if reloadErr != nil { - log.Errorf("failed to reload the schema %v", reloadErr) + log.Error(fmt.Sprintf("failed to reload the schema %v", reloadErr)) } } return sqltypes.ResultToProto3(result), err diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index 15081168a1b..7d9d36a94d9 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -220,7 +220,7 @@ func (tm *TabletManager) PrimaryPosition(ctx context.Context) (string, error) { // WaitForPosition waits until replication reaches the desired position func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error { - log.Infof("WaitForPosition: %v", pos) + log.Info(fmt.Sprintf("WaitForPosition: %v", pos)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -234,7 +234,7 @@ func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error // StopReplication will stop the mysql. Works both when Vitess manages // replication or not (using hook if not). func (tm *TabletManager) StopReplication(ctx context.Context) error { - log.Infof("StopReplication") + log.Info("StopReplication") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -258,7 +258,7 @@ func (tm *TabletManager) stopIOThreadLocked(ctx context.Context) error { // provided position. Works both when Vitess manages // replication or not (using hook if not). func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position string, waitTime time.Duration) (string, error) { - log.Infof("StopReplicationMinimum: position: %v waitTime: %v", position, waitTime) + log.Info(fmt.Sprintf("StopReplicationMinimum: position: %v waitTime: %v", position, waitTime)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return "", err } @@ -289,7 +289,7 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st // StartReplication will start the mysql. Works both when Vitess manages // replication or not (using hook if not). 
func (tm *TabletManager) StartReplication(ctx context.Context, semiSync bool) error { - log.Infof("StartReplication") + log.Info("StartReplication") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -311,7 +311,7 @@ func (tm *TabletManager) StartReplication(ctx context.Context, semiSync bool) er // RestartReplication will stop replication and then start it again func (tm *TabletManager) RestartReplication(ctx context.Context, semiSync bool) error { - log.Infof("RestartReplication") + log.Info("RestartReplication") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -341,7 +341,7 @@ func (tm *TabletManager) RestartReplication(ctx context.Context, semiSync bool) // StartReplicationUntilAfter will start the replication and let it catch up // until and including the transactions in `position` func (tm *TabletManager) StartReplicationUntilAfter(ctx context.Context, position string, waitTime time.Duration) error { - log.Infof("StartReplicationUntilAfter: position: %v waitTime: %v", position, waitTime) + log.Info(fmt.Sprintf("StartReplicationUntilAfter: position: %v waitTime: %v", position, waitTime)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -372,7 +372,7 @@ func (tm *TabletManager) GetReplicas(ctx context.Context) ([]string, error) { // ResetReplication completely resets the replication on the host. // All binary and relay logs are flushed. All replication positions are reset. func (tm *TabletManager) ResetReplication(ctx context.Context) error { - log.Infof("ResetReplication") + log.Info("ResetReplication") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -386,7 +386,7 @@ func (tm *TabletManager) ResetReplication(ctx context.Context) error { // InitPrimary enables writes and returns the replication position. func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string, error) { - log.Infof("InitPrimary with semiSync as %t", semiSync) + log.Info(fmt.Sprintf("InitPrimary with semiSync as %t", semiSync)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return "", err } @@ -407,7 +407,7 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string // Setting super_read_only `OFF` so that we can run the DDL commands if _, err := tm.MysqlDaemon.SetSuperReadOnly(ctx, false); err != nil { if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { - log.Warningf("server does not know about super_read_only, continuing anyway...") + log.Warn("server does not know about super_read_only, continuing anyway...") } else { return "", err } @@ -443,8 +443,7 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string // PopulateReparentJournal adds an entry into the reparent_journal table. 
func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreatedNS int64, actionName string, primaryAlias *topodatapb.TabletAlias, position string) error { - log.Infof("PopulateReparentJournal: action: %v parent: %v position: %v timeCreatedNS: %d actionName: %s primaryAlias: %s", - actionName, primaryAlias, position, timeCreatedNS, actionName, primaryAlias) + log.Info(fmt.Sprintf("PopulateReparentJournal: action: %v parent: %v position: %v timeCreatedNS: %d actionName: %s primaryAlias: %s", actionName, primaryAlias, position, timeCreatedNS, actionName, primaryAlias)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -460,7 +459,7 @@ func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreate // ReadReparentJournalInfo reads the information from reparent journal. func (tm *TabletManager) ReadReparentJournalInfo(ctx context.Context) (int32, error) { - log.Infof("ReadReparentJournalInfo") + log.Info("ReadReparentJournalInfo") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return 0, err } @@ -479,7 +478,7 @@ func (tm *TabletManager) ReadReparentJournalInfo(ctx context.Context) (int32, er // InitReplica sets replication primary and position, and waits for the // reparent_journal table entry up to context timeout func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.TabletAlias, position string, timeCreatedNS int64, semiSync bool) error { - log.Infof("InitReplica: parent: %v position: %v timeCreatedNS: %d semisync: %t", parent, position, timeCreatedNS, semiSync) + log.Info(fmt.Sprintf("InitReplica: parent: %v position: %v timeCreatedNS: %d semisync: %t", parent, position, timeCreatedNS, semiSync)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -549,7 +548,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab // // If a step fails in the middle, it will try to undo any changes it made. func (tm *TabletManager) DemotePrimary(ctx context.Context, force bool) (*replicationdatapb.PrimaryStatus, error) { - log.Infof("DemotePrimary") + log.Info("DemotePrimary") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return nil, err } @@ -577,11 +576,11 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure case <-time.After(10 * topo.RemoteOperationTimeout): // We waited for over 10 times of remote operation timeout, but DemotePrimary is still not done. // Collect more information and signal demote primary is indefinitely stalled. - log.Errorf("DemotePrimary seems to be stalled. Collecting more information.") + log.Error("DemotePrimary seems to be stalled. Collecting more information.") tm.QueryServiceControl.SetDemotePrimaryStalled(true) buf := make([]byte, 1<<16) // 64 KB buffer size stackSize := runtime.Stack(buf, true) - log.Errorf("Stack trace:\n%s", string(buf[:stackSize])) + log.Error("Stack trace:\n" + string(buf[:stackSize])) // This condition check is only to handle the race, where we start to set the demote primary stalled // but then the function finishes. So, after we set demote primary stalled, we check if the // function has finished and if it has, we clear the demote primary stalled. @@ -610,14 +609,14 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // have to be killed at the end of their timeout, this will be // considered successful. If we are already not serving, this will be // idempotent. 
- log.Infof("DemotePrimary disabling query service") + log.Info("DemotePrimary disabling query service") if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), false, "demotion in progress"); err != nil { return nil, vterrors.Wrap(err, "SetServingType(serving=false) failed") } defer func() { if finalErr != nil && revertPartialFailure && wasServing { if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), true, ""); err != nil { - log.Warningf("SetServingType(serving=true) failed during revert: %v", err) + log.Warn(fmt.Sprintf("SetServingType(serving=true) failed during revert: %v", err)) } } }() @@ -655,7 +654,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure if finalErr != nil && revertPartialFailure && wasPrimary { // enable primary-side semi-sync again if err := tm.fixSemiSync(ctx, topodatapb.TabletType_PRIMARY, SemiSyncActionSet); err != nil { - log.Warningf("fixSemiSync(PRIMARY) failed during revert: %v", err) + log.Warn(fmt.Sprintf("fixSemiSync(PRIMARY) failed during revert: %v", err)) } } }() @@ -688,7 +687,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // idempotent. if _, err := tm.MysqlDaemon.SetSuperReadOnly(ctx, true); err != nil { if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { - log.Warningf("server does not know about super_read_only, continuing anyway...") + log.Warn("server does not know about super_read_only, continuing anyway...") } else { return nil, err } @@ -699,7 +698,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // We need to redo the prepared transactions in read only mode using the dba user to ensure we don't lose them. // setting read_only OFF will also set super_read_only OFF if it was set if err = tm.redoPreparedTransactionsAndSetReadWrite(ctx); err != nil { - log.Warningf("RedoPreparedTransactionsAndSetReadWrite failed during revert: %v", err) + log.Warn(fmt.Sprintf("RedoPreparedTransactionsAndSetReadWrite failed during revert: %v", err)) } } }() @@ -714,7 +713,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure if finalErr != nil && revertPartialFailure && wasPrimary { // enable primary-side semi-sync again if err := tm.fixSemiSync(ctx, topodatapb.TabletType_PRIMARY, SemiSyncActionSet); err != nil { - log.Warningf("fixSemiSync(PRIMARY) failed during revert: %v", err) + log.Warn(fmt.Sprintf("fixSemiSync(PRIMARY) failed during revert: %v", err)) } } }() @@ -732,7 +731,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // it sets read-only to false, fixes semi-sync // and returns its primary position. 
func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) error { - log.Infof("UndoDemotePrimary") + log.Info("UndoDemotePrimary") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -775,7 +774,7 @@ func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) e } // Update serving graph - log.Infof("UndoDemotePrimary re-enabling query service") + log.Info("UndoDemotePrimary re-enabling query service") if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), true, ""); err != nil { return vterrors.Wrap(err, "SetServingType(serving=true) failed") } @@ -784,7 +783,7 @@ func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) e // ReplicaWasPromoted promotes a replica to primary, no questions asked. func (tm *TabletManager) ReplicaWasPromoted(ctx context.Context) error { - log.Infof("ReplicaWasPromoted") + log.Info("ReplicaWasPromoted") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -797,7 +796,7 @@ func (tm *TabletManager) ReplicaWasPromoted(ctx context.Context) error { // ResetReplicationParameters resets the replica replication parameters func (tm *TabletManager) ResetReplicationParameters(ctx context.Context) error { - log.Infof("ResetReplicationParameters") + log.Info("ResetReplicationParameters") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -821,7 +820,7 @@ func (tm *TabletManager) ResetReplicationParameters(ctx context.Context) error { // SetReplicationSource sets replication primary, and waits for the // reparent_journal table entry up to context timeout func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool, heartbeatInterval float64) error { - log.Infof("SetReplicationSource: parent: %v position: %s force: %v semiSync: %v timeCreatedNS: %d", parentAlias, waitPosition, forceStartReplication, semiSync, timeCreatedNS) + log.Info(fmt.Sprintf("SetReplicationSource: parent: %v position: %s force: %v semiSync: %v timeCreatedNS: %d", parentAlias, waitPosition, forceStartReplication, semiSync, timeCreatedNS)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -841,7 +840,7 @@ func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias * } func (tm *TabletManager) setReplicationSourceSemiSyncNoAction(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { - log.Infof("SetReplicationSource: parent: %v position: %v force: %v", parentAlias, waitPosition, forceStartReplication) + log.Info(fmt.Sprintf("SetReplicationSource: parent: %v position: %v force: %v", parentAlias, waitPosition, forceStartReplication)) if err := tm.lock(ctx); err != nil { return err } @@ -975,7 +974,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA // GTID-based replication position or a Vitess reparent journal entry, // or both. 
if shouldbeReplicating { - log.Infof("Set up MySQL replication; should now be replicating from %s at %s", parentAlias, waitPosition) + log.Info(fmt.Sprintf("Set up MySQL replication; should now be replicating from %s at %s", parentAlias, waitPosition)) if waitPosition != "" { pos, err := replication.DecodePosition(waitPosition) if err != nil { @@ -997,7 +996,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA // ReplicaWasRestarted updates the parent record for a tablet. func (tm *TabletManager) ReplicaWasRestarted(ctx context.Context, parent *topodatapb.TabletAlias) error { - log.Infof("ReplicaWasRestarted: parent: %v", parent) + log.Info(fmt.Sprintf("ReplicaWasRestarted: parent: %v", parent)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return err } @@ -1018,7 +1017,7 @@ func (tm *TabletManager) ReplicaWasRestarted(ctx context.Context, parent *topoda // StopReplicationAndGetStatus stops MySQL replication, and returns the // current status. func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopReplicationMode replicationdatapb.StopReplicationMode) (StopReplicationAndGetStatusResponse, error) { - log.Infof("StopReplicationAndGetStatus: mode: %v", stopReplicationMode) + log.Info(fmt.Sprintf("StopReplicationAndGetStatus: mode: %v", stopReplicationMode)) if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return StopReplicationAndGetStatusResponse{}, err } @@ -1110,7 +1109,7 @@ type StopReplicationAndGetStatusResponse struct { // PromoteReplica makes the current tablet the primary func (tm *TabletManager) PromoteReplica(ctx context.Context, semiSync bool) (string, error) { - log.Infof("PromoteReplica") + log.Info("PromoteReplica") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return "", err } @@ -1228,7 +1227,7 @@ func (tm *TabletManager) fixSemiSyncAndReplication(ctx context.Context, tabletTy } // We need to restart replication - log.Infof("Restarting replication for semi-sync flag change to take effect from %v to %v", acking, shouldAck) + log.Info(fmt.Sprintf("Restarting replication for semi-sync flag change to take effect from %v to %v", acking, shouldAck)) if err := tm.MysqlDaemon.StopReplication(ctx, tm.hookExtraEnv()); err != nil { return vterrors.Wrap(err, "failed to StopReplication") } diff --git a/go/vt/vttablet/tabletmanager/rpc_schema.go b/go/vt/vttablet/tabletmanager/rpc_schema.go index b1162190128..3a5b3f0662f 100644 --- a/go/vt/vttablet/tabletmanager/rpc_schema.go +++ b/go/vt/vttablet/tabletmanager/rpc_schema.go @@ -18,6 +18,7 @@ package tabletmanager import ( "context" + "fmt" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vterrors" @@ -48,13 +49,13 @@ func (tm *TabletManager) ReloadSchema(ctx context.Context, waitPosition string) if err != nil { return vterrors.Wrapf(err, "ReloadSchema: can't parse wait position (%q)", waitPosition) } - log.Infof("ReloadSchema: waiting for replication position: %v", waitPosition) + log.Info(fmt.Sprintf("ReloadSchema: waiting for replication position: %v", waitPosition)) if err := tm.MysqlDaemon.WaitSourcePos(ctx, pos); err != nil { return err } } - log.Infof("ReloadSchema requested via RPC") + log.Info("ReloadSchema requested via RPC") return tm.QueryServiceControl.ReloadSchema(ctx) } diff --git a/go/vt/vttablet/tabletmanager/rpc_server.go b/go/vt/vttablet/tabletmanager/rpc_server.go index 9663d9cd844..fad692c0da7 100644 --- a/go/vt/vttablet/tabletmanager/rpc_server.go +++ b/go/vt/vttablet/tabletmanager/rpc_server.go @@ -49,7 +49,7 @@ 
func (tm *TabletManager) unlock() { func (tm *TabletManager) HandleRPCPanic(ctx context.Context, name string, args, reply any, verbose bool, err *error) { // panic handling if x := recover(); x != nil { - log.Errorf("TabletManager.%v(%v) on %v panic: %v\n%s", name, args, topoproto.TabletAliasString(tm.tabletAlias), x, tb.Stack(4)) + log.Error(fmt.Sprintf("TabletManager.%v(%v) on %v panic: %v\n%s", name, args, topoproto.TabletAliasString(tm.tabletAlias), x, tb.Stack(4))) *err = fmt.Errorf("caught panic during %v: %v", name, x) return } @@ -76,11 +76,11 @@ func (tm *TabletManager) HandleRPCPanic(ctx context.Context, name string, args, *err = vterrors.New(sqlErr.VtRpcErrorCode(), (*err).Error()) } - log.Warningf("TabletManager.%v(%v)(on %v from %v) error: %v", name, args, topoproto.TabletAliasString(tm.tabletAlias), from, (*err).Error()) + log.Warn(fmt.Sprintf("TabletManager.%v(%v)(on %v from %v) error: %v", name, args, topoproto.TabletAliasString(tm.tabletAlias), from, (*err).Error())) *err = vterrors.ToGRPC(vterrors.Wrapf(*err, "TabletManager.%v on %v", name, topoproto.TabletAliasString(tm.tabletAlias))) } else { // success case - log.Infof("TabletManager.%v(%v)(on %v from %v): %#v", name, args, topoproto.TabletAliasString(tm.tabletAlias), from, reply) + log.Info(fmt.Sprintf("TabletManager.%v(%v)(on %v from %v): %#v", name, args, topoproto.TabletAliasString(tm.tabletAlias), from, reply)) } } diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index 5f1526c9f79..7b09c8bb7d1 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -212,7 +212,7 @@ func (tm *TabletManager) DeleteTableData(ctx context.Context, req *tabletmanager rowsDeleted := uint64(0) // Delete all of the matching rows from the table, in batches, until we've // deleted them all. - log.Infof("Starting deletion of data from table %s using query %q", table, query) + log.Info(fmt.Sprintf("Starting deletion of data from table %s using query %q", table, query)) for { // Back off if we're causing too much load on the database with these // batch deletes. @@ -238,8 +238,7 @@ func (tm *TabletManager) DeleteTableData(ctx context.Context, req *tabletmanager // how much work we've done, how much is left, and how long it may take // (considering throttling, system performance, etc). 
if rowsDeleted%progressRows == 0 { - log.Infof("Successfully deleted %d rows of data from table %s so far, using query %q", - rowsDeleted, table, query) + log.Info(fmt.Sprintf("Successfully deleted %d rows of data from table %s so far, using query %q", rowsDeleted, table, query)) } if res.RowsAffected == 0 { // We're done with this table break @@ -248,8 +247,7 @@ func (tm *TabletManager) DeleteTableData(ctx context.Context, req *tabletmanager return nil, err } } - log.Infof("Completed deletion of data (%d rows) from table %s using query %q", - rowsDeleted, table, query) + log.Info(fmt.Sprintf("Completed deletion of data (%d rows) from table %s using query %q", rowsDeleted, table, query)) } return &tabletmanagerdatapb.DeleteTableDataResponse{}, nil @@ -817,7 +815,7 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid table name %s specified for sequence backing table: %v", seq.BackingTableName, err) } - log.Infof("Updating sequence %s.%s to %d", seq.BackingTableDbName, seq.BackingTableName, nextVal) + log.Info(fmt.Sprintf("Updating sequence %s.%s to %d", seq.BackingTableDbName, seq.BackingTableName, nextVal)) initQuery := sqlparser.BuildParsedQuery(sqlInitSequenceTable, backingTableDbNameEscaped, backingTableNameEscaped, @@ -891,7 +889,7 @@ func (tm *TabletManager) ValidateVReplicationPermissionsOld(ctx context.Context, if err != nil { return nil, err } - log.Infof("Validating VReplication permissions on %s using query %s", tm.tabletAlias, query) + log.Info(fmt.Sprintf("Validating VReplication permissions on %s using query %s", tm.tabletAlias, query)) conn, err := tm.MysqlDaemon.GetAllPrivsConnection(ctx) if err != nil { return nil, err @@ -914,7 +912,7 @@ func (tm *TabletManager) ValidateVReplicationPermissionsOld(ctx context.Context, if !val { errorString = fmt.Sprintf("user %s does not have the required set of permissions (select,insert,update,delete) on the %s.vreplication table on tablet %s", tm.DBConfigs.Filtered.User, sidecar.GetName(), topoproto.TabletAliasString(tm.tabletAlias)) - log.Errorf("validateVReplicationPermissions returning error: %s. Permission query run was %s", errorString, query) + log.Error(fmt.Sprintf("validateVReplicationPermissions returning error: %s. Permission query run was %s", errorString, query)) } return &tabletmanagerdatapb.ValidateVReplicationPermissionsResponse{ User: tm.DBConfigs.Filtered.User, @@ -927,7 +925,7 @@ func (tm *TabletManager) ValidateVReplicationPermissionsOld(ctx context.Context, // the minimum permissions required on the sidecardb vreplication table // using a functional testing approach that doesn't require access to mysql.user table. 
func (tm *TabletManager) ValidateVReplicationPermissions(ctx context.Context, req *tabletmanagerdatapb.ValidateVReplicationPermissionsRequest) (*tabletmanagerdatapb.ValidateVReplicationPermissionsResponse, error) { - log.Infof("Validating VReplication permissions on sidecar db %s", tm.tabletAlias) + log.Info(fmt.Sprintf("Validating VReplication permissions on sidecar db %s", tm.tabletAlias)) conn, err := tm.MysqlDaemon.GetFilteredConnection(ctx) if err != nil { @@ -941,7 +939,7 @@ func (tm *TabletManager) ValidateVReplicationPermissions(ctx context.Context, re defer func() { _, err := conn.ExecuteFetch("ROLLBACK", 1, false) if err != nil { - log.Warningf("failed to rollback transaction after permission testing: %v", err) + log.Warn(fmt.Sprintf("failed to rollback transaction after permission testing: %v", err)) } }() @@ -977,7 +975,7 @@ func (tm *TabletManager) ValidateVReplicationPermissions(ctx context.Context, re return nil, vterrors.Wrapf(err, "failed to bind %s query for permission testing", test.permission) } - log.Infof("Testing %s permission using query: %s", test.permission, query) + log.Info(fmt.Sprintf("Testing %s permission using query: %s", test.permission, query)) if _, err := conn.ExecuteFetch(query, 1, false); err != nil { // Check if we got `ERTableAccessDenied` error code from MySQL sqlErr, ok := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) @@ -994,8 +992,7 @@ func (tm *TabletManager) ValidateVReplicationPermissions(ctx context.Context, re } } - log.Infof("VReplication sidecardb permission validation succeeded for user %s on tablet %s", - tm.DBConfigs.Filtered.User, tm.tabletAlias) + log.Info(fmt.Sprintf("VReplication sidecardb permission validation succeeded for user %s on tablet %s", tm.DBConfigs.Filtered.User, tm.tabletAlias)) return &tabletmanagerdatapb.ValidateVReplicationPermissionsResponse{ User: tm.DBConfigs.Filtered.User, diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go index cae43b0924f..ed50962c213 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go @@ -404,7 +404,7 @@ func TestMoveTablesUnsharded(t *testing.T) { tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, checkForJournal, &sqltypes.Result{}) for _, ftc := range targetShards { - log.Infof("Testing target shard %s", ftc.tablet.Alias) + log.Info(fmt.Sprintf("Testing target shard %s", ftc.tablet.Alias)) addInvariants(ftc.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) getCopyStateQuery := fmt.Sprintf(sqlGetVReplicationCopyStatus, sidecar.GetIdentifier(), vreplID) ftc.vrdbClient.AddInvariant(getCopyStateQuery, &sqltypes.Result{}) @@ -672,7 +672,7 @@ func TestMoveTablesSharded(t *testing.T) { tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, checkForJournal, &sqltypes.Result{}) for _, ftc := range targetShards { - log.Infof("Testing target shard %s", ftc.tablet.Alias) + log.Info(fmt.Sprintf("Testing target shard %s", ftc.tablet.Alias)) addInvariants(ftc.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) getCopyStateQuery := fmt.Sprintf(sqlGetVReplicationCopyStatus, sidecar.GetIdentifier(), vreplID) ftc.vrdbClient.AddInvariant(getCopyStateQuery, &sqltypes.Result{}) @@ -1072,7 +1072,7 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { // which doesn't play well with subtests. 
defer func() { if err := recover(); err != nil { - log.Infof("Got panic in test: %v", err) + log.Info(fmt.Sprintf("Got panic in test: %v", err)) log.Flush() t.Errorf("Recovered from panic: %v, stack: %s", err, debug.Stack()) } diff --git a/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor.go b/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor.go index 13cad547a50..e44adba92be 100644 --- a/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor.go +++ b/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor.go @@ -199,7 +199,7 @@ func (m *Monitor) checkAndFixSemiSyncBlocked() { m.errorCount.Add(1) // If we are unable to determine whether the primary is blocked or not, // then we can just abort the function and try again later. - log.Errorf("SemiSync Monitor: failed to check if primary is blocked on semi-sync: %v", err) + log.Error(fmt.Sprintf("SemiSync Monitor: failed to check if primary is blocked on semi-sync: %v", err)) return } // Set the isBlocked state. @@ -256,19 +256,19 @@ func (m *Monitor) WaitUntilSemiSyncUnblocked(ctx context.Context) error { if !m.stillBlocked() { // If we find that the primary isn't blocked, we're good, // we don't need to wait for anything. - log.Infof("Primary not blocked on semi-sync ACKs") + log.Info("Primary not blocked on semi-sync ACKs") return nil } - log.Infof("Waiting for semi-sync to be unblocked") + log.Info("Waiting for semi-sync to be unblocked") // The primary is blocked. We need to wait for it to be unblocked // or the context to expire. ch := m.addWaiter() select { case <-ch: - log.Infof("Finished waiting for semi-sync to be unblocked") + log.Info("Finished waiting for semi-sync to be unblocked") return nil case <-ctx.Done(): - log.Infof("Error while waiting for semi-sync to be unblocked - %s", ctx.Err().Error()) + log.Info("Error while waiting for semi-sync to be unblocked - " + ctx.Err().Error()) return ctx.Err() } } @@ -373,14 +373,14 @@ func (m *Monitor) write() { conn, err := m.appPool.Get(ctx) if err != nil { m.errorCount.Add(1) - log.Errorf("SemiSync Monitor: failed to get a connection when writing to semisync_heartbeat table: %v", err) + log.Error(fmt.Sprintf("SemiSync Monitor: failed to get a connection when writing to semisync_heartbeat table: %v", err)) return } err = conn.Conn.ExecuteFetchMultiDrain(m.addLockWaitTimeout(m.bindSideCarDBName(semiSyncHeartbeatWrite))) conn.Recycle() if err != nil { m.errorCount.Add(1) - log.Errorf("SemiSync Monitor: failed to write to semisync_heartbeat table: %v", err) + log.Error(fmt.Sprintf("SemiSync Monitor: failed to write to semisync_heartbeat table: %v", err)) } else { // One of the writes went through without an error. // This means that we aren't blocked on semi-sync anymore. 
@@ -412,14 +412,14 @@ func (m *Monitor) clearAllData() { conn, err := m.appPool.Get(ctx) if err != nil { m.errorCount.Add(1) - log.Errorf("SemiSync Monitor: failed get a connection to clear semisync_heartbeat table: %v", err) + log.Error(fmt.Sprintf("SemiSync Monitor: failed get a connection to clear semisync_heartbeat table: %v", err)) return } defer conn.Recycle() _, _, err = conn.Conn.ExecuteFetchMulti(m.addLockWaitTimeout(m.bindSideCarDBName(semiSyncHeartbeatClear)), 0, false) if err != nil { m.errorCount.Add(1) - log.Errorf("SemiSync Monitor: failed to clear semisync_heartbeat table: %v", err) + log.Error(fmt.Sprintf("SemiSync Monitor: failed to clear semisync_heartbeat table: %v", err)) } } diff --git a/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor_test.go b/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor_test.go index 1a96643efa8..f42587c135f 100644 --- a/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor_test.go +++ b/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor_test.go @@ -1052,7 +1052,7 @@ func TestDeadlockOnClose(t *testing.T) { // The test timed out, which means we deadlocked. buf := make([]byte, 1<<16) // 64 KB buffer size stackSize := runtime.Stack(buf, true) - log.Errorf("Stack trace:\n%s", string(buf[:stackSize])) + log.Error("Stack trace:\n" + string(buf[:stackSize])) t.Fatalf("Deadlock occurred while closing the monitor") } } diff --git a/go/vt/vttablet/tabletmanager/shard_sync.go b/go/vt/vttablet/tabletmanager/shard_sync.go index 546f60c96fe..4b9971e481d 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync.go +++ b/go/vt/vttablet/tabletmanager/shard_sync.go @@ -18,6 +18,7 @@ package tabletmanager import ( "context" + "fmt" "time" "github.com/spf13/pflag" @@ -90,11 +91,11 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st if event != nil { if event.Err != nil { // The watch failed. Stop it so we start a new one if needed. - log.Errorf("Shard watch failed: %v", event.Err) + log.Error(fmt.Sprintf("Shard watch failed: %v", event.Err)) shardWatch.stop() } } else { - log.Infof("Got a nil event from the shard watcher for %s. This should not happen.", tm.tabletAlias) + log.Info(fmt.Sprintf("Got a nil event from the shard watcher for %s. This should not happen.", tm.tabletAlias)) } case <-ctx.Done(): // Our context was cancelled. Terminate the loop. @@ -113,7 +114,7 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st // This is a failsafe code because we've seen races that can cause // primary term start time to become zero. if tablet.PrimaryTermStartTime == nil { - log.Errorf("PrimaryTermStartTime should not be nil: %v", tablet) + log.Error(fmt.Sprintf("PrimaryTermStartTime should not be nil: %v", tablet)) // Start retry timer and go back to sleep. retryChan = time.After(shardSyncRetryDelay) continue @@ -122,7 +123,7 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st // Fetch the start time from the record we just got, because the tm's tablet can change. primaryAlias, shouldDemote, err := syncShardPrimary(ctx, tm.TopoServer, tablet, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC()) if err != nil { - log.Errorf("Failed to sync shard record: %v", err) + log.Error(fmt.Sprintf("Failed to sync shard record: %v", err)) // Start retry timer and go back to sleep. 
retryChan = time.After(shardSyncRetryDelay) continue @@ -132,7 +133,7 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st // This means that we should end our term, since someone else must have claimed primaryship // and wrote to the shard record if err := tm.endPrimaryTerm(ctx, primaryAlias); err != nil { - log.Errorf("Failed to end primary term: %v", err) + log.Error(fmt.Sprintf("Failed to end primary term: %v", err)) // Start retry timer and go back to sleep. retryChan = time.After(shardSyncRetryDelay) continue @@ -149,7 +150,7 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st continue } if err := shardWatch.start(tm.TopoServer, tablet.Keyspace, tablet.Shard); err != nil { - log.Errorf("Failed to start shard watch: %v", err) + log.Error(fmt.Sprintf("Failed to start shard watch: %v", err)) // Start retry timer and go back to sleep. retryChan = time.After(shardSyncRetryDelay) continue @@ -195,7 +196,7 @@ func syncShardPrimary(ctx context.Context, ts *topo.Server, tablet *topodatapb.T } aliasStr := topoproto.TabletAliasString(tablet.Alias) - log.Infof("Updating shard record: primary_alias=%v, primary_term_start_time=%v", aliasStr, PrimaryTermStartTime) + log.Info(fmt.Sprintf("Updating shard record: primary_alias=%v, primary_term_start_time=%v", aliasStr, PrimaryTermStartTime)) si.PrimaryAlias = tablet.Alias si.PrimaryTermStartTime = protoutil.TimeToProto(PrimaryTermStartTime) return nil @@ -221,11 +222,11 @@ func syncShardPrimary(ctx context.Context, ts *topo.Server, tablet *topodatapb.T // We just directly update our tablet type to REPLICA. func (tm *TabletManager) endPrimaryTerm(ctx context.Context, primaryAlias *topodatapb.TabletAlias) error { primaryAliasStr := topoproto.TabletAliasString(primaryAlias) - log.Warningf("Another tablet (%v) has won primary election. Stepping down to %v.", primaryAliasStr, tm.baseTabletType) + log.Warn(fmt.Sprintf("Another tablet (%v) has won primary election. Stepping down to %v.", primaryAliasStr, tm.baseTabletType)) if mysqlctl.DisableActiveReparents { // Don't touch anything at the MySQL level. Just update tablet state. - log.Infof("Active reparents are disabled; updating tablet state only.") + log.Info("Active reparents are disabled; updating tablet state only.") changeTypeCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() if err := tm.tmState.ChangeTabletType(changeTypeCtx, tm.baseTabletType, DBActionNone); err != nil { @@ -239,7 +240,7 @@ func (tm *TabletManager) endPrimaryTerm(ctx context.Context, primaryAlias *topod // triggers after a new primary has taken over, so we are past the point of // no return. Instead, we should leave partial results and retry the rest // later. 
- log.Infof("Active reparents are enabled; converting MySQL to replica.") + log.Info("Active reparents are enabled; converting MySQL to replica.") demotePrimaryCtx, cancelDemotePrimary := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancelDemotePrimary() if _, err := tm.demotePrimary(demotePrimaryCtx, false /* revertPartialFailure */, true /* force */); err != nil { @@ -247,7 +248,7 @@ func (tm *TabletManager) endPrimaryTerm(ctx context.Context, primaryAlias *topod } setPrimaryCtx, cancelSetPrimary := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancelSetPrimary() - log.Infof("Attempting to reparent self to new primary %v.", primaryAliasStr) + log.Info(fmt.Sprintf("Attempting to reparent self to new primary %v.", primaryAliasStr)) if primaryAlias == nil { if err := tm.tmState.ChangeTabletType(ctx, topodatapb.TabletType_REPLICA, DBActionNone); err != nil { return err diff --git a/go/vt/vttablet/tabletmanager/shard_watcher.go b/go/vt/vttablet/tabletmanager/shard_watcher.go index d8a6dd2c2dc..bf0365186f4 100644 --- a/go/vt/vttablet/tabletmanager/shard_watcher.go +++ b/go/vt/vttablet/tabletmanager/shard_watcher.go @@ -18,6 +18,7 @@ package tabletmanager import ( "context" + "fmt" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" @@ -33,7 +34,7 @@ func (sw *shardWatcher) active() bool { } func (sw *shardWatcher) start(ts *topo.Server, keyspace, shard string) error { - log.Infof("Starting shard watch of %v/%v", keyspace, shard) + log.Info(fmt.Sprintf("Starting shard watch of %v/%v", keyspace, shard)) ctx, cancel := context.WithCancel(context.Background()) _, c, err := ts.WatchShard(ctx, keyspace, shard) @@ -52,13 +53,13 @@ func (sw *shardWatcher) stop() { return } - log.Infof("Stopping shard watch...") + log.Info("Stopping shard watch...") sw.watchCancel() // Drain all remaining watch events. for range sw.watchChan { } - log.Infof("Shard watch stopped.") + log.Info("Shard watch stopped.") sw.watchChan = nil sw.watchCancel = nil diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index e2fdd416de8..ffdc9fece9c 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -40,6 +40,7 @@ import ( "fmt" "maps" "math/rand/v2" + "os" "regexp" "strings" "sync" @@ -236,9 +237,9 @@ func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, d if err != nil { return nil, err } - log.Infof("Using detected machine hostname: %v, to change this, fix your machine network configuration or override it with --tablet-hostname. Tablet %s", hostname, alias.String()) + log.Info(fmt.Sprintf("Using detected machine hostname: %v, to change this, fix your machine network configuration or override it with --tablet-hostname. Tablet %s", hostname, alias.String())) } else { - log.Infof("Using hostname: %v from --tablet-hostname flag. Tablet %s", hostname, alias.String()) + log.Info(fmt.Sprintf("Using hostname: %v from --tablet-hostname flag. Tablet %s", hostname, alias.String())) } if initKeyspace == "" || initShard == "" { @@ -315,7 +316,7 @@ func getBuildTags(buildTags map[string]string, skipTagsCSV string) (map[string]s } } else { skippers[i] = func(s string) bool { - log.Warningf(skipTag) + log.Warn(skipTag) return s == skipTag } } @@ -367,9 +368,9 @@ func setTabletTagsStats(tablet *topodatapb.Tablet) { // Start starts the TabletManager. 
func (tm *TabletManager) Start(tablet *topodatapb.Tablet, config *tabletenv.TabletConfig) error { defer func() { - log.Infof("TabletManager Start took ~%d ms", time.Since(servenv.GetInitStartTime()).Milliseconds()) + log.Info(fmt.Sprintf("TabletManager Start took ~%d ms", time.Since(servenv.GetInitStartTime()).Milliseconds())) }() - log.Infof("TabletManager Start") + log.Info("TabletManager Start") tm.DBConfigs.DBName = topoproto.TabletDbName(tablet) tm.tabletAlias = tablet.Alias tm.tmc = tmclient.NewTabletManagerClient() @@ -388,25 +389,23 @@ func (tm *TabletManager) Start(tablet *topodatapb.Tablet, config *tabletenv.Tabl switch { case err != nil: // No existing tablet record found, use init-tablet-type - log.Infof("No existing tablet record found, using init-tablet-type: %v", tablet.Type) + log.Info(fmt.Sprintf("No existing tablet record found, using init-tablet-type: %v", tablet.Type)) case existingTablet.Type == topodatapb.TabletType_PRIMARY: // Don't set to PRIMARY yet - let checkPrimaryShip() validate and decide // checkPrimaryShip() has the logic to verify shard records and determine if this tablet should really be PRIMARY - log.Infof("Found existing tablet record with PRIMARY type, setting to REPLICA and allowing checkPrimaryShip() to validate") + log.Info("Found existing tablet record with PRIMARY type, setting to REPLICA and allowing checkPrimaryShip() to validate") tablet.Type = topodatapb.TabletType_REPLICA case existingTablet.Type == topodatapb.TabletType_BACKUP || existingTablet.Type == topodatapb.TabletType_RESTORE: // Skip transient operational types (BACKUP, RESTORE) // These are temporary states that should not be preserved across restarts - log.Infof("Found existing tablet record with transient type %v, using init-tablet-type %v instead", - existingTablet.Type, tablet.Type) + log.Info(fmt.Sprintf("Found existing tablet record with transient type %v, using init-tablet-type %v instead", existingTablet.Type, tablet.Type)) default: // Safe to restore the type for non-PRIMARY, non-transient types - log.Infof("Found existing tablet record with --init-tablet-type-lookup enabled, using tablet type %v from topology instead of init-tablet-type %v", - existingTablet.Type, tablet.Type) + log.Info(fmt.Sprintf("Found existing tablet record with --init-tablet-type-lookup enabled, using tablet type %v from topology instead of init-tablet-type %v", existingTablet.Type, tablet.Type)) tablet.Type = existingTablet.Type } } else { - log.Infof("Using init-tablet-type %v", tablet.Type) + log.Info(fmt.Sprintf("Using init-tablet-type %v", tablet.Type)) } tm.tmState = newTMState(tm, tablet) @@ -516,7 +515,7 @@ func (tm *TabletManager) Close() { defer updateCancel() if _, err := tm.TopoServer.UpdateTabletFields(updateCtx, tm.tabletAlias, f); err != nil { - log.Warningf("Failed to update tablet record, may contain stale identifiers: %v", err) + log.Warn(fmt.Sprintf("Failed to update tablet record, may contain stale identifiers: %v", err)) } tm.tmState.Close() @@ -558,7 +557,7 @@ func (tm *TabletManager) createKeyspaceShard(ctx context.Context) (*topo.ShardIn defer tm.mutex.Unlock() tablet := tm.Tablet() - log.Infof("Reading/creating keyspace and shard records for %v/%v", tablet.Keyspace, tablet.Shard) + log.Info(fmt.Sprintf("Reading/creating keyspace and shard records for %v/%v", tablet.Keyspace, tablet.Shard)) // Read the shard, create it if necessary. 
var shardInfo *topo.ShardInfo @@ -664,11 +663,11 @@ func (tm *TabletManager) rebuildKeyspace(ctx context.Context, done chan<- struct var srvKeyspace *topodatapb.SrvKeyspace defer func() { - log.Infof("Keyspace rebuilt: %v", keyspace) + log.Info(fmt.Sprintf("Keyspace rebuilt: %v", keyspace)) if ctx.Err() == nil { err := tm.tmState.RefreshFromTopoInfo(tm.BatchCtx, nil, srvKeyspace) if err != nil { - log.Errorf("Error refreshing topo information - %v", err) + log.Error(fmt.Sprintf("Error refreshing topo information - %v", err)) } } close(done) @@ -696,7 +695,7 @@ func (tm *TabletManager) rebuildKeyspace(ctx context.Context, done chan<- struct } } if firstTime { - log.Warningf("rebuildKeyspace failed, will retry every %v: %v", retryInterval, err) + log.Warn(fmt.Sprintf("rebuildKeyspace failed, will retry every %v: %v", retryInterval, err)) } firstTime = false time.Sleep(retryInterval) @@ -714,7 +713,7 @@ func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInf case topo.IsErrType(err, topo.NoNode): // There's no existing tablet record, so we can assume // no one has left us a message to step down. - log.Infof("Shard primary alias matches, but there is no existing tablet record. Switching to primary with 'Now' as time") + log.Info("Shard primary alias matches, but there is no existing tablet record. Switching to primary with 'Now' as time") tm.tmState.UpdateTablet(func(tablet *topodatapb.Tablet) { tablet.Type = topodatapb.TabletType_PRIMARY // Update the primary term start time (current value is 0) because we @@ -724,7 +723,7 @@ func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInf }) case err == nil: if oldTablet.Type == topodatapb.TabletType_PRIMARY { - log.Infof("Shard primary alias matches, and existing tablet agrees. Switching to primary with tablet's primary term start time: %v", oldTablet.PrimaryTermStartTime) + log.Info(fmt.Sprintf("Shard primary alias matches, and existing tablet agrees. Switching to primary with tablet's primary term start time: %v", oldTablet.PrimaryTermStartTime)) // We're marked as primary in the shard record, // and our existing tablet record agrees. tm.tmState.UpdateTablet(func(tablet *topodatapb.Tablet) { @@ -732,7 +731,7 @@ func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInf tablet.PrimaryTermStartTime = oldTablet.PrimaryTermStartTime }) } else { - log.Warningf("Shard primary alias matches, but existing tablet is not primary. Switching from %v to primary with the shard's primary term start time: %v", oldTablet.Type, si.PrimaryTermStartTime) + log.Warn(fmt.Sprintf("Shard primary alias matches, but existing tablet is not primary. Switching from %v to primary with the shard's primary term start time: %v", oldTablet.Type, si.PrimaryTermStartTime)) tm.tmState.UpdateTablet(func(tablet *topodatapb.Tablet) { tablet.Type = topodatapb.TabletType_PRIMARY tablet.PrimaryTermStartTime = si.PrimaryTermStartTime @@ -753,13 +752,13 @@ func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInf oldPrimaryTermStartTime := oldTablet.GetPrimaryTermStartTime() currentShardTime := si.GetPrimaryTermStartTime() if oldPrimaryTermStartTime.After(currentShardTime) { - log.Infof("Shard primary alias does not match, but the tablet's primary term start time is newer. Switching to primary with tablet's primary term start time: %v", oldTablet.PrimaryTermStartTime) + log.Info(fmt.Sprintf("Shard primary alias does not match, but the tablet's primary term start time is newer. 
Switching to primary with tablet's primary term start time: %v", oldTablet.PrimaryTermStartTime)) tm.tmState.UpdateTablet(func(tablet *topodatapb.Tablet) { tablet.Type = topodatapb.TabletType_PRIMARY tablet.PrimaryTermStartTime = oldTablet.PrimaryTermStartTime }) } else { - log.Infof("Existing tablet type is primary, but the shard record has a different primary with a newer timestamp. Remaining a replica") + log.Info("Existing tablet type is primary, but the shard record has a different primary with a newer timestamp. Remaining a replica") } } default: @@ -786,7 +785,7 @@ func (tm *TabletManager) checkMysql(ctx context.Context) error { }) mysqlPort, err := tm.MysqlDaemon.GetMysqlPort(ctx) if err != nil { - log.Warningf("Cannot get current mysql port, will keep retrying every %v: %v", mysqlPortRetryInterval, err) + log.Warn(fmt.Sprintf("Cannot get current mysql port, will keep retrying every %v: %v", mysqlPortRetryInterval, err)) go tm.findMysqlPort(mysqlPortRetryInterval) } else { tm.tmState.UpdateTablet(func(tablet *topodatapb.Tablet) { @@ -812,7 +811,7 @@ func (tm *TabletManager) findMysqlPort(retryInterval time.Duration) { if err != nil || mport == 0 { continue } - log.Infof("Identified mysql port: %v", mport) + log.Info(fmt.Sprintf("Identified mysql port: %v", mport)) tm.tmState.SetMysqlPort(mport) return } @@ -826,7 +825,7 @@ func (tm *TabletManager) redoPreparedTransactionsAndSetReadWrite(ctx context.Con // Ignore the error if the sever doesn't support super read only variable. // We should just redo the preapred transactions before we set it to read-write. if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { - log.Warningf("server does not know about super_read_only, continuing anyway...") + log.Warn("server does not know about super_read_only, continuing anyway...") } else { return err } @@ -861,10 +860,10 @@ func (tm *TabletManager) initTablet(ctx context.Context) error { // instance of a startup timeout). Upon running this code // again, we want to fix ShardReplication. if updateErr := topo.UpdateTabletReplicationData(ctx, tm.TopoServer, tablet); updateErr != nil { - log.Errorf("UpdateTabletReplicationData failed for tablet %v: %v", topoproto.TabletAliasString(tablet.Alias), updateErr) + log.Error(fmt.Sprintf("UpdateTabletReplicationData failed for tablet %v: %v", topoproto.TabletAliasString(tablet.Alias), updateErr)) return vterrors.Wrap(updateErr, "UpdateTabletReplicationData failed") } - log.Infof("Successfully updated tablet replication data for alias: %v", topoproto.TabletAliasString(tablet.Alias)) + log.Info(fmt.Sprintf("Successfully updated tablet replication data for alias: %v", topoproto.TabletAliasString(tablet.Alias))) // Then overwrite everything, ignoring version mismatch. 
if err := tm.TopoServer.UpdateTablet(ctx, topo.NewTabletInfo(tablet, nil)); err != nil { @@ -895,7 +894,8 @@ func (tm *TabletManager) handleRestore(ctx context.Context, config *tabletenv.Ta var err error backupTime, err = time.Parse(mysqlctl.BackupTimestampFormat, restoreFromBackupTsStr) if err != nil { - log.Exitf(fmt.Sprintf("RestoreFromBackup failed: unable to parse the backup timestamp value provided of '%s'", restoreFromBackupTsStr)) + log.Error(fmt.Sprintf("RestoreFromBackup failed: unable to parse the backup timestamp value provided of '%s'", restoreFromBackupTsStr)) + os.Exit(1) } } @@ -904,20 +904,23 @@ func (tm *TabletManager) handleRestore(ctx context.Context, config *tabletenv.Ta var err error restoreToTimestamp, err = mysqlctl.ParseRFC3339(restoreToTimestampStr) if err != nil { - log.Exitf(fmt.Sprintf("RestoreFromBackup failed: unable to parse the --restore-to-timestamp value provided of '%s'. Error: %v", restoreToTimestampStr, err)) + log.Error(fmt.Sprintf("RestoreFromBackup failed: unable to parse the --restore-to-timestamp value provided of '%s'. Error: %v", restoreToTimestampStr, err)) + os.Exit(1) } } // restoreFromBackup will just be a regular action // (same as if it was triggered remotely) if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime, restoreToTimestamp, restoreToPos, restoreFromBackupAllowedEngines, mysqlShutdownTimeout); err != nil { - log.Exitf("RestoreFromBackup failed: %v", err) + log.Error(fmt.Sprintf("RestoreFromBackup failed: %v", err)) + os.Exit(1) } // Make sure we have the correct privileges for the DBA user before we start the state manager. err := tm.waitForDBAGrants(config, mysqlctl.DbaGrantWaitTime) if err != nil { - log.Exitf("Failed waiting for DBA grants: %v", err) + log.Error(fmt.Sprintf("Failed waiting for DBA grants: %v", err)) + os.Exit(1) } // Open the state manager after restore is done. @@ -972,7 +975,7 @@ func (tm *TabletManager) withRetry(ctx context.Context, description string, work return err } - log.Warningf("%v failed (%v), backing off %v before retrying", description, err, backoff) + log.Warn(fmt.Sprintf("%v failed (%v), backing off %v before retrying", description, err, backoff)) select { case <-ctx.Done(): return ctx.Err() @@ -1032,14 +1035,14 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t } if si.PrimaryAlias == nil { // There's no primary. This is fine, since there might be no primary currently - log.Warningf("cannot start replication during initialization: shard %v/%v has no primary.", tablet.Keyspace, tablet.Shard) + log.Warn(fmt.Sprintf("cannot start replication during initialization: shard %v/%v has no primary.", tablet.Keyspace, tablet.Shard)) return "", nil } if topoproto.TabletAliasEqual(si.PrimaryAlias, tablet.Alias) { // We used to be the primary before we got restarted, // and no other primary has been elected in the meantime. // There isn't anything to do here either. 
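The handleRestore hunk above replaces each fatal log.Exitf call with an Error-level log followed by an explicit os.Exit(1), which is why the file also gains an "os" import. A compact sketch of that fatal path is below; the backup timestamp layout is only illustrative, and the standard library logger stands in so the example stays self-contained.

package main

import (
	"fmt"
	"log"
	"os"
	"time"
)

// mustParseBackupTime mirrors the fatal path in handleRestore: on a parse
// failure the process logs at error level and exits with status 1, instead
// of relying on a combined Exitf helper.
func mustParseBackupTime(value string) time.Time {
	const backupTimestampFormat = "2006-01-02.150405" // illustrative layout for this sketch
	ts, err := time.Parse(backupTimestampFormat, value)
	if err != nil {
		// In the diff this is log.Error(fmt.Sprintf(...)) followed by os.Exit(1).
		log.Println(fmt.Sprintf("RestoreFromBackup failed: unable to parse the backup timestamp value provided of '%s'", value))
		os.Exit(1)
	}
	return ts
}

func main() {
	fmt.Println(mustParseBackupTime("2024-01-02.030405"))
}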
- log.Warningf("cannot start replication during initialization: primary in shard record still points to this tablet.") + log.Warn("cannot start replication during initialization: primary in shard record still points to this tablet.") return "", nil } currentPrimary, err := tm.TopoServer.GetTablet(ctx, si.PrimaryAlias) @@ -1051,7 +1054,7 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t if err != nil { return "", vterrors.Wrapf(err, "cannot read keyspace durability policy %v", tablet.Keyspace) } - log.Infof("Getting a new durability policy for %v", durabilityName) + log.Info(fmt.Sprintf("Getting a new durability policy for %v", durabilityName)) durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return "", vterrors.Wrapf(err, "cannot get durability policy %v", durabilityName) @@ -1072,7 +1075,7 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t // Set primary and start replication. if currentPrimary.MysqlHostname == "" { - log.Warningf("primary tablet in the shard record does not have mysql hostname specified, possibly because that tablet has been shut down.") + log.Warn("primary tablet in the shard record does not have mysql hostname specified, possibly because that tablet has been shut down.") return "", nil } diff --git a/go/vt/vttablet/tabletmanager/tm_state.go b/go/vt/vttablet/tabletmanager/tm_state.go index af7468aba97..096a27ba1bd 100644 --- a/go/vt/vttablet/tabletmanager/tm_state.go +++ b/go/vt/vttablet/tabletmanager/tm_state.go @@ -101,7 +101,7 @@ func newTMState(tm *TabletManager, tablet *topodatapb.Tablet) *tmState { } func (ts *tmState) Open() { - log.Infof("In tmState.Open()") + log.Info("In tmState.Open()") ts.mu.Lock() defer ts.mu.Unlock() if ts.isOpen { @@ -116,7 +116,7 @@ func (ts *tmState) Open() { } func (ts *tmState) Close() { - log.Infof("In tmState.Close()") + log.Info("In tmState.Close()") ts.mu.Lock() defer ts.mu.Unlock() @@ -201,7 +201,7 @@ func (ts *tmState) prepareForDisableQueryService(ctx context.Context, servType t func (ts *tmState) ChangeTabletType(ctx context.Context, tabletType topodatapb.TabletType, action DBAction) error { ts.mu.Lock() defer ts.mu.Unlock() - log.Infof("Changing Tablet Type: %v for %s", tabletType, ts.tablet.Alias.String()) + log.Info(fmt.Sprintf("Changing Tablet Type: %v for %s", tabletType, ts.tablet.Alias.String())) var primaryTermStartTime *vttime.Time if tabletType == topodatapb.TabletType_PRIMARY { @@ -210,7 +210,7 @@ func (ts *tmState) ChangeTabletType(ctx context.Context, tabletType topodatapb.T // Update the tablet record first. _, err := topotools.ChangeType(ctx, ts.tm.TopoServer, ts.tm.tabletAlias, tabletType, primaryTermStartTime) if err != nil { - log.Errorf("Error changing type in topo record for tablet %s :- %v\nWill keep trying to read from the toposerver", topoproto.TabletAliasString(ts.tm.tabletAlias), err) + log.Error(fmt.Sprintf("Error changing type in topo record for tablet %s :- %v\nWill keep trying to read from the toposerver", topoproto.TabletAliasString(ts.tm.tabletAlias), err)) // In case of a topo error, we aren't sure if the data has been written or not. // We must read the data again and verify whether the previous write succeeded or not. 
// The only way to guarantee safety is to keep retrying read until we succeed @@ -224,10 +224,10 @@ func (ts *tmState) ChangeTabletType(ctx context.Context, tabletType topodatapb.T continue } if ti.Type == tabletType && proto.Equal(ti.PrimaryTermStartTime, primaryTermStartTime) { - log.Infof("Tablet record in toposerver matches, continuing operation") + log.Info("Tablet record in toposerver matches, continuing operation") break } - log.Errorf("Tablet record read from toposerver does not match what we attempted to write, canceling operation") + log.Error("Tablet record read from toposerver does not match what we attempted to write, canceling operation") return err } } @@ -270,7 +270,7 @@ func (ts *tmState) updateTypeAndPublish(ctx context.Context, tabletType topodata func (ts *tmState) ChangeTabletTags(ctx context.Context, tabletTags map[string]string) { ts.mu.Lock() defer ts.mu.Unlock() - log.Infof("Changing Tablet Tags: %v for %s", tabletTags, ts.tablet.Alias.String()) + log.Info(fmt.Sprintf("Changing Tablet Tags: %v for %s", tabletTags, ts.tablet.Alias.String())) ts.tablet.Tags = tabletTags ts.publishStateLocked(ctx) @@ -309,7 +309,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { // before other services are shutdown. reason := ts.canServe(ts.tablet.Type) if reason != "" { - log.Infof("Disabling query service: %v", reason) + log.Info(fmt.Sprintf("Disabling query service: %v", reason)) // SetServingType can result in error. Although we have forever retries to fix these transient errors // but, under certain conditions these errors are non-transient (see https://github.com/vitessio/vitess/issues/10145). // There is no way to distinguish between retry (transient) and non-retryable errors, therefore we will @@ -318,7 +318,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { // to propagate error to client hence no changes there but we will propagate error from 'ChangeTabletType' to client. if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, ptsTime, false, reason); err != nil { errStr := fmt.Sprintf("SetServingType(serving=false) failed: %v", err) - log.Errorf(errStr) + log.Error(errStr) // No need to short circuit. Apply all steps and return error in the end. returnErr = vterrors.Wrap(err, errStr) } @@ -326,7 +326,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { if err := ts.applyDenyList(ctx); err != nil { errStr := fmt.Sprintf("Cannot update denied tables rule: %v", err) - log.Errorf(errStr) + log.Error(errStr) // No need to short circuit. Apply all steps and return error in the end. 
returnErr = vterrors.Wrap(err, errStr) } @@ -367,7 +367,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { if reason == "" { if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, ptsTime, true, ""); err != nil { errStr := fmt.Sprintf("Cannot start query service: %v", err) - log.Errorf(errStr) + log.Error(errStr) returnErr = vterrors.Wrap(err, errStr) } } @@ -400,7 +400,7 @@ func (ts *tmState) applyDenyList(ctx context.Context) (err error) { // Verify that at least one table matches the wildcards, so // that we don't add a rule to deny all tables if len(tables) > 0 { - log.Infof("Denying tables %v", strings.Join(tables, ", ")) + log.Info(fmt.Sprintf("Denying tables %v", strings.Join(tables, ", "))) qr := rules.NewQueryRule("enforce denied tables", "denied_table", rules.QRFailRetry) for _, t := range tables { qr.AddTableCond(t) @@ -411,13 +411,13 @@ func (ts *tmState) applyDenyList(ctx context.Context) (err error) { loadRuleErr := ts.tm.QueryServiceControl.SetQueryRules(denyListQueryList, denyListRules) if loadRuleErr != nil { - log.Warningf("Fail to load query rule set %s: %s", denyListQueryList, loadRuleErr) + log.Warn(fmt.Sprintf("Fail to load query rule set %s: %s", denyListQueryList, loadRuleErr)) } return nil } func (ts *tmState) publishStateLocked(ctx context.Context) { - log.Infof("Publishing state: %v", ts.tablet) + log.Info(fmt.Sprintf("Publishing state: %v", ts.tablet)) // If retry is in progress, there's nothing to do. if ts.isPublishing { return @@ -427,7 +427,7 @@ func (ts *tmState) publishStateLocked(ctx context.Context) { defer cancel() _, err := ts.tm.TopoServer.UpdateTabletFields(ctx, ts.tm.tabletAlias, func(tablet *topodatapb.Tablet) error { if err := topotools.CheckOwnership(tablet, ts.tablet); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return topo.NewError(topo.NoUpdateNeeded, "") } proto.Reset(tablet) @@ -440,7 +440,7 @@ func (ts *tmState) publishStateLocked(ctx context.Context) { servenv.ExitChan <- syscall.SIGTERM return } - log.Errorf("Unable to publish state to topo, will keep retrying: %v", err) + log.Error(fmt.Sprintf("Unable to publish state to topo, will keep retrying: %v", err)) ts.isPublishing = true // Keep retrying until success. 
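The tm_state.go hunks above cover the two remaining call shapes: a bare error value is converted with fmt.Sprint before logging, and a message that was already assembled with fmt.Sprintf is passed through unchanged. A small sketch under the same assumptions as the earlier examples (standard library logger standing in for the structured one):

package main

import (
	"errors"
	"fmt"
	"log"
)

// publishFailureExample shows both shapes: fmt.Sprint for a bare error
// value, and a pass-through for a string already built with fmt.Sprintf.
func publishFailureExample(err error) {
	// Bare error value: log.Error(fmt.Sprint(err)) in the diff.
	log.Println("ERROR " + fmt.Sprint(err))

	// Pre-built message: log.Errorf(errStr) simply becomes log.Error(errStr).
	errStr := fmt.Sprintf("SetServingType(serving=false) failed: %v", err)
	log.Println("ERROR " + errStr)
}

func main() {
	publishFailureExample(errors.New("ownership check failed"))
}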
go ts.retryPublish() @@ -459,7 +459,7 @@ func (ts *tmState) retryPublish() { ctx, cancel := context.WithTimeout(ts.ctx, topo.RemoteOperationTimeout) _, err := ts.tm.TopoServer.UpdateTabletFields(ctx, ts.tm.tabletAlias, func(tablet *topodatapb.Tablet) error { if err := topotools.CheckOwnership(tablet, ts.tablet); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return topo.NewError(topo.NoUpdateNeeded, "") } proto.Reset(tablet) @@ -473,13 +473,13 @@ func (ts *tmState) retryPublish() { servenv.ExitChan <- syscall.SIGTERM return } - log.Errorf("Unable to publish state to topo, will keep retrying: %v", err) + log.Error(fmt.Sprintf("Unable to publish state to topo, will keep retrying: %v", err)) ts.mu.Unlock() time.Sleep(publishRetryInterval) ts.mu.Lock() continue } - log.Infof("Published state: %v", ts.tablet) + log.Info(fmt.Sprintf("Published state: %v", ts.tablet)) return } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/controller.go b/go/vt/vttablet/tabletmanager/vdiff/controller.go index a64afa46ba5..c86155cc042 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/controller.go +++ b/go/vt/vttablet/tabletmanager/vdiff/controller.go @@ -87,7 +87,7 @@ type controller struct { func newController(row sqltypes.RowNamedValues, dbClientFactory func() binlogplayer.DBClient, ts *topo.Server, vde *Engine, options *tabletmanagerdata.VDiffOptions, ) (*controller, error) { - log.Infof("VDiff controller initializing for %+v", row) + log.Info(fmt.Sprintf("VDiff controller initializing for %+v", row)) id, _ := row["id"].ToInt64() ct := &controller{ @@ -115,20 +115,20 @@ func (ct *controller) Stop() { func (ct *controller) run(ctx context.Context) { defer func() { - log.Infof("Run finished for vdiff %s", ct.uuid) + log.Info("Run finished for vdiff " + ct.uuid) close(ct.done) }() dbClient := ct.vde.dbClientFactoryFiltered() if err := dbClient.Connect(); err != nil { - log.Errorf("Encountered an error connecting to database for vdiff %s: %v", ct.uuid, err) + log.Error(fmt.Sprintf("Encountered an error connecting to database for vdiff %s: %v", ct.uuid, err)) return } defer dbClient.Close() qr, err := ct.vde.getVDiffByID(ctx, dbClient, ct.id) if err != nil { - log.Errorf("Encountered an error getting vdiff record for %s: %v", ct.uuid, err) + log.Error(fmt.Sprintf("Encountered an error getting vdiff record for %s: %v", ct.uuid, err)) return } @@ -140,15 +140,15 @@ func (ct *controller) run(ctx context.Context) { if state == StartedState { action = "Restarting" } - log.Infof("%s vdiff %s", action, ct.uuid) + log.Info(fmt.Sprintf("%s vdiff %s", action, ct.uuid)) if err := ct.start(ctx, dbClient); err != nil { - log.Errorf("Encountered an error for vdiff %s: %s", ct.uuid, err) + log.Error(fmt.Sprintf("Encountered an error for vdiff %s: %s", ct.uuid, err)) if err := ct.saveErrorState(ctx, err); err != nil { - log.Errorf("Unable to save error state for vdiff %s; giving up because %s", ct.uuid, err.Error()) + log.Error(fmt.Sprintf("Unable to save error state for vdiff %s; giving up because %s", ct.uuid, err.Error())) } } default: - log.Infof("VDiff %s was not marked as runnable (state: %s), doing nothing", ct.uuid, state) + log.Info(fmt.Sprintf("VDiff %s was not marked as runnable (state: %s), doing nothing", ct.uuid, state)) } } @@ -200,7 +200,7 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) if err != nil { return err } - log.Infof("Found %d vreplication streams for %s", len(qr.Rows), ct.workflow) + log.Info(fmt.Sprintf("Found %d vreplication streams for %s", 
len(qr.Rows), ct.workflow)) for i, row := range qr.Named().Rows { select { case <-ctx.Done(): @@ -216,7 +216,7 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) } var bls binlogdatapb.BinlogSource if err := prototext.Unmarshal(sourceBytes, &bls); err != nil { - log.Errorf("Encountered an error unmarshalling vdiff binlog source for %s: %v", ct.uuid, err) + log.Error(fmt.Sprintf("Encountered an error unmarshalling vdiff binlog source for %s: %v", ct.uuid, err)) return err } source.shard = bls.Shard @@ -253,7 +253,7 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) return err } if err := wd.diff(ctx); err != nil { - log.Errorf("Encountered an error performing workflow diff for vdiff %s: %v", ct.uuid, err) + log.Error(fmt.Sprintf("Encountered an error performing workflow diff for vdiff %s: %v", ct.uuid, err)) return err } @@ -328,7 +328,7 @@ func (ct *controller) saveErrorState(ctx context.Context, saveErr error) error { for { if err := save(); err != nil { - log.Warningf("Failed to persist vdiff error state: %v. Will retry in %s", err, retryDelay.String()) + log.Warn(fmt.Sprintf("Failed to persist vdiff error state: %v. Will retry in %s", err, retryDelay.String())) select { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "engine is shutting down") diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine.go b/go/vt/vttablet/tabletmanager/vdiff/engine.go index 81d55b016d5..2e882743fec 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine.go @@ -124,7 +124,7 @@ func (vde *Engine) Open(ctx context.Context, vre *vreplication.Engine) { if vde.ts == nil || vde.isOpen { return } - log.Infof("VDiff Engine: opening...") + log.Info("VDiff Engine: opening...") if vde.cancelRetry != nil { vde.cancelRetry() @@ -132,7 +132,7 @@ func (vde *Engine) Open(ctx context.Context, vre *vreplication.Engine) { } vde.vre = vre if err := vde.openLocked(ctx); err != nil { - log.Infof("openLocked error: %s", err) + log.Info(fmt.Sprintf("openLocked error: %s", err)) ctx, cancel := context.WithCancel(ctx) vde.cancelRetry = cancel go vde.retry(ctx, err) @@ -142,7 +142,7 @@ func (vde *Engine) Open(ctx context.Context, vre *vreplication.Engine) { func (vde *Engine) openLocked(ctx context.Context) error { // This should never happen if len(vde.controllers) > 0 { - log.Warningf("VDiff Engine invalid state detected: %d controllers existed when opening; resetting state", len(vde.controllers)) + log.Warn(fmt.Sprintf("VDiff Engine invalid state detected: %d controllers existed when opening; resetting state", len(vde.controllers))) vde.resetControllers() } @@ -177,7 +177,7 @@ func (vde *Engine) openLocked(ctx context.Context) error { var openRetryInterval = 1 * time.Second func (vde *Engine) retry(ctx context.Context, err error) { - log.Errorf("Error starting vdiff engine: %v, will keep retrying.", err) + log.Error(fmt.Sprintf("Error starting vdiff engine: %v, will keep retrying.", err)) for { timer := time.NewTimer(openRetryInterval) select { @@ -198,7 +198,7 @@ func (vde *Engine) retry(ctx context.Context, err error) { default: } if err := vde.openLocked(ctx); err == nil { - log.Infof("VDiff engine: opened successfully") + log.Info("VDiff engine: opened successfully") // Don't invoke cancelRetry because openLocked // will hold on to this context for later cancellation. 
vde.cancelRetry = nil @@ -278,7 +278,7 @@ func (vde *Engine) Close() { vde.isOpen = false - log.Infof("VDiff Engine: closed") + log.Info("VDiff Engine: closed") } func (vde *Engine) getVDiffsToRun(ctx context.Context) (*sqltypes.Result, error) { @@ -358,7 +358,7 @@ func (vde *Engine) retryVDiffs(ctx context.Context) error { if err != nil { return err } - log.Infof("Retrying vdiff %s that had an ephemeral error of '%v'", uuid, lastError) + log.Info(fmt.Sprintf("Retrying vdiff %s that had an ephemeral error of '%v'", uuid, lastError)) query, err := sqlparser.ParseAndBind(sqlRetryVDiff, sqltypes.Int64BindVariable(id)) if err != nil { return err @@ -389,7 +389,7 @@ func (vde *Engine) retryErroredVDiffs() { } if err := vde.retryVDiffs(vde.ctx); err != nil { - log.Errorf("Error retrying vdiffs: %v", err) + log.Error(fmt.Sprintf("Error retrying vdiffs: %v", err)) } } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go index 8349ae5c643..c36e1a6f7ab 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go @@ -124,7 +124,7 @@ func (td *tableDiffer) initialize(ctx context.Context) error { targetKeyspace := td.wd.ct.vde.thisTablet.Keyspace lockName := fmt.Sprintf("%s/%s", targetKeyspace, td.wd.ct.workflow) - log.Infof("Locking workflow %s for VDiff %s", lockName, td.wd.ct.uuid) + log.Info(fmt.Sprintf("Locking workflow %s for VDiff %s", lockName, td.wd.ct.uuid)) // We attempt to get the lock until we can, using an exponential backoff. var ( vctx context.Context @@ -139,8 +139,7 @@ func (td *tableDiffer) initialize(ctx context.Context) error { if lockErr == nil { break } - log.Warningf("Locking workflow %s for VDiff %s initialization (stream ID: %d) failed, will wait %v before retrying: %v", - lockName, td.wd.ct.uuid, td.wd.ct.id, retryDelay, lockErr) + log.Warn(fmt.Sprintf("Locking workflow %s for VDiff %s initialization (stream ID: %d) failed, will wait %v before retrying: %v", lockName, td.wd.ct.uuid, td.wd.ct.id, retryDelay, lockErr)) select { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "engine is shutting down") @@ -163,7 +162,7 @@ func (td *tableDiffer) initialize(ctx context.Context) error { defer func() { unlock(&err) if err != nil { - log.Errorf("Unlocking workflow %s for vdiff %s failed: %v", lockName, td.wd.ct.uuid, err) + log.Error(fmt.Sprintf("Unlocking workflow %s for vdiff %s failed: %v", lockName, td.wd.ct.uuid, err)) } }() @@ -173,12 +172,11 @@ func (td *tableDiffer) initialize(ctx context.Context) error { defer func() { // We use a new context as we want to reset the state even // when the parent context has timed out or been canceled. 
- log.Infof("Restarting the %q VReplication workflow for vdiff %s on target tablets in keyspace %q", - td.wd.ct.workflow, td.wd.ct.uuid, targetKeyspace) + log.Info(fmt.Sprintf("Restarting the %q VReplication workflow for vdiff %s on target tablets in keyspace %q", td.wd.ct.workflow, td.wd.ct.uuid, targetKeyspace)) restartCtx, restartCancel := context.WithTimeout(context.Background(), BackgroundOperationTimeout) defer restartCancel() if err := td.restartTargetVReplicationStreams(restartCtx); err != nil { - log.Errorf("error restarting target streams for vdiff %s: %v", td.wd.ct.uuid, err) + log.Error(fmt.Sprintf("error restarting target streams for vdiff %s: %v", td.wd.ct.uuid, err)) } }() @@ -204,7 +202,7 @@ func (td *tableDiffer) initialize(ctx context.Context) error { } func (td *tableDiffer) stopTargetVReplicationStreams(ctx context.Context, dbClient binlogplayer.DBClient) error { - log.Infof("stopTargetVReplicationStreams for vdiff %s", td.wd.ct.uuid) + log.Info("stopTargetVReplicationStreams for vdiff " + td.wd.ct.uuid) ct := td.wd.ct query := "update _vt.vreplication set state = 'Stopped', message='for vdiff' " + ct.workflowFilter if _, err := ct.vde.vre.Exec(query); err != nil { @@ -358,7 +356,7 @@ func (td *tableDiffer) syncTargetStreams(ctx context.Context) error { return err } if err := ct.vde.vre.WaitForPos(waitCtx, source.vrID, source.snapshotPosition); err != nil { - log.Errorf("WaitForPosition for vdiff %s error: %d: %s", td.wd.ct.uuid, source.vrID, err) + log.Error(fmt.Sprintf("WaitForPosition for vdiff %s error: %d: %s", td.wd.ct.uuid, source.vrID, err)) return vterrors.Wrapf(err, "WaitForPosition for stream id %d", source.vrID) } return nil @@ -376,8 +374,7 @@ func (td *tableDiffer) startTargetDataStream(ctx context.Context) error { go td.streamOneShard(ctx, ct.targetShardStreamer, td.tablePlan.targetQuery, td.lastTargetPK, gtidch) gtid, ok := <-gtidch if !ok { - log.Errorf("VDiff %s streaming error on target tablet %s: %v", - td.wd.ct.uuid, topoproto.TabletAliasString(ct.targetShardStreamer.tablet.Alias), ct.targetShardStreamer.err) + log.Error(fmt.Sprintf("VDiff %s streaming error on target tablet %s: %v", td.wd.ct.uuid, topoproto.TabletAliasString(ct.targetShardStreamer.tablet.Alias), ct.targetShardStreamer.err)) return ct.targetShardStreamer.err } ct.targetShardStreamer.snapshotPosition = gtid @@ -393,8 +390,7 @@ func (td *tableDiffer) startSourceDataStreams(ctx context.Context) error { gtid, ok := <-gtidch if !ok { - log.Errorf("VDiff %s streaming error on source tablet %s: %v", - td.wd.ct.uuid, topoproto.TabletAliasString(source.tablet.Alias), source.err) + log.Error(fmt.Sprintf("VDiff %s streaming error on source tablet %s: %v", td.wd.ct.uuid, topoproto.TabletAliasString(source.tablet.Alias), source.err)) return source.err } source.snapshotPosition = gtid @@ -410,7 +406,7 @@ func (td *tableDiffer) restartTargetVReplicationStreams(ctx context.Context) err ct := td.wd.ct query := fmt.Sprintf("update _vt.vreplication set state='Running', message='', stop_pos='' where db_name=%s and workflow=%s", encodeString(ct.vde.dbName), encodeString(ct.workflow)) - log.Infof("Restarting the %q VReplication workflow for vdiff %s using %q", ct.workflow, td.wd.ct.uuid, query) + log.Info(fmt.Sprintf("Restarting the %q VReplication workflow for vdiff %s using %q", ct.workflow, td.wd.ct.uuid, query)) var err error // Let's retry a few times if we get a retryable error. 
for i := 1; i <= 3; i++ { @@ -418,19 +414,18 @@ func (td *tableDiffer) restartTargetVReplicationStreams(ctx context.Context) err if err == nil || !sqlerror.IsEphemeralError(err) { break } - log.Warningf("Encountered the following error while restarting the %q VReplication workflow, will retry (attempt #%d): %v", - ct.workflow, i, err) + log.Warn(fmt.Sprintf("Encountered the following error while restarting the %q VReplication workflow, will retry (attempt #%d): %v", ct.workflow, i, err)) } return err } func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStreamer, query string, lastPK *querypb.QueryResult, gtidch chan string) { tabletAliasString := topoproto.TabletAliasString(participant.tablet.Alias) - log.Infof("streamOneShard Start for vdiff %s on %s using query: %s", td.wd.ct.uuid, tabletAliasString, query) + log.Info(fmt.Sprintf("streamOneShard Start for vdiff %s on %s using query: %s", td.wd.ct.uuid, tabletAliasString, query)) td.wgShardStreamers.Add(1) defer func() { - log.Infof("streamOneShard for vdiff %s End on %s (err: %v)", td.wd.ct.uuid, tabletAliasString, participant.err) + log.Info(fmt.Sprintf("streamOneShard for vdiff %s End on %s (err: %v)", td.wd.ct.uuid, tabletAliasString, participant.err)) select { case <-ctx.Done(): default: @@ -572,7 +567,7 @@ func (td *tableDiffer) diff(ctx context.Context, coreOpts *tabletmanagerdatapb.V // Save our progress when we finish the run. defer func() { if err := td.updateTableProgress(dbClient, dr, lastProcessedRow); err != nil { - log.Errorf("Failed to update vdiff %s progress on %s table: %v", td.wd.ct.uuid, td.table.Name, err) + log.Error(fmt.Sprintf("Failed to update vdiff %s progress on %s table: %v", td.wd.ct.uuid, td.table.Name, err)) } globalStats.RowsDiffedCount.Add(dr.ProcessedRows) }() @@ -597,7 +592,7 @@ func (td *tableDiffer) diff(ctx context.Context, coreOpts *tabletmanagerdatapb.V if !mismatch && dr.MismatchedRows > 0 { mismatch = true - log.Infof("Flagging mismatch in vdiff %s for %s: %+v", td.wd.ct.uuid, td.table.Name, dr) + log.Info(fmt.Sprintf("Flagging mismatch in vdiff %s for %s: %+v", td.wd.ct.uuid, td.table.Name, dr)) if err := updateTableMismatch(dbClient, td.wd.ct.id, td.table.Name); err != nil { return nil, err } @@ -605,20 +600,20 @@ func (td *tableDiffer) diff(ctx context.Context, coreOpts *tabletmanagerdatapb.V rowsToCompare-- if rowsToCompare < 0 { - log.Infof("Stopping vdiff %s, specified row limit of %d reached", td.wd.ct.uuid, rowsToCompare) + log.Info(fmt.Sprintf("Stopping vdiff %s, specified row limit of %d reached", td.wd.ct.uuid, rowsToCompare)) return dr, nil } if advanceSource { sourceRow, err = sourceExecutor.next() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return nil, err } } if advanceTarget { targetRow, err = targetExecutor.next() if err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) return nil, err } } @@ -907,7 +902,7 @@ func (td *tableDiffer) adjustForSourceTimeZone(targetSelectExprs []sqlparser.Sel if td.wd.ct.sourceTimeZone == "" { return targetSelectExprs } - log.Infof("Source time zone specified for vdiff %s: %s", td.wd.ct.uuid, td.wd.ct.sourceTimeZone) + log.Info(fmt.Sprintf("Source time zone specified for vdiff %s: %s", td.wd.ct.uuid, td.wd.ct.sourceTimeZone)) var newSelectExprs []sqlparser.SelectExpr var modified bool for _, expr := range targetSelectExprs { @@ -924,7 +919,7 @@ func (td *tableDiffer) adjustForSourceTimeZone(targetSelectExprs []sqlparser.Sel sqlparser.NewStrLiteral(td.wd.ct.targetTimeZone), 
sqlparser.NewStrLiteral(td.wd.ct.sourceTimeZone), ) - log.Infof("Converting datetime column %s using convert_tz() for vdiff %s", colName, td.wd.ct.uuid) + log.Info(fmt.Sprintf("Converting datetime column %s using convert_tz() for vdiff %s", colName, td.wd.ct.uuid)) newSelectExprs = append(newSelectExprs, &sqlparser.AliasedExpr{Expr: convertTZFuncExpr, As: colAs.Name}) converted = true modified = true @@ -936,8 +931,7 @@ func (td *tableDiffer) adjustForSourceTimeZone(targetSelectExprs []sqlparser.Sel } } if modified { // at least one datetime was found - log.Infof("Found datetime columns when SourceTimeZone was set, resetting target SelectExprs after convert_tz() for vdiff %s", - td.wd.ct.uuid) + log.Info("Found datetime columns when SourceTimeZone was set, resetting target SelectExprs after convert_tz() for vdiff " + td.wd.ct.uuid) return newSelectExprs } return targetSelectExprs @@ -982,8 +976,7 @@ func (td *tableDiffer) getSourcePKCols() error { if len(sourceSchema.TableDefinitions) == 0 { // The table no longer exists on the source. Any rows that exist on the target will be // reported as extra rows. - log.Warningf("The %s table was not found on source tablet %s during VDiff for the %s workflow; any rows on the target will be reported as extra", - td.table.Name, topoproto.TabletAliasString(sourceTablet.Alias), td.wd.ct.workflow) + log.Warn(fmt.Sprintf("The %s table was not found on source tablet %s during VDiff for the %s workflow; any rows on the target will be reported as extra", td.table.Name, topoproto.TabletAliasString(sourceTablet.Alias), td.wd.ct.workflow)) return nil } sourceTable := sourceSchema.TableDefinitions[0] @@ -1006,11 +999,11 @@ func (td *tableDiffer) getSourcePKCols() error { td.table.Name, topoproto.TabletAliasString(sourceTablet.Alias)) } if len(pkeCols) > 0 { - log.Infof("Using primary key equivalent columns %+v for table %s in vdiff %s", pkeCols, td.table.Name, td.wd.ct.uuid) + log.Info(fmt.Sprintf("Using primary key equivalent columns %+v for table %s in vdiff %s", pkeCols, td.table.Name, td.wd.ct.uuid)) sourceTable.PrimaryKeyColumns = pkeCols } else { // We use every column together as a substitute PK. - log.Infof("Using all columns as a substitute primary key for table %s in vdiff %s", td.table.Name, td.wd.ct.uuid) + log.Info(fmt.Sprintf("Using all columns as a substitute primary key for table %s in vdiff %s", td.table.Name, td.wd.ct.uuid)) sourceTable.PrimaryKeyColumns = append(sourceTable.PrimaryKeyColumns, td.table.Columns...) } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go index 880cd010cb1..4cbd3c5496e 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go @@ -200,8 +200,8 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str tp.sourceQuery = sqlparser.String(sourceSelect) tp.targetQuery = sqlparser.String(targetSelect) - log.Infof("VDiff query on source: %v", tp.sourceQuery) - log.Infof("VDiff query on target: %v", tp.targetQuery) + log.Info(fmt.Sprintf("VDiff query on source: %v", tp.sourceQuery)) + log.Info(fmt.Sprintf("VDiff query on target: %v", tp.targetQuery)) tp.aggregates = aggregates td.tablePlan = tp @@ -225,7 +225,7 @@ func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlpa case *sqlparser.FuncExpr: // eg. 
weight_string() // no-op default: - log.Warningf("Not considering column %v for PK, type %v not handled", selExpr, ct) + log.Warn(fmt.Sprintf("Not considering column %v for PK, type %v not handled", selExpr, ct)) } if strings.EqualFold(pk, colname) { tp.compareCols[i].isPK = true diff --git a/go/vt/vttablet/tabletmanager/vdiff/utils.go b/go/vt/vttablet/tabletmanager/vdiff/utils.go index e56df52c5cc..5dffe9814e4 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/utils.go +++ b/go/vt/vttablet/tabletmanager/vdiff/utils.go @@ -73,7 +73,7 @@ func insertVDiffLog(ctx context.Context, dbClient binlogplayer.DBClient, vdiffID query := "insert into _vt.vdiff_log(vdiff_id, message) values (%d, %s)" query = fmt.Sprintf(query, vdiffID, encodeString(message)) if _, err := dbClient.ExecuteFetch(query, 1); err != nil { - log.Error("Error inserting into _vt.vdiff_log: %v", err) + log.Error(fmt.Sprintf("Error inserting into _vt.vdiff_log: %v", err)) } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go index 10df0338ef9..681b4de3125 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go @@ -124,8 +124,7 @@ func (wd *workflowDiffer) doReconcileExtraRows(dr *DiffReport, maxExtraRowsToCom matchedDiffs := int64(0) maxRows := min(int(dr.ExtraRowsSource), int(maxExtraRowsToCompare)) - log.Infof("Reconciling extra rows for table %s in vdiff %s, extra source rows %d, extra target rows %d, max rows %d", - dr.TableName, wd.ct.uuid, dr.ExtraRowsSource, dr.ExtraRowsTarget, maxRows) + log.Info(fmt.Sprintf("Reconciling extra rows for table %s in vdiff %s, extra source rows %d, extra target rows %d, max rows %d", dr.TableName, wd.ct.uuid, dr.ExtraRowsSource, dr.ExtraRowsTarget, maxRows)) // Find the matching extra rows for i := 0; i < len(dr.ExtraRowsSourceDiffs); i++ { @@ -144,8 +143,7 @@ func (wd *workflowDiffer) doReconcileExtraRows(dr *DiffReport, maxExtraRowsToCom } if matchedDiffs == 0 { - log.Infof("No matching extra rows found for table %s in vdiff %s, checked %d rows", - dr.TableName, maxRows, wd.ct.uuid) + log.Info(fmt.Sprintf("No matching extra rows found for table %s in vdiff %s, checked %d rows", dr.TableName, wd.ct.uuid, maxRows)) } else { // Now remove the matching extra rows newExtraRowsSourceDiffs := make([]*RowDiff, 0, int64(len(dr.ExtraRowsSourceDiffs))-matchedDiffs) @@ -178,8 +176,7 @@ func (wd *workflowDiffer) doReconcileExtraRows(dr *DiffReport, maxExtraRowsToCom // We do not update `ProcessedRows` here, because any extra target or source rows are already included in it. // We do not update `MismatchedRows`, because extra target or source rows are not counted as mismatches. - log.Infof("Reconciled extra rows for table %s in vdiff %s, matching rows %d, extra source rows %d, extra target rows %d. Max compared rows %d", - dr.TableName, wd.ct.uuid, matchedDiffs, dr.ExtraRowsSource, dr.ExtraRowsTarget, maxRows) + log.Info(fmt.Sprintf("Reconciled extra rows for table %s in vdiff %s, matching rows %d, extra source rows %d, extra target rows %d. Max compared rows %d", dr.TableName, wd.ct.uuid, matchedDiffs, dr.ExtraRowsSource, dr.ExtraRowsTarget, maxRows)) } // Trim the extra rows diffs to the maxReportSampleRows value. 
Note we need to do this after updating @@ -227,7 +224,7 @@ func (wd *workflowDiffer) diffTable(ctx context.Context, dbClient binlogplayer.D maxDiffRuntime = time.Duration(wd.ct.options.CoreOptions.MaxDiffSeconds) * time.Second } - log.Infof("Starting differ on table %s for vdiff %s", td.table.Name, wd.ct.uuid) + log.Info(fmt.Sprintf("Starting differ on table %s for vdiff %s", td.table.Name, wd.ct.uuid)) if err := td.updateTableState(ctx, dbClient, StartedState); err != nil { return err } @@ -257,22 +254,22 @@ func (wd *workflowDiffer) diffTable(ctx context.Context, dbClient binlogplayer.D if err := td.initialize(ctx); err != nil { // Setup the consistent snapshots return err } - log.Infof("Table initialization done on table %s for vdiff %s", td.table.Name, wd.ct.uuid) + log.Info(fmt.Sprintf("Table initialization done on table %s for vdiff %s", td.table.Name, wd.ct.uuid)) diffTimer = time.NewTimer(maxDiffRuntime) diffReport, diffErr = td.diff(ctx, wd.opts.CoreOptions, wd.opts.ReportOptions, diffTimer.C) if diffErr == nil { // We finished the diff successfully break } - log.Errorf("Encountered an error diffing table %s for vdiff %s: %v", td.table.Name, wd.ct.uuid, diffErr) + log.Error(fmt.Sprintf("Encountered an error diffing table %s for vdiff %s: %v", td.table.Name, wd.ct.uuid, diffErr)) if !errors.Is(diffErr, ErrMaxDiffDurationExceeded) { // We only want to retry if we hit the max-diff-duration return diffErr } } - log.Infof("Table diff done on table %s for vdiff %s with report: %+v", td.table.Name, wd.ct.uuid, diffReport) + log.Info(fmt.Sprintf("Table diff done on table %s for vdiff %s with report: %+v", td.table.Name, wd.ct.uuid, diffReport)) if diffReport.ExtraRowsSource > 0 || diffReport.ExtraRowsTarget > 0 { if err := wd.reconcileExtraRows(diffReport, wd.opts.CoreOptions.MaxExtraRowsToCompare, wd.opts.ReportOptions.MaxSampleRows); err != nil { - log.Errorf("Encountered an error reconciling extra rows found for table %s for vdiff %s: %v", td.table.Name, wd.ct.uuid, err) + log.Error(fmt.Sprintf("Encountered an error reconciling extra rows found for table %s for vdiff %s: %v", td.table.Name, wd.ct.uuid, err)) return vterrors.Wrap(err, "failed to reconcile extra rows") } } @@ -283,7 +280,7 @@ func (wd *workflowDiffer) diffTable(ctx context.Context, dbClient binlogplayer.D } } - log.Infof("Completed reconciliation on table %s for vdiff %s with updated report: %+v", td.table.Name, wd.ct.uuid, diffReport) + log.Info(fmt.Sprintf("Completed reconciliation on table %s for vdiff %s with updated report: %+v", td.table.Name, wd.ct.uuid, diffReport)) if err := td.updateTableStateAndReport(ctx, dbClient, CompletedState, diffReport); err != nil { return err } @@ -347,7 +344,7 @@ func (wd *workflowDiffer) diff(ctx context.Context) (err error) { td.table.Name, wd.ct.vde.thisTablet.Alias) } - log.Infof("Starting diff of table %s for vdiff %s", td.table.Name, wd.ct.uuid) + log.Info(fmt.Sprintf("Starting diff of table %s for vdiff %s", td.table.Name, wd.ct.uuid)) if err := wd.diffTable(ctx, dbClient, td); err != nil { if err := td.updateTableState(ctx, dbClient, ErrorState); err != nil { return err @@ -358,7 +355,7 @@ func (wd *workflowDiffer) diff(ctx context.Context) (err error) { if err := td.updateTableState(ctx, dbClient, CompletedState); err != nil { return err } - log.Infof("Completed diff of table %s for vdiff %s", td.table.Name, wd.ct.uuid) + log.Info(fmt.Sprintf("Completed diff of table %s for vdiff %s", td.table.Name, wd.ct.uuid)) } if err := wd.markIfCompleted(ctx, dbClient); err != nil { 
return err @@ -490,11 +487,11 @@ func (wd *workflowDiffer) initVDiffTables(dbClient binlogplayer.DBClient) error wd.ct.vde.dbName, tableName, ) - log.Infof("Updating the table stats for %s.%s using: %q", wd.ct.vde.dbName, tableName, stmt.Query) + log.Info(fmt.Sprintf("Updating the table stats for %s.%s using: %q", wd.ct.vde.dbName, tableName, stmt.Query)) if _, err := dbClient.ExecuteFetch(stmt.Query, -1); err != nil { return err } - log.Infof("Finished updating the table stats for %s.%s", wd.ct.vde.dbName, tableName) + log.Info(fmt.Sprintf("Finished updating the table stats for %s.%s", wd.ct.vde.dbName, tableName)) } tableIn.WriteString(encodeString(tableName)) if n++; n < len(wd.tableDiffers) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index e6f4f2a2720..b5415597ae9 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -123,7 +123,7 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor } ct.id = int32(id) ct.workflow = params["workflow"] - log.Infof("creating controller with id: %v, name: %v, cell: %v, tabletTypes: %v", ct.id, ct.workflow, cell, tabletTypesStr) + log.Info(fmt.Sprintf("creating controller with id: %v, name: %v, cell: %v, tabletTypes: %v", ct.id, ct.workflow, cell, tabletTypesStr)) ct.lastWorkflowError = vterrors.NewLastError(fmt.Sprintf("VReplication controller %d for workflow %q", ct.id, ct.workflow), workflowConfig.MaxTimeToRetryError) @@ -149,7 +149,7 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor if v := params["tablet_types"]; v != "" { tabletTypesStr = v } - log.Infof("creating tablet picker for source keyspace/shard %v/%v with cell: %v and tabletTypes: %v", ct.source.Keyspace, ct.source.Shard, cell, tabletTypesStr) + log.Info(fmt.Sprintf("creating tablet picker for source keyspace/shard %v/%v with cell: %v and tabletTypes: %v", ct.source.Keyspace, ct.source.Shard, cell, tabletTypesStr)) cells := strings.Split(cell, ",") sourceTopo := ts @@ -175,7 +175,7 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor func (ct *controller) run(ctx context.Context) { defer func() { - log.Infof("stream %v: stopped", ct.id) + log.Info(fmt.Sprintf("stream %v: stopped", ct.id)) close(ct.done) }() @@ -188,7 +188,7 @@ func (ct *controller) run(ctx context.Context) { // Sometimes, canceled contexts get wrapped as errors. 
select { case <-ctx.Done(): - log.Warningf("context canceled: %s", err.Error()) + log.Warn("context canceled: " + err.Error()) return default: } @@ -198,7 +198,7 @@ func (ct *controller) run(ctx context.Context) { timer := time.NewTimer(ct.WorkflowConfig.RetryDelay) select { case <-ctx.Done(): - log.Warningf("context canceled: %s", err.Error()) + log.Warn("context canceled: " + err.Error()) timer.Stop() return case <-timer.C: @@ -242,7 +242,7 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { defer func() { ct.sourceTablet.Store(&topodatapb.TabletAlias{}) if x := recover(); x != nil { - log.Errorf("stream %v: caught panic: %v\n%s", ct.id, x, tb.Stack(4)) + log.Error(fmt.Sprintf("stream %v: caught panic: %v\n%s", ct.id, x, tb.Stack(4))) err = fmt.Errorf("panic: %v", x) } }() @@ -311,10 +311,10 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { !ct.lastWorkflowError.ShouldRetry() { err = vterrors.Wrapf(err, TerminalErrorIndicator) if errSetState := vr.setState(binlogdatapb.VReplicationWorkflowState_Error, err.Error()); errSetState != nil { - log.Errorf("INTERNAL: unable to setState() in controller: %v. Could not set error text to: %v.", errSetState, err) + log.Error(fmt.Sprintf("INTERNAL: unable to setState() in controller: %v. Could not set error text to: %v.", errSetState, err)) return err // yes, err and not errSetState. } - log.Errorf("vreplication stream %d going into error state due to %+v", ct.id, err) + log.Error(fmt.Sprintf("vreplication stream %d going into error state due to %+v", ct.id, err)) return nil // this will cause vreplicate to quit the workflow } return err @@ -342,8 +342,7 @@ func (ct *controller) pickSourceTablet(ctx context.Context, dbClient binlogplaye if ct.source.GetExternalMysql() != "" { return nil, nil } - log.Infof("Trying to find an eligible source tablet for vreplication stream id %d for workflow: %s", - ct.id, ct.workflow) + log.Info(fmt.Sprintf("Trying to find an eligible source tablet for vreplication stream id %d for workflow: %s", ct.id, ct.workflow)) tpCtx, tpCancel := context.WithTimeout(ctx, discovery.GetTabletPickerRetryDelay()*tabletPickerRetries) defer tpCancel() tablet, err := ct.tabletPicker.PickForStreaming(tpCtx) @@ -357,8 +356,7 @@ func (ct *controller) pickSourceTablet(ctx context.Context, dbClient binlogplaye return tablet, err } ct.setMessage(dbClient, "Picked source tablet: "+tablet.Alias.String()) - log.Infof("Found eligible source tablet %s for vreplication stream id %d for workflow %s", - tablet.Alias.String(), ct.id, ct.workflow) + log.Info(fmt.Sprintf("Found eligible source tablet %s for vreplication stream id %d for workflow %s", tablet.Alias.String(), ct.id, ct.workflow)) ct.sourceTablet.Store(tablet.Alias) return tablet, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index b0af78be611..fed91813ab3 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -213,7 +213,7 @@ func (vre *Engine) Open(ctx context.Context) { if vre.isOpen { return } - log.Infof("VReplication Engine: opening") + log.Info("VReplication Engine: opening") // Cancel any existing retry loops. 
// This guarantees that there will be no more @@ -224,12 +224,12 @@ func (vre *Engine) Open(ctx context.Context) { } if err := vre.openLocked(ctx); err != nil { - log.Infof("openLocked error: %s", err) + log.Info(fmt.Sprintf("openLocked error: %s", err)) ctx, cancel := context.WithCancel(ctx) vre.cancelRetry = cancel go vre.retry(ctx, err) } - log.Infof("VReplication engine opened successfully") + log.Info("VReplication engine opened successfully") } func (vre *Engine) ThrottlerClient() *throttle.Client { @@ -256,7 +256,7 @@ func init() { } func (vre *Engine) retry(ctx context.Context, err error) { - log.Errorf("Error starting vreplication engine: %v, will keep retrying.", err) + log.Error(fmt.Sprintf("Error starting vreplication engine: %v, will keep retrying.", err)) for { timer := time.NewTimer(time.Duration(openRetryInterval.Load())) select { @@ -291,7 +291,7 @@ func (vre *Engine) initControllers(rows []map[string]string) { for _, row := range rows { ct, err := newController(vre.ctx, row, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, nil, vre, discovery.TabletPickerOptions{}) if err != nil { - log.Errorf("Controller could not be initialized for stream: %v: %v", row, err) + log.Error(fmt.Sprintf("Controller could not be initialized for stream: %v: %v", row, err)) continue } vre.controllers[ct.id] = ct @@ -336,7 +336,7 @@ func (vre *Engine) Close() { vre.isOpen = false vre.updateStats() - log.Infof("VReplication Engine: closed") + log.Info("VReplication Engine: closed") } func (vre *Engine) getDBClient(isAdmin bool) binlogplayer.DBClient { @@ -584,10 +584,10 @@ func (vre *Engine) registerJournal(journal *binlogdatapb.Journal, id int32) erro workflow := vre.controllers[id].workflow key := fmt.Sprintf("%s:%d", workflow, journal.Id) ks := fmt.Sprintf("%s:%s", vre.controllers[id].source.Keyspace, vre.controllers[id].source.Shard) - log.Infof("Journal encountered for (%s %s): %v", key, ks, journal) + log.Info(fmt.Sprintf("Journal encountered for (%s %s): %v", key, ks, journal)) je, ok := vre.journaler[key] if !ok { - log.Infof("First stream for workflow %s has joined, creating journaler entry", workflow) + log.Info(fmt.Sprintf("First stream for workflow %s has joined, creating journaler entry", workflow)) je = &journalEvent{ journal: journal, participants: make(map[string]int32), @@ -608,14 +608,14 @@ func (vre *Engine) registerJournal(journal *binlogdatapb.Journal, id int32) erro for _, jks := range journal.Participants { ks := fmt.Sprintf("%s:%s", jks.Keyspace, jks.Shard) if _, ok := controllerSources[ks]; !ok { - log.Errorf("cannot redirect on journal: not all sources are present in this workflow: missing %v", ks) + log.Error(fmt.Sprintf("cannot redirect on journal: not all sources are present in this workflow: missing %v", ks)) return fmt.Errorf("cannot redirect on journal: not all sources are present in this workflow: missing %v", ks) } if _, ok := je.participants[ks]; !ok { - log.Infof("New participant %s found for workflow %s", ks, workflow) + log.Info(fmt.Sprintf("New participant %s found for workflow %s", ks, workflow)) je.participants[ks] = 0 } else { - log.Infof("Participant %s:%d already exists for workflow %s", ks, je.participants[ks], workflow) + log.Info(fmt.Sprintf("Participant %s:%d already exists for workflow %s", ks, je.participants[ks], workflow)) } } for _, gtid := range journal.ShardGtids { @@ -627,7 +627,7 @@ func (vre *Engine) registerJournal(journal *binlogdatapb.Journal, id int32) erro for ks, pid := range je.participants { if pid == 0 { // Still need to 
wait. - log.Infof("Not all participants have joined, including %s", ks) + log.Info("Not all participants have joined, including " + ks) return nil } } @@ -646,7 +646,7 @@ func (vre *Engine) transitionJournal(je *journalEvent) { return } - log.Infof("Transitioning for journal:workload %v", je) + log.Info(fmt.Sprintf("Transitioning for journal:workload %v", je)) // sort both participants and shardgtids participants := make([]string, 0) @@ -654,7 +654,7 @@ func (vre *Engine) transitionJournal(je *journalEvent) { participants = append(participants, ks) } sort.Sort(ShardSorter(participants)) - log.Infof("Participants %+v, oldParticipants %+v", participants, je.participants) + log.Info(fmt.Sprintf("Participants %+v, oldParticipants %+v", participants, je.participants)) shardGTIDs := make([]string, 0) for shard := range je.shardGTIDs { shardGTIDs = append(shardGTIDs, shard) @@ -672,20 +672,20 @@ func (vre *Engine) transitionJournal(je *journalEvent) { dbClient := vre.dbClientFactoryFiltered() if err := dbClient.Connect(); err != nil { - log.Errorf("transitionJournal: unable to connect to the database: %v", err) + log.Error(fmt.Sprintf("transitionJournal: unable to connect to the database: %v", err)) return } defer dbClient.Close() if err := dbClient.Begin(); err != nil { - log.Errorf("transitionJournal: %v", err) + log.Error(fmt.Sprintf("transitionJournal: %v", err)) return } // Use the reference row to copy other fields like cell, tablet_types, etc. params, err := readRow(dbClient, refid) if err != nil { - log.Errorf("transitionJournal: %v", err) + log.Error(fmt.Sprintf("transitionJournal: %v", err)) return } var newids []int32 @@ -702,12 +702,12 @@ func (vre *Engine) transitionJournal(je *journalEvent) { binlogdatapb.VReplicationWorkflowType(workflowType), binlogdatapb.VReplicationWorkflowSubType(workflowSubType), deferSecondaryKeys, "") qr, err := dbClient.ExecuteFetch(ig.String(), maxRows) if err != nil { - log.Errorf("transitionJournal: %v", err) + log.Error(fmt.Sprintf("transitionJournal: %v", err)) return } - log.Infof("Created stream: %v for %v", qr.InsertID, sgtid) + log.Info(fmt.Sprintf("Created stream: %v for %v", qr.InsertID, sgtid)) if qr.InsertID > math.MaxInt32 { - log.Errorf("transitionJournal: InsertID %v too large", qr.InsertID) + log.Error(fmt.Sprintf("transitionJournal: InsertID %v too large", qr.InsertID)) return } newids = append(newids, int32(qr.InsertID)) @@ -716,13 +716,13 @@ func (vre *Engine) transitionJournal(je *journalEvent) { id := je.participants[ks] _, err := dbClient.ExecuteFetch(binlogplayer.DeleteVReplication(id), maxRows) if err != nil { - log.Errorf("transitionJournal: %v", err) + log.Error(fmt.Sprintf("transitionJournal: %v", err)) return } - log.Infof("Deleted stream: %v", id) + log.Info(fmt.Sprintf("Deleted stream: %v", id)) } if err := dbClient.Commit(); err != nil { - log.Errorf("transitionJournal: %v", err) + log.Error(fmt.Sprintf("transitionJournal: %v", err)) return } @@ -735,17 +735,17 @@ func (vre *Engine) transitionJournal(je *journalEvent) { for _, id := range newids { params, err := readRow(dbClient, id) if err != nil { - log.Errorf("transitionJournal: %v", err) + log.Error(fmt.Sprintf("transitionJournal: %v", err)) return } ct, err := newController(vre.ctx, params, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, nil, vre, discovery.TabletPickerOptions{}) if err != nil { - log.Errorf("transitionJournal: %v", err) + log.Error(fmt.Sprintf("transitionJournal: %v", err)) return } vre.controllers[id] = ct } - log.Infof("Completed transition 
for journal:workload %v", je) + log.Info(fmt.Sprintf("Completed transition for journal:workload %v", je)) } // WaitForPos waits for the replication to reach the specified position. @@ -793,7 +793,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { // Deadlock found when trying to get lock; try restarting transaction (errno 1213) (sqlstate 40001) // Docs: https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html#error_er_lock_deadlock if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock { - log.Infof("Deadlock detected waiting for pos %s: %v; will retry", pos, err) + log.Info(fmt.Sprintf("Deadlock detected waiting for pos %s: %v; will retry", pos, err)) } else { return err } @@ -813,7 +813,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { } if current.AtLeast(mPos) { - log.Infof("position: %s reached, wait time: %v", pos, time.Since(start)) + log.Info(fmt.Sprintf("position: %s reached, wait time: %v", pos, time.Since(start))) return nil } diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index b583418b2f6..2e2bcd2867d 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -105,7 +105,7 @@ func setFlag(flagName, flagValue string) { if err := pflag.Set(flagName, flagValue); err != nil { msg := "failed to set flag %q to %q: %v" - log.Errorf(msg, flagName, flagValue, err) + log.Error(fmt.Sprintf(msg, flagName, flagValue, err)) } } @@ -231,7 +231,7 @@ func primaryPosition(t *testing.T) string { func execStatements(t *testing.T, queries []string) { t.Helper() if err := env.Mysqld.ExecuteSuperQueryList(context.Background(), queries); err != nil { - log.Errorf("Error executing query: %s", err.Error()) + log.Error("Error executing query: " + err.Error()) t.Error(err) } } @@ -770,7 +770,7 @@ func customExpectData(t *testing.T, table string, values [][]string, exec func(c if err == nil { return } - log.Errorf("data mismatch: %v, retrying", err) + log.Error(fmt.Sprintf("data mismatch: %v, retrying", err)) time.Sleep(tick) } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go index a22ab848081..3f401192fdf 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go @@ -145,7 +145,7 @@ func (tpb *tablePlanBuilder) createPartialUpdateQuery(dataColumns *binlogdatapb. separator := "" for i, cexpr := range tpb.colExprs { if int64(i) >= dataColumns.Count { - log.Errorf("Ran out of columns trying to generate query for %s", tpb.name.CompliantName()) + log.Error("Ran out of columns trying to generate query for " + tpb.name.CompliantName()) return nil } if cexpr.isPK || cexpr.isGenerated || !isBitSet(dataColumns.Cols, i) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils.go b/go/vt/vttablet/tabletmanager/vreplication/utils.go index 20c797b75e7..26ac8b54974 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/utils.go +++ b/go/vt/vttablet/tabletmanager/vreplication/utils.go @@ -93,7 +93,7 @@ func insertLog(dbClient *vdbClient, typ string, vreplID int32, state, message st // a new log but increment the count. This prevents spamming of the log table in case the same message is logged continuously. 
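One trade-off of the fmt.Sprintf approach, noted purely as an aside and not something this patch attempts to change: the values end up baked into a single pre-rendered string. For comparison, the structured form of the same message with standard-library slog (used directly below, not the project's wrapper) would carry the values as attributes that a JSON handler emits as separate fields. The workflow name is an illustrative value.

package main

import (
	"errors"
	"log/slog"
)

func main() {
	err := errors.New("deadlock found when trying to get lock")
	workflow := "commerce2customer" // illustrative value

	// Pre-rendered string, as this patch does: values are baked into the text.
	slog.Error("transitionJournal: " + err.Error())

	// Structured alternative: the same information as discrete attributes.
	slog.Error("transitionJournal failed",
		slog.String("workflow", workflow),
		slog.Any("error", err),
	)
}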
id, _, lastLogState, lastLogMessage, err := getLastLog(dbClient, vreplID) if err != nil { - log.Errorf("Could not insert vreplication_log record because we failed to get the last log record: %v", err) + log.Error(fmt.Sprintf("Could not insert vreplication_log record because we failed to get the last log record: %v", err)) return } if typ == LogStateChange && state == lastLogState { @@ -108,7 +108,7 @@ func insertLog(dbClient *vdbClient, typ string, vreplID int32, state, message st if len(message) > maxVReplicationLogMessageLen { message, err = textutil.TruncateText(message, maxVReplicationLogMessageLen, binlogplayer.TruncationLocation, binlogplayer.TruncationIndicator) if err != nil { - log.Errorf("Could not insert vreplication_log record because we failed to truncate the message: %v", err) + log.Error(fmt.Sprintf("Could not insert vreplication_log record because we failed to truncate the message: %v", err)) return } } @@ -117,7 +117,7 @@ func insertLog(dbClient *vdbClient, typ string, vreplID int32, state, message st query = buf.ParsedQuery().Query } if _, err = dbClient.ExecuteFetch(query, 10000); err != nil { - log.Errorf("Could not insert into vreplication_log table: %v: %v", query, err) + log.Error(fmt.Sprintf("Could not insert into vreplication_log table: %v: %v", query, err)) } } @@ -234,7 +234,7 @@ func isUnrecoverableError(err error) bool { sqlerror.ERWrongParamcountToNativeFct, sqlerror.ERVectorConversion, sqlerror.ERWrongValueCountOnRow: - log.Errorf("Got unrecoverable error: %v", sqlErr) + log.Error(fmt.Sprintf("Got unrecoverable error: %v", sqlErr)) return true case sqlerror.ERErrorDuringCommit: switch sqlErr.HaErrorCode() { @@ -251,7 +251,7 @@ func isUnrecoverableError(err error) bool { // These are recoverable errors. return false default: - log.Errorf("Got unrecoverable error: %v", sqlErr) + log.Error(fmt.Sprintf("Got unrecoverable error: %v", sqlErr)) return true } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index 0ceb8c4d99e..2e6a001915b 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -380,7 +380,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma defer vc.vr.stats.PhaseTimings.Record("copy", time.Now()) defer vc.vr.stats.CopyLoopCount.Add(1) - log.Infof("Copying table %s, lastpk: %v", tableName, copyState[tableName]) + log.Info(fmt.Sprintf("Copying table %s, lastpk: %v", tableName, copyState[tableName])) plan, err := vc.vr.buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.env.CollationEnv(), vc.vr.vre.env.Parser()) if err != nil { @@ -446,12 +446,12 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma vc.vr.id, encodeString(tableName), vc.vr.id, encodeString(tableName)) dbClient := vc.vr.vre.getDBClient(false) if err := dbClient.Connect(); err != nil { - log.Errorf("Error while garbage collecting older copy_state rows, could not connect to database: %v", err) + log.Error(fmt.Sprintf("Error while garbage collecting older copy_state rows, could not connect to database: %v", err)) return } defer dbClient.Close() if _, err := dbClient.ExecuteFetch(gcQuery, -1); err != nil { - log.Errorf("Error while garbage collecting older copy_state rows with query %q: %v", gcQuery, err) + log.Error(fmt.Sprintf("Error while garbage collecting older copy_state rows with query %q: %v", gcQuery, err)) } }() case <-ctx.Done(): @@ -560,7 +560,7 
@@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma }) if err := copyWorkQueue.enqueue(ctx, currT); err != nil { - log.Warningf("failed to enqueue task in workflow %s: %s", vc.vr.WorkflowName, err.Error()) + log.Warn(fmt.Sprintf("failed to enqueue task in workflow %s: %s", vc.vr.WorkflowName, err.Error())) return err } @@ -581,7 +581,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma if result != nil { switch result.state { case vcopierCopyTaskCancel: - log.Warningf("task was canceled in workflow %s: %v", vc.vr.WorkflowName, result.err) + log.Warn(fmt.Sprintf("task was canceled in workflow %s: %v", vc.vr.WorkflowName, result.err)) return io.EOF case vcopierCopyTaskComplete: // Collect lastpk. Needed for logging at the end. @@ -628,7 +628,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma } if len(terrs) > 0 { terr := vterrors.Aggregate(terrs) - log.Warningf("task error in workflow %s: %v", vc.vr.WorkflowName, terr) + log.Warn(fmt.Sprintf("task error in workflow %s: %v", vc.vr.WorkflowName, terr)) return vterrors.Wrapf(terr, "task error") } @@ -652,7 +652,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma // of a copy phase. select { case <-ctx.Done(): - log.Infof("Copy of %v stopped at lastpk: %v", tableName, lastpkbv) + log.Info(fmt.Sprintf("Copy of %v stopped at lastpk: %v", tableName, lastpkbv)) return nil default: } @@ -665,7 +665,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma return vterrors.Wrapf(err, "failed to execute post copy actions for table %q", tableName) } - log.Infof("Copy of %v finished at lastpk: %v", tableName, lastpkbv) + log.Info(fmt.Sprintf("Copy of %v finished at lastpk: %v", tableName, lastpkbv)) buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf( "delete cs, pca from _vt.%s as cs left join _vt.%s as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name where cs.vrepl_id=%d and cs.table_name=%s", @@ -923,7 +923,7 @@ func (vrh *vcopierCopyTaskResultHooks) sendTo(ch chan<- *vcopierCopyTaskResult) defer func() { // This recover prevents panics when sending to a potentially closed channel. 
if err := recover(); err != nil { - log.Errorf("uncaught panic, vcopier copy task result: %v, error: %+v", result, err) + log.Error(fmt.Sprintf("uncaught panic, vcopier copy task result: %v, error: %+v", result, err)) } }() select { @@ -1081,7 +1081,7 @@ func (vbc *vcopierCopyWorker) execute(ctx context.Context, task *vcopierCopyTask case vcopierCopyTaskInsertCopyState: advanceFn = func(ctx context.Context, args *vcopierCopyTaskArgs) error { if vbc.copyStateInsert == nil { // we don't insert copy state for atomic copy - log.Infof("Skipping copy_state insert") + log.Info("Skipping copy_state insert") return nil } if err := vbc.insertCopyState(ctx, args.lastpk); err != nil { diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go index 3edad7e7855..b98c3577c47 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go @@ -69,8 +69,8 @@ func newCopyAllState(vc *vcopier) (*copyAllState, error) { func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings) error { var err error - log.Infof("Starting copyAll for %s", settings.WorkflowName) - defer log.Infof("Returning from copyAll for %s", settings.WorkflowName) + log.Info("Starting copyAll for " + settings.WorkflowName) + defer log.Info("Returning from copyAll for " + settings.WorkflowName) defer vc.vr.dbClient.Rollback() state, err := newCopyAllState(vc) @@ -108,8 +108,7 @@ func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings serr := vc.vr.sourceVStreamer.VStreamTables(ctx, func(resp *binlogdatapb.VStreamTablesResponse) error { defer vc.vr.stats.PhaseTimings.Record("copy", time.Now()) defer vc.vr.stats.CopyLoopCount.Add(1) - log.Infof("VStreamTablesResponse: received table %s, #fields %d, #rows %d, gtid %s, lastpk %+v", - resp.TableName, len(resp.Fields), len(resp.Rows), resp.Gtid, resp.Lastpk) + log.Info(fmt.Sprintf("VStreamTablesResponse: received table %s, #fields %d, #rows %d, gtid %s, lastpk %+v", resp.TableName, len(resp.Fields), len(resp.Rows), resp.Gtid, resp.Lastpk)) tableName := resp.TableName gtid = resp.Gtid updateRowsCopied := func() error { @@ -136,12 +135,12 @@ func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings } copyWorkQueue = vc.newCopyWorkQueue(parallelism, copyWorkerFactory) if state.currentTableName != "" { - log.Infof("copy of table %s is done at lastpk %+v", state.currentTableName, lastpkbv) + log.Info(fmt.Sprintf("copy of table %s is done at lastpk %+v", state.currentTableName, lastpkbv)) if err := vc.runPostCopyActionsAndDeleteCopyState(ctx, state.currentTableName); err != nil { return err } } else { - log.Infof("starting copy phase with table %s", tableName) + log.Info("starting copy phase with table " + tableName) } state.currentTableName = tableName @@ -198,7 +197,7 @@ func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings Value: lastpkbuf, }, } - log.Infof("copying table %s with lastpk %v", tableName, lastpkbv) + log.Info(fmt.Sprintf("copying table %s with lastpk %v", tableName, lastpkbv)) // Prepare a vcopierCopyTask for the current batch of work. 
currCh := make(chan *vcopierCopyTaskResult, 1) @@ -245,7 +244,7 @@ func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings }) if err := copyWorkQueue.enqueue(ctx, currT); err != nil { - log.Warningf("failed to enqueue task in workflow %s: %s", vc.vr.WorkflowName, err.Error()) + log.Warn(fmt.Sprintf("failed to enqueue task in workflow %s: %s", vc.vr.WorkflowName, err.Error())) return err } @@ -266,7 +265,7 @@ func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings if result != nil { switch result.state { case vcopierCopyTaskCancel: - log.Warningf("task was canceled in workflow %s: %v", vc.vr.WorkflowName, result.err) + log.Warn(fmt.Sprintf("task was canceled in workflow %s: %v", vc.vr.WorkflowName, result.err)) return io.EOF case vcopierCopyTaskComplete: // Collect lastpk. Needed for logging at the end. @@ -282,14 +281,14 @@ func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings return nil }, vstreamOptions) if serr != nil { - log.Infof("VStreamTables failed: %v", serr) + log.Info(fmt.Sprintf("VStreamTables failed: %v", serr)) return serr } // A context expiration was probably caused by a PlannedReparentShard or an // elapsed copy phase duration. CopyAll is not resilient to these events. select { case <-ctx.Done(): - log.Infof("Copy of %v stopped", state.currentTableName) + log.Info(fmt.Sprintf("Copy of %v stopped", state.currentTableName)) return errors.New("CopyAll was interrupted due to context expiration") default: if copyWorkQueue != nil { @@ -301,7 +300,7 @@ func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings if err := vc.updatePos(ctx, gtid); err != nil { return err } - log.Infof("Completed copy of all tables") + log.Info("Completed copy of all tables") } return nil } @@ -313,7 +312,7 @@ func (vc *vcopier) runPostCopyActionsAndDeleteCopyState(ctx context.Context, tab if err := vc.vr.execPostCopyActions(ctx, tableName); err != nil { return vterrors.Wrapf(err, "failed to execute post copy actions for table %q", tableName) } - log.Infof("Deleting copy state and post copy actions for table %s", tableName) + log.Info("Deleting copy state and post copy actions for table " + tableName) delQueryBuf := sqlparser.NewTrackedBuffer(nil) delQueryBuf.Myprintf( "delete cs, pca from _vt.%s as cs left join _vt.%s as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name where cs.vrepl_id=%d and cs.table_name=%s", diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go index 5b7bfd96866..417962cc355 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go @@ -1786,7 +1786,7 @@ func supportsInvisibleColumns() bool { if env.HasCapability(testenv.ServerCapabilityInvisibleColumn) { return true } - log.Infof("invisible columns not supported in %d.%d.%d", env.DBMajorVersion, env.DBMinorVersion, env.DBPatchVersion) + log.Info(fmt.Sprintf("invisible columns not supported in %d.%d.%d", env.DBMajorVersion, env.DBMinorVersion, env.DBPatchVersion)) return false } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go index dba1999cfb5..bb05fd5897d 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go @@ -18,6 +18,7 @@ package vreplication import ( "context" + "fmt" "io" "strings" "time" @@ -177,7 +178,7 @@ 
func (vc *vdbClient) ExecuteWithRetry(ctx context.Context, query string) (*sqlty qr, err := vc.Execute(query) for err != nil { if sqlErr, ok := err.(*sqlerror.SQLError); ok && (sqlErr.Number() == sqlerror.ERLockDeadlock || sqlErr.Number() == sqlerror.ERLockWaitTimeout) { - log.Infof("retryable error: %v, waiting for %v and retrying", sqlErr, dbLockRetryDelay) + log.Info(fmt.Sprintf("retryable error: %v, waiting for %v and retrying", sqlErr, dbLockRetryDelay)) if err := vc.Rollback(); err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index cf665750705..6964090d2bf 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -125,9 +125,9 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map settings.StopPos = pausePos saveStop = false } - log.Infof("Starting VReplication player id: %v, name: %v, startPos: %v, stop: %v", vr.id, vr.WorkflowName, settings.StartPos, settings.StopPos) - log.V(2).Infof("Starting VReplication player id: %v, startPos: %v, stop: %v, filter: %+v", - vr.id, settings.StartPos, settings.StopPos, vr.source.Filter) + log.Info(fmt.Sprintf("Starting VReplication player id: %v, name: %v, startPos: %v, stop: %v", vr.id, vr.WorkflowName, settings.StartPos, settings.StopPos)) + log.Debug(fmt.Sprintf("Starting VReplication player id: %v, startPos: %v, stop: %v, filter: %+v", + vr.id, settings.StartPos, settings.StopPos, vr.source.Filter)) queryFunc := func(ctx context.Context, sql string) (*sqltypes.Result, error) { return vr.dbClient.ExecuteWithRetry(ctx, sql) } @@ -144,10 +144,10 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map // immediately so we use ExecuteFetch directly. res, err := vr.dbClient.ExecuteFetch(SqlMaxAllowedPacket, 1) if err != nil { - log.Errorf("Error getting max_allowed_packet, will use the relay-log-max-size value of %d bytes: %v", vr.workflowConfig.RelayLogMaxSize, err) + log.Error(fmt.Sprintf("Error getting max_allowed_packet, will use the relay-log-max-size value of %d bytes: %v", vr.workflowConfig.RelayLogMaxSize, err)) } else { if maxAllowedPacket, err = res.Rows[0][0].ToInt64(); err != nil { - log.Errorf("Error getting max_allowed_packet, will use the relay-log-max-size value of %d bytes: %v", vr.workflowConfig.RelayLogMaxSize, err) + log.Error(fmt.Sprintf("Error getting max_allowed_packet, will use the relay-log-max-size value of %d bytes: %v", vr.workflowConfig.RelayLogMaxSize, err)) } } // Leave 64 bytes of room for the commit to be sure that we have a more than @@ -187,7 +187,7 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map // play is the entry point for playing binlogs. 
func (vp *vplayer) play(ctx context.Context) error { if !vp.stopPos.IsZero() && vp.startPos.AtLeast(vp.stopPos) { - log.Infof("Stop position %v already reached: %v", vp.startPos, vp.stopPos) + log.Info(fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) if vp.saveStop { return vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) } @@ -239,13 +239,13 @@ func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { dbForeignKeyChecksEnabled == vp.foreignKeyChecksEnabled /* no change in the state, no need to update */ { return nil } - log.Infof("Setting this session's foreign_key_checks to %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) + log.Info("Setting this session's foreign_key_checks to " + strconv.FormatBool(dbForeignKeyChecksEnabled)) if _, err := vp.query(ctx, "set @@session.foreign_key_checks="+strconv.FormatBool(dbForeignKeyChecksEnabled)); err != nil { return fmt.Errorf("failed to set session foreign_key_checks: %w", err) } vp.foreignKeyChecksEnabled = dbForeignKeyChecksEnabled if !vp.foreignKeyChecksStateInitialized { - log.Infof("First foreign_key_checks update to: %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) + log.Info("First foreign_key_checks update to: " + strconv.FormatBool(dbForeignKeyChecksEnabled)) vp.foreignKeyChecksStateInitialized = true } return nil @@ -262,7 +262,7 @@ func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { // one. This allows for the apply thread to catch up more quickly if // a backlog builds up. func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) { - log.Infof("Starting VReplication player id: %v, name: %v, startPos: %v, stop: %v", vp.vr.id, vp.vr.WorkflowName, vp.startPos, vp.stopPos) + log.Info(fmt.Sprintf("Starting VReplication player id: %v, name: %v, startPos: %v, stop: %v", vp.vr.id, vp.vr.WorkflowName, vp.startPos, vp.stopPos)) ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -396,7 +396,7 @@ func (vp *vplayer) updatePos(ctx context.Context, ts int64) (posReached bool, er vp.vr.stats.SetLastPosition(vp.pos) posReached = !vp.stopPos.IsZero() && vp.pos.AtLeast(vp.stopPos) if posReached { - log.Infof("Stopped at position: %v", vp.stopPos) + log.Info(fmt.Sprintf("Stopped at position: %v", vp.stopPos)) if vp.saveStop { if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { return false, err @@ -565,7 +565,7 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { if pos != "" { gtidLogMsg = " while processing position " + pos } - log.Errorf("Error applying event%s%s: %s", tableLogMsg, gtidLogMsg, err.Error()) + log.Error(fmt.Sprintf("Error applying event%s%s: %s", tableLogMsg, gtidLogMsg, err.Error())) err = vterrors.Wrapf(err, "error applying event%s%s", tableLogMsg, gtidLogMsg) } return err @@ -723,7 +723,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m return err } if err := vp.applyRowEvent(ctx, event.RowEvent); err != nil { - log.Infof("Error applying row event: %s", err.Error()) + log.Info("Error applying row event: " + err.Error()) return err } // Row event is logged AFTER RowChanges are applied so as to calculate the total elapsed @@ -734,7 +734,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m case binlogdatapb.VEventType_OTHER: if vp.vr.dbClient.InTransaction { // Unreachable - 
log.Errorf("internal error: vplayer is in a transaction on event: %v", event) + log.Error(fmt.Sprintf("internal error: vplayer is in a transaction on event: %v", event)) return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) } // Just update the position. @@ -748,7 +748,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m case binlogdatapb.VEventType_DDL: if vp.vr.dbClient.InTransaction { // Unreachable - log.Errorf("internal error: vplayer is in a transaction on event: %v", event) + log.Error(fmt.Sprintf("internal error: vplayer is in a transaction on event: %v", event)) return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) } vp.vr.stats.DDLEventActions.Add(vp.vr.source.OnDdl.String(), 1) // Record the DDL handling @@ -796,7 +796,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m } case binlogdatapb.OnDDLAction_EXEC_IGNORE: if _, err := vp.query(ctx, event.Statement); err != nil { - log.Infof("Ignoring error: %v for DDL: %s", err, event.Statement) + log.Info(fmt.Sprintf("Ignoring error: %v for DDL: %s", err, event.Statement)) } if stats != nil { stats.Send(event.Statement) @@ -812,7 +812,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m case binlogdatapb.VEventType_JOURNAL: if vp.vr.dbClient.InTransaction { // Unreachable - log.Errorf("internal error: vplayer is in a transaction on event: %v", event) + log.Error(fmt.Sprintf("internal error: vplayer is in a transaction on event: %v", event)) return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) } // Ensure that we don't have a partial set of table matches in the journal. @@ -847,7 +847,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m } // All were found. We must register journal. } - log.Infof("Binlog event registering journal event %+v", event.Journal) + log.Info(fmt.Sprintf("Binlog event registering journal event %+v", event.Journal)) if err := vp.vr.vre.registerJournal(event.Journal, vp.vr.id); err != nil { if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, err.Error()); err != nil { return err diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go index dc2b6f6bb16..d80233ec91f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go @@ -19,6 +19,7 @@ package vreplication import ( "context" "fmt" + "log/slog" "math" "os" "regexp" @@ -35,7 +36,6 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" vttablet "vitess.io/vitess/go/vt/vttablet/common" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" @@ -43,6 +43,16 @@ import ( qh "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication/queryhistory" ) +func logMessages(handler *log.CaptureHandler) string { + records := handler.Records() + messages := make([]string, 0, len(records)) + for _, record := range records { + messages = append(messages, record.Message) + } + + return strings.Join(messages, "\n") +} + // TestPlayerGeneratedInvisiblePrimaryKey confirms that the gipk column is replicated by vplayer, both for target // tables that have a gipk column and those that make it visible. 
func TestPlayerGeneratedInvisiblePrimaryKey(t *testing.T) { @@ -586,12 +596,9 @@ func TestPlayerStatementModeWithFilterAndErrorHandling(t *testing.T) { defer deleteTablet(addTablet(100)) // We want to check for the expected log message. - ole := log.Errorf - logger := logutil.NewMemoryLogger() - log.Errorf = logger.Errorf - defer func() { - log.Errorf = ole - }() + handler := log.NewCaptureHandler() + restoreLogger := log.SetLogger(slog.New(handler)) + defer restoreLogger() execStatements(t, []string{ "create table src1(id int, val varbinary(128), primary key(id))", @@ -635,7 +642,7 @@ func TestPlayerStatementModeWithFilterAndErrorHandling(t *testing.T) { execStatements(t, input) expectDBClientQueries(t, output) - logs := logger.String() + logs := logMessages(handler) require.Regexp(t, expectedMsg, logs) } @@ -2049,8 +2056,7 @@ func TestPlayerDDL(t *testing.T) { pos2b := primaryPosition(t) execStatements(t, []string{"alter table t1 drop column val"}) pos2 := primaryPosition(t) - log.Errorf("Expected log:: TestPlayerDDL Positions are: before first alter %v, after first alter %v, before second alter %v, after second alter %v", - pos0, pos1, pos2b, pos2) // For debugging only: to check what are the positions when test works and if/when it fails + log.Error(fmt.Sprintf("Expected log:: TestPlayerDDL Positions are: before first alter %v, after first alter %v, before second alter %v, after second alter %v", pos0, pos1, pos2b, pos2)) // For debugging only: to check what are the positions when test works and if/when it fails // Restart vreplication if _, err := playerEngine.Exec(fmt.Sprintf(`update _vt.vreplication set state = 'Running', message='' where id=%d`, id)); err != nil { t.Fatal(err) @@ -3815,16 +3821,15 @@ func TestPlayerStalls(t *testing.T) { defer deleteTablet(addTablet(100)) // We want to check for the expected log messages. - ole := log.Errorf - logger := logutil.NewMemoryLogger() - log.Errorf = logger.Errorf + handler := log.NewCaptureHandler() + restoreLogger := log.SetLogger(slog.New(handler)) oldMinimumHeartbeatUpdateInterval := vreplicationMinimumHeartbeatUpdateInterval oldProgressDeadline := vplayerProgressDeadline oldRelayLogMaxItems := vttablet.DefaultVReplicationConfig.RelayLogMaxItems oldRetryDelay := vttablet.DefaultVReplicationConfig.RetryDelay defer func() { - log.Errorf = ole + restoreLogger() vreplicationMinimumHeartbeatUpdateInterval = oldMinimumHeartbeatUpdateInterval vplayerProgressDeadline = oldProgressDeadline vttablet.DefaultVReplicationConfig.RelayLogMaxItems = oldRelayLogMaxItems @@ -3892,7 +3897,7 @@ func TestPlayerStalls(t *testing.T) { postFunc: func() { time.Sleep(vplayerProgressDeadline) log.Flush() - require.Contains(t, logger.String(), relayLogIOStalledMsg, "expected log message not found") + require.Contains(t, logMessages(handler), relayLogIOStalledMsg, "expected log message not found") execStatements(t, []string{"set @@session.binlog_format='ROW'"}) }, }, @@ -3928,7 +3933,7 @@ func TestPlayerStalls(t *testing.T) { // Signal the preFunc goroutine to close the connection holding the row locks. done <- struct{}{} log.Flush() - require.Contains(t, logger.String(), failedToRecordHeartbeatMsg, "expected log message not found") + require.Contains(t, logMessages(handler), failedToRecordHeartbeatMsg, "expected log message not found") }, // Nothing should get replicated because of the exclusing row locks // held in the other connection from our preFunc. 
@@ -3949,7 +3954,7 @@ func TestPlayerStalls(t *testing.T) { if tc.postFunc != nil { tc.postFunc() } - logger.Clear() + handler.Reset() }) } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index 00a6b21824f..c63e63f9db5 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -148,8 +148,7 @@ func newVReplicator(id int32, source *binlogdatapb.BinlogSource, sourceVStreamer workflowConfig = vttablet.DefaultVReplicationConfig } if workflowConfig.HeartbeatUpdateInterval > vreplicationMinimumHeartbeatUpdateInterval { - log.Warningf("The supplied value for vreplication-heartbeat-update-interval:%d seconds is larger than the maximum allowed:%d seconds, vreplication will fallback to %d", - workflowConfig.HeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval) + log.Warn(fmt.Sprintf("The supplied value for vreplication-heartbeat-update-interval:%d seconds is larger than the maximum allowed:%d seconds, vreplication will fallback to %d", workflowConfig.HeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval)) } vttablet.InitVReplicationConfigDefaults() vr := &vreplicator{ @@ -293,16 +292,16 @@ func (vr *vreplicator) replicate(ctx context.Context) error { switch { case numTablesToCopy != 0: if err := vr.clearFKCheck(vr.dbClient); err != nil { - log.Warningf("Unable to clear FK check %v", err) + log.Warn(fmt.Sprintf("Unable to clear FK check %v", err)) return err } if err := vr.clearFKRestrict(vr.dbClient); err != nil { - log.Warningf("Unable to clear FK restrict %v", err) + log.Warn(fmt.Sprintf("Unable to clear FK restrict %v", err)) return err } if vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy) { if err := newVCopier(vr).copyAll(ctx, settings); err != nil { - log.Infof("Error atomically copying all tables: %v", err) + log.Info(fmt.Sprintf("Error atomically copying all tables: %v", err)) vr.stats.ErrorCounts.Add([]string{"CopyAll"}, 1) return err } @@ -333,11 +332,11 @@ func (vr *vreplicator) replicate(ctx context.Context) error { } default: if err := vr.resetFKCheckAfterCopy(vr.dbClient); err != nil { - log.Warningf("Unable to reset FK check %v", err) + log.Warn(fmt.Sprintf("Unable to reset FK check %v", err)) return err } if err := vr.resetFKRestrictAfterCopy(vr.dbClient); err != nil { - log.Warningf("Unable to reset FK restrict %v", err) + log.Warn(fmt.Sprintf("Unable to reset FK restrict %v", err)) return err } if vr.source.StopAfterCopy { @@ -611,7 +610,7 @@ func (vr *vreplicator) setSQLMode(ctx context.Context, dbClient *vdbClient) (fun query := fmt.Sprintf(setSQLModeQueryf, vr.originalSQLMode) _, err := dbClient.Execute(query) if err != nil { - log.Warningf("Could not reset sql_mode on target using %s: %v", query, err) + log.Warn(fmt.Sprintf("Could not reset sql_mode on target using %s: %v", query, err)) } } vreplicationSQLMode := SQLMode @@ -787,8 +786,7 @@ func (vr *vreplicator) stashSecondaryKeys(ctx context.Context, tableName string) // READ-ONLY mode. 
dbClient, err := vr.newClientConnection(ctx) if err != nil { - log.Errorf("Unable to connect to the database when saving secondary keys for deferred creation on the %q table in the %q VReplication workflow: %v", - tableName, vr.WorkflowName, err) + log.Error(fmt.Sprintf("Unable to connect to the database when saving secondary keys for deferred creation on the %q table in the %q VReplication workflow: %v", tableName, vr.WorkflowName, err)) return vterrors.Wrap(err, "unable to connect to the database when saving secondary keys for deferred creation") } defer dbClient.Close() @@ -872,8 +870,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string // mode. dbClient, err := vr.newClientConnection(ctx) if err != nil { - log.Errorf("Unable to connect to the database when executing post copy actions on the %q table in the %q VReplication workflow: %v", - tableName, vr.WorkflowName, err) + log.Error(fmt.Sprintf("Unable to connect to the database when executing post copy actions on the %q table in the %q VReplication workflow: %v", tableName, vr.WorkflowName, err)) return vterrors.Wrap(err, "unable to connect to the database when executing post copy actions") } defer dbClient.Close() @@ -956,11 +953,9 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string select { // Only cancel an ongoing ALTER if the engine is closing. case <-vr.vre.ctx.Done(): - log.Infof("Copy of the %q table stopped when performing the following post copy action in the %q VReplication workflow: %+v", - tableName, vr.WorkflowName, action) + log.Info(fmt.Sprintf("Copy of the %q table stopped when performing the following post copy action in the %q VReplication workflow: %+v", tableName, vr.WorkflowName, action)) if err := killAction(action); err != nil { - log.Errorf("Failed to kill post copy action on the %q table in the %q VReplication workflow: %v", - tableName, vr.WorkflowName, err) + log.Error(fmt.Sprintf("Failed to kill post copy action on the %q table in the %q VReplication workflow: %v", tableName, vr.WorkflowName, err)) } return case <-done: @@ -1052,8 +1047,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string switch action.Type { case PostCopyActionSQL: - log.Infof("Executing post copy SQL action on the %q table in the %q VReplication workflow: %s", - tableName, vr.WorkflowName, action.Task) + log.Info(fmt.Sprintf("Executing post copy SQL action on the %q table in the %q VReplication workflow: %s", tableName, vr.WorkflowName, action.Task)) // This will return an io.EOF / MySQL CRServerLost (errno 2013) // error if it is killed by the monitoring goroutine. 
if _, err := dbClient.ExecuteFetch(action.Task, -1); err != nil { @@ -1156,9 +1150,9 @@ func (vr *vreplicator) setExistingRowsCopied() { if vr.stats.CopyRowCount.Get() == 0 { rowsCopiedExisting, err := vr.readExistingRowsCopied(vr.id) if err != nil { - log.Warningf("Failed to read existing rows copied value for %s workflow: %v", vr.WorkflowName, err) + log.Warn(fmt.Sprintf("Failed to read existing rows copied value for %s workflow: %v", vr.WorkflowName, err)) } else if rowsCopiedExisting != 0 { - log.Infof("Resuming the %s vreplication workflow started on another tablet, setting rows copied counter to %v", vr.WorkflowName, rowsCopiedExisting) + log.Info(fmt.Sprintf("Resuming the %s vreplication workflow started on another tablet, setting rows copied counter to %v", vr.WorkflowName, rowsCopiedExisting)) vr.stats.CopyRowCount.Set(rowsCopiedExisting) } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vrlog.go b/go/vt/vttablet/tabletmanager/vreplication/vrlog.go index 42b8f86a60b..d65b81b00da 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vrlog.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vrlog.go @@ -21,6 +21,7 @@ limitations under the License. package vreplication import ( + "fmt" "net/http" "strconv" "sync" @@ -88,7 +89,7 @@ func vrlogStatsHandler(ch chan *VrLogStats, w http.ResponseWriter, r *http.Reque default: } if err := vrLogStatsTemplate.Execute(w, stats); err != nil { - log.Errorf("vrlog: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("vrlog: couldn't execute template: %v", err)) } if f, ok := w.(http.Flusher); ok { f.Flush() diff --git a/go/vt/vttablet/tabletserver/binlog_watcher.go b/go/vt/vttablet/tabletserver/binlog_watcher.go index 80ac1194c7e..ca143e54c06 100644 --- a/go/vt/vttablet/tabletserver/binlog_watcher.go +++ b/go/vt/vttablet/tabletserver/binlog_watcher.go @@ -18,6 +18,7 @@ package tabletserver import ( "context" + "fmt" "sync" "time" @@ -96,7 +97,7 @@ func (blw *BinlogWatcher) process(ctx context.Context) { err := blw.vs.Stream(ctx, "current", nil, filter, throttlerapp.BinlogWatcherName, func(events []*binlogdatapb.VEvent) error { return nil }, nil) - log.Infof("ReplicationWatcher VStream ended: %v, retrying in 5 seconds", err) + log.Info(fmt.Sprintf("ReplicationWatcher VStream ended: %v, retrying in 5 seconds", err)) select { case <-ctx.Done(): return diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go index ea5308fe90f..faeb1bb91ff 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -456,7 +456,7 @@ func (dbc *Conn) KillQuery(reason string, elapsed time.Duration) error { // vttablet. func (dbc *Conn) kill(ctx context.Context, reason string, elapsed time.Duration) error { dbc.stats.KillCounters.Add("Connections", 1) - log.Infof("Due to %s, elapsed time: %v, killing connection ID %v %s", reason, elapsed, dbc.conn.ID(), dbc.CurrentForLogging()) + log.Info(fmt.Sprintf("Due to %s, elapsed time: %v, killing connection ID %v %s", reason, elapsed, dbc.conn.ID(), dbc.CurrentForLogging())) // Client side action. Set error and close connection. dbc.errmu.Lock() @@ -467,7 +467,7 @@ func (dbc *Conn) kill(ctx context.Context, reason string, elapsed time.Duration) // Server side action. Kill the session. 
killConn, err := dbc.dbaPool.Get(ctx) if err != nil { - log.Warningf("Failed to get conn from dba pool: %v", err) + log.Warn(fmt.Sprintf("Failed to get conn from dba pool: %v", err)) return err } defer killConn.Recycle() @@ -485,11 +485,11 @@ func (dbc *Conn) kill(ctx context.Context, reason string, elapsed time.Duration) killConn.Close() dbc.stats.InternalErrors.Add("HungConnection", 1) - log.Warningf("Failed to kill MySQL connection ID %d which was executing the following query, it may be hung: %s", dbc.conn.ID(), dbc.CurrentForLogging()) + log.Warn(fmt.Sprintf("Failed to kill MySQL connection ID %d which was executing the following query, it may be hung: %s", dbc.conn.ID(), dbc.CurrentForLogging())) return context.Cause(ctx) case err := <-ch: if err != nil { - log.Errorf("Could not kill connection ID %v %s: %v", dbc.conn.ID(), dbc.CurrentForLogging(), err) + log.Error(fmt.Sprintf("Could not kill connection ID %v %s: %v", dbc.conn.ID(), dbc.CurrentForLogging(), err)) return err } return nil @@ -500,7 +500,7 @@ func (dbc *Conn) kill(ctx context.Context, reason string, elapsed time.Duration) // and on the connection side. func (dbc *Conn) killQuery(ctx context.Context, reason string, elapsed time.Duration) error { dbc.stats.KillCounters.Add("Queries", 1) - log.Infof("Due to %s, elapsed time: %v, killing query ID %v %s", reason, elapsed, dbc.conn.ID(), dbc.CurrentForLogging()) + log.Info(fmt.Sprintf("Due to %s, elapsed time: %v, killing query ID %v %s", reason, elapsed, dbc.conn.ID(), dbc.CurrentForLogging())) // Client side action. Set error for killing the query on timeout. dbc.errmu.Lock() @@ -510,7 +510,7 @@ func (dbc *Conn) killQuery(ctx context.Context, reason string, elapsed time.Dura // Server side action. Kill the executing query. killConn, err := dbc.dbaPool.Get(ctx) if err != nil { - log.Warningf("Failed to get conn from dba pool: %v", err) + log.Warn(fmt.Sprintf("Failed to get conn from dba pool: %v", err)) return err } defer killConn.Recycle() @@ -528,11 +528,11 @@ func (dbc *Conn) killQuery(ctx context.Context, reason string, elapsed time.Dura killConn.Close() dbc.stats.InternalErrors.Add("HungQuery", 1) - log.Warningf("Failed to kill MySQL query ID %d which was executing the following query, it may be hung: %s", dbc.conn.ID(), dbc.CurrentForLogging()) + log.Warn(fmt.Sprintf("Failed to kill MySQL query ID %d which was executing the following query, it may be hung: %s", dbc.conn.ID(), dbc.CurrentForLogging())) return context.Cause(ctx) case err := <-ch: if err != nil { - log.Errorf("Could not kill query ID %v %s: %v", dbc.conn.ID(), dbc.CurrentForLogging(), err) + log.Error(fmt.Sprintf("Could not kill query ID %v %s: %v", dbc.conn.ID(), dbc.CurrentForLogging(), err)) return err } return nil diff --git a/go/vt/vttablet/tabletserver/debug_2pc.go b/go/vt/vttablet/tabletserver/debug_2pc.go index 5db72be0fba..548cd1b046a 100644 --- a/go/vt/vttablet/tabletserver/debug_2pc.go +++ b/go/vt/vttablet/tabletserver/debug_2pc.go @@ -20,6 +20,7 @@ package tabletserver import ( "context" + "fmt" "os" "path" "strconv" @@ -46,7 +47,7 @@ func commitPreparedDelayForTest(tsv *TabletServer) { if tsv.sm.target.Shard == sh { delay := readFileForTestSynchronization("VT_DELAY_COMMIT_TIME") delVal, _ := strconv.Atoi(delay) - log.Infof("Delaying commit for shard %v for %d seconds", sh, delVal) + log.Info(fmt.Sprintf("Delaying commit for shard %v for %d seconds", sh, delVal)) time.Sleep(time.Duration(delVal) * time.Second) } } diff --git a/go/vt/vttablet/tabletserver/debugenv.go 
b/go/vt/vttablet/tabletserver/debugenv.go index 6f1ea854ea9..c3e79f106c4 100644 --- a/go/vt/vttablet/tabletserver/debugenv.go +++ b/go/vt/vttablet/tabletserver/debugenv.go @@ -232,7 +232,7 @@ func respondWithHTML(w http.ResponseWriter, vars []envValue, msg string) { w.Write(debugEnvHeader) for _, v := range vars { if err := debugEnvRow.Execute(w, v); err != nil { - log.Errorf("debugenv: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("debugenv: couldn't execute template: %v", err)) } } w.Write(endTable) diff --git a/go/vt/vttablet/tabletserver/dt_executor.go b/go/vt/vttablet/tabletserver/dt_executor.go index 04a6f7f23db..dfc6f5dafb6 100644 --- a/go/vt/vttablet/tabletserver/dt_executor.go +++ b/go/vt/vttablet/tabletserver/dt_executor.go @@ -18,6 +18,7 @@ package tabletserver import ( "context" + "fmt" "time" "vitess.io/vitess/go/trace" @@ -160,7 +161,7 @@ func (dte *DTExecutor) CommitPrepared(dtid string) (err error) { ctx := trace.CopySpan(context.Background(), dte.ctx) defer func() { if err != nil { - log.Warningf("failed to commit the prepared transaction '%s' with error: %v", dtid, err) + log.Warn(fmt.Sprintf("failed to commit the prepared transaction '%s' with error: %v", dtid, err)) fail := dte.te.checkErrorAndMarkFailed(ctx, dtid, err, "TwopcCommit") if fail { dte.te.env.Stats().CommitPreparedFail.Add("NonRetryable", 1) @@ -172,7 +173,7 @@ func (dte *DTExecutor) CommitPrepared(dtid string) (err error) { }() if DebugTwoPc { if err := checkTestFailure(dte.ctx, dte.shardFunc()); err != nil { - log.Errorf("failing test on commit prepared: %v", err) + log.Error(fmt.Sprintf("failing test on commit prepared: %v", err)) return err } } @@ -282,7 +283,7 @@ func (dte *DTExecutor) SetRollback(dtid string, transactionID int64) error { dte.te.Rollback(dte.ctx, transactionID) } else { // This is a warning because it should not happen in normal operation. - log.Warningf("SetRollback called with no transactionID for dtid %s", dtid) + log.Warn("SetRollback called with no transactionID for dtid " + dtid) } return dte.inTransaction(func(conn *StatefulConnection) error { diff --git a/go/vt/vttablet/tabletserver/gc/tablegc.go b/go/vt/vttablet/tabletserver/gc/tablegc.go index 2825c2ed757..cc4f72560be 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc.go @@ -205,7 +205,7 @@ func (collector *TableGC) Open() (err error) { if err != nil { return err } - log.Infof("TableGC: MySQL version=%v, lifecycleStates=%v", conn.ServerVersion, collector.lifecycleStates) + log.Info(fmt.Sprintf("TableGC: MySQL version=%v, lifecycleStates=%v", conn.ServerVersion, collector.lifecycleStates)) ctx := context.Background() ctx, collector.cancelOperation = context.WithCancel(ctx) @@ -254,12 +254,12 @@ func adjustLifecycleForFastDrops(conn capabilityConn, lifecycleStates map[schema // Close frees resources func (collector *TableGC) Close() { - log.Infof("TableGC - started execution of Close. Acquiring initMutex lock") + log.Info("TableGC - started execution of Close. 
Acquiring initMutex lock") collector.stateMutex.Lock() defer collector.stateMutex.Unlock() - log.Infof("TableGC - acquired lock") + log.Info("TableGC - acquired lock") if collector.isOpen == 0 { - log.Infof("TableGC - no collector is open") + log.Info("TableGC - no collector is open") // not open return } @@ -268,10 +268,10 @@ func (collector *TableGC) Close() { if collector.cancelOperation != nil { collector.cancelOperation() } - log.Infof("TableGC - closing pool") + log.Info("TableGC - closing pool") collector.pool.Close() atomic.StoreInt64(&collector.isOpen, 0) - log.Infof("TableGC - finished execution of Close") + log.Info("TableGC - finished execution of Close") } // RequestChecks requests that the GC will do a table check right away, as well as in a few seconds. @@ -317,7 +317,7 @@ func (collector *TableGC) operate(ctx context.Context) { go tableCheckTicker.TickNow() case <-tableCheckTicker.C: if err := collector.readAndCheckTables(ctx, dropTablesChan, transitionRequestsChan); err != nil { - log.Error(err) + log.Error(fmt.Sprint(err)) } case <-purgeReentranceTicker.C: // relay the request @@ -326,7 +326,7 @@ func (collector *TableGC) operate(ctx context.Context) { go func() { tableName, err := collector.purge(ctx) if err != nil { - log.Errorf("TableGC: error purging table %s: %+v", tableName, err) + log.Error(fmt.Sprintf("TableGC: error purging table %s: %+v", tableName, err)) return } if tableName == "" { @@ -343,14 +343,14 @@ func (collector *TableGC) operate(ctx context.Context) { purgeReentranceTicker.TickAfter(nextPurgeReentry) }() case dropTable := <-dropTablesChan: - log.Infof("TableGC: found %v in dropTablesChan", dropTable.tableName) + log.Info(fmt.Sprintf("TableGC: found %v in dropTablesChan", dropTable.tableName)) if err := collector.dropTable(ctx, dropTable.tableName, dropTable.isBaseTable); err != nil { - log.Errorf("TableGC: error dropping table %s: %+v", dropTable.tableName, err) + log.Error(fmt.Sprintf("TableGC: error dropping table %s: %+v", dropTable.tableName, err)) } case transition := <-transitionRequestsChan: - log.Info("TableGC: transitionRequestsChan, transition=%v", transition) + log.Info(fmt.Sprintf("TableGC: transitionRequestsChan, transition=%v", transition)) if err := collector.transitionTable(ctx, transition); err != nil { - log.Errorf("TableGC: error transitioning table %s to %+v: %+v", transition.fromTableName, transition.toGCState, err) + log.Error(fmt.Sprintf("TableGC: error transitioning table %s to %+v: %+v", transition.fromTableName, transition.toGCState, err)) } } } @@ -395,7 +395,7 @@ func (collector *TableGC) generateTansition(ctx context.Context, fromState schem // submitTransitionRequest generates and queues a transition request for a given table func (collector *TableGC) submitTransitionRequest(ctx context.Context, transitionRequestsChan chan<- *transitionRequest, fromState schema.TableGCState, fromTableName string, isBaseTable bool, uuid string) { - log.Infof("TableGC: submitting transition request for %s", fromTableName) + log.Info("TableGC: submitting transition request for " + fromTableName) go func() { transition := collector.generateTansition(ctx, fromState, fromTableName, isBaseTable, uuid) if transition != nil { @@ -482,7 +482,7 @@ func (collector *TableGC) checkTables(ctx context.Context, gcTables []*gcTable, table := gcTables[i] // we capture as local variable as we will later use this in a goroutine shouldTransition, state, uuid, err := collector.shouldTransitionTable(table.tableName) if err != nil { - log.Errorf("TableGC: error 
while checking tables: %+v", err) + log.Error(fmt.Sprintf("TableGC: error while checking tables: %+v", err)) continue } if !shouldTransition { @@ -490,7 +490,7 @@ func (collector *TableGC) checkTables(ctx context.Context, gcTables []*gcTable, continue } - log.Infof("TableGC: will operate on table %s", table.tableName) + log.Info("TableGC: will operate on table " + table.tableName) if state == schema.HoldTableGCState { // Hold period expired. Moving to next state @@ -578,13 +578,13 @@ func (collector *TableGC) purge(ctx context.Context) (tableName string, err erro defer func() { if sqlLogBinDisabled && !conn.IsClosed() { if _, err := conn.ExecuteFetch("SET sql_log_bin = ON", 0, false); err != nil { - log.Errorf("TableGC: error setting sql_log_bin = ON: %+v", err) + log.Error(fmt.Sprintf("TableGC: error setting sql_log_bin = ON: %+v", err)) // a followup defer() will run conn.Close() at any case. } } }() - log.Infof("TableGC: purge begin for %s", tableName) + log.Info("TableGC: purge begin for " + tableName) for { if ctx.Err() != nil { // cancelled @@ -602,7 +602,7 @@ func (collector *TableGC) purge(ctx context.Context) (tableName string, err erro return tableName, err } if res.RowsAffected == 0 { - log.Infof("TableGC: purge complete for %s", tableName) + log.Info("TableGC: purge complete for " + tableName) return tableName, nil } } @@ -623,12 +623,12 @@ func (collector *TableGC) dropTable(ctx context.Context, tableName string, isBas } parsed := sqlparser.BuildParsedQuery(sqlDrop, tableName) - log.Infof("TableGC: dropping table: %s", tableName) + log.Info("TableGC: dropping table: " + tableName) _, err = conn.Conn.ExecuteFetch(parsed.Query, 1, false) if err != nil { return err } - log.Infof("TableGC: dropped table: %s, isBaseTable: %v", tableName, isBaseTable) + log.Info(fmt.Sprintf("TableGC: dropped table: %s, isBaseTable: %v", tableName, isBaseTable)) return nil } @@ -660,12 +660,12 @@ func (collector *TableGC) transitionTable(ctx context.Context, transition *trans return err } - log.Infof("TableGC: renaming table: %s to %s", transition.fromTableName, toTableName) + log.Info(fmt.Sprintf("TableGC: renaming table: %s to %s", transition.fromTableName, toTableName)) _, err = conn.Conn.Exec(ctx, renameStatement, 1, true) if err != nil { return err } - log.Infof("TableGC: renamed table: %s", transition.fromTableName) + log.Info("TableGC: renamed table: " + transition.fromTableName) // Since the table has transitioned, there is a potential for more work on this table or on other tables, // let's kick a check request. collector.RequestChecks() diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go index 2492b59aaff..9cb185cd436 100644 --- a/go/vt/vttablet/tabletserver/health_streamer.go +++ b/go/vt/vttablet/tabletserver/health_streamer.go @@ -239,7 +239,7 @@ func (hs *healthStreamer) broadCastToClients(shr *querypb.StreamHealthResponse) // when there hasn't been an update and/or move away from using channels toward a model where // old updates can be purged from the buffer in favor of more recent updates (since only the // most recent health state really matters to gates). - log.Warning("A streaming health buffer is full. Closing the channel") + log.Warn("A streaming health buffer is full. 
Closing the channel") close(ch) delete(hs.clients, ch) } @@ -303,7 +303,7 @@ func (hs *healthStreamer) MakePrimary(serving bool) { if serving && hs.signalWhenSchemaChange { hs.se.RegisterNotifier("healthStreamer", func(full map[string]*schema.Table, created, altered, dropped []*schema.Table, udfsChanged bool) { if err := hs.reload(created, altered, dropped, udfsChanged); err != nil { - log.Errorf("periodic schema reload failed in health stream: %v", err) + log.Error(fmt.Sprintf("periodic schema reload failed in health stream: %v", err)) } }, false) } diff --git a/go/vt/vttablet/tabletserver/livequeryz.go b/go/vt/vttablet/tabletserver/livequeryz.go index 49aff0c163a..244313a349d 100644 --- a/go/vt/vttablet/tabletserver/livequeryz.go +++ b/go/vt/vttablet/tabletserver/livequeryz.go @@ -84,7 +84,7 @@ func livequeryzHandler(queryLists []*QueryList, w http.ResponseWriter, r *http.R w.Write(livequeryzHeader) for i := range rows { if err := livequeryzTmpl.Execute(w, rows[i]); err != nil { - log.Errorf("livequeryz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("livequeryz: couldn't execute template: %v", err)) } } } diff --git a/go/vt/vttablet/tabletserver/messager/cache.go b/go/vt/vttablet/tabletserver/messager/cache.go index d793170ec9f..8940e00223c 100644 --- a/go/vt/vttablet/tabletserver/messager/cache.go +++ b/go/vt/vttablet/tabletserver/messager/cache.go @@ -113,14 +113,14 @@ func (mc *cache) IsEmpty() bool { // Clear clears the cache. func (mc *cache) Clear() { - log.Infof("messager cache - Clearing cache. Acquiring my lock") + log.Info("messager cache - Clearing cache. Acquiring my lock") mc.mu.Lock() - log.Infof("messager cache - acquired lock") + log.Info("messager cache - acquired lock") defer mc.mu.Unlock() mc.sendQueue = nil mc.inQueue = make(map[string]*MessageRow) mc.inFlight = make(map[string]bool) - log.Infof("messager cache - cache cleared") + log.Info("messager cache - cache cleared") } // Add adds a MessageRow to the cache. It returns diff --git a/go/vt/vttablet/tabletserver/messager/engine.go b/go/vt/vttablet/tabletserver/messager/engine.go index 9d8b09e819a..caae820f295 100644 --- a/go/vt/vttablet/tabletserver/messager/engine.go +++ b/go/vt/vttablet/tabletserver/messager/engine.go @@ -18,6 +18,7 @@ package messager import ( "context" + "fmt" "sync" "golang.org/x/sync/semaphore" @@ -94,18 +95,18 @@ func (me *Engine) Open() { // Close closes the Engine service. func (me *Engine) Close() { - log.Infof("messager Engine - started execution of Close. Acquiring mu lock") + log.Info("messager Engine - started execution of Close. 
Acquiring mu lock") me.mu.Lock() - log.Infof("messager Engine - acquired mu lock") + log.Info("messager Engine - acquired mu lock") defer me.mu.Unlock() if !me.isOpen { - log.Infof("messager Engine is not open") + log.Info("messager Engine is not open") return } me.isOpen = false - log.Infof("messager Engine - unregistering notifiers") + log.Info("messager Engine - unregistering notifiers") me.se.UnregisterNotifier("messages") - log.Infof("messager Engine - closing all managers") + log.Info("messager Engine - closing all managers") me.managersMu.Lock() defer me.managersMu.Unlock() for _, mm := range me.managers { @@ -156,7 +157,7 @@ func (me *Engine) schemaChanged(tables map[string]*schema.Table, created, altere if mm == nil { continue } - log.Infof("Stopping messager for dropped/updated table: %v", name) + log.Info(fmt.Sprintf("Stopping messager for dropped/updated table: %v", name)) mm.Close() delete(me.managers, name) } @@ -168,12 +169,12 @@ func (me *Engine) schemaChanged(tables map[string]*schema.Table, created, altere } if me.managers[name] != nil { me.tsv.Stats().InternalErrors.Add("Messages", 1) - log.Errorf("Newly created table already exists in messages: %s", name) + log.Error("Newly created table already exists in messages: " + name) continue } mm := newMessageManager(me.tsv, me.vs, t, me.postponeSema) me.managers[name] = mm - log.Infof("Starting messager for table: %v", name) + log.Info(fmt.Sprintf("Starting messager for table: %v", name)) mm.Open() } } diff --git a/go/vt/vttablet/tabletserver/messager/message_manager.go b/go/vt/vttablet/tabletserver/messager/message_manager.go index 6abee0ac9ce..edfd7d4f5ca 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager.go @@ -369,37 +369,37 @@ func (mm *messageManager) Open() { // Close stops the messageManager service. func (mm *messageManager) Close() { - log.Infof("messageManager (%v) - started execution of Close", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - started execution of Close", mm.name)) mm.pollerTicks.Stop() mm.purgeTicks.Stop() - log.Infof("messageManager (%v) - stopped the ticks. Acquiring mu Lock", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - stopped the ticks. Acquiring mu Lock", mm.name)) mm.mu.Lock() - log.Infof("messageManager (%v) - acquired mu Lock", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - acquired mu Lock", mm.name)) if !mm.isOpen { - log.Infof("messageManager (%v) - manager is not open", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - manager is not open", mm.name)) mm.mu.Unlock() return } mm.isOpen = false - log.Infof("messageManager (%v) - cancelling all receivers", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - cancelling all receivers", mm.name)) for _, rcvr := range mm.receivers { rcvr.receiver.cancel() } mm.receivers = nil MessageStats.Set([]string{mm.name.String(), "ClientCount"}, 0) - log.Infof("messageManager (%v) - clearing cache", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - clearing cache", mm.name)) mm.cache.Clear() - log.Infof("messageManager (%v) - sending a broadcast", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - sending a broadcast", mm.name)) // This broadcast will cause runSend to exit. 
mm.cond.Broadcast() - log.Infof("messageManager (%v) - stopping VStream", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - stopping VStream", mm.name)) mm.stopVStream() mm.mu.Unlock() - log.Infof("messageManager (%v) - Waiting for the wait group", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - Waiting for the wait group", mm.name)) mm.wg.Wait() - log.Infof("messageManager (%v) - closed", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - closed", mm.name)) } // Subscribe registers the send function as a receiver of messages @@ -417,7 +417,7 @@ func (mm *messageManager) Subscribe(ctx context.Context, send func(*sqltypes.Res } if err := receiver.Send(mm.fieldResult); err != nil { - log.Errorf("messageManager (%v) - Terminating connection due to error sending field info: %v", mm.name, err) + log.Error(fmt.Sprintf("messageManager (%v) - Terminating connection due to error sending field info: %v", mm.name, err)) receiver.cancel() return done } @@ -581,7 +581,7 @@ func (mm *messageManager) runSend() { go func() { err := mm.send(context.Background(), receiver, &sqltypes.Result{Rows: rows}) // calls the offsetting mm.wg.Done() if err != nil { - log.Errorf("messageManager (%v) - send failed: %v", mm.name, err) + log.Error(fmt.Sprintf("messageManager (%v) - send failed: %v", mm.name, err)) } }() } @@ -624,7 +624,7 @@ func (mm *messageManager) send(ctx context.Context, receiver *receiverWithStatus // Log the error, but we still want to postpone the message. // Otherwise, if this is a chronic failure like "message too // big", we'll end up spamming non-stop. - log.Errorf("messageManager (%v) - Error sending messages: %v: %v", mm.name, qr, err) + log.Error(fmt.Sprintf("messageManager (%v) - Error sending messages: %v: %v", mm.name, qr, err)) } return mm.postpone(ctx, mm.tsv, mm.ackWaitTime, ids) } @@ -655,7 +655,7 @@ func (mm *messageManager) startVStream() { } func (mm *messageManager) stopVStream() { - log.Infof("messageManager (%v) - calling stream cancel", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - calling stream cancel", mm.name)) if mm.streamCancel != nil { mm.streamCancel() mm.streamCancel = nil @@ -667,12 +667,12 @@ func (mm *messageManager) runVStream(ctx context.Context) { err := mm.runOneVStream(ctx) select { case <-ctx.Done(): - log.Info("messageManager (%v) - Context canceled, exiting vstream", mm.name) + log.Info(fmt.Sprintf("messageManager (%v) - Context canceled, exiting vstream", mm.name)) return default: } MessageStats.Add([]string{mm.name.String(), "VStreamFailed"}, 1) - log.Infof("messageManager (%v) - VStream ended: %v, retrying in 5 seconds", mm.name, err) + log.Info(fmt.Sprintf("messageManager (%v) - VStream ended: %v, retrying in 5 seconds", mm.name, err)) time.Sleep(5 * time.Second) } } @@ -818,7 +818,7 @@ func (mm *messageManager) runPoller() { mr, err := BuildMessageRow(row) if err != nil { mm.tsv.Stats().InternalErrors.Add("Messages", 1) - log.Errorf("messageManager (%v) - Error reading message row: %v", mm.name, err) + log.Error(fmt.Sprintf("messageManager (%v) - Error reading message row: %v", mm.name, err)) continue } if !mm.cache.Add(mr) { @@ -839,7 +839,7 @@ func (mm *messageManager) runPurge() { count, err := mm.tsv.PurgeMessages(ctx, nil, mm, time.Now().Add(-mm.purgeAfter).UnixNano()) if err != nil { MessageStats.Add([]string{mm.name.String(), "PurgeFailed"}, 1) - log.Errorf("messageManager (%v) - Unable to delete messages: %v", mm.name, err) + log.Error(fmt.Sprintf("messageManager (%v) - Unable to delete messages: %v", mm.name, err)) } 
else { MessageStats.Add([]string{mm.name.String(), "Purged"}, count) } @@ -942,7 +942,7 @@ func (mm *messageManager) readPending(ctx context.Context, bindVars map[string]* query, err := mm.readByPriorityAndTimeNext.GenerateQuery(bindVars, nil) if err != nil { mm.tsv.Stats().InternalErrors.Add("Messages", 1) - log.Errorf("messageManager (%v) - Error reading rows from message table: %v", mm.name, err) + log.Error(fmt.Sprintf("messageManager (%v) - Error reading rows from message table: %v", mm.name, err)) return nil, err } qr := &sqltypes.Result{} diff --git a/go/vt/vttablet/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go index d86acd6f97b..3896a0986d2 100644 --- a/go/vt/vttablet/tabletserver/query_engine.go +++ b/go/vt/vttablet/tabletserver/query_engine.go @@ -237,8 +237,7 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { qe.consolidatorMode.Store(config.Consolidator) qe.consolidator = sync2.NewConsolidator() if config.ConsolidatorStreamTotalSize > 0 && config.ConsolidatorStreamQuerySize > 0 { - log.Infof("Stream consolidator is enabled with query size set to %d and total size set to %d.", - config.ConsolidatorStreamQuerySize, config.ConsolidatorStreamTotalSize) + log.Info(fmt.Sprintf("Stream consolidator is enabled with query size set to %d and total size set to %d.", config.ConsolidatorStreamQuerySize, config.ConsolidatorStreamTotalSize)) qe.streamConsolidator = NewStreamConsolidator(config.ConsolidatorStreamTotalSize, config.ConsolidatorStreamQuerySize, returnStreamResult) } else { log.Info("Stream consolidator is not enabled.") @@ -253,13 +252,13 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { if config.TableACLExemptACL != "" { if f, err := tableacl.GetCurrentACLFactory(); err == nil { if exemptACL, err := f.New([]string{config.TableACLExemptACL}); err == nil { - log.Infof("Setting Table ACL exempt rule for %v", config.TableACLExemptACL) + log.Info(fmt.Sprintf("Setting Table ACL exempt rule for %v", config.TableACLExemptACL)) qe.exemptACL = exemptACL } else { - log.Infof("Cannot build exempt ACL for table ACL: %v", err) + log.Info(fmt.Sprintf("Cannot build exempt ACL for table ACL: %v", err)) } } else { - log.Infof("Cannot get current ACL Factory: %v", err) + log.Info(fmt.Sprintf("Cannot get current ACL Factory: %v", err)) } } diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index c18f36ab8b4..68a523b633d 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -634,7 +634,7 @@ func (qre *QueryExecutor) execDDL(conn *StatefulConnection) (result *sqltypes.Re // after every DDL, let them be outdated until the periodic // schema reload fixes it. if err := qre.tsv.se.ReloadAtEx(qre.ctx, replication.Position{}, false); err != nil { - log.Errorf("failed to reload schema %v", err) + log.Error(fmt.Sprintf("failed to reload schema %v", err)) } }() } @@ -694,7 +694,7 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { // Someone reset the id underneath us. 
if t.SequenceInfo.LastVal != nextID { if nextID < t.SequenceInfo.LastVal { - log.Warningf("Sequence next ID value %v is below the currently cached max %v, updating it to max", nextID, t.SequenceInfo.LastVal) + log.Warn(fmt.Sprintf("Sequence next ID value %v is below the currently cached max %v, updating it to max", nextID, t.SequenceInfo.LastVal)) nextID = t.SequenceInfo.LastVal } t.SequenceInfo.NextVal = nextID @@ -815,7 +815,7 @@ func (qre *QueryExecutor) verifyRowCount(count, maxrows int64) error { if warnThreshold > 0 && count > warnThreshold { callerID := callerid.ImmediateCallerIDFromContext(qre.ctx) qre.tsv.Stats().Warnings.Add("ResultsExceeded", 1) - log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true, qre.tsv.env.Parser())) + log.Warn(fmt.Sprintf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true, qre.tsv.env.Parser()))) } return nil } diff --git a/go/vt/vttablet/tabletserver/query_list.go b/go/vt/vttablet/tabletserver/query_list.go index 7263bea3afc..af2ca4ee56f 100644 --- a/go/vt/vttablet/tabletserver/query_list.go +++ b/go/vt/vttablet/tabletserver/query_list.go @@ -18,6 +18,7 @@ package tabletserver import ( "context" + "fmt" "sort" "sync" "time" @@ -145,7 +146,7 @@ func (ql *QueryList) Terminate(connID int64) bool { for _, qd := range qds { err := qd.conn.Kill("QueryList.Terminate()", time.Since(qd.start)) if err != nil { - log.Warningf("Error terminating query on connection id: %d, error: %v", qd.conn.ID(), err) + log.Warn(fmt.Sprintf("Error terminating query on connection id: %d, error: %v", qd.conn.ID(), err)) } } return true @@ -159,7 +160,7 @@ func (ql *QueryList) TerminateAll() { for _, qd := range qds { err := qd.conn.Kill("QueryList.TerminateAll()", time.Since(qd.start)) if err != nil { - log.Warningf("Error terminating query on connection id: %d, error: %v", qd.conn.ID(), err) + log.Warn(fmt.Sprintf("Error terminating query on connection id: %d, error: %v", qd.conn.ID(), err)) } } } diff --git a/go/vt/vttablet/tabletserver/querylogz.go b/go/vt/vttablet/tabletserver/querylogz.go index de1265c1ff8..1c04e8b430d 100644 --- a/go/vt/vttablet/tabletserver/querylogz.go +++ b/go/vt/vttablet/tabletserver/querylogz.go @@ -17,6 +17,7 @@ limitations under the License. 
package tabletserver import ( + "fmt" "net/http" "strconv" "strings" @@ -121,7 +122,7 @@ func querylogzHandler(ch chan *tabletenv.LogStats, w http.ResponseWriter, r *htt Parser *sqlparser.Parser }{stats, level, parser} if err := querylogzTmpl.Execute(w, tmplData); err != nil { - log.Errorf("querylogz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("querylogz: couldn't execute template: %v", err)) } case <-tmr.C: return diff --git a/go/vt/vttablet/tabletserver/querythrottler/query_throttler.go b/go/vt/vttablet/tabletserver/querythrottler/query_throttler.go index 1862b37ed9f..ce00191f791 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/query_throttler.go +++ b/go/vt/vttablet/tabletserver/querythrottler/query_throttler.go @@ -19,6 +19,7 @@ package querythrottler import ( "context" "errors" + "fmt" "strconv" "sync" "sync/atomic" @@ -142,7 +143,7 @@ func (qt *QueryThrottler) Shutdown() { // - Missing critical throttling rules during high-load periods func (qt *QueryThrottler) InitDBConfig(keyspace string) { qt.keyspace = keyspace - log.Infof("QueryThrottler: initialized with keyspace=%s", keyspace) + log.Info("QueryThrottler: initialized with keyspace=" + keyspace) // Start the topo server watch post the keyspace is set. qt.startSrvKeyspaceWatch() @@ -199,7 +200,7 @@ func (qt *QueryThrottler) Throttle(ctx context.Context, tabletType topodatapb.Ta // If dry-run mode is enabled, log the decision but don't throttle if tCfg.GetDryRun() { - log.Warningf("[DRY-RUN] %s, metric name: %s, metric value: %f", decision.Message, decision.MetricName, decision.MetricValue) + log.Warn(fmt.Sprintf("[DRY-RUN] %s, metric name: %s, metric value: %f", decision.Message, decision.MetricName, decision.MetricValue)) return nil } @@ -222,7 +223,7 @@ func (qt *QueryThrottler) Throttle(ctx context.Context, tabletType topodatapb.Ta func (qt *QueryThrottler) startSrvKeyspaceWatch() { // Pre-flight validation: ensure required fields are set if qt.srvTopoServer == nil || qt.keyspace == "" { - log.Errorf("QueryThrottler: cannot start SrvKeyspace watch, srvTopoServer=%v, keyspace=%s", qt.srvTopoServer != nil, qt.keyspace) + log.Error(fmt.Sprintf("QueryThrottler: cannot start SrvKeyspace watch, srvTopoServer=%v, keyspace=%s", qt.srvTopoServer != nil, qt.keyspace)) return } @@ -231,10 +232,10 @@ func (qt *QueryThrottler) startSrvKeyspaceWatch() { // TODO(Siddharth) add retry for this initial load srvKS, err := qt.srvTopoServer.GetSrvKeyspace(qt.ctx, qt.cell, qt.keyspace) if err != nil { - log.Warningf("QueryThrottler: failed to load initial config for keyspace=%s (GetSrvKeyspace): %v", qt.keyspace, err) + log.Warn(fmt.Sprintf("QueryThrottler: failed to load initial config for keyspace=%s (GetSrvKeyspace): %v", qt.keyspace, err)) } if srvKS == nil { - log.Warningf("QueryThrottler: srv keyspace fetched is nil for keyspace=%s ", qt.keyspace) + log.Warn(fmt.Sprintf("QueryThrottler: srv keyspace fetched is nil for keyspace=%s ", qt.keyspace)) } qt.HandleConfigUpdate(srvKS, nil) @@ -243,7 +244,7 @@ func (qt *QueryThrottler) startSrvKeyspaceWatch() { // Only start the watch once (protected by atomic flag) if !qt.watchStarted.CompareAndSwap(false, true) { - log.Infof("QueryThrottler: SrvKeyspace watch already started for keyspace=%s", qt.keyspace) + log.Info("QueryThrottler: SrvKeyspace watch already started for keyspace=" + qt.keyspace) return } watchCtx, cancel := context.WithCancel(qt.ctx) @@ -257,7 +258,7 @@ func (qt *QueryThrottler) startSrvKeyspaceWatch() { qt.srvTopoServer.WatchSrvKeyspace(watchCtx, qt.cell, 
qt.keyspace, qt.HandleConfigUpdate) }() - log.Infof("QueryThrottler: started event-driven watch for SrvKeyspace keyspace=%s cell=%s", qt.keyspace, qt.cell) + log.Info(fmt.Sprintf("QueryThrottler: started event-driven watch for SrvKeyspace keyspace=%s cell=%s", qt.keyspace, qt.cell)) } // extractWorkloadName extracts the workload name from ExecuteOptions. @@ -290,7 +291,7 @@ func extractPriority(options *querypb.ExecuteOptions) int { // This should never error out, as the value for Priority has been validated in the vtgate already. // Still, handle it just to make sure. if err != nil || optionsPriority < 0 || optionsPriority > 100 { - log.Warningf("Invalid priority value '%s' in ExecuteOptions, expected integer 0-100, using default priority %d", options.Priority, defaultPriority) + log.Warn(fmt.Sprintf("Invalid priority value '%s' in ExecuteOptions, expected integer 0-100, using default priority %d", options.Priority, defaultPriority)) return defaultPriority } @@ -317,24 +318,24 @@ func (qt *QueryThrottler) HandleConfigUpdate(srvks *topodatapb.SrvKeyspace, err if err != nil { // Keyspace deleted from topology - stop watching if topo.IsErrType(err, topo.NoNode) { - log.Warningf("HandleConfigUpdate: keyspace %s deleted or not found, stopping watch", qt.keyspace) + log.Warn(fmt.Sprintf("HandleConfigUpdate: keyspace %s deleted or not found, stopping watch", qt.keyspace)) return false } // Context canceled or interrupted - graceful shutdown, stop watching if errors.Is(err, context.Canceled) || topo.IsErrType(err, topo.Interrupted) { - log.Infof("HandleConfigUpdate: watch stopped (context canceled or interrupted)") + log.Info("HandleConfigUpdate: watch stopped (context canceled or interrupted)") return false } // Transient error (network, temporary topo server issue) - keep watching // The resilient watcher will automatically retry as defined in go/vt/srvtopo/resilient_server.go:46 - log.Warningf("HandleConfigUpdate: transient topo watch error (will retry): %v", err) + log.Warn(fmt.Sprintf("HandleConfigUpdate: transient topo watch error (will retry): %v", err)) return true } if srvks == nil { - log.Warningf("HandleConfigUpdate: srvks is nil") + log.Warn("HandleConfigUpdate: srvks is nil") return true } @@ -378,7 +379,7 @@ func (qt *QueryThrottler) HandleConfigUpdate(srvks *topodatapb.SrvKeyspace, err oldStrategyInstance.Stop() } - log.Infof("HandleConfigUpdate: config updated, strategy=%s, enabled=%v", newCfg.GetStrategy(), newCfg.GetEnabled()) + log.Info(fmt.Sprintf("HandleConfigUpdate: config updated, strategy=%s, enabled=%v", newCfg.GetStrategy(), newCfg.GetEnabled())) return true } diff --git a/go/vt/vttablet/tabletserver/querythrottler/query_throttler_test.go b/go/vt/vttablet/tabletserver/querythrottler/query_throttler_test.go index 91f144f59ae..dc06e6fc668 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/query_throttler_test.go +++ b/go/vt/vttablet/tabletserver/querythrottler/query_throttler_test.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "sync" "testing" "time" @@ -249,15 +250,9 @@ func TestQueryThrottler_DryRunMode(t *testing.T) { iqt.stats.requestsThrottled.ResetAll() // Capture log output - logCapture := &testLogCapture{} - originalLogWarningf := log.Warningf - defer func() { - // Restore original logging function - log.Warningf = originalLogWarningf - }() - - // Mock log.Warningf to capture output - log.Warningf = logCapture.captureLog + handler := log.NewCaptureHandler() + restoreLogger := log.SetLogger(slog.New(handler)) + defer restoreLogger() // Test 
the enforcement err := iqt.Throttle( @@ -279,11 +274,12 @@ func TestQueryThrottler_DryRunMode(t *testing.T) { } // Verify log expectation + records := handler.Records() if tt.expectDryRunLog { - require.Len(t, logCapture.logs, 1, "Expected exactly one log message") - require.Equal(t, tt.expectedLogMsg, logCapture.logs[0], "Log message should match expected") + require.Len(t, records, 1, "Expected exactly one log message") + require.Equal(t, tt.expectedLogMsg, records[0].Message, "Log message should match expected") } else { - require.Empty(t, logCapture.logs, "Expected no log messages") + require.Empty(t, records, "Expected no log messages") } // Verify stats expectation diff --git a/go/vt/vttablet/tabletserver/querythrottler/registry/registry.go b/go/vt/vttablet/tabletserver/querythrottler/registry/registry.go index 4da1bedba18..8b0cb313a0e 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/registry/registry.go +++ b/go/vt/vttablet/tabletserver/querythrottler/registry/registry.go @@ -40,7 +40,7 @@ func Register(name querythrottlerpb.ThrottlingStrategy, factory StrategyFactory) } factories[name] = factory - log.Infof("Registered throttling strategy: %s", name) + log.Info(fmt.Sprintf("Registered throttling strategy: %s", name)) } // Get retrieves a strategy factory by name. @@ -63,13 +63,13 @@ func CreateStrategy(cfg StrategyConfig, deps Deps) ThrottlingStrategyHandler { // NoOpStrategy must always be available—even before any registration happens—so the registry itself can safely fall back on it. factory, ok := Get(cfg.GetStrategy()) if !ok { - log.Warningf("Unknown strategy %s, using NoOp", cfg.GetStrategy()) + log.Warn(fmt.Sprintf("Unknown strategy %s, using NoOp", cfg.GetStrategy())) return &NoOpStrategy{} } strategy, err := factory.New(deps, cfg) if err != nil { - log.Errorf("Strategy %s failed to init: %v, using NoOp", cfg.GetStrategy(), err) + log.Error(fmt.Sprintf("Strategy %s failed to init: %v, using NoOp", cfg.GetStrategy(), err)) return &NoOpStrategy{} } diff --git a/go/vt/vttablet/tabletserver/queryz.go b/go/vt/vttablet/tabletserver/queryz.go index 5d674b260cf..487c7dea5dd 100644 --- a/go/vt/vttablet/tabletserver/queryz.go +++ b/go/vt/vttablet/tabletserver/queryz.go @@ -178,7 +178,7 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { sort.Sort(&sorter) for _, Value := range sorter.rows { if err := queryzTmpl.Execute(w, Value); err != nil { - log.Errorf("queryz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("queryz: couldn't execute template: %v", err)) } } } diff --git a/go/vt/vttablet/tabletserver/repltracker/writer.go b/go/vt/vttablet/tabletserver/repltracker/writer.go index c39b05bc9a2..db6b17a338c 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer.go @@ -337,7 +337,7 @@ func (w *heartbeatWriter) killWrite() error { defer cancel() killConn, err := w.allPrivsPool.Get(ctx) if err != nil { - log.Errorf("Kill conn didn't get connection :(") + log.Error("Kill conn didn't get connection :(") return err } defer killConn.Recycle() diff --git a/go/vt/vttablet/tabletserver/rules/map.go b/go/vt/vttablet/tabletserver/rules/map.go index 18f75d567f9..ef02eb91e95 100644 --- a/go/vt/vttablet/tabletserver/rules/map.go +++ b/go/vt/vttablet/tabletserver/rules/map.go @@ -46,7 +46,7 @@ func (qri *Map) RegisterSource(ruleSource string) { qri.mu.Lock() defer qri.mu.Unlock() if _, existed := qri.queryRulesMap[ruleSource]; existed { - log.Errorf("Query rule source " + ruleSource + " has 
been registered") + log.Error("Query rule source " + ruleSource + " has been registered") panic("Query rule source " + ruleSource + " has been registered") } qri.queryRulesMap[ruleSource] = New() diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index 6e6e7239316..a4baf6c1db0 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -165,9 +165,9 @@ func (se *Engine) InitDBConfig(cp dbconfigs.Connector) { // in a future version (>v16) once the new schema init functionality // is stable. func (se *Engine) syncSidecarDB(ctx context.Context, conn *dbconnpool.DBConnection) error { - log.Infof("In syncSidecarDB") + log.Info("In syncSidecarDB") defer func(start time.Time) { - log.Infof("syncSidecarDB took %d ms", time.Since(start).Milliseconds()) + log.Info(fmt.Sprintf("syncSidecarDB took %d ms", time.Since(start).Milliseconds())) }(time.Now()) var exec sidecardb.Exec = func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { @@ -180,15 +180,15 @@ func (se *Engine) syncSidecarDB(ctx context.Context, conn *dbconnpool.DBConnecti return conn.ExecuteFetch(query, maxRows, true) } if err := sidecardb.Init(ctx, se.env.Environment(), exec); err != nil { - log.Errorf("Error in sidecardb.Init: %+v", err) + log.Error(fmt.Sprintf("Error in sidecardb.Init: %+v", err)) if se.env.Config().DB.HasGlobalSettings() { - log.Warning("Ignoring sidecardb.Init error for unmanaged tablets") + log.Warn("Ignoring sidecardb.Init error for unmanaged tablets") return nil } - log.Errorf("syncSidecarDB error %+v", err) + log.Error(fmt.Sprintf("syncSidecarDB error %+v", err)) return err } - log.Infof("syncSidecarDB done") + log.Info("syncSidecarDB done") return nil } @@ -232,13 +232,13 @@ func (se *Engine) EnsureConnectionAndDB(tabletType topodatapb.TabletType, servin if err != nil { if !se.dbCreationFailed { // This is the first failure. 
- log.Errorf("db creation failed for %v: %v, will keep retrying", dbname, err) + log.Error(fmt.Sprintf("db creation failed for %v: %v, will keep retrying", dbname, err)) se.dbCreationFailed = true } return err } - log.Infof("db %v created", dbname) + log.Info(fmt.Sprintf("db %v created", dbname)) se.dbCreationFailed = false // creates sidecar schema, the first time the database is created if err := se.syncSidecarDB(ctx, conn); err != nil { @@ -287,7 +287,7 @@ func (se *Engine) Open() error { se.ticks.Start(func() { // update stats on periodic reloads if err := se.reloadAndIncludeStats(ctx); err != nil { - log.Errorf("periodic schema reload failed: %v", err) + log.Error(fmt.Sprintf("periodic schema reload failed: %v", err)) } }) @@ -400,11 +400,11 @@ func (se *Engine) ReloadAtEx(ctx context.Context, pos replication.Position, incl se.mu.Lock() defer se.mu.Unlock() if !se.isOpen { - log.Warning("Schema reload called for an engine that is not yet open") + log.Warn("Schema reload called for an engine that is not yet open") return nil } if !pos.IsZero() && se.reloadAtPos.AtLeast(pos) { - log.V(2).Infof("ReloadAtEx: found cached schema at %s", replication.EncodePosition(pos)) + log.Debug("ReloadAtEx: found cached schema at " + replication.EncodePosition(pos)) return nil } if err := se.reload(ctx, includeStats); err != nil { @@ -497,7 +497,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { includeStats = false if err := se.updateTableIndexMetrics(ctx, conn.Conn); err != nil { - log.Errorf("Updating index/table statistics failed, error: %v", err) + log.Error(fmt.Sprintf("Updating index/table statistics failed, error: %v", err)) } } tableData, err := getTableData(ctx, conn.Conn, includeStats) @@ -595,7 +595,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { continue } - log.V(2).Infof("Reading schema for table: %s", tableName) + log.Debug("Reading schema for table: " + tableName) tableType := row[1].String() table, err := LoadTable(conn, se.cp.DBName(), tableName, tableType, row[3].ToString(), se.env.Environment().CollationEnv()) if err != nil { @@ -635,7 +635,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { // So, we do this step in the end when we can receive no more errors that fail the reload operation. 
err = reloadDataInDB(ctx, conn.Conn, altered, created, dropped, udfsChanged, se.env.Environment().Parser()) if err != nil { - log.Errorf("error in updating schema information in Engine.reload() - %v", err) + log.Error(fmt.Sprintf("error in updating schema information in Engine.reload() - %v", err)) } } @@ -643,7 +643,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { maps0.Copy(se.tables, changedTables) se.lastChange = curTime if len(created) > 0 || len(altered) > 0 || len(dropped) > 0 { - log.Infof("schema engine created %v, altered %v, dropped %v", extractNamesFromTablesList(created), extractNamesFromTablesList(altered), extractNamesFromTablesList(dropped)) + log.Info(fmt.Sprintf("schema engine created %v, altered %v, dropped %v", extractNamesFromTablesList(created), extractNamesFromTablesList(altered), extractNamesFromTablesList(dropped))) } se.broadcast(created, altered, dropped, udfsChanged) return nil @@ -710,7 +710,7 @@ func (se *Engine) updateInnoDBRowsRead(ctx context.Context, conn *connpool.Conn) se.innoDbReadRowsCounter.Set(value) } else { - log.Warningf("got strange results from 'show status': %v", readRowsData.Rows) + log.Warn(fmt.Sprintf("got strange results from 'show status': %v", readRowsData.Rows)) } return nil } @@ -900,7 +900,7 @@ func (se *Engine) RegisterVersionEvent() error { func (se *Engine) GetTableForPos(ctx context.Context, tableName sqlparser.IdentifierCS, gtid string) (*binlogdatapb.MinimalTable, error) { mt, err := se.historian.GetTableForPos(tableName, gtid) if err != nil { - log.Infof("GetTableForPos returned error: %s", err.Error()) + log.Info("GetTableForPos returned error: " + err.Error()) return nil, err } if mt != nil { @@ -940,7 +940,7 @@ func (se *Engine) GetTableForPos(ctx context.Context, tableName sqlparser.Identi // It's expected that internal tables are not found within VReplication workflows. // No need to refresh the cache for internal tables. if schema.IsInternalOperationTableName(tableNameStr) { - log.Infof("internal table %v found in vttablet schema: skipping for GTID search", tableNameStr) + log.Info(fmt.Sprintf("internal table %v found in vttablet schema: skipping for GTID search", tableNameStr)) return nil, nil } // We don't currently have the non-internal table in the cache. This can happen when @@ -964,7 +964,7 @@ func (se *Engine) GetTableForPos(ctx context.Context, tableName sqlparser.Identi } } - log.Infof("table %v not found in vttablet schema, current tables: %v", tableNameStr, se.tables) + log.Info(fmt.Sprintf("table %v not found in vttablet schema, current tables: %v", tableNameStr, se.tables)) return nil, fmt.Errorf("table %v not found in vttablet schema", tableNameStr) } @@ -994,17 +994,17 @@ func (se *Engine) RegisterNotifier(name string, f notifier, runNotifier bool) { // UnregisterNotifier unregisters the notifier function. func (se *Engine) UnregisterNotifier(name string) { if !se.isOpen { - log.Infof("schema Engine is not open") + log.Info("schema Engine is not open") return } - log.Infof("schema Engine - acquiring notifierMu lock") + log.Info("schema Engine - acquiring notifierMu lock") se.notifierMu.Lock() - log.Infof("schema Engine - acquired notifierMu lock") + log.Info("schema Engine - acquired notifierMu lock") defer se.notifierMu.Unlock() delete(se.notifiers, name) - log.Infof("schema Engine - finished UnregisterNotifier") + log.Info("schema Engine - finished UnregisterNotifier") } // broadcast must be called while holding a lock on se.mu. 
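For reference, a minimal sketch of the conversion convention the hunks above follow, assuming only what the replacement calls themselves show: vitess.io/vitess/go/vt/log exposing Debug, Info, Warn and Error that take one pre-formatted string, with formatting moved into fmt.Sprintf (or plain concatenation for a single argument) and Exitf/Fatal replaced by an Error log followed by an explicit os.Exit(1). The package and function names below are illustrative only.

package logconversion

import (
	"fmt"
	"os"

	"vitess.io/vitess/go/vt/log"
)

// logConversionExamples mirrors the substitutions made in the schema engine hunks above.
func logConversionExamples(tableName, dbname string, err error) {
	// Was: log.V(2).Infof("Reading schema for table: %s", tableName)
	log.Debug("Reading schema for table: " + tableName)

	// Was: log.Infof("db %v created", dbname)
	log.Info(fmt.Sprintf("db %v created", dbname))

	// Was: log.Warning("Schema reload called for an engine that is not yet open")
	log.Warn("Schema reload called for an engine that is not yet open")

	// Was: log.Errorf("periodic schema reload failed: %v", err)
	log.Error(fmt.Sprintf("periodic schema reload failed: %v", err))
}

// exitOnFatal shows the replacement used elsewhere in this patch for
// log.Exitf/log.Fatal: log the error, then exit the process explicitly.
func exitOnFatal(err error) {
	log.Error(fmt.Sprint(err))
	os.Exit(1)
}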
@@ -1148,7 +1148,7 @@ func (se *Engine) ResetSequences(tables []string) error { for _, tableName := range tables { if table, ok := se.tables[tableName]; ok { if table.SequenceInfo != nil { - log.Infof("Resetting sequence info for table %s: %+v", tableName, table.SequenceInfo) + log.Info(fmt.Sprintf("Resetting sequence info for table %s: %+v", tableName, table.SequenceInfo)) table.SequenceInfo.Reset() } } else { diff --git a/go/vt/vttablet/tabletserver/schema/historian.go b/go/vt/vttablet/tabletserver/schema/historian.go index eef539c1de7..995333a37bd 100644 --- a/go/vt/vttablet/tabletserver/schema/historian.go +++ b/go/vt/vttablet/tabletserver/schema/historian.go @@ -18,6 +18,7 @@ package schema import ( "context" + "fmt" "sort" "sync" "time" @@ -98,7 +99,7 @@ func (h *historian) Open() error { ctx := tabletenv.LocalContext() if err := h.loadFromDB(ctx); err != nil { - log.Errorf("Historian failed to open: %v", err) + log.Error(fmt.Sprintf("Historian failed to open: %v", err)) return err } @@ -143,7 +144,7 @@ func (h *historian) GetTableForPos(tableName sqlparser.IdentifierCS, gtid string return nil, nil } - log.V(2).Infof("GetTableForPos called for %s with pos %s", tableName, gtid) + log.Debug(fmt.Sprintf("GetTableForPos called for %s with pos %s", tableName, gtid)) if gtid == "" { return nil, nil } @@ -156,7 +157,7 @@ func (h *historian) GetTableForPos(tableName sqlparser.IdentifierCS, gtid string t = h.getTableFromHistoryForPos(tableName, pos) } if t != nil { - log.V(2).Infof("Returning table %s from history for pos %s, schema %s", tableName, gtid, t) + log.Debug(fmt.Sprintf("Returning table %s from history for pos %s, schema %s", tableName, gtid, t)) } return t, nil } @@ -181,7 +182,7 @@ func (h *historian) loadFromDB(ctx context.Context) error { } if err != nil { - log.Infof("Error reading schema_tracking table %v, will operate with the latest available schema", err) + log.Info(fmt.Sprintf("Error reading schema_tracking table %v, will operate with the latest available schema", err)) return nil } for _, row := range tableData.Rows { @@ -232,8 +233,8 @@ func (h *historian) readRow(row []sqltypes.Value) (*trackedSchema, int64, error) if err := sch.UnmarshalVT(rowBytes); err != nil { return nil, 0, err } - log.V(vl).Infof("Read tracked schema from db: id %d, pos %v, ddl %s, schema len %d, time_updated %d \n", - id, replication.EncodePosition(pos), ddl, len(sch.Tables), timeUpdated) + log.Debug(fmt.Sprintf("Read tracked schema from db: id %d, pos %v, ddl %s, schema len %d, time_updated %d \n", + id, replication.EncodePosition(pos), ddl, len(sch.Tables), timeUpdated)) tables := map[string]*binlogdatapb.MinimalTable{} for _, t := range sch.Tables { @@ -287,7 +288,7 @@ func (h *historian) getTableFromHistoryForPos(tableName sqlparser.IdentifierCS, return pos.Equal(h.schemas[i].pos) || !pos.AtLeast(h.schemas[i].pos) }) if idx >= len(h.schemas) || idx == 0 && !pos.Equal(h.schemas[idx].pos) { // beyond the range of the cache - log.Infof("Schema not found in cache for %s with pos %s", tableName, pos) + log.Info(fmt.Sprintf("Schema not found in cache for %s with pos %s", tableName, pos)) return nil } if pos.Equal(h.schemas[idx].pos) { // exact match to a cache entry diff --git a/go/vt/vttablet/tabletserver/schema/historian_test.go b/go/vt/vttablet/tabletserver/schema/historian_test.go index ca5f45ef6b0..e71d0c7757c 100644 --- a/go/vt/vttablet/tabletserver/schema/historian_test.go +++ b/go/vt/vttablet/tabletserver/schema/historian_test.go @@ -23,6 +23,7 @@ import ( "time" 
"github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" @@ -138,7 +139,7 @@ func TestHistorian(t *testing.T) { } tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid1) require.NoError(t, err) - require.Equal(t, exp1, tab) + require.True(t, proto.Equal(exp1, tab)) gtid2 := gtidPrefix + "1-20" _, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.Equal(t, "table t1 not found in vttablet schema", err.Error()) @@ -165,7 +166,7 @@ func TestHistorian(t *testing.T) { } tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.NoError(t, err) - require.Equal(t, exp2, tab) + require.True(t, proto.Equal(exp2, tab)) gtid3 := gtidPrefix + "1-30" _, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid3) require.Equal(t, "table t1 not found in vttablet schema", err.Error()) @@ -193,17 +194,17 @@ func TestHistorian(t *testing.T) { } tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid3) require.NoError(t, err) - require.Equal(t, exp3, tab) + require.True(t, proto.Equal(exp3, tab)) tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid1) require.NoError(t, err) - require.Equal(t, exp1, tab) + require.True(t, proto.Equal(exp1, tab)) tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.NoError(t, err) - require.Equal(t, exp2, tab) + require.True(t, proto.Equal(exp2, tab)) tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid3) require.NoError(t, err) - require.Equal(t, exp3, tab) + require.True(t, proto.Equal(exp3, tab)) } func TestHistorianPurgeOldSchemas(t *testing.T) { @@ -284,6 +285,6 @@ func TestHistorianPurgeOldSchemas(t *testing.T) { } tab, err := se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.NoError(t, err) - require.Equal(t, exp2, tab) + require.True(t, proto.Equal(exp2, tab)) require.Equal(t, 1, len(se.historian.schemas)) } diff --git a/go/vt/vttablet/tabletserver/schema/schemaz.go b/go/vt/vttablet/tabletserver/schema/schemaz.go index 312f5efa6cc..4f955d4ea30 100644 --- a/go/vt/vttablet/tabletserver/schema/schemaz.go +++ b/go/vt/vttablet/tabletserver/schema/schemaz.go @@ -17,6 +17,7 @@ limitations under the License. 
package schema import ( + "fmt" "net/http" "sort" @@ -95,7 +96,7 @@ func schemazHandler(tables map[string]*Table, w http.ResponseWriter, r *http.Req for _, Value := range sorter.rows { envelope.Table = Value if err := schemazTmpl.Execute(w, envelope); err != nil { - log.Errorf("schemaz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("schemaz: couldn't execute template: %v", err)) } } } diff --git a/go/vt/vttablet/tabletserver/schema/tracker.go b/go/vt/vttablet/tabletserver/schema/tracker.go index 47bc6af08b0..395eb58cbb2 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker.go +++ b/go/vt/vttablet/tabletserver/schema/tracker.go @@ -19,6 +19,7 @@ package schema import ( "context" "errors" + "fmt" "sync" "time" @@ -117,7 +118,7 @@ func (tr *Tracker) process(ctx context.Context) { defer tr.env.LogError() defer tr.wg.Done() if err := tr.possiblyInsertInitialSchema(ctx); err != nil { - log.Errorf("error inserting initial schema: %v", err) + log.Error(fmt.Sprintf("error inserting initial schema: %v", err)) return } @@ -138,8 +139,7 @@ func (tr *Tracker) process(ctx context.Context) { MustReloadSchemaOnDDL(event.Statement, tr.engine.cp.DBName(), tr.env.Environment().Parser()) { if err := tr.schemaUpdated(gtid, event.Statement, event.Timestamp); err != nil { tr.env.Stats().ErrorCounters.Add(vtrpcpb.Code_INTERNAL.String(), 1) - log.Errorf("Error updating schema: %s for ddl %s, gtid %s", - tr.env.Environment().Parser().TruncateForLog(err.Error()), event.Statement, gtid) + log.Error(fmt.Sprintf("Error updating schema: %s for ddl %s, gtid %s", tr.env.Environment().Parser().TruncateForLog(err.Error()), event.Statement, gtid)) } } } @@ -150,7 +150,7 @@ func (tr *Tracker) process(ctx context.Context) { return case <-time.After(5 * time.Second): } - log.Infof("Tracker's vStream ended: %v, retrying in 5 seconds", err) + log.Info(fmt.Sprintf("Tracker's vStream ended: %v, retrying in 5 seconds", err)) time.Sleep(5 * time.Second) } } @@ -203,13 +203,13 @@ func (tr *Tracker) possiblyInsertInitialSchema(ctx context.Context) error { return err } gtid := replication.EncodePosition(pos) - log.Infof("Saving initial schema for gtid %s", gtid) + log.Info("Saving initial schema for gtid " + gtid) return tr.saveCurrentSchemaToDb(ctx, gtid, ddl, timestamp) } func (tr *Tracker) schemaUpdated(gtid string, ddl string, timestamp int64) error { - log.Infof("Processing schemaUpdated event for gtid %s, ddl %s", gtid, ddl) + log.Info(fmt.Sprintf("Processing schemaUpdated event for gtid %s, ddl %s", gtid, ddl)) if gtid == "" || ddl == "" { return errors.New("got invalid gtid or ddl in schemaUpdated") } diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go index f9c1c0c23e9..ff56adc172f 100644 --- a/go/vt/vttablet/tabletserver/state_manager.go +++ b/go/vt/vttablet/tabletserver/state_manager.go @@ -19,6 +19,7 @@ package tabletserver import ( "context" "fmt" + "os" "slices" "sync" "sync/atomic" @@ -233,7 +234,7 @@ func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, ptsTime state = StateNotConnected } - log.Infof("Starting transition to %v %v, primary term start timestamp: %v", tabletType, state, ptsTimestamp) + log.Info(fmt.Sprintf("Starting transition to %v %v, primary term start timestamp: %v", tabletType, state, ptsTimestamp)) if sm.mustTransition(tabletType, ptsTimestamp, state, reason) { return sm.execTransition(tabletType, state) } @@ -332,14 +333,14 @@ func (sm *stateManager) checkMySQL() { if !sm.checkMySQLThrottler.TryAcquire(1) { 
return } - log.Infof("CheckMySQL started") + log.Info("CheckMySQL started") sm.checkMySQLRunning.Store(true) go func() { defer func() { time.Sleep(1 * time.Second) sm.checkMySQLRunning.Store(false) sm.checkMySQLThrottler.Release(1) - log.Infof("CheckMySQL finished") + log.Info("CheckMySQL finished") }() err := sm.qe.IsMySQLReachable() @@ -560,30 +561,30 @@ func (sm *stateManager) unserveCommon() { // We create a wait group that tracks whether all the queries have been terminated or not. wg := sync.WaitGroup{} wg.Add(1) - log.Infof("Started execution of unserveCommon") + log.Info("Started execution of unserveCommon") cancel := sm.terminateAllQueries(&wg) - log.Infof("Finished execution of terminateAllQueries") + log.Info("Finished execution of terminateAllQueries") defer cancel() - log.Infof("Started online ddl executor close") + log.Info("Started online ddl executor close") sm.ddle.Close() - log.Infof("Finished online ddl executor close. Started table garbage collector close") + log.Info("Finished online ddl executor close. Started table garbage collector close") sm.tableGC.Close() - log.Infof("Finished table garbage collector close. Started lag throttler close") + log.Info("Finished table garbage collector close. Started lag throttler close") sm.throttler.Close() - log.Infof("Finished lag throttler close. Started messager close") + log.Info("Finished lag throttler close. Started query throttler close") sm.qThrottler.Close() - log.Infof("Finished query throttler close. Started query throttler close") + log.Info("Finished query throttler close. Started messager close") sm.messager.Close() - log.Infof("Finished messager close. Started txEngine close") + log.Info("Finished messager close. Started txEngine close") sm.te.Close() - log.Infof("Finished txEngine close. Killing all OLAP queries") + log.Info("Finished txEngine close. Killing all OLAP queries") sm.olapql.TerminateAll() log.Info("Finished Killing all OLAP queries. Started tracker close") sm.tracker.Close() - log.Infof("Finished tracker close. Started wait for requests") + log.Info("Finished tracker close. Started wait for requests") sm.handleShutdownGracePeriod(&wg) - log.Infof("Finished handling grace period. Finished execution of unserveCommon") + log.Info("Finished handling grace period. Finished execution of unserveCommon") } // handleShutdownGracePeriod checks if we have shutdwonGracePeriod specified. @@ -620,17 +621,17 @@ func (sm *stateManager) terminateAllQueries(wg *sync.WaitGroup) (cancel func()) } // Prevent any new queries from being added before we kill all the queries in the list. sm.markClusterAction(ClusterActionNoQueries) - log.Infof("Grace Period %v exceeded. Killing all OLTP queries.", sm.shutdownGracePeriod) + log.Info(fmt.Sprintf("Grace Period %v exceeded. Killing all OLTP queries.", sm.shutdownGracePeriod)) sm.statelessql.TerminateAll() - log.Infof("Killed all stateless OLTP queries.") + log.Info("Killed all stateless OLTP queries.") sm.statefulql.TerminateAll() - log.Infof("Killed all OLTP queries.") + log.Info("Killed all OLTP queries.") // We can rollback prepared transactions only after we have killed all the write queries in progress. // This is essential because when we rollback a prepared transaction, it lets go of the locks it was holding. // If there were some other conflicting write in progress that hadn't been killed, then it could potentially go through // and cause data corruption since we won't be able to prepare the transaction again. 
sm.te.RollbackPrepared() - log.Infof("Rollbacked all prepared transactions") + log.Info("Rollbacked all prepared transactions") }() return cancel } @@ -658,7 +659,8 @@ func (sm *stateManager) setTimeBomb() chan struct{} { defer tmr.Stop() select { case <-tmr.C: - log.Fatal("Shutdown took too long. Crashing") + log.Error("Shutdown took too long. Crashing") + os.Exit(1) case <-done: } }() @@ -668,16 +670,15 @@ func (sm *stateManager) setTimeBomb() chan struct{} { // setState changes the state and logs the event. func (sm *stateManager) setState(tabletType topodatapb.TabletType, state servingState) { defer logInitTime.Do(func() { - log.Infof("Tablet Init took %d ms", time.Since(servenv.GetInitStartTime()).Milliseconds()) + log.Info(fmt.Sprintf("Tablet Init took %d ms", time.Since(servenv.GetInitStartTime()).Milliseconds())) }) sm.mu.Lock() defer sm.mu.Unlock() if tabletType == topodatapb.TabletType_UNKNOWN { tabletType = sm.wantTabletType } - log.Infof("TabletServer transition: %v -> %v for tablet %s:%s/%s", - sm.stateStringLocked(sm.target.TabletType, sm.state), sm.stateStringLocked(tabletType, state), - sm.target.Cell, sm.target.Keyspace, sm.target.Shard) + log.Info(fmt.Sprintf("TabletServer transition: %v -> %v for tablet %s:%s/%s", sm.stateStringLocked(sm.target.TabletType, sm.state), sm.stateStringLocked(tabletType, state), + sm.target.Cell, sm.target.Keyspace, sm.target.Shard)) sm.handleTransitionGracePeriod(tabletType) sm.target.TabletType = tabletType if sm.state == StateNotConnected { @@ -743,18 +744,18 @@ func (sm *stateManager) refreshReplHealthLocked() (time.Duration, error) { lag, err := sm.rt.Status() if err != nil { if sm.replHealthy { - log.Infof("Going unhealthy due to replication error: %v", err) + log.Info(fmt.Sprintf("Going unhealthy due to replication error: %v", err)) } sm.replHealthy = false } else { if lag > time.Duration(sm.unhealthyThreshold.Load()) { if sm.replHealthy { - log.Infof("Going unhealthy due to high replication lag: %v", lag) + log.Info(fmt.Sprintf("Going unhealthy due to high replication lag: %v", lag)) } sm.replHealthy = false } else { if !sm.replHealthy { - log.Infof("Replication is healthy") + log.Info("Replication is healthy") } sm.replHealthy = true } diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go index f558bfd92fa..3ed52260cb4 100644 --- a/go/vt/vttablet/tabletserver/state_manager_test.go +++ b/go/vt/vttablet/tabletserver/state_manager_test.go @@ -19,6 +19,7 @@ package tabletserver import ( "context" "errors" + "fmt" "sync" "sync/atomic" "testing" @@ -319,7 +320,7 @@ func TestStateManagerSetServingTypeRace(t *testing.T) { } func TestStateManagerSetServingTypeNoChange(t *testing.T) { - log.Infof("starting") + log.Info("starting") sm := newTestStateManager() defer sm.StopService() err := sm.SetServingType(topodatapb.TabletType_REPLICA, testNow, StateServing, "") @@ -821,7 +822,7 @@ func newTestStateManager() *stateManager { } sm.Init(env, &querypb.Target{}) sm.hs.InitDBConfig(&querypb.Target{}) - log.Infof("returning sm: %p", sm) + log.Info(fmt.Sprintf("returning sm: %p", sm)) return sm } diff --git a/go/vt/vttablet/tabletserver/stateful_connection_pool.go b/go/vt/vttablet/tabletserver/stateful_connection_pool.go index b5bd1d40332..95d0339079d 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection_pool.go +++ b/go/vt/vttablet/tabletserver/stateful_connection_pool.go @@ -18,6 +18,7 @@ package tabletserver import ( "context" + "fmt" "sync/atomic" "time" @@ -76,7 +77,7 @@ func 
NewStatefulConnPool(env tabletenv.Env) *StatefulConnectionPool { // Open makes the TxPool operational. This also starts the transaction killer // that will kill long-running transactions. func (sf *StatefulConnectionPool) Open(appParams, dbaParams, appDebugParams dbconfigs.Connector) { - log.Infof("Starting transaction id: %d", sf.lastID.Load()) + log.Info(fmt.Sprintf("Starting transaction id: %d", sf.lastID.Load())) sf.conns.Open(appParams, dbaParams, appDebugParams) foundRowsParam, _ := appParams.MysqlParams() foundRowsParam.EnableClientFoundRows() @@ -93,7 +94,7 @@ func (sf *StatefulConnectionPool) Close() { if conn.IsInTransaction() { thing = "transaction" } - log.Warningf("killing %s for shutdown: %s", thing, conn.String(sf.env.Config().SanitizeLogMessages, sf.env.Environment().Parser())) + log.Warn(fmt.Sprintf("killing %s for shutdown: %s", thing, conn.String(sf.env.Config().SanitizeLogMessages, sf.env.Environment().Parser()))) sf.env.Stats().InternalErrors.Add("StrayTransactions", 1) conn.Close() conn.ReleaseString("pool closed") @@ -130,7 +131,7 @@ func (sf *StatefulConnectionPool) ShutdownAll() []*StatefulConnection { // no dtid collisions with future transactions. func (sf *StatefulConnectionPool) AdjustLastID(id int64) { if current := sf.lastID.Load(); current < id { - log.Infof("Adjusting transaction id to: %d", id) + log.Info(fmt.Sprintf("Adjusting transaction id to: %d", id)) sf.lastID.Store(id) } } diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index f0cd9c7d316..72c74cc2dc0 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "os" "sync" "time" @@ -291,7 +292,8 @@ func Init() { case streamlog.QueryLogFormatText: case streamlog.QueryLogFormatJSON: default: - log.Exitf("Invalid querylog-format value %v: must be either text or json", logFormat) + log.Error(fmt.Sprintf("Invalid querylog-format value %v: must be either text or json", logFormat)) + os.Exit(1) } if queryLogHandler != "" { diff --git a/go/vt/vttablet/tabletserver/tabletenv/env.go b/go/vt/vttablet/tabletserver/tabletenv/env.go index 27b4330c735..4c05d055d54 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/env.go +++ b/go/vt/vttablet/tabletserver/tabletenv/env.go @@ -19,6 +19,8 @@ limitations under the License. package tabletenv import ( + "fmt" + "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" @@ -63,7 +65,7 @@ func (te *testEnv) Environment() *vtenv.Environment { return te.env } func (te *testEnv) LogError() { if x := recover(); x != nil { - log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) + log.Error(fmt.Sprintf("Uncaught panic:\n%v\n%s", x, tb.Stack(4))) te.Stats().InternalErrors.Add("Panic", 1) } } diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index dadbfd5d619..00977b6ba1d 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -363,7 +363,7 @@ func (tsv *TabletServer) Environment() *vtenv.Environment { // LogError satisfies tabletenv.Env. 
func (tsv *TabletServer) LogError() { if x := recover(); x != nil { - log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) + log.Error(fmt.Sprintf("Uncaught panic:\n%v\n%s", x, tb.Stack(4))) tsv.stats.InternalErrors.Add("Panic", 1) } } @@ -410,9 +410,9 @@ func (tsv *TabletServer) InitACL(tableACLConfigFile string, reloadACLConfigFileI for range sigChan { err := tsv.initACL(tableACLConfigFile) if err != nil { - log.Errorf("Error reloading ACL config file %s in SIGHUP handler: %v", tableACLConfigFile, err) + log.Error(fmt.Sprintf("Error reloading ACL config file %s in SIGHUP handler: %v", tableACLConfigFile, err)) } else { - log.Infof("Successfully reloaded ACL file %s in SIGHUP handler", tableACLConfigFile) + log.Info(fmt.Sprintf("Successfully reloaded ACL file %s in SIGHUP handler", tableACLConfigFile)) } } }() @@ -638,10 +638,8 @@ func (tsv *TabletServer) getPriorityFromOptions(options *querypb.ExecuteOptions) // This should never error out, as the value for Priority has been validated in the vtgate already. // Still, handle it just to make sure. if err != nil { - log.Errorf( - "The value of the %s query directive could not be converted to integer, using the "+ - "default value. Error was: %s", - sqlparser.DirectivePriority, priority, err) + log.Error(fmt.Sprintf("The value of the %s query directive could not be converted to integer, using the "+ + "default value %d. Error was: %v", sqlparser.DirectivePriority, priority, err)) return priority } @@ -1662,7 +1660,9 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin callerID = fmt.Sprintf(" (CallerID: %s)", cid.Username) } - logMethod := log.Errorf + logMethod := func(format string, args ...any) { + log.Error(fmt.Sprintf(format, args...)) + } // Suppress or demote some errors in logs. 
switch errCode { case vtrpcpb.Code_FAILED_PRECONDITION, vtrpcpb.Code_ALREADY_EXISTS: @@ -1670,9 +1670,13 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin case vtrpcpb.Code_RESOURCE_EXHAUSTED: logMethod = logPoolFull.Errorf case vtrpcpb.Code_ABORTED: - logMethod = log.Warningf + logMethod = func(format string, args ...any) { + log.Warn(fmt.Sprintf(format, args...)) + } case vtrpcpb.Code_INVALID_ARGUMENT, vtrpcpb.Code_DEADLINE_EXCEEDED: - logMethod = log.Infof + logMethod = func(format string, args ...any) { + log.Info(fmt.Sprintf(format, args...)) + } } // If TerseErrors is on, strip the error message returned by MySQL and only @@ -1708,7 +1712,7 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin } if logMethod != nil { - logMethod(message) + logMethod("%s", message) } if logStats != nil { diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index 656fc35ae47..41df37072ba 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/http/httptest" "os" @@ -1682,85 +1683,43 @@ func TestQueryAsString(t *testing.T) { } type testLogger struct { - logsMu sync.Mutex - logs []string - - savedInfof func(format string, args ...any) - savedInfo func(args ...any) - savedErrorf func(format string, args ...any) - savedError func(args ...any) + handler *log.CaptureHandler + restore func() } func newTestLogger() *testLogger { - tl := &testLogger{ - savedInfof: log.Infof, - savedInfo: log.Info, - savedErrorf: log.Errorf, - savedError: log.Error, + handler := log.NewCaptureHandler() + restore := log.SetLogger(slog.New(handler)) + + return &testLogger{ + handler: handler, + restore: restore, } - tl.logsMu.Lock() - defer tl.logsMu.Unlock() - log.Infof = tl.recordInfof - log.Info = tl.recordInfo - log.Errorf = tl.recordErrorf - log.Error = tl.recordError - return tl } func (tl *testLogger) Close() { - tl.logsMu.Lock() - defer tl.logsMu.Unlock() - log.Infof = tl.savedInfof - log.Info = tl.savedInfo - log.Errorf = tl.savedErrorf - log.Error = tl.savedError -} - -func (tl *testLogger) recordInfof(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - tl.logsMu.Lock() - defer tl.logsMu.Unlock() - tl.logs = append(tl.logs, msg) - tl.savedInfof(msg) -} - -func (tl *testLogger) recordInfo(args ...any) { - msg := fmt.Sprint(args...) - tl.logsMu.Lock() - defer tl.logsMu.Unlock() - tl.logs = append(tl.logs, msg) - tl.savedInfo(msg) -} - -func (tl *testLogger) recordErrorf(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - tl.logsMu.Lock() - defer tl.logsMu.Unlock() - tl.logs = append(tl.logs, msg) - tl.savedErrorf(msg) -} - -func (tl *testLogger) recordError(args ...any) { - msg := fmt.Sprint(args...) 
- tl.logsMu.Lock() - defer tl.logsMu.Unlock() - tl.logs = append(tl.logs, msg) - tl.savedError(msg) + if tl.restore != nil { + tl.restore() + } } func (tl *testLogger) getLog(i int) string { - tl.logsMu.Lock() - defer tl.logsMu.Unlock() - if i < len(tl.logs) { - return tl.logs[i] + logs := tl.getLogs() + if i < len(logs) { + return logs[i] } - return fmt.Sprintf("ERROR: log %d/%d does not exist", i, len(tl.logs)) + + return fmt.Sprintf("ERROR: log %d/%d does not exist", i, len(logs)) } func (tl *testLogger) getLogs() []string { - tl.logsMu.Lock() - defer tl.logsMu.Unlock() - return tl.logs + records := tl.handler.Records() + logs := make([]string, 0, len(records)) + for _, record := range records { + logs = append(logs, record.Message) + } + + return logs } func TestHandleExecTabletError(t *testing.T) { @@ -2056,7 +2015,7 @@ func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { } // errors during failover aren't logged at all - require.Empty(t, tl.logs, "unexpected error log during failover") + require.Empty(t, tl.getLogs(), "unexpected error log during failover") } var aclJSON1 = `{ diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index 117ca1039b8..8d1e73c8299 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -320,7 +320,7 @@ func (throttler *Throttler) GetMetricsThreshold() float64 { // initThrottler initializes config func (throttler *Throttler) initConfig() { - log.Infof("Throttler: initializing config") + log.Info("Throttler: initializing config") throttler.configSettings = &config.ConfigurationSettings{ MySQLStore: config.MySQLConfigurationSettings{ @@ -336,7 +336,7 @@ func (throttler *Throttler) readThrottlerConfig(ctx context.Context) (*topodatap if ti, err := throttler.ts.GetTablet(ctx, throttler.tabletAlias); err == nil { throttler.tabletInfo.Store(ti) } else { - log.Errorf("Throttler: error reading tablet info: %v", err) + log.Error(fmt.Sprintf("Throttler: error reading tablet info: %v", err)) } } @@ -373,7 +373,7 @@ func (throttler *Throttler) normalizeThrottlerConfig(throttlerConfig *topodatapb func (throttler *Throttler) WatchSrvKeyspaceCallback(srvks *topodatapb.SrvKeyspace, err error) bool { if err != nil { if !topo.IsErrType(err, topo.Interrupted) && !errors.Is(err, context.Canceled) { - log.Errorf("WatchSrvKeyspaceCallback error: %v", err) + log.Error(fmt.Sprintf("WatchSrvKeyspaceCallback error: %v", err)) } return true } @@ -417,7 +417,7 @@ func (throttler *Throttler) convergeMetricThresholds() { // This may cause the throttler to be enabled/disabled, and of course it affects the throttling query/threshold. // Note: you should be holding the initMutex when calling this function. func (throttler *Throttler) applyThrottlerConfig(ctx context.Context, throttlerConfig *topodatapb.ThrottlerConfig) { - log.Infof("Throttler: applying topo config: %+v", throttlerConfig) + log.Info(fmt.Sprintf("Throttler: applying topo config: %+v", throttlerConfig)) throttler.customMetricsQuery.Store(throttlerConfig.CustomQuery) if throttlerConfig.Threshold > 0 || throttlerConfig.CustomQuery != "" { // We do not allow Threshold=0, unless there is a custom query. 
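The testLogger rewrite in the tabletserver tests above replaces the swapped-in function hooks with a capture handler. A minimal usage sketch, assuming log.NewCaptureHandler, log.SetLogger and the handler's Records method behave exactly as those test changes use them (SetLogger returns a restore function, and each record exposes a Message field); the test name and message are illustrative only.

package logcapture

import (
	"log/slog"
	"testing"

	"github.com/stretchr/testify/require"

	"vitess.io/vitess/go/vt/log"
)

func TestCapturesLogMessages(t *testing.T) {
	// Install a capturing handler and restore the previous logger when done.
	handler := log.NewCaptureHandler()
	restore := log.SetLogger(slog.New(handler))
	defer restore()

	log.Info("hello from the test")

	// Assert on the captured records rather than on intercepted printf hooks.
	records := handler.Records()
	require.Len(t, records, 1)
	require.Equal(t, "hello from the test", records[0].Message)
}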
@@ -520,10 +520,10 @@ func (throttler *Throttler) Enable() *sync.WaitGroup { defer throttler.enableMutex.Unlock() if wasEnabled := throttler.isEnabled.Swap(true); wasEnabled { - log.Infof("Throttler: already enabled") + log.Info("Throttler: already enabled") return nil } - log.Infof("Throttler: enabling") + log.Info("Throttler: enabling") wg := &sync.WaitGroup{} var ctx context.Context @@ -544,10 +544,10 @@ func (throttler *Throttler) Disable() bool { defer throttler.enableMutex.Unlock() if wasEnabled := throttler.isEnabled.Swap(false); !wasEnabled { - log.Infof("Throttler: already disabled") + log.Info("Throttler: already disabled") return false } - log.Infof("Throttler: disabling") + log.Info("Throttler: disabling") throttler.cancelEnableContext() return true @@ -567,7 +567,7 @@ func (throttler *Throttler) retryReadAndApplyThrottlerConfig(ctx context.Context for { if !throttler.IsOpen() { // Throttler is not open so no need to keep retrying. - log.Warningf("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") + log.Warn("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") return } @@ -575,7 +575,7 @@ func (throttler *Throttler) retryReadAndApplyThrottlerConfig(ctx context.Context defer requestCancel() throttlerConfig, err := throttler.readThrottlerConfig(requestCtx) if err == nil { - log.Infof("Throttler.retryReadAndApplyThrottlerConfig(): success reading throttler config: %+v", throttlerConfig) + log.Info(fmt.Sprintf("Throttler.retryReadAndApplyThrottlerConfig(): success reading throttler config: %+v", throttlerConfig)) // It's possible that during a retry-sleep, the throttler is closed and opened again, leading // to two (or more) instances of this goroutine. That's not a big problem; it's fine if all // attempt to read the throttler config; but we just want to ensure they don't step on each other @@ -590,11 +590,11 @@ func (throttler *Throttler) retryReadAndApplyThrottlerConfig(ctx context.Context }) return } - log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): error reading throttler config. Will retry in %v. Err=%+v", retryInterval, err) + log.Error(fmt.Sprintf("Throttler.retryReadAndApplyThrottlerConfig(): error reading throttler config. Will retry in %v. Err=%+v", retryInterval, err)) select { case <-ctx.Done(): // Throttler is not open so no need to keep retrying. - log.Infof("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") + log.Info("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") return case <-retryTicker.C: } @@ -603,17 +603,17 @@ func (throttler *Throttler) retryReadAndApplyThrottlerConfig(ctx context.Context // Open opens database pool and initializes the schema func (throttler *Throttler) Open() error { - log.Infof("Throttler: started execution of Open. Acquiring initMutex lock") + log.Info("Throttler: started execution of Open. 
Acquiring initMutex lock") throttler.initMutex.Lock() defer throttler.initMutex.Unlock() isOpen := throttler.isOpen.Swap(true) if isOpen { // already open - log.Infof("Throttler: throttler is already open") + log.Info("Throttler: throttler is already open") return nil } - log.Infof("Throttler: opening") + log.Info("Throttler: opening") var ctx context.Context ctx, throttler.cancelOpenContext = context.WithCancel(context.Background()) throttler.customMetricsQuery.Store("") @@ -629,13 +629,13 @@ func (throttler *Throttler) Open() error { // Close frees resources func (throttler *Throttler) Close() { - log.Infof("Throttler: started execution of Close. Acquiring initMutex lock") + log.Info("Throttler: started execution of Close. Acquiring initMutex lock") throttler.initMutex.Lock() - log.Infof("Throttler: acquired initMutex lock") + log.Info("Throttler: acquired initMutex lock") defer throttler.initMutex.Unlock() isOpen := throttler.isOpen.Swap(false) if !isOpen { - log.Infof("Throttler: throttler is not open") + log.Info("Throttler: throttler is not open") return } throttler.Disable() @@ -644,13 +644,13 @@ func (throttler *Throttler) Close() { // The below " != nil " checks are relevant to unit tests, where perhaps not all // fields are supplied. if throttler.pool != nil { - log.Infof("Throttler: closing pool") + log.Info("Throttler: closing pool") throttler.pool.Close() } if throttler.cancelOpenContext != nil { throttler.cancelOpenContext() } - log.Infof("Throttler: finished execution of Close") + log.Info("Throttler: finished execution of Close") } // requestHeartbeats sends a heartbeat lease request to the heartbeat writer. @@ -685,7 +685,7 @@ func (throttler *Throttler) stimulatePrimaryThrottler(ctx context.Context, tmCli req := &tabletmanagerdatapb.CheckThrottlerRequest{AppName: throttlerapp.ThrottlerStimulatorName.String()} _, err = tmClient.CheckThrottler(ctx, tablet.Tablet, req) if err != nil { - log.Errorf("stimulatePrimaryThrottler: %+v", err) + log.Error(fmt.Sprintf("stimulatePrimaryThrottler: %+v", err)) } return err } @@ -752,7 +752,7 @@ func (throttler *Throttler) Operate(ctx context.Context, wg *sync.WaitGroup) { }() // we do not flush throttler.throttledApps because this is data submitted by the user; the user expects the data to survive a disable+enable - defer log.Infof("Throttler: Operate terminated, tickers stopped") + defer log.Info("Throttler: Operate terminated, tickers stopped") for _, t := range tickers { defer t.Stop() // since we just started the tickers now, speed up the ticks by forcing an immediate tick @@ -782,11 +782,11 @@ func (throttler *Throttler) Operate(ctx context.Context, wg *sync.WaitGroup) { isLeader := throttler.isLeader.Swap(shouldBeLeader) transitionedIntoLeader := false if shouldBeLeader && !isLeader { - log.Infof("Throttler: transition into leadership") + log.Info("Throttler: transition into leadership") transitionedIntoLeader = true } if !shouldBeLeader && isLeader { - log.Infof("Throttler: transition out of leadership") + log.Info("Throttler: transition out of leadership") } if transitionedIntoLeader { @@ -1028,17 +1028,17 @@ func (throttler *Throttler) refreshInventory(ctx context.Context) error { addProbe := func(alias string, tablet *topodatapb.Tablet, scope base.Scope, mysqlSettings *config.MySQLConfigurationSettings, probes base.Probes) bool { for _, ignore := range mysqlSettings.IgnoreHosts { if strings.Contains(alias, ignore) { - log.Infof("Throttler: tablet ignored: %+v", alias) + log.Info(fmt.Sprintf("Throttler: tablet ignored: %+v", 
alias)) return false } } if scope != base.SelfScope { if alias == "" { - log.Errorf("Throttler: got empty alias for scope: %+v", scope) + log.Error(fmt.Sprintf("Throttler: got empty alias for scope: %+v", scope)) return false } if tablet == nil { - log.Errorf("Throttler: got nil tablet for alias: %v in scope: %+v", alias, scope) + log.Error(fmt.Sprintf("Throttler: got nil tablet for alias: %v in scope: %+v", alias, scope)) return false } } @@ -1115,7 +1115,7 @@ func (throttler *Throttler) refreshInventory(ctx context.Context) error { } go func() { if err := collect(); err != nil { - log.Errorf("refreshInventory: %+v", err) + log.Error(fmt.Sprintf("refreshInventory: %+v", err)) } }() return nil diff --git a/go/vt/vttablet/tabletserver/twopc.go b/go/vt/vttablet/tabletserver/twopc.go index 67cf3f894cf..642c8a285a3 100644 --- a/go/vt/vttablet/tabletserver/twopc.go +++ b/go/vt/vttablet/tabletserver/twopc.go @@ -180,7 +180,7 @@ func (tpc *TwoPC) Open(dbconfigs *dbconfigs.DBConfigs) error { defer conn.Close() tpc.readPool.Open(dbconfigs.AppWithDB(), dbconfigs.DbaWithDB(), dbconfigs.DbaWithDB()) tpc.initializeQueries() - log.Infof("TwoPC: Engine open succeeded") + log.Info("TwoPC: Engine open succeeded") return nil } @@ -272,14 +272,14 @@ func (tpc *TwoPC) ReadAllRedo(ctx context.Context) (prepared, failed []*tx.Prepa } st, err := row[1].ToCastInt64() if err != nil { - log.Errorf("Error parsing state for dtid %s: %v.", dtid, err) + log.Error(fmt.Sprintf("Error parsing state for dtid %s: %v.", dtid, err)) } switch st { case RedoStatePrepared: prepared = append(prepared, curTx) default: if st != RedoStateFailed { - log.Errorf("Unexpected state for dtid %s: %d. Treating it as a failure.", dtid, st) + log.Error(fmt.Sprintf("Unexpected state for dtid %s: %d. Treating it as a failure.", dtid, st)) } failed = append(failed, curTx) } @@ -488,11 +488,11 @@ func (tpc *TwoPC) ReadAllTransactions(ctx context.Context) ([]*tx.DistributedTx, // Just log on error and continue. The state will show up as UNKNOWN // on the display. 
if err != nil { - log.Errorf("Error parsing state for dtid %s: %v.", dtid, err) + log.Error(fmt.Sprintf("Error parsing state for dtid %s: %v.", dtid, err)) } protostate := querypb.TransactionState(st) if protostate < DTStatePrepare || protostate > DTStateCommit { - log.Errorf("Unexpected state for dtid %s: %v.", dtid, protostate) + log.Error(fmt.Sprintf("Unexpected state for dtid %s: %v.", dtid, protostate)) } curTx = &tx.DistributedTx{ Dtid: dtid, diff --git a/go/vt/vttablet/tabletserver/twopcz.go b/go/vt/vttablet/tabletserver/twopcz.go index 51ed457c679..ff402b94e5e 100644 --- a/go/vt/vttablet/tabletserver/twopcz.go +++ b/go/vt/vttablet/tabletserver/twopcz.go @@ -189,7 +189,7 @@ func twopczHandler(txe *DTExecutor, w http.ResponseWriter, r *http.Request) { w.Write(failedzHeader) for _, row := range failed { if err := failedzRow.Execute(w, row); err != nil { - log.Errorf("queryz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("queryz: couldn't execute template: %v", err)) } } w.Write(endTable) @@ -198,7 +198,7 @@ func twopczHandler(txe *DTExecutor, w http.ResponseWriter, r *http.Request) { w.Write(preparedzHeader) for _, row := range prepared { if err := preparedzRow.Execute(w, row); err != nil { - log.Errorf("queryz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("queryz: couldn't execute template: %v", err)) } } w.Write(endTable) @@ -207,7 +207,7 @@ func twopczHandler(txe *DTExecutor, w http.ResponseWriter, r *http.Request) { w.Write(distributedzHeader) for _, row := range distributed { if err := distributedzRow.Execute(w, row); err != nil { - log.Errorf("queryz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("queryz: couldn't execute template: %v", err)) } } w.Write(endTable) diff --git a/go/vt/vttablet/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go index e6c259164cb..4276e9bf039 100644 --- a/go/vt/vttablet/tabletserver/tx_engine.go +++ b/go/vt/vttablet/tabletserver/tx_engine.go @@ -166,7 +166,7 @@ func (te *TxEngine) transition(state txEngineState) { return } - log.Infof("TxEngine transition: %v", state) + log.Info(fmt.Sprintf("TxEngine transition: %v", state)) // When we are transitioning from read write state, we should close all transactions. if te.state == AcceptingReadAndWrite { @@ -215,7 +215,7 @@ func (te *TxEngine) redoPreparedTransactionsLocked() { if err := te.twoPC.Open(te.env.Config().DB); err != nil { te.env.Stats().InternalErrors.Add("TwopcOpen", 1) - log.Errorf("Could not open TwoPC engine: %v", err) + log.Error(fmt.Sprintf("Could not open TwoPC engine: %v", err)) return } @@ -230,26 +230,26 @@ func (te *TxEngine) redoPreparedTransactionsLocked() { if err := te.prepareFromRedo(); err != nil { te.env.Stats().InternalErrors.Add("TwopcResurrection", 1) - log.Errorf("Could not prepare transactions: %v", err) + log.Error(fmt.Sprintf("Could not prepare transactions: %v", err)) } } // Close will disregard common rules for when to kill transactions // and wait forever for transactions to wrap up func (te *TxEngine) Close() { - log.Infof("TxEngine - started Close. Acquiring stateLock lock") + log.Info("TxEngine - started Close. 
Acquiring stateLock lock") te.stateLock.Lock() - log.Infof("TxEngine - acquired stateLock") + log.Info("TxEngine - acquired stateLock") defer func() { te.state = NotServing te.stateLock.Unlock() }() if te.state == NotServing { - log.Infof("TxEngine - state is not serving already") + log.Info("TxEngine - state is not serving already") return } - log.Infof("TxEngine - starting shutdown") + log.Info("TxEngine - starting shutdown") te.shutdownLocked() log.Info("TxEngine: closed") } @@ -342,17 +342,17 @@ func (te *TxEngine) txFinish(transactionID int64, reason tx.ReleaseReason, f fun // the transactions are rolled back if they're not resolved // by that time. func (te *TxEngine) shutdownLocked() { - log.Infof("TxEngine - called shutdownLocked") + log.Info("TxEngine - called shutdownLocked") immediate := te.state != AcceptingReadAndWrite // Unlock, wait for all begin requests to complete, and relock. te.state = Transitioning te.stateLock.Unlock() - log.Infof("TxEngine - waiting for begin requests") + log.Info("TxEngine - waiting for begin requests") te.beginRequests.Wait() - log.Infof("TxEngine - acquiring state lock again") + log.Info("TxEngine - acquiring state lock again") te.stateLock.Lock() - log.Infof("TxEngine - state lock acquired again") + log.Info("TxEngine - state lock acquired again") poolEmpty := make(chan bool) rollbackDone := make(chan bool) @@ -393,28 +393,28 @@ func (te *TxEngine) shutdownLocked() { // It is important to note, that we aren't rolling back prepared transactions here. // That is happneing in the same place where we are killing queries. This will block // until either all prepared transactions get resolved or rollbacked. - log.Infof("TxEngine - waiting for empty txPool") + log.Info("TxEngine - waiting for empty txPool") te.txPool.WaitForEmpty() // If the goroutine is still running, signal that it can exit. close(poolEmpty) // Make sure the goroutine has returned. - log.Infof("TxEngine - making sure the goroutine has returned") + log.Info("TxEngine - making sure the goroutine has returned") <-rollbackDone // We stop the transaction watcher so late, because if the user isn't running // with any shutdown grace period, we still want the watcher to run while we are waiting // for resolving transactions. - log.Infof("TxEngine - stop transaction watcher") + log.Info("TxEngine - stop transaction watcher") te.stopTransactionWatcher() // Mark the prepared pool closed. 
- log.Infof("TxEngine - closing the txPool") + log.Info("TxEngine - closing the txPool") te.txPool.Close() - log.Infof("TxEngine - closing twoPC") + log.Info("TxEngine - closing twoPC") te.twoPC.Close() - log.Infof("TxEngine - closing the prepared pool") + log.Info("TxEngine - closing the prepared pool") te.preparedPool.Close() - log.Infof("TxEngine - finished shutdownLocked") + log.Info("TxEngine - finished shutdownLocked") } // prepareFromRedo replays and prepares the transactions @@ -469,7 +469,7 @@ func (te *TxEngine) prepareFromRedo() error { } te.txPool.AdjustLastID(maxID) - log.Infof("TwoPC: Prepared %d transactions, and registered %d failures.", preparedCounter, failedCounter) + log.Info(fmt.Sprintf("TwoPC: Prepared %d transactions, and registered %d failures.", preparedCounter, failedCounter)) return vterrors.Aggregate(allErrs) } @@ -520,7 +520,7 @@ func (te *TxEngine) prepareTx(ctx context.Context, preparedTx *tx.PreparedTx) (f func (te *TxEngine) checkErrorAndMarkFailed(ctx context.Context, dtid string, receivedErr error, metricName string) (fail bool) { state := RedoStateFailed if isRetryableError(receivedErr) { - log.Infof("retryable error for dtid: %s", dtid) + log.Info("retryable error for dtid: " + dtid) state = RedoStatePrepared } else { fail = true @@ -533,18 +533,18 @@ func (te *TxEngine) checkErrorAndMarkFailed(ctx context.Context, dtid string, re // Non-retryable Error: Along with message, update the state as RedoStateFailed. conn, _, _, err := te.txPool.Begin(ctx, &querypb.ExecuteOptions{}, false, 0, nil) if err != nil { - log.Errorf("markFailed: Begin failed for dtid %s: %v", dtid, err) + log.Error(fmt.Sprintf("markFailed: Begin failed for dtid %s: %v", dtid, err)) return } defer te.txPool.RollbackAndRelease(ctx, conn) if err = te.twoPC.UpdateRedo(ctx, conn, dtid, state, receivedErr.Error()); err != nil { - log.Errorf("markFailed: UpdateRedo failed for dtid %s: %v", dtid, err) + log.Error(fmt.Sprintf("markFailed: UpdateRedo failed for dtid %s: %v", dtid, err)) return } if _, err = te.txPool.Commit(ctx, conn); err != nil { - log.Errorf("markFailed: Commit failed for dtid %s: %v", dtid, err) + log.Error(fmt.Sprintf("markFailed: Commit failed for dtid %s: %v", dtid, err)) } return } @@ -599,7 +599,7 @@ func (te *TxEngine) startTransactionWatcher() { count, err := te.twoPC.CountUnresolvedRedo(ctx, time.Now().Add(-te.abandonAge)) if err != nil { te.env.Stats().InternalErrors.Add("RedoWatcherFail", 1) - log.Errorf("Error reading prepared transactions: %v", err) + log.Error(fmt.Sprintf("Error reading prepared transactions: %v", err)) } te.env.Stats().Unresolved.Set("ResourceManager", count) @@ -607,7 +607,7 @@ func (te *TxEngine) startTransactionWatcher() { count, err = te.twoPC.CountUnresolvedTransaction(ctx, time.Now().Add(-te.abandonAge)) if err != nil { te.env.Stats().InternalErrors.Add("TransactionWatcherFail", 1) - log.Errorf("Error reading unresolved transactions: %v", err) + log.Error(fmt.Sprintf("Error reading unresolved transactions: %v", err)) return } te.env.Stats().Unresolved.Set("MetadataManager", count) diff --git a/go/vt/vttablet/tabletserver/tx_pool.go b/go/vt/vttablet/tabletserver/tx_pool.go index cca44056608..ae6f57316c2 100644 --- a/go/vt/vttablet/tabletserver/tx_pool.go +++ b/go/vt/vttablet/tabletserver/tx_pool.go @@ -18,6 +18,7 @@ package tabletserver import ( "context" + "fmt" "strings" "sync" "time" @@ -129,7 +130,7 @@ func (tp *TxPool) Shutdown(ctx context.Context) { func (tp *TxPool) transactionKiller() { defer tp.env.LogError() for _, conn := 
range tp.scp.GetElapsedTimeout(vterrors.TxKillerRollback) { - log.Warningf("killing transaction (exceeded timeout: %v): %s", conn.timeout, conn.String(tp.env.Config().SanitizeLogMessages, tp.env.Environment().Parser())) + log.Warn(fmt.Sprintf("killing transaction (exceeded timeout: %v): %s", conn.timeout, conn.String(tp.env.Config().SanitizeLogMessages, tp.env.Environment().Parser()))) switch { case conn.IsTainted(): conn.Close() @@ -202,7 +203,7 @@ func (tp *TxPool) RollbackAndRelease(ctx context.Context, txConn *StatefulConnec defer txConn.Release(tx.TxRollback) rollbackError := tp.Rollback(ctx, txConn) if rollbackError != nil { - log.Errorf("tried to rollback, but failed with: %v", rollbackError.Error()) + log.Error(fmt.Sprintf("tried to rollback, but failed with: %v", rollbackError.Error())) } } diff --git a/go/vt/vttablet/tabletserver/txlimiter/tx_limiter.go b/go/vt/vttablet/tabletserver/txlimiter/tx_limiter.go index cd0f6b7e4df..dc144aba39e 100644 --- a/go/vt/vttablet/tabletserver/txlimiter/tx_limiter.go +++ b/go/vt/vttablet/tabletserver/txlimiter/tx_limiter.go @@ -117,12 +117,12 @@ func (txl *Impl) Get(immediate *querypb.VTGateCallerID, effective *vtrpcpb.Calle } if txl.dryRun { - log.Infof("TxLimiter: DRY RUN: user over limit: %s", key) + log.Info("TxLimiter: DRY RUN: user over limit: " + key) txl.rejectionsDryRun.Add(key, 1) return true } - log.Infof("TxLimiter: Over limit, rejecting transaction request for user: %s", key) + log.Info("TxLimiter: Over limit, rejecting transaction request for user: " + key) txl.rejections.Add(key, 1) return false } diff --git a/go/vt/vttablet/tabletserver/txlogz.go b/go/vt/vttablet/tabletserver/txlogz.go index e702aae0c5f..f018b3cc6af 100644 --- a/go/vt/vttablet/tabletserver/txlogz.go +++ b/go/vt/vttablet/tabletserver/txlogz.go @@ -112,7 +112,7 @@ func txlogzHandler(w http.ResponseWriter, req *http.Request, redactUIQuery bool) io.WriteString(w, ``) io.WriteString(w, err.Error()) io.WriteString(w, "") - log.Error(err) + log.Error(fmt.Sprint(err)) continue } // not all StatefulConnections contain transactions @@ -142,6 +142,6 @@ func writeTransactionData(w http.ResponseWriter, txc *StatefulConnection) { ColorLevel string }{txc, duration, level} if err := txlogzTmpl.Execute(w, tmplData); err != nil { - log.Errorf("txlogz: couldn't execute template: %v", err) + log.Error(fmt.Sprintf("txlogz: couldn't execute template: %v", err)) } } diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go index 2b7bb686891..be4d1a6e478 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go @@ -18,6 +18,7 @@ package txthrottler import ( "context" + "fmt" "math/rand/v2" "slices" "strings" @@ -86,7 +87,7 @@ const TxThrottlerName = "TransactionThrottler" func fetchKnownCells(ctx context.Context, topoServer *topo.Server, target *querypb.Target) []string { cells, err := topoServer.GetKnownCells(ctx) if err != nil { - log.Errorf("txThrottler: falling back to local cell due to error fetching cells from topology: %+v", err) + log.Error(fmt.Sprintf("txThrottler: falling back to local cell due to error fetching cells from topology: %+v", err)) cells = []string{target.Cell} } return cells @@ -175,13 +176,9 @@ func NewTxThrottler(env tabletenv.Env, topoServer *topo.Server) TxThrottler { config := env.Config() if config.EnableTxThrottler { if len(config.TxThrottlerHealthCheckCells) == 0 { - defer log.Infof("Initialized transaction throttler 
using tabletTypes: %+v, cellsFromTopo: true, topoRefreshInterval: %s, throttlerConfig: %q", - config.TxThrottlerTabletTypes, config.TxThrottlerTopoRefreshInterval, config.TxThrottlerConfig.Get(), - ) + defer log.Info(fmt.Sprintf("Initialized transaction throttler using tabletTypes: %+v, cellsFromTopo: true, topoRefreshInterval: %s, throttlerConfig: %q", config.TxThrottlerTabletTypes, config.TxThrottlerTopoRefreshInterval, config.TxThrottlerConfig.Get())) } else { - defer log.Infof("Initialized transaction throttler using tabletTypes: %+v, healthCheckCells: %+v, throttlerConfig: %q", - config.TxThrottlerTabletTypes, config.TxThrottlerHealthCheckCells, config.TxThrottlerConfig.Get(), - ) + defer log.Info(fmt.Sprintf("Initialized transaction throttler using tabletTypes: %+v, healthCheckCells: %+v, throttlerConfig: %q", config.TxThrottlerTabletTypes, config.TxThrottlerHealthCheckCells, config.TxThrottlerConfig.Get())) } } @@ -353,7 +350,7 @@ func (ts *txThrottlerStateImpl) healthChecksProcessor(topoServer *topo.Server, t return case <-cellsUpdateTicks: if err := ts.updateHealthCheckCells(topoServer, target); err != nil { - log.Errorf("txThrottler: failed to update cell list: %+v", err) + log.Error(fmt.Sprintf("txThrottler: failed to update cell list: %+v", err)) } case th := <-ts.healthCheckChan: ts.StatsUpdate(th) diff --git a/go/vt/vttablet/tabletserver/vstreamer/copy.go b/go/vt/vttablet/tabletserver/vstreamer/copy.go index daeb4349b90..4397a904b1c 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/copy.go +++ b/go/vt/vttablet/tabletserver/vstreamer/copy.go @@ -36,7 +36,7 @@ import ( func (uvs *uvstreamer) copy(ctx context.Context) error { for len(uvs.tablesToCopy) > 0 { tableName := uvs.tablesToCopy[0] - log.V(2).Infof("Copystate not empty starting catchupAndCopy on table %s", tableName) + log.Debug("Copystate not empty starting catchupAndCopy on table " + tableName) if err := uvs.catchupAndCopy(ctx, tableName); err != nil { uvs.vse.errorCounts.Add("Copy", 1) return err @@ -48,22 +48,22 @@ func (uvs *uvstreamer) copy(ctx context.Context) error { // first does a catchup for tables already fully or partially copied (upto last pk) func (uvs *uvstreamer) catchupAndCopy(ctx context.Context, tableName string) error { - log.Infof("catchupAndCopy for %s", tableName) + log.Info("catchupAndCopy for " + tableName) if !uvs.pos.IsZero() { if err := uvs.catchup(ctx); err != nil { - log.Infof("catchupAndCopy: catchup returned %v", err) + log.Info(fmt.Sprintf("catchupAndCopy: catchup returned %v", err)) uvs.vse.errorCounts.Add("Catchup", 1) return err } } - log.Infof("catchupAndCopy: before copyTable %s", tableName) + log.Info("catchupAndCopy: before copyTable " + tableName) uvs.fields = nil return uvs.copyTable(ctx, tableName) } // catchup on events for tables already fully or partially copied (upto last pk) until replication lag is small func (uvs *uvstreamer) catchup(ctx context.Context) error { - log.Infof("starting catchup ...") + log.Info("starting catchup ...") uvs.setReplicationLagSeconds(math.MaxInt64) ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -79,7 +79,7 @@ func (uvs *uvstreamer) catchup(ctx context.Context) error { uvs.setVs(vs) errch <- vs.Stream() uvs.setVs(nil) - log.Infof("catchup vs.stream returned with vs.pos %s", vs.pos.String()) + log.Info("catchup vs.stream returned with vs.pos " + vs.pos.String()) }() // Wait for catchup. 
@@ -89,7 +89,7 @@ func (uvs *uvstreamer) catchup(ctx context.Context) error { for { sbm := uvs.getReplicationLagSeconds() if sbm <= seconds { - log.Infof("Canceling context because lag is %d:%d", sbm, seconds) + log.Info(fmt.Sprintf("Canceling context because lag is %d:%d", sbm, seconds)) cancel() // Make sure vplayer returns before returning. <-errch @@ -118,11 +118,11 @@ func (uvs *uvstreamer) sendFieldEvent(ctx context.Context, gtid string, fieldEve Type: binlogdatapb.VEventType_FIELD, FieldEvent: fieldEvent, }} - log.V(2).Infof("Sending field event %v, gtid is %s", fieldEvent, gtid) + log.Debug(fmt.Sprintf("Sending field event %v, gtid is %s", fieldEvent, gtid)) uvs.send(evs) if err := uvs.setPosition(gtid, true); err != nil { - log.Infof("setPosition returned error %v", err) + log.Info(fmt.Sprintf("setPosition returned error %v", err)) return err } return nil @@ -170,7 +170,7 @@ func (uvs *uvstreamer) sendEventsForRows(ctx context.Context, tableName string, }) if err := uvs.send(evs); err != nil { - log.Infof("send returned error %v", err) + log.Info(fmt.Sprintf("send returned error %v", err)) return err } return nil @@ -184,7 +184,7 @@ func getLastPKFromQR(qr *querypb.QueryResult) []sqltypes.Value { var lastPK []sqltypes.Value r := sqltypes.Proto3ToResult(qr) if len(r.Rows) != 1 { - log.Errorf("unexpected lastpk input: %v", qr) + log.Error(fmt.Sprintf("unexpected lastpk input: %v", qr)) return nil } lastPK = r.Rows[0] @@ -213,13 +213,13 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { lastPK := getLastPKFromQR(uvs.plans[tableName].tablePK.Lastpk) filter := uvs.plans[tableName].rule.Filter - log.Infof("Starting copyTable for %s, Filter: %s, LastPK: %v", tableName, filter, lastPK) + log.Info(fmt.Sprintf("Starting copyTable for %s, Filter: %s, LastPK: %v", tableName, filter, lastPK)) uvs.sendTestEvent("Copy Start " + tableName) err := uvs.vse.StreamRows(ctx, filter, lastPK, func(rows *binlogdatapb.VStreamRowsResponse) error { select { case <-ctx.Done(): - log.Infof("Returning io.EOF in StreamRows") + log.Info("Returning io.EOF in StreamRows") return io.EOF default: } @@ -231,7 +231,7 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { if !uvs.pos.IsZero() && !uvs.pos.AtLeast(pos) { if err := uvs.fastForward(rows.Gtid); err != nil { uvs.setVs(nil) - log.Infof("fastForward returned error %v", err) + log.Info(fmt.Sprintf("fastForward returned error %v", err)) return err } uvs.setVs(nil) @@ -242,7 +242,7 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { return err } } else { - log.V(2).Infof("Not starting fastforward pos is %s, uvs.pos is %s, rows.gtid %s", pos, uvs.pos, rows.Gtid) + log.Debug(fmt.Sprintf("Not starting fastforward pos is %s, uvs.pos is %s, rows.gtid %s", pos, uvs.pos, rows.Gtid)) } // Store a copy of the fields and pkfields because the original will be cleared @@ -268,7 +268,7 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { EnumSetStringValues: true, } if err := uvs.sendFieldEvent(ctx, rows.Gtid, fieldEvent); err != nil { - log.Infof("sendFieldEvent returned error %v", err) + log.Info(fmt.Sprintf("sendFieldEvent returned error %v", err)) return err } // sendFieldEvent() sends a BEGIN event first. 
@@ -276,7 +276,7 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { } if len(rows.Rows) == 0 { - log.V(2).Infof("0 rows returned for table %s", tableName) + log.Debug("0 rows returned for table " + tableName) return nil } @@ -296,16 +296,16 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { Rows: []*querypb.Row{rows.Lastpk.CloneVT()}, }) qrLastPK := sqltypes.ResultToProto3(newLastPK) - log.V(2).Infof("Calling sendEventForRows with gtid %s", rows.Gtid) + log.Debug("Calling sendEventForRows with gtid " + rows.Gtid) if err := uvs.sendEventsForRows(ctx, tableName, rows, qrLastPK); err != nil { - log.Infof("sendEventsForRows returned error %v", err) + log.Info(fmt.Sprintf("sendEventsForRows returned error %v", err)) return err } // sendEventsForRows() sends a COMMIT event last. uvs.inTransaction = false uvs.setCopyState(tableName, qrLastPK) - log.V(2).Infof("NewLastPK: %v", qrLastPK) + log.Debug(fmt.Sprintf("NewLastPK: %v", qrLastPK)) return nil }, nil) if err != nil { @@ -315,12 +315,12 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { select { case <-ctx.Done(): - log.Infof("Context done: Copy of %v stopped at lastpk: %v", tableName, newLastPK) + log.Info(fmt.Sprintf("Context done: Copy of %v stopped at lastpk: %v", tableName, newLastPK)) return ctx.Err() default: } - log.Infof("Copy of %v finished at lastpk: %v", tableName, newLastPK) + log.Info(fmt.Sprintf("Copy of %v finished at lastpk: %v", tableName, newLastPK)) if err := uvs.copyComplete(tableName); err != nil { return err } @@ -332,7 +332,7 @@ func (uvs *uvstreamer) fastForward(stopPos string) error { defer func() { uvs.vse.vstreamerPhaseTimings.Record("fastforward", time.Now()) }() - log.Infof("starting fastForward from %s upto pos %s", replication.EncodePosition(uvs.pos), stopPos) + log.Info(fmt.Sprintf("starting fastForward from %s upto pos %s", replication.EncodePosition(uvs.pos), stopPos)) uvs.stopPos, _ = replication.DecodePosition(stopPos) vs := newVStreamer(uvs.ctx, uvs.cp, uvs.se, replication.EncodePosition(uvs.pos), "", uvs.filter, uvs.getVSchema(), uvs.throttlerApp, uvs.send2, "fastforward", uvs.vse, nil) uvs.setVs(vs) diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go index 2047e10a504..9579be6fc51 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go @@ -289,7 +289,7 @@ func (vse *Engine) StreamRows(ctx context.Context, query string, lastpk []sqltyp // Starting of the watcher has to be delayed till the first call to Stream // because this overhead should be incurred only if someone uses this feature. vse.watcherOnce.Do(vse.setWatch) - log.Infof("Streaming rows for query %s, lastpk: %s", query, lastpk) + log.Info(fmt.Sprintf("Streaming rows for query %s, lastpk: %s", query, lastpk)) // Create stream and add it to the map. rowStreamer, idx, err := func() (*rowStreamer, int, error) { @@ -333,7 +333,7 @@ func (vse *Engine) StreamTables(ctx context.Context, // Starting of the watcher is delayed till the first call to StreamTables // so that this overhead is incurred only if someone uses this feature. vse.watcherOnce.Do(vse.setWatch) - log.Infof("Streaming all tables") + log.Info("Streaming all tables") // Create stream and add it to the map. 
tableStreamer, idx, err := func() (*tableStreamer, int, error) { @@ -443,7 +443,7 @@ func (vse *Engine) setWatch() { case topo.IsErrType(err, topo.NoNode): v = nil default: - log.Errorf("Error fetching vschema: %v", err) + log.Error(fmt.Sprintf("Error fetching vschema: %v", err)) vse.vschemaErrors.Add(1) return true } @@ -451,7 +451,7 @@ func (vse *Engine) setWatch() { if v != nil { vschema = vindexes.BuildVSchema(v, vse.env.Environment().Parser()) if err != nil { - log.Errorf("Error building vschema: %v", err) + log.Error(fmt.Sprintf("Error building vschema: %v", err)) vse.vschemaErrors.Add(1) return true } @@ -467,7 +467,7 @@ func (vse *Engine) setWatch() { vschema: vschema, } b, _ := json.MarshalIndent(vschema, "", " ") - log.V(2).Infof("Updated vschema: %s", b) + log.Debug(fmt.Sprintf("Updated vschema: %s", b)) for _, s := range vse.streamers { s.SetVSchema(vse.lvschema) } @@ -507,8 +507,7 @@ func (vse *Engine) waitForMySQL(ctx context.Context, db dbconfigs.Connector, tab if hll <= mhll && rpl <= mrls { ready = true } else { - log.Infof("VStream source (%s) is not ready to stream more rows. Max InnoDB history length is %d and it was %d, max replication lag is %d (seconds) and it was %d. Will pause and retry.", - sourceEndpoint, mhll, hll, mrls, rpl) + log.Info(fmt.Sprintf("VStream source (%s) is not ready to stream more rows. Max InnoDB history length is %d and it was %d, max replication lag is %d (seconds) and it was %d. Will pause and retry.", sourceEndpoint, mhll, hll, mrls, rpl)) } return nil } diff --git a/go/vt/vttablet/tabletserver/vstreamer/local_vschema.go b/go/vt/vttablet/tabletserver/vstreamer/local_vschema.go index d3bbb4cce98..4764c34ecc8 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/local_vschema.go +++ b/go/vt/vttablet/tabletserver/vstreamer/local_vschema.go @@ -73,7 +73,7 @@ func (lvs *localVSchema) findTable(tablename string) (*vindexes.BaseTable, error table := ks.Tables[tablename] if table == nil { if schema.IsInternalOperationTableName(tablename) { - log.Infof("found internal table %s, ignoring in local vschema search", tablename) + log.Info(fmt.Sprintf("found internal table %s, ignoring in local vschema search", tablename)) } else { if ks.Error != nil { return nil, fmt.Errorf("table %s not found in keyspace %s (keyspace has error: %v)", tablename, lvs.keyspace, ks.Error) diff --git a/go/vt/vttablet/tabletserver/vstreamer/main_test.go b/go/vt/vttablet/tabletserver/vstreamer/main_test.go index a888e3ef606..9bf20a92c5f 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/main_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/main_test.go @@ -131,10 +131,10 @@ func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase, p // If position is 'current', we wait for a heartbeat to be // sure the vstreamer has started. 
if position == "current" { - log.Infof("Starting stream with current position") + log.Info("Starting stream with current position") expectLog(ctx, t, "current pos", ch, [][]string{{`gtid`, `type:OTHER`}}) } - log.Infof("Starting to run test cases") + log.Info("Starting to run test cases") for _, tcase := range testcases { switch input := tcase.input.(type) { case []string: @@ -151,7 +151,7 @@ func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase, p if evs, ok := <-ch; ok { t.Fatalf("unexpected evs: %v", evs) } - log.Infof("Last line of runCases") + log.Info("Last line of runCases") } func expectLog(ctx context.Context, t *testing.T, input any, ch <-chan []*binlogdatapb.VEvent, output [][]string) { @@ -212,7 +212,7 @@ func expectLog(ctx context.Context, t *testing.T, input any, ch <-chan []*binlog numEventsToMatch := len(evs) if len(wantset) != len(evs) { - log.Warningf("%v: evs\n%v, want\n%v, >> got length %d, wanted length %d", input, evs, wantset, len(evs), len(wantset)) + log.Warn(fmt.Sprintf("%v: evs\n%v, want\n%v, >> got length %d, wanted length %d", input, evs, wantset, len(evs), len(wantset))) if len(wantset) < len(evs) { numEventsToMatch = len(wantset) } @@ -276,7 +276,7 @@ func expectLog(ctx context.Context, t *testing.T, input any, ch <-chan []*binlog evs[i].EventGtid = "" want = env.RemoveAnyDeprecatedDisplayWidths(want) if got := fmt.Sprintf("%v", evs[i]); got != want { - log.Errorf("%v (%d): event:\n%q, want\n%q", input, i, got, want) + log.Error(fmt.Sprintf("%v (%d): event:\n%q, want\n%q", input, i, got, want)) t.Fatalf("%v (%d): event:\n%q, want\n%q", input, i, got, want) } } @@ -347,7 +347,7 @@ func vstream(ctx context.Context, t *testing.T, pos string, tablePKs []*binlogda timer := time.NewTimer(2 * time.Second) defer timer.Stop() - log.Infof("Received events: %v", evs) + log.Info(fmt.Sprintf("Received events: %v", evs)) select { case ch <- evs: case <-ctx.Done(): @@ -411,7 +411,7 @@ func setVSchema(t *testing.T, vschema string) { time.Sleep(10 * time.Millisecond) } if !updated { - log.Infof("vschema did not get updated") + log.Info("vschema did not get updated") t.Error("vschema did not get updated") } } diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go index 845ccb3cd09..a7d34ac361e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -502,11 +502,11 @@ func buildREPlan(env *vtenv.Environment, ti *Table, vschema *localVSchema, filte func buildTablePlan(env *vtenv.Environment, ti *Table, vschema *localVSchema, query string) (*Plan, error) { sel, fromTable, err := analyzeSelect(query, env.Parser()) if err != nil { - log.Errorf("%s", err.Error()) + log.Error(err.Error()) return nil, err } if fromTable.String() != ti.Name { - log.Errorf("unsupported: select expression table %v does not match the table entry name %s", sqlparser.String(fromTable), ti.Name) + log.Error(fmt.Sprintf("unsupported: select expression table %v does not match the table entry name %s", sqlparser.String(fromTable), ti.Name)) return nil, fmt.Errorf("unsupported: select expression table %v does not match the table entry name %s", sqlparser.String(fromTable), ti.Name) } @@ -515,11 +515,11 @@ func buildTablePlan(env *vtenv.Environment, ti *Table, vschema *localVSchema, qu env: env, } if err := plan.analyzeWhere(vschema, sel.Where); err != nil { - log.Errorf("%s", err.Error()) + log.Error(err.Error()) return nil, err } if err := 
plan.analyzeExprs(vschema, sel.GetColumns()); err != nil { - log.Errorf("%s", err.Error()) + log.Error(err.Error()) return nil, err } @@ -946,7 +946,7 @@ func (plan *Plan) analyzeExpr(vschema *localVSchema, selExpr sqlparser.SelectExp Field: field, }, nil default: - log.Infof("Unsupported expression: %v", inner) + log.Info(fmt.Sprintf("Unsupported expression: %v", inner)) return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(aliased.Expr)) } } diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go index 0ed4ff448b8..15f1d3abc51 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go @@ -171,7 +171,7 @@ func (rs *rowStreamer) buildPlan() error { // filtering will work. rs.plan, err = buildTablePlan(rs.se.Environment(), ti, rs.vschema, rs.query) if err != nil { - log.Errorf("Failed to build table plan for %s in row streamer: %v", ti.Name, err) + log.Error(fmt.Sprintf("Failed to build table plan for %s in row streamer: %v", ti.Name, err)) return err } @@ -349,7 +349,7 @@ func (rs *rowStreamer) streamQuery(send func(*binlogdatapb.VStreamRowsResponse) rotatedLog bool err error ) - log.Infof("Streaming rows for query: %s\n", rs.sendQuery) + log.Info(fmt.Sprintf("Streaming rows for query: %s\n", rs.sendQuery)) if rs.mode == RowStreamerModeSingleTable { gtid, rotatedLog, err = rs.conn.streamWithSnapshot(rs.ctx, rs.plan.Table.Name, rs.sendQuery) if err != nil { @@ -415,7 +415,7 @@ func (rs *rowStreamer) streamQuery(send func(*binlogdatapb.VStreamRowsResponse) logger := logutil.NewThrottledLogger(rs.vse.GetTabletInfo(), throttledLoggerInterval) for { if rs.ctx.Err() != nil { - log.Infof("Row stream ended because of ctx.Done") + log.Info("Row stream ended because of ctx.Done") return fmt.Errorf("row stream ended: %v", rs.ctx.Err()) } diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go index 7c631d86dba..abd38db5b96 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go @@ -678,7 +678,7 @@ func checkStream(t *testing.T, query string, lastpk []sqltypes.Value, wantQuery ch <- errors.New("stream gtid is empty") } if got := engine.rowStreamers[engine.streamIdx-1].sendQuery; got != wantQuery { - log.Infof("Got: %v", got) + log.Info(fmt.Sprintf("Got: %v", got)) ch <- fmt.Errorf("query sent:\n%v, want\n%v", got, wantQuery) } } diff --git a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go index 6104eb39a8f..992cd6e9c29 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go +++ b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go @@ -79,8 +79,7 @@ func (conn *snapshotConn) streamWithSnapshot(ctx context.Context, table, query s if rotatedLog, err = conn.limitOpenBinlogSize(); err != nil { // This is a best effort operation meant to lower overhead and improve performance. // Thus it should not be required, nor cause the operation to fail. 
- log.Warningf("Failed in attempt to potentially flush binary logs in order to lessen overhead and improve performance of a VStream using query %q: %v", - query, err) + log.Warn(fmt.Sprintf("Failed in attempt to potentially flush binary logs in order to lessen overhead and improve performance of a VStream using query %q: %v", query, err)) } gtid, err = conn.startSnapshot(ctx, table) @@ -103,7 +102,7 @@ func (conn *snapshotConn) startSnapshot(ctx context.Context, table string) (gtid defer func() { _, err := lockConn.ExecuteFetch("unlock tables", 0, false) if err != nil { - log.Warning("Unlock tables (%s) failed: %v", table, err) + log.Warn(fmt.Sprintf("Unlock tables (%s) failed: %v", table, err)) } lockConn.Close() }() @@ -111,7 +110,7 @@ func (conn *snapshotConn) startSnapshot(ctx context.Context, table string) (gtid tableName := sqlparser.String(sqlparser.NewIdentifierCS(table)) if _, err := lockConn.ExecuteFetch(fmt.Sprintf("lock tables %s read", tableName), 1, false); err != nil { - log.Warningf("Error locking table %s to read: %v", tableName, err) + log.Warn(fmt.Sprintf("Error locking table %s to read: %v", tableName, err)) return "", err } mpos, err := lockConn.PrimaryPosition() @@ -199,12 +198,12 @@ func (conn *snapshotConn) startSnapshotAllTables(ctx context.Context) (gtid stri defer func() { _, err := lockConn.ExecuteFetch("unlock tables", 0, false) if err != nil { - log.Warning("Unlock tables failed: %v", err) + log.Warn(fmt.Sprintf("Unlock tables failed: %v", err)) } lockConn.Close() }() - log.Infof("Locking all tables") + log.Info("Locking all tables") if _, err := lockConn.ExecuteFetch("FLUSH TABLES WITH READ LOCK", 1, false); err != nil { attemptExplicitTablesLocks := false if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERAccessDeniedError { @@ -214,7 +213,7 @@ func (conn *snapshotConn) startSnapshotAllTables(ctx context.Context) (gtid stri // efficient, and make a huge query, but still better than nothing. 
attemptExplicitTablesLocks = true } - log.Infof("Error locking all tables") + log.Info("Error locking all tables") if !attemptExplicitTablesLocks { return "", err } @@ -238,11 +237,11 @@ func (conn *snapshotConn) startSnapshotAllTables(ctx context.Context) (gtid stri if len(lockClauses) > 0 { query := "lock tables " + strings.Join(lockClauses, ",") if _, err := lockConn.ExecuteFetch(query, 1, false); err != nil { - log.Error(vterrors.Wrapf(err, "explicitly locking all %v tables", len(lockClauses))) + log.Error(fmt.Sprint(vterrors.Wrapf(err, "explicitly locking all %v tables", len(lockClauses)))) return "", err } } else { - log.Infof("explicit lock tables: no tables found") + log.Info("explicit lock tables: no tables found") } } mpos, err := lockConn.PrimaryPosition() diff --git a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go index d4ddba6c431..c59a37c3d16 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go @@ -118,8 +118,7 @@ func (ts *tableStreamer) Stream() error { if _, err := conn.ExecuteFetch(fmt.Sprintf("set @@session.net_write_timeout = %v", ts.config.NetWriteTimeout), 1, false); err != nil { return err } - log.Infof("TableStreamer Stream() started with net read_timeout: %v, net write_timeout: %v", - ts.config.NetReadTimeout, ts.config.NetWriteTimeout) + log.Info(fmt.Sprintf("TableStreamer Stream() started with net read_timeout: %v, net write_timeout: %v", ts.config.NetReadTimeout, ts.config.NetWriteTimeout)) rs, err := conn.ExecuteFetch("show full tables", -1, true) if err != nil { @@ -132,21 +131,21 @@ func (ts *tableStreamer) Stream() error { continue } if schema2.IsInternalOperationTableName(tableName) { - log.Infof("Skipping internal table %s", tableName) + log.Info("Skipping internal table " + tableName) continue } ts.tables = append(ts.tables, tableName) } - log.Infof("Found %d tables to stream: %s", len(ts.tables), strings.Join(ts.tables, ", ")) + log.Info(fmt.Sprintf("Found %d tables to stream: %s", len(ts.tables), strings.Join(ts.tables, ", "))) for _, tableName := range ts.tables { - log.Infof("Streaming table %s", tableName) + log.Info("Streaming table " + tableName) if err := ts.streamTable(ts.ctx, tableName); err != nil { - log.Errorf("Streaming table %s failed: %v", tableName, err) + log.Error(fmt.Sprintf("Streaming table %s failed: %v", tableName, err)) return err } - log.Infof("Finished streaming table %s", tableName) + log.Info("Finished streaming table " + tableName) } - log.Infof("Finished streaming %d tables", len(ts.tables)) + log.Info(fmt.Sprintf("Finished streaming %d tables", len(ts.tables))) return nil } diff --git a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go index 5cf336994ad..33adcfee35a 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go +++ b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go @@ -67,12 +67,12 @@ func init() { panic("could not parse MySQL version: " + err.Error()) } MySQLVersion = fmt.Sprintf("%d.%d.%d", mv.Major, mv.Minor, mv.Patch) - log.Infof("MySQL version: %s", MySQLVersion) + log.Info("MySQL version: " + MySQLVersion) CollationEnv = collations.NewEnvironment(MySQLVersion) // utf8mb4_general_ci is the default for MySQL 5.7 and // utf8mb4_0900_ai_ci is the default for MySQL 8.0. 
DefaultCollationID = CollationEnv.DefaultConnectionCharset() - log.Infof("Default collation ID: %d", DefaultCollationID) + log.Info(fmt.Sprintf("Default collation ID: %d", DefaultCollationID)) } // Env contains all the env vars for a test against a mysql instance. diff --git a/go/vt/vttablet/tabletserver/vstreamer/utils.go b/go/vt/vttablet/tabletserver/vstreamer/utils.go index 9597f80c07c..2c31b33f975 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/utils.go +++ b/go/vt/vttablet/tabletserver/vstreamer/utils.go @@ -17,6 +17,8 @@ limitations under the License. package vstreamer import ( + "fmt" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vterrors" vttablet "vitess.io/vitess/go/vt/vttablet/common" @@ -30,7 +32,7 @@ func GetVReplicationConfig(options *binlogdatapb.VStreamOptions) (*vttablet.VRep } config, err := vttablet.NewVReplicationConfig(options.ConfigOverrides) if err != nil { - log.Errorf("Error parsing VReplicationConfig: %v", err) + log.Error(fmt.Sprintf("Error parsing VReplicationConfig: %v", err)) return nil, vterrors.Wrapf(err, "failed to parse VReplicationConfig") } return config, nil diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go index b5f5774b343..3ed7ae484e9 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go @@ -116,7 +116,7 @@ func newUVStreamer(ctx context.Context, vse *Engine, cp dbconfigs.Connector, se } err := send(evs) if err != nil { - log.Infof("uvstreamer replicate send() returned with err %v", err) + log.Info(fmt.Sprintf("uvstreamer replicate send() returned with err %v", err)) } return err } @@ -256,7 +256,7 @@ func getQuery(tableName string, filter string) string { } func (uvs *uvstreamer) Cancel() { - log.Infof("uvstreamer context is being cancelled") + log.Info("uvstreamer context is being cancelled") uvs.cancel() } @@ -342,27 +342,27 @@ func (uvs *uvstreamer) send2(evs []*binlogdatapb.VEvent) error { } err := uvs.send(evs2) if err != nil && err != io.EOF { - log.Infof("uvstreamer catchup/fastforward send() returning with send error %v", err) + log.Info(fmt.Sprintf("uvstreamer catchup/fastforward send() returning with send error %v", err)) return err } for _, ev := range evs2 { if ev.Type == binlogdatapb.VEventType_GTID { uvs.pos, _ = replication.DecodePosition(ev.Gtid) if !uvs.stopPos.IsZero() && uvs.pos.AtLeast(uvs.stopPos) { - log.Infof("Reached stop position %v, returning io.EOF", uvs.stopPos) + log.Info(fmt.Sprintf("Reached stop position %v, returning io.EOF", uvs.stopPos)) err = io.EOF } } } if err != nil { - log.Infof("uvstreamer catchup/fastforward returning with EOF error %v", err) + log.Info(fmt.Sprintf("uvstreamer catchup/fastforward returning with EOF error %v", err)) uvs.vse.errorCounts.Add("Send", 1) } return err } func (uvs *uvstreamer) sendEventsForCurrentPos() error { - log.Infof("sendEventsForCurrentPos") + log.Info("sendEventsForCurrentPos") evs := []*binlogdatapb.VEvent{{ Type: binlogdatapb.VEventType_GTID, Gtid: replication.EncodePosition(uvs.pos), @@ -444,7 +444,7 @@ func (uvs *uvstreamer) Stream() error { if len(uvs.plans) > 0 { log.Info("TablePKs is not nil: starting vs.copy()") if err := uvs.copy(uvs.ctx); err != nil { - log.Infof("uvstreamer.Stream() copy returned with err %s", err) + log.Info(fmt.Sprintf("uvstreamer.Stream() copy returned with err %s", err)) uvs.vse.errorCounts.Add("Copy", 1) return err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go 
b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go index eceeb40f9a9..55065c8a659 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go @@ -165,7 +165,7 @@ func TestVStreamCopyFilterValidations(t *testing.T) { testCases = append(testCases, &TestCase{[]*binlogdatapb.Rule{{Match: "/x.*"}}, nil, []string{""}, "stream needs a position or a table to copy"}) for _, tc := range testCases { - log.Infof("Running %v", tc.rules) + log.Info(fmt.Sprintf("Running %v", tc.rules)) testFilter(tc.rules, tc.tablePKs, tc.expected, tc.expectedError) } } @@ -216,7 +216,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { log.Info("Inserting row for fast forward to find, locking t2") conn.ExecuteFetch("lock tables t2 write", 1, false) insertRow(t, "t1", 1, numInitialRows+2) - log.Infof("Position after second insert into t1: %s", primaryPosition(t)) + log.Info("Position after second insert into t1: " + primaryPosition(t)) conn.ExecuteFetch("unlock tables", 1, false) log.Info("Inserted row for fast forward to find, unlocked tables") } @@ -230,7 +230,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { conn.ExecuteFetch("lock tables t3 write", 1, false) insertRow(t, "t1", 1, numInitialRows+3) insertRow(t, "t2", 2, numInitialRows+2) - log.Infof("Position after third insert into t1: %s", primaryPosition(t)) + log.Info("Position after third insert into t1: " + primaryPosition(t)) conn.ExecuteFetch("unlock tables", 1, false) log.Info("Inserted rows for fast forward to find, unlocked tables") } @@ -264,14 +264,14 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { var lastRowEventSeen bool callbacks["ROW.*t3.*13390"] = func() { - log.Infof("Saw last row event") + log.Info("Saw last row event") lastRowEventSeen = true } callbacks["COMMIT"] = func() { - log.Infof("Got commit, lastRowSeen is %t", lastRowEventSeen) + log.Info(fmt.Sprintf("Got commit, lastRowSeen is %t", lastRowEventSeen)) if lastRowEventSeen { - log.Infof("Found last row event, canceling context") + log.Info("Found last row event, canceling context") cancel() } } @@ -283,7 +283,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { printAllEvents("Timed out") t.Fatal("Timed out waiting for events") case <-ctx.Done(): - log.Infof("Received context.Done, ending test") + log.Info("Received context.Done, ending test") } muAllEvents.Lock() defer muAllEvents.Unlock() @@ -291,7 +291,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { printAllEvents(fmt.Sprintf("Received %d events, expected %d", len(allEvents), numExpectedEvents)) t.Fatalf("Received %d events, expected %d", len(allEvents), numExpectedEvents) } else { - log.Infof("Successfully received %d events", numExpectedEvents) + log.Info(fmt.Sprintf("Successfully received %d events", numExpectedEvents)) } validateReceivedEvents(t) validateMetrics(t) @@ -394,7 +394,7 @@ func initTables(t *testing.T, tables []string) { "commit", } env.Mysqld.ExecuteSuperQueryList(ctx, queries) - log.Infof("Position after first insert into t1 and t2: %s", primaryPosition(t)) + log.Info("Position after first insert into t1 and t2: " + primaryPosition(t)) } } } @@ -410,7 +410,7 @@ func initTables(t *testing.T, tables []string) { "commit", } env.Mysqld.ExecuteSuperQueryList(ctx, queries) - log.Infof("Position after insert into t1 and t2 after t2 complete: %s", primaryPosition(t)) + log.Info("Position after insert into t1 and t2 after t2 complete: " + primaryPosition(t)) } positions["afterInitialInsert"] = primaryPosition(t) } 
@@ -421,7 +421,7 @@ func initialize(t *testing.T) { positions = make(map[string]string) initTables(t, testState.tables) callbacks["gtid.*"+positions["afterInitialInsert"]] = func() { - log.Infof("Callback: afterInitialInsert") + log.Info("Callback: afterInitialInsert") } } @@ -447,9 +447,9 @@ func insertRow(t *testing.T, table string, idx int, id int) { } func printAllEvents(msg string) { - log.Errorf("%s: Received %d events", msg, len(allEvents)) + log.Error(fmt.Sprintf("%s: Received %d events", msg, len(allEvents))) for i, ev := range allEvents { - log.Errorf("%d:\t%s", i, ev) + log.Error(fmt.Sprintf("%d:\t%s", i, ev)) } } diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index 33171c16227..9a6c59cea4c 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -192,7 +192,7 @@ func (vs *vstreamer) Stream() error { vs.vse.vstreamerCount.Add(-1) }() vs.vse.vstreamersCreated.Add(1) - log.Infof("Starting Stream() with startPos %s", vs.startPos) + log.Info("Starting Stream() with startPos " + vs.startPos) pos, err := replication.DecodePosition(vs.startPos) if err != nil { vs.vse.errorCounts.Add("StreamRows", 1) @@ -643,7 +643,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent, bufferAndTransmit func(vev return nil, nil } vs.plans[id] = nil - log.Infof("table map changed: id %d for %s has changed to %s", id, plan.Table.Name, tm.Name) + log.Info(fmt.Sprintf("table map changed: id %d for %s has changed to %s", id, plan.Table.Name, tm.Name)) } // The database connector `vs.cp` points to the keyspace's database. @@ -921,7 +921,7 @@ func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, er st, err := vs.se.GetTableForPos(vs.ctx, sqlparser.NewIdentifierCS(tm.Name), replication.EncodePosition(vs.pos)) if err != nil { if vs.filter.FieldEventMode == binlogdatapb.Filter_ERR_ON_MISMATCH { - log.Infof("No schema found for table %s", tm.Name) + log.Info("No schema found for table " + tm.Name) return nil, fmt.Errorf("unknown table %v in schema", tm.Name) } return fields, nil @@ -929,7 +929,7 @@ func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, er if len(st.Fields) < len(tm.Types) { if vs.filter.FieldEventMode == binlogdatapb.Filter_ERR_ON_MISMATCH { - log.Infof("Cannot determine columns for table %s", tm.Name) + log.Info("Cannot determine columns for table " + tm.Name) return nil, fmt.Errorf("cannot determine table columns for %s: event has %v, schema has %v", tm.Name, tm.Types, st.Fields) } return fields, nil @@ -1212,8 +1212,7 @@ func (vs *vstreamer) getValues(plan *streamerPlan, data []byte, } value, l, err := mysqlbinlog.CellValue(data, pos, plan.TableMap.Types[colNum], plan.TableMap.Metadata[colNum], plan.Table.Fields[colNum], partialJSON) if err != nil { - log.Errorf("extractRowAndFilter: %s, table: %s, colNum: %d, fields: %+v, current values: %+v", - err, plan.Table.Name, colNum, plan.Table.Fields, values) + log.Error(fmt.Sprintf("extractRowAndFilter: %s, table: %s, colNum: %d, fields: %+v, current values: %+v", err, plan.Table.Name, colNum, plan.Table.Fields, values)) return nil, nil, false, vterrors.Wrapf(err, "failed to extract row's value for column %s from binlog event", plan.Table.Fields[colNum].Name) } @@ -1362,9 +1361,9 @@ func wrapError(err error, stopPos replication.Position, vse *Engine) error { vse.vstreamersEndedWithErrors.Add(1) vse.errorCounts.Add("StreamEnded", 1) err = fmt.Errorf("stream (at 
source tablet) error @ (including the GTID we failed to process) %v: %v", stopPos, err) - log.Error(err) + log.Error(fmt.Sprint(err)) return err } - log.Infof("stream (at source tablet) ended @ (including the GTID we failed to process) %v", stopPos) + log.Info(fmt.Sprintf("stream (at source tablet) ended @ (including the GTID we failed to process) %v", stopPos)) return nil } diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go index 9c673699a35..656de4ee767 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go @@ -550,9 +550,9 @@ func TestVStreamCopySimpleFlow(t *testing.T) { } ts.Init() defer ts.Close() - log.Infof("Pos before bulk insert: %s", primaryPosition(t)) + log.Info("Pos before bulk insert: " + primaryPosition(t)) insertSomeRows(t, 10) - log.Infof("Pos after bulk insert: %s", primaryPosition(t)) + log.Info("Pos after bulk insert: " + primaryPosition(t)) ctx := context.Background() qr, err := env.Mysqld.FetchSuperQuery(ctx, "SELECT count(*) as cnt from t1, t2 where t1.id11 = t2.id21") @@ -643,7 +643,7 @@ func TestVStreamCopySimpleFlow(t *testing.T) { } runCases(t, filter, testcases, "vscopy", tablePKs) - log.Infof("Pos at end of test: %s", primaryPosition(t)) + log.Info("Pos at end of test: " + primaryPosition(t)) } func TestVStreamCopyWithDifferentFilters(t *testing.T) { @@ -740,7 +740,7 @@ func TestVStreamCopyWithDifferentFilters(t *testing.T) { allEvents = append(allEvents, ev) } if len(allEvents) == len(expectedEvents) { - log.Infof("Got %d events as expected", len(allEvents)) + log.Info(fmt.Sprintf("Got %d events as expected", len(allEvents))) for i, ev := range allEvents { ev.Timestamp = 0 switch ev.Type { diff --git a/go/vt/vttablet/tmclient/rpc_client_api.go b/go/vt/vttablet/tmclient/rpc_client_api.go index 5a61f5d3d1d..c0440629805 100644 --- a/go/vt/vttablet/tmclient/rpc_client_api.go +++ b/go/vt/vttablet/tmclient/rpc_client_api.go @@ -18,6 +18,8 @@ package tmclient import ( "context" + "fmt" + "os" "time" "github.com/spf13/pflag" @@ -314,7 +316,8 @@ var tabletManagerClientFactories = make(map[string]TabletManagerClientFactory) // TabletManagerClient implementations. Should be called on init(). func RegisterTabletManagerClientFactory(name string, factory TabletManagerClientFactory) { if _, ok := tabletManagerClientFactories[name]; ok { - log.Fatalf("RegisterTabletManagerClient %s already exists", name) + log.Error(fmt.Sprintf("RegisterTabletManagerClient %s already exists", name)) + os.Exit(1) } tabletManagerClientFactories[name] = factory } @@ -324,7 +327,8 @@ func RegisterTabletManagerClientFactory(name string, factory TabletManagerClient func NewTabletManagerClient() TabletManagerClient { f, ok := tabletManagerClientFactories[tabletManagerProtocol] if !ok { - log.Exitf("No TabletManagerProtocol registered with name %s", tabletManagerProtocol) + log.Error("No TabletManagerProtocol registered with name " + tabletManagerProtocol) + os.Exit(1) } return f() diff --git a/go/vt/vttablet/tmclienttest/tmclienttest.go b/go/vt/vttablet/tmclienttest/tmclienttest.go index 0b207569f3f..bc1d30b3d96 100644 --- a/go/vt/vttablet/tmclienttest/tmclienttest.go +++ b/go/vt/vttablet/tmclienttest/tmclienttest.go @@ -17,6 +17,7 @@ limitations under the License. 
package tmclienttest import ( + "fmt" "os" "github.com/spf13/pflag" @@ -53,13 +54,13 @@ func SetProtocol(name string, protocol string) (reset func()) { case nil: reset = func() { SetProtocol(name, oldVal) } default: - log.Errorf("failed to get string value for flag %q: %v", tmclientProtocolFlagName, err) + log.Error(fmt.Sprintf("failed to get string value for flag %q: %v", tmclientProtocolFlagName, err)) reset = func() {} } if err := pflag.Set(tmclientProtocolFlagName, protocol); err != nil { msg := "failed to set flag %q to %q: %v" - log.Errorf(msg, tmclientProtocolFlagName, protocol, err) + log.Error(fmt.Sprintf(msg, tmclientProtocolFlagName, protocol, err)) reset = func() {} } diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index c7f670d0b88..25d6d24b66c 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -343,14 +343,14 @@ func (db *LocalCluster) Setup() error { } } - log.Infof("LocalCluster environment: %+v", db.Env) + log.Info(fmt.Sprintf("LocalCluster environment: %+v", db.Env)) // Set up topo manager if we are using a remote topo server if db.ExternalTopoImplementation != "" { db.topo = db.Env.TopoManager(db.ExternalTopoImplementation, db.ExternalTopoGlobalServerAddress, db.ExternalTopoGlobalRoot, db.Topology) - log.Infof("Initializing Topo Manager: %+v", db.topo) + log.Info(fmt.Sprintf("Initializing Topo Manager: %+v", db.topo)) if err := db.topo.Setup(); err != nil { - log.Errorf("Failed to set up Topo Manager: %v", err) + log.Error(fmt.Sprintf("Failed to set up Topo Manager: %v", err)) return err } } @@ -363,11 +363,11 @@ func (db *LocalCluster) Setup() error { initializing := !db.PersistentMode || !dirExist(db.mysql.TabletDir()) if initializing { - log.Infof("Initializing MySQL Manager (%T)...", db.mysql) + log.Info(fmt.Sprintf("Initializing MySQL Manager (%T)...", db.mysql)) if err := db.mysql.Setup(); err != nil { - log.Errorf("Mysqlctl failed to start: %s", err) + log.Error(fmt.Sprintf("Mysqlctl failed to start: %s", err)) if err, ok := err.(*exec.ExitError); ok { - log.Errorf("stderr: %s", err.Stderr) + log.Error(fmt.Sprintf("stderr: %s", err.Stderr)) } return err } @@ -376,26 +376,26 @@ func (db *LocalCluster) Setup() error { return err } } else { - log.Infof("Starting MySQL Manager (%T)...", db.mysql) + log.Info(fmt.Sprintf("Starting MySQL Manager (%T)...", db.mysql)) if err := db.mysql.Start(); err != nil { - log.Errorf("Mysqlctl failed to start: %s", err) + log.Error(fmt.Sprintf("Mysqlctl failed to start: %s", err)) if err, ok := err.(*exec.ExitError); ok { - log.Errorf("stderr: %s", err.Stderr) + log.Error(fmt.Sprintf("stderr: %s", err.Stderr)) } return err } } mycfg, _ := json.Marshal(db.mysql.Params("")) - log.Infof("MySQL up: %s", mycfg) + log.Info(fmt.Sprintf("MySQL up: %s", mycfg)) if !db.OnlyMySQL { - log.Infof("Starting vtcombo...") + log.Info("Starting vtcombo...") db.vt, _ = VtcomboProcess(db.Env, &db.Config, db.mysql) if err := db.vt.WaitStart(); err != nil { return err } - log.Infof("vtcombo up: %s", db.vt.Address()) + log.Info("vtcombo up: " + db.vt.Address()) } if initializing { @@ -437,9 +437,9 @@ func (db *LocalCluster) TearDown() error { if err := db.mysql.TearDown(); err != nil { errors = append(errors, fmt.Sprintf("mysql: %s", err)) - log.Errorf("failed to shutdown MySQL: %s", err) + log.Error(fmt.Sprintf("failed to shutdown MySQL: %s", err)) if err, ok := err.(*exec.ExitError); ok { - log.Errorf("stderr: %s", err.Stderr) + log.Error(fmt.Sprintf("stderr: %s", err.Stderr)) } } @@ -582,7 +582,7 @@ 
func (db *LocalCluster) Execute(sql []string, dbname string) error { } for _, cmd := range sql { - log.Infof("Execute(%s): \"%s\"", dbname, cmd) + log.Info(fmt.Sprintf("Execute(%s): \"%s\"", dbname, cmd)) _, err := conn.ExecuteFetch(cmd, -1, false) if err != nil { return err @@ -603,7 +603,7 @@ func (db *LocalCluster) ExecuteFetch(sql string, dbname string) (*sqltypes.Resul } defer conn.Close() - log.Infof("ExecuteFetch(%s): \"%s\"", dbname, sql) + log.Info(fmt.Sprintf("ExecuteFetch(%s): \"%s\"", dbname, sql)) rs, err := conn.ExecuteFetch(sql, -1, true) return rs, err } @@ -656,7 +656,7 @@ func (db *LocalCluster) applyVschema(keyspace string, migration string) error { args := []string{"ApplyVSchema", "--sql", migration, keyspace} fmt.Printf("Applying vschema %v", args) err := vtctlclient.RunCommandAndWait(context.Background(), server, args, func(e *logutil.Event) { - log.Info(e) + log.Info(fmt.Sprint(e)) }) return err @@ -665,10 +665,10 @@ func (db *LocalCluster) applyVschema(keyspace string, migration string) error { func (db *LocalCluster) reloadSchemaKeyspace(keyspace string) error { server := fmt.Sprintf("localhost:%v", db.vt.PortGrpc) args := []string{"ReloadSchemaKeyspace", "--include_primary=true", keyspace} - log.Infof("Reloading keyspace schema %v", args) + log.Info(fmt.Sprintf("Reloading keyspace schema %v", args)) err := vtctlclient.RunCommandAndWait(context.Background(), server, args, func(e *logutil.Event) { - log.Info(e) + log.Info(fmt.Sprint(e)) }) return err diff --git a/go/vt/vttest/mysqlctl.go b/go/vt/vttest/mysqlctl.go index 6d6f278d998..56a7c164422 100644 --- a/go/vt/vttest/mysqlctl.go +++ b/go/vt/vttest/mysqlctl.go @@ -18,6 +18,7 @@ package vttest import ( "context" + "fmt" "os" "os/exec" "path" @@ -99,7 +100,7 @@ func (ctl *Mysqlctl) Start() error { cmd.Env = append(cmd.Env, os.Environ()...) cmd.Env = append(cmd.Env, ctl.Env...) cmd.Env = append(cmd.Env, "EXTRA_MY_CNF="+myCnf) - log.Infof("Starting MySQL using: %+v", cmd.Env) + log.Info(fmt.Sprintf("Starting MySQL using: %+v", cmd.Env)) _, err := cmd.Output() return err } diff --git a/go/vt/vttest/topoctl.go b/go/vt/vttest/topoctl.go index 2b63900d6d8..41d96e56a0c 100644 --- a/go/vt/vttest/topoctl.go +++ b/go/vt/vttest/topoctl.go @@ -2,6 +2,7 @@ package vttest import ( "context" + "fmt" "time" "vitess.io/vitess/go/vt/log" @@ -32,7 +33,7 @@ func (ctl *Topoctl) Setup() error { return err } - log.Infof("Creating cells if they don't exist in the provided topo server %s %s %s", ctl.TopoImplementation, ctl.TopoGlobalServerAddress, ctl.TopoGlobalRoot) + log.Info(fmt.Sprintf("Creating cells if they don't exist in the provided topo server %s %s %s", ctl.TopoImplementation, ctl.TopoGlobalServerAddress, ctl.TopoGlobalRoot)) // Create cells if it doesn't exist to be idempotent. Should work when we share the same topo server across multiple local clusters. 
for _, cell := range ctl.Topology.Cells { _, err := topoServer.GetCellInfo(ctx, cell, true) @@ -54,7 +55,7 @@ func (ctl *Topoctl) Setup() error { if err != nil { return err } - log.Infof("Created cell info for %s in the topo server %s %s %s", cell, ctl.TopoImplementation, ctl.TopoGlobalServerAddress, ctl.TopoGlobalRoot) + log.Info(fmt.Sprintf("Created cell info for %s in the topo server %s %s %s", cell, ctl.TopoImplementation, ctl.TopoGlobalServerAddress, ctl.TopoGlobalRoot)) } return nil diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go index 9bca07fc2b2..402dcc3d0ec 100644 --- a/go/vt/vttest/vtprocess.go +++ b/go/vt/vttest/vtprocess.go @@ -173,7 +173,7 @@ func (vtp *VtProcess) WaitStart() (err error) { vtp.proc.Stdout = os.Stdout } - log.Infof("%v %v", strings.Join(vtp.proc.Args, " ")) + log.Info(strings.Join(vtp.proc.Args, " ")) err = vtp.proc.Start() if err != nil { return diff --git a/go/vt/vttls/crl.go b/go/vt/vttls/crl.go index fde4f0b2b66..041bf18705a 100644 --- a/go/vt/vttls/crl.go +++ b/go/vt/vttls/crl.go @@ -30,7 +30,7 @@ type verifyPeerCertificateFunc func([][]byte, [][]*x509.Certificate) error func certIsRevoked(cert *x509.Certificate, crl *x509.RevocationList) bool { if !time.Now().Before(crl.NextUpdate) { - log.Warningf("The current Certificate Revocation List (CRL) is past expiry date and must be updated. Revoked certificates will still be rejected in this state.") + log.Warn("The current Certificate Revocation List (CRL) is past expiry date and must be updated. Revoked certificates will still be rejected in this state.") } for _, revoked := range crl.RevokedCertificateEntries { diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go index 81c56e5beca..00cd36b1031 100644 --- a/go/vt/wrangler/fake_dbclient_test.go +++ b/go/vt/wrangler/fake_dbclient_test.go @@ -51,7 +51,7 @@ type dbResult struct { func (dbrs *dbResults) next(query string) (*sqltypes.Result, error) { if dbrs.exhausted() { - log.Infof(fmt.Sprintf("Unexpected query >%s<", query)) + log.Info(fmt.Sprintf("Unexpected query >%s<", query)) return nil, fmt.Errorf("code executed this query, but the test did not expect it: %s", query) } i := dbrs.index @@ -204,7 +204,7 @@ func (dc *fakeDBClient) executeFetch(query string, maxrows int) (*sqltypes.Resul } } - log.Infof("Missing query: >>>>>>>>>>>>>>>>>>%s<<<<<<<<<<<<<<<", query) + log.Info(fmt.Sprintf("Missing query: >>>>>>>>>>>>>>>>>>%s<<<<<<<<<<<<<<<", query)) return nil, fmt.Errorf("unexpected query: %s", query) } diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index 184cec645b7..0979d7c1c9d 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -142,7 +142,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta return err } wr.sourceTs = externalTopo - log.Infof("Successfully opened external topo: %+v", externalTopo) + log.Info(fmt.Sprintf("Successfully opened external topo: %+v", externalTopo)) } var ( @@ -208,7 +208,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta if len(tables) == 0 { return errors.New("no tables to move") } - log.Infof("Found tables to move: %s", strings.Join(tables, ",")) + log.Info("Found tables to move: " + strings.Join(tables, ",")) if !vschema.Sharded { // Save the original in case we need to restore it for a late failure @@ -289,7 +289,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta // routing rules in place. 
if externalTopo == nil { if noRoutingRules { - log.Warningf("Found --no-routing-rules flag, not creating routing rules for workflow %s.%s", targetKeyspace, workflow) + log.Warn(fmt.Sprintf("Found --no-routing-rules flag, not creating routing rules for workflow %s.%s", targetKeyspace, workflow)) } else { // Save routing rules before vschema. If we save vschema first, and routing rules // fails to save, we may generate duplicate table errors. @@ -407,7 +407,7 @@ func (wr *Wrangler) getKeyspaceTables(ctx context.Context, ks string, ts *topo.S if err != nil { return nil, err } - log.Infof("got table schemas from source primary %v.", primary) + log.Info(fmt.Sprintf("got table schemas from source primary %v.", primary)) var sourceTables []string for _, td := range schema.TableDefinitions { @@ -1196,7 +1196,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } mu.Unlock() if err != nil { - log.Errorf("Error getting DDLs of source tables: %s", err.Error()) + log.Error("Error getting DDLs of source tables: " + err.Error()) return err } @@ -1251,12 +1251,12 @@ func (mz *materializer) deploySchema(ctx context.Context) error { // reading the source schema. env := schemadiff.NewEnv(mz.wr.env, mz.wr.env.CollationEnv().DefaultConnectionCharset()) schema, err := schemadiff.NewSchemaFromQueries(env, applyDDLs) - log.Infof("AtomicCopy schema:\n %v", applyDDLs) + log.Info(fmt.Sprintf("AtomicCopy schema:\n %v", applyDDLs)) if err != nil { - log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) + log.Error(fmt.Sprint(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff"))) } else { applyDDLs = schema.ToQueries() - log.Infof("AtomicCopy used, and schema was normalized via schemadiff. %v queries normalized", len(applyDDLs)) + log.Info(fmt.Sprintf("AtomicCopy used, and schema was normalized via schemadiff. %v queries normalized", len(applyDDLs))) } } diff --git a/go/vt/wrangler/permissions.go b/go/vt/wrangler/permissions.go index f10475cdc54..ebe29934791 100644 --- a/go/vt/wrangler/permissions.go +++ b/go/vt/wrangler/permissions.go @@ -44,14 +44,14 @@ func (wr *Wrangler) GetPermissions(ctx context.Context, tabletAlias *topodatapb. 
// diffPermissions is a helper method to asynchronously diff a permissions func (wr *Wrangler) diffPermissions(ctx context.Context, primaryPermissions *tabletmanagerdatapb.Permissions, primaryAlias *topodatapb.TabletAlias, alias *topodatapb.TabletAlias, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { defer wg.Done() - log.Infof("Gathering permissions for %v", topoproto.TabletAliasString(alias)) + log.Info(fmt.Sprintf("Gathering permissions for %v", topoproto.TabletAliasString(alias))) replicaPermissions, err := wr.GetPermissions(ctx, alias) if err != nil { er.RecordError(err) return } - log.Infof("Diffing permissions for %v", topoproto.TabletAliasString(alias)) + log.Info(fmt.Sprintf("Diffing permissions for %v", topoproto.TabletAliasString(alias))) tmutils.DiffPermissions(topoproto.TabletAliasString(primaryAlias), primaryPermissions, topoproto.TabletAliasString(alias), replicaPermissions, er) } @@ -67,7 +67,7 @@ func (wr *Wrangler) ValidatePermissionsShard(ctx context.Context, keyspace, shar if !si.HasPrimary() { return fmt.Errorf("no primary in shard %v/%v", keyspace, shard) } - log.Infof("Gathering permissions for primary %v", topoproto.TabletAliasString(si.PrimaryAlias)) + log.Info(fmt.Sprintf("Gathering permissions for primary %v", topoproto.TabletAliasString(si.PrimaryAlias))) primaryPermissions, err := wr.GetPermissions(ctx, si.PrimaryAlias) if err != nil { return err @@ -124,7 +124,7 @@ func (wr *Wrangler) ValidatePermissionsKeyspace(ctx context.Context, keyspace st return fmt.Errorf("no primary in shard %v/%v", keyspace, shards[0]) } referenceAlias := si.PrimaryAlias - log.Infof("Gathering permissions for reference primary %v", topoproto.TabletAliasString(referenceAlias)) + log.Info(fmt.Sprintf("Gathering permissions for reference primary %v", topoproto.TabletAliasString(referenceAlias))) referencePermissions, err := wr.GetPermissions(ctx, si.PrimaryAlias) if err != nil { return err diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 334fd8bd68d..c926ab2ffcc 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -110,7 +110,7 @@ func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newPrimaryAlias *topodatapb.TabletAlias) error { tabletInfo, err := wr.ts.GetTablet(ctx, newPrimaryAlias) if err != nil { - log.Warningf("TabletExternallyReparented: failed to read tablet record for %v: %v", newPrimaryAlias, err) + log.Warn(fmt.Sprintf("TabletExternallyReparented: failed to read tablet record for %v: %v", newPrimaryAlias, err)) return err } @@ -118,19 +118,19 @@ func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newPrimaryAl tablet := tabletInfo.Tablet si, err := wr.ts.GetShard(ctx, tablet.Keyspace, tablet.Shard) if err != nil { - log.Warningf("TabletExternallyReparented: failed to read global shard record for %v/%v: %v", tablet.Keyspace, tablet.Shard, err) + log.Warn(fmt.Sprintf("TabletExternallyReparented: failed to read global shard record for %v/%v: %v", tablet.Keyspace, tablet.Shard, err)) return err } // We update the tablet only if it is not currently primary if tablet.Type != topodatapb.TabletType_PRIMARY { - log.Infof("TabletExternallyReparented: executing tablet type change to PRIMARY") + log.Info("TabletExternallyReparented: executing tablet type change to PRIMARY") durabilityName, err := wr.ts.GetKeyspaceDurability(ctx, tablet.Keyspace) if err != nil { return err } - log.Infof("Getting a new durability policy for %v", 
durabilityName) + log.Info(fmt.Sprintf("Getting a new durability policy for %v", durabilityName)) durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return err @@ -153,7 +153,7 @@ func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newPrimaryAl event.DispatchUpdate(ev, "starting external reparent") if err := wr.tmc.ChangeType(ctx, tablet, topodatapb.TabletType_PRIMARY, policy.SemiSyncAckers(durability, tablet) > 0); err != nil { - log.Warningf("Error calling ChangeType on new primary %v: %v", topoproto.TabletAliasString(newPrimaryAlias), err) + log.Warn(fmt.Sprintf("Error calling ChangeType on new primary %v: %v", topoproto.TabletAliasString(newPrimaryAlias), err)) return err } event.DispatchUpdate(ev, "finished") diff --git a/go/vt/wrangler/resharder.go b/go/vt/wrangler/resharder.go index ff3415f79f5..096e46bd0b9 100644 --- a/go/vt/wrangler/resharder.go +++ b/go/vt/wrangler/resharder.go @@ -74,7 +74,7 @@ func (wr *Wrangler) Reshard(ctx context.Context, keyspace, workflow string, sour } if err := wr.ts.ValidateSrvKeyspace(ctx, keyspace, cell); err != nil { err2 := vterrors.Wrapf(err, "SrvKeyspace for keyspace %s is corrupt in cell %s", keyspace, cell) - log.Errorf("%w", err2) + log.Error(err2.Error()) return err2 } diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index ae24106c97f..46ec611f05f 100644 --- a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -42,7 +42,7 @@ import ( // helper method to asynchronously diff a schema func (wr *Wrangler) diffSchema(ctx context.Context, primarySchema *tabletmanagerdatapb.SchemaDefinition, primaryTabletAlias, alias *topodatapb.TabletAlias, excludeTables []string, includeViews bool, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { defer wg.Done() - log.Infof("Gathering schema for %v", topoproto.TabletAliasString(alias)) + log.Info(fmt.Sprintf("Gathering schema for %v", topoproto.TabletAliasString(alias))) req := &tabletmanagerdatapb.GetSchemaRequest{ExcludeTables: excludeTables, IncludeViews: includeViews} replicaSchema, err := schematools.GetSchema(ctx, wr.ts, wr.tmc, alias, req) if err != nil { @@ -50,7 +50,7 @@ func (wr *Wrangler) diffSchema(ctx context.Context, primarySchema *tabletmanager return } - log.Infof("Diffing schema for %v", topoproto.TabletAliasString(alias)) + log.Info(fmt.Sprintf("Diffing schema for %v", topoproto.TabletAliasString(alias))) tmutils.DiffSchema(topoproto.TabletAliasString(primaryTabletAlias), primarySchema, topoproto.TabletAliasString(alias), replicaSchema, er) } @@ -65,7 +65,7 @@ func (wr *Wrangler) ValidateSchemaShard(ctx context.Context, keyspace, shard str if !si.HasPrimary() { return fmt.Errorf("no primary in shard %v/%v", keyspace, shard) } - log.Infof("Gathering schema for primary %v", topoproto.TabletAliasString(si.PrimaryAlias)) + log.Info(fmt.Sprintf("Gathering schema for primary %v", topoproto.TabletAliasString(si.PrimaryAlias))) req := &tabletmanagerdatapb.GetSchemaRequest{ExcludeTables: excludeTables, IncludeViews: includeViews} primarySchema, err := schematools.GetSchema(ctx, wr.ts, wr.tmc, si.PrimaryAlias, req) if err != nil { diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index 4fd5b81d576..83681239144 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -350,7 +350,7 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam errorMsg := fmt.Sprintf("workflow %s not found in keyspace %s", workflowName, targetKeyspace) return 
handleError("failed to get the current state of the workflow", errors.New(errorMsg)) } - log.Infof("Switching reads: %s.%s tt %+v, cells %+v, workflow state: %+v", targetKeyspace, workflowName, servedTypes, cells, ws) + log.Info(fmt.Sprintf("Switching reads: %s.%s tt %+v, cells %+v, workflow state: %+v", targetKeyspace, workflowName, servedTypes, cells, ws)) var switchReplicas, switchRdonly bool for _, servedType := range servedTypes { if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { @@ -393,7 +393,7 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam return handleError(fmt.Sprintf("failed to read journal in the %s keyspace", ts.SourceKeyspaceName()), err) } if journalsExist { - log.Infof("Found a previous journal entry for %d", ts.id) + log.Info(fmt.Sprintf("Found a previous journal entry for %d", ts.id)) } var sw iswitcher if dryRun { @@ -469,7 +469,7 @@ func (wr *Wrangler) areTabletsAvailableToStreamFrom(ctx context.Context, ts *tra wg.Wait() if allErrors.HasErrors() { - log.Errorf("%s", allErrors.Error()) + log.Error(fmt.Sprintf("%s", allErrors.Error())) return allErrors.Error() } return nil @@ -710,7 +710,7 @@ func (wr *Wrangler) DropTargets(ctx context.Context, targetKeyspace, workflow st if !keepData { switch ts.MigrationType() { case binlogdatapb.MigrationType_TABLES: - log.Infof("Deleting target tables") + log.Info("Deleting target tables") if err := sw.removeTargetTables(ctx); err != nil { return nil, err } @@ -721,7 +721,7 @@ func (wr *Wrangler) DropTargets(ctx context.Context, targetKeyspace, workflow st return nil, err } case binlogdatapb.MigrationType_SHARDS: - log.Infof("Removing target shards") + log.Info("Removing target shards") if err := sw.dropTargetShards(ctx); err != nil { return nil, err } @@ -788,7 +788,7 @@ func (wr *Wrangler) finalizeMigrateWorkflow(ctx context.Context, targetKeyspace, return nil, err } } - log.Infof("cancel is %t, keepData %t", cancel, keepData) + log.Info(fmt.Sprintf("cancel is %t, keepData %t", cancel, keepData)) if cancel && !keepData { if err := sw.removeTargetTables(ctx); err != nil { return nil, err @@ -837,7 +837,7 @@ func (wr *Wrangler) DropSources(ctx context.Context, targetKeyspace, workflowNam if !keepData { switch ts.MigrationType() { case binlogdatapb.MigrationType_TABLES: - log.Infof("Deleting tables") + log.Info("Deleting tables") if err := sw.removeSourceTables(ctx, removalType); err != nil { return nil, err } @@ -849,7 +849,7 @@ func (wr *Wrangler) DropSources(ctx context.Context, targetKeyspace, workflowNam } case binlogdatapb.MigrationType_SHARDS: - log.Infof("Removing shards") + log.Info("Removing shards") if err := sw.dropSourceShards(ctx); err != nil { return nil, err } @@ -892,8 +892,7 @@ func (wr *Wrangler) getShardSubset(ctx context.Context, keyspace string, shardSu return nil, fmt.Errorf("shard %s not found in keyspace %s", shard, keyspace) } } - log.Infof("Selecting subset of shards in keyspace %s: %d from %d :: %+v", - keyspace, len(shardSubset), len(allShards), shardSubset) + log.Info(fmt.Sprintf("Selecting subset of shards in keyspace %s: %d from %d :: %+v", keyspace, len(shardSubset), len(allShards), shardSubset)) return shardSubset, nil } @@ -904,7 +903,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo } tgtInfo, err := workflow.LegacyBuildTargets(ctx, wr.ts, wr.tmc, targetKeyspace, workflowName, shardSubset) if err != nil { - log.Infof("Error building targets: %s", err) + log.Info(fmt.Sprintf("Error 
building targets: %s", err)) return nil, err } targets, frozen, optCells, optTabletTypes := tgtInfo.Targets, tgtInfo.Frozen, tgtInfo.OptCells, tgtInfo.OptTabletTypes @@ -923,7 +922,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo workflowType: tgtInfo.WorkflowType, workflowSubType: tgtInfo.WorkflowSubType, } - log.Infof("Migration ID for workflow %s: %d", workflowName, ts.id) + log.Info(fmt.Sprintf("Migration ID for workflow %s: %d", workflowName, ts.id)) sourceTopo := wr.ts // Build the sources. @@ -1010,7 +1009,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo return nil, err } if ts.isPartialMigration { - log.Infof("Migration is partial, for shards %+v", sourceShards) + log.Info(fmt.Sprintf("Migration is partial, for shards %+v", sourceShards)) } return ts, nil } @@ -1119,7 +1118,7 @@ func (ts *trafficSwitcher) validate(ctx context.Context) error { } func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction workflow.TrafficSwitchDirection) error { - log.Infof("switchTableReads: servedTypes: %+v, direction %t", servedTypes, direction) + log.Info(fmt.Sprintf("switchTableReads: servedTypes: %+v, direction %v", servedTypes, direction)) rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) if err != nil { return err @@ -1133,13 +1132,13 @@ func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, tt := strings.ToLower(servedType.String()) for _, table := range ts.Tables() { if direction == workflow.DirectionForward { - log.Infof("Route direction forward") + log.Info("Route direction forward") toTarget := []string{ts.TargetKeyspaceName() + "." + table} rules[table+"@"+tt] = toTarget rules[ts.TargetKeyspaceName()+"."+table+"@"+tt] = toTarget rules[ts.SourceKeyspaceName()+"."+table+"@"+tt] = toTarget } else { - log.Infof("Route direction backwards") + log.Info("Route direction backwards") toSource := []string{ts.SourceKeyspaceName() + "." 
+ table} rules[table+"@"+tt] = toSource rules[ts.TargetKeyspaceName()+"."+table+"@"+tt] = toSource @@ -1163,7 +1162,7 @@ func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), strings.Join(cells, ",")); err != nil { err2 := vterrors.Wrapf(err, "Before switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", ts.TargetKeyspaceName(), strings.Join(cells, ",")) - log.Errorf("%w", err2) + log.Error(err2.Error()) return err2 } for _, servedType := range servedTypes { @@ -1181,7 +1180,7 @@ func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), strings.Join(cells, ",")); err != nil { err2 := vterrors.Wrapf(err, "After switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", ts.TargetKeyspaceName(), strings.Join(cells, ",")) - log.Errorf("%w", err2) + log.Error(err2.Error()) return err2 } return nil @@ -1222,7 +1221,7 @@ func (ts *trafficSwitcher) stopSourceWrites(ctx context.Context) error { err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), disallowWrites) } if err != nil { - log.Warningf("Error: %s", err) + log.Warn(fmt.Sprintf("Error: %s", err)) return err } return ts.ForAllSources(func(source *workflow.MigrationSource) error { @@ -1231,7 +1230,7 @@ func (ts *trafficSwitcher) stopSourceWrites(ctx context.Context) error { ts.wr.Logger().Infof("Stopped Source Writes. Position for source %v:%v: %v", ts.SourceKeyspaceName(), source.GetShard().ShardName(), source.Position) if err != nil { - log.Warningf("Error: %s", err) + log.Warn(fmt.Sprintf("Error: %s", err)) } return err }) @@ -1254,7 +1253,7 @@ func (ts *trafficSwitcher) changeTableSourceWrites(ctx context.Context, access a return err }) if err != nil { - log.Warningf("Error in changeTableSourceWrites: %s", err) + log.Warn(fmt.Sprintf("Error in changeTableSourceWrites: %s", err)) return err } // Note that the denied tables, which are being updated in this method, are not part of the SrvVSchema in the topo. 
@@ -1309,12 +1308,11 @@ func (ts *trafficSwitcher) waitForCatchup(ctx context.Context, filteredReplicati if err := ts.TabletManagerClient().VReplicationWaitForPos(ctx, target.GetPrimary().Tablet, uid, source.Position); err != nil { return err } - log.Infof("After catchup: target keyspace:shard: %v:%v, source position %v, uid %d", - ts.TargetKeyspaceName(), target.GetShard().ShardName(), source.Position, uid) + log.Info(fmt.Sprintf("After catchup: target keyspace:shard: %v:%v, source position %v, uid %d", ts.TargetKeyspaceName(), target.GetShard().ShardName(), source.Position, uid)) ts.Logger().Infof("After catchup: position for keyspace:shard: %v:%v reached, uid %d", ts.TargetKeyspaceName(), target.GetShard().ShardName(), uid) if _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, binlogplayer.StopVReplication(uid, "stopped for cutover")); err != nil { - log.Infof("error marking stopped for cutover on %s, uid %d", target.GetPrimary().AliasString(), uid) + log.Info(fmt.Sprintf("error marking stopped for cutover on %s, uid %d", target.GetPrimary().AliasString(), uid)) return err } return nil @@ -1435,8 +1433,7 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error Filter: filter, }) } - log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s for target %s:%s, uid %d", - source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position, ts.TargetKeyspaceName(), target.GetShard().ShardName(), uid) + log.Info(fmt.Sprintf("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s for target %s:%s, uid %d", source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position, ts.TargetKeyspaceName(), target.GetShard().ShardName(), uid)) _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, binlogplayer.CreateVReplicationState(ts.ReverseWorkflowName(), reverseBls, target.Position, binlogdatapb.VReplicationWorkflowState_Stopped, source.GetPrimary().DbName(), ts.workflowType, ts.workflowSubType)) @@ -1447,7 +1444,7 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error // if user has defined the cell/tablet_types parameters in the forward workflow, update the reverse workflow as well updateQuery := ts.getReverseVReplicationUpdateQuery(target.GetPrimary().Alias.Cell, source.GetPrimary().Alias.Cell, source.GetPrimary().DbName()) if updateQuery != "" { - log.Infof("Updating vreplication stream entry on %s with: %s", source.GetPrimary().Alias, updateQuery) + log.Info(fmt.Sprintf("Updating vreplication stream entry on %s with: %s", source.GetPrimary().Alias, updateQuery)) _, err = ts.VReplicationExec(ctx, source.GetPrimary().Alias, updateQuery) return err } @@ -1485,7 +1482,7 @@ func (ts *trafficSwitcher) deleteReverseVReplication(ctx context.Context) error } func (ts *trafficSwitcher) createJournals(ctx context.Context, sourceWorkflows []string) error { - log.Infof("In createJournals for source workflows %+v", sourceWorkflows) + log.Info(fmt.Sprintf("In createJournals for source workflows %+v", sourceWorkflows)) return ts.ForAllSources(func(source *workflow.MigrationSource) error { if source.Journaled { return nil @@ -1521,7 +1518,7 @@ func (ts *trafficSwitcher) createJournals(ctx context.Context, sourceWorkflows [ Shard: shard, }) } - log.Infof("Creating journal %v", journal) + log.Info(fmt.Sprintf("Creating journal %v", journal)) ts.Logger().Infof("Creating journal: %v", journal) statement := fmt.Sprintf("insert into 
_vt.resharding_journal "+ "(id, db_name, val) "+ @@ -1601,7 +1598,7 @@ func (ts *trafficSwitcher) changeWriteRoute(ctx context.Context) error { func (ts *trafficSwitcher) changeShardRouting(ctx context.Context) error { if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), ""); err != nil { err2 := vterrors.Wrapf(err, "Before changing shard routes, found SrvKeyspace for %s is corrupt", ts.TargetKeyspaceName()) - log.Errorf("%w", err2) + log.Error(err2.Error()) return err2 } err := ts.ForAllSources(func(source *workflow.MigrationSource) error { @@ -1630,7 +1627,7 @@ func (ts *trafficSwitcher) changeShardRouting(ctx context.Context) error { } if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), ""); err != nil { err2 := vterrors.Wrapf(err, "After changing shard routes, found SrvKeyspace for %s is corrupt", ts.TargetKeyspaceName()) - log.Errorf("%w", err2) + log.Error(err2.Error()) return err2 } return nil @@ -1891,7 +1888,7 @@ func (ts *trafficSwitcher) dropSourceReverseVReplicationStreams(ctx context.Cont } func (ts *trafficSwitcher) removeTargetTables(ctx context.Context) error { - log.Infof("removeTargetTables") + log.Info("removeTargetTables") err := ts.ForAllTargets(func(target *workflow.MigrationTarget) error { for _, tableName := range ts.Tables() { primaryDbName, err := sqlescape.EnsureEscaped(target.GetPrimary().DbName()) diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go index d60d95ed06f..510c974a52a 100644 --- a/go/vt/wrangler/traffic_switcher_env_test.go +++ b/go/vt/wrangler/traffic_switcher_env_test.go @@ -517,8 +517,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar } vreplIDsJoined := strings.Join(vreplIDs, ", ") tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult) - log.Infof("Adding streamInfoKs2 invariant for shard %s, client %s,rows %q", - shard, tme.dbTargetClients[i].name, streamExtInfoRows) + log.Info(fmt.Sprintf("Adding streamInfoKs2 invariant for shard %s, client %s,rows %q", shard, tme.dbTargetClients[i].name, streamExtInfoRows)) tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), @@ -776,7 +775,7 @@ func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) { primary.TM.VREngine.Open(ctx) } for _, primary := range tme.targetPrimaries { - log.Infof("Adding as targetPrimary %s", primary.Tablet.Alias) + log.Info(fmt.Sprintf("Adding as targetPrimary %s", primary.Tablet.Alias)) dbclient := newFakeDBClient(primary.Tablet.Alias.String()) tme.dbTargetClients = append(tme.dbTargetClients, dbclient) dbClientFactory := func() binlogplayer.DBClient { return dbclient } @@ -785,7 +784,7 @@ func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) { primary.TM.VREngine.Open(ctx) } for _, primary := range tme.additionalPrimaries { - log.Infof("Adding as additionalPrimary %s", primary.Tablet.Alias) + log.Info(fmt.Sprintf("Adding as additionalPrimary %s", primary.Tablet.Alias)) dbclient := newFakeDBClient(primary.Tablet.Alias.String()) tme.dbAdditionalClients = append(tme.dbTargetClients, dbclient) } diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index 922317e0fc8..3ea932f8d69 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ 
-177,8 +177,7 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou filteredReplicationWaitTime time.Duration, format string, maxRows int64, tables string, debug, onlyPks bool, maxExtraRowsToCompare int, ) (map[string]*DiffReport, error) { - log.Infof("Starting VDiff for %s.%s, sourceCell %s, targetCell %s, tabletTypes %s, timeout %s", - targetKeyspace, workflowName, sourceCell, targetCell, tabletTypesStr, filteredReplicationWaitTime.String()) + log.Info(fmt.Sprintf("Starting VDiff for %s.%s, sourceCell %s, targetCell %s, tabletTypes %s, timeout %s", targetKeyspace, workflowName, sourceCell, targetCell, tabletTypesStr, filteredReplicationWaitTime.String())) // Assign defaults to sourceCell and targetCell if not specified. if sourceCell == "" && targetCell == "" { cells, err := wr.ts.GetCellInfoNames(ctx) @@ -265,7 +264,7 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou defer func() { // We use a new context as we want to reset the state even // when the parent context has timed out or been canceled. - log.Infof("Restarting the %q VReplication workflow on target tablets in keyspace %q", df.workflow, df.targetKeyspace) + log.Info(fmt.Sprintf("Restarting the %q VReplication workflow on target tablets in keyspace %q", df.workflow, df.targetKeyspace)) restartCtx, restartCancel := context.WithTimeout(context.Background(), DefaultActionTimeout) defer restartCancel() if err := df.restartTargets(restartCtx); err != nil { @@ -418,12 +417,12 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou } func (df *vdiff) diffTable(ctx context.Context, wr *Wrangler, table string, td *tableDiffer, filteredReplicationWaitTime time.Duration) error { - log.Infof("Starting vdiff for table %s", table) + log.Info("Starting vdiff for table " + table) - log.Infof("Locking target keyspace %s", df.targetKeyspace) + log.Info("Locking target keyspace " + df.targetKeyspace) ctx, unlock, lockErr := wr.ts.LockKeyspace(ctx, df.targetKeyspace, "vdiff") if lockErr != nil { - log.Errorf("LockKeyspace failed: %v", lockErr) + log.Error(fmt.Sprintf("LockKeyspace failed: %v", lockErr)) wr.Logger().Errorf("LockKeyspace %s failed: %v", df.targetKeyspace) return lockErr } @@ -432,7 +431,7 @@ func (df *vdiff) diffTable(ctx context.Context, wr *Wrangler, table string, td * defer func() { unlock(&err) if err != nil { - log.Errorf("UnlockKeyspace %s failed: %v", df.targetKeyspace, lockErr) + log.Error(fmt.Sprintf("UnlockKeyspace %s failed: %v", df.targetKeyspace, lockErr)) } }() @@ -485,7 +484,7 @@ func (df *vdiff) buildVDiffPlan(filter *binlogdatapb.Filter, schm *tabletmanager } } if len(tablesToInclude) > 0 && len(tablesToInclude) != len(df.differs) { - log.Errorf("one or more tables provided are not present in the workflow: %v, %+v", tablesToInclude, df.differs) + log.Error(fmt.Sprintf("one or more tables provided are not present in the workflow: %v, %+v", tablesToInclude, df.differs)) return fmt.Errorf("one or more tables provided are not present in the workflow: %v, %+v", tablesToInclude, df.differs) } return nil @@ -510,7 +509,7 @@ func findPKs(env *vtenv.Environment, table *tabletmanagerdatapb.TableDefinition, case *sqlparser.FuncExpr: // eg. 
weight_string() // no-op default: - log.Warningf("Not considering column %v for PK, type %v not handled", selExpr, ct) + log.Warn(fmt.Sprintf("Not considering column %v for PK, type %v not handled", selExpr, ct)) } if strings.EqualFold(pk, colname) { td.compareCols[i].isPK = true @@ -605,7 +604,7 @@ func (df *vdiff) adjustForSourceTimeZone(targetSelectExprs []sqlparser.SelectExp if df.sourceTimeZone == "" { return targetSelectExprs } - log.Infof("source time zone specified: %s", df.sourceTimeZone) + log.Info("source time zone specified: " + df.sourceTimeZone) var newSelectExprs []sqlparser.SelectExpr var modified bool for _, expr := range targetSelectExprs { @@ -621,7 +620,7 @@ func (df *vdiff) adjustForSourceTimeZone(targetSelectExprs []sqlparser.SelectExp colAs, sqlparser.NewStrLiteral(df.targetTimeZone), sqlparser.NewStrLiteral(df.sourceTimeZone)) - log.Infof("converting datetime column %s using convert_tz()", colName) + log.Info(fmt.Sprintf("converting datetime column %s using convert_tz()", colName)) newSelectExprs = append(newSelectExprs, &sqlparser.AliasedExpr{Expr: convertTZFuncExpr, As: colAs.Name}) converted = true modified = true @@ -633,7 +632,7 @@ func (df *vdiff) adjustForSourceTimeZone(targetSelectExprs []sqlparser.SelectExp } } if modified { // at least one datetime was found - log.Infof("Found datetime columns when SourceTimeZone was set, resetting target SelectExprs after convert_tz()") + log.Info("Found datetime columns when SourceTimeZone was set, resetting target SelectExprs after convert_tz()") return newSelectExprs } return targetSelectExprs @@ -930,9 +929,9 @@ func (df *vdiff) startQueryStreams(ctx context.Context, keyspace string, partici if participant.position.IsZero() { return fmt.Errorf("workflow %s.%s: stream has not started on tablet %s", df.targetKeyspace, df.workflow, participant.primary.Alias.String()) } - log.Infof("WaitForPosition: tablet %s should reach position %s", participant.tablet.Alias.String(), replication.EncodePosition(participant.position)) + log.Info(fmt.Sprintf("WaitForPosition: tablet %s should reach position %s", participant.tablet.Alias.String(), replication.EncodePosition(participant.position))) if err := df.ts.TabletManagerClient().WaitForPosition(waitCtx, participant.tablet, replication.EncodePosition(participant.position)); err != nil { - log.Errorf("WaitForPosition error: %s", err) + log.Error(fmt.Sprintf("WaitForPosition error: %s", err)) return vterrors.Wrapf(err, "WaitForPosition for tablet %v", topoproto.TabletAliasString(participant.tablet.Alias)) } participant.result = make(chan *sqltypes.Result, 1) @@ -1043,7 +1042,7 @@ func (df *vdiff) restartTargets(ctx context.Context) error { return df.forAll(df.targets, func(shard string, target *shardStreamer) error { query := fmt.Sprintf("update _vt.vreplication set state='Running', message='', stop_pos='' where db_name=%s and workflow=%s", encodeString(target.primary.DbName()), encodeString(df.ts.WorkflowName())) - log.Infof("Restarting the %q VReplication workflow on %q using %q", df.ts.WorkflowName(), target.primary.Alias, query) + log.Info(fmt.Sprintf("Restarting the %q VReplication workflow on %q using %q", df.ts.WorkflowName(), target.primary.Alias, query)) var err error // Let's retry a few times if we get a retryable error. 
for i := 1; i <= 3; i++ { @@ -1051,8 +1050,7 @@ func (df *vdiff) restartTargets(ctx context.Context) error { if err == nil || !sqlerror.IsEphemeralError(err) { break } - log.Warningf("Encountered the following error while restarting the %q VReplication workflow on %q, will retry (attempt #%d): %v", - df.ts.WorkflowName(), target.primary.Alias, i, err) + log.Warn(fmt.Sprintf("Encountered the following error while restarting the %q VReplication workflow on %q, will retry (attempt #%d): %v", df.ts.WorkflowName(), target.primary.Alias, i, err)) } return err }) @@ -1183,11 +1181,11 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare *int64, debug, on advanceTarget := true for { if dr.ProcessedRows%1e7 == 0 { // log progress every 10 million rows - log.Infof("VDiff progress:: table %s: %s rows", td.targetTable, humanInt(int64(dr.ProcessedRows))) + log.Info(fmt.Sprintf("VDiff progress:: table %s: %s rows", td.targetTable, humanInt(int64(dr.ProcessedRows)))) } *rowsToCompare-- if *rowsToCompare < 0 { - log.Infof("Stopping vdiff, specified limit reached") + log.Info("Stopping vdiff, specified limit reached") return dr, nil } if advanceSource { diff --git a/go/vt/wrangler/vdiff2.go b/go/vt/wrangler/vdiff2.go index 0427875379d..93e122fba36 100644 --- a/go/vt/wrangler/vdiff2.go +++ b/go/vt/wrangler/vdiff2.go @@ -40,7 +40,7 @@ type VDiffOutput struct { func (wr *Wrangler) VDiff2(ctx context.Context, keyspace, workflowName string, action vdiff2.VDiffAction, actionArg, uuid string, options *tabletmanagerdata.VDiffOptions, ) (*VDiffOutput, error) { - log.Infof("VDiff2 called with %s, %s, %s, %s, %s, %+v", keyspace, workflowName, action, actionArg, uuid, options) + log.Info(fmt.Sprintf("VDiff2 called with %s, %s, %s, %s, %s, %+v", keyspace, workflowName, action, actionArg, uuid, options)) req := &tabletmanagerdata.VDiffRequest{ Keyspace: keyspace, @@ -73,7 +73,7 @@ func (wr *Wrangler) VDiff2(ctx context.Context, keyspace, workflowName string, a return err }) if output.Err != nil { - log.Errorf("Error executing action %s: %v", action, output.Err) + log.Error(fmt.Sprintf("Error executing action %s: %v", action, output.Err)) return nil, output.Err } diff --git a/go/vt/wrangler/version.go b/go/vt/wrangler/version.go index a74a26737b3..8b126f86fb3 100644 --- a/go/vt/wrangler/version.go +++ b/go/vt/wrangler/version.go @@ -32,7 +32,7 @@ func (wr *Wrangler) GetVersion(ctx context.Context, tabletAlias *topodatapb.Tabl resp, err := wr.VtctldServer().GetVersion(ctx, &vtctldatapb.GetVersionRequest{ TabletAlias: tabletAlias, }) - log.Infof("Tablet %v is running version '%v'", topoproto.TabletAliasString(tabletAlias), resp.Version) + log.Info(fmt.Sprintf("Tablet %v is running version '%v'", topoproto.TabletAliasString(tabletAlias), resp.Version)) return resp.Version, err } diff --git a/go/vt/wrangler/vexec.go b/go/vt/wrangler/vexec.go index 3e6312fdb67..6f26a578c69 100644 --- a/go/vt/wrangler/vexec.go +++ b/go/vt/wrangler/vexec.go @@ -265,7 +265,7 @@ func (vx *vexec) execCallback(callback func(context.Context, *topo.TabletInfo) ( allErrors.RecordError(err) } else { if qr == nil { - log.Infof("Callback returned nil result for tablet %s-%s", primary.Alias.Cell, primary.Alias.Uid) + log.Info(fmt.Sprintf("Callback returned nil result for tablet %s-%d", primary.Alias.Cell, primary.Alias.Uid)) return // no result } mu.Lock() diff --git a/go/vt/wrangler/vexec_plan.go b/go/vt/wrangler/vexec_plan.go index 59936440b65..605eb3ae41f 100644 --- a/go/vt/wrangler/vexec_plan.go +++ b/go/vt/wrangler/vexec_plan.go @@ -81,7 
+81,7 @@ func (p vreplicationPlanner) exec( return nil, err } if qr.RowsAffected == 0 && len(qr.Rows) == 0 { - log.Infof("no matching streams found for workflow %s, tablet %s, query %s", p.vx.workflow, primaryAlias, query) + log.Info(fmt.Sprintf("no matching streams found for workflow %s, tablet %s, query %s", p.vx.workflow, primaryAlias, query)) } return qr, nil } diff --git a/go/vt/wrangler/workflow.go b/go/vt/wrangler/workflow.go index 45325cb3fce..1f315dc190a 100644 --- a/go/vt/wrangler/workflow.go +++ b/go/vt/wrangler/workflow.go @@ -108,13 +108,13 @@ func (wr *Wrangler) NewVReplicationWorkflow(ctx context.Context, workflowType VR params *VReplicationWorkflowParams, ) (*VReplicationWorkflow, error) { wr.WorkflowParams = params - log.Infof("NewVReplicationWorkflow with params %+v", params) + log.Info(fmt.Sprintf("NewVReplicationWorkflow with params %+v", params)) vrw := &VReplicationWorkflow{wr: wr, ctx: ctx, params: params, workflowType: workflowType} ts, ws, err := wr.getWorkflowState(ctx, params.TargetKeyspace, params.Workflow) if err != nil { return nil, err } - log.Infof("Workflow state is %+v", ws) + log.Info(fmt.Sprintf("Workflow state is %+v", ws)) if ts != nil { // Other than on create we need to get SourceKeyspace from the workflow vrw.params.TargetKeyspace = ts.targetKeyspace vrw.params.Workflow = ts.workflow @@ -155,7 +155,7 @@ func (vrw *VReplicationWorkflow) Exists() bool { } func (vrw *VReplicationWorkflow) stateAsString(ws *workflow.State) string { - log.Infof("Workflow state is %+v", ws) + log.Info(fmt.Sprintf("Workflow state is %+v", ws)) var stateInfo []string s := "" if !vrw.Exists() { @@ -450,7 +450,7 @@ func (vrw *VReplicationWorkflow) parseTabletTypes() (hasReplica, hasRdonly, hasP // region Core Actions func (vrw *VReplicationWorkflow) initMoveTables() error { - log.Infof("In VReplicationWorkflow.initMoveTables() for %+v", vrw) + log.Info(fmt.Sprintf("In VReplicationWorkflow.initMoveTables() for %+v", vrw)) return vrw.wr.MoveTables(vrw.ctx, vrw.params.Workflow, vrw.params.SourceKeyspace, vrw.params.TargetKeyspace, vrw.params.Tables, vrw.params.Cells, vrw.params.TabletTypes, vrw.params.AllTables, vrw.params.ExcludeTables, vrw.params.AutoStart, vrw.params.StopAfterCopy, vrw.params.ExternalCluster, vrw.params.DropForeignKeys, @@ -459,14 +459,14 @@ func (vrw *VReplicationWorkflow) initMoveTables() error { } func (vrw *VReplicationWorkflow) initReshard() error { - log.Infof("In VReplicationWorkflow.initReshard() for %+v", vrw) + log.Info(fmt.Sprintf("In VReplicationWorkflow.initReshard() for %+v", vrw)) return vrw.wr.Reshard(vrw.ctx, vrw.params.TargetKeyspace, vrw.params.Workflow, vrw.params.SourceShards, vrw.params.TargetShards, vrw.params.SkipSchemaCopy, vrw.params.Cells, vrw.params.TabletTypes, vrw.params.OnDDL, vrw.params.AutoStart, vrw.params.StopAfterCopy, vrw.params.DeferSecondaryKeys) } func (vrw *VReplicationWorkflow) switchReads() (*[]string, error) { - log.Infof("In VReplicationWorkflow.switchReads() for %+v", vrw) + log.Info(fmt.Sprintf("In VReplicationWorkflow.switchReads() for %+v", vrw)) fullTabletTypes, _, err := discovery.ParseTabletTypesAndOrder(vrw.params.TabletTypes) if err != nil { return nil, err @@ -490,13 +490,13 @@ func (vrw *VReplicationWorkflow) switchWrites() (*[]string, error) { var journalID int64 var dryRunResults *[]string var err error - log.Infof("In VReplicationWorkflow.switchWrites() for %+v", vrw) + log.Info(fmt.Sprintf("In VReplicationWorkflow.switchWrites() for %+v", vrw)) if vrw.params.Direction == workflow.DirectionBackward { 
keyspace := vrw.params.SourceKeyspace vrw.params.SourceKeyspace = vrw.params.TargetKeyspace vrw.params.TargetKeyspace = keyspace vrw.params.Workflow = workflow.ReverseWorkflowName(vrw.params.Workflow) - log.Infof("In VReplicationWorkflow.switchWrites(reverse) for %+v", vrw) + log.Info(fmt.Sprintf("In VReplicationWorkflow.switchWrites(reverse) for %+v", vrw)) } journalID, dryRunResults, err = vrw.wr.SwitchWrites(vrw.ctx, vrw.params.TargetKeyspace, vrw.params.Workflow, vrw.params.Timeout, false, vrw.params.Direction == workflow.DirectionBackward, vrw.params.EnableReverseReplication, vrw.params.DryRun, @@ -504,7 +504,7 @@ func (vrw *VReplicationWorkflow) switchWrites() (*[]string, error) { if err != nil { return nil, err } - log.Infof("switchWrites succeeded with journal id %s", journalID) + log.Info(fmt.Sprintf("switchWrites succeeded with journal id %d", journalID)) return dryRunResults, nil } @@ -536,10 +536,10 @@ func (vrw *VReplicationWorkflow) canSwitch(keyspace, workflowName string) (reaso } if vrw.params.Direction == workflow.DirectionForward && ws.WritesSwitched || vrw.params.Direction == workflow.DirectionBackward && !ws.WritesSwitched { - log.Infof("writes already switched no need to check lag") + log.Info("writes already switched no need to check lag") return "", nil } - log.Infof("state:%s, direction %d, switched %t", vrw.CachedState(), vrw.params.Direction, ws.WritesSwitched) + log.Info(fmt.Sprintf("state:%s, direction %d, switched %t", vrw.CachedState(), vrw.params.Direction, ws.WritesSwitched)) result, err := vrw.wr.getStreams(vrw.ctx, workflowName, keyspace, vrw.params.ShardSubset) if err != nil { return "", err @@ -735,7 +735,7 @@ func (wr *Wrangler) deleteWorkflowVDiffData(ctx context.Context, tablet *topodat Action: string(vdiff2.DeleteAction), ActionArg: vdiff2.AllActionArg, }); err != nil { - log.Errorf("Error deleting vdiff data for %s.%s workflow: %v", tablet.Keyspace, workflow, err) + log.Error(fmt.Sprintf("Error deleting vdiff data for %s.%s workflow: %v", tablet.Keyspace, workflow, err)) } } @@ -755,8 +755,7 @@ func (wr *Wrangler) deleteWorkflowVDiffData(ctx context.Context, tablet *topodat func (wr *Wrangler) optimizeCopyStateTable(tablet *topodatapb.Tablet) { if wr.sem != nil { if !wr.sem.TryAcquire(1) { - log.Warningf("Deferring work to optimize the copy_state table on %q due to hitting the maximum concurrent background job limit.", - tablet.Alias.String()) + log.Warn(fmt.Sprintf("Deferring work to optimize the copy_state table on %q due to hitting the maximum concurrent background job limit.", tablet.Alias.String())) return } } @@ -776,7 +775,7 @@ func (wr *Wrangler) optimizeCopyStateTable(tablet *topodatapb.Tablet) { if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Num == sqlerror.ERNoSuchTable { // the table may not exist return } - log.Warningf("Failed to optimize the copy_state table on %q: %v", tablet.Alias.String(), err) + log.Warn(fmt.Sprintf("Failed to optimize the copy_state table on %q: %v", tablet.Alias.String(), err)) } // This will automatically set the value to 1 or the current max value in the table, whichever is greater sqlResetAutoInc := "alter table _vt.copy_state auto_increment = 1" @@ -784,8 +783,7 @@ func (wr *Wrangler) optimizeCopyStateTable(tablet *topodatapb.Tablet) { Query: []byte(sqlResetAutoInc), MaxRows: uint64(0), }); err != nil { - log.Warningf("Failed to reset the auto_increment value for the copy_state table on %q: %v", - tablet.Alias.String(), err) + log.Warn(fmt.Sprintf("Failed to reset the auto_increment value for the 
copy_state table on %q: %v", tablet.Alias.String(), err)) } }() } diff --git a/go/vt/wrangler/workflow_test.go b/go/vt/wrangler/workflow_test.go index bff46fbb8f1..2f363c4d6fa 100644 --- a/go/vt/wrangler/workflow_test.go +++ b/go/vt/wrangler/workflow_test.go @@ -218,7 +218,7 @@ func TestCopyProgress(t *testing.T) { var cp *CopyProgress cp, err = wf.GetCopyProgress() require.NoError(t, err) - log.Infof("CopyProgress is %+v,%+v", (*cp)["t1"], (*cp)["t2"]) + log.Info(fmt.Sprintf("CopyProgress is %+v,%+v", (*cp)["t1"], (*cp)["t2"])) require.Equal(t, int64(800), (*cp)["t1"].SourceRowCount) require.Equal(t, int64(200), (*cp)["t1"].TargetRowCount) diff --git a/go/vt/wrangler/wrangler_env_test.go b/go/vt/wrangler/wrangler_env_test.go index 2b174bee176..ee283872099 100644 --- a/go/vt/wrangler/wrangler_env_test.go +++ b/go/vt/wrangler/wrangler_env_test.go @@ -344,7 +344,7 @@ func (tmc *testWranglerTMClient) ExecuteFetchAsApp(ctx context.Context, tablet * result, ok := t.queryResults[string(req.Query)] if !ok { result = &querypb.QueryResult{} - log.Errorf("Query: %s, Result :%v\n", string(req.Query), result) + log.Error(fmt.Sprintf("Query: %s, Result :%v\n", string(req.Query), result)) } return result, nil } diff --git a/go/vt/zkctl/zkconf.go b/go/vt/zkctl/zkconf.go index facddf08ee5..790bfeebc30 100644 --- a/go/vt/zkctl/zkconf.go +++ b/go/vt/zkctl/zkconf.go @@ -183,7 +183,7 @@ func MakeZkConfigFromString(cmdLine string, myID uint32) *ZkConfig { zkConfig.Servers = append(zkConfig.Servers, zkServer) } hostname := netutil.FullyQualifiedHostnameOrPanic() - log.Infof("Fully qualified machine hostname was detected as: %v", hostname) + log.Info(fmt.Sprintf("Fully qualified machine hostname was detected as: %v", hostname)) for _, zkServer := range zkConfig.Servers { if (myID > 0 && myID == zkServer.ServerId) || (myID == 0 && zkServer.Hostname == hostname) { zkConfig.ServerId = zkServer.ServerId diff --git a/go/vt/zkctl/zkctl.go b/go/vt/zkctl/zkctl.go index 3f50e0e2c4e..42a550801fe 100644 --- a/go/vt/zkctl/zkctl.go +++ b/go/vt/zkctl/zkctl.go @@ -73,7 +73,7 @@ func (zkd *Zkd) Done() <-chan struct{} { // Start runs an already initialized ZooKeeper server. func (zkd *Zkd) Start() error { - log.Infof("zkctl.Start") + log.Info("zkctl.Start") // NOTE(msolomon) use a script here so we can detach and continue to run // if the wrangler process dies. this pretty much the same as mysqld_safe. args := []string{ @@ -130,7 +130,7 @@ func (zkd *Zkd) Start() error { // Shutdown kills a ZooKeeper server, but keeps its data dir intact. func (zkd *Zkd) Shutdown() error { - log.Infof("zkctl.Shutdown") + log.Info("zkctl.Shutdown") pidData, err := os.ReadFile(zkd.config.PidFile()) if err != nil { return err } @@ -168,10 +168,10 @@ func (zkd *Zkd) Init() error { return errors.New("zk already inited") } - log.Infof("zkd.Init") + log.Info("zkd.Init") for _, path := range zkd.config.DirectoryList() { if err := os.MkdirAll(path, 0o775); err != nil { - log.Errorf("%v", err) + log.Error(err.Error()) return err } // FIXME(msolomon) validate permissions? 
@@ -182,18 +182,18 @@ func (zkd *Zkd) Init() error { err = os.WriteFile(zkd.config.ConfigFile(), []byte(configData), 0o664) } if err != nil { - log.Errorf("failed creating %v: %v", zkd.config.ConfigFile(), err) + log.Error(fmt.Sprintf("failed creating %v: %v", zkd.config.ConfigFile(), err)) return err } err = zkd.config.WriteMyid() if err != nil { - log.Errorf("failed creating %v: %v", zkd.config.MyidFile(), err) + log.Error(fmt.Sprintf("failed creating %v: %v", zkd.config.MyidFile(), err)) return err } if err = zkd.Start(); err != nil { - log.Errorf("failed starting, check %v", zkd.config.LogDir()) + log.Error(fmt.Sprintf("failed starting, check %v", zkd.config.LogDir())) return err } @@ -230,14 +230,14 @@ func (zkd *Zkd) Init() error { // Teardown shuts down the server and removes its data dir. func (zkd *Zkd) Teardown() error { - log.Infof("zkctl.Teardown") + log.Info("zkctl.Teardown") if err := zkd.Shutdown(); err != nil { - log.Warningf("failed zookeeper shutdown: %v", err.Error()) + log.Warn(fmt.Sprintf("failed zookeeper shutdown: %v", err.Error())) } var removalErr error for _, dir := range zkd.config.DirectoryList() { if err := os.RemoveAll(dir); err != nil { - log.Errorf("failed removing %v: %v", dir, err.Error()) + log.Error(fmt.Sprintf("failed removing %v: %v", dir, err.Error())) removalErr = err } } diff --git a/go/vt/zkctl/zkctl_local.go b/go/vt/zkctl/zkctl_local.go index dfb11e28cd6..808332a4bba 100644 --- a/go/vt/zkctl/zkctl_local.go +++ b/go/vt/zkctl/zkctl_local.go @@ -18,6 +18,7 @@ package zkctl import ( "fmt" + "os" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/log" @@ -36,7 +37,8 @@ func StartLocalZk(id, port int) (*Zkd, string) { // Init & start zk. if err := zkd.Init(); err != nil { - log.Exitf("zkd.Init(%d, %d) failed: %v", id, port, err) + log.Error(fmt.Sprintf("zkd.Init(%d, %d) failed: %v", id, port, err)) + os.Exit(1) } return zkd, fmt.Sprintf("%v:%v", hostname, port+2) diff --git a/go/vtbench/vtbench.go b/go/vtbench/vtbench.go index 7321b53d77d..ae730812bb4 100644 --- a/go/vtbench/vtbench.go +++ b/go/vtbench/vtbench.go @@ -127,7 +127,7 @@ func (b *Bench) Run(ctx context.Context) error { } func (b *Bench) createConns(ctx context.Context) error { - log.V(10).Infof("creating %d client connections...", b.Threads) + log.Debug(fmt.Sprintf("creating %d client connections...", b.Threads)) start := time.Now() reportInterval := 2 * time.Second report := start.Add(reportInterval) @@ -141,15 +141,15 @@ func (b *Bench) createConns(ctx context.Context) error { switch b.ConnParams.Protocol { case MySQL: - log.V(5).Infof("connecting to %s using mysql protocol...", host) + log.Debug(fmt.Sprintf("connecting to %s using mysql protocol...", host)) conn = &mysqlClientConn{} err = conn.connect(ctx, cp) case GRPCVtgate: - log.V(5).Infof("connecting to %s using grpc vtgate protocol...", host) + log.Debug(fmt.Sprintf("connecting to %s using grpc vtgate protocol...", host)) conn = &grpcVtgateConn{} err = conn.connect(ctx, cp) case GRPCVttablet: - log.V(5).Infof("connecting to %s using grpc vttablet protocol...", host) + log.Debug(fmt.Sprintf("connecting to %s using grpc vttablet protocol...", host)) conn = &grpcVttabletConn{} err = conn.connect(ctx, cp) default: @@ -189,13 +189,13 @@ func (b *Bench) createThreads(ctx context.Context) { // Create a barrier so all the threads start at the same time b.lock.Lock() - log.V(10).Infof("starting %d threads", b.Threads) + log.Debug(fmt.Sprintf("starting %d threads", b.Threads)) for i := 0; i < b.Threads; i++ { b.wg.Add(1) go 
b.threads[i].clientLoop(ctx) } - log.V(10).Infof("waiting for %d threads to start", b.Threads) + log.Debug(fmt.Sprintf("waiting for %d threads to start", b.Threads)) b.wg.Wait() b.wg.Add(b.Threads) @@ -207,7 +207,7 @@ func (b *Bench) runTest(ctx context.Context) error { b.lock.Unlock() // Then wait for them all to finish looping - log.V(10).Infof("waiting for %d threads to finish", b.Threads) + log.Debug(fmt.Sprintf("waiting for %d threads to finish", b.Threads)) b.wg.Wait() b.TotalTime = time.Since(start) @@ -220,16 +220,16 @@ func (bt *benchThread) clientLoop(ctx context.Context) { // Declare that startup is finished and wait for // the barrier b.wg.Done() - log.V(10).Infof("thread %d waiting for startup barrier", bt.i) + log.Debug(fmt.Sprintf("thread %d waiting for startup barrier", bt.i)) b.lock.RLock() - log.V(10).Infof("thread %d starting loop", bt.i) + log.Debug(fmt.Sprintf("thread %d starting loop", bt.i)) for i := 0; i < b.Count; i++ { start := time.Now() result, err := bt.conn.execute(ctx, bt.query, bt.bindVars) b.Timings.Record("query", start) if err != nil { - log.Errorf("query error: %v", err) + log.Error(fmt.Sprintf("query error: %v", err)) break } else { b.Rows.Add(int64(len(result.Rows))) diff --git a/test.go b/test.go index 6b38517a0a1..28713b55f8d 100755 --- a/test.go +++ b/test.go @@ -181,7 +181,7 @@ func (t *Test) run(dir, dataDir string) ([]byte, error) { testCmd := t.Command if len(testCmd) == 0 { - if strings.Contains(fmt.Sprintf("%v", t.File), ".go") { + if strings.Contains(t.File, ".go") { testCmd = []string{"tools/e2e_go_test.sh"} testCmd = append(testCmd, t.Args...) if *keepData { @@ -451,7 +451,7 @@ func main() { command.Env = append(os.Environ(), "NOVTADMINBUILD=1") } if *buildTag != "" { - command.Env = append(command.Env, fmt.Sprintf(`EXTRA_BUILD_TAGS=%s`, *buildTag)) + command.Env = append(command.Env, "EXTRA_BUILD_TAGS="+*buildTag) } if out, err := command.CombinedOutput(); err != nil { log.Fatalf("make build failed; exit code: %d, error: %v\n%s", diff --git a/tools/rowlog/rowlog.go b/tools/rowlog/rowlog.go index 7821f1f0ad7..a08e915f945 100644 --- a/tools/rowlog/rowlog.go +++ b/tools/rowlog/rowlog.go @@ -65,7 +65,7 @@ func usage() { s := "rowlog --ids --table --pk --source --target " s += "--vtctld --vtgate --cells --topo-implementation " s += "--topo-global-server-address --topo-global-root \n" - logger.Printf(s) + logger.Printf("%s", s) } } @@ -77,12 +77,12 @@ func main() { pflag.Usage() return } - log.Infof("Starting rowlogger with config: %s", config) + log.Info(fmt.Sprintf("Starting rowlogger with config: %s", config)) fmt.Printf("Starting rowlogger with\n%v\n", config) ts := topo.Open() sourceTablet := getTablet(ctx, ts, config.cells, config.sourceKeyspace) targetTablet := getTablet(ctx, ts, config.cells, config.targetKeyspace) - log.Infof("Using tablets %s and %s to get positions", sourceTablet, targetTablet) + log.Info(fmt.Sprintf("Using tablets %s and %s to get positions", sourceTablet, targetTablet)) var wg sync.WaitGroup stream := func(keyspace, tablet string) { @@ -94,20 +94,20 @@ func main() { for { i++ if i > 100 { - log.Errorf("returning without completion : Timing out for keyspace %s", keyspace) + log.Error("returning without completion : Timing out for keyspace " + keyspace) return } - log.Infof("%s Iteration:%d", keyspace, i) + log.Info(fmt.Sprintf("%s Iteration:%d", keyspace, i)) startPos, stopPos, done, fieldsPrinted, err = startStreaming(ctx, config.vtgate, config.vtctld, keyspace, tablet, config.table, config.pk, config.ids, startPos, 
stopPos, fieldsPrinted) if done { - log.Infof("Finished streaming all events for keyspace %s", keyspace) + log.Info("Finished streaming all events for keyspace " + keyspace) fmt.Printf("Finished streaming all events for keyspace %s\n", keyspace) return } if startPos != "" { - log.Infof("resuming streaming from %s, error received was %v", startPos, err) + log.Info(fmt.Sprintf("resuming streaming from %s, error received was %v", startPos, err)) } else { - log.Errorf("returning without completion of keyspace %s because of error %v", keyspace, err) + log.Error(fmt.Sprintf("returning without completion of keyspace %s because of error %v", keyspace, err)) return } } @@ -121,7 +121,7 @@ func main() { wg.Wait() - log.Infof("rowlog done streaming from both source and target") + log.Info("rowlog done streaming from both source and target") fmt.Printf("\n\nRowlog completed\nIf the program worked you should see two log files with the related binlog entries: %s.log and %s.log\n", config.sourceKeyspace, config.targetKeyspace) } @@ -131,14 +131,14 @@ func startStreaming(ctx context.Context, vtgate, vtctld, keyspace, tablet, table if startPos == "" { flavor := getFlavor(ctx, vtctld, keyspace) if flavor == "" { - log.Errorf("Invalid flavor for %s", keyspace) + log.Error("Invalid flavor for " + keyspace) return "", "", false, false, nil } startPos, stopPos, _ = getPositions(ctx, vtctld, tablet) startPos = flavor + "/" + startPos stopPos = flavor + "/" + stopPos } - log.Infof("Streaming keyspace %s from %s upto %s", keyspace, startPos, stopPos) + log.Info(fmt.Sprintf("Streaming keyspace %s from %s upto %s", keyspace, startPos, stopPos)) fmt.Printf("Streaming keyspace %s from %s upto %s\n", keyspace, startPos, stopPos) vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ @@ -156,7 +156,8 @@ func startStreaming(ctx context.Context, vtgate, vtctld, keyspace, tablet, table } conn, err := vtgateconn.Dial(ctx, vtgate) if err != nil { - log.Fatal(err) + log.Error(fmt.Sprint(err)) + os.Exit(1) } defer conn.Close() reader, _ := conn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, &vtgatepb.VStreamFlags{}) @@ -173,8 +174,8 @@ func startStreaming(ctx context.Context, vtgate, vtctld, keyspace, tablet, table now := time.Now().Unix() if now-lastLoggedAt > 60 && ev.Timestamp != 0 { // every minute lastLoggedAt = now - log.Infof("%s Progress: %d/%d rows, %s: %s", keyspace, filteredRows, totalRowsForTable, - time.Unix(ev.Timestamp, 0).Format(time.RFC3339), gtid) + log.Info(fmt.Sprintf("%s Progress: %d/%d rows, %s: %s", keyspace, filteredRows, totalRowsForTable, + time.Unix(ev.Timestamp, 0).Format(time.RFC3339), gtid)) fmt.Printf(".") } switch ev.Type { @@ -208,7 +209,7 @@ func startStreaming(ctx context.Context, vtgate, vtctld, keyspace, tablet, table fmt.Printf("Error decoding position for %s:%vs\n", stopPos, err.Error()) } if currentPosition.AtLeast(stopPosition) { - log.Infof("Finished streaming keyspace %s from %s upto %s, total rows seen %d", keyspace, startPos, stopPos, totalRowsForTable) + log.Info(fmt.Sprintf("Finished streaming keyspace %s from %s upto %s, total rows seen %d", keyspace, startPos, stopPos, totalRowsForTable)) return "", "", true, true, nil } @@ -216,11 +217,11 @@ func startStreaming(ctx context.Context, vtgate, vtctld, keyspace, tablet, table return gtid, stopPos, false, fieldsPrinted, nil } case io.EOF: - log.Infof("stream ended before reaching stop pos") + log.Info("stream ended before reaching stop pos") fmt.Printf("stream ended before reaching stop pos\n") return "", 
"", false, fieldsPrinted, nil default: - log.Errorf("remote error: %s, returning gtid %s, stopPos %s", err, gtid, stopPos) + log.Error(fmt.Sprintf("remote error: %s, returning gtid %s, stopPos %s", err, gtid, stopPos)) fmt.Printf("remote error: %s, returning gtid %s, stopPos %s\n", err.Error(), gtid, stopPos) return gtid, stopPos, false, fieldsPrinted, err } @@ -231,13 +232,13 @@ func output(filename, s string) { f, err := os.OpenFile(filename+".log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { - log.Errorf(err.Error()) + log.Error(err.Error()) } defer f.Close() if _, err := f.WriteString(s + "\n"); err != nil { - log.Errorf(err.Error()) + log.Error(err.Error()) } - log.Infof("Writing to %s.log: %s", filename, s) + log.Info(fmt.Sprintf("Writing to %s.log: %s", filename, s)) } func outputHeader(plan *TablePlan) { @@ -469,7 +470,7 @@ func getPositions(ctx context.Context, server, tablet string) (string, string, e results, err := execVtctl(ctx, server, []string{"ExecuteFetchAsDba", "--json", tablet, query}) if err != nil { fmt.Println(err) - log.Errorf(err.Error()) + log.Error(err.Error()) return "", "", err } firstPos := parseExecOutput(strings.Join(results, "")) @@ -478,7 +479,7 @@ func getPositions(ctx context.Context, server, tablet string) (string, string, e results, err = execVtctl(ctx, server, []string{"ExecuteFetchAsDba", "--json", tablet, query}) if err != nil { fmt.Println(err) - log.Errorf(err.Error()) + log.Error(err.Error()) return "", "", err } lastPos := parseExecOutput(strings.Join(results, "")) @@ -523,7 +524,7 @@ func execVtctl(ctx context.Context, server string, args []string) ([]string, err case io.EOF: return results, nil default: - log.Errorf("remote error: %v", err) + log.Error(fmt.Sprintf("remote error: %v", err)) return nil, fmt.Errorf("remote error: %v", err) } } diff --git a/tools/unit_test_runner.sh b/tools/unit_test_runner.sh index 1a99d641ff2..8e56e2a4c68 100755 --- a/tools/unit_test_runner.sh +++ b/tools/unit_test_runner.sh @@ -60,7 +60,9 @@ fi # Build gotestsum args. Failed tests are retried up to 3 times, but if more than 10 tests fail # initially we skip retries to avoid wasting time on a real widespread failure. -GOTESTSUM_ARGS="--format pkgname-and-test-fails --rerun-fails=3 --rerun-fails-max-failures=10 --rerun-fails-run-root-test --format-hide-empty-pkg --hide-summary=skipped" +GOTESTSUM_FORMAT="${GOTESTSUM_FORMAT:-pkgname-and-test-fails}" + +GOTESTSUM_ARGS="--format ${GOTESTSUM_FORMAT} --rerun-fails=3 --rerun-fails-max-failures=10 --rerun-fails-run-root-test --format-hide-empty-pkg --hide-summary=skipped" if [[ -n "${JUNIT_OUTPUT:-}" ]]; then GOTESTSUM_ARGS="$GOTESTSUM_ARGS --junitfile $JUNIT_OUTPUT" fi