From ef05397ba53e095f13627c36acadc2d95d0031de Mon Sep 17 00:00:00 2001
From: Alex Plischke
Date: Wed, 15 Jan 2025 07:38:49 -0800
Subject: [PATCH] feat: improve interrupt handling (#996)

* refactor: use global context where applicable

* don't wait for the ticker to cancel a poll

* refactor: simplify interrupt handling

* cleanup

* fix: tests
---
 internal/http/rdcservice.go                   |   6 +-
 internal/http/rdcservice_test.go              |   6 +-
 internal/http/resto.go                        |   6 +-
 internal/http/resto_test.go                   |   6 +-
 internal/job/job.go                           |   2 +-
 internal/mocks/job.go                         |   8 +-
 internal/saucecloud/cloud.go                  | 182 +++++++-----------
 internal/saucecloud/cloud_test.go             |  45 -----
 internal/saucecloud/cucumber.go               |   7 +-
 internal/saucecloud/cypress.go                |   6 +-
 internal/saucecloud/espresso.go               |   7 +-
 internal/saucecloud/jobservice.go             |  12 +-
 internal/saucecloud/jobservice_test.go        |  15 +-
 internal/saucecloud/playwright.go             |   7 +-
 internal/saucecloud/replay.go                 |   7 +-
 .../saucecloud/retry/saucereportretrier.go    |   6 +-
 .../retry/saucereportretrier_test.go          |   2 +-
 internal/saucecloud/testcafe.go               |   7 +-
 internal/saucecloud/xcuitest.go               |   7 +-
 19 files changed, 121 insertions(+), 223 deletions(-)

diff --git a/internal/http/rdcservice.go b/internal/http/rdcservice.go
index 642464a94..3003ed7bd 100644
--- a/internal/http/rdcservice.go
+++ b/internal/http/rdcservice.go
@@ -267,10 +267,12 @@ func (c *RDCService) PollJob(ctx context.Context, id string, interval, timeout t

 	for {
 		select {
+		case <-ctx.Done():
+			return job.Job{ID: id}, ctx.Err()
 		case <-ticker.C:
 			j, err := c.Job(ctx, id, realDevice)
 			if err != nil {
-				return job.Job{}, err
+				return job.Job{ID: id}, err
 			}

 			if job.Done(j.Status) {
@@ -280,7 +282,7 @@ func (c *RDCService) PollJob(ctx context.Context, id string, interval, timeout t
 		case <-deathclock.C:
 			j, err := c.Job(ctx, id, realDevice)
 			if err != nil {
-				return job.Job{}, err
+				return job.Job{ID: id}, err
 			}
 			j.TimedOut = true
 			return j, nil
diff --git a/internal/http/rdcservice_test.go b/internal/http/rdcservice_test.go
index 6a40f729b..31b318db3 100644
--- a/internal/http/rdcservice_test.go
+++ b/internal/http/rdcservice_test.go
@@ -208,21 +208,21 @@ func TestRDCService_PollJob(t *testing.T) {
 			name:         "job not found error from external API",
 			client:       rdc,
 			jobID:        "3",
-			expectedResp: job.Job{},
+			expectedResp: job.Job{ID: "3"},
 			expectedErr:  ErrJobNotFound,
 		},
 		{
 			name:         "http status is not 200, but 401 from external API",
 			client:       rdc,
 			jobID:        "4",
-			expectedResp: job.Job{},
+			expectedResp: job.Job{ID: "4"},
 			expectedErr:  errors.New("unexpected statusCode: 401"),
 		},
 		{
 			name:         "unexpected status code from external API",
 			client:       rdc,
 			jobID:        "333",
-			expectedResp: job.Job{},
+			expectedResp: job.Job{ID: "333"},
 			expectedErr:  errors.New("internal server error"),
 		},
 		{
diff --git a/internal/http/resto.go b/internal/http/resto.go
index b0cd7630b..596748caf 100644
--- a/internal/http/resto.go
+++ b/internal/http/resto.go
@@ -127,10 +127,12 @@ func (c *Resto) PollJob(ctx context.Context, id string, interval, timeout time.D

 	for {
 		select {
+		case <-ctx.Done():
+			return job.Job{ID: id}, ctx.Err()
 		case <-ticker.C:
 			j, err := c.Job(ctx, id, realDevice)
 			if err != nil {
-				return job.Job{}, err
+				return job.Job{ID: id}, err
 			}

 			if job.Done(j.Status) {
@@ -139,7 +141,7 @@ func (c *Resto) PollJob(ctx context.Context, id string, interval, timeout time.D
 		case <-deathclock.C:
 			j, err := c.Job(ctx, id, realDevice)
 			if err != nil {
-				return job.Job{}, err
+				return job.Job{ID: id}, err
 			}
 			j.TimedOut = true
 			return j, nil
diff --git a/internal/http/resto_test.go b/internal/http/resto_test.go
index b9b58c283..cd42883c3 100644
--- a/internal/http/resto_test.go
+++ b/internal/http/resto_test.go
@@ -226,21 +226,21 @@ func TestResto_GetJobStatus(t *testing.T) {
 			name:         "user not found error from external API",
 			client:       resto,
 			jobID:        "3",
-			expectedResp: job.Job{},
+			expectedResp: job.Job{ID: "3"},
 			expectedErr:  ErrJobNotFound,
 		},
 		{
 			name:         "http status is not 200, but 401 from external API",
 			client:       resto,
 			jobID:        "4",
-			expectedResp: job.Job{},
+			expectedResp: job.Job{ID: "4"},
 			expectedErr:  errors.New("job status request failed; unexpected response code:'401', msg:''"),
 		},
 		{
 			name:         "unexpected status code from external API",
 			client:       resto,
 			jobID:        "333",
-			expectedResp: job.Job{},
+			expectedResp: job.Job{ID: "333"},
 			expectedErr:  errors.New("internal server error"),
 		},
 		{
diff --git a/internal/job/job.go b/internal/job/job.go
index 1db0c1cd0..ededc34c4 100644
--- a/internal/job/job.go
+++ b/internal/job/job.go
@@ -115,5 +115,5 @@ type Service interface {

 	// DownloadArtifacts downloads artifacts from a Job. Returns a list of
 	// file paths.
-	DownloadArtifacts(job Job, isLastAttempt bool) []string
+	DownloadArtifacts(ctx context.Context, job Job, isLastAttempt bool) []string
 }
diff --git a/internal/mocks/job.go b/internal/mocks/job.go
index a40619174..1f4dd32b3 100644
--- a/internal/mocks/job.go
+++ b/internal/mocks/job.go
@@ -15,7 +15,7 @@ type FakeJobService struct {
 	PollJobFn                func(ctx context.Context, id string, interval time.Duration, timeout time.Duration) (job.Job, error)
 	UploadAssetFn            func(ctx context.Context, jobID string, realDevice bool, fileName string, contentType string, content []byte) error
-	DownloadArtifactFn       func(job job.Job, isLastAttempt bool) []string
+	DownloadArtifactFn       func(ctx context.Context, job job.Job, isLastAttempt bool) []string
 	GetJobAssetFileNamesFn   func(ctx context.Context, jobID string) ([]string, error)
 	GetJobAssetFileContentFn func(ctx context.Context, jobID, fileName string) ([]byte, error)
 }
@@ -34,10 +34,8 @@ func (s *FakeJobService) StopJob(
 	return s.StopJobFn(ctx, jobID, realDevice)
 }

-func (s *FakeJobService) DownloadArtifacts(
-	job job.Job, isLastAttempt bool,
-) []string {
-	return s.DownloadArtifactFn(job, isLastAttempt)
+func (s *FakeJobService) DownloadArtifacts(ctx context.Context, job job.Job, isLastAttempt bool) []string {
+	return s.DownloadArtifactFn(ctx, job, isLastAttempt)
 }

 // Job mock function
diff --git a/internal/saucecloud/cloud.go b/internal/saucecloud/cloud.go
index 02572cd00..259e2456b 100644
--- a/internal/saucecloud/cloud.go
+++ b/internal/saucecloud/cloud.go
@@ -7,7 +7,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"os/signal"
 	"path"
 	"path/filepath"
 	"strings"
@@ -62,8 +61,7 @@ type CloudRunner struct {

 	NPMDependencies []string

-	interrupted bool
-	Cache       Cache
+	Cache Cache
 }

 type Cache struct {
@@ -102,27 +100,27 @@ func (r *CloudRunner) createWorkerPool(ctx context.Context, ccy int, maxRetries
 	return jobOpts, results
 }

-func (r *CloudRunner) collectResults(results chan result, expected int) bool {
+func (r *CloudRunner) collectResults(ctx context.Context, results chan result, expected int) bool {
 	// TODO find a better way to get the expected
 	completed := 0
 	inProgress := expected
 	passed := true

 	done := make(chan interface{})
-	go func(r *CloudRunner) {
+	go func() {
 		t := time.NewTicker(10 * time.Second)
 		defer t.Stop()
 		for {
 			select {
+			case <-ctx.Done():
+				return
 			case <-done:
 				return
 			case <-t.C:
-				if !r.interrupted {
-					log.Info().Msgf("Suites in progress: %d", inProgress)
-				}
+				log.Info().Msgf("Suites in progress: %d", inProgress)
 			}
 		}
-	}(r)
+	}()

 	for i := 0; i < expected; i++ {
 		res := <-results
@@ -147,7 +145,7 @@
 			browser = fmt.Sprintf("%s %s", browser, res.job.BrowserVersion)
 		}

-		r.FetchJUnitReports(&res, res.artifacts)
+		r.FetchJUnitReports(ctx, &res, res.artifacts)

 		tr := report.TestResult{
 			Name:       res.name,
@@ -164,19 +162,19 @@
 				RDC:        res.job.IsRDC,
 				TimedOut:   res.job.TimedOut,
 				Attempts:   res.attempts,
-				BuildURL:   r.findBuild(res.job.ID, res.job.IsRDC).URL,
+				BuildURL:   r.findBuild(ctx, res.job.ID, res.job.IsRDC).URL,
 			}
 			for _, rep := range r.Reporters {
 				rep.Add(tr)
 			}
 		}
-		r.logSuite(res)
+		r.logSuite(ctx, res)

-		r.reportInsights(res)
+		r.reportInsights(ctx, res)
 	}
 	close(done)

-	if !r.interrupted {
+	if ctx.Err() == nil {
 		for _, rep := range r.Reporters {
 			rep.Render()
 		}
@@ -185,7 +183,7 @@
 	return passed
 }

-func (r *CloudRunner) findBuild(jobID string, isRDC bool) build.Build {
+func (r *CloudRunner) findBuild(ctx context.Context, jobID string, isRDC bool) build.Build {
 	if isRDC {
 		if r.Cache.RDCBuild != nil {
 			return *r.Cache.RDCBuild
@@ -196,7 +194,7 @@
 		}
 	}

-	b, err := r.BuildService.FindBuild(context.Background(), jobID, isRDC)
+	b, err := r.BuildService.FindBuild(ctx, jobID, isRDC)
 	if err != nil {
 		log.Warn().Err(err).Msgf("Failed to retrieve build id for job (%s)", jobID)
 		return build.Build{}
@@ -216,25 +214,18 @@
 		Str("suite", opts.DisplayName).
 		Msg("Starting suite.")

-	j, err = r.JobService.StartJob(context.Background(), opts)
+	// Use this context for operations that should not be interrupted by the CLI,
+	// ensuring jobs are not left abandoned.
+	localCtx := context.Background()
+
+	j, err = r.JobService.StartJob(localCtx, opts)
 	if err != nil {
 		return job.Job{Status: job.StateError}, false, err
 	}

-	sigChan := r.registerInterruptOnSignal(j.ID, opts.RealDevice, opts.DisplayName)
-	defer unregisterSignalCapture(sigChan)
-
 	r.uploadSauceConfig(ctx, j.ID, opts.RealDevice, opts.ConfigFilePath)
 	r.uploadCLIFlags(ctx, j.ID, opts.RealDevice, opts.CLIFlags)

-	// os.Interrupt can arrive before the signal.Notify() is registered. In that case,
-	// if a soft exit is requested during startContainer phase, it gently exits.
-	if r.interrupted {
-		r.stopSuiteExecution(j.ID, opts.RealDevice, opts.DisplayName)
-		j, err = r.JobService.PollJob(context.Background(), j.ID, 15*time.Second, opts.Timeout, opts.RealDevice)
-		return j, true, err
-	}
-
 	l := log.Info().Str("url", j.URL).Str("suite", opts.DisplayName).Str("platform", opts.PlatformName)

 	if opts.RealDevice {
@@ -252,9 +243,13 @@
 	}

 	// High interval poll to not oversaturate the job reader with requests
-	j, err = r.JobService.PollJob(context.Background(), j.ID, 15*time.Second, opts.Timeout, opts.RealDevice)
+	j, err = r.JobService.PollJob(ctx, j.ID, 15*time.Second, opts.Timeout, opts.RealDevice)
+	if ctx.Err() != nil {
+		r.stopSuiteExecution(localCtx, j.ID, opts.RealDevice, opts.DisplayName)
+		return j, true, nil
+	}
 	if err != nil {
-		return job.Job{}, r.interrupted, fmt.Errorf("failed to retrieve job status for suite %s: %s", opts.DisplayName, err.Error())
+		return job.Job{}, false, fmt.Errorf("failed to retrieve job status for suite %s: %s", opts.DisplayName, err.Error())
 	}

 	// Check timeout
@@ -264,7 +259,7 @@
 			Str("timeout", opts.Timeout.String()).
 			Msg("Suite timed out.")

-		r.stopSuiteExecution(j.ID, opts.RealDevice, opts.DisplayName)
+		r.stopSuiteExecution(localCtx, j.ID, opts.RealDevice, opts.DisplayName)

 		j.Passed = false
 		j.TimedOut = true
@@ -274,7 +269,7 @@

 	if !j.Passed {
 		// We may need to differentiate when a job has crashed vs. when there is errors.
-		return j, r.interrupted, errors.New("suite has test failures")
+		return j, false, errors.New("suite has test failures")
 	}

 	return j, false, nil
@@ -301,6 +296,8 @@ func (r *CloudRunner) shouldRetry(opts job.StartOptions, jobData job.Job, skippe
 }

 func (r *CloudRunner) runJobs(ctx context.Context, jobOpts chan job.StartOptions, results chan<- result) {
+	skipStart := false
+
 	for opts := range jobOpts {
 		start := time.Now()

@@ -311,7 +308,7 @@
 			BuildName: opts.Build,
 		}

-		if r.interrupted {
+		if ctx.Err() != nil || skipStart {
 			results <- result{
 				name:    opts.DisplayName,
 				browser: opts.BrowserName,
@@ -335,7 +332,7 @@
 		}

 		if r.shouldRetry(opts, jobData, skipped) {
-			go r.JobService.DownloadArtifacts(jobData, false)
+			go r.JobService.DownloadArtifacts(ctx, jobData, false)
 			if !jobData.Passed {
 				log.Warn().Err(err).
 					Str("attempt",
@@ -360,10 +357,10 @@

 		if r.FailFast && !jobData.Passed {
 			log.Warn().Err(err).Msg("FailFast mode enabled. Skipping upcoming suites.")
-			r.interrupted = true
+			skipStart = true
 		}

-		if !r.Async {
+		if !r.Async && !skipped {
 			if opts.CurrentPassCount < opts.PassThreshold {
 				log.Error().Str("suite", opts.DisplayName).Msg("Failed to pass threshold")
 				jobData.Status = job.StateFailed
@@ -375,12 +372,15 @@
 			}
 		}

-		files := r.JobService.DownloadArtifacts(jobData, true)
 		var artifacts []report.Artifact
-		for _, f := range files {
-			artifacts = append(artifacts, report.Artifact{
-				FilePath: f,
-			})
+
+		if !skipped {
+			files := r.JobService.DownloadArtifacts(ctx, jobData, true)
+			for _, f := range files {
+				artifacts = append(artifacts, report.Artifact{
+					FilePath: f,
+				})
+			}
 		}

 		results <- result{
@@ -629,7 +629,7 @@ func (r *CloudRunner) remoteArchiveFiles(ctx context.Context, project interface{

 // FetchJUnitReports retrieves junit reports for the given result and all of its
 // attempts. Can use the given artifacts to avoid unnecessary API calls.
-func (r *CloudRunner) FetchJUnitReports(res *result, artifacts []report.Artifact) {
+func (r *CloudRunner) FetchJUnitReports(ctx context.Context, res *result, artifacts []report.Artifact) {
 	if !report.IsArtifactRequired(r.Reporters, report.JUnitArtifact) {
 		return
 	}
@@ -655,7 +655,7 @@
 			log.Debug().Msg("Using cached JUnit report")
 		} else {
 			content, err = r.JobService.Artifact(
-				context.Background(),
+				ctx,
 				attempt.ID,
 				junit.FileName,
 				res.job.IsRDC,
@@ -787,7 +787,7 @@ func (r *CloudRunner) isFileStored(ctx context.Context, filename string) (storag
 }

 // logSuite display the result of a suite
-func (r *CloudRunner) logSuite(res result) {
+func (r *CloudRunner) logSuite(ctx context.Context, res result) {
 	logger := log.With().
 		Str("suite", res.name).
 		Bool("passed", res.job.Passed).
@@ -827,11 +827,15 @@
 		l.Msg("Suite failed.")
 	}

-	r.logSuiteConsole(res)
+	r.logSuiteConsole(ctx, res)
 }

 // logSuiteError display the console output when tests from a suite are failing
-func (r *CloudRunner) logSuiteConsole(res result) {
+func (r *CloudRunner) logSuiteConsole(ctx context.Context, res result) {
+	if ctx.Err() != nil {
+		return
+	}
+
 	// To avoid clutter, we don't show the console on job passes.
 	if res.job.Passed && !r.ShowConsoleLog {
 		return
@@ -846,13 +850,13 @@
 	var err error

 	// Display log only when at least it has started
-	if assetContent, err = r.JobService.Artifact(context.Background(), res.job.ID, ConsoleLogAsset, res.job.IsRDC); err == nil {
+	if assetContent, err = r.JobService.Artifact(ctx, res.job.ID, ConsoleLogAsset, res.job.IsRDC); err == nil {
 		log.Info().Str("suite", res.name).Msgf("console.log output: \n%s", assetContent)
 		return
 	}

 	// Some frameworks produce a junit.xml instead, check for that file if there's no console.log
-	assetContent, err = r.JobService.Artifact(context.Background(), res.job.ID, junit.FileName, res.job.IsRDC)
+	assetContent, err = r.JobService.Artifact(ctx, res.job.ID, junit.FileName, res.job.IsRDC)
 	if err != nil {
 		log.Warn().Err(err).Str("suite", res.name).Msg("Failed to retrieve the console output.")
 		return
@@ -905,55 +909,15 @@ func (r *CloudRunner) validateTunnel(ctx context.Context, name, owner string, dr
 }

 // stopSuiteExecution stops the current execution on Sauce Cloud
-func (r *CloudRunner) stopSuiteExecution(jobID string, realDevice bool, suiteName string) {
-	log.Info().Str("suite", suiteName).Msg("Attempting to stop job...")
+func (r *CloudRunner) stopSuiteExecution(ctx context.Context, jobID string, realDevice bool, suiteName string) {
+	log.Info().
+		Str("suite", suiteName).
+		Str("id", jobID).
+		Msg("Attempting to stop job...")

 	// Ignore errors when stopping a job, as it may have already ended or is in
 	// a state where it cannot be stopped. Either way, there's nothing we can do.
-	_, _ = r.JobService.StopJob(context.Background(), jobID, realDevice)
-}
-
-// registerInterruptOnSignal stops execution on Sauce Cloud when a SIGINT is captured.
-func (r *CloudRunner) registerInterruptOnSignal(jobID string, realDevice bool, suiteName string) chan os.Signal {
-	sigChan := make(chan os.Signal, 1)
-	signal.Notify(sigChan, os.Interrupt)
-
-	go func() {
-		sig := <-sigChan
-		if sig == nil {
-			return
-		}
-		r.stopSuiteExecution(jobID, realDevice, suiteName)
-	}()
-
-	return sigChan
-}
-
-// registerSkipSuitesOnSignal prevent new suites from being executed when a SIGINT is captured.
-func (r *CloudRunner) registerSkipSuitesOnSignal() chan os.Signal {
-	sigChan := make(chan os.Signal, 1)
-	signal.Notify(sigChan, os.Interrupt)
-
-	go func(c <-chan os.Signal, cr *CloudRunner) {
-		for {
-			sig := <-c
-			if sig == nil {
-				return
-			}
-			if cr.interrupted {
-				os.Exit(1)
-			}
-			println("\nStopping run. Waiting for all in progress tests to be stopped... (press Ctrl-c again to exit without waiting)\n")
-			cr.interrupted = true
-		}
-	}(sigChan, r)
-	return sigChan
-}
-
-// unregisterSignalCapture remove the signal hook associated to the chan c.
-func unregisterSignalCapture(c chan os.Signal) {
-	signal.Stop(c)
-	close(c)
+	_, _ = r.JobService.StopJob(ctx, jobID, realDevice)
 }

 // uploadSauceConfig adds job configuration as an asset.
@@ -1014,8 +978,8 @@
 	return available
 }

-func (r *CloudRunner) getHistory(launchOrder config.LaunchOrder) (insights.JobHistory, error) {
-	user, err := r.UserService.User(context.Background())
+func (r *CloudRunner) getHistory(ctx context.Context, launchOrder config.LaunchOrder) (insights.JobHistory, error) {
+	user, err := r.UserService.User(ctx)
 	if err != nil {
 		return insights.JobHistory{}, err
 	}
@@ -1023,29 +987,29 @@
 	// The config uses spaces, but the API requires underscores.
 	sortBy := strings.ReplaceAll(string(launchOrder), " ", "_")

-	return r.InsightsService.GetHistory(context.Background(), user, sortBy)
+	return r.InsightsService.GetHistory(ctx, user, sortBy)
 }

-func (r *CloudRunner) reportInsights(res result) {
+func (r *CloudRunner) reportInsights(ctx context.Context, res result) {
 	// NOTE: Jobs must be finished in order to be reported to Insights.
 	// * Async jobs have an unknown status by definition, so should always be excluded from reporting.
 	// * Timed out jobs will be requested to stop, but stopping a job
 	//   is either not possible (rdc) or async (vdc) so its actual status is not known now.
 	//   Skip reporting to be safe.
-	if r.Async || !job.Done(res.job.Status) || res.job.TimedOut || res.skipped || res.job.ID == "" {
+	if ctx.Err() != nil || r.Async || !job.Done(res.job.Status) || res.job.TimedOut || res.skipped || res.job.ID == "" {
 		return
 	}

-	res.details.BuildID = r.findBuild(res.job.ID, res.job.IsRDC).ID
+	res.details.BuildID = r.findBuild(ctx, res.job.ID, res.job.IsRDC).ID

-	assets, err := r.JobService.ArtifactNames(context.Background(), res.job.ID, res.job.IsRDC)
+	assets, err := r.JobService.ArtifactNames(ctx, res.job.ID, res.job.IsRDC)
 	if err != nil {
 		log.Warn().Err(err).Str("action", "loadAssets").Str("jobID", res.job.ID).Msg(msg.InsightsReportError)
 		return
 	}

 	// read job from insights to get accurate platform and device name
-	j, err := r.InsightsService.ReadJob(context.Background(), res.job.ID)
+	j, err := r.InsightsService.ReadJob(ctx, res.job.ID)
 	if err != nil {
 		log.Warn().Err(err).Str("action", "readJob").Str("jobID", res.job.ID).Msg(msg.InsightsReportError)
 		return
@@ -1055,14 +1019,14 @@
 	var testRuns []insights.TestRun

 	if arrayContains(assets, saucereport.FileName) {
-		report, err := r.loadSauceTestReport(res.job.ID, res.job.IsRDC)
+		report, err := r.loadSauceTestReport(ctx, res.job.ID, res.job.IsRDC)
 		if err != nil {
 			log.Warn().Err(err).Str("action", "parsingJSON").Str("jobID", res.job.ID).Msg(msg.InsightsReportError)
 			return
 		}
 		testRuns = insights.FromSauceReport(report, res.job.ID, res.name, res.details, res.job.IsRDC)
 	} else if arrayContains(assets, junit.FileName) {
-		report, err := r.loadJUnitReport(res.job.ID, res.job.IsRDC)
+		report, err := r.loadJUnitReport(ctx, res.job.ID, res.job.IsRDC)
 		if err != nil {
 			log.Warn().Err(err).Str("action", "parsingXML").Str("jobID", res.job.ID).Msg(msg.InsightsReportError)
 			return
@@ -1071,14 +1035,14 @@
 	}

 	if len(testRuns) > 0 {
-		if err := r.InsightsService.PostTestRun(context.Background(), testRuns); err != nil {
+		if err := r.InsightsService.PostTestRun(ctx, testRuns); err != nil {
 			log.Warn().Err(err).Str("action", "posting").Str("jobID", res.job.ID).Msg(msg.InsightsReportError)
 		}
 	}
 }

-func (r *CloudRunner) loadSauceTestReport(jobID string, isRDC bool) (saucereport.SauceReport, error) {
-	fileContent, err := r.JobService.Artifact(context.Background(), jobID, saucereport.FileName, isRDC)
+func (r *CloudRunner) loadSauceTestReport(ctx context.Context, jobID string, isRDC bool) (saucereport.SauceReport, error) {
+	fileContent, err := r.JobService.Artifact(ctx, jobID, saucereport.FileName, isRDC)
 	if err != nil {
 		log.Warn().Err(err).Str("action", "loading-json-report").Msg(msg.InsightsReportError)
 		return saucereport.SauceReport{}, err
@@ -1086,8 +1050,8 @@
 	return saucereport.Parse(fileContent)
 }

-func (r *CloudRunner) loadJUnitReport(jobID string, isRDC bool) (junit.TestSuites, error) {
-	fileContent, err := r.JobService.Artifact(context.Background(), jobID, junit.FileName, isRDC)
+func (r *CloudRunner) loadJUnitReport(ctx context.Context, jobID string, isRDC bool) (junit.TestSuites, error) {
+	fileContent, err := r.JobService.Artifact(ctx, jobID, junit.FileName, isRDC)
 	if err != nil {
 		log.Warn().Err(err).Str("action", "loading-xml-report").Msg(msg.InsightsReportError)
 		return junit.TestSuites{}, err
diff --git a/internal/saucecloud/cloud_test.go b/internal/saucecloud/cloud_test.go
index 73d61fd68..6bf9858d5 100644
--- a/internal/saucecloud/cloud_test.go
+++ b/internal/saucecloud/cloud_test.go
@@ -1,59 +1,14 @@
 package saucecloud

 import (
-	"context"
 	"os"
 	"path/filepath"
 	"strings"
-	"syscall"
 	"testing"
-	"time"

-	"github.com/saucelabs/saucectl/internal/job"
 	"github.com/stretchr/testify/assert"
 )

-func TestSignalDetection(t *testing.T) {
-	r := CloudRunner{JobService: JobService{}}
-	assert.False(t, r.interrupted)
-	c := r.registerSkipSuitesOnSignal()
-	defer unregisterSignalCapture(c)
-
-	c <- syscall.SIGINT
-
-	deadline := time.NewTimer(3 * time.Second)
-	defer deadline.Stop()
-
-	// Wait for interrupt to be processed, as it happens asynchronously.
-	for {
-		select {
-		case <-deadline.C:
-			assert.True(t, r.interrupted)
-			return
-		default:
-			if r.interrupted {
-				return
-			}
-			time.Sleep(1 * time.Nanosecond) // allow context switch
-		}
-	}
-}
-
-func TestRunJobsSkipped(t *testing.T) {
-	r := CloudRunner{}
-	r.interrupted = true
-
-	opts := make(chan job.StartOptions)
-	results := make(chan result)
-
-	go r.runJobs(context.Background(), opts, results)
-	opts <- job.StartOptions{}
-	close(opts)
-	res := <-results
-	assert.Nil(t, res.err)
-	assert.True(t, res.skipped)
-}
-
 func Test_arrayContains(t *testing.T) {
 	type args struct {
 		list []string
diff --git a/internal/saucecloud/cucumber.go b/internal/saucecloud/cucumber.go
index b509790dc..347fd7218 100644
--- a/internal/saucecloud/cucumber.go
+++ b/internal/saucecloud/cucumber.go
@@ -135,15 +135,12 @@ func (r *CucumberRunner) getSuiteNames() []string {
 }

 func (r *CucumberRunner) runSuites(ctx context.Context, app string, otherApps []string) bool {
-	sigChan := r.registerSkipSuitesOnSignal()
-	defer unregisterSignalCapture(sigChan)
-
 	jobOpts, results := r.createWorkerPool(ctx, r.Project.Sauce.Concurrency, r.Project.Sauce.Retries)
 	defer close(results)

 	suites := r.Project.Suites
 	if r.Project.Sauce.LaunchOrder != "" {
-		history, err := r.getHistory(r.Project.Sauce.LaunchOrder)
+		history, err := r.getHistory(ctx, r.Project.Sauce.LaunchOrder)
 		if err != nil {
 			log.Warn().Err(err).Msg(msg.RetrieveJobHistoryError)
 		} else {
@@ -187,5 +184,5 @@
 		}
 	}()

-	return r.collectResults(results, len(r.Project.Suites))
+	return r.collectResults(ctx, results, len(r.Project.Suites))
 }
diff --git a/internal/saucecloud/cypress.go b/internal/saucecloud/cypress.go
index 5247c3a05..571be7d6a 100644
--- a/internal/saucecloud/cypress.go
+++ b/internal/saucecloud/cypress.go
@@ -127,14 +127,12 @@ func (r *CypressRunner) validateFramework(ctx context.Context, m framework.Metad
 }

 func (r *CypressRunner) runSuites(ctx context.Context, app string, otherApps []string) bool {
-	sigChan := r.registerSkipSuitesOnSignal()
-	defer unregisterSignalCapture(sigChan)
 	jobOpts, results := r.createWorkerPool(ctx, r.Project.GetSauceCfg().Concurrency, r.Project.GetSauceCfg().Retries)
 	defer close(results)

 	suites := r.Project.GetSuites()
 	if r.Project.GetSauceCfg().LaunchOrder != "" {
-		history, err := r.getHistory(r.Project.GetSauceCfg().LaunchOrder)
+		history, err := r.getHistory(ctx, r.Project.GetSauceCfg().LaunchOrder)
 		if err != nil {
 			log.Warn().Err(err).Msg(msg.RetrieveJobHistoryError)
 		} else {
@@ -182,5 +180,5 @@
 		}
 	}()

-	return r.collectResults(results, r.Project.GetSuiteCount())
+	return r.collectResults(ctx, results, r.Project.GetSuiteCount())
 }
diff --git a/internal/saucecloud/espresso.go b/internal/saucecloud/espresso.go
index ab884081b..c2e535252 100644
--- a/internal/saucecloud/espresso.go
+++ b/internal/saucecloud/espresso.go
@@ -97,9 +97,6 @@ func (r *EspressoRunner) RunProject(ctx context.Context) (int, error) {
 }

 func (r *EspressoRunner) runSuites(ctx context.Context) bool {
-	sigChan := r.registerSkipSuitesOnSignal()
-	defer unregisterSignalCapture(sigChan)
-
 	jobOpts, results := r.createWorkerPool(
 		ctx, r.Project.Sauce.Concurrency, r.Project.Sauce.Retries,
 	)
@@ -107,7 +104,7 @@

 	suites := r.Project.Suites
 	if r.Project.Sauce.LaunchOrder != "" {
-		history, err := r.getHistory(r.Project.Sauce.LaunchOrder)
+		history, err := r.getHistory(ctx, r.Project.Sauce.LaunchOrder)
 		if err != nil {
 			log.Warn().Err(err).Msg(msg.RetrieveJobHistoryError)
 		} else {
@@ -157,7 +154,7 @@
 		}
 	}()

-	return r.collectResults(results, len(startOptions))
+	return r.collectResults(ctx, results, len(startOptions))
 }

 func (r *EspressoRunner) dryRun() {
diff --git a/internal/saucecloud/jobservice.go b/internal/saucecloud/jobservice.go
index f560ea449..fa9014e6f 100644
--- a/internal/saucecloud/jobservice.go
+++ b/internal/saucecloud/jobservice.go
@@ -22,9 +22,7 @@ type JobService struct {
 	ArtifactDownloadConfig config.ArtifactDownload
 }

-func (s JobService) DownloadArtifacts(
-	jobData job.Job, isLastAttempt bool,
-) []string {
+func (s JobService) DownloadArtifacts(ctx context.Context, jobData job.Job, isLastAttempt bool) []string {
 	if s.skipDownload(jobData, isLastAttempt) {
 		return []string{}
 	}
@@ -38,7 +36,7 @@
 	}

 	files, err := s.ArtifactNames(
-		context.Background(), jobData.ID, jobData.IsRDC,
+		ctx, jobData.ID, jobData.IsRDC,
 	)
 	if err != nil {
 		log.Error().Msgf("Unable to fetch artifacts list (%v)", err)
@@ -50,7 +48,7 @@

 	for _, f := range filepaths {
 		targetFile, err := s.downloadArtifact(
-			destDir, jobData.ID, f, jobData.IsRDC,
+			ctx, destDir, jobData.ID, f, jobData.IsRDC,
 		)
 		if err != nil {
 			log.Err(err).Msg("Unable to download artifacts")
@@ -119,10 +117,10 @@
 }

 func (s JobService) downloadArtifact(
-	targetDir, jobID, fileName string, realDevice bool,
+	ctx context.Context, targetDir, jobID, fileName string, realDevice bool,
 ) (string, error) {
 	content, err := s.Artifact(
-		context.Background(), jobID, fileName, realDevice,
+		ctx, jobID, fileName, realDevice,
 	)
 	if err != nil {
 		return "", err
diff --git a/internal/saucecloud/jobservice_test.go b/internal/saucecloud/jobservice_test.go
index afcd28c77..b47518df5 100644
--- a/internal/saucecloud/jobservice_test.go
+++ b/internal/saucecloud/jobservice_test.go
@@ -1,6 +1,7 @@
 package saucecloud

 import (
+	"context"
 	"net/http"
 	"net/http/httptest"
 	"os"
@@ -59,14 +60,12 @@ func TestJobService_DownloadArtifact(t *testing.T) {
 		RDC:                    rdc,
 		ArtifactDownloadConfig: artifactCfg,
 	}
-	downloader.DownloadArtifacts(
-		job.Job{
-			ID:     "test-123",
-			Name:   "suite name",
-			IsRDC:  true,
-			Status: job.StateComplete,
-		}, true,
-	)
+	downloader.DownloadArtifacts(context.Background(), job.Job{
+		ID:     "test-123",
+		Name:   "suite name",
+		IsRDC:  true,
+		Status: job.StateComplete,
+	}, true)

 	fileName := filepath.Join(tempDir, "suite_name", "junit.xml")
 	d, err := os.ReadFile(fileName)
diff --git a/internal/saucecloud/playwright.go b/internal/saucecloud/playwright.go
index 131d6c081..3600407e7 100644
--- a/internal/saucecloud/playwright.go
+++ b/internal/saucecloud/playwright.go
@@ -140,15 +140,12 @@ func (r *PlaywrightRunner) getSuiteNames() []string {
 }

 func (r *PlaywrightRunner) runSuites(ctx context.Context, app string, otherApps []string) bool {
-	sigChan := r.registerSkipSuitesOnSignal()
-	defer unregisterSignalCapture(sigChan)
-
 	jobOpts, results := r.createWorkerPool(ctx, r.Project.Sauce.Concurrency, r.Project.Sauce.Retries)
 	defer close(results)

 	suites := r.Project.Suites
 	if r.Project.Sauce.LaunchOrder != "" {
-		history, err := r.getHistory(r.Project.Sauce.LaunchOrder)
+		history, err := r.getHistory(ctx, r.Project.Sauce.LaunchOrder)
 		if err != nil {
 			log.Warn().Err(err).Msg(msg.RetrieveJobHistoryError)
 		} else {
@@ -198,5 +195,5 @@
 		}
 	}()

-	return r.collectResults(results, len(r.Project.Suites))
+	return r.collectResults(ctx, results, len(r.Project.Suites))
 }
diff --git a/internal/saucecloud/replay.go b/internal/saucecloud/replay.go
index 1cdeebffa..d04918f96 100644
--- a/internal/saucecloud/replay.go
+++ b/internal/saucecloud/replay.go
@@ -73,15 +73,12 @@ func (r *ReplayRunner) RunProject(ctx context.Context) (int, error) {
 }

 func (r *ReplayRunner) runSuites(ctx context.Context, fileURI string) bool {
-	sigChan := r.registerSkipSuitesOnSignal()
-	defer unregisterSignalCapture(sigChan)
-
 	jobOpts, results := r.createWorkerPool(ctx, r.Project.Sauce.Concurrency, r.Project.Sauce.Retries)
 	defer close(results)

 	suites := r.Project.Suites
 	if r.Project.Sauce.LaunchOrder != "" {
-		history, err := r.getHistory(r.Project.Sauce.LaunchOrder)
+		history, err := r.getHistory(ctx, r.Project.Sauce.LaunchOrder)
 		if err != nil {
 			log.Warn().Err(err).Msg(msg.RetrieveJobHistoryError)
 		} else {
@@ -119,5 +116,5 @@
 		}
 	}()

-	return r.collectResults(results, len(r.Project.Suites))
+	return r.collectResults(ctx, results, len(r.Project.Suites))
 }
diff --git a/internal/saucecloud/retry/saucereportretrier.go b/internal/saucecloud/retry/saucereportretrier.go
index 10bcd1980..039e6847a 100644
--- a/internal/saucecloud/retry/saucereportretrier.go
+++ b/internal/saucecloud/retry/saucereportretrier.go
@@ -42,7 +42,7 @@ func (r *SauceReportRetrier) retryFailedTests(ctx context.Context, opt *job.Star
 		return false
 	}

-	report, err := r.getSauceReport(previous)
+	report, err := r.getSauceReport(ctx, previous)
 	if err != nil {
 		log.Err(err).Msgf(msg.UnableToFetchFile, saucereport.FileName)
 		return false
@@ -111,8 +111,8 @@ func (r *SauceReportRetrier) uploadConfig(ctx context.Context, filename string)
 	return resp.ID, nil
 }

-func (r *SauceReportRetrier) getSauceReport(job job.Job) (saucereport.SauceReport, error) {
-	content, err := r.JobService.Artifact(context.Background(), job.ID, saucereport.FileName, false)
+func (r *SauceReportRetrier) getSauceReport(ctx context.Context, job job.Job) (saucereport.SauceReport, error) {
+	content, err := r.JobService.Artifact(ctx, job.ID, saucereport.FileName, false)
 	if err != nil {
 		return saucereport.SauceReport{}, err
 	}
diff --git a/internal/saucecloud/retry/saucereportretrier_test.go b/internal/saucecloud/retry/saucereportretrier_test.go
index 0a29f3050..9fea70dd5 100644
--- a/internal/saucecloud/retry/saucereportretrier_test.go
+++ b/internal/saucecloud/retry/saucereportretrier_test.go
@@ -58,7 +58,7 @@ func (f *JobServiceStub) StopJob(context.Context, string, bool) (
 	panic("implement me")
 }

-func (f *JobServiceStub) DownloadArtifacts(job.Job, bool) []string {
+func (f *JobServiceStub) DownloadArtifacts(context.Context, job.Job, bool) []string {
 	panic("implement me")
 }

diff --git a/internal/saucecloud/testcafe.go b/internal/saucecloud/testcafe.go
index 5feb90030..9a94ff215 100644
--- a/internal/saucecloud/testcafe.go
+++ b/internal/saucecloud/testcafe.go
@@ -135,15 +135,12 @@ func (r *TestcafeRunner) getSuiteNames() []string {
 }

 func (r *TestcafeRunner) runSuites(ctx context.Context, app string, otherApps []string) bool {
-	sigChan := r.registerSkipSuitesOnSignal()
-	defer unregisterSignalCapture(sigChan)
-
 	jobOpts, results := r.createWorkerPool(ctx, r.Project.Sauce.Concurrency, r.Project.Sauce.Retries)
 	defer close(results)

 	suites := r.Project.Suites
 	if r.Project.Sauce.LaunchOrder != "" {
-		history, err := r.getHistory(r.Project.Sauce.LaunchOrder)
+		history, err := r.getHistory(ctx, r.Project.Sauce.LaunchOrder)
 		if err != nil {
 			log.Warn().Err(err).Msg(msg.RetrieveJobHistoryError)
 		} else {
@@ -179,7 +176,7 @@
 		}
 	}()

-	return r.collectResults(results, jobsCount)
+	return r.collectResults(ctx, results, jobsCount)
 }

 func (r *TestcafeRunner) generateStartOpts(s testcafe.Suite) job.StartOptions {
diff --git a/internal/saucecloud/xcuitest.go b/internal/saucecloud/xcuitest.go
index 1d115759e..68e1c85a1 100644
--- a/internal/saucecloud/xcuitest.go
+++ b/internal/saucecloud/xcuitest.go
@@ -164,15 +164,12 @@ func (r *XcuitestRunner) dryRun() {
 }

 func (r *XcuitestRunner) runSuites(ctx context.Context) bool {
-	sigChan := r.registerSkipSuitesOnSignal()
-	defer unregisterSignalCapture(sigChan)
-
 	jobOpts, results := r.createWorkerPool(ctx, r.Project.Sauce.Concurrency, r.Project.Sauce.Retries)
 	defer close(results)

 	suites := r.Project.Suites
 	if r.Project.Sauce.LaunchOrder != "" {
-		history, err := r.getHistory(r.Project.Sauce.LaunchOrder)
+		history, err := r.getHistory(ctx, r.Project.Sauce.LaunchOrder)
 		if err != nil {
 			log.Warn().Err(err).Msg(msg.RetrieveJobHistoryError)
 		} else {
@@ -194,7 +191,7 @@
 		}
 	}()

-	return r.collectResults(results, jobsCount)
+	return r.collectResults(ctx, results, jobsCount)
 }

 func (r *XcuitestRunner) startJob(jobOpts chan<- job.StartOptions, appFileID, testAppFileID string, otherAppsIDs []string, s xcuitest.Suite, d deviceConfig) {
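
The core pattern this patch applies to RDCService.PollJob and Resto.PollJob is a select that races the poll ticker against ctx.Done(), so an interrupt cancels a poll immediately instead of waiting out the current interval. Sketched standalone below for reference; pollUntilDone and fetchStatus are illustrative names, not identifiers from saucectl, and the status strings are placeholders.

package sketch

import (
	"context"
	"errors"
	"time"
)

// pollUntilDone calls fetchStatus on every tick until the job reaches a done
// state, the timeout elapses, or ctx is canceled. The ctx.Done() case is what
// makes cancellation take effect without waiting for the next tick.
func pollUntilDone(
	ctx context.Context,
	fetchStatus func(context.Context) (string, error),
	interval, timeout time.Duration,
) (string, error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	deathclock := time.NewTimer(timeout)
	defer deathclock.Stop()

	for {
		select {
		case <-ctx.Done():
			// Interrupted: surface the cancellation right away.
			return "", ctx.Err()
		case <-ticker.C:
			status, err := fetchStatus(ctx)
			if err != nil {
				return "", err
			}
			if status == "complete" || status == "error" {
				return status, nil
			}
		case <-deathclock.C:
			return "", errors.New("polling timed out")
		}
	}
}

Note the related choice in the patch itself: PollJob now returns job.Job{ID: id} alongside the error instead of an empty job.Job{}, so a caller that was interrupted still knows which job to report on or stop.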
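The commit bullets "use global context where applicable" and "simplify interrupt handling" move interrupt detection out of per-runner signal handlers (registerInterruptOnSignal, registerSkipSuitesOnSignal) and into cancellation of a shared context. How that root context gets bound to Ctrl-C is not part of this diff; the sketch below shows the usual stdlib wiring under that assumption (signal.NotifyContext, standard library since Go 1.16), with cleanup deliberately run on a fresh context, mirroring the patch's localCtx in runJob so a StopJob-style call still reaches the API after the interrupt.

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"
)

func main() {
	// Root context for the whole run: canceled on the first Ctrl-C.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			// Cleanup uses a fresh context so it is not itself canceled,
			// just as runJob calls stopSuiteExecution with localCtx.
			cleanup(context.Background())
			fmt.Println("interrupted:", ctx.Err())
			return
		case <-ticker.C:
			fmt.Println("suite still running...")
		}
	}
}

// cleanup stands in for a StopJob-style call that must outlive the interrupt.
func cleanup(ctx context.Context) {
	_ = ctx // e.g. r.JobService.StopJob(ctx, jobID, realDevice)
}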