Improve restarts #247
Changes from 1 commit
```diff
@@ -371,48 +371,23 @@ func (d *LocalRunner) ExitErr() <-chan error {
 }
 
 func (d *LocalRunner) Stop() error {
-	// only stop the containers that belong to this session
-	containers, err := d.client.ContainerList(context.Background(), container.ListOptions{
-		Filters: filters.NewArgs(filters.Arg("label", fmt.Sprintf("playground.session=%s", d.manifest.ID))),
-	})
-	if err != nil {
-		return fmt.Errorf("error getting container list: %w", err)
-	}
-
-	var wg sync.WaitGroup
-	wg.Add(len(containers))
-
-	var errCh chan error
-	errCh = make(chan error, len(containers))
-
-	for _, cont := range containers {
-		go func(contID string) {
-			defer wg.Done()
-			if err := d.client.ContainerRemove(context.Background(), contID, container.RemoveOptions{
-				RemoveVolumes: true,
-				RemoveLinks:   false,
-				Force:         true,
-			}); err != nil {
-				errCh <- fmt.Errorf("error removing container: %w", err)
-			}
-		}(cont.ID)
-	}
-
-	wg.Wait()
+	// stop the docker-compose
+	cmd := exec.CommandContext(
+		context.Background(), "docker", "compose",
+		"-p", d.manifest.ID,
+		"down",
+		"-v", // removes containers and volumes
+	)
+
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("error taking docker-compose down: %w", err)
+	}
 
 	// stop all the handles
 	for _, handle := range d.handles {
 		handle.Process.Kill()
 	}
 
-	close(errCh)
-
-	for err := range errCh {
-		if err != nil {
-			return err
-		}
-	}
-
 	return nil
 }
```
```diff
@@ -584,26 +559,26 @@ func (d *LocalRunner) validateImageExists(image string) error {
 	return fmt.Errorf("image %s not found", image)
 }
 
-func (d *LocalRunner) toDockerComposeService(s *Service) (map[string]interface{}, error) {
+func (d *LocalRunner) toDockerComposeService(s *Service) (map[string]interface{}, []string, error) {
 	// apply the template again on the arguments to figure out the connections
 	// at this point all of them are valid, we just have to resolve them again. We assume for now
 	// everyone is going to be on docker at the same network.
 	args, envs, err := d.applyTemplate(s)
 	if err != nil {
-		return nil, fmt.Errorf("failed to apply template, err: %w", err)
+		return nil, nil, fmt.Errorf("failed to apply template, err: %w", err)
 	}
 
 	// The containers have access to the full set of artifacts on the /artifacts folder
 	// so, we have to bind it as a volume on the container.
 	outputFolder, err := d.out.AbsoluteDstPath()
 	if err != nil {
-		return nil, fmt.Errorf("failed to get absolute path for output folder: %w", err)
+		return nil, nil, fmt.Errorf("failed to get absolute path for output folder: %w", err)
 	}
 
 	// Validate that the image exists
 	imageName := fmt.Sprintf("%s:%s", s.Image, s.Tag)
 	if err := d.validateImageExists(imageName); err != nil {
-		return nil, fmt.Errorf("failed to validate image %s: %w", imageName, err)
+		return nil, nil, fmt.Errorf("failed to validate image %s: %w", imageName, err)
 	}
 
 	labels := map[string]string{
```
```diff
@@ -633,12 +608,11 @@ func (d *LocalRunner) toDockerComposeService(s *Service) (map[string]interface{}
 	}
 
 	// create the bind volumes
+	var createdVolumes []string
 	for localPath, volumeName := range s.VolumesMapped {
-		volumeDirAbsPath, err := d.createVolume(s.Name, volumeName)
-		if err != nil {
-			return nil, err
-		}
-		volumes[volumeDirAbsPath] = localPath
+		dockerVolumeName := d.createVolumeName(s.Name, volumeName)
+		volumes[dockerVolumeName] = localPath
+		createdVolumes = append(createdVolumes, dockerVolumeName)
 	}
 
 	volumesInLine := []string{}
```
```diff
@@ -729,7 +703,7 @@ func (d *LocalRunner) toDockerComposeService(s *Service) (map[string]interface{}
 		service["ports"] = ports
 	}
 
-	return service, nil
+	return service, createdVolumes, nil
 }
 
 func (d *LocalRunner) isHostService(name string) bool {
```
```diff
@@ -759,18 +733,27 @@ func (d *LocalRunner) generateDockerCompose() ([]byte, error) {
 		}
 	}
 
+	volumes := map[string]struct{}{}
 	for _, svc := range d.manifest.Services {
 		if d.isHostService(svc.Name) {
 			// skip services that are going to be launched on host
 			continue
 		}
-		var err error
-		if services[svc.Name], err = d.toDockerComposeService(svc); err != nil {
+		var (
+			err           error
+			dockerVolumes []string
+		)
+		services[svc.Name], dockerVolumes, err = d.toDockerComposeService(svc)
+		if err != nil {
 			return nil, fmt.Errorf("failed to convert service %s to docker compose service: %w", svc.Name, err)
 		}
+		for _, volumeName := range dockerVolumes {
+			volumes[volumeName] = struct{}{}
+		}
 	}
 
 	compose["services"] = services
+	compose["volumes"] = volumes
```
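For reference, the top-level `volumes` map added here marshals into named-volume entries in the generated compose file. A minimal sketch of what that produces, assuming `gopkg.in/yaml.v3` (the repo's actual yaml package may differ) and an illustrative volume name:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Mirrors the shape generateDockerCompose builds before marshaling.
	compose := map[string]interface{}{
		"volumes": map[string]struct{}{
			"volume-el-data_dir": {}, // illustrative name from createVolumeName
		},
	}
	out, _ := yaml.Marshal(compose)
	fmt.Print(string(out))
	// Prints:
	// volumes:
	//     volume-el-data_dir: {}
	// which docker compose treats as a named volume with default settings.
}
```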
Collaborator
I do not think it is a good idea to add Docker volumes. It has been really helpful in the past to have everything self-contained in the output folder:

I feel that if we introduce volumes we lose those capabilities.
Contributor
Author
The cleanup-on-restart problem happens because one of the containers writes a dir (and a file under it) to the artifacts dir with root permissions. I hear you about the use cases but, outside of the cleanup issue, we seem to risk mixing up container outputs from this dir as well.

If we want to put artifacts into containers, we can also add that as another layer by using the Go Docker client that we already use - we don't have to bind-mount a single dir into all containers. The alternatives are asking all Linux users to run …

If somebody wants to see what artifacts the recipe generated, container inspection or …
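For illustration, pushing a single artifact into a running container through the Go Docker client could look roughly like this - a sketch, not part of this PR; `copyArtifact` is a hypothetical helper, and older SDK versions take `types.CopyToContainerOptions` instead of `container.CopyToContainerOptions`:

```go
import (
	"archive/tar"
	"bytes"
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// copyArtifact (hypothetical) copies one artifact into a running container
// via the Docker SDK instead of bind-mounting the whole /artifacts folder.
func copyArtifact(ctx context.Context, cli *client.Client, containerID, name string, data []byte) error {
	// CopyToContainer expects a tar stream, so wrap the file in one.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{Name: name, Typeflag: tar.TypeReg, Mode: 0o644, Size: int64(len(data))}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := tw.Write(data); err != nil {
		return err
	}
	if err := tw.Close(); err != nil {
		return err
	}
	// The tar is extracted under /artifacts inside the container.
	return cli.CopyToContainer(ctx, containerID, "/artifacts", &buf, container.CopyToContainerOptions{})
}
```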
```diff
 	yamlData, err := yaml.Marshal(compose)
 	if err != nil {
 		return nil, fmt.Errorf("failed to marshal docker compose: %w", err)
```
```diff
@@ -779,7 +762,11 @@ func (d *LocalRunner) generateDockerCompose() ([]byte, error) {
 	return yamlData, nil
 }
 
-func (d *LocalRunner) createVolume(service, volumeName string) (string, error) {
+func (d *LocalRunner) createVolumeName(service, volumeName string) string {
+	return fmt.Sprintf("volume-%s-%s", service, volumeName)
+}
+
+func (d *LocalRunner) createVolumeDir(service, volumeName string) (string, error) {
 	// create the volume in the output folder
 	volumeDirAbsPath, err := d.out.CreateDir(fmt.Sprintf("volume-%s-%s", service, volumeName))
 	if err != nil {
```
```diff
@@ -799,7 +786,7 @@ func (d *LocalRunner) runOnHost(ss *Service) error {
 	// Create the volumes for this service
 	volumesMapped := map[string]string{}
 	for pathInDocker, volumeName := range ss.VolumesMapped {
-		volumeDirAbsPath, err := d.createVolume(ss.Name, volumeName)
+		volumeDirAbsPath, err := d.createVolumeDir(ss.Name, volumeName)
 		if err != nil {
 			return err
 		}
```
```diff
@@ -996,7 +983,13 @@ func (d *LocalRunner) Run(ctx context.Context) error {
 	}
 
 	// First start the services that are running in docker-compose
-	cmd := exec.CommandContext(ctx, "docker", "compose", "-f", d.out.dst+"/docker-compose.yaml", "up", "-d")
+	cmd := exec.CommandContext(
+		ctx, "docker", "compose",
+		"-p", d.manifest.ID, // identify project with id for doing "docker compose down" on it later
+		"-f", d.out.dst+"/docker-compose.yaml",
+		"up",
+		"-d",
+	)
 
 	var errOut bytes.Buffer
 	cmd.Stderr = &errOut
```
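One property worth noting about the `-p` flag: Compose labels everything it creates with the project name, so a session stays discoverable later without the compose file. A sketch using the same client and filter types the runner already imports (`listSessionContainers` is a hypothetical helper; the exact return type varies across Docker SDK versions):

```go
import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// listSessionContainers returns every container Compose created for the
// given project (the session ID passed via -p), including stopped ones.
// Compose sets the com.docker.compose.project label on each of them.
func listSessionContainers(ctx context.Context, cli *client.Client, sessionID string) ([]types.Container, error) {
	return cli.ContainerList(ctx, container.ListOptions{
		All: true,
		Filters: filters.NewArgs(
			filters.Arg("label", "com.docker.compose.project="+sessionID),
		),
	})
}
```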
```diff
@@ -1006,24 +999,16 @@ func (d *LocalRunner) Run(ctx context.Context) error {
 	}
 
 	// Second, start the services that are running on the host machine
-	errCh := make(chan error)
-	go func() {
-		for _, svc := range d.manifest.Services {
-			if d.isHostService(svc.Name) {
-				if err := d.runOnHost(svc); err != nil {
-					errCh <- err
-				}
-			}
-		}
-		close(errCh)
-	}()
-
-	for err := range errCh {
-		if err != nil {
-			return err
+	g := new(errgroup.Group)
+	for _, svc := range d.manifest.Services {
+		if d.isHostService(svc.Name) {
+			g.Go(func() error {
+				return d.runOnHost(svc)
+			})
 		}
 	}
-	return nil
+
+	return g.Wait()
 }
 
 // StopContainersBySessionID removes all Docker containers associated with a specific playground session ID.
```
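The `errgroup` change also removes a subtle hazard in the old code: `errCh` was unbuffered, so if the caller returned on the first error, the producer goroutine could stay blocked forever on its next send; and host services now start concurrently rather than one by one. A minimal self-contained example of the pattern (assumes Go 1.22+ per-iteration loop variables, as the diff itself does):

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g := new(errgroup.Group)
	for _, name := range []string{"svc-a", "svc-b", "svc-c"} {
		g.Go(func() error {
			// Stand-in for d.runOnHost(svc).
			if name == "svc-b" {
				return errors.New("svc-b failed to start")
			}
			return nil
		})
	}
	// Wait blocks until every goroutine has returned and yields the
	// first non-nil error, so nothing is left blocked on a send.
	if err := g.Wait(); err != nil {
		fmt.Println("startup error:", err)
	}
}
```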
Collaborator

I would prefer not to do this, especially since we need this function anyway to clean all the containers related to playground if we have a `playground clean` command. Also, I try to avoid as much as possible having to rely on exec operations, since they are harder to debug and reason about.
Contributor
Author
I'm not a fan of doing everything with commands either 🙂 But we don't seem to have much choice for `docker compose`. If it's okay to start `docker compose` from a command, why not take it down with another (quicker) command? By the way, this cleans up only the specific session, not the other playground sessions running in parallel, and it leaves no containers, volumes, or networks - it's super clean.

If we want to hook this up to `playground clean` somehow and use it in detached mode, I think this command likely helps us - would you disagree?
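For illustration only, a `playground clean` built on top of this could enumerate sessions by label and take each compose project down - a sketch, where `cleanAllSessions` is a hypothetical name and the `playground.session` label comes from the existing runner code:

```go
import (
	"context"
	"fmt"
	"os/exec"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// cleanAllSessions (hypothetical) discovers every playground session by its
// label and takes the matching compose project down, volumes included.
func cleanAllSessions(ctx context.Context, cli *client.Client) error {
	containers, err := cli.ContainerList(ctx, container.ListOptions{
		All:     true,
		Filters: filters.NewArgs(filters.Arg("label", "playground.session")),
	})
	if err != nil {
		return fmt.Errorf("error getting container list: %w", err)
	}
	sessions := map[string]struct{}{}
	for _, c := range containers {
		sessions[c.Labels["playground.session"]] = struct{}{}
	}
	for id := range sessions {
		cmd := exec.CommandContext(ctx, "docker", "compose", "-p", id, "down", "-v")
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("failed to take session %s down: %w", id, err)
		}
	}
	return nil
}
```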
Collaborator
My only concern is the side effect of having to parse logs if we want to surface useful information about what is happening in the command's output, whereas right now it is pretty explicit what is going on if something fails.

I agree that this becomes helpful if we have to remove other artifacts from the execution (i.e. volumes, networks), but I do not think that is a requirement right now (see my other comment on using volumes).

At the same time, I am not sure how this change can be helpful for `playground clean`, since it would need a reference to every docker-compose file, while the current code does work for `playground clean` since you only have to modify the filter.

My concern is not major though (unlike using volumes), but I would like to get your thoughts on this.
Contributor
Author
It's not necessary to reference the compose file - just the session ID is sufficient, because we are using that as the Docker Compose project name via the `-p` flag. And we are able to take everything down by just providing `-p <session-id> -v`.

BTW, I think the GUIDs seem a little unfriendly for this purpose. We could probably use a truncated hex hash. WDYT?
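Something like this sketch, where `shortSessionName`, the `pg-` prefix, and the example GUID are purely illustrative:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// shortSessionName derives a short, stable compose project name from the
// session GUID: eight hex chars keeps -p arguments readable while collisions
// stay unlikely for a handful of local sessions.
func shortSessionName(sessionID string) string {
	sum := sha256.Sum256([]byte(sessionID))
	return "pg-" + hex.EncodeToString(sum[:])[:8]
}

func main() {
	// Example GUID, illustrative only.
	fmt.Println(shortSessionName("0f1e2d3c-4b5a-6978-8695-a4b3c2d1e0f9"))
	// prints something like pg-1a2b3c4d
}
```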
Contributor
Author
About the logs of this command: it is just a stop command, so I expect any failure output to be minimal. And when it's all Docker Compose's responsibility to turn things on and off, there is very little room for failure in my experience. I think it's useful to make `builder-playground` spit out any failure from this command as-is. The user can either try to reason about it alone (if savvy about Docker) or copy it to us in an issue here.