/*
   Copyright 2020 Docker Compose CLI authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package compose

import (
    "context"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "os"
    "path"
    "path/filepath"
    "slices"
    "strconv"
    "strings"
    gsync "sync"
    "time"

    "github.com/compose-spec/compose-go/v2/types"
    "github.com/compose-spec/compose-go/v2/utils"
    ccli "github.com/docker/cli/cli/command/container"
    pathutil "github.com/docker/compose/v2/internal/paths"
    "github.com/docker/compose/v2/internal/sync"
    "github.com/docker/compose/v2/internal/tracing"
    "github.com/docker/compose/v2/pkg/api"
    "github.com/docker/compose/v2/pkg/watch"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/image"
    "github.com/mitchellh/mapstructure"
    "github.com/sirupsen/logrus"
    "golang.org/x/sync/errgroup"
)
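
// WatchFunc starts watching a project and returns a "wait" function that
// blocks until the watch terminates, along with any setup error.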
type WatchFunc func(ctx context.Context, project *types.Project, options api.WatchOptions) (func() error, error)
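
// Watcher manages the lifecycle of a WatchFunc for a project: it can be
// started and stopped, and reports the error the watch ended with.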
type Watcher struct {
    project *types.Project
    options api.WatchOptions
    watchFn WatchFunc
    stopFn  func()
    errCh   chan error
}
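
// NewWatcher returns a Watcher for the project if at least one selected
// service declares a develop.watch configuration; otherwise it returns an error.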
func NewWatcher(project *types.Project, options api.UpOptions, w WatchFunc) (*Watcher, error) {
    for i := range project.Services {
        service := project.Services[i]

        if service.Develop != nil && service.Develop.Watch != nil {
            build := options.Create.Build
            build.Quiet = true
            return &Watcher{
                project: project,
                options: api.WatchOptions{
                    LogTo: options.Start.Attach,
                    Build: build,
                },
                watchFn: w,
                errCh:   make(chan error),
            }, nil
        }
    }
    // none of the services is eligible for watch
    return nil, fmt.Errorf("none of the selected services is configured for watch, see https://docs.docker.com/compose/how-tos/file-watch/")
}

// ensure state changes are atomic
var mx gsync.Mutex
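
// Start runs the watch function for the project; the result of the watch is
// collected asynchronously and returned by Stop.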
func (w *Watcher) Start(ctx context.Context) error {
    mx.Lock()
    defer mx.Unlock()
    ctx, cancelFunc := context.WithCancel(ctx)
    w.stopFn = cancelFunc
    wait, err := w.watchFn(ctx, w.project, w.options)
    if err != nil {
        return err
    }
    go func() {
        w.errCh <- wait()
    }()
    return nil
}
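
// Stop cancels a running watch and returns the error, if any, the watch
// function terminated with.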
func (w *Watcher) Stop() error {
    mx.Lock()
    defer mx.Unlock()
    if w.stopFn == nil {
        return nil
    }
    w.stopFn()
    w.stopFn = nil
    err := <-w.errCh
    return err
}

// getSyncImplementation returns an appropriate sync implementation for the
// project.
//
// Currently, this is an implementation that batches files and transfers them
// using the Moby `Untar` API.
func (s *composeService) getSyncImplementation(project *types.Project) (sync.Syncer, error) {
    var useTar bool
    if useTarEnv, ok := os.LookupEnv("COMPOSE_EXPERIMENTAL_WATCH_TAR"); ok {
        useTar, _ = strconv.ParseBool(useTarEnv)
    } else {
        useTar = true
    }
    if !useTar {
        return nil, errors.New("no available sync implementation")
    }

    return sync.NewTar(project.Name, tarDockerClient{s: s}), nil
}
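
// Watch starts watching the selected services of the project and blocks
// until the watch ends or fails.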
func (s *composeService) Watch(ctx context.Context, project *types.Project, options api.WatchOptions) error {
    wait, err := s.watch(ctx, project, options)
    if err != nil {
        return err
    }
    return wait()
}
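
// watchRule associates a develop.watch trigger with its include and ignore
// matchers and the name of the service it applies to.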
type watchRule struct {
    types.Trigger
    include watch.PathMatcher
    ignore  watch.PathMatcher
    service string
}
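
// Matches checks a file event against the rule and returns the host-to-container
// path mapping to sync, or nil if the event is out of scope, excluded by the
// include patterns, or ignored.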
func (r watchRule) Matches(event watch.FileEvent) *sync.PathMapping {
    hostPath := string(event)
    if !pathutil.IsChild(r.Path, hostPath) {
        return nil
    }
    included, err := r.include.Matches(hostPath)
    if err != nil {
        logrus.Warnf("error include matching %q: %v", hostPath, err)
        return nil
    }
    if !included {
        logrus.Debugf("%s is not matching include pattern", hostPath)
        return nil
    }
    isIgnored, err := r.ignore.Matches(hostPath)
    if err != nil {
        logrus.Warnf("error ignore matching %q: %v", hostPath, err)
        return nil
    }

    if isIgnored {
        logrus.Debugf("%s is matching ignore pattern", hostPath)
        return nil
    }

    var containerPath string
    if r.Target != "" {
        rel, err := filepath.Rel(r.Path, hostPath)
        if err != nil {
            logrus.Warnf("error making %s relative to %s: %v", hostPath, r.Path, err)
            return nil
        }
        // always use Unix-style paths inside the container
        containerPath = path.Join(r.Target, filepath.ToSlash(rel))
    }
    return &sync.PathMapping{
        HostPath:      hostPath,
        ContainerPath: containerPath,
    }
}
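
// watch wires up file watchers and sync rules for every eligible service and
// returns a wait function that blocks until watching stops.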
func (s *composeService) watch(ctx context.Context, project *types.Project, options api.WatchOptions) (func() error, error) { //nolint: gocyclo
    var err error
    if project, err = project.WithSelectedServices(options.Services); err != nil {
        return nil, err
    }
    syncer, err := s.getSyncImplementation(project)
    if err != nil {
        return nil, err
    }
    eg, ctx := errgroup.WithContext(ctx)
    options.LogTo.Register(api.WatchLogger)

    var (
        rules []watchRule
        paths []string
    )
    for serviceName, service := range project.Services {
        config, err := loadDevelopmentConfig(service, project)
        if err != nil {
            return nil, err
        }

        if service.Develop != nil {
            config = service.Develop
        }

        if config == nil {
            continue
        }

        for _, trigger := range config.Watch {
            if trigger.Action == types.WatchActionRebuild {
                if service.Build == nil {
                    return nil, fmt.Errorf("can't watch service %q with action %s without a build context", service.Name, types.WatchActionRebuild)
                }
                if options.Build == nil {
                    return nil, fmt.Errorf("--no-build is incompatible with watch action %s in service %s", types.WatchActionRebuild, service.Name)
                }
                // set the service to always be built - watch triggers `Up()` when it receives a rebuild event
                service.PullPolicy = types.PullPolicyBuild
                project.Services[serviceName] = service
            }
        }

        for _, trigger := range config.Watch {
            if isSync(trigger) && checkIfPathAlreadyBindMounted(trigger.Path, service.Volumes) {
                logrus.Warnf("path '%s' also declared by a bind mount volume, this path won't be monitored!\n", trigger.Path)
                continue
            } else {
                var initialSync bool
                success, err := trigger.Extensions.Get("x-initialSync", &initialSync)
                if err == nil && success && initialSync && isSync(trigger) {
                    // ensure the initial set of files covered by the sync trigger is present in the container
                    err := s.initialSync(ctx, project, service, trigger, syncer)
                    if err != nil {
                        return nil, err
                    }
                }
            }
            paths = append(paths, trigger.Path)
        }

        serviceWatchRules, err := getWatchRules(config, service)
        if err != nil {
            return nil, err
        }
        rules = append(rules, serviceWatchRules...)
    }

    if len(paths) == 0 {
        return nil, fmt.Errorf("none of the selected services is configured for watch, consider setting a 'develop' section")
    }

    watcher, err := watch.NewWatcher(paths)
    if err != nil {
        return nil, err
    }

    err = watcher.Start()
    if err != nil {
        return nil, err
    }

    eg.Go(func() error {
        return s.watchEvents(ctx, project, options, watcher, syncer, rules)
    })
    options.LogTo.Log(api.WatchLogger, "Watch enabled")

    return func() error {
        err := eg.Wait()
        if werr := watcher.Close(); werr != nil {
            logrus.Debugf("Error closing Watcher: %v", werr)
        }
        return err
    }, nil
}
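
// getWatchRules builds the watch rules for a service from its develop.watch
// triggers, combining the .dockerignore patterns, the ephemeral-path matcher,
// a hardcoded .git ignore and the per-trigger ignore/include patterns.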
func getWatchRules(config *types.DevelopConfig, service types.ServiceConfig) ([]watchRule, error) {
    var rules []watchRule

    dockerIgnores, err := watch.LoadDockerIgnore(service.Build)
    if err != nil {
        return nil, err
    }

    // add a hardcoded set of ignores on top of what came from .dockerignore
    // some of this should likely be configurable (e.g. there could be cases
    // where you want `.git` to be synced) but this is suitable for now
    dotGitIgnore, err := watch.NewDockerPatternMatcher("/", []string{".git/"})
    if err != nil {
        return nil, err
    }

    for _, trigger := range config.Watch {
        ignore, err := watch.NewDockerPatternMatcher(trigger.Path, trigger.Ignore)
        if err != nil {
            return nil, err
        }

        var include watch.PathMatcher
        if len(trigger.Include) == 0 {
            include = watch.AnyMatcher{}
        } else {
            include, err = watch.NewDockerPatternMatcher(trigger.Path, trigger.Include)
            if err != nil {
                return nil, err
            }
        }

        rules = append(rules, watchRule{
            Trigger: trigger,
            include: include,
            ignore: watch.NewCompositeMatcher(
                dockerIgnores,
                watch.EphemeralPathMatcher(),
                dotGitIgnore,
                ignore,
            ),
            service: service.Name,
        })
    }
    return rules, nil
}
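
// isSync reports whether the trigger's action requires file synchronization.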
func isSync(trigger types.Trigger) bool {
    return trigger.Action == types.WatchActionSync || trigger.Action == types.WatchActionSyncRestart
}
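
// watchEvents consumes debounced batches of file events from the watcher and
// dispatches each batch to handleWatchBatch until the context is cancelled.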
func (s *composeService) watchEvents(ctx context.Context, project *types.Project, options api.WatchOptions, watcher watch.Notify, syncer sync.Syncer, rules []watchRule) error {
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    // debounce and group filesystem events so that we capture IDE saving many files as one "batch" event
    batchEvents := watch.BatchDebounceEvents(ctx, s.clock, watcher.Events())

    for {
        select {
        case <-ctx.Done():
            options.LogTo.Log(api.WatchLogger, "Watch disabled")
            return nil
        case err, open := <-watcher.Errors():
            if err != nil {
                options.LogTo.Err(api.WatchLogger, "Watch disabled with errors: "+err.Error())
            }
            if open {
                continue
            }
            return err
        case batch := <-batchEvents:
            start := time.Now()
            logrus.Debugf("batch start: count[%d]", len(batch))
            err := s.handleWatchBatch(ctx, project, options, batch, rules, syncer)
            if err != nil {
                logrus.Warnf("Error handling changed files: %v", err)
            }
            logrus.Debugf("batch complete: duration[%s] count[%d]", time.Since(start), len(batch))
        }
    }
}
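
// loadDevelopmentConfig decodes the legacy `x-develop` extension for a service
// and normalizes trigger paths (made absolute, symlinks resolved, cleaned).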
func loadDevelopmentConfig(service types.ServiceConfig, project *types.Project) (*types.DevelopConfig, error) {
    var config types.DevelopConfig
    y, ok := service.Extensions["x-develop"]
    if !ok {
        return nil, nil
    }
    logrus.Warnf("x-develop is DEPRECATED, please use the official `develop` attribute")
    err := mapstructure.Decode(y, &config)
    if err != nil {
        return nil, err
    }
    baseDir, err := filepath.EvalSymlinks(project.WorkingDir)
    if err != nil {
        return nil, fmt.Errorf("resolving symlink for %q: %w", project.WorkingDir, err)
    }

    for i, trigger := range config.Watch {
        if !filepath.IsAbs(trigger.Path) {
            trigger.Path = filepath.Join(baseDir, trigger.Path)
        }
        if p, err := filepath.EvalSymlinks(trigger.Path); err == nil {
            // this might fail because the path doesn't exist, etc.
            trigger.Path = p
        }
        trigger.Path = filepath.Clean(trigger.Path)
        if trigger.Path == "" {
            return nil, errors.New("watch rules MUST define a path")
        }

        if trigger.Action == types.WatchActionRebuild && service.Build == nil {
            return nil, fmt.Errorf("service %s doesn't have a build section, can't apply %s on watch", service.Name, types.WatchActionRebuild)
        }
        if trigger.Action == types.WatchActionSyncExec && len(trigger.Exec.Command) == 0 {
            return nil, fmt.Errorf("can't watch with action %q on service %s without a command", types.WatchActionSyncExec, service.Name)
        }

        config.Watch[i] = trigger
    }
    return &config, nil
}
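
// checkIfPathAlreadyBindMounted reports whether watchPath is already covered
// by one of the service's bind mounts.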
func checkIfPathAlreadyBindMounted(watchPath string, volumes []types.ServiceVolumeConfig) bool {
    for _, volume := range volumes {
        if volume.Bind != nil {
            relPath, err := filepath.Rel(volume.Source, watchPath)
            if err == nil && !strings.HasPrefix(relPath, "..") {
                return true
            }
        }
    }
    return false
}
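
// tarDockerClient adapts composeService to the client interface expected by
// the tar-based syncer: it lists service containers, runs commands in them,
// and copies tar archives into them.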
type tarDockerClient struct {
    s *composeService
}

func (t tarDockerClient) ContainersForService(ctx context.Context, projectName string, serviceName string) ([]container.Summary, error) {
    containers, err := t.s.getContainers(ctx, projectName, oneOffExclude, true, serviceName)
    if err != nil {
        return nil, err
    }
    return containers, nil
}

func (t tarDockerClient) Exec(ctx context.Context, containerID string, cmd []string, in io.Reader) error {
    execCfg := container.ExecOptions{
        Cmd:          cmd,
        AttachStdout: false,
        AttachStderr: true,
        AttachStdin:  in != nil,
        Tty:          false,
    }
    execCreateResp, err := t.s.apiClient().ContainerExecCreate(ctx, containerID, execCfg)
    if err != nil {
        return err
    }

    startCheck := container.ExecStartOptions{Tty: false, Detach: false}
    conn, err := t.s.apiClient().ContainerExecAttach(ctx, execCreateResp.ID, startCheck)
    if err != nil {
        return err
    }
    defer conn.Close()

    var eg errgroup.Group
    if in != nil {
        eg.Go(func() error {
            defer func() {
                _ = conn.CloseWrite()
            }()
            _, err := io.Copy(conn.Conn, in)
            return err
        })
    }
    eg.Go(func() error {
        _, err := io.Copy(t.s.stdinfo(), conn.Reader)
        return err
    })

    err = t.s.apiClient().ContainerExecStart(ctx, execCreateResp.ID, startCheck)
    if err != nil {
        return err
    }

    // although the errgroup is not tied directly to the context, the operations
    // in it are reading/writing to the connection, which is tied to the context,
    // so they won't block indefinitely
    if err := eg.Wait(); err != nil {
        return err
    }

    execResult, err := t.s.apiClient().ContainerExecInspect(ctx, execCreateResp.ID)
    if err != nil {
        return err
    }
    if execResult.Running {
        return errors.New("process still running")
    }
    if execResult.ExitCode != 0 {
        return fmt.Errorf("exit code %d", execResult.ExitCode)
    }
    return nil
}

func (t tarDockerClient) Untar(ctx context.Context, id string, archive io.ReadCloser) error {
    return t.s.apiClient().CopyToContainer(ctx, id, "/", archive, container.CopyToContainerOptions{
        CopyUIDGID: true,
    })
}
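
// handleWatchBatch applies a debounced batch of file events: it rebuilds,
// syncs, restarts and runs exec hooks for the affected services as dictated
// by the matching watch rules.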
//nolint:gocyclo
func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Project, options api.WatchOptions, batch []watch.FileEvent, rules []watchRule, syncer sync.Syncer) error {
    var (
        restart   = map[string]bool{}
        syncfiles = map[string][]*sync.PathMapping{}
        exec      = map[string][]int{}
        rebuild   = map[string]bool{}
    )
    for _, event := range batch {
        for i, rule := range rules {
            mapping := rule.Matches(event)
            if mapping == nil {
                continue
            }

            switch rule.Action {
            case types.WatchActionRebuild:
                rebuild[rule.service] = true
            case types.WatchActionSync:
                syncfiles[rule.service] = append(syncfiles[rule.service], mapping)
            case types.WatchActionRestart:
                restart[rule.service] = true
            case types.WatchActionSyncRestart:
                syncfiles[rule.service] = append(syncfiles[rule.service], mapping)
                restart[rule.service] = true
            case types.WatchActionSyncExec:
                syncfiles[rule.service] = append(syncfiles[rule.service], mapping)
                // We want to run exec hooks only once after sync, even if multiple file events match.
                // As we can't compare ServiceHook values to sort and compact a slice, collect rule indexes instead.
                exec[rule.service] = append(exec[rule.service], i)
            }
        }
    }

    logrus.Debugf("watch actions: rebuild %d sync %d restart %d", len(rebuild), len(syncfiles), len(restart))

    if len(rebuild) > 0 {
        err := s.rebuild(ctx, project, utils.MapKeys(rebuild), options)
        if err != nil {
            return err
        }
    }

    for serviceName, pathMappings := range syncfiles {
        writeWatchSyncMessage(options.LogTo, serviceName, pathMappings)
        err := syncer.Sync(ctx, serviceName, pathMappings)
        if err != nil {
            return err
        }
    }
    if len(restart) > 0 {
        services := utils.MapKeys(restart)
        err := s.restart(ctx, project.Name, api.RestartOptions{
            Services: services,
            Project:  project,
            NoDeps:   false,
        })
        if err != nil {
            return err
        }
        options.LogTo.Log(
            api.WatchLogger,
            fmt.Sprintf("service(s) %q restarted", services))
    }

    eg, ctx := errgroup.WithContext(ctx)
    for service, rulesToExec := range exec {
        slices.Sort(rulesToExec)
        for _, i := range slices.Compact(rulesToExec) {
            err := s.exec(ctx, project, service, rules[i].Exec, eg)
            if err != nil {
                return err
            }
        }
    }
    return eg.Wait()
}
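
// exec runs the trigger's exec hook in every running container of the service,
// scheduling each run on the provided errgroup.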
func (s *composeService) exec(ctx context.Context, project *types.Project, serviceName string, x types.ServiceHook, eg *errgroup.Group) error {
    containers, err := s.getContainers(ctx, project.Name, oneOffExclude, false, serviceName)
    if err != nil {
        return err
    }
    for _, c := range containers {
        eg.Go(func() error {
            exec := ccli.NewExecOptions()
            exec.User = x.User
            exec.Privileged = x.Privileged
            exec.Command = x.Command
            exec.Workdir = x.WorkingDir
            for _, v := range x.Environment.ToMapping().Values() {
                err := exec.Env.Set(v)
                if err != nil {
                    return err
                }
            }
            return ccli.RunExec(ctx, s.dockerCli, c.ID, exec)
        })
    }
    return nil
}
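
// rebuild rebuilds the images of the given services, recreates and restarts
// their containers, and optionally prunes dangling images left behind.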
func (s *composeService) rebuild(ctx context.Context, project *types.Project, services []string, options api.WatchOptions) error {
    options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Rebuilding service(s) %q after changes were detected...", services))
    // restrict the build to ONLY this service, not any of its dependencies
    options.Build.Services = services

    var (
        imageNameToIdMap map[string]string
        err              error
    )
    err = tracing.SpanWrapFunc("project/build", tracing.ProjectOptions(ctx, project),
        func(ctx context.Context) error {
            imageNameToIdMap, err = s.build(ctx, project, *options.Build, nil)
            return err
        })(ctx)
    if err != nil {
        options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Build failed. Error: %v", err))
        return err
    }

    if options.Prune {
        s.pruneDanglingImagesOnRebuild(ctx, project.Name, imageNameToIdMap)
    }

    options.LogTo.Log(api.WatchLogger, fmt.Sprintf("service(s) %q successfully built", services))

    err = s.create(ctx, project, api.CreateOptions{
        Services: services,
        Inherit:  true,
        Recreate: api.RecreateForce,
    })
    if err != nil {
        options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Failed to recreate services after update. Error: %v", err))
        return err
    }

    p, err := project.WithSelectedServices(services, types.IncludeDependents)
    if err != nil {
        return err
    }
    err = s.start(ctx, project.Name, api.StartOptions{
        Project:  p,
        Services: services,
        AttachTo: services,
    }, nil)
    if err != nil {
        options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Application failed to start after update. Error: %v", err))
    }
    return nil
}

// writeWatchSyncMessage prints out a message about the sync for the changed paths.
func writeWatchSyncMessage(log api.LogConsumer, serviceName string, pathMappings []*sync.PathMapping) {
    if logrus.IsLevelEnabled(logrus.DebugLevel) {
        hostPathsToSync := make([]string, len(pathMappings))
        for i := range pathMappings {
            hostPathsToSync[i] = pathMappings[i].HostPath
        }
        log.Log(
            api.WatchLogger,
            fmt.Sprintf(
                "Syncing service %q after changes were detected: %s",
                serviceName,
                strings.Join(hostPathsToSync, ", "),
            ),
        )
    } else {
        log.Log(
            api.WatchLogger,
            fmt.Sprintf("Syncing service %q after %d changes were detected", serviceName, len(pathMappings)),
        )
    }
}
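
// pruneDanglingImagesOnRebuild removes dangling images that belong to the
// project and are not referenced in the provided image map.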
func (s *composeService) pruneDanglingImagesOnRebuild(ctx context.Context, projectName string, imageNameToIdMap map[string]string) {
    images, err := s.apiClient().ImageList(ctx, image.ListOptions{
        Filters: filters.NewArgs(
            filters.Arg("dangling", "true"),
            filters.Arg("label", api.ProjectLabel+"="+projectName),
        ),
    })
    if err != nil {
        logrus.Debugf("Failed to list images: %v", err)
        return
    }

    for _, img := range images {
        if _, ok := imageNameToIdMap[img.ID]; !ok {
            _, err := s.apiClient().ImageRemove(ctx, img.ID, image.RemoveOptions{})
            if err != nil {
                logrus.Debugf("Failed to remove image %s: %v", img.ID, err)
            }
        }
    }
}

// initialSync walks develop.watch.path and checks which files should be copied
// into the container; it honors develop.watch.ignore and skips the Dockerfile,
// compose files, bind-mounted paths and .git.
func (s *composeService) initialSync(ctx context.Context, project *types.Project, service types.ServiceConfig, trigger types.Trigger, syncer sync.Syncer) error {
    dockerIgnores, err := watch.LoadDockerIgnore(service.Build)
    if err != nil {
        return err
    }

    dotGitIgnore, err := watch.NewDockerPatternMatcher("/", []string{".git/"})
    if err != nil {
        return err
    }

    triggerIgnore, err := watch.NewDockerPatternMatcher(trigger.Path, trigger.Ignore)
    if err != nil {
        return err
    }
    // FIXME .dockerignore
    ignoreInitialSync := watch.NewCompositeMatcher(
        dockerIgnores,
        watch.EphemeralPathMatcher(),
        dotGitIgnore,
        triggerIgnore)

    pathsToCopy, err := s.initialSyncFiles(ctx, project, service, trigger, ignoreInitialSync)
    if err != nil {
        return err
    }

    return syncer.Sync(ctx, service.Name, pathsToCopy)
}

// initialSyncFiles syncs files from develop.watch.path if they have been
// modified after the image was created.
//
//nolint:gocyclo
func (s *composeService) initialSyncFiles(ctx context.Context, project *types.Project, service types.ServiceConfig, trigger types.Trigger, ignore watch.PathMatcher) ([]*sync.PathMapping, error) {
    fi, err := os.Stat(trigger.Path)
    if err != nil {
        return nil, err
    }
    timeImageCreated, err := s.imageCreatedTime(ctx, project, service.Name)
    if err != nil {
        return nil, err
    }
    var pathsToCopy []*sync.PathMapping
    switch mode := fi.Mode(); {
    case mode.IsDir():
        // process directory
        err = filepath.WalkDir(trigger.Path, func(path string, d fs.DirEntry, err error) error {
            if err != nil {
                // handle possible path err, just in case...
                return err
            }
            if trigger.Path == path {
                // walk starts at the root directory
                return nil
            }
            if shouldIgnore(filepath.Base(path), ignore) || checkIfPathAlreadyBindMounted(path, service.Volumes) {
                // By definition sync ignores bind mounted paths
                if d.IsDir() {
                    // skip folder
                    return fs.SkipDir
                }
                return nil // skip file
            }
            info, err := d.Info()
            if err != nil {
                return err
            }
            if !d.IsDir() {
                if info.ModTime().Before(timeImageCreated) {
                    // skip file if it was modified before image creation
                    return nil
                }
                rel, err := filepath.Rel(trigger.Path, path)
                if err != nil {
                    return err
                }
                // only copy files (and not full directories)
                pathsToCopy = append(pathsToCopy, &sync.PathMapping{
                    HostPath:      path,
                    ContainerPath: filepath.Join(trigger.Target, rel),
                })
            }
            return nil
        })
    case mode.IsRegular():
        // process file
        if fi.ModTime().After(timeImageCreated) && !shouldIgnore(filepath.Base(trigger.Path), ignore) && !checkIfPathAlreadyBindMounted(trigger.Path, service.Volumes) {
            pathsToCopy = append(pathsToCopy, &sync.PathMapping{
                HostPath:      trigger.Path,
                ContainerPath: trigger.Target,
            })
        }
    }
    return pathsToCopy, err
}
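
// shouldIgnore reports whether the given file name matches one of the ignore patterns.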
func shouldIgnore(name string, ignore watch.PathMatcher) bool {
    shouldIgnore, _ := ignore.Matches(name)
    // ignore files that match any ignore pattern
    return shouldIgnore
}

// imageCreatedTime returns the creation time of the image used by a service.
func (s *composeService) imageCreatedTime(ctx context.Context, project *types.Project, serviceName string) (time.Time, error) {
    containers, err := s.apiClient().ContainerList(ctx, container.ListOptions{
        All: true,
        Filters: filters.NewArgs(
            filters.Arg("label", fmt.Sprintf("%s=%s", api.ProjectLabel, project.Name)),
            filters.Arg("label", fmt.Sprintf("%s=%s", api.ServiceLabel, serviceName))),
    })
    if err != nil {
        return time.Now(), err
    }
    if len(containers) == 0 {
        return time.Now(), fmt.Errorf("could not get created time for service's image")
    }

    img, err := s.apiClient().ImageInspect(ctx, containers[0].ImageID)
    if err != nil {
        return time.Now(), err
    }
    // Need to get oldest one?
    timeCreated, err := time.Parse(time.RFC3339Nano, img.Created)
    if err != nil {
        return time.Now(), err
    }
    return timeCreated, nil
}