From b9f1aa838e43b60e5333dc39c306e10a0a32472a Mon Sep 17 00:00:00 2001 From: Joshua Chamberlain Date: Thu, 31 Aug 2023 21:29:32 -0700 Subject: [PATCH 1/6] POC: watch config files to load/unload maps --- atlas/atlas.go | 47 ++ cmd/internal/register/maps.go | 11 +- cmd/tegola/cmd/root.go | 147 +++- config/config.go | 47 +- config/source/file.go | 134 ++++ config/source/source.go | 59 ++ go.mod | 1 + go.sum | 4 +- .../fsnotify/fsnotify/.editorconfig | 12 + .../fsnotify/fsnotify/.gitattributes | 1 + .../github.com/fsnotify/fsnotify/.gitignore | 6 + vendor/github.com/fsnotify/fsnotify/.mailmap | 2 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 470 +++++++++++ .../fsnotify/fsnotify/CONTRIBUTING.md | 26 + vendor/github.com/fsnotify/fsnotify/LICENSE | 25 + vendor/github.com/fsnotify/fsnotify/README.md | 161 ++++ .../fsnotify/fsnotify/backend_fen.go | 162 ++++ .../fsnotify/fsnotify/backend_inotify.go | 459 +++++++++++ .../fsnotify/fsnotify/backend_kqueue.go | 707 +++++++++++++++++ .../fsnotify/fsnotify/backend_other.go | 66 ++ .../fsnotify/fsnotify/backend_windows.go | 746 ++++++++++++++++++ .../github.com/fsnotify/fsnotify/fsnotify.go | 81 ++ vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 208 +++++ .../fsnotify/fsnotify/system_bsd.go | 8 + .../fsnotify/fsnotify/system_darwin.go | 9 + vendor/modules.txt | 3 + 26 files changed, 3575 insertions(+), 27 deletions(-) create mode 100644 config/source/file.go create mode 100644 config/source/source.go create mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore create mode 100644 vendor/github.com/fsnotify/fsnotify/.mailmap create mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md create mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md create mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE create mode 100644 vendor/github.com/fsnotify/fsnotify/README.md create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh create mode 100644 vendor/github.com/fsnotify/fsnotify/system_bsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/system_darwin.go diff --git a/atlas/atlas.go b/atlas/atlas.go index 5ea55e0a5..714d03fa9 100644 --- a/atlas/atlas.go +++ b/atlas/atlas.go @@ -3,6 +3,7 @@ package atlas import ( "context" + "fmt" "os" "strconv" "strings" @@ -221,6 +222,52 @@ func (a *Atlas) AddMap(m Map) { a.maps[m.Name] = m } +// AddMaps registers maps by name, all or nothing. If a map already exists an error will be returned. +func (a *Atlas) AddMaps(maps []Map) error { + if a == nil { + // Use the default Atlas if a, is nil. This way the empty value is + // still useful. + return defaultAtlas.AddMaps(maps) + } + a.Lock() + defer a.Unlock() + + if a.maps == nil { + a.maps = map[string]Map{} + } + + // Check all the names for conflicts before we add any map, so that we can add all or none. 
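+	// Doing the check in a separate pass keeps AddMaps atomic: either every
+	// map in the slice is registered or none of them are, so a partially
+	// loaded app never becomes visible to callers of the Atlas.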
+ for _, m := range maps { + if _, exists := a.maps[m.Name]; exists { + return fmt.Errorf("Map with name \"%s\" already exists.", m.Name) + } + } + + // Now add all the maps. + for _, m := range maps { + a.maps[m.Name] = m + } + + return nil +} + +func (a *Atlas) RemoveMaps(names []string) { + if a == nil { + // Use the default Atlas if a, is nil. This way the empty value is + // still useful. + defaultAtlas.RemoveMaps(names) + return + } + a.Lock() + defer a.Unlock() + + for _, name := range names { + if _, exists := a.maps[name]; exists { + delete(a.maps, name) + } + } +} + // GetCache returns the registered cache if one is registered, otherwise nil func (a *Atlas) GetCache() cache.Interface { if a == nil { diff --git a/cmd/internal/register/maps.go b/cmd/internal/register/maps.go index bb4e62c9f..a95f2ffae 100644 --- a/cmd/internal/register/maps.go +++ b/cmd/internal/register/maps.go @@ -131,6 +131,7 @@ func Maps(a *atlas.Atlas, maps []provider.Map, providers map[string]provider.Til ) // iterate our maps + newMaps := make([]atlas.Map, 0, len(maps)) for _, m := range maps { newMap := webMercatorMapFromConfigMap(m) @@ -157,9 +158,15 @@ func Maps(a *atlas.Atlas, maps []provider.Map, providers map[string]provider.Til newMap.Layers = append(newMap.Layers, layer) } - a.AddMap(newMap) + newMaps = append(newMaps, newMap) } - return nil + + // Register all or nothing. + return a.AddMaps(newMaps) +} + +func UnloadMaps(a *atlas.Atlas, names []string) { + a.RemoveMaps(names) } // Find allow HTML tag diff --git a/cmd/tegola/cmd/root.go b/cmd/tegola/cmd/root.go index 177638ae2..83d9f09db 100644 --- a/cmd/tegola/cmd/root.go +++ b/cmd/tegola/cmd/root.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "fmt" "github.com/go-spatial/cobra" @@ -8,9 +9,12 @@ import ( "github.com/go-spatial/tegola/cmd/internal/register" cachecmd "github.com/go-spatial/tegola/cmd/tegola/cmd/cache" "github.com/go-spatial/tegola/config" + "github.com/go-spatial/tegola/config/source" "github.com/go-spatial/tegola/dict" "github.com/go-spatial/tegola/internal/build" + "github.com/go-spatial/tegola/internal/env" "github.com/go-spatial/tegola/internal/log" + "github.com/go-spatial/tegola/provider" ) var ( @@ -114,22 +118,22 @@ func initConfig(configFile string, cacheRequired bool, logLevel string, logger s return err } - // init our providers - // but first convert []env.Map -> []dict.Dicter - provArr := make([]dict.Dicter, len(conf.Providers)) - for i := range provArr { - provArr[i] = conf.Providers[i] + // Init providers from the primary config file. + providers, err := initProviders(conf.Providers, conf.Maps) + if err != nil { + return err } - providers, err := register.Providers(provArr, conf.Maps) - if err != nil { - return fmt.Errorf("could not register providers: %v", err) + // Init maps from the primary config file. + if err = initMaps(conf.Maps, providers); err != nil { + return err } - // init our maps - if err = register.Maps(nil, conf.Maps, providers); err != nil { - return fmt.Errorf("could not register maps: %v", err) + // Setup the app config source. + if err = initAppConfigSource(conf); err != nil { + return err } + if len(conf.Cache) == 0 && cacheRequired { return fmt.Errorf("no cache defined in config, please check your config (%v)", configFile) } @@ -152,3 +156,124 @@ func initConfig(configFile string, cacheRequired bool, logLevel string, logger s atlas.SetObservability(observer) return nil } + +// initProviders translate provider config from a TOML file into usable Provider objects. 
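+//
+// Each entry in providersConfig is the decoded form of one [[providers]] TOML
+// table. A minimal sketch of such a table (the names and values below are
+// hypothetical, not taken from this patch):
+//
+//	[[providers]]
+//	name = "my_postgis"                   # referenced by map layers as "my_postgis.<layer>"
+//	type = "mvt_postgis"                  # the name the provider module registers itself under
+//	uri  = "postgres://user:pass@host/db" # remaining keys are driver-specific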
+func initProviders(providersConfig []env.Dict, maps []provider.Map) (map[string]provider.TilerUnion, error) { + // first convert []env.Map -> []dict.Dicter + provArr := make([]dict.Dicter, len(providersConfig)) + for i := range provArr { + provArr[i] = providersConfig[i] + } + + providers, err := register.Providers(provArr, conf.Maps) + if err != nil { + return nil, fmt.Errorf("could not register providers: %v", err) + } + + return providers, nil +} + +// initMaps registers maps with Atlas to be ready for service. +func initMaps(maps []provider.Map, providers map[string]provider.TilerUnion) error { + if err := register.Maps(nil, maps, providers); err != nil { + return fmt.Errorf("could not register maps: %v", err) + } + + return nil +} + +// initAppConfigSource sets up an additional configuration source for "apps" (groups of providers and maps) to be loaded and unloaded on-the-fly. +func initAppConfigSource(conf config.Config) error { + // Get the config source type. If none, return. + val, err := conf.AppConfigSource.String("type", nil) + if err != nil || val == "" { + return nil + } + + // Initialize the source. + ctx := context.Background() // Not doing anything with context now, but could use it for stopping this goroutine. + src, err := source.InitSource(val, conf.AppConfigSource, conf.BaseDir) + if err != nil { + return err + } + + // Load and start watching for new apps. + watcher, err := src.LoadAndWatch(ctx) + if err != nil { + return err + } + + go func() { + // Keep a record of what we've loaded so that we can unload when needed. + apps := make(map[string]source.App) + + for { + select { + case app, ok := <-watcher.Updates: + if !ok { + return + } + + // Check for validity first. + if err := config.ValidateApp(&app); err != nil { + log.Errorf("Failed validating app %s. %s", app.Key, err) + continue + } + + // If the new app is named the same as an existing app, first unload the existing one. + if old, exists := apps[app.Key]; exists { + log.Infof("Unloading app %s...", old.Key) + // We need only unload maps, since the providers don't live outside of maps. + register.UnloadMaps(nil, getMapNames(old)) + delete(apps, app.Key) + } + + log.Infof("Loading app %s...", app.Key) + + // Init new providers + providers, err := initProviders(app.Providers, app.Maps) + if err != nil { + log.Errorf("Failed initializing providers from %s: %s", app.Key, err) + continue + } + + // Init new maps + if err = initMaps(app.Maps, providers); err != nil { + log.Errorf("Failed initializing maps from %s: %s", app.Key, err) + continue + } + + // Record that we've loaded this app. + apps[app.Key] = app + + case deleted, ok := <-watcher.Deletions: + if !ok { + return + } + + // Unload an app's maps if it was previously loaded. 
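+				// The deletion payload is the key the source assigned when the
+				// app was loaded (for the file source, the path of the .toml
+				// file), so it can be looked up directly in the apps record.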
+ if app, exists := apps[deleted]; exists { + log.Infof("Unloading app %s...", app.Key) + register.UnloadMaps(nil, getMapNames(app)) + delete(apps, app.Key) + } else { + log.Infof("Received an unload event for app %s, but couldn't find it.", deleted) + } + + case <-ctx.Done(): + return + } + } + }() + + return nil +} + +func getMapNames(app source.App) []string { + names := make([]string, 0, len(app.Maps)) + for _, m := range app.Maps { + names = append(names, string(m.Name)) + } + + return names +} diff --git a/config/config.go b/config/config.go index aef1a70fa..9b5c7e60e 100644 --- a/config/config.go +++ b/config/config.go @@ -6,11 +6,13 @@ import ( "io" "net/http" "os" + "path/filepath" "strings" "time" "github.com/BurntSushi/toml" "github.com/go-spatial/tegola" + "github.com/go-spatial/tegola/config/source" "github.com/go-spatial/tegola/internal/env" "github.com/go-spatial/tegola/internal/log" "github.com/go-spatial/tegola/provider" @@ -55,6 +57,7 @@ type Config struct { // If this is an empty string, it means that the location was unknown. This is the case if // the Parse() function is used directly. LocationName string + BaseDir string Webserver Webserver `toml:"webserver"` Cache env.Dict `toml:"cache"` Observer env.Dict `toml:"observer"` @@ -65,8 +68,8 @@ type Config struct { // 2. type -- this is the name the provider modules register // themselves under. (e.g. postgis, gpkg, mvt_postgis ) // Note: Use the type to figure out if the provider is a mvt or std provider - Providers []env.Dict `toml:"providers"` - Maps []provider.Map `toml:"maps"` + source.App + AppConfigSource env.Dict `toml:"app_config_source"` } // Webserver represents the config options for the webserver part of Tegola @@ -147,6 +150,8 @@ func ValidateAndRegisterParams(mapName string, params []provider.QueryParameter) } // Mark all used tokens as reserved + // This looks like it's going to cause trouble if the global ReservedTokens map just keeps growing. + // I guess a map can't be reloaded if it uses tokens? for token := range usedTokens { ReservedTokens[token] = struct{}{} } @@ -154,9 +159,10 @@ func ValidateAndRegisterParams(mapName string, params []provider.QueryParameter) return nil } -// Validate checks the config for issues -func (c *Config) Validate() error { - +// ValidateApp checks map and provider config for issues and sets +// some defaults along the way. +// (Lifted from Config.Validate()) +func ValidateApp(app *source.App) error { var knownTypes []string drivers := make(map[string]int) for _, name := range provider.Drivers(provider.TypeStd) { @@ -169,8 +175,8 @@ func (c *Config) Validate() error { } // mvtproviders maps a known provider name to whether that provider is // an mvt provider or not. 
- mvtproviders := make(map[string]bool, len(c.Providers)) - for i, prvd := range c.Providers { + mvtproviders := make(map[string]bool, len(app.Providers)) + for i, prvd := range app.Providers { name, _ := prvd.String("name", nil) if name == "" { return ErrProviderNameRequired{Pos: i} @@ -198,7 +204,7 @@ func (c *Config) Validate() error { mapLayers := map[string]map[string]provider.MapLayer{} // maps with configured parameters for logging mapsWithCustomParams := []string{} - for mapKey, m := range c.Maps { + for mapKey, m := range app.Maps { // validate any declared query parameters if err := ValidateAndRegisterParams(string(m.Name), m.Parameters); err != nil { @@ -269,7 +275,7 @@ func (c *Config) Validate() error { // set in iterated value l.MaxZoom = &ph // set in underlying config struct - c.Maps[mapKey].Layers[layerKey].MaxZoom = &ph + app.Maps[mapKey].Layers[layerKey].MaxZoom = &ph } // MinZoom default if l.MinZoom == nil { @@ -277,7 +283,7 @@ func (c *Config) Validate() error { // set in iterated value l.MinZoom = &ph // set in underlying config struct - c.Maps[mapKey].Layers[layerKey].MinZoom = &ph + app.Maps[mapKey].Layers[layerKey].MinZoom = &ph } if int(*l.MaxZoom) == 0 { @@ -286,7 +292,7 @@ func (c *Config) Validate() error { // set in iterated value l.MaxZoom = &ph // set in underlying config struct - c.Maps[mapKey].Layers[layerKey].MaxZoom = &ph + app.Maps[mapKey].Layers[layerKey].MaxZoom = &ph } // check if we already have this layer @@ -313,6 +319,17 @@ func (c *Config) Validate() error { ) } + return nil +} + +// Validate checks the config for issues +func (c *Config) Validate() error { + + // Validate the "app": providers and maps. + if err := ValidateApp(&c.App); err != nil { + return err + } + // check for blacklisted headers for k := range c.Webserver.Headers { for _, v := range blacklistHeaders { @@ -356,7 +373,7 @@ func (c *Config) ConfigureTileBuffers() { } // Parse will parse the Tegola config file provided by the io.Reader. -func Parse(reader io.Reader, location string) (conf Config, err error) { +func Parse(reader io.Reader, location, baseDir string) (conf Config, err error) { // decode conf file, don't care about the meta data. _, err = toml.NewDecoder(reader).Decode(&conf) if err != nil { @@ -371,6 +388,7 @@ func Parse(reader io.Reader, location string) (conf Config, err error) { } conf.LocationName = location + conf.BaseDir = baseDir conf.ConfigureTileBuffers() @@ -380,6 +398,7 @@ func Parse(reader io.Reader, location string) (conf Config, err error) { // Load will load and parse the config file from the given location. 
func Load(location string) (conf Config, err error) { var reader io.Reader + baseDir := "" // check for http prefix if strings.HasPrefix(location, "http") { @@ -413,9 +432,11 @@ func Load(location string) (conf Config, err error) { if err != nil { return conf, fmt.Errorf("error opening local config file (%v): %v ", location, err) } + + baseDir = filepath.Dir(location) } - return Parse(reader, location) + return Parse(reader, location, baseDir) } // LoadAndValidate will load the config from the given filename and validate it if it was diff --git a/config/source/file.go b/config/source/file.go new file mode 100644 index 000000000..1f01fd406 --- /dev/null +++ b/config/source/file.go @@ -0,0 +1,134 @@ +package source + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/fsnotify/fsnotify" + "github.com/go-spatial/tegola/internal/env" + "github.com/go-spatial/tegola/internal/log" +) + +// FileConfigSource is a config source for loading and watching files in a local directory. +type FileConfigSource struct { + dir string +} + +func (s *FileConfigSource) Type() string { + return "file" +} + +func (s *FileConfigSource) Init(options env.Dict, baseDir string) error { + var err error + dir, err := options.String("dir", nil) + if err != nil { + return err + } + + // If dir is relative, make it relative to baseDir. + if !filepath.IsAbs(dir) { + dir = filepath.Join(baseDir, dir) + } + + s.dir = dir + return nil +} + +// LoadAndWatch will read all the files in the configured directory and then keep watching the directory for changes. +func (s *FileConfigSource) LoadAndWatch(ctx context.Context) (ConfigWatcher, error) { + appWatcher := ConfigWatcher{ + Updates: make(chan App), + Deletions: make(chan string), + } + + // First check that the directory exists and is readable. + if _, err := os.ReadDir(s.dir); err != nil { + return appWatcher, fmt.Errorf("Apps directory not readable: %s", err) + } + + // Now setup the filesystem watcher. + fsWatcher, err := fsnotify.NewWatcher() + if err != nil { + return appWatcher, err + } + + err = fsWatcher.Add(s.dir) + if err != nil { + return appWatcher, err + } + + go func() { + defer fsWatcher.Close() + + // First load the files already present in the directory. + entries, err := os.ReadDir(s.dir) + if err != nil { + log.Errorf("Could not read apps directory (%s). Exiting watcher. %s", s.dir, err) + return + } + + for _, entry := range entries { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".toml") { + log.Debug("Ignoring ", entry.Name()) + continue + } + + log.Infof("Loading app file %s...", entry.Name()) + s.loadApp(filepath.Join(s.dir, entry.Name()), appWatcher.Updates) + } + + // Now start processing future additions/removals/edits. + for { + select { + case event, ok := <-fsWatcher.Events: + if !ok { + return + } + + if !strings.HasSuffix(event.Name, ".toml") { + log.Debug("Ignoring ", event.Name) + continue + } + + if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) { + log.Infof("Loading app file %s (%s)...", event.Name, event.Op) + s.loadApp(event.Name, appWatcher.Updates) + } else if event.Has(fsnotify.Remove) { + log.Infof("Unloading app file %s (%s)...", event.Name, event.Op) + appWatcher.Deletions <- event.Name + } + + case err, ok := <-fsWatcher.Errors: + if !ok { + return + } + log.Error(err) + + case <-ctx.Done(): + log.Info("Exiting watcher...") + return + } + } + }() + + return appWatcher, nil +} + +// loadApp reads the file and loads the app into the updates channel. 
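+//
+// An app file is a TOML document holding only provider and map tables; a
+// minimal hypothetical example (provider keys vary by driver):
+//
+//	[[providers]]
+//	name = "states_provider"
+//	type = "gpkg"
+//	filepath = "states.gpkg"
+//
+//	[[maps]]
+//	name = "states"
+//
+//	  [[maps.layers]]
+//	  provider_layer = "states_provider.states"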
+func (s *FileConfigSource) loadApp(filename string, updates chan App) { + f, err := os.Open(filename) + if err != nil { + log.Errorf("Failed to load %s: %s", filename, err) + return + } + defer f.Close() + + if app, err := parseApp(f, filename); err == nil { + updates <- app + } else { + log.Errorf("Failed to parse %s: %s", filename, err) + } +} diff --git a/config/source/source.go b/config/source/source.go new file mode 100644 index 000000000..5e0e5d408 --- /dev/null +++ b/config/source/source.go @@ -0,0 +1,59 @@ +package source + +import ( + "context" + "fmt" + "io" + + "github.com/BurntSushi/toml" + "github.com/go-spatial/tegola/internal/env" + "github.com/go-spatial/tegola/provider" +) + +// App represents a set of providers and maps that should be added/removed together. +type App struct { + Providers []env.Dict `toml:"providers"` + Maps []provider.Map `toml:"maps"` + Key string // key is used to track this app through its lifecycle and could be anything to uniquely identify it. +} + +type ConfigSource interface { + Type() string + LoadAndWatch(ctx context.Context) (ConfigWatcher, error) +} + +type ConfigWatcher struct { + Updates chan App + Deletions chan string +} + +func InitSource(sourceType string, options env.Dict, baseDir string) (ConfigSource, error) { + switch sourceType { + case "file": + src := FileConfigSource{} + err := src.Init(options, baseDir) + return &src, err + + default: + return nil, fmt.Errorf("No ConfigSource of type %s", sourceType) + } +} + +// parseApp decodes any reader into an App. +func parseApp(reader io.Reader, key string) (app App, err error) { + app = App{} + _, err = toml.NewDecoder(reader).Decode(&app) + if err != nil { + return app, err + } + + for _, m := range app.Maps { + for k, p := range m.Parameters { + p.Normalize() + m.Parameters[k] = p + } + } + + app.Key = key + return app, nil +} diff --git a/go.mod b/go.mod index ffad3ec10..c5cc19242 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/akrylysov/algnhsa v1.0.0 github.com/aws/aws-sdk-go v1.27.0 github.com/dimfeld/httptreemux v5.0.1+incompatible + github.com/fsnotify/fsnotify v1.6.0 github.com/gdey/tbltest v0.0.0-20170331191646-af8abc47b052 github.com/go-redis/redis v6.9.0+incompatible github.com/go-spatial/cobra v0.0.3-0.20181105183926-68194e4fbcc6 diff --git a/go.sum b/go.sum index 25ede7dd9..3724bed4d 100644 --- a/go.sum +++ b/go.sum @@ -53,8 +53,9 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gdey/tbltest v0.0.0-20170331191646-af8abc47b052 h1:uDErRK65HpAslYsynvi7QVzqNYJELGmG2ijcBT/GKJo= github.com/gdey/tbltest v0.0.0-20170331191646-af8abc47b052/go.mod h1:O0rUOxGq87ndwSAK+YVv/8g40Wbre/OSPCU8GlgUyPk= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -330,6 +331,7 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 000000000..fad895851 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 000000000..32f1001be --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 000000000..1d89d85ce --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# go test -c output +*.test +*.test.exe + +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 000000000..a04f2907f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 000000000..77f9593bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,470 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. 
It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. + +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + +## [1.5.4] - 2022-04-25 + +* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) +* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) +* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) + +## [1.5.3] - 2022-04-22 + +* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) + +## [1.5.2] - 2022-04-21 + +* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) +* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) +* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) +* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) +* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. + +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + +## [1.4.7] - 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## [1.4.2] - 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec 
[#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## [1.4.1] - 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## [1.4.0] - 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## [1.3.1] - 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## [1.3.0] - 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## [1.2.10] - 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## [1.2.9] - 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## [1.2.8] - 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## [1.2.5] - 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## [1.2.1] - 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## [1.2.0] - 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## [1.1.1] - 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## [1.1.0] - 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [1.0.4] - 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. 
[#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## [1.0.3] - 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## [1.0.2] - 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## [1.0.0] - 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## [0.9.3] - 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [0.9.2] - 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## [0.9.1] - 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## [0.9.0] - 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## [0.8.12] - 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## [0.8.11] - 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## [0.8.10] - 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## [0.8.9] - 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## [0.8.8] - 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## [0.8.7] - 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## [0.8.6] - 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## [0.8.5] - 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## [0.8.4] - 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## [0.8.3] - 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## [0.8.2] - 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## [0.8.1] - 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## [0.8.0] - 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## [0.7.4] - 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## [0.7.3] - 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## [0.7.2] - 2012-09-01 + +* kqueue: events for created directories + +## [0.7.1] - 2012-07-14 + +* [Fix] for renaming files + +## [0.7.0] - 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## [0.6.0] - 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## [0.5.1] - 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## [0.5.0] - 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## [0.4.0] - 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## [0.3.0] - 2012-02-19 + +* kqueue: add files when watch directory 
+ +## [0.2.0] - 2011-12-30 + +* update to latest Go weekly code + +## [0.1.0] - 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 000000000..ea379759d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,26 @@ +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: + +- To avoid "wasted" work, please discus changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. + +- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. + +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. + +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. + +Use the `-short` flag to make the "stress test" run faster. + + +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 000000000..fb03ade75 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,25 @@ +Copyright © 2012 The Go Authors. All rights reserved. +Copyright © fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 000000000..d4e6080fe --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,161 @@ +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. + +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify + +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** + +--- + +Platform support: + +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +Linux and macOS should include Android and iOS, but these are currently untested. + +Usage +----- +A basic example: + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. + <-make(chan struct{}) +} +``` + +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: + + % go run ./cmd/fsnotify + +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. + +### Are subdirectories watched too? +No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). + +[#18]: https://github.com/fsnotify/fsnotify/issues/18 + +### Do I have to watch the Error and Event channels in a goroutine? 
+As of now, yes (you can read both channels in the same goroutine using `select`, +you don't need a separate goroutine for both channels; see the example). + +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. + +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. + +[#9]: https://github.com/fsnotify/fsnotify/issues/9 + +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: + + fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE + +This is the event that inotify sends, so not much can be changed about this. + +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". + +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` + +To increase them you can use `sysctl` or write the value to proc file: + + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 + +To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): + + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 + +Reaching the limit will result in a "no space left on device" or "too many open +files" error. + +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. + +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. + +### macOS +Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary +workaround is to add your folder(s) to the *Spotlight Privacy settings* until we +have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 000000000..1a95ad8e7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,162 @@ +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. 
+// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + +// NewWatcher creates a new Watcher. 
+func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 000000000..54c77fbb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,459 @@ +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). 
See: https://github.com/golang/go/issues/26439 + fd int + mu sync.Mutex // Map access + inotifyFile *os.File + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + // Need to set the FD to nonblocking mode in order for SetDeadline methods to work + // Otherwise, blocking i/o operations won't terminate on close + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed() { + w.mu.Unlock() + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + w.mu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. + err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. 
There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). 
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + if !w.sendError(err) { + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + var ( + // Point "raw" to the event in the buffer + raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + mask = uint32(raw.Mask) + nameLen = uint32(raw.Len) + ) + + if mask&unix.IN_Q_OVERFLOW != 0 { + if !w.sendError(ErrEventOverflow) { + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := w.newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if mask&unix.IN_IGNORED == 0 { + if !w.sendEvent(event) { + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 000000000..29087469b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. +func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. 
+ err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. 
+ for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. 
When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + path := filepath.Join(dirPath, fileInfo.Name()) + + cleanPath, err := w.internalWatch(path, fileInfo) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. +// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dir string) { + // Get all files + files, err := ioutil.ReadDir(dir) + if err != nil { + if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { + return + } + } + + // Search for new files + for _, fi := range files { + err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 000000000..a9bb1c3c4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). 
+// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 000000000..ae392867c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. 
+// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. 
+func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return errors.New("watcher already closed") + } + w.mu.Unlock() + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". 
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() { + var ( + n uint32 + key uintptr + ov *windows.Overlapped + ) + runtime.LockOSThread() + + for { + qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + // This error is handled after the watch == nil check below. NOTE: this + // seems odd, note sure if it's correct. + + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. + n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.sendError(errors.New("short read in readEvents()")) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. 
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 000000000..30a5bf0f0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,81 @@ +//go:build !plan9 +// +build !plan9 + +// Package fsnotify provides a cross-platform interface for file system +// notifications. +package fsnotify + +import ( + "errors" + "fmt" + "strings" +) + +// Event represents a file system notification. +type Event struct { + // Path to the file or directory. + // + // Paths are relative to the input; for example with Add("dir") the Name + // will be set to "dir/file" if you create that file, but if you use + // Add("/path/to/dir") it will be "/path/to/dir/file". + Name string + + // File operation that triggered the event. + // + // This is a bitmask and some systems may send multiple operations at once. + // Use the Event.Has() method instead of comparing with ==. + Op Op +} + +// Op describes a set of file operations. +type Op uint32 + +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. 
+const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +// Common errors that can be reported by a watcher +var ( + ErrNonExistentWatch = errors.New("can't remove non-existent watcher") + ErrEventOverflow = errors.New("fsnotify queue overflow") +) + +func (op Op) String() string { + var b strings.Builder + if op.Has(Create) { + b.WriteString("|CREATE") + } + if op.Has(Remove) { + b.WriteString("|REMOVE") + } + if op.Has(Write) { + b.WriteString("|WRITE") + } + if op.Has(Rename) { + b.WriteString("|RENAME") + } + if op.Has(Chmod) { + b.WriteString("|CHMOD") + } + if b.Len() == 0 { + return "[no events]" + } + return b.String()[1:] +} + +// Has reports if this operation has the given operation. +func (o Op) Has(h Op) bool { return o&h == h } + +// Has reports if this event has the given operation. +func (e Event) Has(op Op) bool { return e.Op.Has(op) } + +// String returns a string representation of the event with their path. +func (e Event) String() string { + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 000000000..b09ef7683 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go new file mode 100644 index 000000000..4322b0b88 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -0,0 +1,8 @@ +//go:build freebsd || openbsd || netbsd || dragonfly +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go new file mode 100644 index 000000000..5da5ffa78 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -0,0 +1,9 @@ +//go:build darwin +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/modules.txt b/vendor/modules.txt index 61f650f05..6b65c16d2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -108,6 +108,9 @@ github.com/cespare/xxhash/v2 # github.com/dimfeld/httptreemux v5.0.1+incompatible ## explicit github.com/dimfeld/httptreemux +# github.com/fsnotify/fsnotify v1.6.0 +## explicit; go 1.16 +github.com/fsnotify/fsnotify # github.com/gdey/tbltest v0.0.0-20170331191646-af8abc47b052 ## explicit github.com/gdey/tbltest From 4b56ee559ef9b32af7888ec64f836bc6f3939d0b Mon Sep 17 00:00:00 2001 From: Joshua Chamberlain Date: Tue, 
5 Sep 2023 11:55:55 -0700 Subject: [PATCH 2/6] Work on passing tests --- config/config_test.go | 1399 ++++++++++++++++++++++------------------- 1 file changed, 736 insertions(+), 663 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 5bf34ced6..775eb163b 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/go-spatial/tegola/config" + "github.com/go-spatial/tegola/config/source" "github.com/go-spatial/tegola/internal/env" "github.com/go-spatial/tegola/provider" _ "github.com/go-spatial/tegola/provider/debug" @@ -62,7 +63,7 @@ func TestParse(t *testing.T) { r := strings.NewReader(tc.config) - conf, err := config.Parse(r, "") + conf, err := config.Parse(r, "", "") if tc.expectedErr != nil { if err == nil { @@ -192,62 +193,64 @@ func TestParse(t *testing.T) { "type": "file", "basepath": "/tmp/tegola-cache", }, - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - TileBuffer: env.IntPtr(env.Int(12)), - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water", - MinZoom: env.UintPtr(10), - MaxZoom: env.UintPtr(20), - DontSimplify: true, - DontClip: true, - DontClean: true, - }, - }, - Parameters: []provider.QueryParameter{ - { - Name: "param1", - Token: "!PARAM1!", - SQL: "?", - Type: "string", - }, - { - Name: "param2", - Token: "!PARAM2!", - Type: "int", - SQL: "AND answer = ?", - DefaultValue: "42", + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + TileBuffer: env.IntPtr(env.Int(12)), + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water", + MinZoom: env.UintPtr(10), + MaxZoom: env.UintPtr(20), + DontSimplify: true, + DontClip: true, + DontClean: true, + }, }, - { - Name: "param3", - Token: "!PARAM3!", - Type: "float", - SQL: "?", - DefaultSQL: "AND pi = 3.1415926", + Parameters: []provider.QueryParameter{ + { + Name: "param1", + Token: "!PARAM1!", + SQL: "?", + Type: "string", + }, + { + Name: "param2", + Token: "!PARAM2!", + Type: "int", + SQL: "AND answer = ?", + DefaultValue: "42", + }, + { + Name: "param3", + Token: "!PARAM3!", + Type: "float", + SQL: "?", + DefaultSQL: "AND pi = 3.1415926", + }, }, }, }, @@ -336,74 +339,76 @@ func TestParse(t *testing.T) { }, }, }, - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - 
"password": "", - "layers": []map[string]interface{}{ - { - "name": "water_0_5", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", - }, - { - "name": "water_6_10", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water_0_5", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, + { + "name": "water_6_10", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{ENV_TEST_CENTER_X, ENV_TEST_CENTER_Y, ENV_TEST_CENTER_Z}, - TileBuffer: env.IntPtr(env.Int(64)), - Layers: []provider.MapLayer{ - { - Name: "water", - ProviderLayer: ENV_TEST_PROVIDER_LAYER, - MinZoom: nil, - MaxZoom: nil, - }, - { - Name: "water", - ProviderLayer: "provider1.water_6_10", - MinZoom: env.UintPtr(6), - MaxZoom: env.UintPtr(10), + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{ENV_TEST_CENTER_X, ENV_TEST_CENTER_Y, ENV_TEST_CENTER_Z}, + TileBuffer: env.IntPtr(env.Int(64)), + Layers: []provider.MapLayer{ + { + Name: "water", + ProviderLayer: ENV_TEST_PROVIDER_LAYER, + MinZoom: nil, + MaxZoom: nil, + }, + { + Name: "water", + ProviderLayer: "provider1.water_6_10", + MinZoom: env.UintPtr(6), + MaxZoom: env.UintPtr(10), + }, }, }, - }, - { - Name: "osm_2", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - TileBuffer: env.IntPtr(env.Int(64)), - Layers: []provider.MapLayer{ - { - Name: "water", - ProviderLayer: "provider1.water_0_5", - MinZoom: env.UintPtr(0), - MaxZoom: env.UintPtr(5), - DefaultTags: env.Dict{ - "provider": ENV_TEST_MAP_LAYER_DEFAULT_TAG, + { + Name: "osm_2", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + TileBuffer: env.IntPtr(env.Int(64)), + Layers: []provider.MapLayer{ + { + Name: "water", + ProviderLayer: "provider1.water_0_5", + MinZoom: env.UintPtr(0), + MaxZoom: env.UintPtr(5), + DefaultTags: env.Dict{ + "provider": ENV_TEST_MAP_LAYER_DEFAULT_TAG, + }, + }, + { + Name: "water", + ProviderLayer: "provider1.water_6_10", + MinZoom: env.UintPtr(6), + MaxZoom: env.UintPtr(10), }, - }, - { - Name: "water", - ProviderLayer: "provider1.water_6_10", - MinZoom: env.UintPtr(6), - MaxZoom: env.UintPtr(10), }, }, }, @@ -524,36 +529,38 @@ func TestValidateMutateZoom(t *testing.T) { Webserver: config.Webserver{ Port: ":8080", }, - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": 
"admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water", - MinZoom: nil, - MaxZoom: nil, + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water", + MinZoom: nil, + MaxZoom: nil, + }, }, }, }, @@ -568,36 +575,38 @@ func TestValidateMutateZoom(t *testing.T) { Webserver: config.Webserver{ Port: ":8080", }, - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water", - MinZoom: env.UintPtr(0), - MaxZoom: env.UintPtr(0), + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water", + MinZoom: env.UintPtr(0), + MaxZoom: env.UintPtr(0), + }, }, }, }, @@ -637,58 +646,60 @@ func TestValidate(t *testing.T) { Webserver: config.Webserver{ Port: ":8080", }, - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": 
"provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, - }, - { - "name": "provider2", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + { + "name": "provider2", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water", - MinZoom: env.UintPtr(10), - MaxZoom: env.UintPtr(20), - }, - { - ProviderLayer: "provider2.water", - MinZoom: env.UintPtr(10), - MaxZoom: env.UintPtr(20), + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water", + MinZoom: env.UintPtr(10), + MaxZoom: env.UintPtr(20), + }, + { + ProviderLayer: "provider2.water", + MinZoom: env.UintPtr(10), + MaxZoom: env.UintPtr(20), + }, }, }, }, @@ -701,60 +712,62 @@ func TestValidate(t *testing.T) { }, "2": { config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water_0_5", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water_0_5", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, - }, - { - "name": "provider2", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water_5_10", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + { + "name": "provider2", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": 
[]map[string]interface{}{ + { + "name": "water_5_10", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - Name: "water", - ProviderLayer: "provider1.water_0_5", - MinZoom: env.UintPtr(0), - MaxZoom: env.UintPtr(5), - }, - { - Name: "water", - ProviderLayer: "provider2.water_5_10", - MinZoom: env.UintPtr(5), - MaxZoom: env.UintPtr(10), + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + Name: "water", + ProviderLayer: "provider1.water_0_5", + MinZoom: env.UintPtr(0), + MaxZoom: env.UintPtr(5), + }, + { + Name: "water", + ProviderLayer: "provider2.water_5_10", + MinZoom: env.UintPtr(5), + MaxZoom: env.UintPtr(10), + }, }, }, }, @@ -771,76 +784,78 @@ func TestValidate(t *testing.T) { Webserver: config.Webserver{ Port: ":8080", }, - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, - }, - { - "name": "provider2", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + { + "name": "provider2", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water", - MinZoom: env.UintPtr(10), - MaxZoom: env.UintPtr(15), - }, - { - ProviderLayer: "provider2.water", - MinZoom: env.UintPtr(16), - MaxZoom: env.UintPtr(20), + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: 
[3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water", + MinZoom: env.UintPtr(10), + MaxZoom: env.UintPtr(15), + }, + { + ProviderLayer: "provider2.water", + MinZoom: env.UintPtr(16), + MaxZoom: env.UintPtr(20), + }, }, }, - }, - { - Name: "osm_2", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water", - MinZoom: env.UintPtr(10), - MaxZoom: env.UintPtr(15), - }, - { - ProviderLayer: "provider2.water", - MinZoom: env.UintPtr(16), - MaxZoom: env.UintPtr(20), + { + Name: "osm_2", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water", + MinZoom: env.UintPtr(10), + MaxZoom: env.UintPtr(15), + }, + { + ProviderLayer: "provider2.water", + MinZoom: env.UintPtr(16), + MaxZoom: env.UintPtr(20), + }, }, }, }, @@ -854,62 +869,64 @@ func TestValidate(t *testing.T) { Webserver: config.Webserver{ Port: ":8080", }, - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, - }, - { - "name": "provider2", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + { + "name": "provider2", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water", + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water", + }, }, }, - }, - { - Name: "osm_2", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, 
-85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider2.water", + { + Name: "osm_2", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider2.water", + }, }, }, }, @@ -923,54 +940,56 @@ func TestValidate(t *testing.T) { Webserver: config.Webserver{ Port: ":8080", }, - Providers: []env.Dict{ - { - "name": "provider1", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, - }, - { - "name": "provider2", - "type": "postgis", - "host": "localhost", - "port": int64(5432), - "database": "osm_water", - "user": "admin", - "password": "", - "layers": []map[string]interface{}{ - { - "name": "water", - "geometry_fieldname": "geom", - "id_fieldname": "gid", - "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + { + "name": "provider2", + "type": "postgis", + "host": "localhost", + "port": int64(5432), + "database": "osm_water", + "user": "admin", + "password": "", + "layers": []map[string]interface{}{ + { + "name": "water", + "geometry_fieldname": "geom", + "id_fieldname": "gid", + "sql": "SELECT gid, ST_AsBinary(geom) AS geom FROM simplified_water_polygons WHERE geom && !BBOX!", + }, }, }, }, - }, - Maps: []provider.Map{ - { - Name: "osm", - Attribution: "Test Attribution", - Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, - Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water_default_z", - }, - { - ProviderLayer: "provider2.water_default_z", + Maps: []provider.Map{ + { + Name: "osm", + Attribution: "Test Attribution", + Bounds: []env.Float{-180, -85.05112877980659, 180, 85.0511287798066}, + Center: [3]env.Float{-76.275329586789, 39.153492567373, 8.0}, + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water_default_z", + }, + { + ProviderLayer: "provider2.water_default_z", + }, }, }, }, @@ -998,10 +1017,12 @@ func TestValidate(t *testing.T) { "7 non-existant provider type": { expectedErr: config.ErrUnknownProviderType{Type: "nonexistant", Name: "provider1", KnownProviders: []string{"..."}}, config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "nonexistant", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "nonexistant", + }, }, }, }, @@ -1009,9 +1030,11 @@ func TestValidate(t *testing.T) { "8 missing name field": { expectedErr: config.ErrProviderNameRequired{Pos: 0}, config: config.Config{ - Providers: []env.Dict{ - { - "type": "test", + App: source.App{ + Providers: 
[]env.Dict{ + { + "type": "test", + }, }, }, }, @@ -1019,14 +1042,16 @@ func TestValidate(t *testing.T) { "8 duplicate name field": { expectedErr: config.ErrProviderNameDuplicate{Pos: 1}, config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "test", - }, - { - "name": "provider1", - "type": "test", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "test", + }, + { + "name": "provider1", + "type": "test", + }, }, }, }, @@ -1034,13 +1059,15 @@ func TestValidate(t *testing.T) { "8 missing name field at pos 1": { expectedErr: config.ErrProviderNameRequired{Pos: 1}, config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "test", - }, - { - "type": "test", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "test", + }, + { + "type": "test", + }, }, }, }, @@ -1048,9 +1075,11 @@ func TestValidate(t *testing.T) { "9 missing type field": { expectedErr: config.ErrProviderTypeRequired{Pos: 0}, config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + }, }, }, }, @@ -1058,32 +1087,36 @@ func TestValidate(t *testing.T) { "9 missing type field at pos 1": { expectedErr: config.ErrProviderTypeRequired{Pos: 1}, config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "test", - }, - { - "name": "provider2", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "test", + }, + { + "name": "provider2", + }, }, }, }, }, "10 happy 1 mvt provider only 1 layer": { config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "mvt_test", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "mvt_test", + }, }, - }, - Maps: []provider.Map{ - { - Name: "happy", - Attribution: "Test Attribution", - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water_default_z", + Maps: []provider.Map{ + { + Name: "happy", + Attribution: "Test Attribution", + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water_default_z", + }, }, }, }, @@ -1092,22 +1125,24 @@ func TestValidate(t *testing.T) { }, "10 happy 1 mvt provider only 2 layer": { config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "mvt_test", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "mvt_test", + }, }, - }, - Maps: []provider.Map{ - { - Name: "happy", - Attribution: "Test Attribution", - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water_default_z", - }, - { - ProviderLayer: "provider1.land_default_z", + Maps: []provider.Map{ + { + Name: "happy", + Attribution: "Test Attribution", + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water_default_z", + }, + { + ProviderLayer: "provider1.land_default_z", + }, }, }, }, @@ -1116,26 +1151,28 @@ func TestValidate(t *testing.T) { }, "10 happy 1 mvt, 1 std provider only 1 layer": { config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "mvt_test", - }, - { - "name": "provider2", - "type": "test", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "mvt_test", + }, + { + "name": "provider2", + "type": "test", + }, }, - }, - Maps: []provider.Map{ - { - Name: "happy", - Attribution: "Test Attribution", - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water_default_z", - }, - { - ProviderLayer: "provider1.land_default_z", + Maps: 
[]provider.Map{ + { + Name: "happy", + Attribution: "Test Attribution", + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water_default_z", + }, + { + ProviderLayer: "provider1.land_default_z", + }, }, }, }, @@ -1148,19 +1185,21 @@ func TestValidate(t *testing.T) { ProviderName: "bad", }, config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "mvt_test", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "mvt_test", + }, }, - }, - Maps: []provider.Map{ - { - Name: "happy", - Attribution: "Test Attribution", - Layers: []provider.MapLayer{ - { - ProviderLayer: "bad.water_default_z", + Maps: []provider.Map{ + { + Name: "happy", + Attribution: "Test Attribution", + Layers: []provider.MapLayer{ + { + ProviderLayer: "bad.water_default_z", + }, }, }, }, @@ -1173,26 +1212,28 @@ func TestValidate(t *testing.T) { Current: "stdprovider1", }, config: config.Config{ - Providers: []env.Dict{ - { - "name": "provider1", - "type": "mvt_test", - }, - { - "name": "stdprovider1", - "type": "test", + App: source.App{ + Providers: []env.Dict{ + { + "name": "provider1", + "type": "mvt_test", + }, + { + "name": "stdprovider1", + "type": "test", + }, }, - }, - Maps: []provider.Map{ - { - Name: "comingle", - Attribution: "Test Attribution", - Layers: []provider.MapLayer{ - { - ProviderLayer: "provider1.water_default_z", - }, - { - ProviderLayer: "stdprovider1.water_default_z", + Maps: []provider.Map{ + { + Name: "comingle", + Attribution: "Test Attribution", + Layers: []provider.MapLayer{ + { + ProviderLayer: "provider1.water_default_z", + }, + { + ProviderLayer: "stdprovider1.water_default_z", + }, }, }, }, @@ -1205,26 +1246,28 @@ func TestValidate(t *testing.T) { Current: "provider1", }, config: config.Config{ - Providers: []env.Dict{ - { - "name": "stdprovider1", - "type": "test", - }, - { - "name": "provider1", - "type": "mvt_test", + App: source.App{ + Providers: []env.Dict{ + { + "name": "stdprovider1", + "type": "test", + }, + { + "name": "provider1", + "type": "mvt_test", + }, }, - }, - Maps: []provider.Map{ - { - Name: "comingle", - Attribution: "Test Attribution", - Layers: []provider.MapLayer{ - { - ProviderLayer: "stdprovider1.water_default_z", - }, - { - ProviderLayer: "provider1.water_default_z", + Maps: []provider.Map{ + { + Name: "comingle", + Attribution: "Test Attribution", + Layers: []provider.MapLayer{ + { + ProviderLayer: "stdprovider1.water_default_z", + }, + { + ProviderLayer: "provider1.water_default_z", + }, }, }, }, @@ -1233,14 +1276,16 @@ func TestValidate(t *testing.T) { }, "13 reserved token name": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "bad_param", - Parameters: []provider.QueryParameter{ - { - Name: "param", - Token: "!BBOX!", - Type: "int", + App: source.App{ + Maps: []provider.Map{ + { + Name: "bad_param", + Parameters: []provider.QueryParameter{ + { + Name: "param", + Token: "!BBOX!", + Type: "int", + }, }, }, }, @@ -1257,19 +1302,21 @@ func TestValidate(t *testing.T) { }, "13 duplicate parameter name": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "dupe_param_name", - Parameters: []provider.QueryParameter{ - { - Name: "param", - Token: "!PARAM!", - Type: "int", - }, - { - Name: "param", - Token: "!PARAM2!", - Type: "int", + App: source.App{ + Maps: []provider.Map{ + { + Name: "dupe_param_name", + Parameters: []provider.QueryParameter{ + { + Name: "param", + Token: "!PARAM!", + Type: "int", + }, + { + Name: "param", + Token: "!PARAM2!", + Type: "int", + }, }, }, }, @@ 
-1286,19 +1333,21 @@ func TestValidate(t *testing.T) { }, "13 duplicate token name": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "dupe_param_token", - Parameters: []provider.QueryParameter{ - { - Name: "param", - Token: "!PARAM!", - Type: "int", - }, - { - Name: "param2", - Token: "!PARAM!", - Type: "int", + App: source.App{ + Maps: []provider.Map{ + { + Name: "dupe_param_token", + Parameters: []provider.QueryParameter{ + { + Name: "param", + Token: "!PARAM!", + Type: "int", + }, + { + Name: "param2", + Token: "!PARAM!", + Type: "int", + }, }, }, }, @@ -1315,14 +1364,16 @@ func TestValidate(t *testing.T) { }, "13 parameter unknown type": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "unknown_param_type", - Parameters: []provider.QueryParameter{ - { - Name: "param", - Token: "!BBOX!", - Type: "foo", + App: source.App{ + Maps: []provider.Map{ + { + Name: "unknown_param_type", + Parameters: []provider.QueryParameter{ + { + Name: "param", + Token: "!BBOX!", + Type: "foo", + }, }, }, }, @@ -1339,16 +1390,18 @@ func TestValidate(t *testing.T) { }, "13 parameter two defaults": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "unknown_two_defaults", - Parameters: []provider.QueryParameter{ - { - Name: "param", - Token: "!BBOX!", - Type: "string", - DefaultSQL: "foo", - DefaultValue: "bar", + App: source.App{ + Maps: []provider.Map{ + { + Name: "unknown_two_defaults", + Parameters: []provider.QueryParameter{ + { + Name: "param", + Token: "!BBOX!", + Type: "string", + DefaultSQL: "foo", + DefaultValue: "bar", + }, }, }, }, @@ -1367,16 +1420,18 @@ func TestValidate(t *testing.T) { }, "13 parameter invalid default": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "parameter_invalid_default", - - Parameters: []provider.QueryParameter{ - { - Name: "param", - Token: "!BBOX!", - Type: "int", - DefaultValue: "foo", + App: source.App{ + Maps: []provider.Map{ + { + Name: "parameter_invalid_default", + + Parameters: []provider.QueryParameter{ + { + Name: "param", + Token: "!BBOX!", + Type: "int", + DefaultValue: "foo", + }, }, }, }, @@ -1394,14 +1449,16 @@ func TestValidate(t *testing.T) { }, "13 invalid token name": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "parameter_invalid_token", - Parameters: []provider.QueryParameter{ - { - Name: "param", - Token: "!Token with spaces!", - Type: "int", + App: source.App{ + Maps: []provider.Map{ + { + Name: "parameter_invalid_token", + Parameters: []provider.QueryParameter{ + { + Name: "param", + Token: "!Token with spaces!", + Type: "int", + }, }, }, }, @@ -1445,17 +1502,21 @@ func TestConfigureTileBuffers(t *testing.T) { tests := map[string]tcase{ "1 tilebuffer is not set": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "osm", + App: source.App{ + Maps: []provider.Map{ + { + Name: "osm", + }, }, }, }, expected: config.Config{ - Maps: []provider.Map{ - { - Name: "osm", - TileBuffer: env.IntPtr(env.Int(64)), + App: source.App{ + Maps: []provider.Map{ + { + Name: "osm", + TileBuffer: env.IntPtr(env.Int(64)), + }, }, }, }, @@ -1463,51 +1524,59 @@ func TestConfigureTileBuffers(t *testing.T) { "2 tilebuffer is set in global section": { config: config.Config{ TileBuffer: env.IntPtr(env.Int(32)), - Maps: []provider.Map{ - { - Name: "osm", - }, - { - Name: "osm-2", + App: source.App{ + Maps: []provider.Map{ + { + Name: "osm", + }, + { + Name: "osm-2", + }, }, }, }, expected: config.Config{ TileBuffer: env.IntPtr(env.Int(32)), - Maps: []provider.Map{ - { - Name: "osm", - TileBuffer: 
env.IntPtr(env.Int(32)), - }, - { - Name: "osm-2", - TileBuffer: env.IntPtr(env.Int(32)), + App: source.App{ + Maps: []provider.Map{ + { + Name: "osm", + TileBuffer: env.IntPtr(env.Int(32)), + }, + { + Name: "osm-2", + TileBuffer: env.IntPtr(env.Int(32)), + }, }, }, }, }, "3 tilebuffer is set in map section": { config: config.Config{ - Maps: []provider.Map{ - { - Name: "osm", - TileBuffer: env.IntPtr(env.Int(16)), - }, - { - Name: "osm-2", - TileBuffer: env.IntPtr(env.Int(32)), + App: source.App{ + Maps: []provider.Map{ + { + Name: "osm", + TileBuffer: env.IntPtr(env.Int(16)), + }, + { + Name: "osm-2", + TileBuffer: env.IntPtr(env.Int(32)), + }, }, }, }, expected: config.Config{ - Maps: []provider.Map{ - { - Name: "osm", - TileBuffer: env.IntPtr(env.Int(16)), - }, - { - Name: "osm-2", - TileBuffer: env.IntPtr(env.Int(32)), + App: source.App{ + Maps: []provider.Map{ + { + Name: "osm", + TileBuffer: env.IntPtr(env.Int(16)), + }, + { + Name: "osm-2", + TileBuffer: env.IntPtr(env.Int(32)), + }, }, }, }, @@ -1515,19 +1584,23 @@ func TestConfigureTileBuffers(t *testing.T) { "4 tilebuffer is set in global and map sections": { config: config.Config{ TileBuffer: env.IntPtr(env.Int(32)), - Maps: []provider.Map{ - { - Name: "osm", - TileBuffer: env.IntPtr(env.Int(16)), + App: source.App{ + Maps: []provider.Map{ + { + Name: "osm", + TileBuffer: env.IntPtr(env.Int(16)), + }, }, }, }, expected: config.Config{ TileBuffer: env.IntPtr(env.Int(32)), - Maps: []provider.Map{ - { - Name: "osm", - TileBuffer: env.IntPtr(env.Int(16)), + App: source.App{ + Maps: []provider.Map{ + { + Name: "osm", + TileBuffer: env.IntPtr(env.Int(16)), + }, }, }, }, From 971a99fefddb3abd337d95b7f78ff72818b1c10d Mon Sep 17 00:00:00 2001 From: Joshua Chamberlain Date: Tue, 12 Sep 2023 16:02:30 -0700 Subject: [PATCH 3/6] Address initial review comments --- atlas/atlas.go | 7 ++- cmd/tegola/cmd/root.go | 131 +++++++++++++++++++++-------------------- 2 files changed, 73 insertions(+), 65 deletions(-) diff --git a/atlas/atlas.go b/atlas/atlas.go index 714d03fa9..df885f95a 100644 --- a/atlas/atlas.go +++ b/atlas/atlas.go @@ -232,8 +232,13 @@ func (a *Atlas) AddMaps(maps []Map) error { a.Lock() defer a.Unlock() + // If Atlas is empty, add all the maps, no checks required. if a.maps == nil { - a.maps = map[string]Map{} + a.maps = make(map[string]Map, len(maps)) + for _, m := range maps { + a.maps[m.Name] = m + } + return nil } // Check all the names for conflicts before we add any map, so that we can add all or none. diff --git a/cmd/tegola/cmd/root.go b/cmd/tegola/cmd/root.go index 83d9f09db..142c6e771 100644 --- a/cmd/tegola/cmd/root.go +++ b/cmd/tegola/cmd/root.go @@ -130,7 +130,8 @@ func initConfig(configFile string, cacheRequired bool, logLevel string, logger s } // Setup the app config source. - if err = initAppConfigSource(conf); err != nil { + ctx := context.Background() + if err = initAppConfigSource(ctx, conf); err != nil { return err } @@ -183,7 +184,7 @@ func initMaps(maps []provider.Map, providers map[string]provider.TilerUnion) err } // initAppConfigSource sets up an additional configuration source for "apps" (groups of providers and maps) to be loaded and unloaded on-the-fly. -func initAppConfigSource(conf config.Config) error { +func initAppConfigSource(ctx context.Context, conf config.Config) error { // Get the config source type. If none, return. 
val, err := conf.AppConfigSource.String("type", nil) if err != nil || val == "" { @@ -191,7 +192,6 @@ func initAppConfigSource(conf config.Config) error { } // Initialize the source. - ctx := context.Background() // Not doing anything with context now, but could use it for stopping this goroutine. src, err := source.InitSource(val, conf.AppConfigSource, conf.BaseDir) if err != nil { return err @@ -203,70 +203,73 @@ func initAppConfigSource(conf config.Config) error { return err } - go func() { - // Keep a record of what we've loaded so that we can unload when needed. - apps := make(map[string]source.App) - - for { - select { - case app, ok := <-watcher.Updates: - if !ok { - return - } - - // Check for validity first. - if err := config.ValidateApp(&app); err != nil { - log.Errorf("Failed validating app %s. %s", app.Key, err) - continue - } - - // If the new app is named the same as an existing app, first unload the existing one. - if old, exists := apps[app.Key]; exists { - log.Infof("Unloading app %s...", old.Key) - // We need only unload maps, since the providers don't live outside of maps. - register.UnloadMaps(nil, getMapNames(old)) - delete(apps, app.Key) - } - - log.Infof("Loading app %s...", app.Key) - - // Init new providers - providers, err := initProviders(app.Providers, app.Maps) - if err != nil { - log.Errorf("Failed initializing providers from %s: %s", app.Key, err) - continue - } - - // Init new maps - if err = initMaps(app.Maps, providers); err != nil { - log.Errorf("Failed initializing maps from %s: %s", app.Key, err) - continue - } - - // Record that we've loaded this app. - apps[app.Key] = app - - case deleted, ok := <-watcher.Deletions: - if !ok { - return - } - - // Unload an app's maps if it was previously loaded. - if app, exists := apps[deleted]; exists { - log.Infof("Unloading app %s...", app.Key) - register.UnloadMaps(nil, getMapNames(app)) - delete(apps, app.Key) - } else { - log.Infof("Received an unload event for app %s, but couldn't find it.", deleted) - } - - case <-ctx.Done(): + go watchAppUpdates(ctx, watcher) + + return nil +} + +// watchAppUpdates will pull from the channels supplied by the given watcher to process new app config. +func watchAppUpdates(ctx context.Context, watcher source.ConfigWatcher) { + // Keep a record of what we've loaded so that we can unload when needed. + apps := make(map[string]source.App) + + for { + select { + case app, ok := <-watcher.Updates: + if !ok { return } - } - }() - return nil + // Check for validity first. + if err := config.ValidateApp(&app); err != nil { + log.Errorf("Failed validating app %s. %s", app.Key, err) + continue + } + + // If the new app is named the same as an existing app, first unload the existing one. + if old, exists := apps[app.Key]; exists { + log.Infof("Unloading app %s...", old.Key) + // We need only unload maps, since the providers don't live outside of maps. + register.UnloadMaps(nil, getMapNames(old)) + delete(apps, app.Key) + } + + log.Infof("Loading app %s...", app.Key) + + // Init new providers + providers, err := initProviders(app.Providers, app.Maps) + if err != nil { + log.Errorf("Failed initializing providers from %s: %s", app.Key, err) + continue + } + + // Init new maps + if err = initMaps(app.Maps, providers); err != nil { + log.Errorf("Failed initializing maps from %s: %s", app.Key, err) + continue + } + + // Record that we've loaded this app. 
+ apps[app.Key] = app + + case deleted, ok := <-watcher.Deletions: + if !ok { + return + } + + // Unload an app's maps if it was previously loaded. + if app, exists := apps[deleted]; exists { + log.Infof("Unloading app %s...", app.Key) + register.UnloadMaps(nil, getMapNames(app)) + delete(apps, app.Key) + } else { + log.Infof("Received an unload event for app %s, but couldn't find it.", deleted) + } + + case <-ctx.Done(): + return + } + } } func getMapNames(app source.App) []string { From c0ff399d21cac90e0b6a12c6114b238f262cea39 Mon Sep 17 00:00:00 2001 From: Joshua Chamberlain Date: Tue, 12 Sep 2023 16:54:27 -0700 Subject: [PATCH 4/6] Create a global set of provider instances (namespaced by app name) to track their lifecyeles, thus removing this responsibility from the individual drivers. --- cmd/internal/register/maps_test.go | 2 +- cmd/internal/register/providers.go | 13 +++- cmd/internal/register/providers_test.go | 2 +- cmd/tegola/cmd/root.go | 27 +++++++-- cmd/tegola_lambda/main.go | 2 +- provider/debug/debug.go | 7 ++- provider/gpkg/gpkg.go | 5 ++ provider/gpkg/gpkg_register.go | 26 +------- provider/gpkg/gpkg_register_internal_test.go | 46 --------------- provider/hana/hana.go | 25 ++------ provider/hana/register.go | 33 +++++------ provider/mvt_provider.go | 3 + provider/postgis/postgis.go | 25 ++------ provider/postgis/register.go | 39 ++++++------ provider/provider.go | 62 ++++++++++++++++---- provider/provider_test.go | 4 +- provider/test/emptycollection/provider.go | 11 ++-- provider/test/provider.go | 26 ++++---- 18 files changed, 172 insertions(+), 186 deletions(-) diff --git a/cmd/internal/register/maps_test.go b/cmd/internal/register/maps_test.go index 5573bb8c4..25b3712ad 100644 --- a/cmd/internal/register/maps_test.go +++ b/cmd/internal/register/maps_test.go @@ -29,7 +29,7 @@ func TestMaps(t *testing.T) { provArr[i] = tc.providers[i] } - providers, err := register.Providers(provArr, tc.maps) + providers, err := register.Providers(provArr, tc.maps, "default") if err != nil { t.Errorf("unexpected err: %v", err) return diff --git a/cmd/internal/register/providers.go b/cmd/internal/register/providers.go index 4553b9fc1..89955f6a1 100644 --- a/cmd/internal/register/providers.go +++ b/cmd/internal/register/providers.go @@ -33,7 +33,7 @@ func (e ErrProviderTypeInvalid) Error() string { } // Providers registers data provider backends -func Providers(providers []dict.Dicter, maps []provider.Map) (map[string]provider.TilerUnion, error) { +func Providers(providers []dict.Dicter, maps []provider.Map, namespace string) (map[string]provider.TilerUnion, error) { // holder for registered providers registeredProviders := map[string]provider.TilerUnion{} @@ -72,7 +72,7 @@ func Providers(providers []dict.Dicter, maps []provider.Map) (map[string]provide } // register the provider - prov, err := provider.For(ptype, p, maps) + prov, err := provider.For(ptype, p, maps, namespace) if err != nil { return registeredProviders, err } @@ -84,3 +84,12 @@ func Providers(providers []dict.Dicter, maps []provider.Map) (map[string]provide return registeredProviders, nil } + +func UnloadProviders(names []string, namespace string) { + for _, name := range names { + err := provider.Remove(name, namespace) + if err != nil { + log.Errorf("Error unloading provider instance %s: %s", name, err) + } + } +} diff --git a/cmd/internal/register/providers_test.go b/cmd/internal/register/providers_test.go index 9f8d44410..f72dc0e35 100644 --- a/cmd/internal/register/providers_test.go +++ 
b/cmd/internal/register/providers_test.go @@ -23,7 +23,7 @@ func TestProviders(t *testing.T) { provArr[i] = tc.config[i] } - _, err = register.Providers(provArr, nil) + _, err = register.Providers(provArr, nil, "default") if tc.expectedErr != nil { if err.Error() != tc.expectedErr.Error() { t.Errorf("invalid error. expected: %v, got %v", tc.expectedErr, err.Error()) diff --git a/cmd/tegola/cmd/root.go b/cmd/tegola/cmd/root.go index 142c6e771..7df6a0481 100644 --- a/cmd/tegola/cmd/root.go +++ b/cmd/tegola/cmd/root.go @@ -119,7 +119,7 @@ func initConfig(configFile string, cacheRequired bool, logLevel string, logger s } // Init providers from the primary config file. - providers, err := initProviders(conf.Providers, conf.Maps) + providers, err := initProviders(conf.Providers, conf.Maps, "default") if err != nil { return err } @@ -159,14 +159,14 @@ func initConfig(configFile string, cacheRequired bool, logLevel string, logger s } // initProviders translate provider config from a TOML file into usable Provider objects. -func initProviders(providersConfig []env.Dict, maps []provider.Map) (map[string]provider.TilerUnion, error) { +func initProviders(providersConfig []env.Dict, maps []provider.Map, namespace string) (map[string]provider.TilerUnion, error) { // first convert []env.Map -> []dict.Dicter provArr := make([]dict.Dicter, len(providersConfig)) for i := range provArr { provArr[i] = providersConfig[i] } - providers, err := register.Providers(provArr, conf.Maps) + providers, err := register.Providers(provArr, conf.Maps, namespace) if err != nil { return nil, fmt.Errorf("could not register providers: %v", err) } @@ -229,15 +229,15 @@ func watchAppUpdates(ctx context.Context, watcher source.ConfigWatcher) { // If the new app is named the same as an existing app, first unload the existing one. if old, exists := apps[app.Key]; exists { log.Infof("Unloading app %s...", old.Key) - // We need only unload maps, since the providers don't live outside of maps. 
register.UnloadMaps(nil, getMapNames(old)) - delete(apps, app.Key) + register.UnloadProviders(getProviderNames(old), old.Key) + delete(apps, old.Key) } log.Infof("Loading app %s...", app.Key) // Init new providers - providers, err := initProviders(app.Providers, app.Maps) + providers, err := initProviders(app.Providers, app.Maps, app.Key) if err != nil { log.Errorf("Failed initializing providers from %s: %s", app.Key, err) continue @@ -261,6 +261,7 @@ func watchAppUpdates(ctx context.Context, watcher source.ConfigWatcher) { if app, exists := apps[deleted]; exists { log.Infof("Unloading app %s...", app.Key) register.UnloadMaps(nil, getMapNames(app)) + register.UnloadProviders(getProviderNames(app), app.Key) delete(apps, app.Key) } else { log.Infof("Received an unload event for app %s, but couldn't find it.", deleted) @@ -280,3 +281,17 @@ func getMapNames(app source.App) []string { return names } + +func getProviderNames(app source.App) []string { + names := make([]string, 0, len(app.Providers)) + for _, p := range app.Providers { + name, err := p.String("name", nil) + if err != nil { + log.Warnf("Encountered a provider in app %s with an empty name.", app.Key) + continue + } + names = append(names, name) + } + + return names +} diff --git a/cmd/tegola_lambda/main.go b/cmd/tegola_lambda/main.go index 5e987da8c..84656eb71 100644 --- a/cmd/tegola_lambda/main.go +++ b/cmd/tegola_lambda/main.go @@ -66,7 +66,7 @@ func init() { } // register the providers - providers, err := register.Providers(provArr, nil) + providers, err := register.Providers(provArr, nil, "default") if err != nil { log.Fatal(err) } diff --git a/provider/debug/debug.go b/provider/debug/debug.go index ba96f41f1..ffc5e2548 100644 --- a/provider/debug/debug.go +++ b/provider/debug/debug.go @@ -22,7 +22,7 @@ const ( ) func init() { - provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider, nil) + provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider) } // NewProvider Setups a debug provider. there are not currently any config params supported @@ -113,3 +113,8 @@ func (p *Provider) Layers() ([]provider.LayerInfo, error) { return ls, nil } + +// Cleanup is a no-op for the debug provider. +func (p *Provider) Cleanup() error { + return nil +} diff --git a/provider/gpkg/gpkg.go b/provider/gpkg/gpkg.go index 65649ed07..c21e272da 100644 --- a/provider/gpkg/gpkg.go +++ b/provider/gpkg/gpkg.go @@ -235,6 +235,11 @@ func (p *Provider) Close() error { return p.db.Close() } +// Cleanup calls Close() +func (p *Provider) Cleanup() error { + return p.Close() +} + type GeomTableDetails struct { geomFieldname string geomType geom.Geometry diff --git a/provider/gpkg/gpkg_register.go b/provider/gpkg/gpkg_register.go index 791550fe9..61991cde5 100644 --- a/provider/gpkg/gpkg_register.go +++ b/provider/gpkg/gpkg_register.go @@ -26,7 +26,7 @@ import ( var colFinder *regexp.Regexp func init() { - provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider, Cleanup) + provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider) colFinder = regexp.MustCompile(`^(([a-zA-Z_][a-zA-Z0-9_]*)|"([^"]+)")\s`) } @@ -41,7 +41,8 @@ type featureTableDetails struct { } // Creates a config instance of the type NewTileProvider() requires including all available feature -// tables in the gpkg at 'gpkgPath'. +// +// tables in the gpkg at 'gpkgPath'. 
func AutoConfig(gpkgPath string) (map[string]interface{}, error) { // Get all feature tables db, err := sql.Open("sqlite3", gpkgPath) @@ -394,26 +395,5 @@ func NewTileProvider(config dict.Dicter, maps []provider.Map) (provider.Tiler, e p.layers[layer.name] = layer } - // track the provider so we can clean it up later - providers = append(providers, p) - return &p, err } - -// reference to all instantiated providers -var providers []Provider - -// Cleanup will close all database connections and destroy all previously instantiated Provider instances -func Cleanup() { - if len(providers) > 0 { - log.Infof("cleaning up gpkg providers") - } - - for i := range providers { - if err := providers[i].Close(); err != nil { - log.Errorf("err closing connection: %v", err) - } - } - - providers = make([]Provider, 0) -} diff --git a/provider/gpkg/gpkg_register_internal_test.go b/provider/gpkg/gpkg_register_internal_test.go index a0ba0b808..aa1cf4113 100644 --- a/provider/gpkg/gpkg_register_internal_test.go +++ b/provider/gpkg/gpkg_register_internal_test.go @@ -11,7 +11,6 @@ import ( "github.com/go-spatial/geom" "github.com/go-spatial/geom/cmp" - "github.com/go-spatial/tegola/dict" _ "github.com/mattn/go-sqlite3" ) @@ -1088,48 +1087,3 @@ func TestFeatureTableMetaData(t *testing.T) { t.Run(tname, fn(tc)) } } - -func TestCleanup(t *testing.T) { - type tcase struct { - config dict.Dict - } - - fn := func(tc tcase) func(*testing.T) { - return func(t *testing.T) { - _, err := NewTileProvider(tc.config, nil) - if err != nil { - t.Fatalf("err creating NewTileProvider: %v", err) - return - } - - if len(providers) != 1 { - t.Errorf("expecting 1 providers, got %v", len(providers)) - return - } - - Cleanup() - - if len(providers) != 0 { - t.Errorf("expecting 0 providers, got %v", len(providers)) - return - } - } - } - - tests := map[string]tcase{ - "cleanup": { - config: map[string]interface{}{ - "filepath": GPKGAthensFilePath, - "layers": []map[string]interface{}{ - {"name": "a_points", "tablename": "amenities_points", "id_fieldname": "fid", "fields": []string{"amenity", "religion", "tourism", "shop"}}, - {"name": "r_lines", "tablename": "rail_lines", "id_fieldname": "fid", "fields": []string{"railway", "bridge", "tunnel"}}, - {"name": "rd_lines", "tablename": "roads_lines"}, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, fn(tc)) - } -} diff --git a/provider/hana/hana.go b/provider/hana/hana.go index c706c2c76..06ce50d87 100644 --- a/provider/hana/hana.go +++ b/provider/hana/hana.go @@ -597,9 +597,6 @@ func CreateProvider(config dict.Dicter, maps []provider.Map, providerType string } p.layers = lyrs - // track the provider so we can clean it up later - providers = append(providers, p) - return &p, nil } @@ -956,21 +953,11 @@ func (p Provider) MVTForLayers(ctx context.Context, tile provider.Tile, params p return mvtBytes.Bytes(), nil } +// Cleanup calls Close() +func (p *Provider) Cleanup() error { + p.Close() + return nil +} + // Close will close the Provider's database connectio func (p *Provider) Close() { p.pool.Close() } - -// reference to all instantiated providers -var providers []Provider - -// Cleanup will close all database connections and destroy all previously instantiated Provider instances -func Cleanup() { - if len(providers) > 0 { - log.Infof("cleaning up HANA providers") - } - - for i := range providers { - providers[i].Close() - } - - providers = make([]Provider, 0) -} diff --git a/provider/hana/register.go b/provider/hana/register.go index e83922dae..3cc8ce58d 100644 --- 
a/provider/hana/register.go +++ b/provider/hana/register.go @@ -6,8 +6,8 @@ import ( ) func init() { - provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider, Cleanup) - provider.MVTRegister(provider.TypeMvt.Prefix()+Name, NewMVTTileProvider, Cleanup) + provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider) + provider.MVTRegister(provider.TypeMvt.Prefix()+Name, NewMVTTileProvider) } const ( @@ -20,23 +20,22 @@ const ( // trying to create a driver. This Provider supports the following fields // in the provided map[string]interface{} map: // -// uri (string): [Required] HANA connection string -// name (string): [Required] Provider name is referenced from map layers -// srid (int): [Optional] The default SRID for the provider. Defaults to WebMercator (3857) but also supports WGS84 (4326) -// type (string): [Required] The type of data provider. must be "hana" to use this data provider -// layers (map[string]struct{}) — This is map of layers keyed by the layer name. Supports the following properties +// uri (string): [Required] HANA connection string +// name (string): [Required] Provider name is referenced from map layers +// srid (int): [Optional] The default SRID for the provider. Defaults to WebMercator (3857) but also supports WGS84 (4326) +// type (string): [Required] The type of data provider. must be "hana" to use this data provider +// layers (map[string]struct{}) — This is map of layers keyed by the layer name. Supports the following properties // -// name (string): [Required] the name of the layer. This is used to reference this layer from map layers. -// tablename (string): [*Required] the name of the database table to query against. Required if sql is not defined. -// geometry_fieldname (string): [Optional] the name of the filed which contains the geometry for the feature. defaults to geom -// id_fieldname (string): [Optional] the name of the feature id field. defaults to gid -// fields ([]string): [Optional] a list of fields to include alongside the feature. Can be used if sql is not defined. -// srid (int): [Optional] the SRID of the layer. Supports 3857 (WebMercator) or 4326 (WGS84). -// sql (string): [*Required] custom SQL to use use. Required if tablename is not defined. Supports the following tokens: -// -// !BBOX! - [Required] will be replaced with the bounding box of the tile before the query is sent to the database. -// !ZOOM! - [Optional] will be replaced with the "Z" (zoom) value of the requested tile. +// name (string): [Required] the name of the layer. This is used to reference this layer from map layers. +// tablename (string): [*Required] the name of the database table to query against. Required if sql is not defined. +// geometry_fieldname (string): [Optional] the name of the filed which contains the geometry for the feature. defaults to geom +// id_fieldname (string): [Optional] the name of the feature id field. defaults to gid +// fields ([]string): [Optional] a list of fields to include alongside the feature. Can be used if sql is not defined. +// srid (int): [Optional] the SRID of the layer. Supports 3857 (WebMercator) or 4326 (WGS84). +// sql (string): [*Required] custom SQL to use use. Required if tablename is not defined. Supports the following tokens: // +// !BBOX! - [Required] will be replaced with the bounding box of the tile before the query is sent to the database. +// !ZOOM! - [Optional] will be replaced with the "Z" (zoom) value of the requested tile. 
func NewTileProvider(config dict.Dicter, maps []provider.Map) (provider.Tiler, error) { return CreateProvider(config, maps, ProviderType) } diff --git a/provider/mvt_provider.go b/provider/mvt_provider.go index 4f192fec8..96483a9be 100644 --- a/provider/mvt_provider.go +++ b/provider/mvt_provider.go @@ -11,6 +11,9 @@ type MVTTiler interface { // MVTForLayers will return a MVT byte array or an error for the given layer names. MVTForLayers(ctx context.Context, tile Tile, params Params, layers []Layer) ([]byte, error) + + // Cleanup will do anything needed before the Tiler is removed. + Cleanup() error } // MVTInitFunc initialize a provider given a config map. The init function should validate the config map, and report any errors. This is called by the For function. diff --git a/provider/postgis/postgis.go b/provider/postgis/postgis.go index 9dbe1314a..8e4fd66f0 100644 --- a/provider/postgis/postgis.go +++ b/provider/postgis/postgis.go @@ -640,9 +640,6 @@ func CreateProvider(config dict.Dicter, maps []provider.Map, providerType string } p.layers = lyrs - // track the provider so we can clean it up later - providers = append(providers, p) - return &p, nil } @@ -1067,21 +1064,11 @@ func (p Provider) MVTForLayers(ctx context.Context, tile provider.Tile, params p return data.Bytes, nil } +// Cleanup calls Close() +func (p *Provider) Cleanup() error { + p.Close() + return nil +} + // Close will close the Provider's database connectio func (p *Provider) Close() { p.pool.Close() } - -// reference to all instantiated providers -var providers []Provider - -// Cleanup will close all database connections and destroy all previously instantiated Provider instances -func Cleanup() { - if len(providers) > 0 { - log.Infof("cleaning up postgis providers") - } - - for i := range providers { - providers[i].Close() - } - - providers = make([]Provider, 0) -} diff --git a/provider/postgis/register.go b/provider/postgis/register.go index 4c3a271b2..db85fb9e2 100644 --- a/provider/postgis/register.go +++ b/provider/postgis/register.go @@ -6,8 +6,8 @@ import ( ) func init() { - provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider, Cleanup) - provider.MVTRegister(provider.TypeMvt.Prefix()+Name, NewMVTTileProvider, Cleanup) + provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider) + provider.MVTRegister(provider.TypeMvt.Prefix()+Name, NewMVTTileProvider) } const ( @@ -20,26 +20,25 @@ const ( // trying to create a driver. This Provider supports the following fields // in the provided map[string]interface{} map: // -// host (string): [Required] postgis database host -// port (int): [Required] postgis database port (required) -// database (string): [Required] postgis database name -// user (string): [Required] postgis database user -// password (string): [Required] postgis database password -// srid (int): [Optional] The default SRID for the provider. Defaults to WebMercator (3857) but also supports WGS84 (4326) -// max_connections : [Optional] The max connections to maintain in the connection pool. Default is 100. 0 means no max. -// layers (map[string]struct{}) — This is map of layers keyed by the layer name. supports the following properties +// host (string): [Required] postgis database host +// port (int): [Required] postgis database port (required) +// database (string): [Required] postgis database name +// user (string): [Required] postgis database user +// password (string): [Required] postgis database password +// srid (int): [Optional] The default SRID for the provider. 
Defaults to WebMercator (3857) but also supports WGS84 (4326) +// max_connections : [Optional] The max connections to maintain in the connection pool. Default is 100. 0 means no max. +// layers (map[string]struct{}) — This is map of layers keyed by the layer name. supports the following properties // -// name (string): [Required] the name of the layer. This is used to reference this layer from map layers. -// tablename (string): [*Required] the name of the database table to query against. Required if sql is not defined. -// geometry_fieldname (string): [Optional] the name of the filed which contains the geometry for the feature. defaults to geom -// id_fieldname (string): [Optional] the name of the feature id field. defaults to gid -// fields ([]string): [Optional] a list of fields to include alongside the feature. Can be used if sql is not defined. -// srid (int): [Optional] the SRID of the layer. Supports 3857 (WebMercator) or 4326 (WGS84). -// sql (string): [*Required] custom SQL to use use. Required if tablename is not defined. Supports the following tokens: -// -// !BBOX! - [Required] will be replaced with the bounding box of the tile before the query is sent to the database. -// !ZOOM! - [Optional] will be replaced with the "Z" (zoom) value of the requested tile. +// name (string): [Required] the name of the layer. This is used to reference this layer from map layers. +// tablename (string): [*Required] the name of the database table to query against. Required if sql is not defined. +// geometry_fieldname (string): [Optional] the name of the filed which contains the geometry for the feature. defaults to geom +// id_fieldname (string): [Optional] the name of the feature id field. defaults to gid +// fields ([]string): [Optional] a list of fields to include alongside the feature. Can be used if sql is not defined. +// srid (int): [Optional] the SRID of the layer. Supports 3857 (WebMercator) or 4326 (WGS84). +// sql (string): [*Required] custom SQL to use use. Required if tablename is not defined. Supports the following tokens: // +// !BBOX! - [Required] will be replaced with the bounding box of the tile before the query is sent to the database. +// !ZOOM! - [Optional] will be replaced with the "Z" (zoom) value of the requested tile. func NewTileProvider(config dict.Dicter, maps []provider.Map) (provider.Tiler, error) { return CreateProvider(config, maps, ProviderType) } diff --git a/provider/provider.go b/provider/provider.go index 6813892cb..dde50ff6b 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -115,6 +115,9 @@ type Tiler interface { // TileFeature will stream decoded features to the callback function fn // if fn returns ErrCanceled, the TileFeatures method should stop processing TileFeatures(ctx context.Context, layer string, t Tile, params Params, fn func(f *Feature) error) error + + // Cleanup will do anything needed before the Tiler is removed. + Cleanup() error } // TilerUnion represents either a Std Tiler or and MVTTiler; only one should be not nil. @@ -135,6 +138,17 @@ func (tu TilerUnion) Layers() ([]LayerInfo, error) { return nil, ErrNilInitFunc } +// Cleanup calls the cleanup method on either Std or MVT layer. +func (tu TilerUnion) Cleanup() error { + if tu.Std != nil { + return tu.Std.Cleanup() + } + if tu.Mvt != nil { + return tu.Mvt.Cleanup() + } + return nil +} + // InitFunc initialize a provider given a config map. The init function should validate the config map, and report any errors. This is called by the For function. 
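For orientation, the following is a minimal sketch of what a provider now has to supply to satisfy the extended Tiler contract. It is not part of this patch: the type, its field, and the assumption that the rest of the interface is just Layers and TileFeatures are all hypothetical.

package memprovider // illustrative only, not part of this patch

import (
	"context"
	"database/sql"

	"github.com/go-spatial/tegola/provider"
)

// Provider is a made-up provider holding one resource that must be released
// when its instance is unloaded.
type Provider struct {
	db *sql.DB
}

func (p *Provider) Layers() ([]provider.LayerInfo, error) {
	return nil, nil // layer discovery elided in this sketch
}

func (p *Provider) TileFeatures(ctx context.Context, layer string, t provider.Tile, params provider.Params, fn func(f *provider.Feature) error) error {
	return nil // feature streaming elided in this sketch
}

// Cleanup is the new per-instance hook; it takes over the job of the
// package-level cleanup function that providers used to pass to Register.
func (p *Provider) Cleanup() error {
	if p.db != nil {
		return p.db.Close()
	}
	return nil
}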
type InitFunc func(dicter dict.Dicter, maps []Map) (Tiler, error) @@ -146,16 +160,14 @@ type pfns struct { init InitFunc // mvtInit will be filled out if it's a mvt provider mvtInit MVTInitFunc - - cleanup CleanupFunc } var providers map[string]pfns // Register the provider with the system. This call is generally made in the init functions of the provider. -// the clean up function will be called during shutdown of the provider to allow the provider to do any cleanup. +// // The init function can not be nil, the cleanup function may be nil -func Register(name string, init InitFunc, cleanup CleanupFunc) error { +func Register(name string, init InitFunc) error { if init == nil { return ErrNilInitFunc } @@ -168,17 +180,16 @@ func Register(name string, init InitFunc, cleanup CleanupFunc) error { } providers[name] = pfns{ - init: init, - cleanup: cleanup, + init: init, } return nil } // MVTRegister the provider with the system. This call is generally made in the init functions of the provider. -// the clean up function will be called during shutdown of the provider to allow the provider to do any cleanup. +// // The init function can not be nil, the cleanup function may be nil -func MVTRegister(name string, init MVTInitFunc, cleanup CleanupFunc) error { +func MVTRegister(name string, init MVTInitFunc) error { if init == nil { return ErrNilInitFunc } @@ -192,7 +203,6 @@ func MVTRegister(name string, init MVTInitFunc, cleanup CleanupFunc) error { providers[name] = pfns{ mvtInit: init, - cleanup: cleanup, } return nil @@ -233,7 +243,7 @@ func Drivers(types ...providerType) (l []string) { // For function returns a configure provider of the given type; The provider may be a mvt provider or // a std provider. The correct entry in TilerUnion will not be nil. If there is an error both entries // will be nil. -func For(name string, config dict.Dicter, maps []Map) (val TilerUnion, err error) { +func For(name string, config dict.Dicter, maps []Map, namespace string) (val TilerUnion, err error) { var ( driversList = Drivers() ) @@ -246,21 +256,47 @@ func For(name string, config dict.Dicter, maps []Map) (val TilerUnion, err error } if p.init != nil { val.Std, err = p.init(config, maps) + recordInstance(name, namespace, val) return val, err } if p.mvtInit != nil { val.Mvt, err = p.mvtInit(config, maps) + recordInstance(name, namespace, val) return val, err } return val, ErrInvalidRegisteredProvider{Name: name} } +// providerInstances tracks all Tilers currently in use. +// Keys are prefixed by namespace, e.g., `namespace.providerName`. +var providerInstances map[string]TilerUnion + +func recordInstance(name, namespace string, instance TilerUnion) { + if providerInstances == nil { + providerInstances = make(map[string]TilerUnion) + } + + providerInstances[namespace+"."+name] = instance +} + +// Remove unregisters a single provider instance and calls its Remove method. +func Remove(name, namespace string) error { + key := namespace + "." 
+ name + if p, exists := providerInstances[key]; exists { + delete(providerInstances, key) + return p.Cleanup() + } + + return nil +} + // Cleanup is called at the end of the run to allow providers to cleanup func Cleanup() { log.Info("cleaning up providers") - for _, p := range providers { - if p.cleanup != nil { - p.cleanup() + for k, p := range providerInstances { + err := p.Cleanup() + if err != nil { + log.Errorf("Failed cleaning up provider %s: %s", k, err) } } } diff --git a/provider/provider_test.go b/provider/provider_test.go index d67a98a93..fa14047e1 100644 --- a/provider/provider_test.go +++ b/provider/provider_test.go @@ -12,7 +12,7 @@ func TestProviderInterface(t *testing.T) { stdName = provider.TypeStd.Prefix() + test.Name mvtName = provider.TypeMvt.Prefix() + test.Name ) - if _, err := provider.For(stdName, nil, nil); err != nil { + if _, err := provider.For(stdName, nil, nil, "default"); err != nil { t.Errorf("retrieve provider err , expected nil got %v", err) return } @@ -23,7 +23,7 @@ func TestProviderInterface(t *testing.T) { if test.Count != 0 { t.Errorf(" expected count , expected 0 got %v", test.Count) } - if _, err := provider.For(mvtName, nil, nil); err != nil { + if _, err := provider.For(mvtName, nil, nil, "default"); err != nil { t.Errorf("retrieve provider err , expected nil got %v", err) return } diff --git a/provider/test/emptycollection/provider.go b/provider/test/emptycollection/provider.go index eef8d9ac0..e96796f99 100644 --- a/provider/test/emptycollection/provider.go +++ b/provider/test/emptycollection/provider.go @@ -15,7 +15,7 @@ const Name = "emptycollection" var Count int func init() { - provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider, Cleanup) + provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider) } // NewProvider setups a test provider. there are not currently any config params supported @@ -24,9 +24,6 @@ func NewTileProvider(config dict.Dicter, maps []provider.Map) (provider.Tiler, e return &TileProvider{}, nil } -// Cleanup cleans up all the test providers. -func Cleanup() { Count = 0 } - type TileProvider struct{} func (tp *TileProvider) Layers() ([]provider.LayerInfo, error) { @@ -52,3 +49,9 @@ func (tp *TileProvider) TileFeatures(ctx context.Context, layer string, t provid return fn(&debugTileOutline) } + +// Cleanup cleans up the test provider. +func (tp *TileProvider) Cleanup() error { + Count-- + return nil +} diff --git a/provider/test/provider.go b/provider/test/provider.go index 011242afb..91ac4ebde 100644 --- a/provider/test/provider.go +++ b/provider/test/provider.go @@ -23,8 +23,8 @@ var ( ) func init() { - provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider, Cleanup) - provider.MVTRegister(provider.TypeMvt.Prefix()+Name, NewMVTTileProvider, Cleanup) + provider.Register(provider.TypeStd.Prefix()+Name, NewTileProvider) + provider.MVTRegister(provider.TypeMvt.Prefix()+Name, NewMVTTileProvider) } // NewTileProvider setups a test provider. there are not currently any config params supported @@ -41,7 +41,7 @@ func NewMVTTileProvider(config dict.Dicter, maps []provider.Map) (provider.MVTTi lock.Lock() MVTCount++ lock.Unlock() - var mvtTile []byte + mvtTile := []byte{} if config != nil { path, err := config.String("test_file", nil) if err != nil { @@ -61,14 +61,6 @@ func NewMVTTileProvider(config dict.Dicter, maps []provider.Map) (provider.MVTTi }, nil } -// Cleanup cleans up all the test providers. 
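To make the intent of the instance registry concrete, here is a hedged sketch of how a caller could drop a single app's providers. register.UnloadProviders, referenced earlier in this series, is not shown in these patches, so the helper below is purely illustrative and may differ from the real one.

package example // illustrative only, not part of this patch

import (
	"github.com/go-spatial/tegola/internal/log"
	"github.com/go-spatial/tegola/provider"
)

// unloadAppProviders tears down the provider instances belonging to one app.
// provider.For records each instance under the key "<namespace>.<name>", so
// removal only needs those two pieces of information back.
func unloadAppProviders(appKey string, providerNames []string) {
	for _, name := range providerNames {
		// Remove forgets the instance and calls its Cleanup method.
		if err := provider.Remove(name, appKey); err != nil {
			log.Errorf("failed cleaning up provider %s.%s: %v", appKey, name, err)
		}
	}
}

At process shutdown, the package-level provider.Cleanup shown above still walks every instance that was never individually removed.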
-func Cleanup() { - lock.Lock() - Count = 0 - MVTCount = 0 - lock.Unlock() -} - // TileProvider mocks out a tile provider type TileProvider struct { MVTTile []byte @@ -110,3 +102,15 @@ func (tp *TileProvider) MVTForLayers(ctx context.Context, _ provider.Tile, _ pro } return tp.MVTTile, nil } + +// Cleanup cleans up the test provider. +func (tp *TileProvider) Cleanup() error { + lock.Lock() + if tp.MVTTile == nil { + Count-- + } else { + MVTCount-- + } + lock.Unlock() + return nil +} From efb04516fdd50c771a168939aba0bd0dc3a51c1d Mon Sep 17 00:00:00 2001 From: Joshua Chamberlain Date: Mon, 18 Sep 2023 10:00:27 -0700 Subject: [PATCH 5/6] Add tests for config watchers and related functions --- atlas/atlas_test.go | 49 +++++++++ cmd/tegola/cmd/root.go | 38 ++++--- cmd/tegola/cmd/root_test.go | 204 +++++++++++++++++++++++++++++++++++ config/source/file.go | 11 +- config/source/file_test.go | 184 +++++++++++++++++++++++++++++++ config/source/source.go | 8 +- config/source/source_test.go | 95 ++++++++++++++++ 7 files changed, 571 insertions(+), 18 deletions(-) create mode 100644 cmd/tegola/cmd/root_test.go create mode 100644 config/source/file_test.go create mode 100644 config/source/source_test.go diff --git a/atlas/atlas_test.go b/atlas/atlas_test.go index 75d24718a..45b96a923 100644 --- a/atlas/atlas_test.go +++ b/atlas/atlas_test.go @@ -1,6 +1,9 @@ package atlas_test import ( + "strings" + "testing" + "github.com/go-spatial/geom" "github.com/go-spatial/tegola/atlas" "github.com/go-spatial/tegola/internal/env" @@ -51,3 +54,49 @@ var testMap = atlas.Map{ testLayer3, }, } + +func TestAddMaps(t *testing.T) { + a := &atlas.Atlas{} + + // Should initialize from empty + maps := []atlas.Map{ + {Name: "First Map"}, + {Name: "Second Map"}, + } + err := a.AddMaps(maps) + if err != nil { + t.Errorf("Unexpected error when addings maps. %s", err) + } + + m, err := a.Map("Second Map") + if err != nil { + t.Errorf("Failed retrieving map from Atlas. %s", err) + } else if m.Name != "Second Map" { + t.Errorf("Expected map named \"Second Map\". Found %v.", m) + } + + // Should error if duplicate name. + err = a.AddMaps([]atlas.Map{{Name: "First Map"}}) + if err == nil || !strings.Contains(err.Error(), "already exists") { + t.Errorf("Should return error for duplicate map name. err=%s", err) + } +} + +func TestRemoveMaps(t *testing.T) { + a := &atlas.Atlas{} + a.AddMaps([]atlas.Map{ + {Name: "First Map"}, + {Name: "Second Map"}, + }) + + if len(a.AllMaps()) != 2 { + t.Error("Unexpected failure setting up Atlas. No maps added.") + return + } + + a.RemoveMaps([]string{"Second Map"}) + maps := a.AllMaps() + if len(maps) != 1 || maps[0].Name == "Second Map" { + t.Error("Should have deleted \"Second Map\". Didn't.") + } +} diff --git a/cmd/tegola/cmd/root.go b/cmd/tegola/cmd/root.go index 7df6a0481..a6a139e63 100644 --- a/cmd/tegola/cmd/root.go +++ b/cmd/tegola/cmd/root.go @@ -118,14 +118,16 @@ func initConfig(configFile string, cacheRequired bool, logLevel string, logger s return err } + loader := appInitializer{} + // Init providers from the primary config file. - providers, err := initProviders(conf.Providers, conf.Maps, "default") + providers, err := loader.initProviders(conf.Providers, conf.Maps, "default") if err != nil { return err } // Init maps from the primary config file. 
- if err = initMaps(conf.Maps, providers); err != nil { + if err = loader.initMaps(conf.Maps, providers); err != nil { return err } @@ -158,8 +160,16 @@ func initConfig(configFile string, cacheRequired bool, logLevel string, logger s return nil } +type initializer interface { + initProviders(providersConfig []env.Dict, maps []provider.Map, namespace string) (map[string]provider.TilerUnion, error) + initMaps(maps []provider.Map, providers map[string]provider.TilerUnion) error + unload(app source.App) +} + +type appInitializer struct{} + // initProviders translate provider config from a TOML file into usable Provider objects. -func initProviders(providersConfig []env.Dict, maps []provider.Map, namespace string) (map[string]provider.TilerUnion, error) { +func (l appInitializer) initProviders(providersConfig []env.Dict, maps []provider.Map, namespace string) (map[string]provider.TilerUnion, error) { // first convert []env.Map -> []dict.Dicter provArr := make([]dict.Dicter, len(providersConfig)) for i := range provArr { @@ -175,7 +185,7 @@ func initProviders(providersConfig []env.Dict, maps []provider.Map, namespace st } // initMaps registers maps with Atlas to be ready for service. -func initMaps(maps []provider.Map, providers map[string]provider.TilerUnion) error { +func (l appInitializer) initMaps(maps []provider.Map, providers map[string]provider.TilerUnion) error { if err := register.Maps(nil, maps, providers); err != nil { return fmt.Errorf("could not register maps: %v", err) } @@ -183,6 +193,12 @@ func initMaps(maps []provider.Map, providers map[string]provider.TilerUnion) err return nil } +// unload deregisters the maps and providers of an app. +func (l appInitializer) unload(app source.App) { + register.UnloadMaps(nil, getMapNames(app)) + register.UnloadProviders(getProviderNames(app), app.Key) +} + // initAppConfigSource sets up an additional configuration source for "apps" (groups of providers and maps) to be loaded and unloaded on-the-fly. func initAppConfigSource(ctx context.Context, conf config.Config) error { // Get the config source type. If none, return. @@ -203,13 +219,13 @@ func initAppConfigSource(ctx context.Context, conf config.Config) error { return err } - go watchAppUpdates(ctx, watcher) + go watchAppUpdates(ctx, watcher, appInitializer{}) return nil } // watchAppUpdates will pull from the channels supplied by the given watcher to process new app config. -func watchAppUpdates(ctx context.Context, watcher source.ConfigWatcher) { +func watchAppUpdates(ctx context.Context, watcher source.ConfigWatcher, init initializer) { // Keep a record of what we've loaded so that we can unload when needed. apps := make(map[string]source.App) @@ -229,22 +245,21 @@ func watchAppUpdates(ctx context.Context, watcher source.ConfigWatcher) { // If the new app is named the same as an existing app, first unload the existing one. 
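For operators, the switch that turns the app source on lives in the primary config. The TOML spelling of that setting comes from patch 1 and is not visible in this hunk, so the sketch below goes through the Go types instead, mirroring the tests further down, and should be read as illustrative wiring only.

package cmd // sketch placed alongside root.go; everything here is illustrative

import (
	"context"

	"github.com/go-spatial/tegola/config"
	"github.com/go-spatial/tegola/internal/env"
	"github.com/go-spatial/tegola/internal/log"
)

func enableAppSourceExample(ctx context.Context) {
	cfg := config.Config{
		AppConfigSource: env.Dict{
			"type": "file", // the only source type InitSource currently knows
			"dir":  "apps", // relative paths resolve against the config base dir
		},
	}
	if err := initAppConfigSource(ctx, cfg); err != nil {
		log.Fatal(err)
	}
	// initAppConfigSource starts watchAppUpdates in a goroutine; app TOML files
	// added to the watched directory are loaded, and removed files are unloaded.
}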
if old, exists := apps[app.Key]; exists { log.Infof("Unloading app %s...", old.Key) - register.UnloadMaps(nil, getMapNames(old)) - register.UnloadProviders(getProviderNames(old), old.Key) + init.unload(old) delete(apps, old.Key) } log.Infof("Loading app %s...", app.Key) // Init new providers - providers, err := initProviders(app.Providers, app.Maps, app.Key) + providers, err := init.initProviders(app.Providers, app.Maps, app.Key) if err != nil { log.Errorf("Failed initializing providers from %s: %s", app.Key, err) continue } // Init new maps - if err = initMaps(app.Maps, providers); err != nil { + if err = init.initMaps(app.Maps, providers); err != nil { log.Errorf("Failed initializing maps from %s: %s", app.Key, err) continue } @@ -260,8 +275,7 @@ func watchAppUpdates(ctx context.Context, watcher source.ConfigWatcher) { // Unload an app's maps if it was previously loaded. if app, exists := apps[deleted]; exists { log.Infof("Unloading app %s...", app.Key) - register.UnloadMaps(nil, getMapNames(app)) - register.UnloadProviders(getProviderNames(app), app.Key) + init.unload(app) delete(apps, app.Key) } else { log.Infof("Received an unload event for app %s, but couldn't find it.", deleted) diff --git a/cmd/tegola/cmd/root_test.go b/cmd/tegola/cmd/root_test.go new file mode 100644 index 000000000..06f86b657 --- /dev/null +++ b/cmd/tegola/cmd/root_test.go @@ -0,0 +1,204 @@ +package cmd + +import ( + "context" + "reflect" + "strings" + "testing" + "time" + + "github.com/go-spatial/tegola/config" + "github.com/go-spatial/tegola/config/source" + "github.com/go-spatial/tegola/internal/env" + "github.com/go-spatial/tegola/provider" +) + +type initializerMock struct { + initProvidersCalls chan bool + initMapsCalls chan bool + unloadCalls chan bool +} + +func (i initializerMock) initProviders(providersConfig []env.Dict, maps []provider.Map, namespace string) (map[string]provider.TilerUnion, error) { + i.initProvidersCalls <- true + return map[string]provider.TilerUnion{}, nil +} + +func (i initializerMock) initProvidersCalled() bool { + select { + case _, ok := <-i.initProvidersCalls: + return ok + case <-time.After(time.Millisecond): + return false + } +} + +func (i initializerMock) initMaps(maps []provider.Map, providers map[string]provider.TilerUnion) error { + i.initMapsCalls <- true + return nil +} + +func (i initializerMock) initMapsCalled() bool { + select { + case _, ok := <-i.initMapsCalls: + return ok + case <-time.After(time.Millisecond): + return false + } +} + +func (i initializerMock) unload(app source.App) { + i.unloadCalls <- true +} + +func (i initializerMock) unloadCalled() bool { + select { + case _, ok := <-i.unloadCalls: + return ok + case <-time.After(time.Millisecond): + return false + } +} + +func TestInitAppConfigSource(t *testing.T) { + var ( + cfg config.Config + err error + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Should return nil if app config source type not specified. + cfg = config.Config{} + err = initAppConfigSource(ctx, cfg) + if err != nil { + t.Errorf("Unexpected error when app config source type is not specified: %s", err) + } + + // Should return error if unable to initialize source. 
+ cfg = config.Config{ + AppConfigSource: env.Dict{ + "type": "something_nonexistent", + }, + } + err = initAppConfigSource(ctx, cfg) + if err == nil { + t.Error("Should return an error if invalid source type provided") + } + + cfg = config.Config{ + AppConfigSource: env.Dict{ + "type": "file", + "dir": "something_nonexistent", + }, + } + err = initAppConfigSource(ctx, cfg) + if err == nil || !strings.Contains(err.Error(), "directory") { + t.Errorf("Should return an error if unable to initialize source; expected an error about the directory, got %v", err) + } +} + +func TestWatchAppUpdates(t *testing.T) { + loader := initializerMock{ + initProvidersCalls: make(chan bool), + initMapsCalls: make(chan bool), + unloadCalls: make(chan bool), + } + // watcher := mock.NewWatcherMock() + watcher := source.ConfigWatcher{ + Updates: make(chan source.App), + Deletions: make(chan string), + } + defer watcher.Close() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go watchAppUpdates(ctx, watcher, loader) + + // Should load new map+provider + app := source.App{ + Providers: []env.Dict{}, + Maps: []provider.Map{}, + Key: "Test1", + } + // watcher.SendUpdate(app) + watcher.Updates <- app + if !loader.initProvidersCalled() { + t.Error("Failed to initialize providers") + } + if !loader.initMapsCalled() { + t.Error("Failed to initialize maps") + } + if loader.unloadCalled() { + t.Error("Unexpected app unload") + } + + // Should load updated map+provider + // watcher.SendUpdate(app) + watcher.Updates <- app + if !loader.unloadCalled() { + t.Error("Failed to unload old app") + } + if !loader.initProvidersCalled() { + t.Error("Failed to initialize providers") + } + if !loader.initMapsCalled() { + t.Error("Failed to initialize maps") + } + + // Should unload map+provider + // watcher.SendDeletion("Test1") + watcher.Deletions <- "Test1" + if !loader.unloadCalled() { + t.Error("Failed to unload old app") + } +} + +func TestGetMapNames(t *testing.T) { + app := source.App{ + Maps: []provider.Map{ + {Name: "First Map"}, + {Name: "Second Map"}, + }, + } + expected := []string{"First Map", "Second Map"} + names := getMapNames(app) + if !reflect.DeepEqual(expected, names) { + t.Errorf("Expected map names %v; found %v", expected, names) + } +} + +func TestGetProviderNames(t *testing.T) { + var ( + app source.App + names []string + expected []string + ) + + // Happy path + app = source.App{ + Providers: []env.Dict{ + {"name": "First Provider"}, + {"name": "Second Provider"}, + }, + } + expected = []string{"First Provider", "Second Provider"} + names = getProviderNames(app) + if !reflect.DeepEqual(expected, names) { + t.Errorf("Expected provider names %v; found %v", expected, names) + } + + // Error + app = source.App{ + Providers: []env.Dict{ + {}, + {"name": "Second Provider"}, + }, + } + expected = []string{"Second Provider"} + names = getProviderNames(app) + if !reflect.DeepEqual(expected, names) { + t.Errorf("Expected provider names %v; found %v", expected, names) + } +} diff --git a/config/source/file.go b/config/source/file.go index 1f01fd406..f2aa2b72b 100644 --- a/config/source/file.go +++ b/config/source/file.go @@ -17,11 +17,7 @@ type FileConfigSource struct { dir string } -func (s *FileConfigSource) Type() string { - return "file" -} - -func (s *FileConfigSource) Init(options env.Dict, baseDir string) error { +func (s *FileConfigSource) init(options env.Dict, baseDir string) error { var err error dir, err := options.String("dir", nil) if err != nil { @@ -37,6 +33,10 @@ func (s 
*FileConfigSource) Init(options env.Dict, baseDir string) error { return nil } +func (s *FileConfigSource) Type() string { + return "file" +} + // LoadAndWatch will read all the files in the configured directory and then keep watching the directory for changes. func (s *FileConfigSource) LoadAndWatch(ctx context.Context) (ConfigWatcher, error) { appWatcher := ConfigWatcher{ @@ -109,6 +109,7 @@ func (s *FileConfigSource) LoadAndWatch(ctx context.Context) (ConfigWatcher, err case <-ctx.Done(): log.Info("Exiting watcher...") + appWatcher.Close() return } } diff --git a/config/source/file_test.go b/config/source/file_test.go new file mode 100644 index 000000000..b0020d0c0 --- /dev/null +++ b/config/source/file_test.go @@ -0,0 +1,184 @@ +package source + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/go-spatial/tegola/internal/env" +) + +func TestFileConfigSourceInit(t *testing.T) { + var ( + src FileConfigSource + err error + ) + + src = FileConfigSource{} + err = src.init(env.Dict{}, "") + if err == nil { + t.Error("init() should return an error if no dir provided; no error returned.") + } + + absDir := "/tmp/configs" + src = FileConfigSource{} + src.init(env.Dict{"dir": absDir}, "/opt") + if src.dir != absDir { + t.Errorf("init() should preserve absolute path %s; found %s instead.", absDir, src.dir) + } + + relDir := "configs" + src = FileConfigSource{} + src.init(env.Dict{"dir": relDir}, "/root") + joined := filepath.Join("/root", relDir) + if src.dir != joined { + t.Errorf("init() should place relative path under given basedir; expected %s, found %s.", joined, src.dir) + } +} + +func TestFileConfigSourceLoadAndWatch(t *testing.T) { + var ( + src FileConfigSource + watcher ConfigWatcher + dir string + ctx context.Context + err error + ) + + dir, _ = os.MkdirTemp("", "tegolaapps") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Should error if directory not readable. + src = FileConfigSource{} + src.init(env.Dict{"dir": filepath.Join(dir, "nonexistent")}, "") + watcher, err = src.LoadAndWatch(ctx) + if err == nil { + t.Error("LoadAndWatch() should error if directory doesn't exist; no error returned.") + } + + // Should load files already present in directory. + err = createFile(App{}, filepath.Join(dir, "app1.toml")) + if err != nil { + t.Errorf("Could not create an app config file. %s", err) + return + } + src = FileConfigSource{} + src.init(env.Dict{"dir": dir}, "") + watcher, err = src.LoadAndWatch(ctx) + if err != nil { + t.Errorf("No error expected from LoadAndWatch(): returned %s", err) + return + } + + updates := readAllUpdates(watcher.Updates) + if len(updates) != 1 || updates[0].Key != filepath.Join(dir, "app1.toml") { + t.Errorf("Failed reading preexisting files: len=%d updates=%v", len(updates), updates) + } + + // Should detect new files added to directory. + createFile(App{}, filepath.Join(dir, "app2.toml")) + createFile(App{}, filepath.Join(dir, "app3.toml")) + updates = readAllUpdates(watcher.Updates) + if len(updates) != 2 || updates[0].Key != filepath.Join(dir, "app2.toml") || updates[1].Key != filepath.Join(dir, "app3.toml") { + t.Errorf("Failed reading new files: len=%d updates=%v", len(updates), updates) + } + + // Should detect files removed from directory. 
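Taken together with the source package changes below, consuming a config source from the outside looks roughly like this. It is a standalone, hedged sketch of the same loop cmd/tegola runs in watchAppUpdates, not code from this patch; the directory and base dir are made up.

package example // illustrative consumer only, not part of this patch

import (
	"context"

	"github.com/go-spatial/tegola/config/source"
	"github.com/go-spatial/tegola/internal/env"
	"github.com/go-spatial/tegola/internal/log"
)

func watchApps(ctx context.Context) error {
	src, err := source.InitSource("file", env.Dict{"dir": "apps"}, "/etc/tegola")
	if err != nil {
		return err
	}

	watcher, err := src.LoadAndWatch(ctx)
	if err != nil {
		return err
	}

	for {
		select {
		case app, ok := <-watcher.Updates:
			if !ok { // both channels are closed once the context is cancelled
				return nil
			}
			log.Infof("would (re)load providers and maps from %s", app.Key)
		case key, ok := <-watcher.Deletions:
			if !ok {
				return nil
			}
			log.Infof("would unload app %s", key)
		case <-ctx.Done():
			return nil
		}
	}
}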
+ os.Remove(filepath.Join(dir, "app2.toml")) + os.Remove(filepath.Join(dir, "app1.toml")) + deletions := readAllDeletions(watcher.Deletions) + if len(deletions) != 2 || !contains(deletions, filepath.Join(dir, "app2.toml")) || !contains(deletions, filepath.Join(dir, "app1.toml")) { + t.Errorf("Failed detecting deletions: len=%d deletions=%v", len(deletions), deletions) + } +} + +func TestFileConfigSourceLoadAndWatchShouldExitOnDone(t *testing.T) { + dir, _ := os.MkdirTemp("", "tegolaapps") + + src := FileConfigSource{} + src.init(env.Dict{"dir": dir}, "") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + watcher, err := src.LoadAndWatch(ctx) + if err != nil { + t.Errorf("No error expected from LoadAndWatch(): returned %s", err) + return + } + + cancel() + select { + case <-watcher.Updates: + // do nothing + case <-time.After(time.Millisecond): + t.Error("Updates channel should be closed, but is still open.") + } + + select { + case <-watcher.Deletions: + // do nothing + case <-time.After(time.Millisecond): + t.Error("Deletions channel should be closed, but is still open.") + } +} + +func createFile(app App, filename string) error { + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + err = toml.NewEncoder(f).Encode(app) + return err +} + +func readAllUpdates(updates chan App) []App { + apps := []App{} + + for { + select { + case app, ok := <-updates: + if !ok { + return apps + } + + apps = append(apps, app) + + case <-time.After(10 * time.Millisecond): + return apps + } + } +} + +func readAllDeletions(deletions chan string) []string { + keys := []string{} + + for { + select { + case key, ok := <-deletions: + if !ok { + return keys + } + + keys = append(keys, key) + + case <-time.After(10 * time.Millisecond): + return keys + } + } +} + +func contains(vals []string, expected string) bool { + for _, str := range vals { + if str == expected { + return true + } + } + + return false +} diff --git a/config/source/source.go b/config/source/source.go index 5e0e5d408..40a974974 100644 --- a/config/source/source.go +++ b/config/source/source.go @@ -22,16 +22,22 @@ type ConfigSource interface { LoadAndWatch(ctx context.Context) (ConfigWatcher, error) } +// ConfigWatcher allows watching for App updates (new and changes) and deletions. 
type ConfigWatcher struct { Updates chan App Deletions chan string } +func (c ConfigWatcher) Close() { + close(c.Updates) + close(c.Deletions) +} + func InitSource(sourceType string, options env.Dict, baseDir string) (ConfigSource, error) { switch sourceType { case "file": src := FileConfigSource{} - err := src.Init(options, baseDir) + err := src.init(options, baseDir) return &src, err default: diff --git a/config/source/source_test.go b/config/source/source_test.go new file mode 100644 index 000000000..bd43d4e6e --- /dev/null +++ b/config/source/source_test.go @@ -0,0 +1,95 @@ +package source + +import ( + "strings" + "testing" + + "github.com/go-spatial/tegola/internal/env" +) + +func TestInitSource(t *testing.T) { + var ( + src ConfigSource + err error + ) + + _, err = InitSource("invalidtype", env.Dict{}, "") + if err == nil { + t.Error("InitSource should error if invalid source type provided; no error returned.") + } + + _, err = InitSource("file", env.Dict{}, "") + if err == nil { + t.Error("InitSource should return error from underlying source type (file) if no directory provided.") + } + + src, err = InitSource("file", env.Dict{"dir": "config"}, "/tmp") + if err != nil { + t.Errorf("Unexpected error from InitSource: %s", err) + } + + if src.Type() != "file" { + t.Errorf("Expected source type %s, found %s", "file", src.Type()) + } +} + +func TestParseApp(t *testing.T) { + conf := ` + [[providers]] + name = "test_postgis" + type = "mvt_postgis" + uri = "postgres:/username:password@127.0.0.1:5423/some_db" + srid = 3857 + + [[providers.layers]] + name = "dynamic" + sql = "id, ST_AsMVTGeom(wkb_geometry, !BBOX!) as geom FROM some_table WHERE wkb_geometry && !BBOX!" + geometry_type = "polygon" + + [[maps]] + name = "stuff" + + [[maps.layers]] + provider_layer = "test_postgis.dynamic" + min_zoom = 2 + max_zoom = 18 + + [[maps.params]] + name = "param" + token = "!PaRaM!" + ` + + r := strings.NewReader(conf) + + // Should load TOML file. + app, err := parseApp(r, "some_key") + if err != nil { + t.Errorf("Unexpected error from parseApp: %s", err) + return + } + + if app.Key != "some_key" { + t.Errorf("Expected app key \"some_key\", found %s", app.Key) + } + + if len(app.Providers) != 1 { + t.Error("Failed to load providers from TOML") + } else { + name, err := app.Providers[0].String("name", nil) + if err != nil || name != "test_postgis" { + t.Errorf("Expected provider name \"test_postgis\", found %s (err=%s)", name, err) + } + } + + if len(app.Maps) != 1 { + t.Error("Failed to load maps from TOML") + } else if app.Maps[0].Name != "stuff" { + t.Errorf("Expected map name \"stuff\", found %s", app.Maps[0].Name) + } + + // Should normalize map params. + token := "!PARAM!" + if len(app.Maps) == 1 && app.Maps[0].Parameters[0].Token != token { + t.Errorf("Expected map query param with token %s, found %s.", token, app.Maps[0].Parameters[0].Token) + } +} From 2ecd334b687e211097901b68ac29afd3e8bfc1b0 Mon Sep 17 00:00:00 2001 From: Joshua Chamberlain Date: Mon, 25 Sep 2023 11:05:48 -0700 Subject: [PATCH 6/6] Fix bug where misnamed provider instances aren't unloaded --- provider/provider.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/provider/provider.go b/provider/provider.go index dde50ff6b..df9f0198f 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -243,17 +243,19 @@ func Drivers(types ...providerType) (l []string) { // For function returns a configure provider of the given type; The provider may be a mvt provider or // a std provider. 
The correct entry in TilerUnion will not be nil. If there is an error both entries // will be nil. -func For(name string, config dict.Dicter, maps []Map, namespace string) (val TilerUnion, err error) { +func For(ptype string, config dict.Dicter, maps []Map, namespace string) (val TilerUnion, err error) { var ( driversList = Drivers() ) if providers == nil { return val, ErrUnknownProvider{KnownProviders: driversList} } - p, ok := providers[name] + p, ok := providers[ptype] if !ok { - return val, ErrUnknownProvider{KnownProviders: driversList, Name: name} + return val, ErrUnknownProvider{KnownProviders: driversList, Name: ptype} } + + name, _ := config.String("name", nil) if p.init != nil { val.Std, err = p.init(config, maps) recordInstance(name, namespace, val)