diff --git a/Makefile b/Makefile index 2afab068e..7906b684a 100644 --- a/Makefile +++ b/Makefile @@ -510,6 +510,7 @@ run-blackbox-cloud-ci: check-blackbox-prerequisites check-awslocal binary $(BATS echo running cloud CI bats tests; \ $(BATS) $(BATS_FLAGS) test/blackbox/cloud_only.bats $(BATS) $(BATS_FLAGS) test/blackbox/sync_cloud.bats + $(BATS) $(BATS_FLAGS) test/blackbox/redis_s3.bats .PHONY: run-blackbox-dedupe-nightly run-blackbox-dedupe-nightly: check-blackbox-prerequisites check-awslocal binary binary-minimal diff --git a/examples/config-redis.json b/examples/config-redis.json new file mode 100644 index 000000000..cd15edf89 --- /dev/null +++ b/examples/config-redis.json @@ -0,0 +1,37 @@ +{ + "distSpecVersion": "1.1.0", + "storage": { + "dedupe": true, + "gc": true, + "rootDirectory": "/tmp/zot", + "cacheDriver": { + "name": "redis", + "rootDir": "/tmp/zot/_redis", + "url": "redis://localhost:6379" + }, + "storageDriver": { + "name": "s3", + "rootdirectory": "/zot", + "region": "us-east-2", + "regionendpoint": "localhost:4566", + "bucket": "zot-storage", + "secure": false, + "skipverify": false + } + }, + "http": { + "address": "0.0.0.0", + "port": "8484" + }, + "log": { + "level": "debug" + }, + "extensions": { + "ui": { + "enable": true + }, + "search": { + "enable": true + } + } +} diff --git a/go.mod b/go.mod index c899f4ec0..e5edf9649 100644 --- a/go.mod +++ b/go.mod @@ -45,6 +45,7 @@ require ( ) require ( + github.com/alicebob/miniredis/v2 v2.33.0 github.com/aquasecurity/trivy v0.53.0 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.1 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.1 @@ -58,6 +59,7 @@ require ( github.com/notaryproject/notation-go v1.1.1 github.com/opencontainers/distribution-spec/specs-go v0.0.0-20240201174943-0f98d91a0afe github.com/project-zot/mockoidc v0.0.0-20240610203808-d69d9e02020a + github.com/redis/go-redis/v9 v9.5.1 github.com/sigstore/cosign/v2 v2.2.4 github.com/swaggo/http-swagger v1.3.4 github.com/zitadel/oidc v1.13.5 @@ -91,6 +93,7 @@ require ( github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/hcsshim v0.12.3 // indirect github.com/alecthomas/chroma v0.10.0 // indirect + github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/aquasecurity/table v1.8.0 // indirect @@ -209,6 +212,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect + github.com/yuin/gopher-lua v1.1.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect go.opentelemetry.io/otel/metric v1.27.0 // indirect diff --git a/go.sum b/go.sum index 1a60f3f25..a1963cf4f 100644 --- a/go.sum +++ b/go.sum @@ -471,6 +471,10 @@ github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Pt github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod 
h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= @@ -1306,6 +1310,8 @@ github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62 github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8= +github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= diff --git a/pkg/cli/server/root.go b/pkg/cli/server/root.go index 46f16b0d7..38c26a2c1 100644 --- a/pkg/cli/server/root.go +++ b/pkg/cli/server/root.go @@ -262,7 +262,8 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error { } // unsupported cache driver - if cfg.Storage.CacheDriver["name"] != storageConstants.DynamoDBDriverName { + if cfg.Storage.CacheDriver["name"] != storageConstants.DynamoDBDriverName && + cfg.Storage.CacheDriver["name"] != storageConstants.RedisDriverName { log.Error().Err(zerr.ErrBadConfig). Interface("cacheDriver", cfg.Storage.CacheDriver["name"]).Msg("invalid cache config, unsupported cache driver") @@ -272,8 +273,8 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error { if !cfg.Storage.RemoteCache && cfg.Storage.CacheDriver != nil { log.Warn().Err(zerr.ErrBadConfig).Str("directory", cfg.Storage.RootDirectory). 
- Msg("invalid cache config, remoteCache set to false but cacheDriver config (remote caching) provided for directory" + - "will ignore and use local caching") + Msg("invalid cache config, remoteCache set to false but cacheDriver config (remote caching) provided for " + + "directory will ignore and use local caching") } // subpaths diff --git a/pkg/meta/meta.go b/pkg/meta/meta.go index c4ab4b533..cec443e8f 100644 --- a/pkg/meta/meta.go +++ b/pkg/meta/meta.go @@ -2,6 +2,7 @@ package meta import ( "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/redis/go-redis/v9" "go.etcd.io/bbolt" "zotregistry.dev/zot/errors" @@ -9,19 +10,31 @@ import ( "zotregistry.dev/zot/pkg/log" "zotregistry.dev/zot/pkg/meta/boltdb" mdynamodb "zotregistry.dev/zot/pkg/meta/dynamodb" + "zotregistry.dev/zot/pkg/meta/redisdb" mTypes "zotregistry.dev/zot/pkg/meta/types" + sconstants "zotregistry.dev/zot/pkg/storage/constants" ) func New(storageConfig config.StorageConfig, log log.Logger) (mTypes.MetaDB, error) { if storageConfig.RemoteCache { - dynamoParams := getDynamoParams(storageConfig.CacheDriver, log) + if storageConfig.CacheDriver["name"] == sconstants.DynamoDBDriverName { + dynamoParams := getDynamoParams(storageConfig.CacheDriver, log) - client, err := mdynamodb.GetDynamoClient(dynamoParams) - if err != nil { + client, err := mdynamodb.GetDynamoClient(dynamoParams) + if err != nil { + return nil, err + } + + return Create(sconstants.DynamoDBDriverName, client, dynamoParams, log) //nolint:contextcheck + } + // go-redis supports connecting via the redis uri specification (more convenient than parameter parsing) + redisURL := getRedisURL(storageConfig.CacheDriver, log) + client, err := redisdb.GetRedisClient(redisURL) + if err != nil { //nolint:wsl return nil, err } - return Create("dynamodb", client, dynamoParams, log) //nolint:contextcheck + return Create(sconstants.RedisDriverName, client, &redisdb.RedisDB{Client: client}, log) //nolint:contextcheck } params := boltdb.DBParameters{} @@ -51,6 +64,18 @@ func Create(dbtype string, dbDriver, parameters interface{}, log log.Logger, //n return boltdb.New(properDriver, log) } + case "redis": + { + properDriver, ok := dbDriver.(*redis.Client) + if !ok { + log.Error().Err(errors.ErrTypeAssertionFailed). 
+				Msgf("failed to cast type, expected type '%T' but got '%T'", &redis.Client{}, dbDriver)
+
+				return nil, errors.ErrTypeAssertionFailed
+			}
+
+			return redisdb.New(properDriver, log)
+		}
 	case "dynamodb":
 		{
 			properDriver, ok := dbDriver.(*dynamodb.Client)
@@ -122,6 +147,16 @@ func getDynamoParams(cacheDriverConfig map[string]interface{}, log log.Logger) m
 	}
 }
 
+func getRedisURL(cacheDriverConfig map[string]interface{}, log log.Logger) string {
+	url, ok := toStringIfOk(cacheDriverConfig, "url", log)
+
+	if !ok {
+		log.Panic().Msg("redis parameters are not specified correctly, can't proceed")
+	}
+
+	return url
+}
+
 func toStringIfOk(cacheDriverConfig map[string]interface{}, param string, log log.Logger) (string, bool) {
 	val, ok := cacheDriverConfig[param]
 
diff --git a/pkg/meta/redisdb/redis.go b/pkg/meta/redisdb/redis.go
new file mode 100644
index 000000000..76f3d6778
--- /dev/null
+++ b/pkg/meta/redisdb/redis.go
@@ -0,0 +1,262 @@
+package redisdb
+
+import (
+	"context"
+	"time"
+
+	godigest "github.com/opencontainers/go-digest"
+	"github.com/redis/go-redis/v9"
+
+	"zotregistry.dev/zot/pkg/log"
+	mTypes "zotregistry.dev/zot/pkg/meta/types"
+)
+
+type RedisDB struct {
+	Client        *redis.Client
+	imgTrustStore mTypes.ImageTrustStore
+	Log           log.Logger
+}
+
+func New(client *redis.Client, log log.Logger) (*RedisDB, error) {
+	redisWrapper := RedisDB{
+		Client:        client,
+		imgTrustStore: nil,
+		Log:           log,
+	}
+
+	// Using the provided client, create the Redis-backed MetaDB wrapper.
+	return &redisWrapper, nil
+}
+
+func GetRedisClient(url string) (*redis.Client, error) {
+	opts, err := redis.ParseURL(url)
+	if err != nil {
+		return nil, err
+	}
+
+	return redis.NewClient(opts), nil
+}
+
+// GetStarredRepos returns starred repos and takes current user in consideration.
+func (rc *RedisDB) GetStarredRepos(ctx context.Context) ([]string, error) {
+	return []string{}, nil
+}
+
+// GetBookmarkedRepos returns bookmarked repos and takes current user in consideration.
+func (rc *RedisDB) GetBookmarkedRepos(ctx context.Context) ([]string, error) {
+	return []string{}, nil
+}
+
+// ToggleStarRepo adds/removes stars on repos.
+func (rc *RedisDB) ToggleStarRepo(ctx context.Context, reponame string) (mTypes.ToggleState, error) {
+	return 0, nil
+}
+
+// ToggleBookmarkRepo adds/removes bookmarks on repos.
+func (rc *RedisDB) ToggleBookmarkRepo(ctx context.Context, reponame string) (mTypes.ToggleState, error) {
+	return 0, nil
+}
+
+// UserDB profile/api key CRUD.
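+// NOTE: the user-data, API-key, search, and repo/image metadata methods below
+// are currently stubs that return empty values; Redis-backed persistence for
+// the MetaDB interface is not yet implemented in this change.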
+func (rc *RedisDB) GetUserData(ctx context.Context) (mTypes.UserData, error) { + return mTypes.UserData{}, nil +} + +func (rc *RedisDB) SetUserData(ctx context.Context, userData mTypes.UserData) error { + return nil +} + +func (rc *RedisDB) SetUserGroups(ctx context.Context, groups []string) error { + return nil +} + +func (rc *RedisDB) GetUserGroups(ctx context.Context) ([]string, error) { + return []string{}, nil +} + +func (rc *RedisDB) DeleteUserData(ctx context.Context) error { + return nil +} + +func (rc *RedisDB) GetUserAPIKeyInfo(hashedKey string) (string, error) { + return "", nil +} + +func (rc *RedisDB) GetUserAPIKeys(ctx context.Context) ([]mTypes.APIKeyDetails, error) { + return []mTypes.APIKeyDetails{}, nil +} + +func (rc *RedisDB) AddUserAPIKey(ctx context.Context, hashedKey string, apiKeyDetails *mTypes.APIKeyDetails) error { + return nil +} + +func (rc *RedisDB) IsAPIKeyExpired(ctx context.Context, hashedKey string) (bool, error) { + return false, nil +} + +func (rc *RedisDB) UpdateUserAPIKeyLastUsed(ctx context.Context, hashedKey string) error { + return nil +} + +func (rc *RedisDB) DeleteUserAPIKey(ctx context.Context, id string) error { + return nil +} + +func (rc *RedisDB) SetImageMeta(digest godigest.Digest, imageMeta mTypes.ImageMeta) error { + return nil +} + +// SetRepoReference sets the given image data to the repo metadata. +func (rc *RedisDB) SetRepoReference(ctx context.Context, repo string, + reference string, imageMeta mTypes.ImageMeta, +) error { + return nil +} + +// SearchRepos searches for repos given a search string. +func (rc *RedisDB) SearchRepos(ctx context.Context, searchText string) ([]mTypes.RepoMeta, error) { + return []mTypes.RepoMeta{}, nil +} + +// SearchTags searches for images(repo:tag) given a search string. +func (rc *RedisDB) SearchTags(ctx context.Context, searchText string) ([]mTypes.FullImageMeta, error) { + return []mTypes.FullImageMeta{}, nil +} + +// FilterTags filters for images given a filter function. +func (rc *RedisDB) FilterTags(ctx context.Context, filterRepoTag mTypes.FilterRepoTagFunc, + filterFunc mTypes.FilterFunc, +) ([]mTypes.FullImageMeta, error) { + return []mTypes.FullImageMeta{}, nil +} + +// FilterRepos filters for repos given a filter function. +func (rc *RedisDB) FilterRepos(ctx context.Context, rankName mTypes.FilterRepoNameFunc, + filterFunc mTypes.FilterFullRepoFunc, +) ([]mTypes.RepoMeta, error) { + return []mTypes.RepoMeta{}, nil +} + +// GetRepoMeta returns the full information about a repo. +func (rc *RedisDB) GetRepoMeta(ctx context.Context, repo string) (mTypes.RepoMeta, error) { + return mTypes.RepoMeta{}, nil +} + +// GetFullImageMeta returns the full information about an image. +func (rc *RedisDB) GetFullImageMeta(ctx context.Context, repo string, tag string) (mTypes.FullImageMeta, error) { + return mTypes.FullImageMeta{}, nil +} + +// GetImageMeta returns the raw information about an image. +func (rc *RedisDB) GetImageMeta(digest godigest.Digest) (mTypes.ImageMeta, error) { + return mTypes.ImageMeta{}, nil +} + +// GetMultipleRepoMeta returns a list of all repos that match the given filter function. +func (rc *RedisDB) GetMultipleRepoMeta(ctx context.Context, filter func(repoMeta mTypes.RepoMeta) bool) ( + []mTypes.RepoMeta, error, +) { + return []mTypes.RepoMeta{}, nil +} + +// AddManifestSignature adds signature metadata to a given manifest in the database. 
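+// NOTE: AddManifestSignature and the signature, statistics, and referrer
+// methods that follow are likewise no-op stubs at this stage; they do not yet
+// read from or write to Redis.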
+func (rc *RedisDB) AddManifestSignature(repo string, signedManifestDigest godigest.Digest, + sm mTypes.SignatureMetadata, +) error { + return nil +} + +// DeleteSignature deletes signature metadata to a given manifest from the database. +func (rc *RedisDB) DeleteSignature(repo string, signedManifestDigest godigest.Digest, + sigMeta mTypes.SignatureMetadata, +) error { + return nil +} + +// UpdateSignaturesValidity checks and updates signatures validity of a given manifest. +func (rc *RedisDB) UpdateSignaturesValidity(ctx context.Context, repo string, manifestDigest godigest.Digest) error { + return nil +} + +// IncrementRepoStars adds 1 to the star count of an image. +func (rc *RedisDB) IncrementRepoStars(repo string) error { + return nil +} + +// DecrementRepoStars subtracts 1 from the star count of an image. +func (rc *RedisDB) DecrementRepoStars(repo string) error { + return nil +} + +// SetRepoMeta returns RepoMetadata of a repo from the database. +func (rc *RedisDB) SetRepoMeta(repo string, repoMeta mTypes.RepoMeta) error { + return nil +} + +// DeleteRepoMeta. +func (rc *RedisDB) DeleteRepoMeta(repo string) error { + return nil +} + +// GetReferrersInfo returns a list of for all referrers of the given digest that match one of the +// artifact types. +func (rc *RedisDB) GetReferrersInfo(repo string, referredDigest godigest.Digest, + artifactTypes []string, +) ([]mTypes.ReferrerInfo, error) { + return []mTypes.ReferrerInfo{}, nil +} + +// UpdateStatsOnDownload adds 1 to the download count of an image and sets the timestamp of download. +func (rc *RedisDB) UpdateStatsOnDownload(repo string, reference string) error { + return nil +} + +// FilterImageMeta returns the image data for the given digests. +func (rc *RedisDB) FilterImageMeta(ctx context.Context, + digests []string, +) (map[mTypes.ImageDigest]mTypes.ImageMeta, error) { + return map[mTypes.ImageDigest]mTypes.ImageMeta{}, nil +} + +/* + RemoveRepoReference removes the tag from RepoMetadata if the reference is a tag, + +it also removes its corresponding digest from Statistics, Signatures and Referrers if there are no tags +pointing to it. +If the reference is a digest then it will remove the digest from Statistics, Signatures and Referrers only +if there are no tags pointing to the digest, otherwise it's noop. +*/ +func (rc *RedisDB) RemoveRepoReference(repo, reference string, manifestDigest godigest.Digest) error { + return nil +} + +// ResetRepoReferences resets all layout specific data (tags, signatures, referrers, etc.) but keep user and image +// specific metadata such as star count, downloads other statistics. +func (rc *RedisDB) ResetRepoReferences(repo string) error { + return nil +} + +func (rc *RedisDB) GetRepoLastUpdated(repo string) time.Time { + return time.Now() +} + +func (rc *RedisDB) GetAllRepoNames() ([]string, error) { + return []string{}, nil +} + +// ResetDB will delete all data in the DB. 
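+// NOTE: ResetDB and PatchDB are currently no-ops for the Redis metadata backend.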
+func (rc *RedisDB) ResetDB() error {
+	return nil
+}
+
+func (rc *RedisDB) PatchDB() error {
+	return nil
+}
+
+func (rc *RedisDB) ImageTrustStore() mTypes.ImageTrustStore {
+	return rc.imgTrustStore
+}
+
+func (rc *RedisDB) SetImageTrustStore(imgTrustStore mTypes.ImageTrustStore) {
+	rc.imgTrustStore = imgTrustStore
+}
diff --git a/pkg/storage/cache.go b/pkg/storage/cache.go
index 8722671e7..e60ba3487 100644
--- a/pkg/storage/cache.go
+++ b/pkg/storage/cache.go
@@ -32,19 +32,32 @@ func CreateCacheDatabaseDriver(storageConfig config.StorageConfig, log zlog.Logg
 			return nil, nil
 		}
 
-		if name != constants.DynamoDBDriverName {
+		if name != constants.DynamoDBDriverName &&
+			name != constants.RedisDriverName {
 			log.Warn().Str("driver", name).Msg("remote cache driver unsupported!")
 
 			return nil, nil
 		}
 
-		// dynamodb
-		dynamoParams := cache.DynamoDBDriverParameters{}
-		dynamoParams.Endpoint, _ = storageConfig.CacheDriver["endpoint"].(string)
-		dynamoParams.Region, _ = storageConfig.CacheDriver["region"].(string)
-		dynamoParams.TableName, _ = storageConfig.CacheDriver["cachetablename"].(string)
+		if name == constants.DynamoDBDriverName {
+			// dynamodb
+			dynamoParams := cache.DynamoDBDriverParameters{}
+			dynamoParams.Endpoint, _ = storageConfig.CacheDriver["endpoint"].(string)
+			dynamoParams.Region, _ = storageConfig.CacheDriver["region"].(string)
+			dynamoParams.TableName, _ = storageConfig.CacheDriver["cachetablename"].(string)
 
-		return Create("dynamodb", dynamoParams, log)
+			return Create(name, dynamoParams, log)
+		}
+
+		if name == constants.RedisDriverName {
+			// redis
+			redisParams := cache.RedisDriverParameters{}
+			redisParams.RootDir, _ = storageConfig.CacheDriver["rootDir"].(string)
+			redisParams.URL, _ = storageConfig.CacheDriver["url"].(string)
+			redisParams.UseRelPaths = getUseRelPaths(&storageConfig)
+
+			return Create(name, redisParams, log)
+		}
 	}
 
 	return nil, nil
@@ -60,6 +73,10 @@ func Create(dbtype string, parameters interface{}, log zlog.Logger) (cache.Cache
 		{
 			return cache.NewDynamoDBCache(parameters, log)
 		}
+	case "redis":
+		{
+			return cache.NewRedisCache(parameters, log)
+		}
 	default:
 		{
 			return nil, zerr.ErrBadConfig
diff --git a/pkg/storage/cache/redis.go b/pkg/storage/cache/redis.go
new file mode 100644
index 000000000..c181a0abb
--- /dev/null
+++ b/pkg/storage/cache/redis.go
@@ -0,0 +1,244 @@
+package cache
+
+import (
+	"context"
+	goerrors "errors"
+	"path/filepath"
+	"strings"
+
+	godigest "github.com/opencontainers/go-digest"
+	"github.com/redis/go-redis/v9"
+
+	zerr "zotregistry.dev/zot/errors"
+	zlog "zotregistry.dev/zot/pkg/log"
+	"zotregistry.dev/zot/pkg/storage/constants"
+)
+
+type RedisDriver struct {
+	rootDir     string
+	db          redis.UniversalClient
+	log         zlog.Logger
+	useRelPaths bool // whether or not to use relative paths, should be true for filesystem and false for s3
+}
+
+type RedisDriverParameters struct {
+	RootDir     string
+	URL         string // https://github.com/redis/redis-specifications/blob/master/uri/redis.txt
+	UseRelPaths bool
+}
+
+func NewRedisCache(parameters interface{}, log zlog.Logger) (*RedisDriver, error) {
+	properParameters, ok := parameters.(RedisDriverParameters)
+	if !ok {
+		log.Error().Err(zerr.ErrTypeAssertionFailed).Msgf("failed to cast type, expected type '%T' but got '%T'",
+			RedisDriverParameters{}, parameters)
+
+		return nil, zerr.ErrTypeAssertionFailed
+	}
+
+	connOpts, err := redis.ParseURL(properParameters.URL)
+	if err != nil {
+		log.Error().Err(err).Str("url", properParameters.URL).Msg("failed to parse redis URL")
+
+		return nil, err
+	}
+
+	cacheDB :=
redis.NewClient(connOpts) + + if _, err := cacheDB.Ping(context.Background()).Result(); err != nil { + log.Error().Err(err).Msg("failed to ping redis cache") + + return nil, err + } + + driver := &RedisDriver{ + db: cacheDB, + log: log, + rootDir: properParameters.RootDir, + useRelPaths: properParameters.UseRelPaths, + } + + return driver, nil +} + +func join(xs ...string) string { + return "zot:" + strings.Join(xs, ":") +} + +func (d *RedisDriver) UsesRelativePaths() bool { + return d.useRelPaths +} + +func (d *RedisDriver) Name() string { + return "redis" +} + +func (d *RedisDriver) PutBlob(digest godigest.Digest, path string) error { + ctx := context.TODO() + + if path == "" { + d.log.Error().Err(zerr.ErrEmptyValue).Str("digest", digest.String()).Msg("failed to provide non-empty path") + + return zerr.ErrEmptyValue + } + + // use only relative (to rootDir) paths on blobs + var err error + if d.useRelPaths { + path, err = filepath.Rel(d.rootDir, path) + if err != nil { + d.log.Error().Err(err).Str("path", path).Msg("failed to get relative path") + } + } + + if len(path) == 0 { + return zerr.ErrEmptyValue + } + + // see if the blob digest exists. + exists, err := d.db.HExists(ctx, join(constants.BlobsCache, constants.OriginalBucket), digest.String()).Result() + if err != nil { + return err + } + + if _, err := d.db.TxPipelined(ctx, func(txrp redis.Pipeliner) error { + if !exists { + // add the key value pair [digest, path] to blobs:origin if not + // exist already. the path becomes the canonical blob we do this in + // a transaction to make sure that if something is in the set, then + // it is guaranteed to always have a path note that there is a + // race, but the worst case is that a different origin path that is + // still valid is used. + if err := txrp.HSet(ctx, join(constants.BlobsCache, constants.OriginalBucket), + digest.String(), path).Err(); err != nil { + d.log.Error().Err(err).Str("hset", join(constants.BlobsCache, constants.OriginalBucket)). + Str("value", path).Msg("unable to put record") + + return err + } + } + // add path to the set of paths which the digest represents + if err := d.db.SAdd(ctx, join(constants.BlobsCache, constants.DuplicatesBucket, + digest.String()), path).Err(); err != nil { + d.log.Error().Err(err).Str("sadd", join(constants.BlobsCache, constants.DuplicatesBucket, digest.String())). + Str("value", path).Msg("unable to put record") + + return err + } + + return nil + }); err != nil { + return err + } + + return nil +} + +func (d *RedisDriver) GetBlob(digest godigest.Digest) (string, error) { + ctx := context.TODO() + + path, err := d.db.HGet(ctx, join(constants.BlobsCache, constants.OriginalBucket), digest.String()).Result() + if err != nil { + if goerrors.Is(err, redis.Nil) { + return "", zerr.ErrCacheMiss + } + + d.log.Error().Err(err).Str("hget", join(constants.BlobsCache, constants.OriginalBucket)). + Str("digest", digest.String()).Msg("unable to get record") + + return "", err + } + + return path, nil +} + +func (d *RedisDriver) HasBlob(digest godigest.Digest, blob string) bool { + ctx := context.TODO() + // see if we are in the set + exists, err := d.db.SIsMember(ctx, join(constants.BlobsCache, constants.DuplicatesBucket, + digest.String()), blob).Result() + if err != nil { + d.log.Error().Err(err).Str("sismember", join(constants.BlobsCache, constants.DuplicatesBucket, digest.String())). + Str("digest", digest.String()).Msg("unable to get record") + + return false + } + + if !exists { + return false + } + + // see if the path entry exists. 
is this actually needed? i guess it doesn't really hurt (it is fast)
+	exists, err = d.db.HExists(ctx, join(constants.BlobsCache, constants.OriginalBucket), digest.String()).Result()
+	if err != nil {
+		d.log.Error().Err(err).Str("hexists", join(constants.BlobsCache, constants.OriginalBucket)).
+			Str("digest", digest.String()).Msg("unable to get record")
+
+		return false
+	}
+
+	if !exists {
+		return false
+	}
+
+	return true
+}
+
+func (d *RedisDriver) DeleteBlob(digest godigest.Digest, path string) error {
+	ctx := context.TODO()
+
+	// use only relative (to rootDir) paths on blobs
+	var err error
+	if d.useRelPaths {
+		path, err = filepath.Rel(d.rootDir, path)
+		if err != nil {
+			d.log.Error().Err(err).Str("path", path).Msg("failed to get relative path")
+		}
+	}
+
+	pathSet := join(constants.BlobsCache, constants.DuplicatesBucket, digest.String())
+
+	// delete path from the set of paths which the digest represents
+	_, err = d.db.SRem(ctx, pathSet, path).Result()
+	if err != nil {
+		d.log.Error().Err(err).Str("srem", pathSet).Str("value", path).Msg("failed to delete record")
+
+		return err
+	}
+
+	currentPath, err := d.GetBlob(digest)
+	if err != nil {
+		return err
+	}
+
+	if currentPath != path {
+		// nothing we need to do, return nil yay
+		return nil
+	}
+
+	// we need to set a new path
+	newPath, err := d.db.SRandMember(ctx, pathSet).Result()
+	if err != nil {
+		if goerrors.Is(err, redis.Nil) {
+			_, err := d.db.HDel(ctx, join(constants.BlobsCache, constants.OriginalBucket), digest.String()).Result()
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		d.log.Error().Err(err).Str("srandmember", pathSet).Msg("failed to get new path")
+
+		return err
+	}
+
+	if _, err := d.db.HSet(ctx, join(constants.BlobsCache, constants.OriginalBucket),
+		digest.String(), newPath).Result(); err != nil {
+		d.log.Error().Err(err).Str("hset", join(constants.BlobsCache, constants.OriginalBucket)).Str("value", newPath).
+			Msg("unable to put record")
+
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/storage/cache/redis_test.go b/pkg/storage/cache/redis_test.go
new file mode 100644
index 000000000..6b90f1599
--- /dev/null
+++ b/pkg/storage/cache/redis_test.go
@@ -0,0 +1,127 @@
+package cache_test
+
+import (
+	"path"
+	"testing"
+
+	"github.com/alicebob/miniredis/v2"
+	.
"github.com/smartystreets/goconvey/convey" + + "zotregistry.dev/zot/errors" + "zotregistry.dev/zot/pkg/log" + "zotregistry.dev/zot/pkg/storage" + "zotregistry.dev/zot/pkg/storage/cache" +) + +func TestRedisCache(t *testing.T) { + miniRedis := miniredis.RunT(t) + + Convey("Make a new cache", t, func() { + dir := t.TempDir() + + log := log.NewLogger("debug", "") + So(log, ShouldNotBeNil) + + _, err := storage.Create("redis", "failTypeAssertion", log) + So(err, ShouldNotBeNil) + + cacheDriver, _ := storage.Create("redis", + cache.RedisDriverParameters{dir, "redis://" + miniRedis.Addr(), true}, log) + So(cacheDriver, ShouldNotBeNil) + + name := cacheDriver.Name() + So(name, ShouldEqual, "redis") + + val, err := cacheDriver.GetBlob("key") + So(err, ShouldEqual, errors.ErrCacheMiss) + So(val, ShouldBeEmpty) + + exists := cacheDriver.HasBlob("key", "value") + So(exists, ShouldBeFalse) + + err = cacheDriver.PutBlob("key", path.Join(dir, "value")) + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("key", "value") + So(err, ShouldNotBeNil) + + exists = cacheDriver.HasBlob("key", "value") + So(exists, ShouldBeTrue) + + val, err = cacheDriver.GetBlob("key") + So(err, ShouldBeNil) + So(val, ShouldNotBeEmpty) + + err = cacheDriver.DeleteBlob("bogusKey", "bogusValue") + So(err, ShouldEqual, errors.ErrCacheMiss) + + err = cacheDriver.DeleteBlob("key", "bogusValue") + So(err, ShouldBeNil) + + // try to insert empty path + err = cacheDriver.PutBlob("key", "") + So(err, ShouldNotBeNil) + So(err, ShouldEqual, errors.ErrEmptyValue) + + cacheDriver, _ = storage.Create("redis", + cache.RedisDriverParameters{t.TempDir(), "redis://" + miniRedis.Addr() + "/5", false}, log) + So(cacheDriver, ShouldNotBeNil) + + err = cacheDriver.PutBlob("key1", "originalBlobPath") + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("key1", "duplicateBlobPath") + So(err, ShouldBeNil) + + val, err = cacheDriver.GetBlob("key1") + So(val, ShouldEqual, "originalBlobPath") + So(err, ShouldBeNil) + + err = cacheDriver.DeleteBlob("key1", "duplicateBlobPath") + So(err, ShouldBeNil) + + val, err = cacheDriver.GetBlob("key1") + So(val, ShouldEqual, "originalBlobPath") + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("key1", "duplicateBlobPath") + So(err, ShouldBeNil) + + err = cacheDriver.DeleteBlob("key1", "originalBlobPath") + So(err, ShouldBeNil) + + val, err = cacheDriver.GetBlob("key1") + So(val, ShouldEqual, "duplicateBlobPath") + So(err, ShouldBeNil) + + err = cacheDriver.DeleteBlob("key1", "duplicateBlobPath") + So(err, ShouldBeNil) + + // should be empty + val, err = cacheDriver.GetBlob("key1") + So(err, ShouldNotBeNil) + So(val, ShouldBeEmpty) + + // try to add three same values + err = cacheDriver.PutBlob("key2", "duplicate") + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("key2", "duplicate") + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("key2", "duplicate") + So(err, ShouldBeNil) + + val, err = cacheDriver.GetBlob("key2") + So(val, ShouldEqual, "duplicate") + So(err, ShouldBeNil) + + err = cacheDriver.DeleteBlob("key2", "duplicate") + So(err, ShouldBeNil) + + // should be empty + val, err = cacheDriver.GetBlob("key2") + So(err, ShouldNotBeNil) + So(val, ShouldBeEmpty) + }) +} diff --git a/pkg/storage/constants/constants.go b/pkg/storage/constants/constants.go index 905178bd9..6a714d089 100644 --- a/pkg/storage/constants/constants.go +++ b/pkg/storage/constants/constants.go @@ -19,6 +19,7 @@ const ( DBCacheLockCheckTimeout = 10 * time.Second BoltdbName = "cache" DynamoDBDriverName = "dynamodb" + RedisDriverName = 
"redis" DefaultGCDelay = 1 * time.Hour DefaultRetentionDelay = 24 * time.Hour DefaultGCInterval = 1 * time.Hour diff --git a/test/blackbox/ci.sh b/test/blackbox/ci.sh index 14974440b..48b22aa15 100755 --- a/test/blackbox/ci.sh +++ b/test/blackbox/ci.sh @@ -9,7 +9,7 @@ PATH=$PATH:${SCRIPTPATH}/../../hack/tools/bin tests=("pushpull" "pushpull_authn" "delete_images" "referrers" "metadata" "anonymous_policy" "annotations" "detect_manifest_collision" "cve" "sync" "sync_docker" "sync_replica_cluster" - "scrub" "garbage_collect" "metrics" "metrics_minimal" "multiarch_index") + "scrub" "garbage_collect" "metrics" "metrics_minimal" "multiarch_index" "redis_local") for test in ${tests[*]}; do ${BATS} ${BATS_FLAGS} ${SCRIPTPATH}/${test}.bats > ${test}.log & pids+=($!) diff --git a/test/blackbox/helpers_redis.bash b/test/blackbox/helpers_redis.bash new file mode 100644 index 000000000..460c85ba6 --- /dev/null +++ b/test/blackbox/helpers_redis.bash @@ -0,0 +1,12 @@ + +function redis_start() { + local cname="$1" # container name + local free_port="$2" + docker run -d --name ${cname} -p ${free_port}:6379 redis +} + +function redis_stop() { + local cname="$1" + docker stop ${cname} + docker rm -f ${cname} +} diff --git a/test/blackbox/redis_local.bats b/test/blackbox/redis_local.bats new file mode 100644 index 000000000..54571e6ee --- /dev/null +++ b/test/blackbox/redis_local.bats @@ -0,0 +1,125 @@ +# Note: Intended to be run as "make run-blackbox-tests" or "make run-blackbox-ci" +# Makefile target installs & checks all necessary tooling +# Extra tools that are not covered in Makefile target needs to be added in verify_prerequisites() + +load helpers_zot +load helpers_redis + +function verify_prerequisites() { + if [ ! $(command -v curl) ]; then + echo "you need to install curl as a prerequisite to running the tests" >&3 + return 1 + fi + + if [ ! $(command -v jq) ]; then + echo "you need to install jq as a prerequisite to running the tests" >&3 + return 1 + fi + + if [ ! $(command -v docker) ]; then + echo "you need to install docker as a prerequisite to running the tests" >&3 + return 1 + fi + + return 0 +} + +function setup_file() { + # Verify prerequisites are available + if ! $(verify_prerequisites); then + exit 1 + fi + + # Download test data to folder common for the entire suite, not just this file + skopeo --insecure-policy copy --format=oci docker://ghcr.io/project-zot/golang:1.20 oci:${TEST_DATA_DIR}/golang:1.20 + + # Setup redis server + redis_port=$(get_free_port) + redis_start redis_server_local ${redis_port} + + # Setup zot server + local zot_root_dir=${BATS_FILE_TMPDIR}/zot + local zot_sync_ondemand_config_file=${BATS_FILE_TMPDIR}/zot_sync_ondemand_config.json + zot_port=$(get_free_port) + echo ${zot_port} > ${BATS_FILE_TMPDIR}/zot.port + + mkdir -p ${zot_root_dir} + + cat >${zot_sync_ondemand_config_file} <&3 + return 1 + fi + + return 0 +} + +function setup_file() { + # Verify prerequisites are available + if ! $(verify_prerequisites); then + exit 1 + fi + + # Setup redis server + redis_port=$(get_free_port) + redis_start redis_server ${redis_port} + + # Setup zot server + local zot_root_dir=${BATS_FILE_TMPDIR}/zot + local zot_sync_ondemand_config_file=${BATS_FILE_TMPDIR}/zot_sync_ondemand_config.json + zot_port=$(get_free_port) + echo ${zot_port} > ${BATS_FILE_TMPDIR}/zot.port + + mkdir -p ${zot_root_dir} + + cat >${zot_sync_ondemand_config_file} <