From 6c194dee604a6cc280cd925836c62a756bd4e678 Mon Sep 17 00:00:00 2001
From: James Lu
Date: Mon, 16 Jan 2023 16:23:08 +0800
Subject: [PATCH] vendor: update backupstore

Update `backupstore` to v0.0.0-20230125201126-0c2bd550ebc3 for creating
an HTTP client when initializing the S3 driver.

Ref: 5265

Signed-off-by: James Lu
---
 go.mod | 2 +-
 go.sum | 5 +-
 .../longhorn/backupstore/Dockerfile.dapper | 2 +-
 .../longhorn/backupstore/backupstore.go | 44 +-
 .../github.com/longhorn/backupstore/config.go | 99 ++-
 .../longhorn/backupstore/deltablock.go | 727 +++++++++++++-----
 .../github.com/longhorn/backupstore/driver.go | 8 +-
 .../longhorn/backupstore/fsops/fsops.go | 15 +-
 vendor/github.com/longhorn/backupstore/go.mod | 2 +
 vendor/github.com/longhorn/backupstore/go.sum | 2 +
 .../longhorn/backupstore/http/client.go | 6 +
 .../longhorn/backupstore/inspect.go | 23 +-
 .../github.com/longhorn/backupstore/list.go | 29 +-
 .../github.com/longhorn/backupstore/lock.go | 8 +-
 .../longhorn/backupstore/logging/logging.go | 1 +
 .../longhorn/backupstore/nfs/nfs.go | 24 +-
 .../github.com/longhorn/backupstore/s3/s3.go | 29 +-
 .../longhorn/backupstore/s3/s3_service.go | 7 +-
 .../longhorn/backupstore/singlefile.go | 27 +-
 .../longhorn/backupstore/util/util.go | 75 +-
 .../longhorn/backupstore/vfs/vfs.go | 4 +-
 vendor/github.com/pierrec/lz4/v4/.gitignore | 36 +
 vendor/github.com/pierrec/lz4/v4/LICENSE | 28 +
 vendor/github.com/pierrec/lz4/v4/README.md | 92 +++
 vendor/github.com/pierrec/lz4/v4/go.mod | 3 +
 vendor/github.com/pierrec/lz4/v4/go.sum | 0
 .../pierrec/lz4/v4/internal/lz4block/block.go | 481 ++++++++++++
 .../lz4/v4/internal/lz4block/blocks.go | 90 +++
 .../lz4/v4/internal/lz4block/decode_amd64.s | 448 +++++++++++
 .../lz4/v4/internal/lz4block/decode_arm.s | 231 ++++++
 .../lz4/v4/internal/lz4block/decode_arm64.s | 230 ++++++
 .../lz4/v4/internal/lz4block/decode_asm.go | 10 +
 .../lz4/v4/internal/lz4block/decode_other.go | 139 ++++
 .../lz4/v4/internal/lz4errors/errors.go | 19 +
 .../lz4/v4/internal/lz4stream/block.go | 350 +++++++++
 .../lz4/v4/internal/lz4stream/frame.go | 204 +++
 .../lz4/v4/internal/lz4stream/frame_gen.go | 103 +++
 .../lz4/v4/internal/xxh32/xxh32zero.go | 212 +++++
 .../lz4/v4/internal/xxh32/xxh32zero_arm.go | 11 +
 .../lz4/v4/internal/xxh32/xxh32zero_arm.s | 251 ++++++
 .../lz4/v4/internal/xxh32/xxh32zero_other.go | 10 +
 vendor/github.com/pierrec/lz4/v4/lz4.go | 157 ++++
 vendor/github.com/pierrec/lz4/v4/options.go | 214 ++++++
 .../github.com/pierrec/lz4/v4/options_gen.go | 92 +++
 vendor/github.com/pierrec/lz4/v4/reader.go | 275 +++++++
 vendor/github.com/pierrec/lz4/v4/state.go | 75 ++
 vendor/github.com/pierrec/lz4/v4/state_gen.go | 28 +
 vendor/github.com/pierrec/lz4/v4/writer.go | 238 ++++++
 .../golang.org/x/net/http/httpproxy/proxy.go | 370 +++++++++
 vendor/modules.txt | 9 +-
 50 files changed, 5217 insertions(+), 328 deletions(-)
 create mode 100644 vendor/github.com/pierrec/lz4/v4/.gitignore
 create mode 100644 vendor/github.com/pierrec/lz4/v4/LICENSE
 create mode 100644 vendor/github.com/pierrec/lz4/v4/README.md
 create mode 100644 vendor/github.com/pierrec/lz4/v4/go.mod
 create mode 100644 vendor/github.com/pierrec/lz4/v4/go.sum
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
 create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
 create mode 100644
vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go create mode 100644 vendor/github.com/pierrec/lz4/v4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/v4/options.go create mode 100644 vendor/github.com/pierrec/lz4/v4/options_gen.go create mode 100644 vendor/github.com/pierrec/lz4/v4/reader.go create mode 100644 vendor/github.com/pierrec/lz4/v4/state.go create mode 100644 vendor/github.com/pierrec/lz4/v4/state_gen.go create mode 100644 vendor/github.com/pierrec/lz4/v4/writer.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/proxy.go diff --git a/go.mod b/go.mod index 73599428a..bd817d2e9 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/RoaringBitmap/roaring v0.4.18 github.com/golang/protobuf v1.3.3-0.20190920234318-1680a479a2cf github.com/google/uuid v1.3.0 - github.com/longhorn/backupstore v0.0.0-20220913112826-5f5c95274f2a + github.com/longhorn/backupstore v0.0.0-20230125201126-0c2bd550ebc3 github.com/longhorn/go-iscsi-helper v0.0.0-20220927074943-051bf960608b // indirect github.com/longhorn/longhorn-engine v1.3.2-0.20220929032851-7aac8ae9c8b4 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index a735330bd..962561066 100644 --- a/go.sum +++ b/go.sum @@ -46,8 +46,9 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/longhorn/backupstore v0.0.0-20220913112826-5f5c95274f2a h1:f+mLqp3A5M7plw1pBgf8K1nvJxSU7mrGtU7bii+W5Bk= github.com/longhorn/backupstore v0.0.0-20220913112826-5f5c95274f2a/go.mod h1:hvIVsrpjPey7KupirAh0WoPMg0ArWnE6fA5bI30X7AI= +github.com/longhorn/backupstore v0.0.0-20230125201126-0c2bd550ebc3 h1:CDUWRyBlxV62OvuiBIQpMhHytDoqr3M3nvoXbFhhHBE= +github.com/longhorn/backupstore v0.0.0-20230125201126-0c2bd550ebc3/go.mod h1:73zwYfMYt/JacoG4y86p3Ct6MEzIhHtbxo02AAO+TmA= github.com/longhorn/go-iscsi-helper v0.0.0-20220805034259-7b59e22574bb/go.mod h1:9z/y9glKmWEdV50tjlUPxFwi1goQfIrrsoZbnMyIZbY= github.com/longhorn/go-iscsi-helper v0.0.0-20220927074943-051bf960608b h1:nGWLOG/u+hT1aQnygRavj31JxPwfig/Ts8EnqUVEW6o= github.com/longhorn/go-iscsi-helper v0.0.0-20220927074943-051bf960608b/go.mod h1:9z/y9glKmWEdV50tjlUPxFwi1goQfIrrsoZbnMyIZbY= @@ -64,6 +65,8 @@ github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbM github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/philhofer/fwd v1.0.0 
h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= +github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/github.com/longhorn/backupstore/Dockerfile.dapper b/vendor/github.com/longhorn/backupstore/Dockerfile.dapper index b965fb96a..c6c3f58b6 100644 --- a/vendor/github.com/longhorn/backupstore/Dockerfile.dapper +++ b/vendor/github.com/longhorn/backupstore/Dockerfile.dapper @@ -19,7 +19,7 @@ ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm=armv6l GOLANG_ARCH=GOLANG_ARCH_${ARC GOPATH=/go PATH=/go/bin:/usr/local/go/bin:${PATH} SHELL=/bin/bash RUN wget -O - https://storage.googleapis.com/golang/go1.13.3.linux-${!GOLANG_ARCH}.tar.gz | tar -xzf - -C /usr/local && \ - go get github.com/rancher/trash && go get -u golang.org/x/lint/golint + go get github.com/rancher/trash && GO111MODULE=on go get golang.org/x/lint/golint@v0.0.0-20210508222113-6edffad5e616 ENV DAPPER_SOURCE /go/src/github.com/longhorn/backupstore ENV DAPPER_OUTPUT ./bin diff --git a/vendor/github.com/longhorn/backupstore/backupstore.go b/vendor/github.com/longhorn/backupstore/backupstore.go index 3321066fb..01fe59ecf 100644 --- a/vendor/github.com/longhorn/backupstore/backupstore.go +++ b/vendor/github.com/longhorn/backupstore/backupstore.go @@ -3,8 +3,10 @@ package backupstore import ( "fmt" "net/url" + "sync" "github.com/longhorn/backupstore/util" + "github.com/pkg/errors" ) type Volume struct { @@ -17,6 +19,7 @@ type Volume struct { BlockCount int64 `json:",string"` BackingImageName string `json:",string"` BackingImageChecksum string `json:",string"` + CompressionMethod string `json:",string"` } type Snapshot struct { @@ -24,7 +27,13 @@ type Snapshot struct { CreatedTime string } +type ProcessingBlocks struct { + sync.Mutex + blocks map[string][]*BlockMapping +} + type Backup struct { + sync.Mutex Name string VolumeName string SnapshotName string @@ -33,6 +42,9 @@ type Backup struct { Size int64 `json:",string"` Labels map[string]string IsIncremental bool + CompressionMethod string + + ProcessingBlocks *ProcessingBlocks Blocks []BlockMapping `json:",omitempty"` SingleFile BackupFile `json:",omitempty"` @@ -50,27 +62,27 @@ func GetBackupstoreBase() string { return backupstoreBase } -func addVolume(volume *Volume, driver BackupStoreDriver) error { - if volumeExists(volume.Name, driver) { +func addVolume(driver BackupStoreDriver, volume *Volume) error { + if volumeExists(driver, volume.Name) { return nil } if !util.ValidateName(volume.Name) { - return fmt.Errorf("Invalid volume name %v", volume.Name) + return fmt.Errorf("invalid volume name %v", volume.Name) } - if err := saveVolume(volume, driver); err != nil { - log.Error("Fail add volume ", volume.Name) + if err := saveVolume(driver, volume); err != nil { + log.WithError(err).Errorf("Failed to add volume %v", volume.Name) return err } - log.Debug("Added backupstore volume ", volume.Name) + log.Infof("Added backupstore volume %v", volume.Name) return nil } func removeVolume(volumeName string, driver BackupStoreDriver) error { if !util.ValidateName(volumeName) { - return fmt.Errorf("Invalid volume name %v", volumeName) + return 
fmt.Errorf("invalid volume name %v", volumeName) } volumeDir := getVolumePath(volumeName) @@ -78,20 +90,20 @@ func removeVolume(volumeName string, driver BackupStoreDriver) error { volumeBackupsDirectory := getBackupPath(volumeName) volumeLocksDirectory := getLockPath(volumeName) if err := driver.Remove(volumeBackupsDirectory); err != nil { - return fmt.Errorf("failed to remove all the backups for volume %v: %v", volumeName, err) + return errors.Wrapf(err, "failed to remove all the backups for volume %v", volumeName) } if err := driver.Remove(volumeBlocksDirectory); err != nil { - return fmt.Errorf("failed to remove all the blocks for volume %v: %v", volumeName, err) + return errors.Wrapf(err, "failed to remove all the blocks for volume %v", volumeName) } if err := driver.Remove(volumeLocksDirectory); err != nil { - return fmt.Errorf("failed to remove all the locks for volume %v: %v", volumeName, err) + return errors.Wrapf(err, "failed to remove all the locks for volume %v", volumeName) } if err := driver.Remove(volumeDir); err != nil { - return fmt.Errorf("failed to remove backup volume %v directory in backupstore: %v", volumeName, err) + return errors.Wrapf(err, "failed to remove backup volume %v directory in backupstore", volumeName) } - log.Debug("Removed volume directory in backupstore: ", volumeDir) - log.Debug("Removed backupstore volume ", volumeName) + log.Infof("Removed volume directory in backupstore %v", volumeDir) + log.Infof("Removed backupstore volume %v", volumeName) return nil } @@ -114,10 +126,10 @@ func DecodeBackupURL(backupURL string) (string, string, string, error) { volumeName := v.Get("volume") backupName := v.Get("backup") if !util.ValidateName(volumeName) { - return "", "", "", fmt.Errorf("Invalid volume name parsed, got %v", volumeName) + return "", "", "", fmt.Errorf("invalid volume name parsed, got %v", volumeName) } if backupName != "" && !util.ValidateName(backupName) { - return "", "", "", fmt.Errorf("Invalid backup name parsed, got %v", backupName) + return "", "", "", fmt.Errorf("invalid backup name parsed, got %v", backupName) } u.RawQuery = "" destURL := u.String() @@ -133,5 +145,5 @@ func LoadVolume(backupURL string) (*Volume, error) { if err != nil { return nil, err } - return loadVolume(volumeName, driver) + return loadVolume(driver, volumeName) } diff --git a/vendor/github.com/longhorn/backupstore/config.go b/vendor/github.com/longhorn/backupstore/config.go index 9e38d3af7..2fa1a2f89 100644 --- a/vendor/github.com/longhorn/backupstore/config.go +++ b/vendor/github.com/longhorn/backupstore/config.go @@ -33,7 +33,7 @@ func getBackupConfigName(id string) string { return BACKUP_CONFIG_PREFIX + id + CFG_SUFFIX } -func loadConfigInBackupStore(filePath string, driver BackupStoreDriver, v interface{}) error { +func LoadConfigInBackupStore(driver BackupStoreDriver, filePath string, v interface{}) error { if !driver.FileExists(filePath) { return fmt.Errorf("cannot find %v in backupstore", filePath) } @@ -61,7 +61,7 @@ func loadConfigInBackupStore(filePath string, driver BackupStoreDriver, v interf return nil } -func saveConfigInBackupStore(filePath string, driver BackupStoreDriver, v interface{}) error { +func SaveConfigInBackupStore(driver BackupStoreDriver, filePath string, v interface{}) error { j, err := json.Marshal(v) if err != nil { return err @@ -84,9 +84,51 @@ func saveConfigInBackupStore(filePath string, driver BackupStoreDriver, v interf return nil } -func volumeExists(volumeName string, driver BackupStoreDriver) bool { - volumeFile := 
getVolumeFilePath(volumeName) - return driver.FileExists(volumeFile) +func SaveLocalFileToBackupStore(localFilePath, backupStoreFilePath string, driver BackupStoreDriver) error { + log := log.WithFields(logrus.Fields{ + LogFieldReason: LogReasonStart, + LogFieldObject: LogObjectConfig, + LogFieldKind: driver.Kind(), + LogFieldFilepath: localFilePath, + LogFieldDestURL: backupStoreFilePath, + }) + log.Debug() + + if driver.FileExists(backupStoreFilePath) { + return fmt.Errorf("%v already exists", backupStoreFilePath) + } + + if err := driver.Upload(localFilePath, backupStoreFilePath); err != nil { + return err + } + + log.WithField(LogFieldReason, LogReasonComplete).Debug() + return nil +} + +func SaveBackupStoreToLocalFile(driver BackupStoreDriver, backupStoreFileURL, localFilePath string) error { + log := log.WithFields(logrus.Fields{ + LogFieldReason: LogReasonStart, + LogFieldObject: LogObjectConfig, + LogFieldKind: driver.Kind(), + LogFieldFilepath: localFilePath, + LogFieldSourceURL: backupStoreFileURL, + }) + log.Debug() + + if err := driver.Download(backupStoreFileURL, localFilePath); err != nil { + return err + } + + log = log.WithFields(logrus.Fields{ + LogFieldReason: LogReasonComplete, + }) + log.Debug() + return nil +} + +func volumeExists(driver BackupStoreDriver, volumeName string) bool { + return driver.FileExists(getVolumeFilePath(volumeName)) } func getVolumePath(volumeName string) string { @@ -108,7 +150,7 @@ func getVolumeNames(jobQueues *jobq.WorkerDispatcher, jobQueueTimeout time.Durat volumePathBase := filepath.Join(backupstoreBase, VOLUME_DIRECTORY) lv1Dirs, err := driver.List(volumePathBase) if err != nil { - log.Warnf("failed to list first level dirs for path: %v reason: %v", volumePathBase, err) + log.WithError(err).Warnf("Failed to list first level dirs for path %v", volumePathBase) return names, err } @@ -122,7 +164,7 @@ func getVolumeNames(jobQueues *jobq.WorkerDispatcher, jobQueueTimeout time.Durat lv1Tracker := jobQueues.QueueTimedFunc(context.Background(), func(ctx context.Context) (interface{}, error) { lv2Dirs, err := driver.List(path) if err != nil { - log.Warnf("failed to list second level dirs for path: %v reason: %v", path, err) + log.WithError(err).Warnf("Failed to list second level dirs for path %v", path) return nil, err } @@ -148,7 +190,7 @@ func getVolumeNames(jobQueues *jobq.WorkerDispatcher, jobQueueTimeout time.Durat lv2Tracker := jobQueues.QueueTimedFunc(context.Background(), func(ctx context.Context) (interface{}, error) { volumeNames, err := driver.List(path) if err != nil { - log.Warnf("failed to list volume names for path: %v reason: %v", path, err) + log.WithError(err).Warnf("Failed to list volume names for path %v", path) return nil, err } return volumeNames, nil @@ -172,24 +214,25 @@ func getVolumeNames(jobQueues *jobq.WorkerDispatcher, jobQueueTimeout time.Durat return names, nil } -func loadVolume(volumeName string, driver BackupStoreDriver) (*Volume, error) { +func loadVolume(driver BackupStoreDriver, volumeName string) (*Volume, error) { v := &Volume{} file := getVolumeFilePath(volumeName) - if err := loadConfigInBackupStore(file, driver, v); err != nil { + if err := LoadConfigInBackupStore(driver, file, v); err != nil { return nil, err } + // Backward compatibility + if v.CompressionMethod == "" { + log.Infof("Fall back compression method to %v for volume %v", LEGACY_COMPRESSION_METHOD, v.Name) + v.CompressionMethod = LEGACY_COMPRESSION_METHOD + } return v, nil } -func saveVolume(v *Volume, driver BackupStoreDriver) error { - file 
:= getVolumeFilePath(v.Name) - if err := saveConfigInBackupStore(file, driver, v); err != nil { - return err - } - return nil +func saveVolume(driver BackupStoreDriver, v *Volume) error { + return SaveConfigInBackupStore(driver, getVolumeFilePath(v.Name), v) } -func getBackupNamesForVolume(volumeName string, driver BackupStoreDriver) ([]string, error) { +func getBackupNamesForVolume(driver BackupStoreDriver, volumeName string) ([]string, error) { result := []string{} fileList, err := driver.List(getBackupPath(volumeName)) if err != nil { @@ -213,27 +256,25 @@ func isBackupInProgress(backup *Backup) bool { return backup != nil && backup.CreatedTime == "" } -func backupExists(backupName, volumeName string, bsDriver BackupStoreDriver) bool { - return bsDriver.FileExists(getBackupConfigPath(backupName, volumeName)) -} - -func loadBackup(backupName, volumeName string, bsDriver BackupStoreDriver) (*Backup, error) { +func loadBackup(bsDriver BackupStoreDriver, backupName, volumeName string) (*Backup, error) { backup := &Backup{} - if err := loadConfigInBackupStore(getBackupConfigPath(backupName, volumeName), bsDriver, backup); err != nil { + if err := LoadConfigInBackupStore(bsDriver, getBackupConfigPath(backupName, volumeName), backup); err != nil { return nil, err } + // Backward compatibility + if backup.CompressionMethod == "" { + log.Infof("Fall back compression method to %v for backup %v", LEGACY_COMPRESSION_METHOD, backup.Name) + backup.CompressionMethod = LEGACY_COMPRESSION_METHOD + } return backup, nil } -func saveBackup(backup *Backup, bsDriver BackupStoreDriver) error { +func saveBackup(bsDriver BackupStoreDriver, backup *Backup) error { if backup.VolumeName == "" { return fmt.Errorf("missing volume specifier for backup: %v", backup.Name) } filePath := getBackupConfigPath(backup.Name, backup.VolumeName) - if err := saveConfigInBackupStore(filePath, bsDriver, backup); err != nil { - return err - } - return nil + return SaveConfigInBackupStore(bsDriver, filePath, backup) } func removeBackup(backup *Backup, bsDriver BackupStoreDriver) error { @@ -241,6 +282,6 @@ func removeBackup(backup *Backup, bsDriver BackupStoreDriver) error { if err := bsDriver.Remove(filePath); err != nil { return err } - log.Debugf("Removed %v on backupstore", filePath) + log.Infof("Removed %v on backupstore", filePath) return nil } diff --git a/vendor/github.com/longhorn/backupstore/deltablock.go b/vendor/github.com/longhorn/backupstore/deltablock.go index 28bd1b723..99a8ecb94 100644 --- a/vendor/github.com/longhorn/backupstore/deltablock.go +++ b/vendor/github.com/longhorn/backupstore/deltablock.go @@ -1,12 +1,16 @@ package backupstore import ( + "context" "fmt" "io" "os" "path/filepath" + "sync" + "syscall" "time" + "github.com/pkg/errors" "github.com/sirupsen/logrus" . 
"github.com/longhorn/backupstore/logging" @@ -14,19 +18,21 @@ import ( ) type DeltaBackupConfig struct { - BackupName string - Volume *Volume - Snapshot *Snapshot - DestURL string - DeltaOps DeltaBlockBackupOperations - Labels map[string]string + BackupName string + Volume *Volume + Snapshot *Snapshot + DestURL string + DeltaOps DeltaBlockBackupOperations + Labels map[string]string + ConcurrentLimit int32 } type DeltaRestoreConfig struct { - BackupURL string - DeltaOps DeltaRestoreOperations - LastBackupName string - Filename string + BackupURL string + DeltaOps DeltaRestoreOperations + LastBackupName string + Filename string + ConcurrentLimit int32 } type BlockMapping struct { @@ -88,7 +94,8 @@ type DeltaRestoreOperations interface { } const ( - DEFAULT_BLOCK_SIZE = 2097152 + DEFAULT_BLOCK_SIZE = 2 * 1024 * 1024 + LEGACY_COMPRESSION_METHOD = "gzip" BLOCKS_DIRECTORY = "blocks" BLOCK_SEPARATE_LAYER1 = 2 @@ -99,9 +106,9 @@ const ( PROGRESS_PERCENTAGE_BACKUP_TOTAL = 100 ) -func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { +func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncremental bool, err error) { if config == nil { - return "", false, fmt.Errorf("Invalid empty config for backup") + return "", false, fmt.Errorf("invalid empty config for backup") } volume := config.Volume @@ -109,7 +116,7 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { destURL := config.DestURL deltaOps := config.DeltaOps if deltaOps == nil { - return "", false, fmt.Errorf("Missing DeltaBlockBackupOperations") + return "", false, fmt.Errorf("missing DeltaBlockBackupOperations") } bsDriver, err := GetBackupStoreDriver(destURL) @@ -127,16 +134,18 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { return "", false, err } - if err := addVolume(volume, bsDriver); err != nil { + if err := addVolume(bsDriver, volume); err != nil { return "", false, err } // Update volume from backupstore - volume, err = loadVolume(volume.Name, bsDriver) + volume, err = loadVolume(bsDriver, volume.Name) if err != nil { return "", false, err } + config.Volume.CompressionMethod = volume.CompressionMethod + if err := deltaOps.OpenSnapshot(snapshot.Name, volume.Name); err != nil { return "", false, err } @@ -144,7 +153,7 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { backupRequest := &backupRequest{} if volume.LastBackupName != "" { lastBackupName := volume.LastBackupName - var backup, err = loadBackup(lastBackupName, volume.Name, bsDriver) + var backup, err = loadBackup(bsDriver, lastBackupName, volume.Name) if err != nil { log.WithFields(logrus.Fields{ LogFieldReason: LogReasonFallback, @@ -153,9 +162,9 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { LogFieldBackup: lastBackupName, LogFieldVolume: volume.Name, LogFieldDestURL: destURL, - }).Info("Cannot find previous backup in backupstore") + }).WithError(err).Info("Cannot find previous backup in backupstore") } else if backup.SnapshotName == snapshot.Name { - //Generate full snapshot if the snapshot has been backed up last time + // Generate full snapshot if the snapshot has been backed up last time log.WithFields(logrus.Fields{ LogFieldReason: LogReasonFallback, LogFieldEvent: LogEventCompare, @@ -182,6 +191,7 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { LogFieldSnapshot: snapshot.Name, LogFieldLastSnapshot: backupRequest.getLastSnapshotName(), }).Debug("Generating snapshot 
changed blocks config") + delta, err := deltaOps.CompareSnapshot(snapshot.Name, backupRequest.getLastSnapshotName(), volume.Name) if err != nil { deltaOps.CloseSnapshot(snapshot.Name, volume.Name) @@ -213,10 +223,14 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { } deltaBackup := &Backup{ - Name: backupName, - VolumeName: volume.Name, - SnapshotName: snapshot.Name, - Blocks: []BlockMapping{}, + Name: backupName, + VolumeName: volume.Name, + SnapshotName: snapshot.Name, + CompressionMethod: volume.CompressionMethod, + Blocks: []BlockMapping{}, + ProcessingBlocks: &ProcessingBlocks{ + blocks: map[string][]*BlockMapping{}, + }, } // keep lock alive for async go routine. @@ -228,7 +242,7 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { defer deltaOps.CloseSnapshot(snapshot.Name, volume.Name) defer lock.Unlock() - if progress, backup, err := performBackup(config, delta, deltaBackup, backupRequest.lastBackup, bsDriver); err != nil { + if progress, backup, err := performBackup(bsDriver, config, delta, deltaBackup, backupRequest.lastBackup); err != nil { deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, progress, "", err.Error()) } else { deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, progress, backup, "") @@ -237,69 +251,288 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) { return deltaBackup.Name, backupRequest.isIncrementalBackup(), nil } -// performBackup if lastBackup is present we will do an incremental backup -func performBackup(config *DeltaBackupConfig, delta *Mappings, deltaBackup *Backup, lastBackup *Backup, - bsDriver BackupStoreDriver) (int, string, error) { +func populateMappings(bsDriver BackupStoreDriver, config *DeltaBackupConfig, deltaBackup *Backup, delta *Mappings) (<-chan Mapping, <-chan error) { + mappingChan := make(chan Mapping, 1) + errChan := make(chan error, 1) - // create an in progress backup config file - if err := saveBackup(&Backup{Name: deltaBackup.Name, VolumeName: deltaBackup.VolumeName, - CreatedTime: ""}, bsDriver); err != nil { - return 0, "", err + go func() { + defer close(mappingChan) + defer close(errChan) + + for _, mapping := range delta.Mappings { + mappingChan <- mapping + } + }() + + return mappingChan, errChan +} + +func getProgress(total, processed int64) int { + return int((float64(processed+1) / float64(total)) * PROGRESS_PERCENTAGE_BACKUP_SNAPSHOT) +} + +func isBlockBeingProcessed(deltaBackup *Backup, offset int64, checksum string) bool { + processingBlocks := deltaBackup.ProcessingBlocks + + processingBlocks.Lock() + defer processingBlocks.Unlock() + + blockInfo := &BlockMapping{ + Offset: offset, + BlockChecksum: checksum, + } + if _, ok := processingBlocks.blocks[checksum]; ok { + processingBlocks.blocks[checksum] = append(processingBlocks.blocks[checksum], blockInfo) + return true + } + + processingBlocks.blocks[checksum] = []*BlockMapping{blockInfo} + return false +} + +func updateBlocksAndProgress(deltaBackup *Backup, progress *progress, checksum string, newBlock bool) { + processingBlocks := deltaBackup.ProcessingBlocks + + processingBlocks.Lock() + defer processingBlocks.Unlock() + + // Update deltaBackup.Blocks + blocks := processingBlocks.blocks[checksum] + for _, block := range blocks { + deltaBackup.Blocks = append(deltaBackup.Blocks, *block) } + // Update progress + func() { + progress.Lock() + defer progress.Unlock() + + if newBlock { + progress.newBlockCounts++ + } + progress.processedBlockCounts += int64(len(blocks)) + 
progress.progress = getProgress(progress.totalBlockCounts, progress.processedBlockCounts) + }() + + delete(processingBlocks.blocks, checksum) +} + +func backupBlock(bsDriver BackupStoreDriver, config *DeltaBackupConfig, + deltaBackup *Backup, offset int64, block []byte, progress *progress) error { + var err error + newBlock := false volume := config.Volume snapshot := config.Snapshot - destURL := config.DestURL deltaOps := config.DeltaOps - var progress int - mCounts := len(delta.Mappings) - newBlocks := int64(0) - for m, d := range delta.Mappings { - if d.Size%delta.BlockSize != 0 { - return progress, "", fmt.Errorf("Mapping's size %v is not multiples of backup block size %v", - d.Size, delta.BlockSize) + checksum := util.GetChecksum(block) + + // This prevents multiple goroutines from trying to upload blocks that contain identical contents + // with the same checksum but different offsets). + // After uploading, `bsDriver.FileExists(blkFile)` is used to avoid repeat uploading. + if isBlockBeingProcessed(deltaBackup, offset, checksum) { + return nil + } + + defer func() { + if err != nil { + return } - block := make([]byte, DEFAULT_BLOCK_SIZE) - blkCounts := d.Size / delta.BlockSize - for i := int64(0); i < blkCounts; i++ { - offset := d.Offset + i*delta.BlockSize - log.Debugf("Backup for %v: segment %v/%v, blocks %v/%v", snapshot.Name, m+1, mCounts, i+1, blkCounts) - err := deltaOps.ReadSnapshot(snapshot.Name, volume.Name, offset, block) - if err != nil { - return progress, "", err - } - checksum := util.GetChecksum(block) - blkFile := getBlockFilePath(volume.Name, checksum) - if bsDriver.FileExists(blkFile) { - blockMapping := BlockMapping{ - Offset: offset, - BlockChecksum: checksum, + deltaBackup.Lock() + defer deltaBackup.Unlock() + updateBlocksAndProgress(deltaBackup, progress, checksum, newBlock) + deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, progress.progress, "", "") + }() + + blkFile := getBlockFilePath(volume.Name, checksum) + if bsDriver.FileExists(blkFile) { + log.Debugf("Found existing block matching at %v", blkFile) + return nil + } + + log.Debugf("Creating new block file at %v", blkFile) + newBlock = true + rs, err := util.CompressData(deltaBackup.CompressionMethod, block) + if err != nil { + return err + } + + return bsDriver.Write(blkFile, rs) +} + +func backupMapping(bsDriver BackupStoreDriver, config *DeltaBackupConfig, + deltaBackup *Backup, blockSize int64, mapping Mapping, progress *progress) error { + volume := config.Volume + snapshot := config.Snapshot + deltaOps := config.DeltaOps + + block := make([]byte, DEFAULT_BLOCK_SIZE) + blkCounts := mapping.Size / blockSize + + for i := int64(0); i < blkCounts; i++ { + log.Debugf("Backup for %v: segment %+v, blocks %v/%v", snapshot.Name, mapping, i+1, blkCounts) + offset := mapping.Offset + i*blockSize + if err := deltaOps.ReadSnapshot(snapshot.Name, volume.Name, offset, block); err != nil { + logrus.WithError(err).Errorf("Failed to read volume %v snapshot %v block at offset %v size %v", + volume.Name, snapshot.Name, offset, len(block)) + return err + } + + if err := backupBlock(bsDriver, config, deltaBackup, offset, block, progress); err != nil { + logrus.WithError(err).Errorf("Failed to back up volume %v snapshot %v block at offset %v size %v", + volume.Name, snapshot.Name, offset, len(block)) + return err + } + } + + return nil +} + +func backupMappings(ctx context.Context, bsDriver BackupStoreDriver, config *DeltaBackupConfig, + deltaBackup *Backup, blockSize int64, progress *progress, in <-chan Mapping) 
<-chan error { + errChan := make(chan error, 1) + + go func() { + defer close(errChan) + for { + select { + case <-ctx.Done(): + return + case mapping, open := <-in: + if !open { + return } - deltaBackup.Blocks = append(deltaBackup.Blocks, blockMapping) - log.Debugf("Found existed block match at %v", blkFile) - continue - } - rs, err := util.CompressData(block) - if err != nil { - return progress, "", err + if err := backupMapping(bsDriver, config, deltaBackup, blockSize, mapping, progress); err != nil { + errChan <- err + return + } } + } + }() + + return errChan +} + +type progress struct { + sync.Mutex + + totalBlockCounts int64 + processedBlockCounts int64 + newBlockCounts int64 + + progress int +} - if err := bsDriver.Write(blkFile, rs); err != nil { - return progress, "", err +func getTotalBackupBlockCounts(delta *Mappings) (int64, error) { + totalBlockCounts := int64(0) + for _, d := range delta.Mappings { + if d.Size%delta.BlockSize != 0 { + return 0, fmt.Errorf("mapping's size %v is not multiples of backup block size %v", + d.Size, delta.BlockSize) + } + totalBlockCounts += d.Size / delta.BlockSize + } + return totalBlockCounts, nil +} + +// mergeErrorChannels will merge all error channels into a single error out channel. +// the error out channel will be closed once the ctx is done or all error channels are closed +// if there is an error on one of the incoming channels the error will be relayed. +func mergeErrorChannels(ctx context.Context, channels ...<-chan error) <-chan error { + var wg sync.WaitGroup + wg.Add(len(channels)) + + out := make(chan error, len(channels)) + output := func(c <-chan error) { + defer wg.Done() + select { + case err, ok := <-c: + if ok { + out <- err } - log.Debugf("Created new block file at %v", blkFile) + return + case <-ctx.Done(): + return + } + } - newBlocks++ - blockMapping := BlockMapping{ - Offset: offset, + for _, c := range channels { + go output(c) + } + + go func() { + wg.Wait() + close(out) + }() + return out +} + +func sortBackupBlocks(blocks []BlockMapping, volumeSize, blockSize int64) []BlockMapping { + sortedBlocks := make([]string, volumeSize/blockSize) + for _, block := range blocks { + i := block.Offset / blockSize + sortedBlocks[i] = block.BlockChecksum + } + + blockMappings := []BlockMapping{} + for i, checksum := range sortedBlocks { + if checksum != "" { + blockMappings = append(blockMappings, BlockMapping{ + Offset: int64(i) * blockSize, BlockChecksum: checksum, - } - deltaBackup.Blocks = append(deltaBackup.Blocks, blockMapping) + }) } - progress = int((float64(m+1) / float64(mCounts)) * PROGRESS_PERCENTAGE_BACKUP_SNAPSHOT) - deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, progress, "", "") + } + + return blockMappings +} + +// performBackup if lastBackup is present we will do an incremental backup +func performBackup(bsDriver BackupStoreDriver, config *DeltaBackupConfig, delta *Mappings, deltaBackup *Backup, lastBackup *Backup) (int, string, error) { + volume := config.Volume + snapshot := config.Snapshot + destURL := config.DestURL + concurrentLimit := config.ConcurrentLimit + + // create an in progress backup config file + if err := saveBackup(bsDriver, &Backup{ + Name: deltaBackup.Name, + VolumeName: deltaBackup.VolumeName, + CompressionMethod: volume.CompressionMethod, + CreatedTime: "", + }); err != nil { + return 0, "", err + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + totalBlockCounts, err := getTotalBackupBlockCounts(delta) + if err != nil { + return 0, "", err + } + 
logrus.Infof("Volume %v Snapshot %v is consist of %v mappings and %v blocks", + volume.Name, snapshot.Name, len(delta.Mappings), totalBlockCounts) + + progress := &progress{ + totalBlockCounts: totalBlockCounts, + } + + mappingChan, errChan := populateMappings(bsDriver, config, deltaBackup, delta) + + errorChans := []<-chan error{errChan} + for i := 0; i < int(concurrentLimit); i++ { + errorChans = append(errorChans, backupMappings(ctx, bsDriver, config, + deltaBackup, delta.BlockSize, progress, mappingChan)) + } + + mergedErrChan := mergeErrorChannels(ctx, errorChans...) + err = <-mergedErrChan + + if err != nil { + logrus.WithError(err).Errorf("Failed to backup volume %v snapshot %v", volume.Name, snapshot.Name) + return progress.progress, "", err } log.WithFields(logrus.Fields{ @@ -307,7 +540,10 @@ func performBackup(config *DeltaBackupConfig, delta *Mappings, deltaBackup *Back LogFieldEvent: LogEventBackup, LogFieldObject: LogObjectSnapshot, LogFieldSnapshot: snapshot.Name, - }).Debug("Created snapshot changed blocks") + }).Infof("Created snapshot changed blocks: %v mappings, %v blocks and %v new blocks", + len(delta.Mappings), progress.totalBlockCounts, progress.newBlockCounts) + + deltaBackup.Blocks = sortBackupBlocks(deltaBackup.Blocks, volume.Size, delta.BlockSize) backup := mergeSnapshotMap(deltaBackup, lastBackup) backup.SnapshotName = snapshot.Name @@ -317,26 +553,27 @@ func performBackup(config *DeltaBackupConfig, delta *Mappings, deltaBackup *Back backup.Labels = config.Labels backup.IsIncremental = lastBackup != nil - if err := saveBackup(backup, bsDriver); err != nil { - return progress, "", err + if err := saveBackup(bsDriver, backup); err != nil { + return progress.progress, "", err } - volume, err := loadVolume(volume.Name, bsDriver) + volume, err = loadVolume(bsDriver, volume.Name) if err != nil { - return progress, "", err + return progress.progress, "", err } volume.LastBackupName = backup.Name volume.LastBackupAt = backup.SnapshotCreatedAt - volume.BlockCount = volume.BlockCount + newBlocks + volume.BlockCount = volume.BlockCount + progress.newBlockCounts // The volume may be expanded volume.Size = config.Volume.Size volume.Labels = config.Labels volume.BackingImageName = config.Volume.BackingImageName volume.BackingImageChecksum = config.Volume.BackingImageChecksum + volume.CompressionMethod = config.Volume.CompressionMethod - if err := saveVolume(volume, bsDriver); err != nil { - return progress, "", err + if err := saveVolume(bsDriver, volume); err != nil { + return progress.progress, "", err } return PROGRESS_PERCENTAGE_BACKUP_TOTAL, EncodeBackupURL(backup.Name, volume.Name, destURL), nil @@ -347,10 +584,11 @@ func mergeSnapshotMap(deltaBackup, lastBackup *Backup) *Backup { return deltaBackup } backup := &Backup{ - Name: deltaBackup.Name, - VolumeName: deltaBackup.VolumeName, - SnapshotName: deltaBackup.SnapshotName, - Blocks: []BlockMapping{}, + Name: deltaBackup.Name, + VolumeName: deltaBackup.VolumeName, + SnapshotName: deltaBackup.SnapshotName, + CompressionMethod: deltaBackup.CompressionMethod, + Blocks: []BlockMapping{}, } var d, l int for d, l = 0, 0; d < len(deltaBackup.Blocks) && l < len(lastBackup.Blocks); { @@ -375,7 +613,7 @@ func mergeSnapshotMap(deltaBackup, lastBackup *Backup) *Backup { LogFieldObject: LogObjectBackup, LogFieldBackup: deltaBackup.Name, LogFieldLastBackup: lastBackup.Name, - }).Debugf("merge backup blocks") + }).Debugf("Merge backup blocks") if d == len(deltaBackup.Blocks) { backup.Blocks = append(backup.Blocks, 
lastBackup.Blocks[l:]...) } else { @@ -392,6 +630,7 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error { volDevName := config.Filename backupURL := config.BackupURL + concurrentLimit := config.ConcurrentLimit deltaOps := config.DeltaOps if deltaOps == nil { return fmt.Errorf("missing DeltaRestoreOperations") @@ -417,7 +656,7 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error { return err } - vol, err := loadVolume(srcVolumeName, bsDriver) + vol, err := loadVolume(bsDriver, srcVolumeName) if err != nil { return generateError(logrus.Fields{ LogFieldVolume: srcVolumeName, @@ -426,13 +665,13 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error { } if vol.Size == 0 || vol.Size%DEFAULT_BLOCK_SIZE != 0 { - return fmt.Errorf("Read invalid volume size %v", vol.Size) + return fmt.Errorf("read invalid volume size %v", vol.Size) } if _, err := os.Stat(volDevName); err == nil { logrus.Warnf("File %s for the restore exists, will remove and re-create it", volDevName) - if err := os.Remove(volDevName); err != nil { - return fmt.Errorf("failed to clean up the existing file %v before restore: %v", volDevName, err) + if err := os.RemoveAll(volDevName); err != nil { + return errors.Wrapf(err, "failed to clean up the existing file %v before restore", volDevName) } } @@ -452,7 +691,7 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error { return err } - backup, err := loadBackup(srcBackupName, srcVolumeName, bsDriver) + backup, err := loadBackup(bsDriver, srcBackupName, srcVolumeName) if err != nil { return err } @@ -475,7 +714,10 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error { defer volDev.Close() defer lock.Unlock() - var progress int + progress := &progress{ + totalBlockCounts: int64(len(backup.Blocks)), + } + // This pre-truncate is to ensure the XFS speculatively // preallocates post-EOF blocks get reclaimed when volDev is // closed. @@ -484,20 +726,27 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error { if stat.Mode()&os.ModeType == 0 { log.Debugf("Truncate %v to size %v", volDevName, vol.Size) if err := volDev.Truncate(vol.Size); err != nil { - deltaOps.UpdateRestoreStatus(volDevName, progress, err) + deltaOps.UpdateRestoreStatus(volDevName, progress.progress, err) return } } - blkCounts := len(backup.Blocks) - for i, block := range backup.Blocks { - log.Debugf("Restore for %v: block %v, %v/%v", volDevName, block.BlockChecksum, i+1, blkCounts) - if err := restoreBlockToFile(srcVolumeName, volDev, bsDriver, block); err != nil { - deltaOps.UpdateRestoreStatus(volDevName, progress, err) - return - } - progress = int((float64(i+1) / float64(blkCounts)) * PROGRESS_PERCENTAGE_BACKUP_SNAPSHOT) - deltaOps.UpdateRestoreStatus(volDevName, progress, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blockChan, errChan := populateBlocksForFullRestore(bsDriver, backup) + + errorChans := []<-chan error{errChan} + for i := 0; i < int(concurrentLimit); i++ { + errorChans = append(errorChans, restoreBlocks(ctx, bsDriver, config, srcVolumeName, blockChan, progress)) + } + + mergedErrChan := mergeErrorChannels(ctx, errorChans...) 
+ err = <-mergedErrChan + if err != nil { + logrus.WithError(err).Errorf("Failed to delta restore volume %v backup %v", srcVolumeName, backup.Name) + deltaOps.UpdateRestoreStatus(volDevName, progress.progress, err) + return } deltaOps.UpdateRestoreStatus(volDevName, PROGRESS_PERCENTAGE_BACKUP_TOTAL, nil) @@ -506,24 +755,22 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error { return nil } -func restoreBlockToFile(volumeName string, volDev *os.File, bsDriver BackupStoreDriver, blk BlockMapping) error { +func restoreBlockToFile(bsDriver BackupStoreDriver, volumeName string, volDev *os.File, decompression string, blk BlockMapping) error { blkFile := getBlockFilePath(volumeName, blk.BlockChecksum) rc, err := bsDriver.Read(blkFile) if err != nil { return err } defer rc.Close() - r, err := util.DecompressAndVerify(rc, blk.BlockChecksum) + r, err := util.DecompressAndVerify(decompression, rc, blk.BlockChecksum) if err != nil { return err } if _, err := volDev.Seek(blk.Offset, 0); err != nil { return err } - if _, err := io.CopyN(volDev, r, DEFAULT_BLOCK_SIZE); err != nil { - return err - } - return nil + _, err = io.CopyN(volDev, r, DEFAULT_BLOCK_SIZE) + return err } func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error { @@ -558,7 +805,7 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error { } defer lock.Unlock() - vol, err := loadVolume(srcVolumeName, bsDriver) + vol, err := loadVolume(bsDriver, srcVolumeName) if err != nil { return generateError(logrus.Fields{ LogFieldVolume: srcVolumeName, @@ -579,7 +826,7 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error { if _, err := os.Stat(volDevName); err == nil { logrus.Warnf("File %s for the incremental restore exists, will remove and re-create it", volDevName) if err := os.Remove(volDevName); err != nil { - return fmt.Errorf("failed to clean up the existing file %v before incremental restore: %v", volDevName, err) + return errors.Wrapf(err, "failed to clean up the existing file %v before incremental restore", volDevName) } } @@ -599,11 +846,11 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error { return err } - lastBackup, err := loadBackup(lastBackupName, srcVolumeName, bsDriver) + lastBackup, err := loadBackup(bsDriver, lastBackupName, srcVolumeName) if err != nil { return err } - backup, err := loadBackup(srcBackupName, srcVolumeName, bsDriver) + backup, err := loadBackup(bsDriver, srcBackupName, srcVolumeName) if err != nil { return err } @@ -638,7 +885,7 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error { } } - if err := performIncrementalRestore(srcVolumeName, volDev, lastBackup, backup, bsDriver, config); err != nil { + if err := performIncrementalRestore(bsDriver, config, srcVolumeName, volDevName, lastBackup, backup); err != nil { deltaOps.UpdateRestoreStatus(volDevName, 0, err) return } @@ -648,63 +895,182 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error { return nil } -func performIncrementalRestore(srcVolumeName string, volDev *os.File, lastBackup *Backup, backup *Backup, - bsDriver BackupStoreDriver, config *DeltaRestoreConfig) error { - var progress int - volDevName := config.Filename - deltaOps := config.DeltaOps +type Block struct { + offset int64 + blockChecksum string + compressionMethod string + isZeroBlock bool +} - emptyBlock := make([]byte, DEFAULT_BLOCK_SIZE) - total := len(backup.Blocks) + len(lastBackup.Blocks) +func 
populateBlocksForIncrementalRestore(bsDriver BackupStoreDriver, lastBackup, backup *Backup) (<-chan *Block, <-chan error) { + blockChan := make(chan *Block, 10) + errChan := make(chan error, 1) - for b, l := 0, 0; b < len(backup.Blocks) || l < len(lastBackup.Blocks); { - if b >= len(backup.Blocks) { - if err := fillBlockToFile(&emptyBlock, volDev, lastBackup.Blocks[l].Offset); err != nil { - return err + go func() { + defer close(blockChan) + defer close(errChan) + + for b, l := 0, 0; b < len(backup.Blocks) || l < len(lastBackup.Blocks); { + if b >= len(backup.Blocks) { + blockChan <- &Block{ + offset: lastBackup.Blocks[l].Offset, + isZeroBlock: true, + } + l++ + continue } - l++ - continue - } - if l >= len(lastBackup.Blocks) { - if err := restoreBlockToFile(srcVolumeName, volDev, bsDriver, backup.Blocks[b]); err != nil { - return err + if l >= len(lastBackup.Blocks) { + blockChan <- &Block{ + offset: backup.Blocks[b].Offset, + blockChecksum: backup.Blocks[b].BlockChecksum, + compressionMethod: backup.CompressionMethod, + } + b++ + continue } - b++ - continue - } - bB := backup.Blocks[b] - lB := lastBackup.Blocks[l] - if bB.Offset == lB.Offset { - if bB.BlockChecksum != lB.BlockChecksum { - if err := restoreBlockToFile(srcVolumeName, volDev, bsDriver, bB); err != nil { - return err + bB := backup.Blocks[b] + lB := lastBackup.Blocks[l] + if bB.Offset == lB.Offset { + if bB.BlockChecksum != lB.BlockChecksum { + blockChan <- &Block{ + offset: bB.Offset, + blockChecksum: bB.BlockChecksum, + compressionMethod: backup.CompressionMethod, + } } + b++ + l++ + } else if bB.Offset < lB.Offset { + blockChan <- &Block{ + offset: bB.Offset, + blockChecksum: bB.BlockChecksum, + compressionMethod: backup.CompressionMethod, + } + b++ + } else { + blockChan <- &Block{ + offset: lB.Offset, + isZeroBlock: true, + } + l++ } - b++ - l++ - } else if bB.Offset < lB.Offset { - if err := restoreBlockToFile(srcVolumeName, volDev, bsDriver, bB); err != nil { - return err - } - b++ - } else { - if err := fillBlockToFile(&emptyBlock, volDev, lB.Offset); err != nil { - return err + } + }() + + return blockChan, errChan +} + +func populateBlocksForFullRestore(bsDriver BackupStoreDriver, backup *Backup) (<-chan *Block, <-chan error) { + blockChan := make(chan *Block, 10) + errChan := make(chan error, 1) + + go func() { + defer close(blockChan) + defer close(errChan) + + for _, block := range backup.Blocks { + blockChan <- &Block{ + offset: block.Offset, + blockChecksum: block.BlockChecksum, + compressionMethod: backup.CompressionMethod, } - l++ } - progress = int((float64(b+l+2) / float64(total)) * PROGRESS_PERCENTAGE_BACKUP_SNAPSHOT) - deltaOps.UpdateRestoreStatus(volDevName, progress, nil) + }() + + return blockChan, errChan +} + +func restoreBlock(bsDriver BackupStoreDriver, config *DeltaRestoreConfig, + volumeName string, volDev *os.File, block *Block, progress *progress) error { + deltaOps := config.DeltaOps + + defer func() { + progress.Lock() + defer progress.Unlock() + + progress.processedBlockCounts++ + progress.progress = getProgress(progress.totalBlockCounts, progress.processedBlockCounts) + deltaOps.UpdateRestoreStatus(volumeName, progress.progress, nil) + }() + + if block.isZeroBlock { + return fillZeros(volDev, block.offset, DEFAULT_BLOCK_SIZE) } - return nil + + return restoreBlockToFile(bsDriver, volumeName, volDev, block.compressionMethod, + BlockMapping{ + Offset: block.offset, + BlockChecksum: block.blockChecksum, + }) } -func fillBlockToFile(block *[]byte, volDev *os.File, offset int64) error { - if 
_, err := volDev.WriteAt(*block, offset); err != nil { - return err +func restoreBlocks(ctx context.Context, bsDriver BackupStoreDriver, config *DeltaRestoreConfig, + volumeName string, in <-chan *Block, progress *progress) <-chan error { + errChan := make(chan error, 1) + + go func() { + defer close(errChan) + + volDevName := config.Filename + volDev, err := os.OpenFile(volDevName, os.O_RDWR, 0666) + if err != nil { + errChan <- err + return + } + defer volDev.Close() + + for { + select { + case <-ctx.Done(): + return + case block, open := <-in: + if !open { + return + } + + if err := restoreBlock(bsDriver, config, volumeName, volDev, block, progress); err != nil { + errChan <- err + return + } + } + } + }() + + return errChan +} + +func performIncrementalRestore(bsDriver BackupStoreDriver, config *DeltaRestoreConfig, + srcVolumeName, volDevName string, lastBackup *Backup, backup *Backup) error { + var err error + concurrentLimit := config.ConcurrentLimit + + progress := &progress{ + totalBlockCounts: int64(len(backup.Blocks) + len(lastBackup.Blocks)), } - return nil + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blockChan, errChan := populateBlocksForIncrementalRestore(bsDriver, lastBackup, backup) + + errorChans := []<-chan error{errChan} + for i := 0; i < int(concurrentLimit); i++ { + errorChans = append(errorChans, restoreBlocks(ctx, bsDriver, config, srcVolumeName, blockChan, progress)) + } + + mergedErrChan := mergeErrorChannels(ctx, errorChans...) + err = <-mergedErrChan + + if err != nil { + logrus.WithError(err).Errorf("Failed to incrementally restore volume %v backup %v", srcVolumeName, backup.Name) + } + + return err +} + +func fillZeros(volDev *os.File, offset, length int64) error { + return syscall.Fallocate(int(volDev.Fd()), 0, offset, length) } func DeleteBackupVolume(volumeName string, destURL string) error { @@ -721,18 +1087,14 @@ func DeleteBackupVolume(volumeName string, destURL string) error { return err } defer lock.Unlock() - if err := removeVolume(volumeName, bsDriver); err != nil { - return err - } - return nil + return removeVolume(volumeName, bsDriver) } func checkBlockReferenceCount(blockInfos map[string]*BlockInfo, backup *Backup, volumeName string, driver BackupStoreDriver) { for _, block := range backup.Blocks { info, known := blockInfos[block.BlockChecksum] if !known { - log.Errorf("backup %v refers to unknown block %v", - backup.Name, block.BlockChecksum) + log.Errorf("Backup %v refers to unknown block %v", backup.Name, block.BlockChecksum) info = &BlockInfo{checksum: block.BlockChecksum} blockInfos[block.BlockChecksum] = info } @@ -750,12 +1112,12 @@ func getLatestBackup(backup *Backup, lastBackup *Backup) error { backupTime, err := time.Parse(time.RFC3339, backup.SnapshotCreatedAt) if err != nil { - return fmt.Errorf("Cannot parse backup %v time %v due to %v", backup.Name, backup.SnapshotCreatedAt, err) + return errors.Wrapf(err, "cannot parse backup %v time %v", backup.Name, backup.SnapshotCreatedAt) } lastBackupTime, err := time.Parse(time.RFC3339, lastBackup.SnapshotCreatedAt) if err != nil { - return fmt.Errorf("Cannot parse last backup %v time %v due to %v", lastBackup.Name, lastBackup.SnapshotCreatedAt, err) + return errors.Wrapf(err, "cannot parse last backup %v time %v", lastBackup.Name, lastBackup.SnapshotCreatedAt) } if backupTime.After(lastBackupTime) { @@ -790,9 +1152,9 @@ func DeleteDeltaBlockBackup(backupURL string) error { defer lock.Unlock() // If we fail to load the backup we still want to proceed with 
the deletion of the backup file - backupToBeDeleted, err := loadBackup(backupName, volumeName, bsDriver) + backupToBeDeleted, err := loadBackup(bsDriver, backupName, volumeName) if err != nil { - log.WithError(err).Warn("failed to load to be deleted backup") + log.WithError(err).Warn("Failed to load to be deleted backup") backupToBeDeleted = &Backup{ Name: backupName, VolumeName: volumeName, @@ -805,9 +1167,9 @@ func DeleteDeltaBlockBackup(backupURL string) error { } log.Info("Removed backup for volume") - v, err := loadVolume(volumeName, bsDriver) + v, err := loadVolume(bsDriver, volumeName) if err != nil { - return fmt.Errorf("Cannot find volume in backupstore due to: %v", err) + return errors.Wrap(err, "cannot find volume in backupstore") } updateLastBackup := false if backupToBeDeleted.Name == v.LastBackupName { @@ -818,14 +1180,14 @@ func DeleteDeltaBlockBackup(backupURL string) error { log.Debug("GC started") deleteBlocks := true - backupNames, err := getBackupNamesForVolume(volumeName, bsDriver) + backupNames, err := getBackupNamesForVolume(bsDriver, volumeName) if err != nil { - log.WithError(err).Warn("failed to load backup names, skip block deletion") + log.WithError(err).Warn("Failed to load backup names, skip block deletion") deleteBlocks = false } blockInfos := make(map[string]*BlockInfo) - blockNames, err := getBlockNamesForVolume(volumeName, bsDriver) + blockNames, err := getBlockNamesForVolume(bsDriver, volumeName) if err != nil { return err } @@ -840,9 +1202,9 @@ func DeleteDeltaBlockBackup(backupURL string) error { lastBackup := &Backup{} for _, name := range backupNames { log := log.WithField("backup", name) - backup, err := loadBackup(name, volumeName, bsDriver) + backup, err := loadBackup(bsDriver, name, volumeName) if err != nil { - log.WithError(err).Warn("failed to load backup, skip block deletion") + log.WithError(err).Warn("Failed to load backup, skip block deletion") deleteBlocks = false break } @@ -861,7 +1223,7 @@ func DeleteDeltaBlockBackup(backupURL string) error { if updateLastBackup { err := getLatestBackup(backup, lastBackup) if err != nil { - log.WithError(err).Warn("failed to find last backup, skip block deletion") + log.WithError(err).Warn("Failed to find last backup, skip block deletion") deleteBlocks = false break } @@ -872,14 +1234,14 @@ func DeleteDeltaBlockBackup(backupURL string) error { v.LastBackupName = lastBackup.Name v.LastBackupAt = lastBackup.SnapshotCreatedAt } - if err := saveVolume(v, bsDriver); err != nil { + if err := saveVolume(bsDriver, v); err != nil { return err } } // check if there have been new backups created while we where processing prevBackupNames := backupNames - backupNames, err = getBackupNamesForVolume(volumeName, bsDriver) + backupNames, err = getBackupNamesForVolume(bsDriver, volumeName) if err != nil || !util.UnorderedEqual(prevBackupNames, backupNames) { log.Info("Found new backups for volume, skip block deletion") deleteBlocks = false @@ -887,14 +1249,14 @@ func DeleteDeltaBlockBackup(backupURL string) error { // only delete the blocks if it is safe to do so if deleteBlocks { - if err := cleanupBlocks(blockInfos, volumeName, bsDriver); err != nil { + if err := cleanupBlocks(bsDriver, blockInfos, volumeName); err != nil { return err } } return nil } -func cleanupBlocks(blockMap map[string]*BlockInfo, volume string, driver BackupStoreDriver) error { +func cleanupBlocks(driver BackupStoreDriver, blockMap map[string]*BlockInfo, volume string) error { var deletionFailures []string activeBlockCount := int64(0) 
deletedBlockCount := int64(0) @@ -904,7 +1266,7 @@ func cleanupBlocks(blockMap map[string]*BlockInfo, volume string, driver BackupS deletionFailures = append(deletionFailures, blk.checksum) continue } - log.Debugf("deleted block %v for volume %v", blk.checksum, volume) + log.Debugf("Deleted block %v for volume %v", blk.checksum, volume) deletedBlockCount++ } else if isBlockReferenced(blk) && isBlockPresent(blk) { activeBlockCount++ @@ -919,20 +1281,17 @@ func cleanupBlocks(blockMap map[string]*BlockInfo, volume string, driver BackupS log.Debugf("Removed %v unused blocks for volume %v", deletedBlockCount, volume) log.Debug("GC completed") - v, err := loadVolume(volume, driver) + v, err := loadVolume(driver, volume) if err != nil { return err } // update the block count to what we actually have on disk that is in use v.BlockCount = activeBlockCount - if err := saveVolume(v, driver); err != nil { - return err - } - return nil + return saveVolume(driver, v) } -func getBlockNamesForVolume(volumeName string, driver BackupStoreDriver) ([]string, error) { +func getBlockNamesForVolume(driver BackupStoreDriver, volumeName string) ([]string, error) { names := []string{} blockPathBase := getBlockPath(volumeName) lv1Dirs, err := driver.List(blockPathBase) diff --git a/vendor/github.com/longhorn/backupstore/driver.go b/vendor/github.com/longhorn/backupstore/driver.go index d3079eaf7..443a13979 100644 --- a/vendor/github.com/longhorn/backupstore/driver.go +++ b/vendor/github.com/longhorn/backupstore/driver.go @@ -35,6 +35,10 @@ var ( log = logrus.WithFields(logrus.Fields{"pkg": "backupstore"}) ) +func GetLog() logrus.FieldLogger { + return log +} + func generateError(fields logrus.Fields, format string, v ...interface{}) error { return ErrorWithFields("backupstore", fields, format, v...) 
} @@ -61,14 +65,14 @@ func unregisterDriver(kind string) error { func GetBackupStoreDriver(destURL string) (BackupStoreDriver, error) { if destURL == "" { - return nil, fmt.Errorf("Destination URL hasn't been specified") + return nil, fmt.Errorf("destination URL hasn't been specified") } u, err := url.Parse(destURL) if err != nil { return nil, err } if _, exists := initializers[u.Scheme]; !exists { - return nil, fmt.Errorf("Driver %v is not supported!", u.Scheme) + return nil, fmt.Errorf("driver %v is not supported", u.Scheme) } return initializers[u.Scheme](destURL) } diff --git a/vendor/github.com/longhorn/backupstore/fsops/fsops.go b/vendor/github.com/longhorn/backupstore/fsops/fsops.go index fafe6d540..157ff9db1 100644 --- a/vendor/github.com/longhorn/backupstore/fsops/fsops.go +++ b/vendor/github.com/longhorn/backupstore/fsops/fsops.go @@ -29,10 +29,7 @@ func NewFileSystemOperator(ops FileSystemOps) *FileSystemOperator { } func (f *FileSystemOperator) preparePath(file string) error { - if err := os.MkdirAll(filepath.Dir(f.LocalPath(file)), os.ModeDir|0700); err != nil { - return err - } - return nil + return os.MkdirAll(filepath.Dir(f.LocalPath(file)), os.ModeDir|0700) } func (f *FileSystemOperator) FileSize(filePath string) int64 { @@ -140,16 +137,10 @@ func (f *FileSystemOperator) Upload(src, dst string) error { return err } _, err = util.Execute("mv", []string{f.LocalPath(tmpDst), f.LocalPath(dst)}) - if err != nil { - return err - } - return nil + return err } func (f *FileSystemOperator) Download(src, dst string) error { _, err := util.Execute("cp", []string{f.LocalPath(src), dst}) - if err != nil { - return err - } - return nil + return err } diff --git a/vendor/github.com/longhorn/backupstore/go.mod b/vendor/github.com/longhorn/backupstore/go.mod index 79f8d8cc9..f0021811e 100644 --- a/vendor/github.com/longhorn/backupstore/go.mod +++ b/vendor/github.com/longhorn/backupstore/go.mod @@ -6,11 +6,13 @@ require ( github.com/aws/aws-sdk-go v1.34.2 github.com/google/uuid v1.3.0 github.com/honestbee/jobq v1.0.2 + github.com/pierrec/lz4/v4 v4.1.17 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.3.0 github.com/spf13/afero v1.5.1 github.com/stretchr/testify v1.7.0 github.com/urfave/cli v1.14.0 + golang.org/x/net v0.0.0-20200202094626-16171245cfb2 gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/vendor/github.com/longhorn/backupstore/go.sum b/vendor/github.com/longhorn/backupstore/go.sum index 27e7cede9..57beb2cb0 100644 --- a/vendor/github.com/longhorn/backupstore/go.sum +++ b/vendor/github.com/longhorn/backupstore/go.sum @@ -13,6 +13,8 @@ github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeY github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= +github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/github.com/longhorn/backupstore/http/client.go 
b/vendor/github.com/longhorn/backupstore/http/client.go index f2300e695..85fcdd5f0 100644 --- a/vendor/github.com/longhorn/backupstore/http/client.go +++ b/vendor/github.com/longhorn/backupstore/http/client.go @@ -5,6 +5,9 @@ import ( "crypto/x509" "fmt" "net/http" + "net/url" + + "golang.org/x/net/http/httpproxy" ) func getSystemCerts() *x509.CertPool { @@ -43,6 +46,9 @@ func GetClient(insecure bool, customCerts []byte) (*http.Client, error) { InsecureSkipVerify: insecure, RootCAs: certs, } + customTransport.Proxy = func(request *http.Request) (*url.URL, error) { + return httpproxy.FromEnvironment().ProxyFunc()(request.URL) + } client := &http.Client{Transport: customTransport} return client, nil } diff --git a/vendor/github.com/longhorn/backupstore/inspect.go b/vendor/github.com/longhorn/backupstore/inspect.go index 080b691f5..7c196fbad 100644 --- a/vendor/github.com/longhorn/backupstore/inspect.go +++ b/vendor/github.com/longhorn/backupstore/inspect.go @@ -19,7 +19,7 @@ func InspectVolume(volumeURL string) (*VolumeInfo, error) { return nil, err } - volume, err := loadVolume(volumeName, driver) + volume, err := loadVolume(driver, volumeName) if err != nil { return nil, err } @@ -38,12 +38,12 @@ func InspectBackup(backupURL string) (*BackupInfo, error) { return nil, err } - volume, err := loadVolume(volumeName, driver) + volume, err := loadVolume(driver, volumeName) if err != nil { return nil, err } - backup, err := loadBackup(backupName, volumeName, driver) + backup, err := loadBackup(driver, backupName, volumeName) if err != nil { log.WithFields(logrus.Fields{ LogFieldReason: LogReasonFallback, @@ -79,14 +79,15 @@ func fillVolumeInfo(volume *Volume) *VolumeInfo { func fillBackupInfo(backup *Backup, destURL string) *BackupInfo { return &BackupInfo{ - Name: backup.Name, - URL: EncodeBackupURL(backup.Name, backup.VolumeName, destURL), - SnapshotName: backup.SnapshotName, - SnapshotCreated: backup.SnapshotCreatedAt, - Created: backup.CreatedTime, - Size: backup.Size, - Labels: backup.Labels, - IsIncremental: backup.IsIncremental, + Name: backup.Name, + URL: EncodeBackupURL(backup.Name, backup.VolumeName, destURL), + SnapshotName: backup.SnapshotName, + SnapshotCreated: backup.SnapshotCreatedAt, + Created: backup.CreatedTime, + Size: backup.Size, + Labels: backup.Labels, + IsIncremental: backup.IsIncremental, + CompressionMethod: backup.CompressionMethod, } } diff --git a/vendor/github.com/longhorn/backupstore/list.go b/vendor/github.com/longhorn/backupstore/list.go index dde1babf2..0d2b20737 100644 --- a/vendor/github.com/longhorn/backupstore/list.go +++ b/vendor/github.com/longhorn/backupstore/list.go @@ -32,14 +32,15 @@ type VolumeInfo struct { } type BackupInfo struct { - Name string - URL string - SnapshotName string - SnapshotCreated string - Created string - Size int64 `json:",string"` - Labels map[string]string - IsIncremental bool + Name string + URL string + SnapshotName string + SnapshotCreated string + Created string + Size int64 `json:",string"` + Labels map[string]string + IsIncremental bool + CompressionMethod string `json:",omitempty"` VolumeName string `json:",omitempty"` VolumeSize int64 `json:",string,omitempty"` @@ -49,17 +50,17 @@ type BackupInfo struct { Messages map[MessageType]string } -func addListVolume(volumeName string, driver BackupStoreDriver, volumeOnly bool) (*VolumeInfo, error) { +func addListVolume(driver BackupStoreDriver, volumeName string, volumeOnly bool) (*VolumeInfo, error) { if volumeName == "" { - return nil, fmt.Errorf("Invalid empty volume Name") + 
return nil, fmt.Errorf("invalid empty volume Name") } if !util.ValidateName(volumeName) { - return nil, fmt.Errorf("Invalid volume name %v", volumeName) + return nil, fmt.Errorf("invalid volume name %v", volumeName) } volumeInfo := &VolumeInfo{Messages: make(map[MessageType]string)} - if !volumeExists(volumeName, driver) { + if !volumeExists(driver, volumeName) { // If the backup volume folder exist but volume.cfg not exist // save the error in Messages field volumeInfo.Messages[MessageTypeError] = fmt.Sprintf("cannot find %v in backupstore", getVolumeFilePath(volumeName)) @@ -71,7 +72,7 @@ func addListVolume(volumeName string, driver BackupStoreDriver, volumeOnly bool) } // try to find all backups for this volume - backupNames, err := getBackupNamesForVolume(volumeName, driver) + backupNames, err := getBackupNamesForVolume(driver, volumeName) if err != nil { volumeInfo.Messages[MessageTypeError] = err.Error() return volumeInfo, nil @@ -108,7 +109,7 @@ func List(volumeName, destURL string, volumeOnly bool) (map[string]*VolumeInfo, var errs []string for _, volumeName := range volumeNames { - volumeInfo, err := addListVolume(volumeName, driver, volumeOnly) + volumeInfo, err := addListVolume(driver, volumeName, volumeOnly) if err != nil { errs = append(errs, err.Error()) continue diff --git a/vendor/github.com/longhorn/backupstore/lock.go b/vendor/github.com/longhorn/backupstore/lock.go index e5a708856..4dcf03629 100644 --- a/vendor/github.com/longhorn/backupstore/lock.go +++ b/vendor/github.com/longhorn/backupstore/lock.go @@ -62,7 +62,7 @@ func (lock *FileLock) canAcquire() bool { canAcquire := true locks := getLocksForVolume(lock.volume, lock.driver) file := getLockFilePath(lock.volume, lock.Name) - log.WithField("lock", lock).Debugf("trying to acquire lock %v", file) + log.WithField("lock", lock).Debugf("Trying to acquire lock %v", file) log.Debugf("backupstore volume %v contains locks %v", lock.volume, locks) for _, serverLock := range locks { @@ -164,7 +164,7 @@ func (lock *FileLock) Unlock() error { func loadLock(volumeName string, name string, driver BackupStoreDriver) (*FileLock, error) { lock := &FileLock{} file := getLockFilePath(volumeName, name) - if err := loadConfigInBackupStore(file, driver, lock); err != nil { + if err := LoadConfigInBackupStore(driver, file, lock); err != nil { return nil, err } lock.serverTime = driver.FileTime(file) @@ -183,7 +183,7 @@ func removeLock(lock *FileLock) error { func saveLock(lock *FileLock) error { file := getLockFilePath(lock.volume, lock.Name) - if err := saveConfigInBackupStore(file, lock.driver, lock); err != nil { + if err := SaveConfigInBackupStore(lock.driver, file, lock); err != nil { return err } lock.serverTime = lock.driver.FileTime(file) @@ -226,7 +226,7 @@ func getLocksForVolume(volumeName string, driver BackupStoreDriver) []*FileLock lock, err := loadLock(volumeName, name, driver) if err != nil { file := getLockFilePath(volumeName, name) - log.Warnf("failed to load lock %v on backupstore reason %v", file, err) + log.WithError(err).Warnf("Failed to load lock %v on backupstore", file) continue } locks = append(locks, lock) diff --git a/vendor/github.com/longhorn/backupstore/logging/logging.go b/vendor/github.com/longhorn/backupstore/logging/logging.go index e57d9e96e..c040e98d9 100644 --- a/vendor/github.com/longhorn/backupstore/logging/logging.go +++ b/vendor/github.com/longhorn/backupstore/logging/logging.go @@ -18,6 +18,7 @@ const ( LogFieldLastSnapshot = "last_snapshot" LogEventBackupURL = "backup_url" LogFieldDestURL = 
"dest_url" + LogFieldSourceURL = "source_url" LogFieldKind = "kind" LogFieldFilepath = "filepath" diff --git a/vendor/github.com/longhorn/backupstore/nfs/nfs.go b/vendor/github.com/longhorn/backupstore/nfs/nfs.go index 15af7aea1..ef39d30a4 100644 --- a/vendor/github.com/longhorn/backupstore/nfs/nfs.go +++ b/vendor/github.com/longhorn/backupstore/nfs/nfs.go @@ -7,16 +7,18 @@ import ( "strings" "time" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/longhorn/backupstore" "github.com/longhorn/backupstore/fsops" "github.com/longhorn/backupstore/util" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var ( - log = logrus.WithFields(logrus.Fields{"pkg": "nfs"}) - MinorVersions = []string{"4.2", "4.1", "4.0"} + log = logrus.WithFields(logrus.Fields{"pkg": "nfs"}) + MinorVersions = []string{"4.2", "4.1", "4.0"} + defaultTimeout = 5 * time.Second ) type BackupStoreDriver struct { @@ -59,24 +61,24 @@ func initFunc(destURL string) (backupstore.BackupStoreDriver, error) { return nil, fmt.Errorf("NFS path must follow: nfs://server:/path/ format") } if u.Path == "" { - return nil, fmt.Errorf("Cannot find nfs path") + return nil, fmt.Errorf("cannot find nfs path") } b.serverPath = u.Host + u.Path b.mountDir = filepath.Join(MountDir, strings.TrimRight(strings.Replace(u.Host, ".", "_", -1), ":"), u.Path) - if _, err = util.ExecuteWithCustomTimeout("mkdir", []string{"-m", "700", "-p", b.mountDir}, 3*time.Second); err != nil { - return nil, fmt.Errorf("Cannot create mount directory %v for NFS server: %v", b.mountDir, err) + if _, err = util.ExecuteWithCustomTimeout("mkdir", []string{"-m", "700", "-p", b.mountDir}, defaultTimeout); err != nil { + return nil, errors.Wrapf(err, "cannot create mount directory %v for NFS server", b.mountDir) } if err := b.mount(); err != nil { - return nil, fmt.Errorf("Cannot mount nfs %v: %v", b.serverPath, err) + return nil, errors.Wrapf(err, "cannot mount nfs %v", b.serverPath) } if _, err := b.List(""); err != nil { return nil, fmt.Errorf("NFS path %v doesn't exist or is not a directory", b.serverPath) } b.destURL = KIND + "://" + b.serverPath - log.Debugf("Loaded driver for %v", b.destURL) + log.Infof("Loaded driver for %v", b.destURL) return b, nil } @@ -88,8 +90,8 @@ func (b *BackupStoreDriver) mount() (err error) { retErr := errors.New("Cannot mount using NFSv4") for _, version := range MinorVersions { - log.Debugf("attempting mount for nfs path %v with nfsvers %v", b.serverPath, version) - _, err = util.Execute("mount", []string{"-t", "nfs4", "-o", fmt.Sprintf("nfsvers=%v", version), "-o", "actimeo=1", b.serverPath, b.mountDir}) + log.Debugf("Attempting mount for nfs path %v with nfsvers %v", b.serverPath, version) + _, err = util.ExecuteWithCustomTimeout("mount", []string{"-t", "nfs4", "-o", fmt.Sprintf("nfsvers=%v", version), "-o", "actimeo=1", b.serverPath, b.mountDir}, defaultTimeout) if err == nil { return nil } diff --git a/vendor/github.com/longhorn/backupstore/s3/s3.go b/vendor/github.com/longhorn/backupstore/s3/s3.go index aef885092..a57648f7a 100644 --- a/vendor/github.com/longhorn/backupstore/s3/s3.go +++ b/vendor/github.com/longhorn/backupstore/s3/s3.go @@ -10,9 +10,10 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/sirupsen/logrus" + "github.com/longhorn/backupstore" "github.com/longhorn/backupstore/http" - "github.com/sirupsen/logrus" ) var ( @@ -60,13 +61,12 @@ func initFunc(destURL string) (backupstore.BackupStoreDriver, error) { } // add custom ca to http client that is used by s3 service - if customCerts 
:= getCustomCerts(); customCerts != nil { - client, err := http.GetClientWithCustomCerts(customCerts) - if err != nil { - return nil, err - } - b.service.Client = client + customCerts := getCustomCerts() + client, err := http.GetClientWithCustomCerts(customCerts) + if err != nil { + return nil, err } + b.service.Client = client //Leading '/' can cause mystery problems for s3 b.path = strings.TrimLeft(b.path, "/") @@ -82,7 +82,7 @@ func initFunc(destURL string) (backupstore.BackupStoreDriver, error) { } b.destURL += "/" + b.path - log.Debugf("Loaded driver for %v", b.destURL) + log.Infof("Loaded driver for %v", b.destURL) return b, nil } @@ -114,7 +114,7 @@ func (s *BackupStoreDriver) List(listPath string) ([]string, error) { path := s.updatePath(listPath) + "/" contents, prefixes, err := s.service.ListObjects(path, "/") if err != nil { - log.Error("Fail to list s3: ", err) + log.WithError(err).Error("Failed to list s3") return result, err } @@ -195,11 +195,17 @@ func (s *BackupStoreDriver) Download(src, dst string) error { if _, err := os.Stat(dst); err != nil { os.Remove(dst) } + + if err := os.MkdirAll(filepath.Dir(dst), os.ModeDir|0700); err != nil { + return err + } + f, err := os.Create(dst) if err != nil { return err } defer f.Close() + path := s.updatePath(src) rc, err := s.service.GetObject(path) if err != nil { @@ -208,8 +214,5 @@ func (s *BackupStoreDriver) Download(src, dst string) error { defer rc.Close() _, err = io.Copy(f, rc) - if err != nil { - return err - } - return nil + return err } diff --git a/vendor/github.com/longhorn/backupstore/s3/s3_service.go b/vendor/github.com/longhorn/backupstore/s3/s3_service.go index 6c516dcbd..6cee12e7d 100644 --- a/vendor/github.com/longhorn/backupstore/s3/s3_service.go +++ b/vendor/github.com/longhorn/backupstore/s3/s3_service.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" + "github.com/pkg/errors" ) type Service struct { @@ -163,12 +164,12 @@ func (s *Service) DeleteObjects(key string) error { objects, _, err := s.ListObjects(key, "") if err != nil { - return fmt.Errorf("failed to list objects with prefix %v before removing them error: %v", key, err) + return errors.Wrapf(err, "failed to list objects with prefix %v before removing them", key) } svc, err := s.New() if err != nil { - return fmt.Errorf("failed to get a new s3 client instance before removing objects: %v", err) + return errors.Wrap(err, "failed to get a new s3 client instance before removing objects") } defer s.Close() @@ -180,7 +181,7 @@ func (s *Service) DeleteObjects(key string) error { }) if err != nil { - log.Errorf("failed to delete object: %v response: %v error: %v", + log.Errorf("Failed to delete object: %v response: %v error: %v", aws.StringValue(object.Key), resp.String(), parseAwsError(err)) deletionFailures = append(deletionFailures, aws.StringValue(object.Key)) } diff --git a/vendor/github.com/longhorn/backupstore/singlefile.go b/vendor/github.com/longhorn/backupstore/singlefile.go index 24af97cb9..82b64ee5b 100644 --- a/vendor/github.com/longhorn/backupstore/singlefile.go +++ b/vendor/github.com/longhorn/backupstore/singlefile.go @@ -1,13 +1,13 @@ package backupstore import ( - "fmt" "path/filepath" - "github.com/longhorn/backupstore/util" + "github.com/pkg/errors" "github.com/sirupsen/logrus" . 
"github.com/longhorn/backupstore/logging" + "github.com/longhorn/backupstore/util" ) const ( @@ -29,11 +29,11 @@ func CreateSingleFileBackup(volume *Volume, snapshot *Snapshot, filePath, destUR return "", err } - if err := addVolume(volume, driver); err != nil { + if err := addVolume(driver, volume); err != nil { return "", err } - volume, err = loadVolume(volume.Name, driver) + volume, err = loadVolume(driver, volume.Name) if err != nil { return "", err } @@ -51,6 +51,7 @@ func CreateSingleFileBackup(volume *Volume, snapshot *Snapshot, filePath, destUR VolumeName: volume.Name, SnapshotName: snapshot.Name, SnapshotCreatedAt: snapshot.CreatedTime, + CompressionMethod: volume.CompressionMethod, } backup.SingleFile.FilePath = getSingleFileBackupFilePath(backup) @@ -59,7 +60,7 @@ func CreateSingleFileBackup(volume *Volume, snapshot *Snapshot, filePath, destUR } backup.CreatedTime = util.Now() - if err := saveBackup(backup, driver); err != nil { + if err := saveBackup(driver, backup); err != nil { return "", err } @@ -84,14 +85,14 @@ func RestoreSingleFileBackup(backupURL, path string) (string, error) { return "", err } - if _, err := loadVolume(srcVolumeName, driver); err != nil { + if _, err := loadVolume(driver, srcVolumeName); err != nil { return "", generateError(logrus.Fields{ LogFieldVolume: srcVolumeName, LogEventBackupURL: backupURL, }, "Volume doesn't exist in backupstore: %v", err) } - backup, err := loadBackup(srcBackupName, srcVolumeName, driver) + backup, err := loadBackup(driver, srcBackupName, srcVolumeName) if err != nil { return "", err } @@ -115,12 +116,12 @@ func DeleteSingleFileBackup(backupURL string) error { return err } - _, err = loadVolume(volumeName, driver) + _, err = loadVolume(driver, volumeName) if err != nil { - return fmt.Errorf("Cannot find volume %v in backupstore due to: %v", volumeName, err) + return errors.Wrapf(err, "cannot find volume %v in backupstore", volumeName) } - backup, err := loadBackup(backupName, volumeName, driver) + backup, err := loadBackup(driver, backupName, volumeName) if err != nil { return err } @@ -129,9 +130,5 @@ func DeleteSingleFileBackup(backupURL string) error { return err } - if err := removeBackup(backup, driver); err != nil { - return err - } - - return nil + return removeBackup(backup, driver) } diff --git a/vendor/github.com/longhorn/backupstore/util/util.go b/vendor/github.com/longhorn/backupstore/util/util.go index be23b864b..b6ed34a78 100644 --- a/vendor/github.com/longhorn/backupstore/util/util.go +++ b/vendor/github.com/longhorn/backupstore/util/util.go @@ -4,17 +4,20 @@ import ( "bytes" "compress/gzip" "context" + "crypto/sha256" "crypto/sha512" "encoding/hex" "fmt" "io" "io/ioutil" + "os" "os/exec" "regexp" "strings" "time" "github.com/google/uuid" + lz4 "github.com/pierrec/lz4/v4" "github.com/sirupsen/logrus" ) @@ -26,6 +29,14 @@ var ( cmdTimeout = time.Minute // one minute by default ) +// NopCloser wraps an io.Witer as io.WriteCloser +// with noop Close +type NopCloser struct { + io.Writer +} + +func (NopCloser) Close() error { return nil } + func GenerateName(prefix string) string { suffix := strings.Replace(NewUUID(), "-", "", -1) return prefix + "-" + suffix[:16] @@ -41,19 +52,43 @@ func GetChecksum(data []byte) string { return checksum } -func CompressData(data []byte) (io.ReadSeeker, error) { - var b bytes.Buffer - w := gzip.NewWriter(&b) +func GetFileChecksum(filePath string) (string, error) { + f, err := os.Open(filePath) + if err != nil { + return "", err + } + defer f.Close() + + h := sha256.New() + if _, 
err := io.Copy(h, f); err != nil { + return "", err + } + + return hex.EncodeToString(h.Sum(nil)), nil +} + +func CompressData(method string, data []byte) (io.ReadSeeker, error) { + if method == "none" { + return bytes.NewReader(data), nil + } + + var buffer bytes.Buffer + + w, err := newCompressionWriter(method, &buffer) + if err != nil { + return nil, err + } + if _, err := w.Write(data); err != nil { w.Close() return nil, err } w.Close() - return bytes.NewReader(b.Bytes()), nil + return bytes.NewReader(buffer.Bytes()), nil } -func DecompressAndVerify(src io.Reader, checksum string) (io.Reader, error) { - r, err := gzip.NewReader(src) +func DecompressAndVerify(method string, src io.Reader, checksum string) (io.Reader, error) { + r, err := newDecompressionReader(method, src) if err != nil { return nil, err } @@ -68,6 +103,30 @@ func DecompressAndVerify(src io.Reader, checksum string) (io.Reader, error) { return bytes.NewReader(block), nil } +func newCompressionWriter(method string, buffer io.Writer) (io.WriteCloser, error) { + switch method { + case "gzip": + return gzip.NewWriter(buffer), nil + case "lz4": + return lz4.NewWriter(buffer), nil + default: + return nil, fmt.Errorf("unsupported compression method: %v", method) + } +} + +func newDecompressionReader(method string, r io.Reader) (io.ReadCloser, error) { + switch method { + case "none": + return ioutil.NopCloser(r), nil + case "gzip": + return gzip.NewReader(r) + case "lz4": + return ioutil.NopCloser(lz4.NewReader(r)), nil + default: + return nil, fmt.Errorf("unsupported decompression method: %v", method) + } +} + func Now() string { return time.Now().UTC().Format(time.RFC3339) } @@ -154,11 +213,11 @@ func execute(ctx context.Context, binary string, args []string) (string, error) case <-done: break case <-ctx.Done(): - return "", fmt.Errorf("Timeout executing: %v %v, output %v, error %v", binary, args, string(output), err) + return "", fmt.Errorf("timeout executing: %v %v, output %v, error %v", binary, args, string(output), err) } if err != nil { - return "", fmt.Errorf("Failed to execute: %v %v, output %v, error %v", binary, args, string(output), err) + return "", fmt.Errorf("failed to execute: %v %v, output %v, error %v", binary, args, string(output), err) } return string(output), nil diff --git a/vendor/github.com/longhorn/backupstore/vfs/vfs.go b/vendor/github.com/longhorn/backupstore/vfs/vfs.go index 74d207ddd..ab1758ab2 100644 --- a/vendor/github.com/longhorn/backupstore/vfs/vfs.go +++ b/vendor/github.com/longhorn/backupstore/vfs/vfs.go @@ -53,14 +53,14 @@ func initFunc(destURL string) (backupstore.BackupStoreDriver, error) { b.path = u.Path if b.path == "" { - return nil, fmt.Errorf("Cannot find vfs path") + return nil, fmt.Errorf("cannot find vfs path") } if _, err := b.List(""); err != nil { return nil, fmt.Errorf("VFS path %v doesn't exist or is not a directory", b.path) } b.destURL = KIND + "://" + b.path - log.Debugf("Loaded driver for %v", b.destURL) + log.Infof("Loaded driver for %v", b.destURL) return b, nil } diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore new file mode 100644 index 000000000..5d7e88de0 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/.gitignore @@ -0,0 +1,36 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 
+.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos + +cmd/*/*exe +.idea + +fuzz/*.zip diff --git a/vendor/github.com/pierrec/lz4/v4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE new file mode 100644 index 000000000..bd899d835 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md new file mode 100644 index 000000000..4629c9d0e --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/README.md @@ -0,0 +1,92 @@ +# lz4 : LZ4 compression in pure Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4) +[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions) +[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) + +## Overview + +This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. +The implementation is based on the reference C [one](https://github.com/lz4/lz4). + +## Install + +Assuming you have the go toolchain installed: + +``` +go get github.com/pierrec/lz4/v4 +``` + +There is a command line interface tool to compress and decompress LZ4 files. + +``` +go install github.com/pierrec/lz4/v4/cmd/lz4c +``` + +Usage + +``` +Usage of lz4c: + -version + print the program version + +Subcommands: +Compress the given files or from stdin to stdout. +compress [arguments] [ ...] 
+ -bc + enable block checksum + -l int + compression level (0=fastest) + -sc + disable stream checksum + -size string + block max size [64K,256K,1M,4M] (default "4M") + +Uncompress the given files or from stdin to stdout. +uncompress [arguments] [ ...] + +``` + + +## Example + +``` +// Compress and uncompress an input string. +s := "hello world" +r := strings.NewReader(s) + +// The pipe will uncompress the data from the writer. +pr, pw := io.Pipe() +zw := lz4.NewWriter(pw) +zr := lz4.NewReader(pr) + +go func() { + // Compress the input string. + _, _ = io.Copy(zw, r) + _ = zw.Close() // Make sure the writer is closed + _ = pw.Close() // Terminate the pipe +}() + +_, _ = io.Copy(os.Stdout, zr) + +// Output: +// hello world +``` + +## Contributing + +Contributions are very welcome for bug fixing, performance improvements...! + +- Open an issue with a proper description +- Send a pull request with appropriate test case(s) + +## Contributors + +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! + +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. + +Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64. + +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. diff --git a/vendor/github.com/pierrec/lz4/v4/go.mod b/vendor/github.com/pierrec/lz4/v4/go.mod new file mode 100644 index 000000000..42229b296 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/go.mod @@ -0,0 +1,3 @@ +module github.com/pierrec/lz4/v4 + +go 1.14 diff --git a/vendor/github.com/pierrec/lz4/v4/go.sum b/vendor/github.com/pierrec/lz4/v4/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go new file mode 100644 index 000000000..fec8adb03 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go @@ -0,0 +1,481 @@ +package lz4block + +import ( + "encoding/binary" + "math/bits" + "sync" + + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +const ( + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise for fast compression. + hashLog = 16 + htSize = 1 << hashLog + + mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. +) + +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + *e = lz4errors.ErrInvalidSourceShortBuffer + } +} + +// blockHash hashes the lower 6 bytes into a value < htSize. 
+func blockHash(x uint64) uint32 { + const prime6bytes = 227718039650203 + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) +} + +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +func UncompressBlock(src, dst, dict []byte) (int, error) { + if len(src) == 0 { + return 0, nil + } + if di := decodeBlock(dst, src, dict); di >= 0 { + return di, nil + } + return 0, lz4errors.ErrInvalidSourceShortBuffer +} + +type Compressor struct { + // Offsets are at most 64kiB, so we can store only the lower 16 bits of + // match positions: effectively, an offset from some 64kiB block boundary. + // + // When we retrieve such an offset, we interpret it as relative to the last + // block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000, + // depending on which of these is inside the current window. If a table + // entry was generated more than 64kiB back in the input, we find out by + // inspecting the input stream. + table [htSize]uint16 + + // Bitmap indicating which positions in the table are in use. + // This allows us to quickly reset the table for reuse, + // without having to zero everything. + inUse [htSize / 32]uint32 +} + +// Get returns the position of a presumptive match for the hash h. +// The match may be a false positive due to a hash collision or an old entry. +// If si < winSize, the return value may be negative. +func (c *Compressor) get(h uint32, si int) int { + h &= htSize - 1 + i := 0 + if c.inUse[h/32]&(1<<(h%32)) != 0 { + i = int(c.table[h]) + } + i += si &^ winMask + if i >= si { + // Try previous 64kiB block (negative when in first block). + i -= winSize + } + return i +} + +func (c *Compressor) put(h uint32, si int) { + h &= htSize - 1 + c.table[h] = uint16(si) + c.inUse[h/32] |= 1 << (h % 32) +} + +func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} } + +var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }} + +func CompressBlock(src, dst []byte) (int, error) { + c := compressorPool.Get().(*Compressor) + n, err := c.CompressBlock(src, dst) + compressorPool.Put(c) + return n, err +} + +func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { + // Zero out reused table to avoid non-deterministic output (issue #65). + c.reset() + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. + for si < sn { + // Hash the next 6 bytes (sequence)... + match := binary.LittleEndian.Uint64(src[si:]) + h := blockHash(match) + h2 := blockHash(match >> 8) + + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 only requires us to load the source one. + ref := c.get(h, si) + ref2 := c.get(h2, si+1) + c.put(h, si) + c.put(h2, si+1) + + offset := si - ref + + if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { + // No match. Start calculating another hash. 
+ // The processor can usually do this out-of-order. + h = blockHash(match >> 16) + ref3 := c.get(h, si+2) + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref3 + c.put(h, si) + + if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. + si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } + } + + // Match found. + lLen := si - anchor // Literal length. + // We already matched 4 bytes. + mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ + } + + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si+8 <= sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop is first non-zero byte. + si += bits.TrailingZeros64(x) >> 3 + break + } + } + + mLen = si - mLen + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF && di < len(dst); l -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(l) + } + di++ + + // Literals. + if di+lLen > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + 2 + anchor = si + + // Encode offset. + if di > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(mLen) + di++ + } + // Check if we can load next values. + if si >= sn { + break + } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + c.put(h, si-2) + } + +lastLiterals: + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. + return 0, nil + } + if di+len(src)-anchor > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} + +// blockHash hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. 
+ return x * hasher >> (32 - winSizeLog) +} + +type CompressorHC struct { + // hashTable: stores the last position found for a given hash + // chainTable: stores previous positions for a given hash + hashTable, chainTable [htSize]int + needsReset bool +} + +var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }} + +func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) { + c := compressorHCPool.Get().(*CompressorHC) + n, err := c.CompressBlock(src, dst, depth) + compressorHCPool.Put(c) + return n, err +} + +func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) { + if c.needsReset { + // Zero out reused table to avoid non-deterministic output (issue #65). + c.hashTable = [htSize]int{} + c.chainTable = [htSize]int{} + } + c.needsReset = true // Only false on first call. + + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + if depth == 0 { + depth = winSize + } + + for si < sn { + // Hash the next 4 bytes (sequence). + match := binary.LittleEndian.Uint32(src[si:]) + h := blockHashHC(match) + + // Follow the chain until out of window and give the longest match. + mLen := 0 + offset := 0 + for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 { + // The first (mLen==0) or next byte (mLen>=minMatch) at current match length + // must match to improve on the match length. + if src[next+mLen] != src[si+mLen] { + continue + } + ml := 0 + // Compare the current position with a previous with the same hash. + for ml < sn-si { + x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) + if x == 0 { + ml += 8 + } else { + // Stop is first non-zero byte. + ml += bits.TrailingZeros64(x) >> 3 + break + } + } + if ml < minMatch || ml <= mLen { + // Match too small (>adaptSkipLog + continue + } + + // Match found. + // Update hash/chain tables with overlapping bytes: + // si already hashed, add everything from si+1 up to the match length. + winStart := si + 1 + if ws := si + mLen - winSize; ws > winStart { + winStart = ws + } + for si, ml := winStart, si+mLen; si < ml; { + match >>= 8 + match |= uint32(src[si+3]) << 24 + h := blockHashHC(match) + c.chainTable[si&winMask] = c.hashTable[h] + c.hashTable[h] = si + si++ + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // Match length does not include minMatch. + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + anchor = si + + // Encode offset. + di += 2 + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. 
+ if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + } + + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. +lastLiterals: + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. + return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go new file mode 100644 index 000000000..a1bfa99e4 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -0,0 +1,90 @@ +// Package lz4block provides LZ4 BlockSize types and pools of buffers. +package lz4block + +import "sync" + +const ( + Block64Kb uint32 = 1 << (16 + iota*2) + Block256Kb + Block1Mb + Block4Mb +) + +// In legacy mode all blocks are compressed regardless +// of the compressed size: use the bound size. +var Block8Mb = uint32(CompressBlockBound(8 << 20)) + +var ( + BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} + BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} + BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }} + BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }} + BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }} +) + +func Index(b uint32) BlockSizeIndex { + switch b { + case Block64Kb: + return 4 + case Block256Kb: + return 5 + case Block1Mb: + return 6 + case Block4Mb: + return 7 + case Block8Mb: // only valid in legacy mode + return 3 + } + return 0 +} + +func IsValid(b uint32) bool { + return Index(b) > 0 +} + +type BlockSizeIndex uint8 + +func (b BlockSizeIndex) IsValid() bool { + switch b { + case 4, 5, 6, 7: + return true + } + return false +} + +func (b BlockSizeIndex) Get() []byte { + var buf interface{} + switch b { + case 4: + buf = BlockPool64K.Get() + case 5: + buf = BlockPool256K.Get() + case 6: + buf = BlockPool1M.Get() + case 7: + buf = BlockPool4M.Get() + case 3: + buf = BlockPool8M.Get() + } + return buf.([]byte) +} + +func Put(buf []byte) { + // Safeguard: do not allow invalid buffers. 
+ switch c := cap(buf); uint32(c) { + case Block64Kb: + BlockPool64K.Put(buf[:c]) + case Block256Kb: + BlockPool256K.Put(buf[:c]) + case Block1Mb: + BlockPool1M.Put(buf[:c]) + case Block4Mb: + BlockPool4M.Put(buf[:c]) + case Block8Mb: + BlockPool8M.Put(buf[:c]) + } +} + +type CompressionLevel uint32 + +const Fast CompressionLevel = 0 diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s new file mode 100644 index 000000000..1d00133fa --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s @@ -0,0 +1,448 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// AX scratch +// BX scratch +// CX literal and match lengths +// DX token, match offset +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// R14 &dict +// R15 len(dict) + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOSPLIT, $48-80 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + CMPQ R9, $0 + JE err_corrupt + ADDQ SI, R9 + + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + + XORL CX, CX + +loop: + // token := uint32(src[si]) + MOVBLZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVL DX, CX + SHRL $4, CX + + // if lit_len != 0xF + CMPL CX, $0xF + JEQ lit_len_loop + CMPQ DI, R12 + JAE copy_literal + CMPQ SI, R13 + JAE copy_literal + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVL DX, CX + ANDL $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. + // offset := uint16(data[:2]) + MOVWLZX (SI), DX + TESTL DX, DX + JE err_corrupt + ADDQ $2, SI + JC err_short_buf + + MOVQ DI, AX + SUBQ DX, AX + JC err_corrupt + CMPQ AX, DI + JA err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. + CMPL CX, $0xF + JEQ match_len_loop_pre + CMPL DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JB match_len_loop_pre + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + LEAQ const_minMatch(DI)(CX*1), DI + + // shortcut complete, load next token + JMP loopcheck + + // Read the rest of the literal length: + // do { BX = src[si++]; lit_len += BX } while (BX == 0xFF). 
+lit_len_loop: + CMPQ SI, R9 + JAE err_short_buf + + MOVBLZX (SI), BX + INCQ SI + ADDQ BX, CX + + CMPB BX, $0xFF + JE lit_len_loop + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + JC err_short_buf + CMPQ AX, R9 + JA err_short_buf + + MOVQ DI, BX + ADDQ CX, BX + JC err_short_buf + CMPQ BX, R8 + JA err_short_buf + + // Copy literals of <=48 bytes through the XMM registers. + CMPQ CX, $48 + JGT memmove_lit + + // if len(dst[di:]) < 48 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $48 + JLT memmove_lit + + // if len(src[si:]) < 48 + MOVQ R9, BX + SUBQ SI, BX + CMPQ BX, $48 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) + + ADDQ CX, SI + ADDQ CX, DI + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + + // Spill registers. Increment SI, DI now so we don't need to save CX. + ADDQ CX, DI + ADDQ CX, SI + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVL DX, 40(SP) + + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVL 40(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + // CX := mLen + // free up DX to use for offset + MOVL DX, CX + ANDL $0xF, CX + + CMPQ SI, R9 + JAE end + + // offset + // si += 2 + // DX := int(src[si-2]) | int(src[si-1])<<8 + ADDQ $2, SI + JC err_short_buf + CMPQ SI, R9 + JA err_short_buf + MOVWQZX -2(SI), DX + + // 0 offset is invalid + TESTL DX, DX + JEQ err_corrupt + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + + // do { BX = src[si++]; mlen += BX } while (BX == 0xFF). 
+match_len_loop: + CMPQ SI, R9 + JAE err_short_buf + + MOVBLZX (SI), BX + INCQ SI + ADDQ BX, CX + + CMPB BX, $0xFF + JE match_len_loop + +copy_match: + ADDQ $const_minMatch, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + JC err_short_buf + CMPQ AX, R8 + JA err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + JC copy_match_from_dict + CMPQ BX, R11 + JBE copy_match_from_dict + + // if offset + match_len < di + LEAQ (BX)(CX*1), AX + CMPQ DI, AX + JA copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + JNZ copy_match_loop + + JMP loopcheck + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + XORL CX, CX + JMP loopcheck + +copy_match_from_dict: + // CX = match_len + // BX = &dst + (di - offset) + + // AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary + MOVQ R11, AX + SUBQ BX, AX + + // BX = len(dict) - dict_bytes_available + MOVQ R15, BX + SUBQ AX, BX + JS err_short_dict + + ADDQ R14, BX + + // if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy + CMPQ CX, AX + JLT memmove_match + + // The match stretches over the dictionary and our block + // 1) copy what comes from the dictionary + // AX = dict_bytes_available = copy_size + // BX = &dict_end - copy_size + // CX = match_len + + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ AX, 16(SP) + // store extra stuff we want to recover + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 16(SP), AX // copy_size + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX // match_len + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + // di+=copy_size + ADDQ AX, DI + + // 2) copy the rest from the current block + // CX = match_len - copy_size = rest_size + SUBQ AX, CX + MOVQ R11, BX + + // check if we have a copy overlap + // AX = &dst + rest_size + MOVQ CX, AX + ADDQ BX, AX + // if &dst + rest_size > di, copy byte by byte + CMPQ AX, DI + + JA copy_match_loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + + // Spill registers. Increment DI now so we don't need to save CX. 
+ ADDQ CX, DI + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + XORL CX, CX + +loopcheck: + // for si < len(src) + CMPQ SI, R9 + JB loop + +end: + // Remaining length must be zero. + TESTQ CX, CX + JNE err_corrupt + + SUBQ R11, DI + MOVQ DI, ret+72(FP) + RET + +err_corrupt: + MOVQ $-1, ret+72(FP) + RET + +err_short_buf: + MOVQ $-2, ret+72(FP) + RET + +err_short_dict: + MOVQ $-3, ret+72(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s new file mode 100644 index 000000000..20b21fcf1 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s @@ -0,0 +1,231 @@ +// +build gc +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define srcend R4 +#define match R5 // Match address. +#define dictend R6 +#define token R7 +#define len R8 // Literal and match lengths. +#define offset R7 // Match offset; overlaps with token. +#define tmp1 R9 +#define tmp2 R11 +#define tmp3 R12 + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40 + MOVW dst_base +0(FP), dst + MOVW dst_len +4(FP), dstend + MOVW src_base +12(FP), src + MOVW src_len +16(FP), srcend + + CMP $0, srcend + BEQ shortSrc + + ADD dst, dstend + ADD src, srcend + + MOVW dst, dstorig + +loop: + // Read token. Extract literal length. + MOVBU.P 1(src), token + MOVW token >> 4, len + CMP $15, len + BNE readLitlenDone + +readLitlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADD.S tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readLitlenLoop + +readLitlenDone: + CMP $0, len + BEQ copyLiteralDone + + // Bounds check dst+len and src+len. + ADD.S dst, len, tmp1 + ADD.CC.S src, len, tmp2 + BCS shortSrc + CMP dstend, tmp1 + //BHI shortDst // Uncomment for distinct error codes. + CMP.LS srcend, tmp2 + BHI shortSrc + + // Copy literal. + CMP $4, len + BLO copyLiteralFinish + + // Copy 0-3 bytes until src is aligned. + TST $1, src + MOVBU.NE.P 1(src), tmp1 + MOVB.NE.P tmp1, 1(dst) + SUB.NE $1, len + + TST $2, src + MOVHU.NE.P 2(src), tmp2 + MOVB.NE.P tmp2, 1(dst) + MOVW.NE tmp2 >> 8, tmp1 + MOVB.NE.P tmp1, 1(dst) + SUB.NE $2, len + + B copyLiteralLoopCond + +copyLiteralLoop: + // Aligned load, unaligned write. + MOVW.P 4(src), tmp1 + MOVW tmp1 >> 8, tmp2 + MOVB tmp2, 1(dst) + MOVW tmp1 >> 16, tmp3 + MOVB tmp3, 2(dst) + MOVW tmp1 >> 24, tmp2 + MOVB tmp2, 3(dst) + MOVB.P tmp1, 4(dst) +copyLiteralLoopCond: + // Loop until len-4 < 0. + SUB.S $4, len + BPL copyLiteralLoop + +copyLiteralFinish: + // Copy remaining 0-3 bytes. + // At this point, len may be < 0, but len&3 is still accurate. + TST $1, len + MOVB.NE.P 1(src), tmp3 + MOVB.NE.P tmp3, 1(dst) + TST $2, len + MOVB.NE.P 2(src), tmp1 + MOVB.NE.P tmp1, 2(dst) + MOVB.NE -1(src), tmp2 + MOVB.NE tmp2, -1(dst) + +copyLiteralDone: + // Initial part of match length. + // This frees up the token register for reuse as offset. + AND $15, token, len + + CMP src, srcend + BEQ end + + // Read offset. 
+ ADD.S $2, src + BCS shortSrc + CMP srcend, src + BHI shortSrc + MOVBU -2(src), offset + MOVBU -1(src), tmp1 + ORR.S tmp1 << 8, offset + BEQ corrupt + + // Read rest of match length. + CMP $15, len + BNE readMatchlenDone + +readMatchlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADD.S tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readMatchlenLoop + +readMatchlenDone: + // Bounds check dst+len+minMatch. + ADD.S dst, len, tmp1 + ADD.CC.S $const_minMatch, tmp1 + BCS shortDst + CMP dstend, tmp1 + BHI shortDst + + RSB dst, offset, match + CMP dstorig, match + BGE copyMatch4 + + // match < dstorig means the match starts in the dictionary, + // at len(dict) - offset + (dst - dstorig). + MOVW dict_base+24(FP), match + MOVW dict_len +28(FP), dictend + + ADD $const_minMatch, len + + RSB dst, dstorig, tmp1 + RSB dictend, offset, tmp2 + ADD.S tmp2, tmp1 + BMI shortDict + ADD match, dictend + ADD tmp1, match + +copyDict: + MOVBU.P 1(match), tmp1 + MOVB.P tmp1, 1(dst) + SUB.S $1, len + CMP.NE match, dictend + BNE copyDict + + // If the match extends beyond the dictionary, the rest is at dstorig. + CMP $0, len + BEQ copyMatchDone + MOVW dstorig, match + B copyMatch + + // Copy a regular match. + // Since len+minMatch is at least four, we can do a 4× unrolled + // byte copy loop. Using MOVW instead of four byte loads is faster, + // but to remain portable we'd have to align match first, which is + // too expensive. By alternating loads and stores, we also handle + // the case offset < 4. +copyMatch4: + SUB.S $4, len + MOVBU.P 4(match), tmp1 + MOVB.P tmp1, 4(dst) + MOVBU -3(match), tmp2 + MOVB tmp2, -3(dst) + MOVBU -2(match), tmp3 + MOVB tmp3, -2(dst) + MOVBU -1(match), tmp1 + MOVB tmp1, -1(dst) + BPL copyMatch4 + + // Restore len, which is now negative. + ADD.S $4, len + BEQ copyMatchDone + +copyMatch: + // Finish with a byte-at-a-time copy. + SUB.S $1, len + MOVBU.P 1(match), tmp2 + MOVB.P tmp2, 1(dst) + BNE copyMatch + +copyMatchDone: + CMP src, srcend + BNE loop + +end: + CMP $0, len + BNE corrupt + SUB dstorig, dst, tmp1 + MOVW tmp1, ret+36(FP) + RET + + // The error cases have distinct labels so we can put different + // return codes here when debugging, or if the error returns need to + // be changed. +shortDict: +shortDst: +shortSrc: +corrupt: + MOVW $-1, tmp1 + MOVW tmp1, ret+36(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s new file mode 100644 index 000000000..c43e8a8d2 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -0,0 +1,230 @@ +// +build gc +// +build !noasm + +// This implementation assumes that strict alignment checking is turned off. +// The Go compiler makes the same assumption. + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define dstend16 R4 // dstend - 16 +#define srcend R5 +#define srcend16 R6 // srcend - 16 +#define match R7 // Match address. +#define dict R8 +#define dictlen R9 +#define dictend R10 +#define token R11 +#define len R12 // Literal and match lengths. +#define lenRem R13 +#define offset R14 // Match offset. 
+#define tmp1 R15 +#define tmp2 R16 +#define tmp3 R17 +#define tmp4 R19 + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80 + LDP dst_base+0(FP), (dst, dstend) + ADD dst, dstend + MOVD dst, dstorig + + LDP src_base+24(FP), (src, srcend) + CBZ srcend, shortSrc + ADD src, srcend + + // dstend16 = max(dstend-16, 0) and similarly for srcend16. + SUBS $16, dstend, dstend16 + CSEL LO, ZR, dstend16, dstend16 + SUBS $16, srcend, srcend16 + CSEL LO, ZR, srcend16, srcend16 + + LDP dict_base+48(FP), (dict, dictlen) + ADD dict, dictlen, dictend + +loop: + // Read token. Extract literal length. + MOVBU.P 1(src), token + LSR $4, token, len + CMP $15, len + BNE readLitlenDone + +readLitlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADDS tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readLitlenLoop + +readLitlenDone: + CBZ len, copyLiteralDone + + // Bounds check dst+len and src+len. + ADDS dst, len, tmp1 + BCS shortSrc + ADDS src, len, tmp2 + BCS shortSrc + CMP dstend, tmp1 + BHI shortDst + CMP srcend, tmp2 + BHI shortSrc + + // Copy literal. + SUBS $16, len + BLO copyLiteralShort + +copyLiteralLoop: + LDP.P 16(src), (tmp1, tmp2) + STP.P (tmp1, tmp2), 16(dst) + SUBS $16, len + BPL copyLiteralLoop + + // Copy (final part of) literal of length 0-15. + // If we have >=16 bytes left in src and dst, just copy 16 bytes. +copyLiteralShort: + CMP dstend16, dst + CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO). + BHS copyLiteralShortEnd + + AND $15, len + + LDP (src), (tmp1, tmp2) + ADD len, src + STP (tmp1, tmp2), (dst) + ADD len, dst + + B copyLiteralDone + + // Safe but slow copy near the end of src, dst. +copyLiteralShortEnd: + TBZ $3, len, 3(PC) + MOVD.P 8(src), tmp1 + MOVD.P tmp1, 8(dst) + TBZ $2, len, 3(PC) + MOVW.P 4(src), tmp2 + MOVW.P tmp2, 4(dst) + TBZ $1, len, 3(PC) + MOVH.P 2(src), tmp3 + MOVH.P tmp3, 2(dst) + TBZ $0, len, 3(PC) + MOVBU.P 1(src), tmp4 + MOVB.P tmp4, 1(dst) + +copyLiteralDone: + // Initial part of match length. + AND $15, token, len + + CMP src, srcend + BEQ end + + // Read offset. + ADDS $2, src + BCS shortSrc + CMP srcend, src + BHI shortSrc + MOVHU -2(src), offset + CBZ offset, corrupt + + // Read rest of match length. + CMP $15, len + BNE readMatchlenDone + +readMatchlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADDS tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readMatchlenLoop + +readMatchlenDone: + ADD $const_minMatch, len + + // Bounds check dst+len. + ADDS dst, len, tmp2 + BCS shortDst + CMP dstend, tmp2 + BHI shortDst + + SUB offset, dst, match + CMP dstorig, match + BHS copyMatchTry8 + + // match < dstorig means the match starts in the dictionary, + // at len(dict) - offset + (dst - dstorig). + SUB dstorig, dst, tmp1 + SUB offset, dictlen, tmp2 + ADDS tmp2, tmp1 + BMI shortDict + ADD dict, tmp1, match + +copyDict: + MOVBU.P 1(match), tmp3 + MOVB.P tmp3, 1(dst) + SUBS $1, len + CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag. + BNE copyDict + + CBZ len, copyMatchDone + + // If the match extends beyond the dictionary, the rest is at dstorig. + // Recompute the offset for the next check. + MOVD dstorig, match + SUB dstorig, dst, offset + +copyMatchTry8: + // Copy doublewords if both len and offset are at least eight. + // A 16-at-a-time loop doesn't provide a further speedup. 
+ CMP $8, len + CCMP HS, offset, $8, $0 + BLO copyMatchLoop1 + + AND $7, len, lenRem + SUB $8, len +copyMatchLoop8: + MOVD.P 8(match), tmp1 + MOVD.P tmp1, 8(dst) + SUBS $8, len + BPL copyMatchLoop8 + + MOVD (match)(len), tmp2 // match+len == match+lenRem-8. + ADD lenRem, dst + MOVD $0, len + MOVD tmp2, -8(dst) + B copyMatchDone + +copyMatchLoop1: + // Byte-at-a-time copy for small offsets. + MOVBU.P 1(match), tmp2 + MOVB.P tmp2, 1(dst) + SUBS $1, len + BNE copyMatchLoop1 + +copyMatchDone: + CMP src, srcend + BNE loop + +end: + CBNZ len, corrupt + SUB dstorig, dst, tmp1 + MOVD tmp1, ret+72(FP) + RET + + // The error cases have distinct labels so we can put different + // return codes here when debugging, or if the error returns need to + // be changed. +shortDict: +shortDst: +shortSrc: +corrupt: + MOVD $-1, tmp1 + MOVD tmp1, ret+72(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go new file mode 100644 index 000000000..8d9023d10 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go @@ -0,0 +1,10 @@ +//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm +// +build amd64 arm arm64 +// +build !appengine +// +build gc +// +build !noasm + +package lz4block + +//go:noescape +func decodeBlock(dst, src, dict []byte) int diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go new file mode 100644 index 000000000..9f568fbb1 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go @@ -0,0 +1,139 @@ +//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm,!arm64 appengine !gc noasm + +package lz4block + +import ( + "encoding/binary" +) + +func decodeBlock(dst, src, dict []byte) (ret int) { + // Restrict capacities so we don't read or write out of bounds. + dst = dst[:len(dst):len(dst)] + src = src[:len(src):len(src)] + + const hasError = -2 + + if len(src) == 0 { + return hasError + } + + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di uint + for si < uint(len(src)) { + // Literals and match lengths (token). + b := uint(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < uint(len(src)): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := u16(src[si:]); mLen <= offset && offset < di { + i := di - offset + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. 
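+				// Take the shortcut only when the full 18-byte copy fits in dst;
+				// otherwise fall through to the generic match handling below.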
+ if end := i + 18; end <= uint(len(dst)) { + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + } + case lLen == 0xF: + for { + x := uint(src[si]) + if lLen += x; int(lLen) < 0 { + return hasError + } + si++ + if x != 0xFF { + break + } + } + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + + mLen := b & 0xF + if si == uint(len(src)) && mLen == 0 { + break + } else if si >= uint(len(src)) { + return hasError + } + + offset := u16(src[si:]) + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen += minMatch + if mLen == minMatch+0xF { + for { + x := uint(src[si]) + if mLen += x; int(mLen) < 0 { + return hasError + } + si++ + if x != 0xFF { + break + } + } + } + + // Copy the match. + if di < offset { + // The match is beyond our block, meaning the first part + // is in the dictionary. + fromDict := dict[uint(len(dict))+di-offset:] + n := uint(copy(dst[di:di+mLen], fromDict)) + di += n + if mLen -= n; mLen == 0 { + continue + } + // We copied n = offset-di bytes from the dictionary, + // then set di = di+n = offset, so the following code + // copies from dst[di-offset:] = dst[0:]. + } + + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. + bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += uint(copy(dst[di:di+mLen], expanded[:mLen])) + } + + return int(di) +} + +func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go new file mode 100644 index 000000000..710ea4281 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go @@ -0,0 +1,19 @@ +package lz4errors + +type Error string + +func (e Error) Error() string { return string(e) } + +const ( + ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short" + ErrInvalidFrame Error = "lz4: bad magic number" + ErrInternalUnhandledState Error = "lz4: unhandled state" + ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum" + ErrInvalidBlockChecksum Error = "lz4: invalid block checksum" + ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum" + ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level" + ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object" + ErrOptionInvalidBlockSize Error = "lz4: invalid block size" + ErrOptionNotApplicable Error = "lz4: option not applicable" + ErrWriterNotClosed Error = "lz4: writer not closed" +) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go new file mode 100644 index 000000000..459086f09 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -0,0 +1,350 @@ +package lz4stream + +import ( + "encoding/binary" + "fmt" + "io" + "sync" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/xxh32" +) + +type Blocks struct { + Block *FrameDataBlock + Blocks chan chan *FrameDataBlock + mu sync.Mutex + err error +} + +func (b *Blocks) initW(f *Frame, dst io.Writer, num int) { + if num == 1 { + b.Blocks = nil + b.Block = NewFrameDataBlock(f) + return + } + b.Block = nil 
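+	// Concurrent mode: each compressed block arrives on its own channel, and the
+	// goroutine below drains the channels in submission order so output stays ordered.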
+ if cap(b.Blocks) != num { + b.Blocks = make(chan chan *FrameDataBlock, num) + } + // goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range b.Blocks { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. + // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. + block := <-c + if block == nil { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Do not attempt to write the block upon any previous failure. + if b.err == nil { + // Write the block. + if err := block.Write(f, dst); err != nil { + // Keep the first error. + b.err = err + // All pending compression goroutines need to shut down, so we need to keep going. + } + } + close(c) + } + }() +} + +func (b *Blocks) close(f *Frame, num int) error { + if num == 1 { + if b.Block != nil { + b.Block.Close(f) + } + err := b.err + b.err = nil + return err + } + if b.Blocks == nil { + err := b.err + b.err = nil + return err + } + c := make(chan *FrameDataBlock) + b.Blocks <- c + c <- nil + <-c + err := b.err + b.err = nil + return err +} + +// ErrorR returns any error set while uncompressing a stream. +func (b *Blocks) ErrorR() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.err +} + +// initR returns a channel that streams the uncompressed blocks if in concurrent +// mode and no error. When the channel is closed, check for any error with b.ErrorR. +// +// If not in concurrent mode, the uncompressed block is b.Block and the returned error +// needs to be checked. +func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) { + size := f.Descriptor.Flags.BlockSizeIndex() + if num == 1 { + b.Blocks = nil + b.Block = NewFrameDataBlock(f) + return nil, nil + } + b.Block = nil + blocks := make(chan chan []byte, num) + // data receives the uncompressed blocks. + data := make(chan []byte) + // Read blocks from the source sequentially + // and uncompress them concurrently. + + // In legacy mode, accrue the uncompress sizes in cum. + var cum uint32 + go func() { + var cumx uint32 + var err error + for b.ErrorR() == nil { + block := NewFrameDataBlock(f) + cumx, err = block.Read(f, src, 0) + if err != nil { + block.Close(f) + break + } + // Recheck for an error as reading may be slow and uncompressing is expensive. + if b.ErrorR() != nil { + block.Close(f) + break + } + c := make(chan []byte) + blocks <- c + go func() { + defer block.Close(f) + data, err := block.Uncompress(f, size.Get(), nil, false) + if err != nil { + b.closeR(err) + // Close the block channel to indicate an error. + close(c) + } else { + c <- data + } + }() + } + // End the collection loop and the data channel. + c := make(chan []byte) + blocks <- c + c <- nil // signal the collection loop that we are done + <-c // wait for the collect loop to complete + if f.isLegacy() && cum == cumx { + err = io.EOF + } + b.closeR(err) + close(data) + }() + // Collect the uncompressed blocks and make them available + // on the returned channel. + go func(leg bool) { + defer close(blocks) + skipBlocks := false + for c := range blocks { + buf, ok := <-c + if !ok { + // A closed channel indicates an error. + // All remaining channels should be discarded. + skipBlocks = true + continue + } + if buf == nil { + // Signal to end the loop. 
+ close(c) + return + } + if skipBlocks { + // A previous error has occurred, skipping remaining channels. + continue + } + // Perform checksum now as the blocks are received in order. + if f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(buf) + } + if leg { + cum += uint32(len(buf)) + } + data <- buf + close(c) + } + }(f.isLegacy()) + return data, nil +} + +// closeR safely sets the error on b if not already set. +func (b *Blocks) closeR(err error) { + b.mu.Lock() + if b.err == nil { + b.err = err + } + b.mu.Unlock() +} + +func NewFrameDataBlock(f *Frame) *FrameDataBlock { + buf := f.Descriptor.Flags.BlockSizeIndex().Get() + return &FrameDataBlock{Data: buf, data: buf} +} + +type FrameDataBlock struct { + Size DataBlockSize + Data []byte // compressed or uncompressed data (.data or .src) + Checksum uint32 + data []byte // buffer for compressed data + src []byte // uncompressed data + err error // used in concurrent mode +} + +func (b *FrameDataBlock) Close(f *Frame) { + b.Size = 0 + b.Checksum = 0 + b.err = nil + if b.data != nil { + // Block was not already closed. + lz4block.Put(b.data) + b.Data = nil + b.data = nil + b.src = nil + } +} + +// Block compression errors are ignored since the buffer is sized appropriately. +func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { + data := b.data + if f.isLegacy() { + // In legacy mode, the buffer is sized according to CompressBlockBound, + // but only 8Mb is buffered for compression. + src = src[:8<<20] + } else { + data = data[:len(src)] // trigger the incompressible flag in CompressBlock + } + var n int + switch level { + case lz4block.Fast: + n, _ = lz4block.CompressBlock(src, data) + default: + n, _ = lz4block.CompressBlockHC(src, data, level) + } + if n == 0 { + b.Size.UncompressedSet(true) + b.Data = src + } else { + b.Size.UncompressedSet(false) + b.Data = data[:n] + } + b.Size.sizeSet(len(b.Data)) + b.src = src // keep track of the source for content checksum + + if f.Descriptor.Flags.BlockChecksum() { + b.Checksum = xxh32.ChecksumZero(src) + } + return b +} + +func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error { + // Write is called in the same order as blocks are compressed, + // so content checksum must be done here. + if f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(b.src) + } + buf := f.buf[:] + binary.LittleEndian.PutUint32(buf, uint32(b.Size)) + if _, err := dst.Write(buf[:4]); err != nil { + return err + } + + if _, err := dst.Write(b.Data); err != nil { + return err + } + + if b.Checksum == 0 { + return nil + } + binary.LittleEndian.PutUint32(buf, b.Checksum) + _, err := dst.Write(buf[:4]) + return err +} + +// Read updates b with the next block data, size and checksum if available. +func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) { + x, err := f.readUint32(src) + if err != nil { + return 0, err + } + if f.isLegacy() { + switch x { + case frameMagicLegacy: + // Concatenated legacy frame. + return b.Read(f, src, cum) + case cum: + // Only works in non concurrent mode, for concurrent mode + // it is handled separately. + // Linux kernel format appends the total uncompressed size at the end. + return 0, io.EOF + } + } else if x == 0 { + // Marker for end of stream. 
+ return 0, io.EOF + } + b.Size = DataBlockSize(x) + + size := b.Size.size() + if size > cap(b.data) { + return x, lz4errors.ErrOptionInvalidBlockSize + } + b.data = b.data[:size] + if _, err := io.ReadFull(src, b.data); err != nil { + return x, err + } + if f.Descriptor.Flags.BlockChecksum() { + sum, err := f.readUint32(src) + if err != nil { + return 0, err + } + b.Checksum = sum + } + return x, nil +} + +func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) { + if b.Size.Uncompressed() { + n := copy(dst, b.data) + dst = dst[:n] + } else { + n, err := lz4block.UncompressBlock(b.data, dst, dict) + if err != nil { + return nil, err + } + dst = dst[:n] + } + if f.Descriptor.Flags.BlockChecksum() { + if c := xxh32.ChecksumZero(dst); c != b.Checksum { + err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum) + return nil, err + } + } + if sum && f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(dst) + } + return dst, nil +} + +func (f *Frame) readUint32(r io.Reader) (x uint32, err error) { + if _, err = io.ReadFull(r, f.buf[:4]); err != nil { + return + } + x = binary.LittleEndian.Uint32(f.buf[:4]) + return +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go new file mode 100644 index 000000000..18192a943 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go @@ -0,0 +1,204 @@ +// Package lz4stream provides the types that support reading and writing LZ4 data streams. +package lz4stream + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/xxh32" +) + +//go:generate go run gen.go + +const ( + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + frameMagicLegacy uint32 = 0x184C2102 +) + +func NewFrame() *Frame { + return &Frame{} +} + +type Frame struct { + buf [15]byte // frame descriptor needs at most 4(magic)+4+8+1=11 bytes + Magic uint32 + Descriptor FrameDescriptor + Blocks Blocks + Checksum uint32 + checksum xxh32.XXHZero +} + +// Reset allows reusing the Frame. +// The Descriptor configuration is not modified. +func (f *Frame) Reset(num int) { + f.Magic = 0 + f.Descriptor.Checksum = 0 + f.Descriptor.ContentSize = 0 + _ = f.Blocks.close(f, num) + f.Checksum = 0 +} + +func (f *Frame) InitW(dst io.Writer, num int, legacy bool) { + if legacy { + f.Magic = frameMagicLegacy + idx := lz4block.Index(lz4block.Block8Mb) + f.Descriptor.Flags.BlockSizeIndexSet(idx) + } else { + f.Magic = frameMagic + f.Descriptor.initW() + } + f.Blocks.initW(f, dst, num) + f.checksum.Reset() +} + +func (f *Frame) CloseW(dst io.Writer, num int) error { + if err := f.Blocks.close(f, num); err != nil { + return err + } + if f.isLegacy() { + return nil + } + buf := f.buf[:0] + // End mark (data block size of uint32(0)). + buf = append(buf, 0, 0, 0, 0) + if f.Descriptor.Flags.ContentChecksum() { + buf = f.checksum.Sum(buf) + } + _, err := dst.Write(buf) + return err +} + +func (f *Frame) isLegacy() bool { + return f.Magic == frameMagicLegacy +} + +func (f *Frame) ParseHeaders(src io.Reader) error { + if f.Magic > 0 { + // Header already read. 
+ return nil + } + +newFrame: + var err error + if f.Magic, err = f.readUint32(src); err != nil { + return err + } + switch m := f.Magic; { + case m == frameMagic || m == frameMagicLegacy: + // All 16 values of frameSkipMagic are valid. + case m>>8 == frameSkipMagic>>8: + skip, err := f.readUint32(src) + if err != nil { + return err + } + if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil { + return err + } + goto newFrame + default: + return lz4errors.ErrInvalidFrame + } + if err := f.Descriptor.initR(f, src); err != nil { + return err + } + f.checksum.Reset() + return nil +} + +func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) { + return f.Blocks.initR(f, num, src) +} + +func (f *Frame) CloseR(src io.Reader) (err error) { + if f.isLegacy() { + return nil + } + if !f.Descriptor.Flags.ContentChecksum() { + return nil + } + if f.Checksum, err = f.readUint32(src); err != nil { + return err + } + if c := f.checksum.Sum32(); c != f.Checksum { + return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum) + } + return nil +} + +type FrameDescriptor struct { + Flags DescriptorFlags + ContentSize uint64 + Checksum uint8 +} + +func (fd *FrameDescriptor) initW() { + fd.Flags.VersionSet(1) + fd.Flags.BlockIndependenceSet(true) +} + +func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error { + if fd.Checksum > 0 { + // Header already written. + return nil + } + + buf := f.buf[:4] + // Write the magic number here even though it belongs to the Frame. + binary.LittleEndian.PutUint32(buf, f.Magic) + if !f.isLegacy() { + buf = buf[:4+2] + binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags)) + + if fd.Flags.Size() { + buf = buf[:4+2+8] + binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize) + } + fd.Checksum = descriptorChecksum(buf[4:]) + buf = append(buf, fd.Checksum) + } + + _, err := dst.Write(buf) + return err +} + +func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error { + if f.isLegacy() { + idx := lz4block.Index(lz4block.Block8Mb) + f.Descriptor.Flags.BlockSizeIndexSet(idx) + return nil + } + // Read the flags and the checksum, hoping that there is not content size. + buf := f.buf[:3] + if _, err := io.ReadFull(src, buf); err != nil { + return err + } + descr := binary.LittleEndian.Uint16(buf) + fd.Flags = DescriptorFlags(descr) + if fd.Flags.Size() { + // Append the 8 missing bytes. + buf = buf[:3+8] + if _, err := io.ReadFull(src, buf[3:]); err != nil { + return err + } + fd.ContentSize = binary.LittleEndian.Uint64(buf[2:]) + } + fd.Checksum = buf[len(buf)-1] // the checksum is the last byte + buf = buf[:len(buf)-1] // all descriptor fields except checksum + if c := descriptorChecksum(buf); fd.Checksum != c { + return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum) + } + // Validate the elements that can be. + if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() { + return lz4errors.ErrOptionInvalidBlockSize + } + return nil +} + +func descriptorChecksum(buf []byte) byte { + return byte(xxh32.ChecksumZero(buf) >> 8) +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go new file mode 100644 index 000000000..d33a6be95 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go @@ -0,0 +1,103 @@ +// Code generated by `gen.exe`. DO NOT EDIT. 
+ +package lz4stream + +import "github.com/pierrec/lz4/v4/internal/lz4block" + +// DescriptorFlags is defined as follow: +// field bits +// ----- ---- +// _ 2 +// ContentChecksum 1 +// Size 1 +// BlockChecksum 1 +// BlockIndependence 1 +// Version 2 +// _ 4 +// BlockSizeIndex 3 +// _ 1 +type DescriptorFlags uint16 + +// Getters. +func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 } +func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 } +func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 } +func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 } +func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) } +func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex { + return lz4block.BlockSizeIndex(x >> 12 & 0x7) +} + +// Setters. +func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags { + const b = 1 << 2 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags { + const b = 1 << 3 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags { + const b = 1 << 4 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags { + const b = 1 << 5 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags { + *x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6) + return x +} +func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags { + *x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12) + return x +} + +// Code generated by `gen.exe`. DO NOT EDIT. + +// DataBlockSize is defined as follow: +// field bits +// ----- ---- +// size 31 +// Uncompressed 1 +type DataBlockSize uint32 + +// Getters. +func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) } +func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 } + +// Setters. +func (x *DataBlockSize) sizeSet(v int) *DataBlockSize { + *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF + return x +} +func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize { + const b = 1 << 31 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go new file mode 100644 index 000000000..651d10c10 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go @@ -0,0 +1,212 @@ +// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). +// (ported from the reference implementation https://github.com/Cyan4973/xxHash/) +package xxh32 + +import ( + "encoding/binary" +) + +const ( + prime1 uint32 = 2654435761 + prime2 uint32 = 2246822519 + prime3 uint32 = 3266489917 + prime4 uint32 = 668265263 + prime5 uint32 = 374761393 + + primeMask = 0xFFFFFFFF + prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 + prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 +) + +// XXHZero represents an xxhash32 object with seed 0. +type XXHZero struct { + v [4]uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. 
+func (xxh XXHZero) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *XXHZero) Reset() { + xxh.v[0] = prime1plus2 + xxh.v[1] = prime2 + xxh.v[2] = 0 + xxh.v[3] = prime1minus + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *XXHZero) Size() int { + return 4 +} + +// BlockSizeIndex gives the minimum number of bytes accepted by Write(). +func (xxh *XXHZero) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. +func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + var buf *[16]byte + if m != 0 { + // some data left from previous update + buf = &xxh.buf + c := copy(buf[m:], input) + n -= c + input = input[c:] + } + update(&xxh.v, buf, input) + xxh.bufused = copy(xxh.buf[:], input[n-n%16:]) + + return n, nil +} + +// Portable version of update. This updates v by processing all of buf +// (if not nil) and all full 16-byte blocks of input. +func updateGo(v *[4]uint32, buf *[16]byte, input []byte) { + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := v[0], v[1], v[2], v[3] + + if buf != nil { + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + } + + for ; len(input) >= 16; input = input[16:] { + sub := input[:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + v[0], v[1], v[2], v[3] = v1, v2, v3, v4 +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3]) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Portable version of ChecksumZero. 
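+// It is used when no assembly implementation is available (see xxh32zero_other.go).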
+func checksumZeroGo(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go new file mode 100644 index 000000000..0978b2665 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go @@ -0,0 +1,11 @@ +// +build !noasm + +package xxh32 + +// ChecksumZero returns the 32-bit hash of input. +// +//go:noescape +func ChecksumZero(input []byte) uint32 + +//go:noescape +func update(v *[4]uint32, buf *[16]byte, input []byte) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s new file mode 100644 index 000000000..c18ffd574 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s @@ -0,0 +1,251 @@ +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define p R0 +#define n R1 +#define h R2 +#define v1 R2 // Alias for h. +#define v2 R3 +#define v3 R4 +#define v4 R5 +#define x1 R6 +#define x2 R7 +#define x3 R8 +#define x4 R9 + +// We need the primes in registers. The 16-byte loop only uses prime{1,2}. +#define prime1r R11 +#define prime2r R12 +#define prime3r R3 // The rest can alias v{2-4}. +#define prime4r R4 +#define prime5r R5 + +// Update round macros. These read from and increment p. 
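+// round16aligned uses a multi-word load (MOVM) and so requires p to be 4-byte aligned;
+// round16unaligned assembles each word from individual bytes and works for any alignment.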
+ +#define round16aligned \ + MOVM.IA.W (p), [x1, x2, x3, x4] \ + \ + MULA x1, prime2r, v1, v1 \ + MULA x2, prime2r, v2, v2 \ + MULA x3, prime2r, v3, v3 \ + MULA x4, prime2r, v4, v4 \ + \ + MOVW v1 @> 19, v1 \ + MOVW v2 @> 19, v2 \ + MOVW v3 @> 19, v3 \ + MOVW v4 @> 19, v4 \ + \ + MUL prime1r, v1 \ + MUL prime1r, v2 \ + MUL prime1r, v3 \ + MUL prime1r, v4 \ + +#define round16unaligned \ + MOVBU.P 16(p), x1 \ + MOVBU -15(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -14(p), x3 \ + MOVBU -13(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v1, v1 \ + MOVW v1 @> 19, v1 \ + MUL prime1r, v1 \ + \ + MOVBU -12(p), x1 \ + MOVBU -11(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -10(p), x3 \ + MOVBU -9(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v2, v2 \ + MOVW v2 @> 19, v2 \ + MUL prime1r, v2 \ + \ + MOVBU -8(p), x1 \ + MOVBU -7(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -6(p), x3 \ + MOVBU -5(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v3, v3 \ + MOVW v3 @> 19, v3 \ + MUL prime1r, v3 \ + \ + MOVBU -4(p), x1 \ + MOVBU -3(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -2(p), x3 \ + MOVBU -1(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v4, v4 \ + MOVW v4 @> 19, v4 \ + MUL prime1r, v4 \ + + +// func ChecksumZero([]byte) uint32 +TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16 + MOVW input_base+0(FP), p + MOVW input_len+4(FP), n + + MOVW $const_prime1, prime1r + MOVW $const_prime2, prime2r + + // Set up h for n < 16. It's tempting to say {ADD prime5, n, h} + // here, but that's a pseudo-op that generates a load through R11. + MOVW $const_prime5, prime5r + ADD prime5r, n, h + CMP $0, n + BEQ end + + // We let n go negative so we can do comparisons with SUB.S + // instead of separate CMP. + SUB.S $16, n + BMI loop16done + + ADD prime1r, prime2r, v1 + MOVW prime2r, v2 + MOVW $0, v3 + RSB $0, prime1r, v4 + + TST $3, p + BNE loop16unaligned + +loop16aligned: + SUB.S $16, n + round16aligned + BPL loop16aligned + B loop16finish + +loop16unaligned: + SUB.S $16, n + round16unaligned + BPL loop16unaligned + +loop16finish: + MOVW v1 @> 31, h + ADD v2 @> 25, h + ADD v3 @> 20, h + ADD v4 @> 14, h + + // h += len(input) with v2 as temporary. + MOVW input_len+4(FP), v2 + ADD v2, h + +loop16done: + ADD $16, n // Restore number of bytes left. + + SUB.S $4, n + MOVW $const_prime3, prime3r + BMI loop4done + MOVW $const_prime4, prime4r + + TST $3, p + BNE loop4unaligned + +loop4aligned: + SUB.S $4, n + + MOVW.P 4(p), x1 + MULA prime3r, x1, h, h + MOVW h @> 15, h + MUL prime4r, h + + BPL loop4aligned + B loop4done + +loop4unaligned: + SUB.S $4, n + + MOVBU.P 4(p), x1 + MOVBU -3(p), x2 + ORR x2 << 8, x1 + MOVBU -2(p), x3 + ORR x3 << 16, x1 + MOVBU -1(p), x4 + ORR x4 << 24, x1 + + MULA prime3r, x1, h, h + MOVW h @> 15, h + MUL prime4r, h + + BPL loop4unaligned + +loop4done: + ADD.S $4, n // Restore number of bytes left. + BEQ end + + MOVW $const_prime5, prime5r + +loop1: + SUB.S $1, n + + MOVBU.P 1(p), x1 + MULA prime5r, x1, h, h + MOVW h @> 21, h + MUL prime1r, h + + BNE loop1 + +end: + MOVW $const_prime3, prime3r + EOR h >> 15, h + MUL prime2r, h + EOR h >> 13, h + MUL prime3r, h + EOR h >> 16, h + + MOVW h, ret+12(FP) + RET + + +// func update(v *[4]uint64, buf *[16]byte, p []byte) +TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20 + MOVW v+0(FP), p + MOVM.IA (p), [v1, v2, v3, v4] + + MOVW $const_prime1, prime1r + MOVW $const_prime2, prime2r + + // Process buf, if not nil. 
+ MOVW buf+4(FP), p + CMP $0, p + BEQ noBuffered + + round16aligned + +noBuffered: + MOVW input_base +8(FP), p + MOVW input_len +12(FP), n + + SUB.S $16, n + BMI end + + TST $3, p + BNE loop16unaligned + +loop16aligned: + SUB.S $16, n + round16aligned + BPL loop16aligned + B end + +loop16unaligned: + SUB.S $16, n + round16unaligned + BPL loop16unaligned + +end: + MOVW v+0(FP), p + MOVM.IA [v1, v2, v3, v4], (p) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go new file mode 100644 index 000000000..c96b59b8c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go @@ -0,0 +1,10 @@ +// +build !arm noasm + +package xxh32 + +// ChecksumZero returns the 32-bit hash of input. +func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) } + +func update(v *[4]uint32, buf *[16]byte, input []byte) { + updateGo(v, buf, input) +} diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go new file mode 100644 index 000000000..a62022e08 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/lz4.go @@ -0,0 +1,157 @@ +// Package lz4 implements reading and writing lz4 compressed data. +// +// The package supports both the LZ4 stream format, +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +// and the LZ4 block format, defined at +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html. +// +// See https://github.com/lz4/lz4 for the reference C implementation. +package lz4 + +import ( + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +func _() { + // Safety checks for duplicated elements. + var x [1]struct{} + _ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast] + _ = x[Block64Kb-BlockSize(lz4block.Block64Kb)] + _ = x[Block256Kb-BlockSize(lz4block.Block256Kb)] + _ = x[Block1Mb-BlockSize(lz4block.Block1Mb)] + _ = x[Block4Mb-BlockSize(lz4block.Block4Mb)] +} + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return lz4block.CompressBlockBound(n) +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (int, error) { + return lz4block.UncompressBlock(src, dst, nil) +} + +// UncompressBlockWithDict uncompresses the source buffer into the destination one using a +// dictionary, and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlockWithDict(src, dst, dict []byte) (int, error) { + return lz4block.UncompressBlock(src, dst, dict) +} + +// A Compressor compresses data into the LZ4 block format. +// It uses a fast compression algorithm. +// +// A Compressor is not safe for concurrent use by multiple goroutines. +// +// Use a Writer to compress into the LZ4 stream format. +type Compressor struct{ c lz4block.Compressor } + +// CompressBlock compresses the source buffer src into the destination dst. +// +// If compression is successful, the first return value is the size of the +// compressed data, which is always >0. 
+// +// If dst has length at least CompressBlockBound(len(src)), compression always +// succeeds. Otherwise, the first return value is zero. The error return is +// non-nil if the compressed data does not fit in dst, but it might fit in a +// larger buffer that is still smaller than CompressBlockBound(len(src)). The +// return value (0, nil) means the data is likely incompressible and a buffer +// of length CompressBlockBound(len(src)) should be passed in. +func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { + return c.c.CompressBlock(src, dst) +} + +// CompressBlock compresses the source buffer into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The argument hashTable is scratch space for a hash table used by the +// compressor. If provided, it should have length at least 1<<16. If it is +// shorter (or nil), CompressBlock allocates its own hash table. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. + +// CompressBlock is equivalent to Compressor.CompressBlock. +// The final argument is ignored and should be set to nil. +// +// This function is deprecated. Use a Compressor instead. +func CompressBlock(src, dst []byte, _ []int) (int, error) { + return lz4block.CompressBlock(src, dst) +} + +// A CompressorHC compresses data into the LZ4 block format. +// Its compression ratio is potentially better than that of a Compressor, +// but it is also slower and requires more memory. +// +// A Compressor is not safe for concurrent use by multiple goroutines. +// +// Use a Writer to compress into the LZ4 stream format. +type CompressorHC struct { + // Level is the maximum search depth for compression. + // Values <= 0 mean no maximum. + Level CompressionLevel + c lz4block.CompressorHC +} + +// CompressBlock compresses the source buffer src into the destination dst. +// +// If compression is successful, the first return value is the size of the +// compressed data, which is always >0. +// +// If dst has length at least CompressBlockBound(len(src)), compression always +// succeeds. Otherwise, the first return value is zero. The error return is +// non-nil if the compressed data does not fit in dst, but it might fit in a +// larger buffer that is still smaller than CompressBlockBound(len(src)). The +// return value (0, nil) means the data is likely incompressible and a buffer +// of length CompressBlockBound(len(src)) should be passed in. +func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) { + return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level)) +} + +// CompressBlockHC is equivalent to CompressorHC.CompressBlock. +// The final two arguments are ignored and should be set to nil. +// +// This function is deprecated. Use a CompressorHC instead. +func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) { + return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth)) +} + +const ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer + // ErrInvalidFrame is returned when reading an invalid LZ4 archive. 
+ ErrInvalidFrame = lz4errors.ErrInvalidFrame + // ErrInternalUnhandledState is an internal error. + ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState + // ErrInvalidHeaderChecksum is returned when reading a frame. + ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum + // ErrInvalidBlockChecksum is returned when reading a frame. + ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum + // ErrInvalidFrameChecksum is returned when reading a frame. + ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum + // ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid. + ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel + // ErrOptionClosedOrError is returned when an option is applied to a closed or in error object. + ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError + // ErrOptionInvalidBlockSize is returned when + ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize + // ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it. + ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable + // ErrWriterNotClosed is returned when attempting to reset an unclosed writer. + ErrWriterNotClosed = lz4errors.ErrWriterNotClosed +) diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go new file mode 100644 index 000000000..46a873803 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/options.go @@ -0,0 +1,214 @@ +package lz4 + +import ( + "fmt" + "reflect" + "runtime" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go + +type ( + applier interface { + Apply(...Option) error + private() + } + // Option defines the parameters to setup an LZ4 Writer or Reader. + Option func(applier) error +) + +// String returns a string representation of the option with its parameter(s). +func (o Option) String() string { + return o(nil).Error() +} + +// Default options. +var ( + DefaultBlockSizeOption = BlockSizeOption(Block4Mb) + DefaultChecksumOption = ChecksumOption(true) + DefaultConcurrency = ConcurrencyOption(1) + defaultOnBlockDone = OnBlockDoneOption(nil) +) + +const ( + Block64Kb BlockSize = 1 << (16 + iota*2) + Block256Kb + Block1Mb + Block4Mb +) + +// BlockSizeIndex defines the size of the blocks to be compressed. +type BlockSize uint32 + +// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb). +func BlockSizeOption(size BlockSize) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("BlockSizeOption(%s)", size) + return lz4errors.Error(s) + case *Writer: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// BlockChecksumOption enables or disables block checksum (default=false). 
+func BlockChecksumOption(flag bool) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("BlockChecksumOption(%v)", flag) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// ChecksumOption enables/disables all blocks or content checksum (default=true). +func ChecksumOption(flag bool) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("ChecksumOption(%v)", flag) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// SizeOption sets the size of the original uncompressed data (default=0). It is useful to know the size of the +// whole uncompressed data stream. +func SizeOption(size uint64) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("SizeOption(%d)", size) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// ConcurrencyOption sets the number of go routines used for compression. +// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used. +func ConcurrencyOption(n int) Option { + if n <= 0 { + n = runtime.GOMAXPROCS(0) + } + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("ConcurrencyOption(%d)", n) + return lz4errors.Error(s) + case *Writer: + rw.num = n + return nil + case *Reader: + rw.num = n + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// CompressionLevel defines the level of compression to use. The higher the better, but slower, compression. +type CompressionLevel uint32 + +const ( + Fast CompressionLevel = 0 + Level1 CompressionLevel = 1 << (8 + iota) + Level2 + Level3 + Level4 + Level5 + Level6 + Level7 + Level8 + Level9 +) + +// CompressionLevelOption defines the compression level (default=Fast). +func CompressionLevelOption(level CompressionLevel) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("CompressionLevelOption(%s)", level) + return lz4errors.Error(s) + case *Writer: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +func onBlockDone(int) {} + +// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when is has been compressed, +// for a Reader, it is when it has been uncompressed. +func OnBlockDoneOption(handler func(size int)) Option { + if handler == nil { + handler = onBlockDone + } + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String()) + return lz4errors.Error(s) + case *Writer: + rw.handler = handler + return nil + case *Reader: + rw.handler = handler + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// LegacyOption provides support for writing LZ4 frames in the legacy format. +// +// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame. +// +// NB. 
compressed Linux kernel images use a tweaked LZ4 legacy format where +// the compressed stream is followed by the original (uncompressed) size of +// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf). +// This is also supported as a special case. +func LegacyOption(legacy bool) Option { + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("LegacyOption(%v)", legacy) + return lz4errors.Error(s) + case *Writer: + rw.legacy = legacy + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go new file mode 100644 index 000000000..2de814909 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go @@ -0,0 +1,92 @@ +// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT. + +package lz4 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Block64Kb-65536] + _ = x[Block256Kb-262144] + _ = x[Block1Mb-1048576] + _ = x[Block4Mb-4194304] +} + +const ( + _BlockSize_name_0 = "Block64Kb" + _BlockSize_name_1 = "Block256Kb" + _BlockSize_name_2 = "Block1Mb" + _BlockSize_name_3 = "Block4Mb" +) + +func (i BlockSize) String() string { + switch { + case i == 65536: + return _BlockSize_name_0 + case i == 262144: + return _BlockSize_name_1 + case i == 1048576: + return _BlockSize_name_2 + case i == 4194304: + return _BlockSize_name_3 + default: + return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Fast-0] + _ = x[Level1-512] + _ = x[Level2-1024] + _ = x[Level3-2048] + _ = x[Level4-4096] + _ = x[Level5-8192] + _ = x[Level6-16384] + _ = x[Level7-32768] + _ = x[Level8-65536] + _ = x[Level9-131072] +} + +const ( + _CompressionLevel_name_0 = "Fast" + _CompressionLevel_name_1 = "Level1" + _CompressionLevel_name_2 = "Level2" + _CompressionLevel_name_3 = "Level3" + _CompressionLevel_name_4 = "Level4" + _CompressionLevel_name_5 = "Level5" + _CompressionLevel_name_6 = "Level6" + _CompressionLevel_name_7 = "Level7" + _CompressionLevel_name_8 = "Level8" + _CompressionLevel_name_9 = "Level9" +) + +func (i CompressionLevel) String() string { + switch { + case i == 0: + return _CompressionLevel_name_0 + case i == 512: + return _CompressionLevel_name_1 + case i == 1024: + return _CompressionLevel_name_2 + case i == 2048: + return _CompressionLevel_name_3 + case i == 4096: + return _CompressionLevel_name_4 + case i == 8192: + return _CompressionLevel_name_5 + case i == 16384: + return _CompressionLevel_name_6 + case i == 32768: + return _CompressionLevel_name_7 + case i == 65536: + return _CompressionLevel_name_8 + case i == 131072: + return _CompressionLevel_name_9 + default: + return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go new file mode 100644 index 000000000..275daad7c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/reader.go @@ -0,0 +1,275 @@ +package lz4 + +import ( + "bytes" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +var readerStates = []aState{ + noState: newState, + errorState: newState, + newState: readState, + readState: closedState, + closedState: newState, +} + +// NewReader returns a new LZ4 frame decoder. +func NewReader(r io.Reader) *Reader { + return newReader(r, false) +} + +func newReader(r io.Reader, legacy bool) *Reader { + zr := &Reader{frame: lz4stream.NewFrame()} + zr.state.init(readerStates) + _ = zr.Apply(DefaultConcurrency, defaultOnBlockDone) + zr.Reset(r) + return zr +} + +// Reader allows reading an LZ4 stream. +type Reader struct { + state _State + src io.Reader // source reader + num int // concurrency level + frame *lz4stream.Frame // frame being read + data []byte // block buffer allocated in non concurrent mode + reads chan []byte // pending data + idx int // size of pending data + handler func(int) + cum uint32 + dict []byte +} + +func (*Reader) private() {} + +func (r *Reader) Apply(options ...Option) (err error) { + defer r.state.check(&err) + switch r.state.state { + case newState: + case errorState: + return r.state.err + default: + return lz4errors.ErrOptionClosedOrError + } + for _, o := range options { + if err = o(r); err != nil { + return + } + } + return +} + +// Size returns the size of the underlying uncompressed data, if set in the stream. +func (r *Reader) Size() int { + switch r.state.state { + case readState, closedState: + if r.frame.Descriptor.Flags.Size() { + return int(r.frame.Descriptor.ContentSize) + } + } + return 0 +} + +func (r *Reader) isNotConcurrent() bool { + return r.num == 1 +} + +func (r *Reader) init() error { + err := r.frame.ParseHeaders(r.src) + if err != nil { + return err + } + if !r.frame.Descriptor.Flags.BlockIndependence() { + // We can't decompress dependent blocks concurrently. 
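+		// (With dependent blocks, each block may reference the previous blocks' output,
+		// so they must be decoded sequentially.)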
+ // Instead of throwing an error to the user, silently drop concurrency + r.num = 1 + } + data, err := r.frame.InitR(r.src, r.num) + if err != nil { + return err + } + r.reads = data + r.idx = 0 + size := r.frame.Descriptor.Flags.BlockSizeIndex() + r.data = size.Get() + r.cum = 0 + return nil +} + +func (r *Reader) Read(buf []byte) (n int, err error) { + defer r.state.check(&err) + switch r.state.state { + case readState: + case closedState, errorState: + return 0, r.state.err + case newState: + // First initialization. + if err = r.init(); r.state.next(err) { + return + } + default: + return 0, r.state.fail() + } + for len(buf) > 0 { + var bn int + if r.idx == 0 { + if r.isNotConcurrent() { + bn, err = r.read(buf) + } else { + lz4block.Put(r.data) + r.data = <-r.reads + if len(r.data) == 0 { + // No uncompressed data: something went wrong or we are done. + err = r.frame.Blocks.ErrorR() + } + } + switch err { + case nil: + case io.EOF: + if er := r.frame.CloseR(r.src); er != nil { + err = er + } + lz4block.Put(r.data) + r.data = nil + return + default: + return + } + } + if bn == 0 { + // Fill buf with buffered data. + bn = copy(buf, r.data[r.idx:]) + r.idx += bn + if r.idx == len(r.data) { + // All data read, get ready for the next Read. + r.idx = 0 + } + } + buf = buf[bn:] + n += bn + r.handler(bn) + } + return +} + +// read uncompresses the next block as follow: +// - if buf has enough room, the block is uncompressed into it directly +// and the lenght of used space is returned +// - else, the uncompress data is stored in r.data and 0 is returned +func (r *Reader) read(buf []byte) (int, error) { + block := r.frame.Blocks.Block + _, err := block.Read(r.frame, r.src, r.cum) + if err != nil { + return 0, err + } + var direct bool + dst := r.data[:cap(r.data)] + if len(buf) >= len(dst) { + // Uncompress directly into buf. + direct = true + dst = buf + } + dst, err = block.Uncompress(r.frame, dst, r.dict, true) + if err != nil { + return 0, err + } + if !r.frame.Descriptor.Flags.BlockIndependence() { + if len(r.dict)+len(dst) > 128*1024 { + preserveSize := 64*1024 - len(dst) + if preserveSize < 0 { + preserveSize = 0 + } + r.dict = r.dict[len(r.dict)-preserveSize:] + } + r.dict = append(r.dict, dst...) + } + r.cum += uint32(len(dst)) + if direct { + return len(dst), nil + } + r.data = dst + return 0, nil +} + +// Reset clears the state of the Reader r such that it is equivalent to its +// initial state from NewReader, but instead reading from reader. +// No access to reader is performed. +func (r *Reader) Reset(reader io.Reader) { + if r.data != nil { + lz4block.Put(r.data) + r.data = nil + } + r.frame.Reset(r.num) + r.state.reset() + r.src = reader + r.reads = nil +} + +// WriteTo efficiently uncompresses the data from the Reader underlying source to w. +func (r *Reader) WriteTo(w io.Writer) (n int64, err error) { + switch r.state.state { + case closedState, errorState: + return 0, r.state.err + case newState: + if err = r.init(); r.state.next(err) { + return + } + default: + return 0, r.state.fail() + } + defer r.state.nextd(&err) + + var data []byte + if r.isNotConcurrent() { + size := r.frame.Descriptor.Flags.BlockSizeIndex() + data = size.Get() + defer lz4block.Put(data) + } + for { + var bn int + var dst []byte + if r.isNotConcurrent() { + bn, err = r.read(data) + dst = data[:bn] + } else { + lz4block.Put(dst) + dst = <-r.reads + bn = len(dst) + if bn == 0 { + // No uncompressed data: something went wrong or we are done. 
+ err = r.frame.Blocks.ErrorR() + } + } + switch err { + case nil: + case io.EOF: + err = r.frame.CloseR(r.src) + return + default: + return + } + r.handler(bn) + bn, err = w.Write(dst) + n += int64(bn) + if err != nil { + return + } + } +} + +// ValidFrameHeader returns a bool indicating if the given bytes slice matches a LZ4 header. +func ValidFrameHeader(in []byte) (bool, error) { + f := lz4stream.NewFrame() + err := f.ParseHeaders(bytes.NewReader(in)) + if err == nil { + return true, nil + } + if err == lz4errors.ErrInvalidFrame { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go new file mode 100644 index 000000000..d94f04d05 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/state.go @@ -0,0 +1,75 @@ +package lz4 + +import ( + "errors" + "fmt" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go + +const ( + noState aState = iota // uninitialized reader + errorState // unrecoverable error encountered + newState // instantiated object + readState // reading data + writeState // writing data + closedState // all done +) + +type ( + aState uint8 + _State struct { + states []aState + state aState + err error + } +) + +func (s *_State) init(states []aState) { + s.states = states + s.state = states[0] +} + +func (s *_State) reset() { + s.state = s.states[0] + s.err = nil +} + +// next sets the state to the next one unless it is passed a non nil error. +// It returns whether or not it is in error. +func (s *_State) next(err error) bool { + if err != nil { + s.err = fmt.Errorf("%s: %w", s.state, err) + s.state = errorState + return true + } + s.state = s.states[s.state] + return false +} + +// nextd is like next but for defers. +func (s *_State) nextd(errp *error) bool { + return errp != nil && s.next(*errp) +} + +// check sets s in error if not already in error and if the error is not nil or io.EOF, +func (s *_State) check(errp *error) { + if s.state == errorState || errp == nil { + return + } + if err := *errp; err != nil { + s.err = fmt.Errorf("%w[%s]", err, s.state) + if !errors.Is(err, io.EOF) { + s.state = errorState + } + } +} + +func (s *_State) fail() error { + s.state = errorState + s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state) + return s.err +} diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go new file mode 100644 index 000000000..75fb82892 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT. + +package lz4 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[noState-0] + _ = x[errorState-1] + _ = x[newState-2] + _ = x[readState-3] + _ = x[writeState-4] + _ = x[closedState-5] +} + +const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState" + +var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55} + +func (i aState) String() string { + if i >= aState(len(_aState_index)-1) { + return "aState(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _aState_name[_aState_index[i]:_aState_index[i+1]] +} diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go new file mode 100644 index 000000000..77699f2b5 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -0,0 +1,238 @@ +package lz4 + +import ( + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +var writerStates = []aState{ + noState: newState, + newState: writeState, + writeState: closedState, + closedState: newState, + errorState: newState, +} + +// NewWriter returns a new LZ4 frame encoder. +func NewWriter(w io.Writer) *Writer { + zw := &Writer{frame: lz4stream.NewFrame()} + zw.state.init(writerStates) + _ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone) + zw.Reset(w) + return zw +} + +// Writer allows writing an LZ4 stream. +type Writer struct { + state _State + src io.Writer // destination writer + level lz4block.CompressionLevel // how hard to try + num int // concurrency level + frame *lz4stream.Frame // frame being built + data []byte // pending data + idx int // size of pending data + handler func(int) + legacy bool +} + +func (*Writer) private() {} + +func (w *Writer) Apply(options ...Option) (err error) { + defer w.state.check(&err) + switch w.state.state { + case newState: + case errorState: + return w.state.err + default: + return lz4errors.ErrOptionClosedOrError + } + w.Reset(w.src) + for _, o := range options { + if err = o(w); err != nil { + return + } + } + return +} + +func (w *Writer) isNotConcurrent() bool { + return w.num == 1 +} + +// init sets up the Writer when in newState. It does not change the Writer state. +func (w *Writer) init() error { + w.frame.InitW(w.src, w.num, w.legacy) + size := w.frame.Descriptor.Flags.BlockSizeIndex() + w.data = size.Get() + w.idx = 0 + return w.frame.Descriptor.Write(w.frame, w.src) +} + +func (w *Writer) Write(buf []byte) (n int, err error) { + defer w.state.check(&err) + switch w.state.state { + case writeState: + case closedState, errorState: + return 0, w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } + default: + return 0, w.state.fail() + } + + zn := len(w.data) + for len(buf) > 0 { + if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err = w.write(buf[:zn], false); err != nil { + return + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(w.data[w.idx:], buf) + n += m + w.idx += m + buf = buf[m:] + + if w.idx < len(w.data) { + // Buffer not filled. + return + } + + // Buffer full. 
+ if err = w.write(w.data, true); err != nil { + return + } + if !w.isNotConcurrent() { + size := w.frame.Descriptor.Flags.BlockSizeIndex() + w.data = size.Get() + } + w.idx = 0 + } + return +} + +func (w *Writer) write(data []byte, safe bool) error { + if w.isNotConcurrent() { + block := w.frame.Blocks.Block + err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src) + w.handler(len(block.Data)) + return err + } + c := make(chan *lz4stream.FrameDataBlock) + w.frame.Blocks.Blocks <- c + go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) { + b := lz4stream.NewFrameDataBlock(w.frame) + c <- b.Compress(w.frame, data, w.level) + <-c + w.handler(len(b.Data)) + b.Close(w.frame) + if safe { + // safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed + lz4block.Put(data) + } + }(c, data, safe) + + return nil +} + +// Flush any buffered data to the underlying writer immediately. +func (w *Writer) Flush() (err error) { + switch w.state.state { + case writeState: + case errorState: + return w.state.err + default: + return nil + } + + if w.idx > 0 { + // Flush pending data, disable w.data freeing as it is done later on. + if err = w.write(w.data[:w.idx], false); err != nil { + return err + } + w.idx = 0 + } + return nil +} + +// Close closes the Writer, flushing any unwritten data to the underlying writer +// without closing it. +func (w *Writer) Close() error { + if err := w.Flush(); err != nil { + return err + } + err := w.frame.CloseW(w.src, w.num) + // It is now safe to free the buffer. + if w.data != nil { + lz4block.Put(w.data) + w.data = nil + } + return err +} + +// Reset clears the state of the Writer w such that it is equivalent to its +// initial state from NewWriter, but instead writing to writer. +// Reset keeps the previous options unless overwritten by the supplied ones. +// No access to writer is performed. +// +// w.Close must be called before Reset or pending data may be dropped. +func (w *Writer) Reset(writer io.Writer) { + w.frame.Reset(w.num) + w.state.reset() + w.src = writer +} + +// ReadFrom efficiently reads from r and compressed into the Writer destination. +func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { + switch w.state.state { + case closedState, errorState: + return 0, w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } + default: + return 0, w.state.fail() + } + defer w.state.check(&err) + + size := w.frame.Descriptor.Flags.BlockSizeIndex() + var done bool + var rn int + data := size.Get() + if w.isNotConcurrent() { + // Keep the same buffer for the whole process. + defer lz4block.Put(data) + } + for !done { + rn, err = io.ReadFull(r, data) + switch err { + case nil: + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + done = true + default: + return + } + n += int64(rn) + err = w.write(data[:rn], true) + if err != nil { + return + } + w.handler(rn) + if !done && !w.isNotConcurrent() { + // The buffer will be returned automatically by go routines (safe=true) + // so get a new one fo the next round. + data = size.Get() + } + } + return +} diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go new file mode 100644 index 000000000..163645b86 --- /dev/null +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -0,0 +1,370 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package httpproxy provides support for HTTP proxy determination +// based on environment variables, as provided by net/http's +// ProxyFromEnvironment function. +// +// The API is not subject to the Go 1 compatibility promise and may change at +// any time. +package httpproxy + +import ( + "errors" + "fmt" + "net" + "net/url" + "os" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +// Config holds configuration for HTTP proxy settings. See +// FromEnvironment for details. +type Config struct { + // HTTPProxy represents the value of the HTTP_PROXY or + // http_proxy environment variable. It will be used as the proxy + // URL for HTTP requests and HTTPS requests unless overridden by + // HTTPSProxy or NoProxy. + HTTPProxy string + + // HTTPSProxy represents the HTTPS_PROXY or https_proxy + // environment variable. It will be used as the proxy URL for + // HTTPS requests unless overridden by NoProxy. + HTTPSProxy string + + // NoProxy represents the NO_PROXY or no_proxy environment + // variable. It specifies a string that contains comma-separated values + // specifying hosts that should be excluded from proxying. Each value is + // represented by an IP address prefix (1.2.3.4), an IP address prefix in + // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*). + // An IP address prefix and domain name can also include a literal port + // number (1.2.3.4:80). + // A domain name matches that name and all subdomains. A domain name with + // a leading "." matches subdomains only. For example "foo.com" matches + // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com". + // A single asterisk (*) indicates that no proxying should be done. + // A best effort is made to parse the string and errors are + // ignored. + NoProxy string + + // CGI holds whether the current process is running + // as a CGI handler (FromEnvironment infers this from the + // presence of a REQUEST_METHOD environment variable). + // When this is set, ProxyForURL will return an error + // when HTTPProxy applies, because a client could be + // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy. + CGI bool +} + +// config holds the parsed configuration for HTTP proxy settings. +type config struct { + // Config represents the original configuration as defined above. + Config + + // httpsProxy is the parsed URL of the HTTPSProxy if defined. + httpsProxy *url.URL + + // httpProxy is the parsed URL of the HTTPProxy if defined. + httpProxy *url.URL + + // ipMatchers represent all values in the NoProxy that are IP address + // prefixes or an IP address in CIDR notation. + ipMatchers []matcher + + // domainMatchers represent all values in the NoProxy that are a domain + // name or hostname & domain name + domainMatchers []matcher +} + +// FromEnvironment returns a Config instance populated from the +// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the +// lowercase versions thereof). HTTPS_PROXY takes precedence over +// HTTP_PROXY for https requests. +// +// The environment values may be either a complete URL or a +// "host[:port]", in which case the "http" scheme is assumed. An error +// is returned if the value is a different form. 
+func FromEnvironment() *Config { + return &Config{ + HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"), + HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"), + NoProxy: getEnvAny("NO_PROXY", "no_proxy"), + CGI: os.Getenv("REQUEST_METHOD") != "", + } +} + +func getEnvAny(names ...string) string { + for _, n := range names { + if val := os.Getenv(n); val != "" { + return val + } + } + return "" +} + +// ProxyFunc returns a function that determines the proxy URL to use for +// a given request URL. Changing the contents of cfg will not affect +// proxy functions created earlier. +// +// A nil URL and nil error are returned if no proxy is defined in the +// environment, or a proxy should not be used for the given request, as +// defined by NO_PROXY. +// +// As a special case, if req.URL.Host is "localhost" (with or without a +// port number), then a nil URL and nil error will be returned. +func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) { + // Preprocess the Config settings for more efficient evaluation. + cfg1 := &config{ + Config: *cfg, + } + cfg1.init() + return cfg1.proxyForURL +} + +func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) { + var proxy *url.URL + if reqURL.Scheme == "https" { + proxy = cfg.httpsProxy + } + if proxy == nil { + proxy = cfg.httpProxy + if proxy != nil && cfg.CGI { + return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy") + } + } + if proxy == nil { + return nil, nil + } + if !cfg.useProxy(canonicalAddr(reqURL)) { + return nil, nil + } + + return proxy, nil +} + +func parseProxy(proxy string) (*url.URL, error) { + if proxy == "" { + return nil, nil + } + + proxyURL, err := url.Parse(proxy) + if err != nil || + (proxyURL.Scheme != "http" && + proxyURL.Scheme != "https" && + proxyURL.Scheme != "socks5") { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. If not, we fall + // through and complain about the original one. + if proxyURL, err := url.Parse("http://" + proxy); err == nil { + return proxyURL, nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + return proxyURL, nil +} + +// useProxy reports whether requests to addr should use a proxy, +// according to the NO_PROXY or no_proxy environment variable. +// addr is always a canonicalAddr with a host and port. 
+func (cfg *config) useProxy(addr string) bool { + if len(addr) == 0 { + return true + } + host, port, err := net.SplitHostPort(addr) + if err != nil { + return false + } + if host == "localhost" { + return false + } + ip := net.ParseIP(host) + if ip != nil { + if ip.IsLoopback() { + return false + } + } + + addr = strings.ToLower(strings.TrimSpace(host)) + + if ip != nil { + for _, m := range cfg.ipMatchers { + if m.match(addr, port, ip) { + return false + } + } + } + for _, m := range cfg.domainMatchers { + if m.match(addr, port, ip) { + return false + } + } + return true +} + +func (c *config) init() { + if parsed, err := parseProxy(c.HTTPProxy); err == nil { + c.httpProxy = parsed + } + if parsed, err := parseProxy(c.HTTPSProxy); err == nil { + c.httpsProxy = parsed + } + + for _, p := range strings.Split(c.NoProxy, ",") { + p = strings.ToLower(strings.TrimSpace(p)) + if len(p) == 0 { + continue + } + + if p == "*" { + c.ipMatchers = []matcher{allMatch{}} + c.domainMatchers = []matcher{allMatch{}} + return + } + + // IPv4/CIDR, IPv6/CIDR + if _, pnet, err := net.ParseCIDR(p); err == nil { + c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet}) + continue + } + + // IPv4:port, [IPv6]:port + phost, pport, err := net.SplitHostPort(p) + if err == nil { + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + if phost[0] == '[' && phost[len(phost)-1] == ']' { + phost = phost[1 : len(phost)-1] + } + } else { + phost = p + } + // IPv4, IPv6 + if pip := net.ParseIP(phost); pip != nil { + c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport}) + continue + } + + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + + // domain.com or domain.com:80 + // foo.com matches bar.foo.com + // .domain.com or .domain.com:port + // *.domain.com or *.domain.com:port + if strings.HasPrefix(phost, "*.") { + phost = phost[1:] + } + matchHost := false + if phost[0] != '.' { + matchHost = true + phost = "." + phost + } + c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost}) + } +} + +var portMap = map[string]string{ + "http": "80", + "https": "443", + "socks5": "1080", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +func canonicalAddr(url *url.URL) string { + addr := url.Hostname() + if v, err := idnaASCII(addr); err == nil { + addr = v + } + port := url.Port() + if port == "" { + port = portMap[url.Scheme] + } + return net.JoinHostPort(addr, port) +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +func idnaASCII(v string) (string, error) { + // TODO: Consider removing this check after verifying performance is okay. + // Right now punycode verification, length checks, context checks, and the + // permissible character tests are all omitted. It also prevents the ToASCII + // call from salvaging an invalid IDN, when possible. As a result it may be + // possible to have two IDNs that appear identical to the user where the + // ASCII-only version causes an error downstream whereas the non-ASCII + // version does not. + // Note that for correct ASCII IDNs ToASCII will only do considerably more + // work, but it will not cause an allocation. 
+ if isASCII(v) { + return v, nil + } + return idna.Lookup.ToASCII(v) +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// matcher represents the matching rule for a given value in the NO_PROXY list +type matcher interface { + // match returns true if the host and optional port or ip and optional port + // are allowed + match(host, port string, ip net.IP) bool +} + +// allMatch matches on all possible inputs +type allMatch struct{} + +func (a allMatch) match(host, port string, ip net.IP) bool { + return true +} + +type cidrMatch struct { + cidr *net.IPNet +} + +func (m cidrMatch) match(host, port string, ip net.IP) bool { + return m.cidr.Contains(ip) +} + +type ipMatch struct { + ip net.IP + port string +} + +func (m ipMatch) match(host, port string, ip net.IP) bool { + if m.ip.Equal(ip) { + return m.port == "" || m.port == port + } + return false +} + +type domainMatch struct { + host string + port string + + matchHost bool +} + +func (m domainMatch) match(host, port string, ip net.IP) bool { + if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { + return m.port == "" || m.port == port + } + return false +} diff --git a/vendor/modules.txt b/vendor/modules.txt index fc188678e..04ed063ce 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -68,7 +68,7 @@ github.com/gorilla/handlers github.com/honestbee/jobq # github.com/jmespath/go-jmespath v0.3.0 github.com/jmespath/go-jmespath -# github.com/longhorn/backupstore v0.0.0-20220913112826-5f5c95274f2a +# github.com/longhorn/backupstore v0.0.0-20230125201126-0c2bd550ebc3 github.com/longhorn/backupstore github.com/longhorn/backupstore/fsops github.com/longhorn/backupstore/http @@ -96,6 +96,12 @@ github.com/longhorn/sparse-tools/sparse github.com/mschoch/smat # github.com/philhofer/fwd v1.0.0 github.com/philhofer/fwd +# github.com/pierrec/lz4/v4 v4.1.17 +github.com/pierrec/lz4/v4 +github.com/pierrec/lz4/v4/internal/lz4block +github.com/pierrec/lz4/v4/internal/lz4errors +github.com/pierrec/lz4/v4/internal/lz4stream +github.com/pierrec/lz4/v4/internal/xxh32 # github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/rancher/go-fibmap v0.0.0-20160418233256-5fc9f8c1ed47 @@ -115,6 +121,7 @@ github.com/willf/bitset # golang.org/x/net v0.0.0-20200202094626-16171245cfb2 golang.org/x/net/context golang.org/x/net/http/httpguts +golang.org/x/net/http/httpproxy golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna
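
Editor's note (not part of the patch): the newly vendored github.com/pierrec/lz4/v4 package above exposes the frame Reader and Writer added in reader.go and writer.go as ordinary io wrappers. The following standalone sketch is illustrative only; it round-trips a payload through lz4.NewWriter and lz4.NewReader, the two constructors defined in the vendored files, and is not code from this change.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4/v4"
)

func main() {
	// Compress into an in-memory buffer using the LZ4 frame format.
	var compressed bytes.Buffer
	zw := lz4.NewWriter(&compressed)
	if _, err := io.Copy(zw, strings.NewReader("hello, lz4 frame")); err != nil {
		panic(err)
	}
	// Close flushes any buffered data and writes the frame footer.
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress the frame back into plain bytes.
	zr := lz4.NewReader(&compressed)
	plain, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain))
}

Close must be called on the Writer before reading the buffer back, since the frame footer (and any pending block) is only written at that point.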
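
Editor's note (not part of the patch): the vendored golang.org/x/net/http/httpproxy package above is typically consumed by building a proxy function from the process environment and adapting it onto an http.Transport. The sketch below is a hypothetical illustration of that FromEnvironment/ProxyFunc flow documented in proxy.go; the helper name and client wiring are assumptions for the example, not code introduced by this change.

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"golang.org/x/net/http/httpproxy"
)

// newProxyAwareClient is a hypothetical helper: it parses HTTP_PROXY,
// HTTPS_PROXY and NO_PROXY via httpproxy.FromEnvironment and installs the
// resulting per-URL proxy decision on a standard http.Client.
func newProxyAwareClient() *http.Client {
	proxyFn := httpproxy.FromEnvironment().ProxyFunc()
	transport := &http.Transport{
		// http.Transport expects a per-request callback, so adapt the
		// per-URL function returned by the vendored package.
		Proxy: func(req *http.Request) (*url.URL, error) {
			return proxyFn(req.URL)
		},
	}
	return &http.Client{Transport: transport}
}

func main() {
	client := newProxyAwareClient()
	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}

Because ProxyFunc captures the configuration at call time, the environment is read once when the client is constructed rather than on every request.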